repo_name (stringlengths 5-92) | path (stringlengths 4-221) | copies (stringclasses, 19 values) | size (stringlengths 4-6) | content (stringlengths 766-896k) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Neetuj/softlayer-python
|
SoftLayer/CLI/block/snapshot_list.py
|
1
|
1781
|
"""List block storage snapshots."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import columns as column_helper
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
COLUMNS = [
column_helper.Column(
'id',
('snapshots', 'id',),
mask='snapshots.id'),
column_helper.Column('name', ('snapshots', 'notes',),
mask='snapshots.notes'),
column_helper.Column('created',
('snapshots', 'snapshotCreationTimestamp',),
mask='snapshots.snapshotCreationTimestamp'),
column_helper.Column('size_bytes', ('snapshots', 'snapshotSizeBytes',),
mask='snapshots.snapshotSizeBytes'),
]
DEFAULT_COLUMNS = [
'id',
'name',
'created',
'size_bytes'
]
@click.command()
@click.argument('volume_id')
@click.option('--sortby', help='Column to sort by',
default='created')
@click.option('--columns',
callback=column_helper.get_formatter(COLUMNS),
help='Columns to display. Options: {0}'.format(
', '.join(column.name for column in COLUMNS)),
default=','.join(DEFAULT_COLUMNS))
@environment.pass_env
def cli(env, sortby, columns, volume_id):
"""List block storage snapshots."""
block_manager = SoftLayer.BlockStorageManager(env.client)
snapshots = block_manager.get_block_volume_snapshot_list(
volume_id=volume_id,
mask=columns.mask(),
)
table = formatting.Table(columns.columns)
table.sortby = sortby
for snapshot in snapshots:
table.add_row([value or formatting.blank()
for value in columns.row(snapshot)])
env.fout(table)
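# Illustrative usage (an assumption, not part of the original module): with the
# SoftLayer CLI installed, this command is reached through the `slcli` entry
# point, for example:
#   slcli block snapshot-list 12345678 --sortby created --columns id,name,created
# where 12345678 stands in for a real block storage volume id.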
|
mit
| 8,083,339,656,087,134,000 | 29.706897 | 75 | 0.613139 | false | 4.200472 | false | false | false |
AndriesSHP/Gellish
|
CommunicatorSource/GellishDict.py
|
1
|
5979
|
class GellishDict(dict):
''' A dictionary for names in a context that refer to the denoted concepts.
The roles of the names are indicated by alias relation_type_UIDs, such as for <is a code for>:
key = name_in_context(tuple) = (languageUID, communityUID, name).
value = value_triple = (UID, naming_relation_type_UID, description)
'''
def __init__(self, name):
self.name = name
def add_name_in_context(self, name_in_context, value_triple):
if name_in_context not in self:
#self.key = name_in_context(tuple) = (languageUID, communityUID, name).
#self.value = value_triple = (UID, naming_relation_type_UID, description)
self[name_in_context] = value_triple
print('add: ',name_in_context, self[name_in_context])
else:
value_triple2 = self.find_anything(name_in_context)
print('Error: Name in context: %s, %s is already known by uid (%s)' % (name_in_context, value_triple, value_triple2))
def find_anything(self, q_name_in_context):
if q_name_in_context in self:
print('Found: ', q_name_in_context, self[q_name_in_context])
return(self[q_name_in_context])
else:
print('Not found: ',q_name_in_context)
return(None)
def filter_on_key(self, q_string, string_commonality):
"""Search for q-string in the third part of the key of the dictionary,
where key = term_in_context = (language_uid, community_uid, name).
Returns a list of items (key, value_triple) that contain q_string as the third part of the key.
Example item: term_in_context, value_triple = ((910036, 193259, "anything"), (730000, 5117, 'descr'))
"""
# a list of tuples of [(key0, val0]), (key1, val1), ...]
items = self.items()
result_list = []
# create a filter function that returns true if
# 0) q_string is equal to the third position of the first(key) field of an item:
# case sensitive identical
# 1) q_string is in that field:
# case sensitive partially identical
# 2) q_string is in that field and starts with that string
# case sensitive front end identical
# 3), 4), 5) idem, but case insensitive
string_commonalities = ['csi', 'cspi', 'csfi', 'cii', 'cipi', 'cifi']
if string_commonality == string_commonalities[0]:
filt = lambda item: q_string == item[0][2]
elif string_commonality == string_commonalities[1]:
filt = lambda item: q_string in item[0][2]
elif string_commonality == string_commonalities[2]:
filt = lambda item: item[0][2].startswith(q_string)
elif string_commonality == string_commonalities[3]:
filt = lambda item: q_string.lower() == item[0][2].lower()
elif string_commonality == string_commonalities[4]:
filt = lambda item: q_string.lower() in item[0][2].lower()
elif string_commonality == string_commonalities[5]:
filt = lambda item: item[0][2].lower().startswith(q_string.lower())
else:
print('Error: string commonality %s unknown' % (string_commonality))
filt = lambda item: False  # match nothing so an empty result is returned
# use the filter to create a *list* of items that match the filter
result_list = filter(filt, items)
# convert the list to a Gellish dictionary
#result = GellishDict(result_list)
# and return the resulting list of filtered items
return(result_list)
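# Illustrative sketch (assuming a GellishDict populated as in the demo below):
#   d.filter_on_key('pump', 'cii') returns the items whose name part of the key
#   matches 'pump' case-insensitively, e.g.
#   ((1, 5, 'pump'), (4, 1, 'that is intended to ...')) and
#   ((1, 5, 'Pump'), (4, 1, 'synonym of pump')).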
class Preferences(dict):
'''A dictionary for preferences and defaults for the owner of the table of preferences'''
def __init__(self, dict_name):
self.name = dict_name
#----------------------------------------------------------------------------
if __name__ == "__main__":
d = GellishDict('Dictionary')
d[1, 4, "anything"] = (730000, 5117, 'what can be thought of')
d[1, 4, "THING"] = (2,1, 'thing')
d[1, 5, "pump"] = (4,1, 'that is intended to ...')
d[2, 5, "pomp"] = (4,1, 'die bedoeld is om ...')
d[3, 5, "Pumpe"] = (4,2, 'der is geeignet zu ...')
d[1, 5, "Pump"] = (4,1, 'synonym of pump')
print('Dictionary-0: ',d)
n = (2, 5, "iets")
v = (730000, 5117, 'waar aan gedacht kan worden.')
d.add_name_in_context(n,v)
print('Dictionary-1: ',d)
n2 = (2, 5, "iets")
v2 = (1, 1, 'verkeerde UID')
d.add_name_in_context(n2,v2)
print('Dictionary-2: ',d)
# print all items that have "pump" as the third field in the key:
candidates = d.filter_on_key("pump",'csi')
for candidate in candidates:
print ("case sensitive identical (pump): ",candidate)
# print all items that contain "Pu" at the front end of the third field of the key:
candidates = d.filter_on_key("Pu",'csfi')
for candidate in candidates:
print ("case sensitive front end identical (Pu): ",candidate)
# print all items that contain "ump" as a string somewhere in the third field of the key:
candidates = d.filter_on_key("ump",'cspi')
for candidate in candidates:
print ("case sensitive partially identical (ump): ",candidate)
# print all items that have "pump" as the third field in the key:
candidates = d.filter_on_key("pump",'cii')
for candidate in candidates:
print ("case insensitive identical (pump): ",candidate)
# print all items that contain "pu" at the front end of the third field of the key:
candidates = d.filter_on_key("pu",'cifi')
for candidate in candidates:
print ("case insensitive front end identical (pu): ",candidate)
# print all items that contain "i" as a string somewhere in the third field of the key:
candidates = d.filter_on_key("i",'cipi')
for candidate in candidates:
print ("case insensitive partially identical (i): ",candidate)
|
gpl-3.0
| 2,889,238,609,246,712,000 | 46.07874 | 129 | 0.597592 | false | 3.656881 | false | false | false |
jslootbeek/roundware-server
|
roundware/urls.py
|
1
|
1451
|
# Roundware Server is released under the GNU Affero General Public License v3.
# See COPYRIGHT.txt, AUTHORS.txt, and LICENSE.txt in the project root directory.
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, url
# Loading static files for debug mode
from django.conf.urls.static import static
from django.conf.urls import include
from django.contrib import admin
from adminplus.sites import AdminSitePlus
from roundware.rw import urls as rw_urls
admin.site = AdminSitePlus()
admin.sites.site = admin.site
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^tools/asset-map$', 'rw.views.asset_map'),
url(r'^tools/listen-map$', 'rw.views.listen_map'),
url(r'^dashboard/$', 'rw.views.chart_views'),
# V1 DRF API
url(r'^api/1/', include('roundware.api1.urls')),
# V2 RESTful DRF API
url(r'^api/2/', include('roundware.api2.urls')),
# Use Django Admin login as overall login
url(r'^accounts/login/$', 'django.contrib.auth.views.login',
{'template_name': 'admin/login.html'}),
url(r'^admin/', include(admin.site.urls)),
url(r'^rw/', include(rw_urls)),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
admin.site.site_header = 'Roundware Administration'
if settings.DEBUG:
import debug_toolbar
urlpatterns += patterns(
'',
url(r'^__debug__/', include(debug_toolbar.urls)),
)
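# Illustrative mapping (restating the patterns above, as an aid only): a request
# to /api/2/... is routed to roundware.api2.urls, /admin/ to the AdminSitePlus
# admin, and /accounts/login/ to the Django admin login view rendered with
# admin/login.html.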
|
agpl-3.0
| 6,197,231,107,093,475,000 | 28.02 | 80 | 0.694693 | false | 3.471292 | false | false | false |
ingenieroariel/geonode
|
geonode/base/models.py
|
1
|
49348
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import datetime
import math
import os
import re
import logging
import traceback
import uuid
import urllib
import urllib2
import cookielib
from geonode.decorators import on_ogc_backend
from pyproj import transform, Proj
from urlparse import urljoin, urlsplit
from django.db import models
from django.core import serializers
from django.db.models import Q, signals
from django.utils.translation import ugettext as _
from django.core.exceptions import ValidationError
from django.conf import settings
from django.contrib.staticfiles.templatetags import staticfiles
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.core.files.storage import default_storage as storage
from django.core.files.base import ContentFile
from django.contrib.gis.geos import GEOSGeometry
from mptt.models import MPTTModel, TreeForeignKey
from polymorphic.models import PolymorphicModel
from polymorphic.managers import PolymorphicManager
from agon_ratings.models import OverallRating
from geonode import geoserver
from geonode.base.enumerations import ALL_LANGUAGES, \
HIERARCHY_LEVELS, UPDATE_FREQUENCIES, \
DEFAULT_SUPPLEMENTAL_INFORMATION, LINK_TYPES
from geonode.utils import bbox_to_wkt
from geonode.utils import forward_mercator
from geonode.security.models import PermissionLevelMixin
from taggit.managers import TaggableManager, _TaggableManager
from taggit.models import TagBase, ItemBase
from treebeard.mp_tree import MP_Node
from geonode.people.enumerations import ROLE_VALUES
from oauthlib.common import generate_token
from oauth2_provider.models import AccessToken, get_application_model
logger = logging.getLogger(__name__)
class ContactRole(models.Model):
"""
ContactRole is an intermediate model to bind Profiles as Contacts to Resources and apply roles.
"""
resource = models.ForeignKey('ResourceBase', blank=True, null=True)
contact = models.ForeignKey(settings.AUTH_USER_MODEL)
role = models.CharField(
choices=ROLE_VALUES,
max_length=255,
help_text=_(
'function performed by the responsible '
'party'))
def clean(self):
"""
Make sure there is only one poc and author per resource
"""
if (self.role == self.resource.poc_role) or (
self.role == self.resource.metadata_author_role):
contacts = self.resource.contacts.filter(
contactrole__role=self.role)
if contacts.count() == 1:
# only allow this if we are updating the same contact
if self.contact != contacts.get():
raise ValidationError(
'There can be only one %s for a given resource' %
self.role)
if self.contact.user is None:
# verify that any unbound contact is only associated to one
# resource
bounds = ContactRole.objects.filter(contact=self.contact).count()
if bounds > 1:
raise ValidationError(
'There can be one and only one resource linked to an unbound contact')
elif bounds == 1:
# verify that if there was one already, it corresponds to this
# instance
if ContactRole.objects.filter(
contact=self.contact).get().id != self.id:
raise ValidationError(
'There can be one and only one resource linked to an unbound contact')
class Meta:
unique_together = (("contact", "resource", "role"),)
class TopicCategory(models.Model):
"""
Metadata about high-level geographic data thematic classification.
It should reflect a list of codes from TC211
See: http://www.isotc211.org/2005/resources/Codelist/gmxCodelists.xml
<CodeListDictionary gml:id="MD_MD_TopicCategoryCode">
"""
identifier = models.CharField(max_length=255, default='location')
description = models.TextField(default='')
gn_description = models.TextField(
'GeoNode description', default='', null=True)
is_choice = models.BooleanField(default=True)
fa_class = models.CharField(max_length=64, default='fa-times')
def __unicode__(self):
return u"{0}".format(self.gn_description)
class Meta:
ordering = ("identifier",)
verbose_name_plural = 'Metadata Topic Categories'
class SpatialRepresentationType(models.Model):
"""
Metadata information about the spatial representation type.
It should reflect a list of codes from TC211
See: http://www.isotc211.org/2005/resources/Codelist/gmxCodelists.xml
<CodeListDictionary gml:id="MD_SpatialRepresentationTypeCode">
"""
identifier = models.CharField(max_length=255, editable=False)
description = models.CharField(max_length=255, editable=False)
gn_description = models.CharField('GeoNode description', max_length=255)
is_choice = models.BooleanField(default=True)
def __unicode__(self):
return self.gn_description
class Meta:
ordering = ("identifier",)
verbose_name_plural = 'Metadata Spatial Representation Types'
class RegionManager(models.Manager):
def get_by_natural_key(self, code):
return self.get(code=code)
class Region(MPTTModel):
# objects = RegionManager()
code = models.CharField(max_length=50, unique=True)
name = models.CharField(max_length=255)
parent = TreeForeignKey(
'self',
null=True,
blank=True,
related_name='children')
# Save bbox values in the database.
# This is useful for spatial searches and for generating thumbnail images
# and metadata records.
bbox_x0 = models.DecimalField(
max_digits=19,
decimal_places=10,
blank=True,
null=True)
bbox_x1 = models.DecimalField(
max_digits=19,
decimal_places=10,
blank=True,
null=True)
bbox_y0 = models.DecimalField(
max_digits=19,
decimal_places=10,
blank=True,
null=True)
bbox_y1 = models.DecimalField(
max_digits=19,
decimal_places=10,
blank=True,
null=True)
srid = models.CharField(max_length=255, default='EPSG:4326')
def __unicode__(self):
return self.name
@property
def bbox(self):
"""BBOX is in the format: [x0,x1,y0,y1]."""
return [
self.bbox_x0,
self.bbox_x1,
self.bbox_y0,
self.bbox_y1,
self.srid]
@property
def bbox_string(self):
"""BBOX is in the format: [x0,y0,x1,y1]."""
return ",".join([str(self.bbox_x0), str(self.bbox_y0),
str(self.bbox_x1), str(self.bbox_y1)])
@property
def geographic_bounding_box(self):
"""BBOX is in the format: [x0,x1,y0,y1]."""
return bbox_to_wkt(
self.bbox_x0,
self.bbox_x1,
self.bbox_y0,
self.bbox_y1,
srid=self.srid)
class Meta:
ordering = ("name",)
verbose_name_plural = 'Metadata Regions'
class MPTTMeta:
order_insertion_by = ['name']
class RestrictionCodeType(models.Model):
"""
Metadata information about the spatial representation type.
It should reflect a list of codes from TC211
See: http://www.isotc211.org/2005/resources/Codelist/gmxCodelists.xml
<CodeListDictionary gml:id="MD_RestrictionCode">
"""
identifier = models.CharField(max_length=255, editable=False)
description = models.TextField(max_length=255, editable=False)
gn_description = models.TextField('GeoNode description', max_length=255)
is_choice = models.BooleanField(default=True)
def __unicode__(self):
return self.gn_description
class Meta:
ordering = ("identifier",)
verbose_name_plural = 'Metadata Restriction Code Types'
class Backup(models.Model):
identifier = models.CharField(max_length=255, editable=False)
name = models.CharField(max_length=100)
date = models.DateTimeField(auto_now_add=True, blank=True)
description = models.TextField(null=True, blank=True)
base_folder = models.CharField(max_length=100)
location = models.TextField(null=True, blank=True)
class Meta:
ordering = ("date", )
verbose_name_plural = 'Backups'
class License(models.Model):
identifier = models.CharField(max_length=255, editable=False)
name = models.CharField(max_length=100)
abbreviation = models.CharField(max_length=20, null=True, blank=True)
description = models.TextField(null=True, blank=True)
url = models.URLField(max_length=2000, null=True, blank=True)
license_text = models.TextField(null=True, blank=True)
def __unicode__(self):
return self.name
@property
def name_long(self):
if self.abbreviation is None or len(self.abbreviation) == 0:
return self.name
else:
return self.name + " (" + self.abbreviation + ")"
@property
def description_bullets(self):
if self.description is None or len(self.description) == 0:
return ""
else:
bullets = []
lines = self.description.split("\n")
for line in lines:
bullets.append("+ " + line)
return bullets
class Meta:
ordering = ("name", )
verbose_name_plural = 'Licenses'
class HierarchicalKeyword(TagBase, MP_Node):
node_order_by = ['name']
@classmethod
def dump_bulk_tree(cls, parent=None, keep_ids=True):
"""Dumps a tree branch to a python data structure."""
qset = cls._get_serializable_model().get_tree(parent)
ret, lnk = [], {}
for pyobj in qset:
serobj = serializers.serialize('python', [pyobj])[0]
# django's serializer stores the attributes in 'fields'
fields = serobj['fields']
depth = fields['depth']
fields['text'] = fields['name']
fields['href'] = fields['slug']
del fields['name']
del fields['slug']
del fields['path']
del fields['numchild']
del fields['depth']
if 'id' in fields:
# this happens immediately after a load_bulk
del fields['id']
newobj = {}
for field in fields:
newobj[field] = fields[field]
if keep_ids:
newobj['id'] = serobj['pk']
if (not parent and depth == 1) or\
(parent and depth == parent.depth):
ret.append(newobj)
else:
parentobj = pyobj.get_parent()
parentser = lnk[parentobj.pk]
if 'nodes' not in parentser:
parentser['nodes'] = []
parentser['nodes'].append(newobj)
lnk[pyobj.pk] = newobj
return ret
class TaggedContentItem(ItemBase):
content_object = models.ForeignKey('ResourceBase')
tag = models.ForeignKey('HierarchicalKeyword', related_name='keywords')
# see https://github.com/alex/django-taggit/issues/101
@classmethod
def tags_for(cls, model, instance=None):
if instance is not None:
return cls.tag_model().objects.filter(**{
'%s__content_object' % cls.tag_relname(): instance
})
return cls.tag_model().objects.filter(**{
'%s__content_object__isnull' % cls.tag_relname(): False
}).distinct()
class _HierarchicalTagManager(_TaggableManager):
def add(self, *tags):
str_tags = set([
t
for t in tags
if not isinstance(t, self.through.tag_model())
])
tag_objs = set(tags) - str_tags
# If str_tags has 0 elements Django actually optimizes that to not do a
# query. Malcolm is very smart.
existing = self.through.tag_model().objects.filter(
name__in=str_tags
)
tag_objs.update(existing)
for new_tag in str_tags - set(t.name for t in existing):
tag_objs.add(HierarchicalKeyword.add_root(name=new_tag))
for tag in tag_objs:
try:
self.through.objects.get_or_create(
tag=tag, **self._lookup_kwargs())
except Exception as e:
logger.exception(e)
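# Illustrative usage (an assumption about a typical caller, not part of the
# original module): for a ResourceBase instance `resource`,
#   resource.keywords.add('rivers', 'hydrology')
# reuses existing HierarchicalKeyword objects that match by name and creates
# any missing ones as root nodes before linking them to the resource.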
class Thesaurus(models.Model):
"""
Loadable thesaurus containing keywords in different languages
"""
identifier = models.CharField(
max_length=255,
null=False,
blank=False,
unique=True)
# read from the RDF file
title = models.CharField(max_length=255, null=False, blank=False)
# read from the RDF file
date = models.CharField(max_length=20, default='')
# read from the RDF file
description = models.TextField(max_length=255, default='')
slug = models.CharField(max_length=64, default='')
def __unicode__(self):
return u"{0}".format(self.identifier)
class Meta:
ordering = ("identifier",)
verbose_name_plural = 'Thesauri'
class ThesaurusKeyword(models.Model):
"""
Loadable thesaurus containing keywords in different languages
"""
# read from the RDF file
about = models.CharField(max_length=255, null=True, blank=True)
# read from the RDF file
alt_label = models.CharField(
max_length=255,
default='',
null=True,
blank=True)
thesaurus = models.ForeignKey('Thesaurus', related_name='thesaurus')
def __unicode__(self):
return u"{0}".format(self.alt_label)
class Meta:
ordering = ("alt_label",)
verbose_name_plural = 'Thesaurus Keywords'
unique_together = (("thesaurus", "alt_label"),)
class ThesaurusKeywordLabel(models.Model):
"""
Loadable thesaurus containing keywords in different languages
"""
# read from the RDF file
lang = models.CharField(max_length=3)
# read from the RDF file
label = models.CharField(max_length=255)
# note = models.CharField(max_length=511)
keyword = models.ForeignKey('ThesaurusKeyword', related_name='keyword')
def __unicode__(self):
return u"{0}".format(self.label)
class Meta:
ordering = ("keyword", "lang")
verbose_name_plural = 'Labels'
unique_together = (("keyword", "lang"),)
class ResourceBaseManager(PolymorphicManager):
def admin_contact(self):
# this assumes there is at least one superuser
superusers = get_user_model().objects.filter(is_superuser=True).order_by('id')
if superusers.count() == 0:
raise RuntimeError(
'GeoNode needs at least one admin/superuser set')
return superusers[0]
def get_queryset(self):
return super(
ResourceBaseManager,
self).get_queryset().non_polymorphic()
def polymorphic_queryset(self):
return super(ResourceBaseManager, self).get_queryset()
class ResourceBase(PolymorphicModel, PermissionLevelMixin, ItemBase):
"""
Base Resource Object loosely based on ISO 19115:2003
"""
VALID_DATE_TYPES = [(x.lower(), _(x))
for x in ['Creation', 'Publication', 'Revision']]
date_help_text = _('reference date for the cited resource')
date_type_help_text = _('identification of when a given event occurred')
edition_help_text = _('version of the cited resource')
abstract_help_text = _(
'brief narrative summary of the content of the resource(s)')
purpose_help_text = _(
'summary of the intentions with which the resource(s) was developed')
maintenance_frequency_help_text = _(
'frequency with which modifications and deletions are made to the data after '
'it is first produced')
keywords_help_text = _(
'commonly used word(s) or formalised word(s) or phrase(s) used to describe the subject '
'(space or comma-separated)')
tkeywords_help_text = _(
'formalised word(s) or phrase(s) from a fixed thesaurus used to describe the subject '
'(space or comma-separated)')
regions_help_text = _('keyword identifies a location')
restriction_code_type_help_text = _(
'limitation(s) placed upon the access or use of the data.')
constraints_other_help_text = _(
'other restrictions and legal prerequisites for accessing and using the resource or'
' metadata')
license_help_text = _('license of the dataset')
language_help_text = _('language used within the dataset')
category_help_text = _(
'high-level geographic data thematic classification to assist in the grouping and search of '
'available geographic data sets.')
spatial_representation_type_help_text = _(
'method used to represent geographic information in the dataset.')
temporal_extent_start_help_text = _(
'time period covered by the content of the dataset (start)')
temporal_extent_end_help_text = _(
'time period covered by the content of the dataset (end)')
data_quality_statement_help_text = _(
'general explanation of the data producer\'s knowledge about the lineage of a'
' dataset')
# internal fields
uuid = models.CharField(max_length=36)
owner = models.ForeignKey(
settings.AUTH_USER_MODEL,
blank=True,
null=True,
related_name='owned_resource',
verbose_name=_("Owner"))
contacts = models.ManyToManyField(
settings.AUTH_USER_MODEL,
through='ContactRole')
title = models.CharField(_('title'), max_length=255, help_text=_(
'name by which the cited resource is known'))
alternate = models.CharField(max_length=128, null=True, blank=True)
date = models.DateTimeField(
_('date'),
default=datetime.datetime.now,
help_text=date_help_text)
date_type = models.CharField(
_('date type'),
max_length=255,
choices=VALID_DATE_TYPES,
default='publication',
help_text=date_type_help_text)
edition = models.CharField(
_('edition'),
max_length=255,
blank=True,
null=True,
help_text=edition_help_text)
abstract = models.TextField(
_('abstract'),
max_length=2000,
blank=True,
help_text=abstract_help_text)
purpose = models.TextField(
_('purpose'),
max_length=500,
null=True,
blank=True,
help_text=purpose_help_text)
maintenance_frequency = models.CharField(
_('maintenance frequency'),
max_length=255,
choices=UPDATE_FREQUENCIES,
blank=True,
null=True,
help_text=maintenance_frequency_help_text)
keywords = TaggableManager(
_('keywords'),
through=TaggedContentItem,
blank=True,
help_text=keywords_help_text,
manager=_HierarchicalTagManager)
tkeywords = models.ManyToManyField(
ThesaurusKeyword,
help_text=tkeywords_help_text,
blank=True)
regions = models.ManyToManyField(
Region,
verbose_name=_('keywords region'),
blank=True,
help_text=regions_help_text)
restriction_code_type = models.ForeignKey(
RestrictionCodeType,
verbose_name=_('restrictions'),
help_text=restriction_code_type_help_text,
null=True,
blank=True,
limit_choices_to=Q(
is_choice=True))
constraints_other = models.TextField(
_('restrictions other'),
blank=True,
null=True,
help_text=constraints_other_help_text)
license = models.ForeignKey(License, null=True, blank=True,
verbose_name=_("License"),
help_text=license_help_text)
language = models.CharField(
_('language'),
max_length=3,
choices=ALL_LANGUAGES,
default='eng',
help_text=language_help_text)
category = models.ForeignKey(
TopicCategory,
null=True,
blank=True,
limit_choices_to=Q(
is_choice=True),
help_text=category_help_text)
spatial_representation_type = models.ForeignKey(
SpatialRepresentationType,
null=True,
blank=True,
limit_choices_to=Q(
is_choice=True),
verbose_name=_("spatial representation type"),
help_text=spatial_representation_type_help_text)
# Section 5
temporal_extent_start = models.DateTimeField(
_('temporal extent start'),
blank=True,
null=True,
help_text=temporal_extent_start_help_text)
temporal_extent_end = models.DateTimeField(
_('temporal extent end'),
blank=True,
null=True,
help_text=temporal_extent_end_help_text)
supplemental_information = models.TextField(
_('supplemental information'),
max_length=2000,
default=DEFAULT_SUPPLEMENTAL_INFORMATION,
help_text=_('any other descriptive information about the dataset'))
# Section 8
data_quality_statement = models.TextField(
_('data quality statement'),
max_length=2000,
blank=True,
null=True,
help_text=data_quality_statement_help_text)
group = models.ForeignKey(Group, null=True, blank=True)
# Section 9
# see metadata_author property definition below
# Save bbox values in the database.
# This is useful for spatial searches and for generating thumbnail images
# and metadata records.
bbox_x0 = models.DecimalField(
max_digits=19,
decimal_places=10,
blank=True,
null=True)
bbox_x1 = models.DecimalField(
max_digits=19,
decimal_places=10,
blank=True,
null=True)
bbox_y0 = models.DecimalField(
max_digits=19,
decimal_places=10,
blank=True,
null=True)
bbox_y1 = models.DecimalField(
max_digits=19,
decimal_places=10,
blank=True,
null=True)
srid = models.CharField(max_length=255, default='EPSG:4326')
# CSW specific fields
csw_typename = models.CharField(
_('CSW typename'),
max_length=32,
default='gmd:MD_Metadata',
null=False)
csw_schema = models.CharField(_('CSW schema'),
max_length=64,
default='http://www.isotc211.org/2005/gmd',
null=False)
csw_mdsource = models.CharField(
_('CSW source'),
max_length=256,
default='local',
null=False)
csw_insert_date = models.DateTimeField(
_('CSW insert date'), auto_now_add=True, null=True)
csw_type = models.CharField(
_('CSW type'),
max_length=32,
default='dataset',
null=False,
choices=HIERARCHY_LEVELS)
csw_anytext = models.TextField(_('CSW anytext'), null=True, blank=True)
csw_wkt_geometry = models.TextField(
_('CSW WKT geometry'),
null=False,
default='POLYGON((-180 -90,-180 90,180 90,180 -90,-180 -90))')
# metadata XML specific fields
metadata_uploaded = models.BooleanField(default=False)
metadata_uploaded_preserve = models.BooleanField(default=False)
metadata_xml = models.TextField(
null=True,
default='<gmd:MD_Metadata xmlns:gmd="http://www.isotc211.org/2005/gmd"/>',
blank=True)
popular_count = models.IntegerField(default=0)
share_count = models.IntegerField(default=0)
featured = models.BooleanField(_("Featured"), default=False, help_text=_(
'Should this resource be advertised in home page?'))
is_published = models.BooleanField(
_("Is Published"),
default=True,
help_text=_('Should this resource be published and searchable?'))
is_approved = models.BooleanField(
_("Approved"),
default=False,
help_text=_('Is this resource validated from a publisher or editor?'))
# fields necessary for the apis
thumbnail_url = models.TextField(null=True, blank=True)
detail_url = models.CharField(max_length=255, null=True, blank=True)
rating = models.IntegerField(default=0, null=True, blank=True)
def __unicode__(self):
return self.title
@property
def group_name(self):
if self.group:
return str(self.group)
return None
@property
def bbox(self):
"""BBOX is in the format: [x0,x1,y0,y1]."""
return [
self.bbox_x0,
self.bbox_x1,
self.bbox_y0,
self.bbox_y1,
self.srid]
@property
def bbox_string(self):
"""BBOX is in the format: [x0,y0,x1,y1]."""
return ",".join([str(self.bbox_x0), str(self.bbox_y0),
str(self.bbox_x1), str(self.bbox_y1)])
@property
def geographic_bounding_box(self):
"""BBOX is in the format: [x0,x1,y0,y1]."""
return bbox_to_wkt(
self.bbox_x0,
self.bbox_x1,
self.bbox_y0,
self.bbox_y1,
srid=self.srid)
@property
def license_light(self):
a = []
if not self.license:
return ''
if (not (self.license.name is None)) and (len(self.license.name) > 0):
a.append(self.license.name)
if (not (self.license.url is None)) and (len(self.license.url) > 0):
a.append("(" + self.license.url + ")")
return " ".join(a)
@property
def license_verbose(self):
a = []
if (not (self.license.name_long is None)) and (
len(self.license.name_long) > 0):
a.append(self.license.name_long + ":")
if (not (self.license.description is None)) and (
len(self.license.description) > 0):
a.append(self.license.description)
if (not (self.license.url is None)) and (len(self.license.url) > 0):
a.append("(" + self.license.url + ")")
return " ".join(a)
@property
def metadata_completeness(self):
required_fields = [
'abstract',
'category',
'data_quality_statement',
'date',
'date_type',
'language',
'license',
'regions',
'title']
if self.restriction_code_type == 'otherRestrictions':
required_fields.append('constraints_other')
filled_fields = []
for required_field in required_fields:
field = getattr(self, required_field, None)
if field:
if required_field == 'license':
if field.name == 'Not Specified':
continue
if required_field == 'regions':
if not field.all():
continue
if required_field == 'category':
if not field.identifier:
continue
filled_fields.append(field)
return '{}%'.format(len(filled_fields) * 100 / len(required_fields))
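# Illustrative example (hypothetical numbers, not from a fixture): with the
# nine default required fields and five of them filled, the property returns
# '55%', since 5 * 100 / 9 is 55 under integer division.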
def keyword_list(self):
return [kw.name for kw in self.keywords.all()]
def keyword_slug_list(self):
return [kw.slug for kw in self.keywords.all()]
def region_name_list(self):
return [region.name for region in self.regions.all()]
def spatial_representation_type_string(self):
if hasattr(self.spatial_representation_type, 'identifier'):
return self.spatial_representation_type.identifier
else:
if hasattr(self, 'storeType'):
if self.storeType == 'coverageStore':
return 'grid'
return 'vector'
else:
return None
@property
def keyword_csv(self):
keywords_qs = self.get_real_instance().keywords.all()
if keywords_qs:
return ','.join([kw.name for kw in keywords_qs])
else:
return ''
def set_latlon_bounds(self, box):
"""
Set the four bounds in lat lon projection
"""
self.bbox_x0 = box[0]
self.bbox_x1 = box[1]
self.bbox_y0 = box[2]
self.bbox_y1 = box[3]
def set_bounds_from_center_and_zoom(self, center_x, center_y, zoom):
"""
Calculate zoom level and center coordinates in mercator.
"""
self.center_x = center_x
self.center_y = center_y
self.zoom = zoom
deg_len_equator = 40075160 / 360
# convert center to lat/lon
def get_lon_lat():
wgs84 = Proj(init='epsg:4326')
mercator = Proj(init='epsg:3857')
lon, lat = transform(mercator, wgs84, center_x, center_y)
return lon, lat
# calculate the degree length at this latitude
def deg_len():
lon, lat = get_lon_lat()
return math.cos(math.radians(lat)) * deg_len_equator  # lat is in degrees
lon, lat = get_lon_lat()
# taken from http://wiki.openstreetmap.org/wiki/Zoom_levels
# it might be not precise but enough for the purpose
distance_per_pixel = 40075160 * math.cos(math.radians(lat)) / 2**(zoom + 8)
# calculate the distance from the center of the map in degrees
# we use the calculated degree length on the x axis and the
# normal degree length on the y axis, assuming that it does not change
# Assuming a map of 1000 px of width and 700 px of height
distance_x_degrees = distance_per_pixel * 500 / deg_len()
distance_y_degrees = distance_per_pixel * 350 / deg_len_equator
self.bbox_x0 = lon - distance_x_degrees
self.bbox_x1 = lon + distance_x_degrees
self.bbox_y0 = lat - distance_y_degrees
self.bbox_y1 = lat + distance_y_degrees
def set_bounds_from_bbox(self, bbox):
"""
Calculate zoom level and center coordinates in mercator.
:param bbox: BBOX is in the format: [x0, x1, y0, y1], which is:
[min lon, max lon, min lat, max lat] or
[xmin, xmax, ymin, ymax]
:type bbox: list
"""
self.set_latlon_bounds(bbox)
minx, maxx, miny, maxy = [float(c) for c in bbox]
x = (minx + maxx) / 2
y = (miny + maxy) / 2
(center_x, center_y) = forward_mercator((x, y))
xdiff = maxx - minx
ydiff = maxy - miny
zoom = 0
if xdiff > 0 and ydiff > 0:
width_zoom = math.log(360 / xdiff, 2)
height_zoom = math.log(360 / ydiff, 2)
zoom = math.ceil(min(width_zoom, height_zoom))
self.zoom = zoom
self.center_x = center_x
self.center_y = center_y
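# Worked example (hypothetical bbox, for illustration only): with
# bbox = [-10, 10, -5, 5], xdiff = 20 and ydiff = 10, so
# width_zoom = log2(360 / 20) ~ 4.17 and height_zoom = log2(360 / 10) ~ 5.17;
# zoom becomes ceil(4.17) = 5 and (center_x, center_y) is the forward
# mercator projection of (0, 0).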
def download_links(self):
"""assemble download links for pycsw"""
links = []
for url in self.link_set.all():
if url.link_type == 'metadata': # avoid recursion
continue
if url.link_type == 'html':
links.append(
(self.title,
'Web address (URL)',
'WWW:LINK-1.0-http--link',
url.url))
elif url.link_type in ('OGC:WMS', 'OGC:WFS', 'OGC:WCS'):
links.append((self.title, url.name, url.link_type, url.url))
else:
description = '%s (%s Format)' % (self.title, url.name)
links.append(
(self.title,
description,
'WWW:DOWNLOAD-1.0-http--download',
url.url))
return links
def get_tiles_url(self):
"""Return URL for Z/Y/X mapping clients or None if it does not exist.
"""
try:
tiles_link = self.link_set.get(name='Tiles')
except Link.DoesNotExist:
return None
else:
return tiles_link.url
def get_legend(self):
"""Return Link for legend or None if it does not exist.
"""
try:
legends_link = self.link_set.get(name='Legend')
except Link.DoesNotExist:
return None
except Link.MultipleObjectsReturned:
return None
else:
return legends_link
def get_legend_url(self):
"""Return URL for legend or None if it does not exist.
The legend can be either an image (for Geoserver's WMS)
or a JSON object for ArcGIS.
"""
legend = self.get_legend()
if legend is None:
return None
return legend.url
def get_ows_url(self):
"""Return URL for OGC WMS server None if it does not exist.
"""
try:
ows_link = self.link_set.get(name='OGC:WMS')
except Link.DoesNotExist:
return None
else:
return ows_link.url
def get_thumbnail_url(self):
"""Return a thumbnail url.
It could be a local one if it exists, a remote one (e.g. a WMS GetImage),
or a 'Missing Thumbnail' one.
"""
local_thumbnails = self.link_set.filter(name='Thumbnail')
if local_thumbnails.count() > 0:
return local_thumbnails[0].url
remote_thumbnails = self.link_set.filter(name='Remote Thumbnail')
if remote_thumbnails.count() > 0:
return remote_thumbnails[0].url
return staticfiles.static(settings.MISSING_THUMBNAIL)
def has_thumbnail(self):
"""Determine if the thumbnail object exists and an image exists"""
return self.link_set.filter(name='Thumbnail').exists()
def save_thumbnail(self, filename, image):
upload_to = 'thumbs/'
upload_path = os.path.join('thumbs/', filename)
try:
if storage.exists(upload_path):
# Delete if exists otherwise the (FileSystemStorage) implementation
# will create a new file with a unique name
storage.delete(os.path.join(upload_path))
storage.save(upload_path, ContentFile(image))
url_path = os.path.join(
settings.MEDIA_URL,
upload_to,
filename).replace(
'\\',
'/')
url = urljoin(settings.SITEURL, url_path)
Link.objects.get_or_create(resource=self,
url=url,
defaults=dict(
name='Thumbnail',
extension='png',
mime='image/png',
link_type='image',
))
ResourceBase.objects.filter(id=self.id).update(
thumbnail_url=url
)
except Exception:
logger.error(
'Error when generating the thumbnail for resource %s.' %
self.id)
logger.error('Check permissions for file %s.' % upload_path)
def set_missing_info(self):
"""Set default permissions and point of contacts.
It is mandatory to call it from descendant classes
but hard to enforce technically via signals or save overriding.
"""
from guardian.models import UserObjectPermission
logger.debug('Checking for permissions.')
# True if every key in the get_all_level_info dict is empty.
no_custom_permissions = UserObjectPermission.objects.filter(
content_type=ContentType.objects.get_for_model(
self.get_self_resource()), object_pk=str(
self.pk)).exists()
if not no_custom_permissions:
logger.debug(
'There are no permissions for this object, setting default perms.')
self.set_default_permissions()
user = None
if self.owner:
user = self.owner
else:
try:
user = ResourceBase.objects.admin_contact().user
except BaseException:
pass
if user:
if self.poc is None:
self.poc = user
if self.metadata_author is None:
self.metadata_author = user
def maintenance_frequency_title(self):
return [v for i, v in enumerate(
UPDATE_FREQUENCIES) if v[0] == self.maintenance_frequency][0][1].title()
def language_title(self):
return [v for i, v in enumerate(
ALL_LANGUAGES) if v[0] == self.language][0][1].title()
def _set_poc(self, poc):
# reset any poc assignation to this resource
ContactRole.objects.filter(
role='pointOfContact',
resource=self).delete()
# create the new assignation
ContactRole.objects.create(
role='pointOfContact',
resource=self,
contact=poc)
def _get_poc(self):
try:
the_poc = ContactRole.objects.get(
role='pointOfContact', resource=self).contact
except ContactRole.DoesNotExist:
the_poc = None
return the_poc
poc = property(_get_poc, _set_poc)
def _set_metadata_author(self, metadata_author):
# reset any metadata_author assignation to this resource
ContactRole.objects.filter(role='author', resource=self).delete()
# create the new assignation
ContactRole.objects.create(
role='author',
resource=self,
contact=metadata_author)
def _get_metadata_author(self):
try:
the_ma = ContactRole.objects.get(
role='author', resource=self).contact
except ContactRole.DoesNotExist:
the_ma = None
return the_ma
def handle_moderated_uploads(self):
if settings.ADMIN_MODERATE_UPLOADS:
self.is_published = False
metadata_author = property(_get_metadata_author, _set_metadata_author)
objects = ResourceBaseManager()
class Meta:
# custom permissions,
# add, change and delete are standard in django-guardian
permissions = (
('view_resourcebase', 'Can view resource'),
('change_resourcebase_permissions', 'Can change resource permissions'),
('download_resourcebase', 'Can download resource'),
('publish_resourcebase', 'Can publish resource'),
('change_resourcebase_metadata', 'Can change resource metadata'),
)
class LinkManager(models.Manager):
"""Helper class to access links grouped by type
"""
def data(self):
return self.get_queryset().filter(link_type='data')
def image(self):
return self.get_queryset().filter(link_type='image')
def download(self):
return self.get_queryset().filter(link_type__in=['image', 'data'])
def metadata(self):
return self.get_queryset().filter(link_type='metadata')
def original(self):
return self.get_queryset().filter(link_type='original')
def geogig(self):
return self.get_queryset().filter(name__icontains='geogig')
def ows(self):
return self.get_queryset().filter(
link_type__in=['OGC:WMS', 'OGC:WFS', 'OGC:WCS'])
class Link(models.Model):
"""Auxiliary model for storing links for resources.
This helps avoiding the need for runtime lookups
to the OWS server or the CSW Catalogue.
There are several types of links:
* original: For uploaded files (Shapefiles or GeoTIFFs)
* data: For WFS and WCS links that allow access to raw data
* image: For WMS and TMS links
* metadata: For CSW links
* OGC:WMS: for WMS service links
* OGC:WFS: for WFS service links
* OGC:WCS: for WCS service links
"""
resource = models.ForeignKey(ResourceBase, blank=True, null=True)
extension = models.CharField(
max_length=255,
help_text=_('For example "kml"'))
link_type = models.CharField(
max_length=255, choices=[
(x, x) for x in LINK_TYPES])
name = models.CharField(max_length=255, help_text=_(
'For example "View in Google Earth"'))
mime = models.CharField(max_length=255,
help_text=_('For example "text/xml"'))
url = models.TextField(max_length=1000)
objects = LinkManager()
def __str__(self):
return '%s link' % self.link_type
def resourcebase_post_save(instance, *args, **kwargs):
"""
Used to fill any additional fields after the save.
Has to be called by the children
"""
# we need to remove stale links
for link in instance.link_set.all():
if link.name == "External Document":
if link.resource.doc_url != link.url:
link.delete()
else:
if urlsplit(settings.SITEURL).hostname not in link.url:
link.delete()
try:
ResourceBase.objects.filter(id=instance.id).update(
thumbnail_url=instance.get_thumbnail_url(),
detail_url=instance.get_absolute_url(),
csw_insert_date=datetime.datetime.now())
except BaseException:
pass
try:
instance.thumbnail_url = instance.get_thumbnail_url()
instance.detail_url = instance.get_absolute_url()
instance.csw_insert_date = datetime.datetime.now()
finally:
instance.set_missing_info()
try:
if instance.regions and instance.regions.all():
"""
try:
queryset = instance.regions.all().order_by('name')
for region in queryset:
print ("%s : %s" % (region.name, region.geographic_bounding_box))
except:
tb = traceback.format_exc()
else:
tb = None
finally:
if tb:
logger.debug(tb)
"""
pass
else:
srid1, wkt1 = instance.geographic_bounding_box.split(";")
srid1 = re.findall(r'\d+', srid1)
poly1 = GEOSGeometry(wkt1, srid=int(srid1[0]))
poly1.transform(4326)
queryset = Region.objects.all().order_by('name')
global_regions = []
regions_to_add = []
for region in queryset:
try:
srid2, wkt2 = region.geographic_bounding_box.split(";")
srid2 = re.findall(r'\d+', srid2)
poly2 = GEOSGeometry(wkt2, srid=int(srid2[0]))
poly2.transform(4326)
if poly2.intersection(poly1):
regions_to_add.append(region)
if region.level == 0 and region.parent is None:
global_regions.append(region)
except BaseException:
tb = traceback.format_exc()
if tb:
logger.debug(tb)
if regions_to_add or global_regions:
if regions_to_add and len(
regions_to_add) > 0 and len(regions_to_add) <= 30:
instance.regions.add(*regions_to_add)
else:
instance.regions.add(*global_regions)
except BaseException:
tb = traceback.format_exc()
if tb:
logger.debug(tb)
# set the default License if none is specified
if instance.license is None:
no_license = License.objects.filter(name="Not Specified")
if no_license and len(no_license) > 0:
instance.license = no_license[0]
def rating_post_save(instance, *args, **kwargs):
"""
Used to fill the average rating field on OverallRating change.
"""
ResourceBase.objects.filter(
id=instance.object_id).update(
rating=instance.rating)
signals.post_save.connect(rating_post_save, sender=OverallRating)
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def do_login(sender, user, request, **kwargs):
"""
Take action on user login. Generate a new user access_token to be shared
with GeoServer, and store it into the request.session
"""
if user and user.is_authenticated():
token = None
try:
Application = get_application_model()
app = Application.objects.get(name="GeoServer")
# Let's create a new one
token = generate_token()
AccessToken.objects.get_or_create(
user=user,
application=app,
expires=datetime.datetime.now() +
datetime.timedelta(
days=1),
token=token)
except BaseException:
u = uuid.uuid1()
token = u.hex
# Do GeoServer Login
url = "%s%s?access_token=%s" % (settings.OGC_SERVER['default']['PUBLIC_LOCATION'],
'ows?service=wms&version=1.3.0&request=GetCapabilities',
token)
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
jsessionid = None
try:
opener.open(url)
for c in cj:
if c.name == "JSESSIONID":
jsessionid = c.value
except BaseException:
u = uuid.uuid1()
jsessionid = u.hex
request.session['access_token'] = token
request.session['JSESSIONID'] = jsessionid
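# Summary of the flow above (descriptive only): on login a fresh OAuth2 access
# token is created for the "GeoServer" application (falling back to a random
# uuid hex on failure), a GetCapabilities request is made to GeoServer with
# that token to obtain a JSESSIONID cookie, and both values are stored in the
# Django session for later use.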
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def do_logout(sender, user, request, **kwargs):
"""
Take action on user logout. Cleanup user access_token and send logout
request to GeoServer
"""
if 'access_token' in request.session:
try:
Application = get_application_model()
app = Application.objects.get(name="GeoServer")
# Let's delete the old one
try:
old = AccessToken.objects.get(user=user, application=app)
except BaseException:
pass
else:
old.delete()
except BaseException:
pass
# Do GeoServer Logout
if 'access_token' in request.session:
access_token = request.session['access_token']
else:
access_token = None
if access_token:
url = "%s%s?access_token=%s" % (settings.OGC_SERVER['default']['PUBLIC_LOCATION'],
settings.OGC_SERVER['default']['LOGOUT_ENDPOINT'],
access_token)
header_params = {
"Authorization": ("Bearer %s" % access_token)
}
else:
url = "%s%s" % (settings.OGC_SERVER['default']['PUBLIC_LOCATION'],
settings.OGC_SERVER['default']['LOGOUT_ENDPOINT'])
param = {}
data = urllib.urlencode(param)
cookies = None
for cook in request.COOKIES:
name = str(cook)
value = request.COOKIES.get(name)
if name == 'csrftoken':
header_params['X-CSRFToken'] = value
cook = "%s=%s" % (name, value)
if not cookies:
cookies = cook
else:
cookies = cookies + '; ' + cook
if cookies:
if 'JSESSIONID' in request.session and request.session['JSESSIONID']:
cookies = cookies + '; JSESSIONID=' + \
request.session['JSESSIONID']
header_params['Cookie'] = cookies
gs_request = urllib2.Request(url, data, header_params)
try:
urllib2.urlopen(gs_request)
except BaseException:
tb = traceback.format_exc()
if tb:
logger.debug(tb)
if 'access_token' in request.session:
del request.session['access_token']
request.session.modified = True
user_logged_in.connect(do_login)
user_logged_out.connect(do_logout)
|
gpl-3.0
| -7,080,191,159,422,798,000 | 32.661664 | 101 | 0.582212 | false | 4.125397 | false | false | false |
anish/buildbot
|
master/buildbot/steps/source/gitlab.py
|
1
|
2149
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.python import log
from buildbot.steps.source.git import Git
class GitLab(Git):
"""
Source step that knows how to handle merge requests from
the GitLab change source
"""
def startVC(self, branch, revision, patch):
# If this is a merge request:
if self.build.hasProperty("target_branch"):
target_repourl = self.build.getProperty("target_git_ssh_url", None)
if self.repourl != target_repourl:
log.msg("GitLab.startVC: note: GitLab step for merge requests"
" should probably have repourl='%s' instead of '%s'?" %
(target_repourl, self.repourl))
# This step is (probably) configured to fetch the target
# branch of a merge (because it is impractical for users to
# configure one builder for each of the infinite number of
# possible source branches for merge requests).
# Point instead to the source being proposed for merge.
branch = self.build.getProperty("source_branch", None)
# FIXME: layering violation, should not be modifying self here?
self.repourl = self.build.getProperty("source_git_ssh_url", None)
# The revision is unlikely to exist in the repo already,
# so tell Git to not check.
revision = None
super(GitLab, self).startVC(branch, revision, patch)
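# Illustrative configuration sketch (an assumption, not part of this module):
# in a master.cfg this step is typically added to a build factory, e.g.
#
#   from buildbot.plugins import steps, util
#   factory = util.BuildFactory()
#   factory.addStep(steps.GitLab(
#       repourl='git@gitlab.example.com:group/project.git',
#       mode='full', method='clobber'))
#
# so that merge-request builds fetch the proposed source branch as described above.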
|
gpl-2.0
| -870,936,740,008,805,600 | 43.770833 | 79 | 0.670079 | false | 4.367886 | false | false | false |
parasgithub/PrairieLearn
|
elements/pl-symbolic-input/pl-symbolic-input.py
|
1
|
17204
|
import prairielearn as pl
import lxml.html
from html import escape
import chevron
import sympy
import random
import math
import python_helper_sympy as phs
def get_variables_list(variables_string):
if variables_string is not None:
variables_list = [variable.strip() for variable in variables_string.split(',')]
return variables_list
else:
return []
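# Illustrative example (not part of the original file):
#   get_variables_list('x, y') returns ['x', 'y'] and
#   get_variables_list(None) returns [].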
def prepare(element_html, data):
element = lxml.html.fragment_fromstring(element_html)
required_attribs = ['answers-name']
optional_attribs = ['weight', 'correct-answer', 'variables', 'label', 'display', 'allow-complex', 'imaginary-unit-for-display']
pl.check_attribs(element, required_attribs, optional_attribs)
name = pl.get_string_attrib(element, 'answers-name')
correct_answer = pl.get_string_attrib(element, 'correct-answer', None)
if correct_answer is not None:
if name in data['correct_answers']:
raise Exception('duplicate correct_answers variable name: %s' % name)
data['correct_answers'][name] = correct_answer
imaginary_unit = pl.get_string_attrib(element, 'imaginary-unit-for-display', 'i')
if not (imaginary_unit == 'i' or imaginary_unit == 'j'):
raise Exception('imaginary-unit-for-display must be either i or j')
def render(element_html, data):
element = lxml.html.fragment_fromstring(element_html)
name = pl.get_string_attrib(element, 'answers-name')
label = pl.get_string_attrib(element, 'label', None)
variables_string = pl.get_string_attrib(element, 'variables', None)
variables = get_variables_list(variables_string)
display = pl.get_string_attrib(element, 'display', 'inline')
allow_complex = pl.get_boolean_attrib(element, 'allow-complex', False)
imaginary_unit = pl.get_string_attrib(element, 'imaginary-unit-for-display', 'i')
if data['panel'] == 'question':
editable = data['editable']
raw_submitted_answer = data['raw_submitted_answers'].get(name, None)
operators = ', '.join(['cos', 'sin', 'tan', 'exp', 'log', 'sqrt', '( )', '+', '-', '*', '/', '^', '**'])
constants = ', '.join(['pi', 'e'])
info_params = {
'format': True,
'variables': variables_string,
'operators': operators,
'constants': constants,
'allow_complex': allow_complex,
}
with open('pl-symbolic-input.mustache', 'r', encoding='utf-8') as f:
info = chevron.render(f, info_params).strip()
with open('pl-symbolic-input.mustache', 'r', encoding='utf-8') as f:
info_params.pop('format', None)
info_params['shortformat'] = True
shortinfo = chevron.render(f, info_params).strip()
html_params = {
'question': True,
'name': name,
'label': label,
'editable': editable,
'info': info,
'shortinfo': shortinfo,
'uuid': pl.get_uuid(),
'allow_complex': allow_complex,
}
partial_score = data['partial_scores'].get(name, {'score': None})
score = partial_score.get('score', None)
if score is not None:
try:
score = float(score)
if score >= 1:
html_params['correct'] = True
elif score > 0:
html_params['partial'] = math.floor(score * 100)
else:
html_params['incorrect'] = True
except Exception:
raise ValueError('invalid score: %s' % score)
if display == 'inline':
html_params['inline'] = True
elif display == 'block':
html_params['block'] = True
else:
raise ValueError('method of display "%s" is not valid (must be "inline" or "block")' % display)
if raw_submitted_answer is not None:
html_params['raw_submitted_answer'] = escape(raw_submitted_answer)
with open('pl-symbolic-input.mustache', 'r', encoding='utf-8') as f:
html = chevron.render(f, html_params).strip()
elif data['panel'] == 'submission':
parse_error = data['format_errors'].get(name, None)
html_params = {
'submission': True,
'label': label,
'parse_error': parse_error,
'uuid': pl.get_uuid()
}
if parse_error is None:
a_sub = data['submitted_answers'][name]
if isinstance(a_sub, str):
# this is for backward-compatibility
a_sub = phs.convert_string_to_sympy(a_sub, variables, allow_complex=allow_complex)
else:
a_sub = phs.json_to_sympy(a_sub, allow_complex=allow_complex)
a_sub = a_sub.subs(sympy.I, sympy.Symbol(imaginary_unit))
html_params['a_sub'] = sympy.latex(a_sub)
else:
raw_submitted_answer = data['raw_submitted_answers'].get(name, None)
if raw_submitted_answer is not None:
html_params['raw_submitted_answer'] = escape(raw_submitted_answer)
partial_score = data['partial_scores'].get(name, {'score': None})
score = partial_score.get('score', None)
if score is not None:
try:
score = float(score)
if score >= 1:
html_params['correct'] = True
elif score > 0:
html_params['partial'] = math.floor(score * 100)
else:
html_params['incorrect'] = True
except Exception:
raise ValueError('invalid score: %s' % score)
if display == 'inline':
html_params['inline'] = True
elif display == 'block':
html_params['block'] = True
else:
raise ValueError('method of display "%s" is not valid (must be "inline" or "block")' % display)
with open('pl-symbolic-input.mustache', 'r', encoding='utf-8') as f:
html = chevron.render(f, html_params).strip()
elif data['panel'] == 'answer':
a_tru = data['correct_answers'].get(name, None)
if a_tru is not None:
if isinstance(a_tru, str):
# this is so instructors can specify the true answer simply as a string
a_tru = phs.convert_string_to_sympy(a_tru, variables, allow_complex=allow_complex)
else:
a_tru = phs.json_to_sympy(a_tru, allow_complex=allow_complex)
a_tru = a_tru.subs(sympy.I, sympy.Symbol(imaginary_unit))
html_params = {
'answer': True,
'label': label,
'a_tru': sympy.latex(a_tru)
}
with open('pl-symbolic-input.mustache', 'r', encoding='utf-8') as f:
html = chevron.render(f, html_params).strip()
else:
html = ''
else:
raise Exception('Invalid panel type: %s' % data['panel'])
return html
def parse(element_html, data):
element = lxml.html.fragment_fromstring(element_html)
name = pl.get_string_attrib(element, 'answers-name')
variables = get_variables_list(pl.get_string_attrib(element, 'variables', None))
allow_complex = pl.get_boolean_attrib(element, 'allow-complex', False)
imaginary_unit = pl.get_string_attrib(element, 'imaginary-unit-for-display', 'i')
# Get submitted answer or return parse_error if it does not exist
a_sub = data['submitted_answers'].get(name, None)
if not a_sub:
data['format_errors'][name] = 'No submitted answer.'
data['submitted_answers'][name] = None
return
# Parse the submitted answer and put the result in a string
try:
# Replace '^' with '**' wherever it appears. In MATLAB, either can be used
# for exponentiation. In python, only the latter can be used.
a_sub = a_sub.replace('^', '**')
# Strip whitespace
a_sub = a_sub.strip()
# Convert safely to sympy
a_sub_parsed = phs.convert_string_to_sympy(a_sub, variables, allow_complex=allow_complex)
# If complex numbers are not allowed, raise error if expression has the imaginary unit
if (not allow_complex) and (a_sub_parsed.has(sympy.I)):
a_sub_parsed = a_sub_parsed.subs(sympy.I, sympy.Symbol(imaginary_unit))
s = 'Your answer was simplified to this, which contains a complex number (denoted ${:s}$): $${:s}$$'.format(imaginary_unit, sympy.latex(a_sub_parsed))
data['format_errors'][name] = s
data['submitted_answers'][name] = None
return
# Store result as json.
a_sub_json = phs.sympy_to_json(a_sub_parsed, allow_complex=allow_complex)
except phs.HasFloatError as err:
s = 'Your answer contains the floating-point number ' + str(err.n) + '. '
s += 'All numbers must be expressed as integers (or ratios of integers). '
s += '<br><br><pre>' + phs.point_to_error(a_sub, err.offset) + '</pre>'
data['format_errors'][name] = s
data['submitted_answers'][name] = None
return
except phs.HasComplexError as err:
s = 'Your answer contains the complex number ' + str(err.n) + '. '
s += 'All numbers must be expressed as integers (or ratios of integers). '
if allow_complex:
s += 'To include a complex number in your expression, write it as the product of an integer with the imaginary unit <code>i</code> or <code>j</code>. '
s += '<br><br><pre>' + phs.point_to_error(a_sub, err.offset) + '</pre>'
data['format_errors'][name] = s
data['submitted_answers'][name] = None
return
except phs.HasInvalidExpressionError as err:
s = 'Your answer has an invalid expression. '
s += '<br><br><pre>' + phs.point_to_error(a_sub, err.offset) + '</pre>'
data['format_errors'][name] = s
data['submitted_answers'][name] = None
return
except phs.HasInvalidFunctionError as err:
s = 'Your answer calls an invalid function "' + err.text + '". '
s += '<br><br><pre>' + phs.point_to_error(a_sub, err.offset) + '</pre>'
data['format_errors'][name] = s
data['submitted_answers'][name] = None
return
except phs.HasInvalidVariableError as err:
s = 'Your answer refers to an invalid variable "' + err.text + '". '
s += '<br><br><pre>' + phs.point_to_error(a_sub, err.offset) + '</pre>'
data['format_errors'][name] = s
data['submitted_answers'][name] = None
return
except phs.HasParseError as err:
s = 'Your answer has a syntax error. '
s += '<br><br><pre>' + phs.point_to_error(a_sub, err.offset) + '</pre>'
data['format_errors'][name] = s
data['submitted_answers'][name] = None
return
except phs.HasEscapeError as err:
s = 'Your answer must not contain the character "\\". '
s += '<br><br><pre>' + phs.point_to_error(a_sub, err.offset) + '</pre>'
data['format_errors'][name] = s
data['submitted_answers'][name] = None
return
except phs.HasCommentError as err:
s = 'Your answer must not contain the character "#". '
s += '<br><br><pre>' + phs.point_to_error(a_sub, err.offset) + '</pre>'
data['format_errors'][name] = s
data['submitted_answers'][name] = None
return
except Exception:
data['format_errors'][name] = 'Invalid format.'
data['submitted_answers'][name] = None
return
# Make sure we can parse the json again
try:
# Convert safely to sympy
phs.json_to_sympy(a_sub_json, allow_complex=allow_complex)
# Finally, store the result
data['submitted_answers'][name] = a_sub_json
except Exception:
s = 'Your answer was simplified to this, which contains an invalid expression: $${:s}$$'.format(sympy.latex(a_sub_parsed))
data['format_errors'][name] = s
data['submitted_answers'][name] = None
def grade(element_html, data):
element = lxml.html.fragment_fromstring(element_html)
name = pl.get_string_attrib(element, 'answers-name')
variables = get_variables_list(pl.get_string_attrib(element, 'variables', None))
allow_complex = pl.get_boolean_attrib(element, 'allow-complex', False)
weight = pl.get_integer_attrib(element, 'weight', 1)
# Get true answer (if it does not exist, create no grade - leave it
# up to the question code)
a_tru = data['correct_answers'].get(name, None)
if a_tru is None:
return
# Get submitted answer (if it does not exist, score is zero)
a_sub = data['submitted_answers'].get(name, None)
if a_sub is None:
data['partial_scores'][name] = {'score': 0, 'weight': weight}
return
# Parse true answer
if isinstance(a_tru, str):
# this is so instructors can specify the true answer simply as a string
a_tru = phs.convert_string_to_sympy(a_tru, variables, allow_complex=allow_complex)
else:
a_tru = phs.json_to_sympy(a_tru, allow_complex=allow_complex)
# Parse submitted answer
if isinstance(a_sub, str):
# this is for backward-compatibility
a_sub = phs.convert_string_to_sympy(a_sub, variables, allow_complex=allow_complex)
else:
a_sub = phs.json_to_sympy(a_sub, allow_complex=allow_complex)
# Check equality
correct = a_tru.equals(a_sub)
if correct:
data['partial_scores'][name] = {'score': 1, 'weight': weight}
else:
data['partial_scores'][name] = {'score': 0, 'weight': weight}
def test(element_html, data):
element = lxml.html.fragment_fromstring(element_html)
name = pl.get_string_attrib(element, 'answers-name')
weight = pl.get_integer_attrib(element, 'weight', 1)
result = random.choices(['correct', 'incorrect', 'invalid'], [5, 5, 1])[0]
if result == 'correct':
data['raw_submitted_answers'][name] = str(pl.from_json(data['correct_answers'][name]))
data['partial_scores'][name] = {'score': 1, 'weight': weight}
elif result == 'incorrect':
data['raw_submitted_answers'][name] = str(pl.from_json(data['correct_answers'][name])) + ' + {:d}'.format(random.randint(1, 100))
data['partial_scores'][name] = {'score': 0, 'weight': weight}
elif result == 'invalid':
invalid_type = random.choice(['float', 'complex', 'expression', 'function', 'variable', 'syntax', 'escape', 'comment'])
if invalid_type == 'float':
data['raw_submitted_answers'][name] = 'x + 1.234'
s = 'Your answer contains the floating-point number ' + str(1.234) + '. '
s += 'All numbers must be expressed as integers (or ratios of integers). '
s += '<br><br><pre>' + phs.point_to_error('x + 1.234', 4) + '</pre>'
data['format_errors'][name] = s
elif invalid_type == 'complex':
data['raw_submitted_answers'][name] = 'x + (1+2j)'
s = 'Your answer contains the complex number ' + str(2j) + '. '
s += 'All numbers must be expressed as integers (or ratios of integers). '
s += '<br><br><pre>' + phs.point_to_error('x + (1+2j)', 7) + '</pre>'
data['format_errors'][name] = s
elif invalid_type == 'expression':
data['raw_submitted_answers'][name] = '1 and 0'
s = 'Your answer has an invalid expression. '
s += '<br><br><pre>' + phs.point_to_error('1 and 0', 0) + '</pre>'
data['format_errors'][name] = s
elif invalid_type == 'function':
data['raw_submitted_answers'][name] = 'atan(x)'
s = 'Your answer calls an invalid function "' + 'atan' + '". '
s += '<br><br><pre>' + phs.point_to_error('atan(x)', 0) + '</pre>'
data['format_errors'][name] = s
elif invalid_type == 'variable':
data['raw_submitted_answers'][name] = 'x + y'
s = 'Your answer refers to an invalid variable "' + 'y' + '". '
s += '<br><br><pre>' + phs.point_to_error('x + y', 4) + '</pre>'
data['format_errors'][name] = s
elif invalid_type == 'syntax':
data['raw_submitted_answers'][name] = 'x +* 1'
s = 'Your answer has a syntax error. '
s += '<br><br><pre>' + phs.point_to_error('x +* 1', 4) + '</pre>'
data['format_errors'][name] = s
elif invalid_type == 'escape':
data['raw_submitted_answers'][name] = 'x + 1\\n'
s = 'Your answer must not contain the character "\\". '
s += '<br><br><pre>' + phs.point_to_error('x + 1\\n', 5) + '</pre>'
data['format_errors'][name] = s
elif invalid_type == 'comment':
data['raw_submitted_answers'][name] = 'x # some text'
s = 'Your answer must not contain the character "#". '
s += '<br><br><pre>' + phs.point_to_error('x # some text', 2) + '</pre>'
data['format_errors'][name] = s
else:
raise Exception('invalid invalid_type: %s' % invalid_type)
else:
raise Exception('invalid result: %s' % result)
|
agpl-3.0
| 4,258,647,680,519,534,600 | 44.036649 | 163 | 0.575273 | false | 3.687889 | false | false | false |
kawamuray/ganeti
|
lib/utils/text.py
|
1
|
18178
|
#
#
# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Utility functions for manipulating or working with text.
"""
import re
import os
import time
import collections
from ganeti import errors
from ganeti import compat
#: Unit checker regexp
_PARSEUNIT_REGEX = re.compile(r"^([.\d]+)\s*([a-zA-Z]+)?$")
#: Characters which don't need to be quoted for shell commands
_SHELL_UNQUOTED_RE = re.compile("^[-.,=:/_+@A-Za-z0-9]+$")
#: Shell param checker regexp
_SHELLPARAM_REGEX = re.compile(r"^[-a-zA-Z0-9._+/:%@]+$")
#: ASCII equivalent of unicode character 'HORIZONTAL ELLIPSIS' (U+2026)
_ASCII_ELLIPSIS = "..."
#: MAC address octet
_MAC_ADDR_OCTET_RE = r"[0-9a-f]{2}"
def MatchNameComponent(key, name_list, case_sensitive=True):
"""Try to match a name against a list.
This function will try to match a name like test1 against a list
like C{['test1.example.com', 'test2.example.com', ...]}. Against
this list, I{'test1'} as well as I{'test1.example'} will match, but
not I{'test1.ex'}. A multiple match will be considered as no match
at all (e.g. I{'test1'} against C{['test1.example.com',
'test1.example.org']}), except when the key fully matches an entry
(e.g. I{'test1'} against C{['test1', 'test1.example.com']}).
@type key: str
@param key: the name to be searched
@type name_list: list
@param name_list: the list of strings against which to search the key
@type case_sensitive: boolean
@param case_sensitive: whether to provide a case-sensitive match
@rtype: None or str
@return: None if there is no match I{or} if there are multiple matches,
otherwise the element from the list which matches
"""
if key in name_list:
return key
re_flags = 0
if not case_sensitive:
re_flags |= re.IGNORECASE
key = key.upper()
name_re = re.compile(r"^%s(\..*)?$" % re.escape(key), re_flags)
names_filtered = []
string_matches = []
for name in name_list:
if name_re.match(name) is not None:
names_filtered.append(name)
if not case_sensitive and key == name.upper():
string_matches.append(name)
if len(string_matches) == 1:
return string_matches[0]
if len(names_filtered) == 1:
return names_filtered[0]
return None
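# Illustrative behaviour sketch (hypothetical host names, not part of the
# original module), following the docstring above:
#   MatchNameComponent("test1", ["test1.example.com", "test2.example.com"])
#     -> "test1.example.com"
#   MatchNameComponent("test1", ["test1.example.com", "test1.example.org"])
#     -> None (multiple matches count as no match)
#   MatchNameComponent("test1", ["test1", "test1.example.com"])
#     -> "test1" (a full match wins)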
def _DnsNameGlobHelper(match):
"""Helper function for L{DnsNameGlobPattern}.
Returns regular expression pattern for parts of the pattern.
"""
text = match.group(0)
if text == "*":
return "[^.]*"
elif text == "?":
return "[^.]"
else:
return re.escape(text)
def DnsNameGlobPattern(pattern):
"""Generates regular expression from DNS name globbing pattern.
A DNS name globbing pattern (e.g. C{*.site}) is converted to a regular
expression. Escape sequences or ranges (e.g. [a-z]) are not supported.
Matching always starts at the leftmost part. An asterisk (*) matches all
characters except the dot (.) separating DNS name parts. A question mark (?)
matches a single character except the dot (.).
@type pattern: string
@param pattern: DNS name globbing pattern
@rtype: string
@return: Regular expression
"""
return r"^%s(\..*)?$" % re.sub(r"\*|\?|[^*?]*", _DnsNameGlobHelper, pattern)
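# Quick illustration (made-up names, not part of the original module):
# "*.site" matches "node1.site" and "node1.site.example" but not "node1.other";
# "?" matches exactly one non-dot character, so "node?.site" matches "node1.site".
#   re.match(DnsNameGlobPattern("*.site"), "node1.site")   # match
#   re.match(DnsNameGlobPattern("*.site"), "node1.other")  # no match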
def FormatUnit(value, units, roman=False):
"""Formats an incoming number of MiB with the appropriate unit.
@type value: int
@param value: integer representing the value in MiB (1048576)
@type units: char
@param units: the type of formatting we should do:
- 'h' for automatic scaling
- 'm' for MiBs
- 'g' for GiBs
- 't' for TiBs
@rtype: str
@return: the formatted value (with suffix)
"""
if units not in ("m", "g", "t", "h"):
raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))
suffix = ""
if units == "m" or (units == "h" and value < 1024):
if units == "h":
suffix = "M"
return "%s%s" % (compat.RomanOrRounded(value, 0, roman), suffix)
elif units == "g" or (units == "h" and value < (1024 * 1024)):
if units == "h":
suffix = "G"
return "%s%s" % (compat.RomanOrRounded(float(value) / 1024, 1, roman),
suffix)
else:
if units == "h":
suffix = "T"
return "%s%s" % (compat.RomanOrRounded(float(value) / 1024 / 1024, 1,
roman), suffix)
def ParseUnit(input_string):
"""Tries to extract number and scale from the given string.
Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
[UNIT]}. If no unit is specified, it defaults to MiB. Return value
is always an int in MiB.
"""
m = _PARSEUNIT_REGEX.match(str(input_string))
if not m:
raise errors.UnitParseError("Invalid format")
value = float(m.groups()[0])
unit = m.groups()[1]
if unit:
lcunit = unit.lower()
else:
lcunit = "m"
if lcunit in ("m", "mb", "mib"):
# Value already in MiB
pass
elif lcunit in ("g", "gb", "gib"):
value *= 1024
elif lcunit in ("t", "tb", "tib"):
value *= 1024 * 1024
else:
raise errors.UnitParseError("Unknown unit: %s" % unit)
# Make sure we round up
if int(value) < value:
value += 1
# Round up to the next multiple of 4
value = int(value)
if value % 4:
value += 4 - value % 4
return value
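# Illustrative values (not in the original module); results are in MiB and are
# rounded up to the next multiple of 4 as implemented above:
#   ParseUnit("4")     -> 4     (no unit defaults to MiB)
#   ParseUnit("0.5g")  -> 512
#   ParseUnit("1.3g")  -> 1332  (1331.2 MiB rounded up to 1332)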
def ShellQuote(value):
"""Quotes shell argument according to POSIX.
@type value: str
@param value: the argument to be quoted
@rtype: str
@return: the quoted value
"""
if _SHELL_UNQUOTED_RE.match(value):
return value
else:
return "'%s'" % value.replace("'", "'\\''")
def ShellQuoteArgs(args):
"""Quotes a list of shell arguments.
@type args: list
@param args: list of arguments to be quoted
@rtype: str
@return: the quoted arguments concatenated with spaces
"""
return " ".join([ShellQuote(i) for i in args])
def ShellCombineCommands(cmdlist):
"""Out of a list of shell comands construct a single one.
"""
return ["/bin/sh", "-c", " && ".join(ShellQuoteArgs(c) for c in cmdlist)]
class ShellWriter:
"""Helper class to write scripts with indentation.
"""
INDENT_STR = " "
def __init__(self, fh, indent=True):
"""Initializes this class.
"""
self._fh = fh
self._indent_enabled = indent
self._indent = 0
def IncIndent(self):
"""Increase indentation level by 1.
"""
self._indent += 1
def DecIndent(self):
"""Decrease indentation level by 1.
"""
assert self._indent > 0
self._indent -= 1
def Write(self, txt, *args):
"""Write line to output file.
"""
assert self._indent >= 0
if args:
line = txt % args
else:
line = txt
if line and self._indent_enabled:
# Indent only if there's something on the line
self._fh.write(self._indent * self.INDENT_STR)
self._fh.write(line)
self._fh.write("\n")
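# Minimal usage sketch (illustrative only; "cfg_path" is a made-up variable and
# a writable file object such as sys.stdout is assumed):
#   sw = ShellWriter(sys.stdout)
#   sw.Write("if [ -e %s ]; then", ShellQuote(cfg_path))
#   sw.IncIndent()
#   sw.Write("echo exists")
#   sw.DecIndent()
#   sw.Write("fi")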
def GenerateSecret(numbytes=20):
"""Generates a random secret.
  This will generate a pseudo-random secret returning a hex string
(so that it can be used where an ASCII string is needed).
@param numbytes: the number of bytes which will be represented by the returned
string (defaulting to 20, the length of a SHA1 hash)
@rtype: str
  @return: a hex representation of the pseudo-random sequence
"""
return os.urandom(numbytes).encode("hex")
def _MakeMacAddrRegexp(octets):
"""Builds a regular expression for verifying MAC addresses.
@type octets: integer
@param octets: How many octets to expect (1-6)
@return: Compiled regular expression
"""
assert octets > 0
assert octets <= 6
return re.compile("^%s$" % ":".join([_MAC_ADDR_OCTET_RE] * octets),
re.I)
#: Regular expression for full MAC address
_MAC_CHECK_RE = _MakeMacAddrRegexp(6)
#: Regular expression for half a MAC address
_MAC_PREFIX_CHECK_RE = _MakeMacAddrRegexp(3)
def _MacAddressCheck(check_re, mac, msg):
"""Checks a MAC address using a regular expression.
@param check_re: Compiled regular expression as returned by C{re.compile}
@type mac: string
@param mac: MAC address to be validated
@type msg: string
@param msg: Error message (%s will be replaced with MAC address)
"""
if check_re.match(mac):
return mac.lower()
raise errors.OpPrereqError(msg % mac, errors.ECODE_INVAL)
def NormalizeAndValidateMac(mac):
"""Normalizes and check if a MAC address is valid and contains six octets.
Checks whether the supplied MAC address is formally correct. Accepts
colon-separated format only. Normalize it to all lower case.
@type mac: string
@param mac: MAC address to be validated
@rtype: string
@return: Normalized and validated MAC address
@raise errors.OpPrereqError: If the MAC address isn't valid
"""
return _MacAddressCheck(_MAC_CHECK_RE, mac, "Invalid MAC address '%s'")
def NormalizeAndValidateThreeOctetMacPrefix(mac):
"""Normalizes a potential MAC address prefix (three octets).
Checks whether the supplied string is a valid MAC address prefix consisting
of three colon-separated octets. The result is normalized to all lower case.
@type mac: string
@param mac: Prefix to be validated
@rtype: string
@return: Normalized and validated prefix
@raise errors.OpPrereqError: If the MAC address prefix isn't valid
"""
return _MacAddressCheck(_MAC_PREFIX_CHECK_RE, mac,
"Invalid MAC address prefix '%s'")
def SafeEncode(text):
"""Return a 'safe' version of a source string.
This function mangles the input string and returns a version that
should be safe to display/encode as ASCII. To this end, we first
convert it to ASCII using the 'backslashreplace' encoding which
should get rid of any non-ASCII chars, and then we process it
  through a loop copied from the string repr sources in Python; we
  don't use string_escape anymore since that escapes single quotes and
backslashes too, and that is too much; and that escaping is not
stable, i.e. string_escape(string_escape(x)) != string_escape(x).
@type text: str or unicode
@param text: input data
@rtype: str
@return: a safe version of text
"""
if isinstance(text, unicode):
# only if unicode; if str already, we handle it below
text = text.encode("ascii", "backslashreplace")
resu = ""
for char in text:
c = ord(char)
if char == "\t":
resu += r"\t"
elif char == "\n":
resu += r"\n"
elif char == "\r":
      resu += r"\r"
elif c < 32 or c >= 127: # non-printable
resu += "\\x%02x" % (c & 0xff)
else:
resu += char
return resu
def UnescapeAndSplit(text, sep=","):
r"""Split and unescape a string based on a given separator.
This function splits a string based on a separator where the
separator itself can be escape in order to be an element of the
elements. The escaping rules are (assuming coma being the
separator):
- a plain , separates the elements
- a sequence \\\\, (double backslash plus comma) is handled as a
backslash plus a separator comma
- a sequence \, (backslash plus comma) is handled as a
non-separator comma
@type text: string
@param text: the string to split
@type sep: string
  @param sep: the separator
  @rtype: list
@return: a list of strings
"""
# we split the list by sep (with no escaping at this stage)
slist = text.split(sep)
# next, we revisit the elements and if any of them ended with an odd
# number of backslashes, then we join it with the next
rlist = []
while slist:
e1 = slist.pop(0)
if e1.endswith("\\"):
num_b = len(e1) - len(e1.rstrip("\\"))
if num_b % 2 == 1 and slist:
e2 = slist.pop(0)
# Merge the two elements and push the result back to the source list for
# revisiting. If e2 ended with backslashes, further merging may need to
# be done.
slist.insert(0, e1 + sep + e2)
continue
# here the backslashes remain (all), and will be reduced in the next step
rlist.append(e1)
# finally, replace backslash-something with something
rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
return rlist
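# Behaviour sketch for the default comma separator (illustrative only):
#   UnescapeAndSplit("a,b")     -> ["a", "b"]
#   UnescapeAndSplit(r"a\,b")   -> ["a,b"]        (escaped comma, no split)
#   UnescapeAndSplit(r"a\\,b")  -> ["a\\", "b"]   (backslash kept, comma splits)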
def EscapeAndJoin(slist, sep=","):
"""Encode a list in a way parsable by UnescapeAndSplit.
@type slist: list of strings
@param slist: the strings to be encoded
@rtype: string
  @return: the encoding of the list as a string
"""
return sep.join([re.sub("\\" + sep, "\\\\" + sep,
re.sub(r"\\", r"\\\\", v)) for v in slist])
def CommaJoin(names):
"""Nicely join a set of identifiers.
@param names: set, list or tuple
@return: a string with the formatted results
"""
return ", ".join([str(val) for val in names])
def FormatTime(val, usecs=None):
"""Formats a time value.
@type val: float or None
@param val: Timestamp as returned by time.time() (seconds since Epoch,
1970-01-01 00:00:00 UTC)
@return: a string value or N/A if we don't have a valid timestamp
"""
if val is None or not isinstance(val, (int, float)):
return "N/A"
  # these two format codes work on Linux, but they are not guaranteed on all
# platforms
result = time.strftime("%F %T", time.localtime(val))
if usecs is not None:
result += ".%06d" % usecs
return result
def FormatSeconds(secs):
"""Formats seconds for easier reading.
@type secs: number
@param secs: Number of seconds
@rtype: string
@return: Formatted seconds (e.g. "2d 9h 19m 49s")
"""
parts = []
secs = round(secs, 0)
if secs > 0:
# Negative values would be a bit tricky
for unit, one in [("d", 24 * 60 * 60), ("h", 60 * 60), ("m", 60)]:
(complete, secs) = divmod(secs, one)
if complete or parts:
parts.append("%d%s" % (complete, unit))
parts.append("%ds" % secs)
return " ".join(parts)
class LineSplitter:
"""Splits data chunks into lines separated by newline.
Instances provide a file-like interface.
"""
def __init__(self, line_fn, *args):
"""Initializes this class.
@type line_fn: callable
@param line_fn: Function called for each line, first parameter is line
@param args: Extra arguments for L{line_fn}
"""
assert callable(line_fn)
if args:
# Python 2.4 doesn't have functools.partial yet
self._line_fn = \
lambda line: line_fn(line, *args) # pylint: disable=W0142
else:
self._line_fn = line_fn
self._lines = collections.deque()
self._buffer = ""
def write(self, data):
parts = (self._buffer + data).split("\n")
self._buffer = parts.pop()
self._lines.extend(parts)
def flush(self):
while self._lines:
self._line_fn(self._lines.popleft().rstrip("\r\n"))
def close(self):
self.flush()
if self._buffer:
self._line_fn(self._buffer)
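# Minimal usage sketch (illustrative only): lines may arrive in arbitrary chunks.
#   collected = []
#   splitter = LineSplitter(collected.append)
#   splitter.write("foo\nba")
#   splitter.write("r\n")
#   splitter.close()          # collected == ["foo", "bar"]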
def IsValidShellParam(word):
"""Verifies is the given word is safe from the shell's p.o.v.
This means that we can pass this to a command via the shell and be
sure that it doesn't alter the command line and is passed as such to
the actual command.
Note that we are overly restrictive here, in order to be on the safe
side.
@type word: str
@param word: the word to check
@rtype: boolean
@return: True if the word is 'safe'
"""
return bool(_SHELLPARAM_REGEX.match(word))
def BuildShellCmd(template, *args):
"""Build a safe shell command line from the given arguments.
This function will check all arguments in the args list so that they
are valid shell parameters (i.e. they don't contain shell
metacharacters). If everything is ok, it will return the result of
template % args.
@type template: str
@param template: the string holding the template for the
string formatting
@rtype: str
@return: the expanded command line
"""
for word in args:
if not IsValidShellParam(word):
raise errors.ProgrammerError("Shell argument '%s' contains"
" invalid characters" % word)
return template % args
def FormatOrdinal(value):
"""Formats a number as an ordinal in the English language.
E.g. the number 1 becomes "1st", 22 becomes "22nd".
@type value: integer
@param value: Number
@rtype: string
"""
tens = value % 10
if value > 10 and value < 20:
suffix = "th"
elif tens == 1:
suffix = "st"
elif tens == 2:
suffix = "nd"
elif tens == 3:
suffix = "rd"
else:
suffix = "th"
return "%s%s" % (value, suffix)
def Truncate(text, length):
"""Truncate string and add ellipsis if needed.
@type text: string
@param text: Text
@type length: integer
@param length: Desired length
@rtype: string
@return: Truncated text
"""
assert length > len(_ASCII_ELLIPSIS)
# Serialize if necessary
if not isinstance(text, basestring):
text = str(text)
if len(text) <= length:
return text
else:
return text[:length - len(_ASCII_ELLIPSIS)] + _ASCII_ELLIPSIS
def FilterEmptyLinesAndComments(text):
"""Filters empty lines and comments from a line-based string.
Whitespace is also removed from the beginning and end of all lines.
@type text: string
@param text: Input string
@rtype: list
"""
return [line for line in map(lambda s: s.strip(), text.splitlines())
# Ignore empty lines and comments
if line and not line.startswith("#")]
def FormatKeyValue(data):
"""Formats a dictionary as "key=value" parameters.
The keys are sorted to have a stable order.
@type data: dict
@rtype: list of string
"""
return ["%s=%s" % (key, value) for (key, value) in sorted(data.items())]
|
gpl-2.0
| -4,384,509,988,743,595,500 | 25.306802 | 80 | 0.655298 | false | 3.601744 | true | false | false |
whtsky/Waterspout
|
waterspout/auth.py
|
1
|
1374
|
import functools
from tornado.web import urlparse, urlencode, HTTPError
def permission_required(f):
"""
    Returns a decorator that checks the current user with the given function.
    If the user is not logged in, they will be redirected to the configured
    `login url <RequestHandler.get_login_url>`.
    If the user does not have the permission, they will receive a 403 page.
"""
@functools.wraps(f)
def check_permission(method):
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
user = self.current_user
if not user:
if self.request.method in ("GET", "HEAD"):
url = self.get_login_url()
if "?" not in url:
if urlparse.urlsplit(url).scheme:
# if login url is absolute, make next absolute too
next_url = self.request.full_url()
else:
next_url = self.request.uri
url += "?" + urlencode(dict(next=next_url))
self.redirect(url)
return
elif f(user):
return method(self, *args, **kwargs)
raise HTTPError(403)
return wrapper
return check_permission
login_required = permission_required(lambda x: True)
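# Illustrative sketch (the handler class and "is_admin" attribute are
# hypothetical, not part of this module; RequestHandler is assumed to come
# from tornado.web): build a stricter decorator from the factory above.
#
#   admin_required = permission_required(lambda user: getattr(user, "is_admin", False))
#
#   class AdminHandler(RequestHandler):
#       @admin_required
#       def get(self):
#           self.write("admins only")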
|
mit
| -6,591,939,943,022,267,000 | 33.35 | 78 | 0.542213 | false | 4.787456 | false | false | false |
elainekmao/hiphoptextanalysis
|
lyricwiki-scraper/lyricwiki/spiders/chance_spider.py
|
1
|
1152
|
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import Selector
from lyricwiki.items import LyricWikiItem
class LyricWikiSpider(CrawlSpider):
name = "chance" #CHANGE NAME
allowed_domains = ["lyrics.wikia.com"]
start_urls = [
"http://lyrics.wikia.com/Chance_The_Rapper", #CHANGE URL
]
rules = ( #CHANGE REGEX
Rule(SgmlLinkExtractor(allow=('/Chance_The_Rapper:.*',),restrict_xpaths=('//ol/li',)), callback='parse_item', follow=True),
)
def parse_item(self, response):
sel = Selector(response)
info = sel.xpath('//div[@class="mw-content-ltr"]')
item = LyricWikiItem()
item['title'] = sel.xpath('//header[@id="WikiaPageHeader"]/h1/text()').extract()
item['artist'] = info.xpath('b/a/text()').extract()
item['album'] = info.xpath('i/a/text()').extract()
item['lyrics'] = sel.xpath('//div[@class="lyricbox"]/text()').extract()
return item
|
gpl-2.0
| -5,528,600,276,207,493,000 | 44.16 | 131 | 0.578993 | false | 3.566563 | false | false | false |
sailfish-sdk/sailfish-qtcreator
|
tests/system/suite_general/tst_cmake_speedcrunch/test.py
|
1
|
3171
|
############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
source("../../shared/qtcreator.py")
SpeedCrunchPath = ""
BuildPath = tempDir()
def cmakeSupportsServerMode():
versionLines = filter(lambda line: "cmake version " in line,
getOutputFromCmdline(["cmake", "--version"]).splitlines())
try:
test.log("Using " + versionLines[0])
matcher = re.match("cmake version (\d+)\.(\d+)\.\d+", versionLines[0])
major = __builtin__.int(matcher.group(1))
minor = __builtin__.int(matcher.group(2))
except:
return False
if major < 3:
return False
elif major > 3:
return True
else:
return minor >= 7
def main():
if (which("cmake") == None):
test.fatal("cmake not found in PATH - needed to run this test")
return
if not neededFilePresent(SpeedCrunchPath):
return
startQC()
if not startedWithoutPluginError():
return
result = openCmakeProject(SpeedCrunchPath, BuildPath)
if not result:
test.fatal("Could not open/create cmake project - leaving test")
invokeMenuItem("File", "Exit")
return
progressBarWait(30000)
naviTreeView = "{column='0' container=':Qt Creator_Utils::NavigationTreeView' text~='%s' type='QModelIndex'}"
if cmakeSupportsServerMode():
treeFile = "projecttree_speedcrunch_server.tsv"
else:
treeFile = "projecttree_speedcrunch.tsv"
compareProjectTree(naviTreeView % "speedcrunch( \[\S+\])?", treeFile)
# Invoke a rebuild of the application
invokeMenuItem("Build", "Rebuild All")
# Wait for, and test if the build succeeded
waitForCompile(300000)
checkCompile()
checkLastBuild()
invokeMenuItem("File", "Exit")
def init():
global SpeedCrunchPath
SpeedCrunchPath = srcPath + "/creator-test-data/speedcrunch/src/CMakeLists.txt"
cleanup()
def cleanup():
global BuildPath
# Make sure the .user files are gone
cleanUpUserFiles(SpeedCrunchPath)
deleteDirIfExists(BuildPath)
|
gpl-3.0
| -7,002,482,751,329,805,000 | 34.233333 | 113 | 0.653106 | false | 4.024112 | true | false | false |
dustinrohde/python-conway
|
test/test_cell_set.py
|
1
|
2096
|
import pytest
from conway.grid import Cell
from conway.grid import Point as P
from conway.grid.cell_set import Grid
from . import GameRulesTestMixin
T = Cell.ALIVE
F = Cell.DEAD
class TestGrid(GameRulesTestMixin):
GRID_CLS = Grid
def test_init_with_width_and_height(self):
grid = Grid(width=3, height=2)
assert (grid.width, grid.height) == (3, 2)
assert grid.cells == set()
with pytest.raises(ValueError):
grid = Grid(width=3)
with pytest.raises(ValueError):
grid = Grid(height=3)
with pytest.raises(ValueError):
grid = Grid(width=3, height=0)
with pytest.raises(ValueError):
grid = Grid(width=0, height=3)
with pytest.raises(ValueError):
grid = Grid(width=0, height=0)
with pytest.raises(ValueError):
grid = Grid()
def test_init_with_cells(self):
cells = {P(0, 0), P(1, 1), P(2, 1)}
grid = Grid(cells=cells)
assert (grid.width, grid.height) == (3, 2)
assert grid.cells == cells
cells = {P(1, 1), P(1, 2)}
grid = Grid(cells=cells.copy())
assert (grid.width, grid.height) == (2, 3)
assert grid.cells == cells
grid = Grid(cells=cells.copy(), width=2, height=3)
assert (grid.width, grid.height) == (2, 3)
assert grid.cells == cells
grid = Grid(cells=cells.copy(), width=4)
assert (grid.width, grid.height) == (4, 3)
assert grid.cells == cells
grid = Grid(cells=cells.copy(), height=4)
assert (grid.width, grid.height) == (2, 4)
assert grid.cells == cells
with pytest.raises(ValueError):
grid = Grid(cells=cells.copy(), height=2)
with pytest.raises(ValueError):
grid = Grid(cells=cells.copy(), width=1)
with pytest.raises(ValueError):
grid = Grid(cells=set())
with pytest.raises(ValueError):
grid = Grid(cells=set(), width=2)
with pytest.raises(ValueError):
grid = Grid(cells=set(), height=2)
|
mit
| 1,718,295,299,079,489,500 | 29.823529 | 58 | 0.57395 | false | 3.601375 | true | false | false |
deepmind/sonnet
|
sonnet/src/build.py
|
1
|
2561
|
# Copyright 2019 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility function to build Sonnet modules."""
from typing import Any, Callable
import tensorflow as tf
import tree
def _int_or_none(o):
return isinstance(o, (int, type(None)))
def _promote_shapes(o):
"""Promotes lists of ints/Nones to :tf:`TensorSpec` instances."""
if isinstance(o, (list, tuple)) and all(_int_or_none(e) for e in o):
return tf.TensorSpec(o)
return o
def _maybe_tensor_spec(shape, dtype):
return tf.TensorSpec(shape, dtype) if dtype is not None else None
# TODO(tomhennigan) Use TensorNest in types here.
def build(
f: Callable[..., Any],
*args,
**kwargs
):
r"""Builds a module by creating all parameters but not computing any output.
>>> mod = snt.nets.MLP([1000, 10])
>>> snt.build(mod, [None, 28 * 28])
TensorSpec(shape=(None, 10), dtype=tf.float32, name=None)
>>> mod.variables
(<tf.Variable 'mlp/linear_0/b:0' shape=(1000,) ...>,
<tf.Variable 'mlp/linear_0/w:0' shape=(784, 1000) ...>,
<tf.Variable 'mlp/linear_1/b:0' shape=(10,) ...>,
<tf.Variable 'mlp/linear_1/w:0' shape=(1000, 10) ...>)
Args:
f: A function or callable :class:`Module` that will create variables.
*args: Positional arguments to supply to ``f``. Note that positional
arguments that are sequences of None/ints are converted to
:tf:`TensorSpec` instances.
**kwargs: Keyword arguments to pass to the module.
Returns:
The output of ``f`` with any :tf:`Tensor`\ s replaced by :tf:`TensorSpec`.
"""
f = tf.function(f)
args = map(_promote_shapes, args)
# NOTE: We use a concrete function to ensure that weights are created and
# initialized, but other stateful ops (e.g. updating weights) are not.
cf = f.get_concrete_function(*args, **kwargs)
return tree.map_structure(_maybe_tensor_spec, cf.output_shapes,
cf.output_dtypes)
|
apache-2.0
| 7,186,900,872,818,753,000 | 35.070423 | 78 | 0.655994 | false | 3.617232 | false | false | false |
bnewbold/diffoscope
|
tests/comparators/test_rpm.py
|
1
|
3005
|
# -*- coding: utf-8 -*-
#
# diffoscope: in-depth comparison of files, archives, and directories
#
# Copyright © 2015 Jérémy Bobbio <[email protected]>
#
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <http://www.gnu.org/licenses/>.
import os.path
import pytest
from diffoscope.comparators import specialize
from diffoscope.comparators.binary import FilesystemFile, NonExistingFile
from diffoscope.comparators.rpm import RpmFile
from diffoscope.config import Config
from conftest import tool_missing
TEST_FILE1_PATH = os.path.join(os.path.dirname(__file__), '../data/test1.rpm')
TEST_FILE2_PATH = os.path.join(os.path.dirname(__file__), '../data/test2.rpm')
@pytest.fixture
def rpm1():
return specialize(FilesystemFile(TEST_FILE1_PATH))
@pytest.fixture
def rpm2():
return specialize(FilesystemFile(TEST_FILE2_PATH))
def test_identification(rpm1):
assert isinstance(rpm1, RpmFile)
def test_no_differences(rpm1):
difference = rpm1.compare(rpm1)
assert difference is None
@pytest.fixture
def differences(rpm1, rpm2):
return rpm1.compare(rpm2).details
@pytest.mark.skipif(tool_missing('rpm2cpio'), reason='missing rpm2cpio')
def test_header(differences):
assert differences[0].source1 == 'header'
expected_diff = open(os.path.join(os.path.dirname(__file__), '../data/rpm_header_expected_diff')).read()
assert differences[0].unified_diff == expected_diff
@pytest.mark.skipif(tool_missing('rpm2cpio'), reason='missing rpm2cpio')
def test_listing(differences):
assert differences[1].source1 == 'content'
assert differences[1].details[0].source1 == 'file list'
expected_diff = open(os.path.join(os.path.dirname(__file__), '../data/rpm_listing_expected_diff')).read()
assert differences[1].details[0].unified_diff == expected_diff
@pytest.mark.skipif(tool_missing('rpm2cpio'), reason='missing rpm2cpio')
def test_content(differences):
assert differences[1].source1 == 'content'
assert differences[1].details[1].source1 == './dir/text'
expected_diff = open(os.path.join(os.path.dirname(__file__), '../data/text_ascii_expected_diff')).read()
assert differences[1].details[1].unified_diff == expected_diff
def test_compare_non_existing(monkeypatch, rpm1):
monkeypatch.setattr(Config.general, 'new_file', True)
difference = rpm1.compare(NonExistingFile('/nonexisting', rpm1))
assert difference.source2 == '/nonexisting'
assert difference.details[-1].source2 == '/dev/null'
|
gpl-3.0
| -2,231,490,363,198,703,600 | 39.567568 | 109 | 0.738175 | false | 3.395928 | true | false | false |
syscoin/syscoin2
|
test/functional/p2p_disconnect_ban.py
|
1
|
5575
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node disconnect and ban behavior"""
import time
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
wait_until,
)
class DisconnectBanTest(SyscoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.supports_cli = False
def run_test(self):
self.log.info("Connect nodes both way")
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 0)
self.log.info("Test setban and listbanned RPCs")
self.log.info("setban: successfully ban single IP address")
assert_equal(len(self.nodes[1].getpeerinfo()), 2) # node1 should have 2 connections to node0 at this point
self.nodes[1].setban(subnet="127.0.0.1", command="add")
wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
assert_equal(len(self.nodes[1].getpeerinfo()), 0) # all nodes must be disconnected at this point
assert_equal(len(self.nodes[1].listbanned()), 1)
self.log.info("clearbanned: successfully clear ban list")
self.nodes[1].clearbanned()
assert_equal(len(self.nodes[1].listbanned()), 0)
self.nodes[1].setban("127.0.0.0/24", "add")
self.log.info("setban: fail to ban an already banned subnet")
assert_equal(len(self.nodes[1].listbanned()), 1)
assert_raises_rpc_error(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add")
self.log.info("setban: fail to ban an invalid subnet")
assert_raises_rpc_error(-30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add")
assert_equal(len(self.nodes[1].listbanned()), 1) # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
self.log.info("setban remove: fail to unban a non-banned subnet")
assert_raises_rpc_error(-30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove")
assert_equal(len(self.nodes[1].listbanned()), 1)
self.log.info("setban remove: successfully unban subnet")
self.nodes[1].setban("127.0.0.0/24", "remove")
assert_equal(len(self.nodes[1].listbanned()), 0)
self.nodes[1].clearbanned()
assert_equal(len(self.nodes[1].listbanned()), 0)
self.log.info("setban: test persistence across node restart")
self.nodes[1].setban("127.0.0.0/32", "add")
self.nodes[1].setban("127.0.0.0/24", "add")
# Set the mocktime so we can control when bans expire
old_time = int(time.time())
self.nodes[1].setmocktime(old_time)
self.nodes[1].setban("192.168.0.1", "add", 1) # ban for 1 seconds
self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) # ban for 1000 seconds
listBeforeShutdown = self.nodes[1].listbanned()
assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
# Move time forward by 3 seconds so the third ban has expired
self.nodes[1].setmocktime(old_time + 3)
assert_equal(len(self.nodes[1].listbanned()), 3)
self.stop_node(1)
self.start_node(1)
listAfterShutdown = self.nodes[1].listbanned()
assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
assert_equal("/19" in listAfterShutdown[2]['address'], True)
# Clear ban lists
self.nodes[1].clearbanned()
self.log.info("Connect nodes both way")
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 0)
self.log.info("Test disconnectnode RPCs")
self.log.info("disconnectnode: fail to disconnect when calling with address and nodeid")
address1 = self.nodes[0].getpeerinfo()[0]['addr']
node1 = self.nodes[0].getpeerinfo()[0]['addr']
assert_raises_rpc_error(-32602, "Only one of address and nodeid should be provided.", self.nodes[0].disconnectnode, address=address1, nodeid=node1)
self.log.info("disconnectnode: fail to disconnect when calling with junk address")
assert_raises_rpc_error(-29, "Node not found in connected nodes", self.nodes[0].disconnectnode, address="221B Baker Street")
self.log.info("disconnectnode: successfully disconnect node by address")
address1 = self.nodes[0].getpeerinfo()[0]['addr']
self.nodes[0].disconnectnode(address=address1)
wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
self.log.info("disconnectnode: successfully reconnect node")
connect_nodes(self.nodes[0], 1) # reconnect the node
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
assert [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
self.log.info("disconnectnode: successfully disconnect node by node id")
id1 = self.nodes[0].getpeerinfo()[0]['id']
self.nodes[0].disconnectnode(nodeid=id1)
wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
assert not [node for node in self.nodes[0].getpeerinfo() if node['id'] == id1]
if __name__ == '__main__':
DisconnectBanTest().main()
|
mit
| -4,048,927,991,779,465,700 | 47.903509 | 155 | 0.650762 | false | 3.328358 | true | false | false |
Zatsugami/python-egghead-crawler
|
egghead/spiders/lessons.py
|
1
|
1244
|
# -*- coding: utf-8 -*-
import scrapy
from egghead.items import LessonVideo
from egghead.spiders import LoginSpider
from urlparse import urlparse, urljoin
def lesson_filename(url):
file_name = urlparse(url).path.split('/')[-1]
return '{}.mp4'.format(file_name)
def lesson_urls(response):
return response.css('#lesson-list .lesson-row .title a::attr(href)').extract()
class LessonsSpider(LoginSpider):
name = 'lessons'
allowed_domains = ['egghead.io']
on_success_auth = 'https://egghead.io/technologies'
def with_session(self, response):
urls = lesson_urls(response)
for url in urls:
yield scrapy.Request(url, callback=self.parse_lesson)
next_page_url = response.css('.pagination .next a::attr(href)').extract_first()
if next_page_url:
next_url = urljoin(response.url, next_page_url)
yield scrapy.Request(next_url, callback=self.with_session)
def parse_lesson(self, response):
file_url = response.css('.wistia_embed meta[itemprop="contentURL"]::attr(content)').extract_first()
file_name = lesson_filename(response.url)
if file_url:
yield LessonVideo(file_urls=['{}/{}'.format(file_url, file_name)])
|
gpl-2.0
| -1,784,667,792,561,143,300 | 32.621622 | 107 | 0.659164 | false | 3.455556 | false | false | false |
apiaryio/black-belt
|
blackbelt/deployment.py
|
1
|
1417
|
from subprocess import check_call, check_output
from blackbelt.handle_github import get_current_branch, run_grunt_in_parallel
from blackbelt.messages import post_message
def deploy_staging():
branch_name = get_current_branch()
post_message("Deploying branch %s to staging" % branch_name, "#deploy-queue")
check_call(['grunt', 'deploy', '--app=apiary-staging', '--force', "--branch=%s" % branch_name])
def deploy_production():
post_message("Deploying to production", "#deploy-queue")
    slug_creation_return_code = run_grunt_in_parallel((
['grunt', 'create-slug'],
['grunt', 'create-slug', '--app=apiary-staging-pre'],
['grunt', 'create-slug', '--app=apiary-staging-qa'],
))
    if slug_creation_return_code != 0:
post_message("Slug creation failed, deploy stopped.", "#deploy-queue")
raise ValueError("One of the slug creations failed. Check output few lines above.")
check_output(['grunt', 'deploy-slug', '--app=apiary-staging-qa'])
check_output(['grunt', 'deploy-slug', '--app=apiary-staging-pre'])
check_output(['grunt', 'deploy-slug'])
def rollback_production():
post_message("Rollback production for all environments (prod, qa, pre)", "#deploy-queue")
check_call(['grunt', 'rollback', '--app=apiary-staging-qa'])
check_call(['grunt', 'rollback', '--app=apiary-staging-pre'])
check_call(['grunt', 'rollback'])
|
mit
| 1,149,457,579,899,781,200 | 37.297297 | 99 | 0.66055 | false | 3.516129 | false | false | false |
richard-willowit/odoo
|
odoo/tools/xml_utils.py
|
2
|
1848
|
# -*- coding: utf-8 -*-
from lxml import etree
from odoo.tools.misc import file_open
def check_with_xsd(tree_or_str, xsd_path):
if not isinstance(tree_or_str, etree._Element):
tree_or_str = etree.fromstring(tree_or_str)
xml_schema_doc = etree.parse(file_open(xsd_path))
xsd_schema = etree.XMLSchema(xml_schema_doc)
try:
xsd_schema.assertValid(tree_or_str)
except etree.DocumentInvalid as xml_errors:
        # import UserError only here to avoid circular import statements with tools.func being imported in exceptions.py
from odoo.exceptions import UserError
raise UserError('\n'.join(str(e) for e in xml_errors.error_log))
def create_xml_node_chain(first_parent_node, nodes_list, last_node_value=None):
""" Utility function for generating XML files nodes. Generates as a hierarchical
chain of nodes (each new node being the son of the previous one) based on the tags
contained in `nodes_list`, under the given node `first_parent_node`.
It will also set the value of the last of these nodes to `last_node_value` if it is
specified. This function returns the list of created nodes.
"""
res = []
current_node = first_parent_node
for tag in nodes_list:
current_node = etree.SubElement(current_node, tag)
res.append(current_node)
if last_node_value is not None:
current_node.text = last_node_value
return res
def create_xml_node(parent_node, node_name, node_value=None):
""" Utility function for managing XML. It creates a new node with the specified
`node_name` as a child of given `parent_node` and assigns it `node_value` as value.
:param parent_node: valid etree Element
:param node_name: string
:param node_value: string
"""
return create_xml_node_chain(parent_node, [node_name], node_value)[0]
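# Illustrative sketch (element names are made up): building
# <root><chain><child>42</child></chain></root> with the helpers above.
#   root = etree.Element('root')
#   create_xml_node_chain(root, ['chain', 'child'], '42')
#   create_xml_node(root, 'extra', 'x')   # adds <extra>x</extra> under <root>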
|
gpl-3.0
| -1,629,396,402,940,386,600 | 43 | 119 | 0.697511 | false | 3.659406 | false | false | false |
sanjeevtripurari/hue
|
desktop/core/src/desktop/middleware.py
|
1
|
25658
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import inspect
import json
import logging
import os.path
import re
import tempfile
import time
import kerberos
from datetime import datetime
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME, BACKEND_SESSION_KEY, authenticate, load_backend, login
from django.contrib.auth.middleware import RemoteUserMiddleware
from django.core import exceptions, urlresolvers
import django.db
from django.http import HttpResponseNotAllowed
from django.core.urlresolvers import resolve
from django.http import HttpResponseRedirect, HttpResponse
from django.utils.translation import ugettext as _
from django.utils.http import urlquote, is_safe_url
from django.utils.encoding import iri_to_uri
import django.views.static
import desktop.views
import desktop.conf
from desktop.context_processors import get_app_name
from desktop.lib import apputil, i18n
from desktop.lib.django_util import render, render_json, is_jframe_request, get_username_re_rule, get_groupname_re_rule
from desktop.lib.exceptions import StructuredException
from desktop.lib.exceptions_renderable import PopupException
from desktop.log.access import access_log, log_page_hit
from desktop import appmanager
from desktop import metrics
from hadoop import cluster
from desktop.log import get_audit_logger
LOG = logging.getLogger(__name__)
MIDDLEWARE_HEADER = "X-Hue-Middleware-Response"
# Views inside Django that don't require login
# (see LoginAndPermissionMiddleware)
DJANGO_VIEW_AUTH_WHITELIST = [
django.views.static.serve,
desktop.views.is_alive,
]
class AjaxMiddleware(object):
"""
Middleware that augments request to set request.ajax
for either is_ajax() (looks at HTTP headers) or ?format=json
GET parameters.
"""
def process_request(self, request):
request.ajax = request.is_ajax() or request.REQUEST.get("format", "") == "json"
return None
class ExceptionMiddleware(object):
"""
If exceptions know how to render themselves, use that.
"""
def process_exception(self, request, exception):
import traceback
tb = traceback.format_exc()
logging.info("Processing exception: %s: %s" % (i18n.smart_unicode(exception),
i18n.smart_unicode(tb)))
if isinstance(exception, PopupException):
return exception.response(request)
if isinstance(exception, StructuredException):
if request.ajax:
response = render_json(exception.response_data)
response[MIDDLEWARE_HEADER] = 'EXCEPTION'
response.status_code = getattr(exception, 'error_code', 500)
return response
else:
response = render("error.mako", request,
dict(error=exception.response_data.get("message")))
response.status_code = getattr(exception, 'error_code', 500)
return response
return None
class ClusterMiddleware(object):
"""
Manages setting request.fs and request.jt
"""
def process_view(self, request, view_func, view_args, view_kwargs):
"""
Sets request.fs and request.jt on every request to point to the
configured filesystem.
"""
request.fs_ref = request.REQUEST.get('fs', view_kwargs.get('fs', 'default'))
if "fs" in view_kwargs:
del view_kwargs["fs"]
try:
request.fs = cluster.get_hdfs(request.fs_ref)
except KeyError:
raise KeyError(_('Cannot find HDFS called "%(fs_ref)s".') % {'fs_ref': request.fs_ref})
if request.user.is_authenticated():
if request.fs is not None:
request.fs.setuser(request.user.username)
request.jt = cluster.get_default_mrcluster() # Deprecated, only there for MR1
if request.jt is not None:
request.jt.setuser(request.user.username)
else:
request.jt = None
class NotificationMiddleware(object):
"""
Manages setting request.info and request.error
"""
def process_view(self, request, view_func, view_args, view_kwargs):
def message(title, detail=None):
if detail is None:
detail = ''
else:
detail = '<br/>%s' % detail
return '%s %s' % (title, detail)
def info(title, detail=None):
messages.info(request, message(title, detail))
def error(title, detail=None):
messages.error(request, message(title, detail))
def warn(title, detail=None):
messages.warning(request, message(title, detail))
request.info = info
request.error = error
request.warn = warn
class AppSpecificMiddleware(object):
@classmethod
def augment_request_with_app(cls, request, view_func):
""" Stuff the app into the request for use in later-stage middleware """
if not hasattr(request, "_desktop_app"):
module = inspect.getmodule(view_func)
request._desktop_app = apputil.get_app_for_module(module)
if not request._desktop_app and not module.__name__.startswith('django.'):
logging.debug("no app for view func: %s in %s" % (view_func, module))
def __init__(self):
self.middlewares_by_app = {}
for app in appmanager.DESKTOP_APPS:
self.middlewares_by_app[app.name] = self._load_app_middleware(app)
def _get_middlewares(self, app, type):
return self.middlewares_by_app.get(app, {}).get(type, [])
def process_view(self, request, view_func, view_args, view_kwargs):
"""View middleware"""
self.augment_request_with_app(request, view_func)
if not request._desktop_app:
return None
# Run the middlewares
ret = None
for middleware in self._get_middlewares(request._desktop_app, 'view'):
ret = middleware(request, view_func, view_args, view_kwargs)
if ret: return ret # short circuit
return ret
def process_response(self, request, response):
"""Response middleware"""
# We have the app that we stuffed in there
if not hasattr(request, '_desktop_app'):
logging.debug("No desktop_app known for request.")
return response
for middleware in reversed(self._get_middlewares(request._desktop_app, 'response')):
response = middleware(request, response)
return response
def process_exception(self, request, exception):
"""Exception middleware"""
# We have the app that we stuffed in there
if not hasattr(request, '_desktop_app'):
logging.debug("No desktop_app known for exception.")
return None
# Run the middlewares
ret = None
for middleware in self._get_middlewares(request._desktop_app, 'exception'):
ret = middleware(request, exception)
if ret: return ret # short circuit
return ret
def _load_app_middleware(cls, app):
app_settings = app.settings
if not app_settings:
return
mw_classes = app_settings.__dict__.get('MIDDLEWARE_CLASSES', [])
result = {'view': [], 'response': [], 'exception': []}
for middleware_path in mw_classes:
# This code brutally lifted from django.core.handlers
try:
dot = middleware_path.rindex('.')
except ValueError:
raise exceptions.ImproperlyConfigured, _('%(module)s isn\'t a middleware module.') % {'module': middleware_path}
mw_module, mw_classname = middleware_path[:dot], middleware_path[dot+1:]
try:
mod = __import__(mw_module, {}, {}, [''])
except ImportError, e:
raise exceptions.ImproperlyConfigured, _('Error importing middleware %(module)s: "%(error)s".') % {'module': mw_module, 'error': e}
try:
mw_class = getattr(mod, mw_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured, _('Middleware module "%(module)s" does not define a "%(class)s" class.') % {'module': mw_module, 'class':mw_classname}
try:
mw_instance = mw_class()
except exceptions.MiddlewareNotUsed:
continue
# End brutal code lift
# We need to make sure we don't have a process_request function because we don't know what
# application will handle the request at the point process_request is called
if hasattr(mw_instance, 'process_request'):
raise exceptions.ImproperlyConfigured, \
_('AppSpecificMiddleware module "%(module)s" has a process_request function' + \
' which is impossible.') % {'module': middleware_path}
if hasattr(mw_instance, 'process_view'):
result['view'].append(mw_instance.process_view)
if hasattr(mw_instance, 'process_response'):
result['response'].insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
result['exception'].insert(0, mw_instance.process_exception)
return result
class LoginAndPermissionMiddleware(object):
"""
Middleware that forces all views (except those that opt out) through authentication.
"""
def process_view(self, request, view_func, view_args, view_kwargs):
"""
We also perform access logging in ``process_view()`` since we have the view function,
which tells us the log level. The downside is that we don't have the status code,
which isn't useful for status logging anyways.
"""
access_log_level = getattr(view_func, 'access_log_level', None)
# First, skip views not requiring login
# If the view has "opted out" of login required, skip
if hasattr(view_func, "login_notrequired"):
log_page_hit(request, view_func, level=access_log_level or logging.DEBUG)
return None
# There are certain django views which are also opt-out, but
# it would be evil to go add attributes to them
if view_func in DJANGO_VIEW_AUTH_WHITELIST:
log_page_hit(request, view_func, level=access_log_level or logging.DEBUG)
return None
# If user is logged in, check that he has permissions to access the
# app.
if request.user.is_active and request.user.is_authenticated():
AppSpecificMiddleware.augment_request_with_app(request, view_func)
# Until we get Django 1.3 and resolve returning the URL name, we just do a match of the name of the view
try:
access_view = 'access_view:%s:%s' % (request._desktop_app, resolve(request.path)[0].__name__)
except Exception, e:
access_log(request, 'error checking view perm: %s', e, level=access_log_level)
access_view =''
# Accessing an app can access an underlying other app.
# e.g. impala or spark uses code from beeswax and so accessing impala shows up as beeswax here.
# Here we trust the URL to be the real app we need to check the perms.
app_accessed = request._desktop_app
ui_app_accessed = get_app_name(request)
if app_accessed != ui_app_accessed and ui_app_accessed not in ('logs', 'accounts', 'login'):
app_accessed = ui_app_accessed
if app_accessed and \
app_accessed not in ("desktop", "home", "about") and \
not (request.user.has_hue_permission(action="access", app=app_accessed) or
request.user.has_hue_permission(action=access_view, app=app_accessed)):
access_log(request, 'permission denied', level=access_log_level)
return PopupException(
_("You do not have permission to access the %(app_name)s application.") % {'app_name': app_accessed.capitalize()}, error_code=401).response(request)
else:
log_page_hit(request, view_func, level=access_log_level)
return None
logging.info("Redirecting to login page: %s", request.get_full_path())
access_log(request, 'login redirection', level=access_log_level)
if request.ajax:
# Send back a magic header which causes Hue.Request to interpose itself
# in the ajax request and make the user login before resubmitting the
# request.
response = HttpResponse("/* login required */", content_type="text/javascript")
response[MIDDLEWARE_HEADER] = 'LOGIN_REQUIRED'
return response
else:
return HttpResponseRedirect("%s?%s=%s" % (settings.LOGIN_URL, REDIRECT_FIELD_NAME, urlquote(request.get_full_path())))
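# Illustrative sketch (not from the original file): LoginAndPermissionMiddleware
# above only checks for a "login_notrequired" attribute on the view function, so
# a view usually opts out of authentication via a decorator along these lines.
# Hue ships its own equivalent helper; the name here is illustrative only.
def _login_notrequired_sketch(view_func):
  """Mark a view as exempt from LoginAndPermissionMiddleware (example only)."""
  view_func.login_notrequired = True
  return view_func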
class JsonMessage(object):
def __init__(self, **kwargs):
self.kwargs = kwargs
def __str__(self):
return json.dumps(self.kwargs)
class AuditLoggingMiddleware(object):
username_re = get_username_re_rule()
groupname_re = get_groupname_re_rule()
operations = {
'/accounts/login': 'USER_LOGIN',
'/accounts/logout': 'USER_LOGOUT',
'/useradmin/users/add_ldap_users': 'ADD_LDAP_USERS',
'/useradmin/users/add_ldap_groups': 'ADD_LDAP_GROUPS',
'/useradmin/users/sync_ldap_users_groups': 'SYNC_LDAP_USERS_GROUPS',
'/useradmin/users/new': 'CREATE_USER',
'/useradmin/groups/new': 'CREATE_GROUP',
'/useradmin/users/delete': 'DELETE_USER',
'/useradmin/groups/delete': 'DELETE_GROUP'
}
operation_patterns = {
'/useradmin/permissions/edit/(?P<app>.*)/(?P<priv>.*)': 'EDIT_PERMISSION',
'/useradmin/users/edit/(?P<username>%s)' % (username_re,): 'EDIT_USER',
'/useradmin/groups/edit/(?P<name>%s)' % (groupname_re,): 'EDIT_GROUP'
}
def __init__(self):
from desktop.conf import AUDIT_EVENT_LOG_DIR, SERVER_USER
self.impersonator = SERVER_USER.get()
if not AUDIT_EVENT_LOG_DIR.get():
LOG.info('Unloading AuditLoggingMiddleware')
raise exceptions.MiddlewareNotUsed
def process_view(self, request, view_func, view_args, view_kwargs):
try:
operation = self._get_operation(request.path)
if operation == 'USER_LOGOUT':
self._log_message(operation, request)
except Exception, e:
LOG.error('Could not audit the request: %s' % e)
return None
def process_response(self, request, response):
response['audited'] = False
try:
operation = self._get_operation(request.path)
if request.method == 'POST' and operation and operation != 'USER_LOGOUT':
self._log_message(operation, request, response)
response['audited'] = True
except Exception, e:
LOG.error('Could not audit the request: %s' % e)
return response
def _log_message(self, operation, request, response=None):
audit_logger = get_audit_logger()
allowed = True
status = 200
if response is not None:
allowed = response.status_code != 401
status = response.status_code
audit_logger.debug(JsonMessage(**{
'username': self._get_username(request),
'impersonator': self.impersonator,
'ipAddress': self._get_client_ip(request),
'operation': operation,
'eventTime': self._milliseconds_since_epoch(),
'allowed': allowed,
'status': status,
'service': get_app_name(request),
'url': request.path
}))
def _get_client_ip(self, request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
x_forwarded_for = x_forwarded_for.split(',')[0]
return request.META.get('HTTP_CLIENT_IP') or x_forwarded_for or request.META.get('REMOTE_ADDR')
def _get_username(self, request):
if hasattr(request, 'user') and not request.user.is_anonymous():
return request.user.get_username()
else:
return 'anonymous'
def _milliseconds_since_epoch(self):
return int(time.time() * 1000)
def _get_operation(self, path):
url = path.rstrip('/')
if url in AuditLoggingMiddleware.operations:
return AuditLoggingMiddleware.operations[url]
else:
for regex, operation in AuditLoggingMiddleware.operation_patterns.items():
pattern = re.compile(regex)
if pattern.match(url):
return operation
return None
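# Illustrative sketch (made-up values): the shape of one audit record written by
# AuditLoggingMiddleware._log_message() above. The field names come from the
# JsonMessage call in that method; the values are not from real traffic.
#
#   {"username": "admin", "impersonator": "hue", "ipAddress": "10.0.0.5",
#    "operation": "USER_LOGIN", "eventTime": 1400000000000, "allowed": true,
#    "status": 200, "service": "accounts", "url": "/accounts/login"}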
try:
import tidylib
_has_tidylib = True
except Exception, ex:
# The exception type is not ImportError. It's actually an OSError.
logging.warn("Failed to import tidylib (for debugging). Is libtidy installed?")
_has_tidylib = False
class HtmlValidationMiddleware(object):
"""
If configured, validate output html for every response.
"""
def __init__(self):
self._logger = logging.getLogger('HtmlValidationMiddleware')
if not _has_tidylib:
logging.error("HtmlValidationMiddleware not activatived: "
"Failed to import tidylib.")
return
# Things that we don't care about
self._to_ignore = (
re.compile('- Warning: <.*> proprietary attribute "data-'),
re.compile('- Warning: trimming empty'),
re.compile('- Info:'),
)
# Find the directory to write tidy html output
try:
self._outdir = os.path.join(tempfile.gettempdir(), 'hue_html_validation')
if not os.path.isdir(self._outdir):
os.mkdir(self._outdir, 0755)
except Exception, ex:
self._logger.exception('Failed to get temp directory: %s', (ex,))
self._outdir = tempfile.mkdtemp(prefix='hue_html_validation-')
# Options to pass to libtidy. See
# http://tidy.sourceforge.net/docs/quickref.html
self._options = {
'show-warnings': 1,
'output-html': 0,
'output-xhtml': 1,
'char-encoding': 'utf8',
'output-encoding': 'utf8',
'indent': 1,
'wrap': 0,
}
def process_response(self, request, response):
if not _has_tidylib or not self._is_html(request, response):
return response
html, errors = tidylib.tidy_document(response.content,
self._options,
keep_doc=True)
if not errors:
return response
# Filter out what we care about
err_list = errors.rstrip().split('\n')
err_list = self._filter_warnings(err_list)
if not err_list:
return response
try:
fn = urlresolvers.resolve(request.path)[0]
fn_name = '%s.%s' % (fn.__module__, fn.__name__)
except:
LOG.exception('failed to resolve url')
fn_name = '<unresolved_url>'
# Write the two versions of html out for offline debugging
filename = os.path.join(self._outdir, fn_name)
result = "HTML tidy result: %s [%s]:" \
"\n\t%s" \
"\nPlease see %s.orig %s.tidy\n-------" % \
(request.path, fn_name, '\n\t'.join(err_list), filename, filename)
file(filename + '.orig', 'w').write(i18n.smart_str(response.content))
file(filename + '.tidy', 'w').write(i18n.smart_str(html))
file(filename + '.info', 'w').write(i18n.smart_str(result))
self._logger.error(result)
return response
def _filter_warnings(self, err_list):
"""A hacky way to filter out things that we don't care about."""
res = [ ]
for err in err_list:
for ignore in self._to_ignore:
if ignore.search(err):
break
else:
res.append(err)
return res
def _is_html(self, request, response):
return not request.is_ajax() and \
'html' in response['Content-Type'] and \
200 <= response.status_code < 300
class SpnegoMiddleware(object):
"""
  Based on the WSGI SPNEGO middleware class posted here:
http://code.activestate.com/recipes/576992/
"""
def __init__(self):
if not 'desktop.auth.backend.SpnegoDjangoBackend' in desktop.conf.AUTH.BACKEND.get():
LOG.info('Unloading SpnegoMiddleware')
raise exceptions.MiddlewareNotUsed
def process_response(self, request, response):
if 'GSS-String' in request.META:
response['WWW-Authenticate'] = request.META['GSS-String']
elif 'Return-401' in request.META:
response = HttpResponse("401 Unauthorized", content_type="text/plain",
status=401)
response['WWW-Authenticate'] = 'Negotiate'
response.status = 401
return response
def process_request(self, request):
"""
The process_request() method needs to communicate some state to the
process_response() method. The two options for this are to return an
HttpResponse object or to modify the META headers in the request object. In
order to ensure that all of the middleware is properly invoked, this code
    currently uses the latter approach. The following headers are currently used:
GSS-String:
This means that GSS authentication was successful and that we need to pass
this value for the WWW-Authenticate header in the response.
Return-401:
This means that the SPNEGO backend is in use, but we didn't get an
AUTHORIZATION header from the client. The way that the protocol works
(http://tools.ietf.org/html/rfc4559) is by having the first response to an
un-authenticated request be a 401 with the WWW-Authenticate header set to
Negotiate. This will cause the browser to re-try the request with the
AUTHORIZATION header set.
"""
# AuthenticationMiddleware is required so that request.user exists.
if not hasattr(request, 'user'):
raise ImproperlyConfigured(
"The Django remote user auth middleware requires the"
" authentication middleware to be installed. Edit your"
" MIDDLEWARE_CLASSES setting to insert"
" 'django.contrib.auth.middleware.AuthenticationMiddleware'"
" before the SpnegoUserMiddleware class.")
if 'HTTP_AUTHORIZATION' in request.META:
type, authstr = request.META['HTTP_AUTHORIZATION'].split(' ', 1)
if type == 'Negotiate':
try:
result, context = kerberos.authGSSServerInit('HTTP')
if result != 1:
return
          gssstring = ''
          r = kerberos.authGSSServerStep(context, authstr)
          if r == 1:
            gssstring = kerberos.authGSSServerResponse(context)
request.META['GSS-String'] = 'Negotiate %s' % gssstring
else:
kerberos.authGSSServerClean(context)
return
username = kerberos.authGSSServerUserName(context)
kerberos.authGSSServerClean(context)
if request.user.is_authenticated():
if request.user.username == self.clean_username(username, request):
return
user = authenticate(username=username)
if user:
request.user = user
login(request, user)
return
except:
LOG.exception('Unexpected error when authenticating against KDC')
return
else:
request.META['Return-401'] = ''
return
else:
if not request.user.is_authenticated():
request.META['Return-401'] = ''
return
def clean_username(self, username, request):
"""
Allows the backend to clean the username, if the backend defines a
clean_username method.
"""
backend_str = request.session[BACKEND_SESSION_KEY]
backend = load_backend(backend_str)
try:
username = backend.clean_username(username)
except AttributeError:
pass
return username
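# Illustrative sketch (placeholder tokens, not part of the original class): the
# SPNEGO exchange that SpnegoMiddleware above implements, reduced to the headers
# it reads and writes in process_request()/process_response().
#
#   1. Client : GET /hue/              (no Authorization header)
#      Server : 401, WWW-Authenticate: Negotiate                  <- 'Return-401' path
#   2. Client : GET /hue/              Authorization: Negotiate <base64 GSS token>
#      Server : 200, WWW-Authenticate: Negotiate <base64 reply>   <- 'GSS-String' path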
class HueRemoteUserMiddleware(RemoteUserMiddleware):
"""
Middleware to delegate authentication to a proxy server. The proxy server
will set an HTTP header (defaults to Remote-User) with the name of the
authenticated user. This class extends the RemoteUserMiddleware class
built into Django with the ability to configure the HTTP header and to
unload the middleware if the RemoteUserDjangoBackend is not currently
in use.
"""
def __init__(self):
if not 'desktop.auth.backend.RemoteUserDjangoBackend' in desktop.conf.AUTH.BACKEND.get():
LOG.info('Unloading HueRemoteUserMiddleware')
raise exceptions.MiddlewareNotUsed
self.header = desktop.conf.AUTH.REMOTE_USER_HEADER.get()
class EnsureSafeMethodMiddleware(object):
"""
Middleware to white list configured HTTP request methods.
"""
def process_request(self, request):
if request.method not in desktop.conf.HTTP_ALLOWED_METHODS.get():
return HttpResponseNotAllowed(desktop.conf.HTTP_ALLOWED_METHODS.get())
class EnsureSafeRedirectURLMiddleware(object):
"""
Middleware to white list configured redirect URLs.
"""
def process_response(self, request, response):
if response.status_code in (301, 302, 303, 305, 307, 308) and response.get('Location'):
redirection_patterns = desktop.conf.REDIRECT_WHITELIST.get()
location = response['Location']
if any(regexp.match(location) for regexp in redirection_patterns):
return response
if is_safe_url(location, request.get_host()):
return response
response = render("error.mako", request, dict(error=_('Redirect to %s is not allowed.') % response['Location']))
response.status_code = 403
return response
else:
return response
class MetricsMiddleware(object):
"""
Middleware to track the number of active requests.
"""
def process_request(self, request):
self._response_timer = metrics.response_time.time()
metrics.active_requests.inc()
def process_exception(self, request, exception):
self._response_timer.stop()
metrics.request_exceptions.inc()
def process_response(self, request, response):
self._response_timer.stop()
metrics.active_requests.dec()
return response
|
apache-2.0
| 6,306,637,644,091,341,000 | 34.735376 | 167 | 0.671292 | false | 3.91785 | false | false | false |
Toofifty/Oracle2
|
oracle/modules/lottery.py
|
1
|
5609
|
from threading import Thread
import time, traceback, random
from format import BOLD, RESET, CYAN, GREEN
lotto = None
def _init(b):
print '\t%s loaded' % __name__
def lottery(l, b, i):
"""!parent-command
!c new
!d Create a new lottery (cost: 10 points)
!a [duration] [min-bet] [max-bet]
!r user
!c info
!d Get info about the current lottery
!r user
!c bet
!d Place a bet in the current lottery
!a <amount>
!r user
"""
    def new(l, b, i):
        global lotto
        if lotto is not None:
b.l_say('There\'s already a lottery running.', i, 0)
return True
if i.user.get_points() < 10:
b.l_say('You don\'t have enough points to begin a lottery.', i, 0)
return True
duration = 600
min_bet = 10
max_bet = 200
if len(i.args) > 1:
try:
if 'm' in i.args[1]:
duration = 60 * int(i.args[1].replace('m', ''))
else:
duration = int(i.args[1])
except:
traceback.print_exc()
b.l_say('Please only use digits or \'m\' for the duration.', i, 0)
return True
if len(i.args) > 2:
try:
min_bet = max(min_bet, int(i.args[2]))
except:
b.l_say('The minimum bet must be a number.', i, 0)
return True
if len(i.args) > 3:
try:
max_bet = max(min_bet, int(i.args[3]))
except:
b.l_say('The maximum bet must be a number.', i, 0)
return True
lotto = Lotto(b, duration, min_bet, max_bet)
lotto.start()
i.user.add_points(-10)
b.l_say('You have %d points left.' % i.user.get_points(), i, 0)
def info(l, b, i):
global lotto
if lotto is None:
b.l_say('There is no lottery at the moment.', i, 0)
return True
m, s = divmod(lotto.time_left, 60)
b.l_say(
'%s Time left: %02d:%02d, Prize pool: %d, Bet range: %d - %d' % (
lotto.format, m, s, lotto.get_pool(), lotto.min_bet,
lotto.max_bet
), i, 0
)
def bet(l, b, i):
global lotto
if lotto is None:
b.l_say('There is no lottery at the moment.', i, 0)
return True
if len(i.args) > 1:
try:
bet = lotto.add_bet(i.nick, int(i.args[1]))
if not bet:
b.l_say('You don\'t have enough points.', i, 0)
return True
i.user.add_points(-1 * bet)
b.l_say('You have %d points left.' % i.user.get_points(), i, 0)
except:
traceback.print_exc()
b.l_say('The amount must be a number.', i, 0)
return True
b.l_say('You need to specify a bet amount.', i, 0)
try:
exec ('%s(l, b, i)' % i.args[0]) in globals(), locals()
except Exception, e:
traceback.print_exc()
b.l_say('Usage: %s.lottery new|bet|info' % CYAN, i, 0)
return True
class Lotto(Thread):
def __init__(self, bot, duration, min_bet, max_bet):
Thread.__init__(self)
self.min_bet = min_bet
self.max_bet = max_bet
self.time_left = duration
self.bets = {}
self.bot = bot
self.dead = False
self.format = '[%sLottery%s]' % (CYAN, RESET)
print '\t\tNew %s started' % __name__
m, s = divmod(duration, 60)
self.bot.say(
'%s New lottery started! Will run for %02d:%02d. Bet range: %d - %d'\
% (self.format, m, s, self.min_bet, self.max_bet),
'all'
)
def add_bet(self, nick, bet):
if bet < self.min_bet:
return False
elif bet > self.max_bet:
bet = self.max_bet
if nick in self.bets:
return False
self.bets[nick] = bet
pool = self.get_pool()
self.bot.say(
'%s %s bet %dp. Pool is now %dp.' % (self.format, nick, bet, pool),
'all'
)
return bet
def get_pool(self):
pool = 0
for k, v in self.bets.iteritems():
pool += v
return pool
def find_winner(self, num):
for k, v in self.bets.iteritems():
            if num <= v:
return k
else:
num -= v
return None
    def kill(self):
        # let a new lottery be started once this one has finished
        global lotto
        lotto = None
        self.dead = True
    def end(self):
        pool = self.get_pool()
        if pool == 0:
            # nobody bet; randint(1, 0) would raise ValueError and kill the thread
            self.kill()
            return False
        winning_num = random.randint(1, pool)
winner = self.find_winner(winning_num)
if winner is None:
return False
self.bot.say(
'%s %s%s%s is the lucky winner of this round and receives %s%d%s points!' % \
(self.format, BOLD, winner, RESET, GREEN, pool, RESET),
'all'
)
win_user = self.bot.get_user(winner)
win_user.add_points(pool)
self.bot.msg(
win_user, 'You now have %s%d%s points.' % (BOLD,
win_user.get_points(), RESET)
)
self.kill()
def run(self):
while not self.dead:
while self.time_left > 0 and not self.dead:
self.time_left -= 2
time.sleep(2)
if self.dead:
return
self.end()
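# Illustrative sketch (not used by the bot): how Lotto.find_winner() above maps
# the winning number onto the bets. With bets {'alice': 10, 'bob': 30} the pool
# is 40 and the draw is random.randint(1, 40); draws 1-10 land on alice and
# 11-40 on bob (in dict iteration order), so every point bet buys one "ticket".
def _find_winner_sketch(bets, num):
    """Stand-alone copy of the selection logic, for illustration only."""
    for nick, amount in bets.items():
        if num <= amount:
            return nick
        num -= amount
    return None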
|
mit
| 8,233,873,189,446,941,000 | 28.062176 | 89 | 0.463541 | false | 3.516614 | false | false | false |
fedora-conary/conary-policy
|
policy/normalize.py
|
1
|
38175
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
import stat
import tempfile
import filecmp
import shutil
from conary.lib import magic, util
from conary.build import policy, recipe
from conary.local import database
def _findProgPath(prog, db, recipe):
# ignore arguments
prog = prog.split(' ')[0]
if prog.startswith('/'):
progPath = prog
else:
macros = recipe.macros
searchPath = [macros.essentialbindir,
macros.bindir,
macros.essentialsbindir,
macros.sbindir]
searchPath.extend([x for x in ['/bin', '/usr/bin', '/sbin', '/usr/sbin']
if x not in searchPath])
searchPath.extend([x for x in os.getenv('PATH', '').split(os.path.pathsep)
if x not in searchPath])
progPath = util.findFile(prog, searchPath)
progTroveName = [ x.getName() for x in db.iterTrovesByPath(progPath) ]
if progTroveName:
progTroveName = progTroveName[0]
try:
if progTroveName in recipe._getTransitiveBuildRequiresNames():
recipe.reportExcessBuildRequires(progTroveName)
else:
recipe.reportMisingBuildRequires(progTroveName)
except AttributeError:
# older conary
pass
return progPath
class NormalizeCompression(policy.DestdirPolicy):
"""
NAME
====
B{C{r.NormalizeCompression()}} - Compress files with maximum compression
SYNOPSIS
========
C{r.NormalizeCompression([I{filterexp}] I{exceptions=filterexp}])}
DESCRIPTION
===========
    The C{r.NormalizeCompression()} policy compresses files with maximum
    compression, and without data which may change from invocation to
    invocation.
Recompresses .gz files with -9 -n, and .bz2 files with -9, to get maximum
compression and avoid meaningless changes overpopulating the database.
Ignores man/info pages, as they are encountered separately while making other
changes to man/info pages later.
EXAMPLES
========
C{r.NormalizeCompression(exceptions='%(thistestdir)s/.*')}
This package has test files that are tested byte-for-byte and
cannot be modified at all and still pass the tests.
"""
processUnmodified = False
invariantexceptions = [
'%(mandir)s/man.*/',
'%(infodir)s/',
]
invariantinclusions = [
('.*\.(gz|bz2)', None, stat.S_IFDIR),
]
db = None
gzip = None
bzip = None
def doFile(self, path):
if hasattr(self.recipe, '_getCapsulePathsForFile'):
if self.recipe._getCapsulePathsForFile(path):
return
m = self.recipe.magic[path]
if not m:
return
# Note: uses external gzip/bunzip if they exist because a
# pipeline is faster in a multiprocessing environment
def _mktmp(fullpath):
fd, path = tempfile.mkstemp('.temp', '', os.path.dirname(fullpath))
os.close(fd)
return path
def _move(tmppath, fullpath):
os.chmod(tmppath, os.lstat(fullpath).st_mode)
os.rename(tmppath, fullpath)
def _findProg(prog):
if not self.db:
self.db = database.Database(self.recipe.cfg.root,
self.recipe.cfg.dbPath)
return _findProgPath(prog, self.db, self.recipe)
fullpath = self.macros.destdir+path
if m.name == 'gzip' and \
(m.contents['compression'] != '9' or 'name' in m.contents):
tmppath = _mktmp(fullpath)
if not self.gzip:
self.gzip = _findProg('gzip')
util.execute('%s -dc %s | %s -f -n -9 > %s'
%(self.gzip, fullpath, self.gzip, tmppath))
_move(tmppath, fullpath)
del self.recipe.magic[path]
if m.name == 'bzip' and m.contents['compression'] != '9':
tmppath = _mktmp(fullpath)
if not self.bzip:
self.bzip = _findProg('bzip2')
util.execute('%s -dc %s | %s -9 > %s'
%(self.bzip, fullpath, self.bzip, tmppath))
_move(tmppath, fullpath)
del self.recipe.magic[path]
class NormalizeManPages(policy.DestdirPolicy):
"""
NAME
====
B{C{r.NormalizeManPages()}} - Make all man pages follow sane system policy
SYNOPSIS
========
C{r.NormalizeManPages([I{filterexp}], [I{exceptions=filterexp}])}
DESCRIPTION
===========
The C{r.NormalizeManPages()} policy makes all system manual pages
follow sane system policy
Note: This policy class is not called directly from recipes, and does not
honor exceptions.
Some of the following tasks are performed against system manual pages via
C{r.NormalizeManPages}:
- Fix all man pages' contents:
- remove instances of C{/?%(destdir)s} from all man pages
- C{.so foo.n} becomes a symlink to foo.n
- (re)compress all man pages with gzip -f -n -9
- change all symlinks to point to .gz (if they don't already)
- make all man pages be mode 644
"""
requires = (
('ReadableDocs', policy.CONDITIONAL_SUBSEQUENT),
)
def _findProg(self, prog):
if not self.db:
self.db = database.Database(self.recipe.cfg.root,
self.recipe.cfg.dbPath)
return _findProgPath(prog, self.db, self.recipe)
# Note: not safe for derived packages; needs to check in each
# internal function for unmodified files
def _uncompress(self, dirname, names):
for name in names:
path = dirname + os.sep + name
if name.endswith('.gz') and util.isregular(path):
if not self.gunzip:
self.gunzip = self._findProg('gunzip')
util.execute('gunzip ' + dirname + os.sep + name)
try:
self.recipe.recordMove(util.joinPaths(dirname, name),
util.joinPaths(dirname, name)[:-3])
except AttributeError:
pass
if name.endswith('.bz2') and util.isregular(path):
if not self.bunzip:
self.bunzip = self._findProg('bunzip2')
util.execute('bunzip2 ' + dirname + os.sep + name)
try:
self.recipe.recordMove(util.joinPaths(dirname, name),
util.joinPaths(dirname, name)[:-4])
except AttributeError:
pass
def _touchup(self, dirname, names):
"""
remove destdir, fix up modes, ensure that it is legal UTF-8
"""
mode = os.lstat(dirname)[stat.ST_MODE]
if mode & 0777 != 0755:
os.chmod(dirname, 0755)
for name in names:
path = dirname + os.sep + name
mode = os.lstat(path)[stat.ST_MODE]
# avoid things like symlinks
if not stat.S_ISREG(mode):
continue
if mode & 0777 != 0644:
os.chmod(path, 0644)
f = file(path, 'r+')
data = f.read()
write = False
try:
data.decode('utf-8')
except:
try:
data = data.decode('iso-8859-1').encode('utf-8')
write = True
except:
self.error('unable to decode %s as utf-8 or iso-8859-1',
path)
if data.find(self.destdir) != -1:
write = True
# I think this is cheaper than using a regexp
data = data.replace('/'+self.destdir, '')
data = data.replace(self.destdir, '')
if write:
f.seek(0)
f.truncate(0)
f.write(data)
def _sosymlink(self, dirname, names):
section = os.path.basename(dirname)
for name in names:
path = dirname + os.sep + name
if util.isregular(path):
# if only .so, change to symlink
f = file(path)
lines = f.readlines(512) # we really don't need the whole file
f.close()
# delete comment lines first
newlines = []
for line in lines:
# newline means len(line) will be at least 1
if len(line) > 1 and not self.commentexp.search(line[:-1]):
newlines.append(line)
lines = newlines
# now see if we have only a .so line to replace
# only replace .so with symlink if the file exists
# in order to deal with searchpaths
if len(lines) == 1:
line = lines[0]
# remove newline and other trailing whitespace if it exists
line = line.rstrip()
match = self.soexp.search(line)
if match:
matchlist = match.group(1).split('/')
l = len(matchlist)
if l == 1 or matchlist[l-2] == section:
# no directory specified, or in the same
# directory:
targetpath = os.sep.join((dirname, matchlist[l-1]))
if (os.path.exists(targetpath) and
os.path.isfile(targetpath)):
self.info('replacing %s (%s) with symlink %s',
name, match.group(0),
os.path.basename(match.group(1)))
os.remove(path)
os.symlink(os.path.basename(match.group(1)),
path)
else:
# either the canonical .so manN/foo.N or an
# absolute path /usr/share/man/manN/foo.N
# .so is relative to %(mandir)s and the other
# man page is in a different dir, so add ../
target = "../%s/%s" %(matchlist[l-2],
matchlist[l-1])
targetpath = os.sep.join((dirname, target))
if os.path.exists(targetpath):
self.info('replacing %s (%s) with symlink %s',
name, match.group(0), target)
os.remove(path)
os.symlink(target, path)
def _compress(self, dirname, names):
for name in names:
path = dirname + os.sep + name
if util.isregular(path):
if not self.gzip:
self.gzip = self._findProg('gzip')
util.execute('gzip -f -n -9 ' + dirname + os.sep + name)
try:
self.recipe.recordMove(dirname + os.sep + name,
dirname + os.sep + name + '.gz')
except AttributeError:
pass
def _gzsymlink(self, dirname, names):
for name in names:
path = dirname + os.sep + name
if os.path.islink(path):
# change symlinks to .gz -> .gz
contents = os.readlink(path)
os.remove(path)
if not contents.endswith('.gz'):
contents = contents + '.gz'
if not path.endswith('.gz'):
path = path + '.gz'
os.symlink(util.normpath(contents), path)
def __init__(self, *args, **keywords):
policy.DestdirPolicy.__init__(self, *args, **keywords)
self.soexp = re.compile(r'^\.so (.*\...*)$')
self.commentexp = re.compile(r'^\.\\"')
self.db = None
self.gzip = None
self.gunzip = None
self.bunzip = None
def test(self):
if hasattr(self.recipe, '_getCapsulePathsForFile'):
if self.recipe.getType() == recipe.RECIPE_TYPE_CAPSULE:
return False
return True
def do(self):
for manpath in sorted(list(set((
self.macros.mandir,
os.sep.join((self.macros.x11prefix, 'man')),
os.sep.join((self.macros.krbprefix, 'man')),)))
):
manpath = self.macros.destdir + manpath
self.destdir = self.macros['destdir'][1:] # without leading /
# uncompress all man pages
os.path.walk(manpath, NormalizeManPages._uncompress, self)
# remove '/?%(destdir)s' and fix modes
os.path.walk(manpath, NormalizeManPages._touchup, self)
# .so foo.n becomes a symlink to foo.n
os.path.walk(manpath, NormalizeManPages._sosymlink, self)
# recompress all man pages
os.path.walk(manpath, NormalizeManPages._compress, self)
# change all symlinks to point to .gz (if they don't already)
os.path.walk(manpath, NormalizeManPages._gzsymlink, self)
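# Illustrative sketch (hypothetical file names, not executed by the policy): the
# ".so" handling performed by NormalizeManPages._sosymlink() above. A man page
# whose only content line is
#
#     .so man1/foo.1
#
# is removed and replaced by a symlink, roughly:
#
#     os.remove('%(destdir)s%(mandir)s/man1/bar.1')
#     os.symlink('foo.1', '%(destdir)s%(mandir)s/man1/bar.1')
#
# after which the page is recompressed with "gzip -f -n -9".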
class NormalizeInfoPages(policy.DestdirPolicy):
"""
NAME
====
    B{C{r.NormalizeInfoPages()}} - Compress info pages and remove the info directory file
SYNOPSIS
========
C{r.NormalizeInfoPages([I{filterexp}] I{exceptions=filterexp}])}
DESCRIPTION
===========
The C{r.NormalizeInfoPages()} policy properly compresses info files,
and removes the info directory file.
EXAMPLES
========
The only recipe invocation possible for C{r.NormalizeInfoPages} is
C{r.NormalizeInfoPages(exceptions='%(infodir)s/dir')} in the recipe that
should own the info directory file (normally texinfo).
"""
requires = (
('ReadableDocs', policy.CONDITIONAL_SUBSEQUENT),
)
def test(self):
# Not safe for derived packages in this form, needs explicit checks
if hasattr(self.recipe, '_getCapsulePathsForFile'):
if self.recipe.getType() == recipe.RECIPE_TYPE_CAPSULE:
return False
return True
def do(self):
dir = self.macros['infodir']+'/dir'
fsdir = self.macros['destdir']+dir
if os.path.exists(fsdir):
if not self.policyException(dir):
util.remove(fsdir)
if os.path.isdir('%(destdir)s/%(infodir)s' %self.macros):
infofilespath = '%(destdir)s/%(infodir)s' %self.macros
infofiles = os.listdir(infofilespath)
for file in infofiles:
self._moveToInfoRoot(file)
infofiles = os.listdir(infofilespath)
for file in infofiles:
self._processInfoFile(file)
def __init__(self, *args, **keywords):
policy.DestdirPolicy.__init__(self, *args, **keywords)
self.db = None
self.gzip = None
self.gunzip = None
self.bunzip = None
def _findProg(self, prog):
if not self.db:
self.db = database.Database(self.recipe.cfg.root,
self.recipe.cfg.dbPath)
return _findProgPath(prog, self.db, self.recipe)
def _moveToInfoRoot(self, file):
infofilespath = '%(destdir)s/%(infodir)s' %self.macros
fullfile = util.joinPaths(infofilespath, file)
if os.path.isdir(fullfile):
for subfile in os.listdir(fullfile):
self._moveToInfoRoot(util.joinPaths(file, subfile))
shutil.rmtree(fullfile)
elif os.path.dirname(fullfile) != infofilespath:
destPath = util.joinPaths(infofilespath,
os.path.basename(fullfile))
shutil.move(fullfile, destPath)
try:
self.recipe.recordMove(fullfile, destPath)
except AttributeError:
pass
def _processInfoFile(self, file):
syspath = '%(destdir)s/%(infodir)s/' %self.macros + file
path = '%(infodir)s/' %self.macros + file
if not self.policyException(path):
m = self.recipe.magic[path]
if not m or m.name not in ('gzip', 'bzip'):
# not compressed
if not self.gzip:
self.gzip = self._findProg('gzip')
util.execute('gzip -f -n -9 %s' %syspath)
try:
self.recipe.recordMove(syspath, syspath + '.gz')
except AttributeError:
pass
del self.recipe.magic[path]
elif m.name == 'gzip' and \
(m.contents['compression'] != '9' or \
'name' in m.contents):
if not self.gzip:
self.gzip = self._findProg('gzip')
if not self.gunzip:
self.gunzip = self._findProg('gunzip')
util.execute('gunzip %s; gzip -f -n -9 %s'
%(syspath, syspath[:-3]))
# filename didn't change, so don't record it in the manifest
del self.recipe.magic[path]
elif m.name == 'bzip':
# should use gzip instead
if not self.gzip:
self.gzip = self._findProg('gzip')
if not self.bunzip:
self.bunzip = self._findProg('bunzip2')
util.execute('bunzip2 %s; gzip -f -n -9 %s'
%(syspath, syspath[:-4]))
try:
self.recipe.recordMove(syspath, syspath[:-4] + '.gz')
except AttributeError:
pass
del self.recipe.magic[path]
class NormalizeInitscriptLocation(policy.DestdirPolicy):
"""
NAME
====
B{C{r.NormalizeInitscriptLocation()}} - Properly locates init scripts
SYNOPSIS
========
C{r.NormalizeInitscriptLocation([I{filterexp}] I{exceptions=filterexp}])}
DESCRIPTION
===========
The C{r.NormalizeInitscriptLocation()} policy puts init scripts in their
proper location, resolving ambiguity about their proper location.
Moves all init scripts from /etc/rc.d/init.d/ to their official location.
"""
requires = (
('RelativeSymlinks', policy.CONDITIONAL_SUBSEQUENT),
('NormalizeInterpreterPaths', policy.CONDITIONAL_SUBSEQUENT),
)
processUnmodified = False
# need both of the next two lines to avoid following /etc/rc.d/init.d
# if it is a symlink
invariantsubtrees = [ '/etc/rc.d' ]
invariantinclusions = [ '/etc/rc.d/init.d/' ]
def test(self):
if hasattr(self.recipe, '_getCapsulePathsForFile'):
if self.recipe.getType() == recipe.RECIPE_TYPE_CAPSULE:
return False
return self.macros['initdir'] != '/etc/rc.d/init.d'
def doFile(self, path):
basename = os.path.basename(path)
target = util.joinPaths(self.macros['initdir'], basename)
if os.path.exists(self.macros['destdir'] + os.sep + target):
raise policy.PolicyError(
"Conflicting initscripts %s and %s installed" %(
path, target))
util.mkdirChain(self.macros['destdir'] + os.sep +
self.macros['initdir'])
util.rename(self.macros['destdir'] + path,
self.macros['destdir'] + target)
try:
self.recipe.recordMove(self.macros['destdir'] + path,
self.macros['destdir'] + target)
except AttributeError:
pass
class NormalizeInitscriptContents(policy.DestdirPolicy):
"""
NAME
====
B{C{r.NormalizeInitscriptContents()}} - Fixes common errors within init scripts
SYNOPSIS
========
C{r.NormalizeInitscriptContents([I{filterexp}] I{exceptions=filterexp}])}
DESCRIPTION
===========
The C{r.NormalizeInitscriptContents()} policy fixes common errors within
init scripts, and adds some dependencies if needed.
EXAMPLES
========
C{r.NormalizeInitscriptContents(exceptions='%(initdir)s/foo')}
Use this in the unprecedented case that C{r.NormalizeInitscriptContents}
damages an init script.
"""
requires = (
# for invariantsubtree to be sufficient
('NormalizeInitscriptLocation', policy.REQUIRED_PRIOR),
('RelativeSymlinks', policy.REQUIRED_PRIOR),
# for adding requirements
('Requires', policy.REQUIRED_SUBSEQUENT),
)
processUnmodified = False
invariantsubtrees = [ '%(initdir)s' ]
invariantinclusions = [ ('.*', 0400, stat.S_IFDIR), ]
def doFile(self, path):
if hasattr(self.recipe, '_getCapsulePathsForFile'):
if self.recipe._getCapsulePathsForFile(path):
return
m = self.recipe.macros
fullpath = '/'.join((m.destdir, path))
if os.path.islink(fullpath):
linkpath = os.readlink(fullpath)
if m.destdir not in linkpath:
# RelativeSymlinks has already run. linkpath is relative to
# fullpath
newpath = util.joinPaths(os.path.dirname(fullpath), linkpath)
if os.path.exists(newpath):
fullpath = newpath
else:
# If the target of an init script is not present, don't
# error, DanglingSymlinks will address this situation.
self.warn('%s is a symlink to %s, which does not exist.' % \
(path, linkpath))
return
contents = file(fullpath).read()
modified = False
if ('/etc/rc.d/init.d' != m.initdir and
'/etc/rc.d/init.d' in contents):
contents = contents.replace('/etc/rc.d/init.d', m.initdir)
modified = True
elif ('/etc/init.d' != m.initdir and
'/etc/init.d' in contents):
contents = contents.replace('/etc/init.d', m.initdir)
modified = True
if '%(initdir)s/functions' %m in contents:
self.recipe.Requires('file: %(initdir)s/functions',
util.literalRegex(path))
if modified:
file(fullpath, 'w').write(contents)
class NormalizeAppDefaults(policy.DestdirPolicy):
"""
NAME
====
B{C{r.NormalizeAppDefaults()}} - Locate X application defaults files
SYNOPSIS
========
C{r.NormalizeAppDefaults([I{filterexp}])}
DESCRIPTION
===========
The C{r.NormalizeAppDefaults()} policy locates X application defaults
files.
No exceptions to this policy are honored.
"""
def test(self):
# not safe in this form for derived packages
if hasattr(self.recipe, '_getCapsulePathsForFile'):
if self.recipe.getType() == recipe.RECIPE_TYPE_CAPSULE:
return False
return True
def do(self):
e = '%(destdir)s/%(sysconfdir)s/X11/app-defaults' % self.macros
if not os.path.isdir(e):
return
x = '%(destdir)s/%(x11prefix)s/lib/X11/app-defaults' % self.macros
self.warn('app-default files misplaced in'
' %(sysconfdir)s/X11/app-defaults' % self.macros)
if os.path.islink(x):
util.remove(x)
util.mkdirChain(x)
for file in os.listdir(e):
util.rename(util.joinPaths(e, file),
util.joinPaths(x, file))
try:
self.recipe.recordMove(util.joinPaths(e, file),
util.joinPaths(x, file))
except AttributeError:
pass
class NormalizeInterpreterPaths(policy.DestdirPolicy):
"""
NAME
====
B{C{r.NormalizeInterpreterPaths()}} - Rewrites interpreter paths in
scripts
SYNOPSIS
========
C{r.NormalizeInterpreterPaths([I{filterexp}] I{exceptions=filterexp}])}
DESCRIPTION
===========
    The C{r.NormalizeInterpreterPaths()} policy re-writes the interpreter paths
    in script files, in particular changing indirect calls through C{env} into
    direct calls.
Exceptions to this policy should only be made when they are part of the
explicit calling convention of a script where the location of the final
    interpreter depends on the user's C{PATH}.
EXAMPLES
========
C{r.NormalizeInterpreterPaths(exceptions=".*")}
Do not modify any interpreter paths for this package. Not
generally recommended.
"""
processUnmodified = False
invariantexceptions = [ '%(thisdocdir.literalRegex)s/', ]
def doFile(self, path):
if hasattr(self.recipe, '_getCapsulePathsForFile'):
if self.recipe._getCapsulePathsForFile(path):
return
destdir = self.recipe.macros.destdir
d = util.joinPaths(destdir, path)
mode = os.lstat(d)[stat.ST_MODE]
if not mode & 0111:
# we care about interpreter paths only in executable scripts
return
m = self.recipe.magic[path]
if m and m.name == 'script':
if self._correctInterp(m, path):
del self.recipe.magic[path]
m = self.recipe.magic[path]
if self._correctEnv(m, path):
del self.recipe.magic[path]
def _correctInterp(self, m, path):
destdir = self.recipe.macros.destdir
d = util.joinPaths(destdir, path)
interp = m.contents['interpreter']
interpBase = os.path.basename(interp)
found = False
if not os.path.exists('/'.join((destdir, interp))) and not os.path.exists(interp):
            # try to remove the 'local' part
if '/local/' in interp:
normalized = interp.replace('/local', '')
if os.path.exists('/'.join((destdir, normalized))) or os.path.exists(normalized):
found = True
if not found:
                candidates = (
self.recipe.macros.bindir,
self.recipe.macros.sbindir,
self.recipe.macros.essentialbindir,
self.recipe.macros.essentialsbindir,
)
                for i in candidates:
if os.path.exists('/'.join((destdir, i, interpBase))):
normalized = util.joinPaths(i, interpBase)
found = True
break
if not found:
#try to find in '/bin', '/sbin', '/usr/bin', '/usr/sbin'
for i in '/usr/bin', '/bin', '/usr/sbin', '/sbin':
normalized = '/'.join((i, interpBase))
if os.path.exists(normalized):
found = True
break
if not found:
self.warn('The interpreter path %s in %s does not exist!', interp, path)
if found:
line = m.contents['line']
normalized = line.replace(interp, normalized)
self._changeInterpLine(d, '#!' + normalized + '\n')
self.info('changing %s to %s in %s',
line, normalized, path)
return found
def _correctEnv(self, m, path):
destdir = self.recipe.macros.destdir
d = util.joinPaths(destdir, path)
interp = m.contents['interpreter']
if interp.find('/bin/env') != -1: #finds /usr/bin/env too...
line = m.contents['line']
# rewrite to not have env
wordlist = [ x for x in line.split() ]
if len(wordlist) == 1:
self.error("Interpreter is not given for %s in %s", wordlist[0], path)
return
wordlist.pop(0) # get rid of env
# first look in package
fullintpath = util.checkPath(wordlist[0], root=destdir)
if fullintpath == None:
# then look on installed system
fullintpath = util.checkPath(wordlist[0])
if fullintpath == None:
self.error("Interpreter %s for file %s not found, could not convert from /usr/bin/env syntax", wordlist[0], path)
return False
wordlist[0] = fullintpath
self._changeInterpLine(d, '#!'+" ".join(wordlist)+'\n')
self.info('changing %s to %s in %s',
line, " ".join(wordlist), path)
return True
return False
def _changeInterpLine(self, path, newline):
mode = os.lstat(path)[stat.ST_MODE]
# we need to be able to write the file
os.chmod(path, mode | 0600)
f = file(path, 'r+')
l = f.readlines()
l[0] = newline
f.seek(0)
        f.truncate(0) # we may have shrunk the file, avoid garbage
f.writelines(l)
f.close()
# revert any change to mode
os.chmod(path, mode)
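# Illustrative sketch (example paths only, not part of the policy code): the
# shebang rewriting done by NormalizeInterpreterPaths above. A script starting
# with
#
#     #!/usr/bin/env python
#
# is rewritten in place to call the interpreter directly, for example
#
#     #!/usr/bin/python
#
# where the real target comes from util.checkPath() against the destdir and
# then the installed system.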
class NormalizePamConfig(policy.DestdirPolicy):
"""
NAME
====
B{C{r.NormalizePamConfig()}} - Adjust PAM configuration files
SYNOPSIS
========
C{r.NormalizePamConfig([I{filterexp}] I{exceptions=filterexp}])}
DESCRIPTION
===========
    The C{r.NormalizePamConfig()} policy adjusts PAM configuration files,
    removing references to obsolete module paths such as C{/lib/security/$ISA},
    which are not needed by modern PAM libraries.
Exceptions to this policy should never be required.
"""
processUnmodified = False
invariantsubtrees = [
'%(sysconfdir)s/pam.d/',
]
def doFile(self, path):
if hasattr(self.recipe, '_getCapsulePathsForFile'):
if self.recipe._getCapsulePathsForFile(path):
return
d = util.joinPaths(self.recipe.macros.destdir, path)
mode = os.lstat(d)[stat.ST_MODE]
if stat.S_ISLNK(mode):
# we'll process whatever this is pointing to whenever we
# get there.
return
if not (mode & 0200):
os.chmod(d, mode | 0200)
f = file(d, 'r+')
l = f.readlines()
l = [x.replace('/lib/security/$ISA/', '') for x in l]
stackRe = re.compile('(.*)required.*pam_stack.so.*service=(.*)')
def removeStack(line):
m = stackRe.match(line)
if m:
return '%s include %s\n'%(m.group(1), m.group(2))
return line
l = [removeStack(x) for x in l]
f.seek(0)
f.truncate(0) # we may have shrunk the file, avoid garbage
f.writelines(l)
f.close()
os.chmod(d, mode)
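# Illustrative sketch (hypothetical file contents): the rewrites applied by
# NormalizePamConfig.doFile() above to a PAM configuration file.
#
#     auth required /lib/security/$ISA/pam_unix.so
#  becomes
#     auth required pam_unix.so
#
#     auth required pam_stack.so service=system-auth
#  becomes
#     auth include system-auth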
class NormalizePythonInterpreterVersion(policy.DestdirPolicy):
"""
NAME
====
B{C{r.NormalizePythonInterpreterVersion()}} - Provides version-specific path to python interpreter in python program files
SYNOPSIS
========
C{r.NormalizePythonInterpreterVersion([I{filterexp}], [I{exceptions=filterexp}i], [I{versionMap=((from, to), ...)}])}
DESCRIPTION
===========
The C{r.NormalizePythonInterpreterVersion()} policy ensures that
python script files have a version-specific path to the
interpreter if possible.
KEYWORDS
========
B{versionMap} : Specify mappings of interpreter version changes
to make for python scripts.
EXAMPLES
========
C{r.NormalizePythonInterpreterVersion(versionMap=(
('%(bindir)s/python', '%(bindir)s/python2.5'),
('%(bindir)s/python25', '%(bindir)s/python2.5')
))}
Specify that any scripts with an interpreter of C{/usr/bin/python}
or C{/usr/bin/python25} should be changed to C{/usr/bin/python2.5}.
"""
requires = (
('NormalizeInterpreterPaths', policy.CONDITIONAL_PRIOR),
)
keywords = {'versionMap': {}}
processUnmodified = False
def updateArgs(self, *args, **keywords):
if 'versionMap' in keywords:
versionMap = keywords.pop('versionMap')
if type(versionMap) in (list, tuple):
versionMap = dict(versionMap)
self.versionMap.update(versionMap)
policy.DestdirPolicy.updateArgs(self, *args, **keywords)
def preProcess(self):
self.interpreterRe = re.compile(".*python[-0-9.]+$")
self.interpMap = {}
versionMap = {}
for item in self.versionMap.items():
versionMap[item[0]%self.macros] = item[1]%self.macros
self.versionMap = versionMap
def doFile(self, path):
if hasattr(self.recipe, '_getCapsulePathsForFile'):
if self.recipe._getCapsulePathsForFile(path):
return
destdir = self.recipe.macros.destdir
d = util.joinPaths(destdir, path)
mode = os.lstat(d)[stat.ST_MODE]
m = self.recipe.magic[path]
if m and m.name == 'script':
interp = m.contents['interpreter']
if '/python' not in interp:
# we handle only python scripts here
return
if interp in self.versionMap.keys():
normalized = self.versionMap[interp]
elif not self._isNormalizedInterpreter(interp):
# normalization
if self.interpMap.has_key(interp):
normalized = self.interpMap[interp]
else:
normalized = self._normalize(interp)
if normalized:
self.interpMap[interp] = normalized
else:
self.warn('No version-specific python interpreter '
'found for %s in %s', interp, path)
return
else:
return
# we need to be able to write the file
os.chmod(d, mode | 0600)
f = file(d, 'r+')
l = f.readlines()
l[0] = l[0].replace(interp, normalized)
# we may have shrunk the file, avoid garbage
f.seek(0)
f.truncate(0)
f.writelines(l)
f.close()
# revert any change to mode
os.chmod(d, mode)
self.info('changed %s to %s in %s', interp, normalized, path)
del self.recipe.magic[path]
def _isNormalizedInterpreter(self, interp):
return os.path.basename(interp).startswith('python') and self.interpreterRe.match(interp)
def _normalize(self, interp):
dir = self.recipe.macros.destdir
interpFull = '/'.join((dir, interp))
interpFullBase = os.path.basename(interpFull)
interpFullDir = os.path.dirname(interpFull)
interpDir = os.path.dirname(interp)
links = []
if os.path.exists(interpFull):
for i in os.listdir(interpFullDir):
if os.path.samefile(interpFull, '/'.join((interpFullDir, i))):
links += [i]
path = sorted(links, key=len, reverse=True)
if path and self._isNormalizedInterpreter('/'.join((interpFullDir, path[0]))):
return os.path.join(interpDir, path[0])
links = []
for i in os.listdir(interpFullDir):
try:
if filecmp.cmp(interpFull, '/'.join((interpFullDir, i))):
links += [i]
except IOError:
# this is a fallback for a bad install anyway, so
# a failure here is both unusual and not important
pass
path = sorted(links, key=len, reverse=True)
if path and self._isNormalizedInterpreter('/'.join((interpFullDir, path[0]))):
return os.path.join(interpDir, path[0])
else:
db = database.Database('/', self.recipe.cfg.dbPath)
pythonTroveList = db.iterTrovesByPath(interp)
for trove in pythonTroveList:
pathList = [x[1] for x in trove.iterFileList()]
links += [x for x in pathList if x.startswith(interp)]
path = sorted(links, key=len, reverse=True)
if path and self._isNormalizedInterpreter(path[0]):
return path[0]
return None
class NormalizePythonEggs(policy.DestdirPolicy):
invariantinclusions = [
('.*/python[^/]*/site-packages/.*\.egg', stat.S_IFREG),
]
requires = (
('RemoveNonPackageFiles', policy.CONDITIONAL_PRIOR),
)
def doFile(self, path):
if hasattr(self.recipe, '_getCapsulePathsForFile'):
if self.recipe._getCapsulePathsForFile(path):
return
dir = self.recipe.macros.destdir
fullPath = util.joinPaths(dir, path)
m = magic.magic(fullPath)
if not (m and m.name == 'ZIP'):
# if it's not a zip, we can't unpack it, PythonEggs will raise
# an error on this path
return
tmpPath = tempfile.mkdtemp(dir = self.recipe.macros.builddir)
util.execute("unzip -q -o -d '%s' '%s'" % (tmpPath, fullPath))
self._addActionPathBuildRequires(['unzip'])
os.unlink(fullPath)
shutil.move(tmpPath, fullPath)
# Note: NormalizeLibrarySymlinks is in libraries.py
|
apache-2.0
| -4,332,780,223,955,068,400 | 35.184834 | 129 | 0.540982 | false | 4.155781 | false | false | false |
hungpham2511/toppra
|
toppra/solverwrapper/cvxpy_solverwrapper.py
|
1
|
5009
|
from .solverwrapper import SolverWrapper
import logging
import numpy as np
from ..constraint import ConstraintType
from ..constants import CVXPY_MAXX, CVXPY_MAXU
logger = logging.getLogger(__name__)
try:
import cvxpy
FOUND_CVXPY = True
except ImportError:
logger.info("CVXPY installation not found.")
FOUND_CVXPY = False
try:
import mosek
FOUND_MOSEK = True
except ImportError:
logger.info("Mosek installation not found!")
FOUND_MOSEK = False
class cvxpyWrapper(SolverWrapper):
"""A solver wrapper using `cvxpy`.
    NOTE: the two constants CVXPY_MAXX and CVXPY_MAXU are used to
    guarantee that the solution does not grow too large, since cvxpy does
    not handle very large values well.
    `cvxpyWrapper` should not be used in production due to robustness
    issues.
Parameters
----------
constraint_list: list of :class:`.Constraint`
The constraints the robot is subjected to.
path: :class:`.Interpolator`
The geometric path.
path_discretization: array
The discretized path positions.
"""
def __init__(self, constraint_list, path, path_discretization):
super(cvxpyWrapper, self).__init__(constraint_list, path, path_discretization)
valid_types = [ConstraintType.CanonicalLinear, ConstraintType.CanonicalConic]
        # Only canonical linear and canonical conic constraints are supported
for constraint in constraint_list:
if constraint.get_constraint_type() not in valid_types:
raise NotImplementedError
def solve_stagewise_optim(self, i, H, g, x_min, x_max, x_next_min, x_next_max):
assert i <= self.N and 0 <= i
ux = cvxpy.Variable(2)
u = ux[0]
x = ux[1]
cvxpy_constraints = [-CVXPY_MAXU <= u, u <= CVXPY_MAXU, 0 <= x, x <= CVXPY_MAXX]
if not np.isnan(x_min):
cvxpy_constraints.append(x_min <= x)
if not np.isnan(x_max):
cvxpy_constraints.append(x <= x_max)
if i < self.N:
delta = self.get_deltas()[i]
if not np.isnan(x_next_min):
cvxpy_constraints.append(x_next_min <= x + 2 * delta * u)
if not np.isnan(x_next_max):
cvxpy_constraints.append(x + 2 * delta * u <= x_next_max)
for k, constraint in enumerate(self.constraints):
if constraint.get_constraint_type() == ConstraintType.CanonicalLinear:
a, b, c, F, h, ubound, xbound = self.params[k]
if a is not None:
v = a[i] * u + b[i] * x + c[i]
if constraint.identical:
cvxpy_constraints.append(F * v <= h)
else:
cvxpy_constraints.append(F[i] * v <= h[i])
                # ecos (used via cvxpy in this class) handles badly scaled
                # problems poorly, in particular problems with very large
                # bounds. The max()/min() operators below are a workaround
                # for this issue.
if ubound is not None:
cvxpy_constraints.append(max(-CVXPY_MAXU, ubound[i, 0]) <= u)
cvxpy_constraints.append(u <= min(CVXPY_MAXU, ubound[i, 1]))
if xbound is not None:
cvxpy_constraints.append(xbound[i, 0] <= x)
cvxpy_constraints.append(x <= min(CVXPY_MAXX, xbound[i, 1]))
elif constraint.get_constraint_type() == ConstraintType.CanonicalConic:
a, b, c, P, ubound, xbound = self.params[k]
if a is not None:
d = a.shape[1]
for j in range(d):
cvxpy_constraints.append(
a[i, j] * u
+ b[i, j] * x
+ c[i, j]
+ cvxpy.norm(P[i, j].T[:, :2] * ux + P[i, j].T[:, 2])
<= 0
)
if ubound is not None:
cvxpy_constraints.append(max(-CVXPY_MAXU, ubound[i, 0]) <= u)
cvxpy_constraints.append(u <= min(CVXPY_MAXU, ubound[i, 1]))
if xbound is not None:
cvxpy_constraints.append(xbound[i, 0] <= x)
cvxpy_constraints.append(x <= min(CVXPY_MAXX, xbound[i, 1]))
if H is None:
H = np.zeros((self.get_no_vars(), self.get_no_vars()))
objective = cvxpy.Minimize(0.5 * cvxpy.quad_form(ux, H) + g * ux)
problem = cvxpy.Problem(objective, constraints=cvxpy_constraints)
try:
problem.solve(verbose=False)
except cvxpy.SolverError:
# solve fail
pass
if (
problem.status == cvxpy.OPTIMAL
or problem.status == cvxpy.OPTIMAL_INACCURATE
):
return np.array(ux.value).flatten()
else:
res = np.empty(self.get_no_vars())
res[:] = np.nan
return res
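# Illustrative sketch (not exercised anywhere in this module): the intended call
# pattern for this wrapper. Building the constraint list and path is elided
# because it depends on the rest of toppra; the numeric values are placeholders.
#
#   wrapper = cvxpyWrapper(constraint_list, path, path_discretization)
#   ux = wrapper.solve_stagewise_optim(
#       0, None, np.array([1.0, 0.0]),   # stage index i, H (None -> zeros), g
#       0.0, 1.0, 0.0, 1.0)              # x_min, x_max, x_next_min, x_next_max
#   # ux is the optimal [u, x] pair, or an all-NaN vector if the stage is infeasible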
|
mit
| 2,394,813,359,082,985,000 | 35.562044 | 88 | 0.539429 | false | 3.640262 | false | false | false |
sealevelresearch/tide-wrangler
|
tide_wrangler/parsers/task_2000.py
|
1
|
1695
|
#!/usr/bin/env python
import pytz
import datetime
from .row import Row
__all__ = ['get_parser']
class Parser():
def __init__(self, f):
self._fobj = f
def get(self):
return generate_rows(self._fobj)
def get_parser(fobj, options):
return Parser(fobj)
def generate_rows(f):
for line in f.readlines()[20:]:
line = line.strip(' \n\r')
if len(line) == 0:
continue
(_, ignore, year, day_365, hour_decimal, height_cm,
_, _, _, _) = line.split()
if int(ignore) != 0:
continue
when = make_datetime(int(year), int(day_365), float(hour_decimal))
height_m = float(height_cm) / 100
yield Row(when, observed_sea_level=height_m)
def make_datetime(year, day_365, hour_decimal):
return make_day_datetime(year, day_365) + make_timedelta(hour_decimal)
def make_day_datetime(year, days_365):
"""
January 1st is represented by 2013, 1
    February 1st is represented by 2013, 32
>>> make_day_datetime(2013, 1)
datetime.datetime(2013, 1, 1, 0, 0, tzinfo=<UTC>)
>>> make_day_datetime(2013, 32)
datetime.datetime(2013, 2, 1, 0, 0, tzinfo=<UTC>)
"""
return (datetime.datetime(year, 1, 1, tzinfo=pytz.UTC) +
datetime.timedelta(days=days_365 - 1))
def make_timedelta(hour_decimal):
"""
>>> make_timedelta(0.016)
datetime.timedelta(0, 60)
"""
delta = datetime.timedelta(hours=hour_decimal)
return datetime.timedelta(seconds=my_round(delta.total_seconds(), 60))
def my_round(x, base):
"""
>>> my_round(59, 60)
60
>>> my_round(61, 60)
60
"""
return int(base * round(float(x) / base))
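# Illustrative sketch (made-up data line): how generate_rows() above interprets
# one record. Given a line whose whitespace-separated fields are
#
#     STN01 0 2013 32 12.5 123 x x x x
#
# the ignore flag is 0 so the row is kept, the timestamp becomes
# 2013-02-01 12:30:00+00:00 (day 32 of 2013 plus 12.5 hours, rounded to the
# nearest minute) and the observed sea level is 1.23 m (123 cm / 100).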
|
mit
| 8,514,717,235,296,300,000 | 21.905405 | 74 | 0.589381 | false | 3.27853 | false | false | false |
ati-ozgur/KDD99ReviewArticle
|
HelperCodes/create_table_JournalAndArticleCounts.py
|
1
|
1930
|
import ReviewHelper
import pandas as pd
df = ReviewHelper.get_pandas_data_frame_created_from_bibtex_file()
#df_journal = df.groupby('journal')["ID"]
dfJournalList = df.groupby(['journal'])['ID'].count().order(ascending=False)
isOdd = (dfJournalList.size % 2 == 1)
if (isOdd):
table_row_length = dfJournalList.size / 2 +1
else:
table_row_length = dfJournalList.size / 2
table_content_inside=""
for index in range(table_row_length):
journal_name_1column = dfJournalList.index[index]
journal_count_1column = dfJournalList[index]
second_column_index = index + table_row_length
if(second_column_index < dfJournalList.size):
journal_name_2column = dfJournalList.index[second_column_index]
journal_count_2column = dfJournalList[second_column_index]
else:
journal_name_2column = ""
journal_count_2column = ""
line = "{journal_name_1column} & {journal_count_1column} & {journal_name_2column} & {journal_count_2column} \\\\ \n".format(
journal_name_1column = journal_name_1column
,journal_count_1column = journal_count_1column
,journal_name_2column = journal_name_2column
,journal_count_2column = journal_count_2column
)
table_content_inside = table_content_inside + line
table_content_start = """
\\begin{table*}[!ht]
\\caption{ \\textbf{Journals and Article Counts} }
\\label{table-JournalAndArticleCounts}
\\centering
\\begin{adjustbox}{max width=\\textwidth}
\\normalsize
\\begin{tabular}{llll}
\\toprule
Journal Name & Article Count & Journal Name & Article Count \\\\
\\midrule
"""
table_content_end = """
\\bottomrule
\\end{tabular}
\\end{adjustbox}
\\end{table*}
"""
table_content_full = table_content_start + table_content_inside + table_content_end
filename = "../latex/table-JournalAndArticleCounts.tex"
target = open(filename, 'w')
target.write(table_content_full)
target.close()
|
mit
| 6,558,627,946,973,043,000 | 25.081081 | 130 | 0.688601 | false | 3.216667 | false | false | false |
PokeHunterProject/pogom-linux
|
pogom/pgoapi/__init__.py
|
1
|
2430
|
"""
pgoapi - Pokemon Go API
Copyright (c) 2016 tjado <https://github.com/tejado>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
Author: tjado <https://github.com/tejado>
"""
# from __future__ import absolute_import
from .exceptions import PleaseInstallProtobufVersion3
import pkg_resources
import logging
__title__ = 'pgoapi'
__version__ = '1.1.7'
__author__ = 'tjado'
__license__ = 'MIT License'
__copyright__ = 'Copyright (c) 2016 tjado <https://github.com/tejado>'
__patchedBy__ = 'Patched for 0.45.0 by the PokeHunter Project <https://github.com/PokeHunterProject>'
protobuf_exist = False
protobuf_version = "0"
try:
protobuf_version = pkg_resources.get_distribution("protobuf").version
protobuf_exist = True
except:
pass
if (not protobuf_exist) or (int(protobuf_version[:1]) < 3):
print int(protobuf_version[:1])
raise PleaseInstallProtobufVersion3()
from .pgoapi import PGoApi
from .rpc_api import RpcApi
from .auth import Auth
logging.getLogger("pgoapi").addHandler(logging.NullHandler())
logging.getLogger("rpc_api").addHandler(logging.NullHandler())
logging.getLogger("utilities").addHandler(logging.NullHandler())
logging.getLogger("auth").addHandler(logging.NullHandler())
logging.getLogger("auth_ptc").addHandler(logging.NullHandler())
logging.getLogger("auth_google").addHandler(logging.NullHandler())
try:
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
except:
pass
|
mit
| -520,971,139,557,488,900 | 35.268657 | 101 | 0.767078 | false | 3.814757 | false | false | false |
shreyasp/erpnext
|
erpnext/hr/doctype/salary_slip/salary_slip.py
|
1
|
14795
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import add_days, cint, cstr, flt, getdate, nowdate, rounded, date_diff, money_in_words
from frappe.model.naming import make_autoname
from frappe import msgprint, _
from erpnext.accounts.utils import get_fiscal_year
from erpnext.setup.utils import get_company_currency
from erpnext.hr.utils import set_employee_name
from erpnext.hr.doctype.process_payroll.process_payroll import get_month_details
from erpnext.hr.doctype.employee.employee import get_holiday_list_for_employee
from erpnext.utilities.transaction_base import TransactionBase
class SalarySlip(TransactionBase):
def autoname(self):
self.name = make_autoname('Sal Slip/' +self.employee + '/.#####')
def validate(self):
self.status = self.get_status()
self.validate_dates()
self.check_existing()
self.set_month_dates()
if not (len(self.get("earnings")) or len(self.get("deductions"))):
# get details from salary structure
self.get_emp_and_leave_details()
else:
self.get_leave_details(lwp = self.leave_without_pay)
# if self.salary_slip_based_on_timesheet or not self.net_pay:
self.calculate_net_pay()
company_currency = get_company_currency(self.company)
self.total_in_words = money_in_words(self.rounded_total, company_currency)
if frappe.db.get_single_value("HR Settings", "max_working_hours_against_timesheet"):
max_working_hours = frappe.db.get_single_value("HR Settings", "max_working_hours_against_timesheet")
if self.salary_slip_based_on_timesheet and (self.total_working_hours > int(max_working_hours)):
frappe.msgprint(_("Total working hours should not be greater than max working hours {0}").
format(max_working_hours), alert=True)
def validate_dates(self):
if date_diff(self.end_date, self.start_date) < 0:
frappe.throw(_("To date cannot be before From date"))
def calculate_component_amounts(self):
if not getattr(self, '_salary_structure_doc', None):
self._salary_structure_doc = frappe.get_doc('Salary Structure', self.salary_structure)
data = self.get_data_for_eval()
for key in ('earnings', 'deductions'):
for struct_row in self._salary_structure_doc.get(key):
amount = self.eval_condition_and_formula(struct_row, data)
if amount:
self.update_component_row(struct_row, amount, key)
def update_component_row(self, struct_row, amount, key):
component_row = None
for d in self.get(key):
if d.salary_component == struct_row.salary_component:
component_row = d
if not component_row:
self.append(key, {
'amount': amount,
'default_amount': amount,
'depends_on_lwp' : struct_row.depends_on_lwp,
'salary_component' : struct_row.salary_component
})
else:
component_row.amount = amount
def eval_condition_and_formula(self, d, data):
try:
if d.condition:
if not eval(d.condition, None, data):
return None
amount = d.amount
if d.amount_based_on_formula:
if d.formula:
amount = eval(d.formula, None, data)
data[d.abbr] = amount
return amount
except NameError as err:
frappe.throw(_("Name error: {0}".format(err)))
except SyntaxError as err:
frappe.throw(_("Syntax error in formula or condition: {0}".format(err)))
except:
frappe.throw(_("Error in formula or condition"))
raise
def get_data_for_eval(self):
'''Returns data for evaluating formula'''
data = frappe._dict()
for d in self._salary_structure_doc.employees:
if d.employee == self.employee:
data.base, data.variable = d.base, d.variable
data.update(frappe.get_doc("Employee", self.employee).as_dict())
data.update(self.as_dict())
# set values for components
salary_components = frappe.get_all("Salary Component", fields=["salary_component_abbr"])
for salary_component in salary_components:
data[salary_component.salary_component_abbr] = 0
return data
def get_emp_and_leave_details(self):
'''First time, load all the components from salary structure'''
if self.employee:
self.set("earnings", [])
self.set("deductions", [])
self.set_month_dates()
self.validate_dates()
joining_date, relieving_date = frappe.db.get_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
self.get_leave_details(joining_date, relieving_date)
struct = self.check_sal_struct(joining_date, relieving_date)
if struct:
self._salary_structure_doc = frappe.get_doc('Salary Structure', struct)
self.salary_slip_based_on_timesheet = self._salary_structure_doc.salary_slip_based_on_timesheet or 0
self.set_time_sheet()
self.pull_sal_struct()
def set_time_sheet(self):
if self.salary_slip_based_on_timesheet:
self.set("timesheets", [])
timesheets = frappe.db.sql(""" select * from `tabTimesheet` where employee = %(employee)s and start_date BETWEEN %(start_date)s AND %(end_date)s and (status = 'Submitted' or
status = 'Billed')""", {'employee': self.employee, 'start_date': self.start_date, 'end_date': self.end_date}, as_dict=1)
for data in timesheets:
self.append('timesheets', {
'time_sheet': data.name,
'working_hours': data.total_hours
})
def set_month_dates(self):
if self.month and not self.salary_slip_based_on_timesheet:
m = get_month_details(self.fiscal_year, self.month)
self.start_date = m['month_start_date']
self.end_date = m['month_end_date']
def check_sal_struct(self, joining_date, relieving_date):
st_name = frappe.db.sql("""select parent from `tabSalary Structure Employee`
where employee=%s
and parent in (select name from `tabSalary Structure`
where is_active = 'Yes'
and (from_date <= %s or from_date <= %s)
and (to_date is null or to_date >= %s or to_date >= %s))
""",(self.employee, self.start_date, joining_date, self.end_date, relieving_date))
if st_name:
if len(st_name) > 1:
frappe.msgprint(_("Multiple active Salary Structures found for employee {0} for the given dates")
.format(self.employee), title=_('Warning'))
return st_name and st_name[0][0] or ''
else:
self.salary_structure = None
frappe.throw(_("No active or default Salary Structure found for employee {0} for the given dates")
.format(self.employee), title=_('Salary Structure Missing'))
def pull_sal_struct(self):
from erpnext.hr.doctype.salary_structure.salary_structure import make_salary_slip
make_salary_slip(self._salary_structure_doc.name, self)
if self.salary_slip_based_on_timesheet:
self.salary_structure = self._salary_structure_doc.name
self.hour_rate = self._salary_structure_doc.hour_rate
self.total_working_hours = sum([d.working_hours or 0.0 for d in self.timesheets]) or 0.0
self.add_earning_for_hourly_wages(self._salary_structure_doc.salary_component)
def process_salary_structure(self):
'''Calculate salary after salary structure details have been updated'''
self.pull_emp_details()
self.get_leave_details()
self.calculate_net_pay()
def add_earning_for_hourly_wages(self, salary_component):
default_type = False
for data in self.earnings:
if data.salary_component == salary_component:
data.amount = self.hour_rate * self.total_working_hours
default_type = True
break
if not default_type:
earnings = self.append('earnings', {})
earnings.salary_component = salary_component
earnings.amount = self.hour_rate * self.total_working_hours
def pull_emp_details(self):
emp = frappe.db.get_value("Employee", self.employee, ["bank_name", "bank_ac_no"], as_dict=1)
if emp:
self.bank_name = emp.bank_name
self.bank_account_no = emp.bank_ac_no
def get_leave_details(self, joining_date=None, relieving_date=None, lwp=None):
if not self.fiscal_year:
# if default fiscal year is not set, get from nowdate
self.fiscal_year = get_fiscal_year(nowdate())[0]
if not self.month:
self.month = "%02d" % getdate(nowdate()).month
self.set_month_dates()
if not joining_date:
joining_date, relieving_date = frappe.db.get_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
holidays = self.get_holidays_for_employee(self.start_date, self.end_date)
working_days = date_diff(self.end_date, self.start_date) + 1
if not cint(frappe.db.get_value("HR Settings", None, "include_holidays_in_total_working_days")):
working_days -= len(holidays)
if working_days < 0:
frappe.throw(_("There are more holidays than working days this month."))
if not lwp:
lwp = self.calculate_lwp(holidays, working_days)
self.total_days_in_month = working_days
self.leave_without_pay = lwp
payment_days = flt(self.get_payment_days(joining_date, relieving_date)) - flt(lwp)
self.payment_days = payment_days > 0 and payment_days or 0
def get_payment_days(self, joining_date, relieving_date):
start_date = getdate(self.start_date)
if joining_date:
if joining_date > getdate(self.start_date):
start_date = joining_date
elif joining_date > getdate(self.end_date):
return
end_date = getdate(self.end_date)
if relieving_date:
if relieving_date > start_date and relieving_date < getdate(self.end_date):
end_date = relieving_date
elif relieving_date < getdate(self.start_date):
frappe.throw(_("Employee relieved on {0} must be set as 'Left'")
.format(relieving_date))
payment_days = date_diff(end_date, start_date) + 1
if not cint(frappe.db.get_value("HR Settings", None, "include_holidays_in_total_working_days")):
holidays = self.get_holidays_for_employee(start_date, end_date)
payment_days -= len(holidays)
return payment_days
def get_holidays_for_employee(self, start_date, end_date):
holiday_list = get_holiday_list_for_employee(self.employee)
holidays = frappe.db.sql_list('''select holiday_date from `tabHoliday`
where
parent=%(holiday_list)s
and holiday_date >= %(start_date)s
and holiday_date <= %(end_date)s''', {
"holiday_list": holiday_list,
"start_date": start_date,
"end_date": end_date
})
holidays = [cstr(i) for i in holidays]
return holidays
def calculate_lwp(self, holidays, working_days):
lwp = 0
holidays = "','".join(holidays)
for d in range(working_days):
dt = add_days(cstr(getdate(self.start_date)), d)
leave = frappe.db.sql("""
select t1.name, t1.half_day
from `tabLeave Application` t1, `tabLeave Type` t2
where t2.name = t1.leave_type
and t2.is_lwp = 1
and t1.docstatus = 1
and t1.employee = %(employee)s
and CASE WHEN t2.include_holiday != 1 THEN %(dt)s not in ('{0}') and %(dt)s between from_date and to_date
WHEN t2.include_holiday THEN %(dt)s between from_date and to_date
END
""".format(holidays), {"employee": self.employee, "dt": dt})
if leave:
lwp = cint(leave[0][1]) and (lwp + 0.5) or (lwp + 1)
return lwp
def check_existing(self):
if not self.salary_slip_based_on_timesheet:
ret_exist = frappe.db.sql("""select name from `tabSalary Slip`
where month = %s and fiscal_year = %s and docstatus != 2
and employee = %s and name != %s""",
(self.month, self.fiscal_year, self.employee, self.name))
if ret_exist:
employee_name = self.employee
self.employee = ''
frappe.throw(_("Salary Slip of employee {0} already created for this period").format(employee_name))
else:
for data in self.timesheets:
if frappe.db.get_value('Timesheet', data.time_sheet, 'status') == 'Payrolled':
frappe.throw(_("Salary Slip of employee {0} already created for time sheet {1}").format(self.employee, data.time_sheet))
def sum_components(self, component_type, total_field):
for d in self.get(component_type):
if cint(d.depends_on_lwp) == 1 and not self.salary_slip_based_on_timesheet:
d.amount = rounded((flt(d.amount) * flt(self.payment_days)
/ cint(self.total_days_in_month)), self.precision("amount", component_type))
elif not self.payment_days and not self.salary_slip_based_on_timesheet:
d.amount = 0
elif not d.amount:
d.amount = d.default_amount
self.set(total_field, self.get(total_field) + flt(d.amount))
def calculate_net_pay(self):
if self.salary_structure:
self.calculate_component_amounts()
disable_rounded_total = cint(frappe.db.get_value("Global Defaults", None, "disable_rounded_total"))
self.gross_pay = flt(self.arrear_amount) + flt(self.leave_encashment_amount)
self.total_deduction = 0
self.sum_components('earnings', 'gross_pay')
self.sum_components('deductions', 'total_deduction')
self.net_pay = flt(self.gross_pay) - flt(self.total_deduction)
self.rounded_total = rounded(self.net_pay,
self.precision("net_pay") if disable_rounded_total else 0)
def on_submit(self):
if self.net_pay < 0:
frappe.throw(_("Net Pay cannot be less than 0"))
else:
self.set_status()
self.update_status(self.name)
if(frappe.db.get_single_value("HR Settings", "email_salary_slip_to_employee")):
self.email_salary_slip()
def on_cancel(self):
self.set_status()
self.update_status()
def email_salary_slip(self):
receiver = frappe.db.get_value("Employee", self.employee, "prefered_email")
if receiver:
subj = 'Salary Slip - from {0} to {1}, fiscal year {2}'.format(self.start_date, self.end_date, self.fiscal_year)
frappe.sendmail([receiver], subject=subj, message = _("Please see attachment"),
attachments=[frappe.attach_print(self.doctype, self.name, file_name=self.name)], reference_doctype= self.doctype, reference_name= self.name)
else:
msgprint(_("{0}: Employee email not found, hence email not sent").format(self.employee_name))
def update_status(self, salary_slip=None):
for data in self.timesheets:
if data.time_sheet:
timesheet = frappe.get_doc('Timesheet', data.time_sheet)
timesheet.salary_slip = salary_slip
timesheet.flags.ignore_validate_update_after_submit = True
timesheet.set_status()
timesheet.save()
def set_status(self, status=None):
'''Get and update status'''
if not status:
status = self.get_status()
self.db_set("status", status)
def get_status(self):
if self.docstatus == 0:
status = "Draft"
elif self.docstatus == 1:
status = "Submitted"
if self.journal_entry:
status = "Paid"
elif self.docstatus == 2:
status = "Cancelled"
return status
def unlink_ref_doc_from_salary_slip(ref_no):
linked_ss = frappe.db.sql_list("""select name from `tabSalary Slip`
where journal_entry=%s and docstatus < 2""", (ref_no))
if linked_ss:
for ss in linked_ss:
ss_doc = frappe.get_doc("Salary Slip", ss)
frappe.db.set_value("Salary Slip", ss_doc.name, "status", "Submitted")
frappe.db.set_value("Salary Slip", ss_doc.name, "journal_entry", "")
|
gpl-3.0
| -4,528,810,867,627,106,000 | 36.173367 | 176 | 0.693748 | false | 3.012625 | false | false | false |
Konovalov-Nik/storyboard
|
storyboard/projects/models.py
|
1
|
1893
|
# Copyright 2011 Thierry Carrez <[email protected]>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.db import models
class Project(models.Model):
name = models.CharField(max_length=50, primary_key=True)
title = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class ProjectGroup(models.Model):
name = models.CharField(max_length=50, primary_key=True)
title = models.CharField(max_length=100)
members = models.ManyToManyField(Project)
def __unicode__(self):
return self.name
class Branch(models.Model):
BRANCH_STATUS = (
('M', 'master'),
('R', 'release'),
('S', 'stable'),
('U', 'unsupported'))
name = models.CharField(max_length=50)
short_name = models.CharField(max_length=20)
status = models.CharField(max_length=1, choices=BRANCH_STATUS)
release_date = models.DateTimeField()
def __unicode__(self):
return self.name
class Meta:
ordering = ['release_date']
class Milestone(models.Model):
name = models.CharField(max_length=50)
branch = models.ForeignKey(Branch)
released = models.BooleanField(default=False)
undefined = models.BooleanField(default=False)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
|
apache-2.0
| 9,213,235,775,598,121,000 | 28.578125 | 78 | 0.673006 | false | 3.879098 | false | false | false |
OpenBfS/dokpool-plone
|
Plone/src/docpool.localbehavior/docpool/localbehavior/localbehavior.py
|
1
|
1955
|
from Acquisition import aq_inner
from docpool.localbehavior import MessageFactory as _
from plone.autoform import directives
from plone.autoform.interfaces import IFormFieldProvider
from plone.supermodel import model
from z3c.form.browser.checkbox import CheckBoxFieldWidget
from zope import schema
from zope.component import getMultiAdapter
from zope.interface import provider
from zope.interface import Interface
from zope.schema.interfaces import IContextAwareDefaultFactory
@provider(IContextAwareDefaultFactory)
def initializeLocalBehaviors(context):
dp_app_state = getMultiAdapter((context, context.REQUEST), name=u'dp_app_state')
return dp_app_state.effectiveAppsHere()
@provider(IFormFieldProvider)
class ILocalBehaviorSupport(model.Schema):
directives.widget(local_behaviors=CheckBoxFieldWidget)
local_behaviors = schema.List(
title=u'Behaviors',
description=_(
u'description_local_behaviors',
default=u'Select applications supported for this content,'
' changes will be applied after saving',
),
required=False,
defaultFactory=initializeLocalBehaviors,
value_type=schema.Choice(
title=u'Applications',
vocabulary="LocalBehaviors"),
)
class ILocalBehaviorSupporting(Interface):
"""Marker"""
class LocalBehaviorSupport(object):
def __init__(self, context):
self.context = context
def _get_local_behaviors(self):
return list(set(self.context.local_behaviors))
def _set_local_behaviors(self, value):
if isinstance(value, (list, tuple)):
value = list(set(value))
context = aq_inner(self.context)
if value is not None:
context.local_behaviors = list(set(value))
else:
context.local_behaviors = []
local_behaviors = property(_get_local_behaviors, _set_local_behaviors)
|
gpl-3.0
| -6,956,147,756,234,758,000 | 32.135593 | 84 | 0.707417 | false | 3.997955 | false | false | false |
scopatz/regolith
|
regolith/broker.py
|
1
|
2663
|
"""API for accessing the metadata and file storage"""
from regolith.database import dump_database, open_dbs
from regolith.runcontrol import DEFAULT_RC, load_rcfile, filter_databases
from regolith.storage import store_client, push
def load_db(rc_file="regolithrc.json"):
"""Create a Broker instance from an rc file"""
rc = DEFAULT_RC
rc._update(load_rcfile(rc_file))
filter_databases(rc)
return Broker(rc)
class Broker:
"""Interface to the database and file storage systems
Examples
--------
>>> # Load the db
>>> db = Broker.from_rc()
>>> # Get a document from the broker
>>> ergs = db['group']['ergs']
>>> # Store a file
>>> db.add_file(ergs, 'myfile', '/path/to/file/hello.txt')
>>> # Get a file from the store
>>> path = db.get_file_path(ergs, 'myfile')
"""
def __init__(self, rc=DEFAULT_RC):
self.rc = rc
# TODO: Lazy load these
with store_client(rc) as sclient:
self.store = sclient
rc.client = open_dbs(rc)
self._dbs = rc.client.dbs
self.md = rc.client.chained_db
self.db_client = rc.client
def add_file(self, document, name, filepath):
"""Add a file to a document in a collection.
Parameters
----------
document : dict
The document to add the file to
name : str
Name of the reference to the file
filepath : str
Location of the file on local disk
"""
output_path = self.store.copydoc(filepath)
if "files" not in document:
document["files"] = {}
document["files"][name] = output_path
for db in self.rc.databases:
dump_database(db, self.db_client, self.rc)
push(self.store.store, self.store.path)
@classmethod
def from_rc(cls, rc_file="regolithrc.json"):
"""Return a Broker instance"""
return load_db(rc_file)
def get_file_path(self, document, name):
""" Get a file from the file storage associated with the document and
name
Parameters
----------
document : dict
The document which stores the reference to the file
name : str
The name of the file stored (note that this can be different from
the filename itself)
Returns
-------
path : str or None
The file path, if not in the storage None
"""
if "files" in document:
return self.store.retrieve(document["files"][name])
else:
return None
def __getitem__(self, item):
return self.md[item]
|
cc0-1.0
| -2,406,156,181,989,504,000 | 28.921348 | 77 | 0.575667 | false | 4.047112 | false | false | false |
christodoulos/pycompgeom
|
pycompgeom/algorithms.py
|
1
|
1904
|
from primitives import *
from predicates import *
import random
def jarvis(points):
r0 = min(points)
hull = [r0]
r = r0
while True:
u = random.choice(points)
for t in points:
if cw(r, u, t) or collinear(r, u, t) and between(r, t, u):
u = t
if u == r0: break
else:
r = u
points.remove(r)
hull.append(r)
return hull
def find_bridge(poly1, poly2, upper=True):
max_p1, min_p2 = max(poly1.vertices), min(poly2.vertices)
i, j = poly1.index(max_p1), poly2.index(min_p2)
bridge_found = False
while not bridge_found:
if upper:
if not ccw(poly1[i], poly1[i+1], poly2[j]):
i += 1; i_changed = True
else: i_changed = False
if not cw(poly2[j], poly2[j-1], poly1[i]):
j -= 1; j_changed = True
else: j_changed = False
else:
if not cw(poly1[i], poly1[i-1], poly2[j]):
i -= 1; i_changed = True
else: i_changed = False
if not ccw(poly2[j], poly2[j+1], poly1[i]):
j += 1; j_changed = True
else: j_changed = False
bridge_found = not i_changed and not j_changed
return Segment2(poly1[i], poly2[j])
def andrew(points, return_hull=True):
upper = []
lower = []
for point in sorted(points):
while len(upper) > 1 and ccwon(upper[-2], upper[-1], point):
upper.pop()
while len(lower) > 1 and cwon(lower[-2], lower[-1], point):
lower.pop()
upper.append(point)
lower.append(point)
if return_hull:
return lower[:-1]+ [x for x in reversed(upper[1:])]
else:
return upper, lower
def antipodal_pairs(points):
U, L = andrew(points, return_hull=False)
i, j = 0, len(L)-1
while i<len(U)-1 or j>0:
yield U[i], L[j]
if i == len(U)-1: j -= 1
elif j == 0: i += 1
elif (U[i+1].y-U[i].y) * (L[j].x-L[j-1].x) > \
(L[j].y-L[j-1].y) * (U[i+1].x-U[i].x):
i += 1
else: j -= 1
def diameter(points):
dlist = [((p.x-q.x)**2+(p.y-q.y)**2,(p,q)) \
for p,q in antipodal_pairs(points)]
diam, pair = max(dlist)
return pair
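# Minimal usage sketch, added for illustration only (not part of the original
# module). It assumes primitives.Point2 is constructed as Point2(x, y); adjust
# the constructor call if the actual API differs.
if __name__ == '__main__':
    pts = [Point2(0, 0), Point2(4, 0), Point2(4, 3), Point2(0, 3), Point2(2, 1)]
    print(andrew(pts))      # convex hull of the point set
    print(diameter(pts))    # farthest pair, found via the antipodal pairs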
|
gpl-3.0
| 9,201,103,693,256,576,000 | 24.052632 | 62 | 0.596113 | false | 2.291215 | false | false | false |
Instanssi/Instanssi.org
|
Instanssi/admin_profile/forms.py
|
1
|
3306
|
# -*- coding: utf-8 -*-
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Fieldset, ButtonHolder
from django.contrib.auth.models import User
class InformationChangeForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(InformationChangeForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Fieldset(
'',
'first_name',
'last_name',
'email',
ButtonHolder(
Submit('submit', 'Tallenna')
)
)
)
self.fields['email'].required = True
class Meta:
model = User
fields = ('first_name', 'last_name', 'email')
class PasswordChangeForm(forms.Form):
old_pw = forms.CharField(
widget=forms.PasswordInput,
label='Vanha salasana',
help_text='Kirjoita vanha salasanasi turvallisuussyistä.')
new_pw = forms.CharField(
widget=forms.PasswordInput,
label='Uusi salasana',
help_text='Kirjoita uusi salasanasi. Tulee olla vähintään 8 merkkiä pitkä.')
new_pw_again = forms.CharField(
widget=forms.PasswordInput,
label='Uusi salasana uudelleen',
help_text='Kirjoita uusi salasanasi toistamiseen varmistukseksi.')
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(PasswordChangeForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Fieldset(
'',
'old_pw',
'new_pw',
'new_pw_again',
ButtonHolder(
Submit('submit', 'Tallenna')
)
)
)
def save(self):
password = self.cleaned_data['new_pw']
self.user.set_password(password)
self.user.save()
def clean_old_pw(self):
# Make sure this is valid
old = self.cleaned_data['old_pw']
if not self.user.check_password(old):
raise forms.ValidationError('Vanha salasana väärin!')
# Remember to return cleaned data
return old
def clean_new_pw(self):
pw = self.cleaned_data['new_pw']
if len(pw) < 8:
raise forms.ValidationError('Salasanan tulee olla vähintään 8 merkkiä pitkä!')
return pw
def clean_new_pw_again(self):
pw = self.cleaned_data['new_pw_again']
if len(pw) < 8:
raise forms.ValidationError('Salasanan tulee olla vähintään 8 merkkiä pitkä!')
return pw
def clean(self):
cleaned_data = super(PasswordChangeForm, self).clean()
# Make sure new pw fields match
pwa = cleaned_data.get('new_pw')
pwb = cleaned_data.get('new_pw_again')
if pwa != pwb:
msg = 'Salasana ei vastaa edelliseen kenttään annettua!'
self._errors["new_pw_again"] = self.error_class([msg])
del cleaned_data["new_pw_again"]
# Remember to return cleaned data
return cleaned_data
|
mit
| 8,177,352,354,657,648,000 | 31.86 | 90 | 0.550822 | false | 3.829837 | false | false | false |
the-it/WS_THEbotIT
|
archive/online/2016/160711_replace_citations.py
|
1
|
1373
|
# -*- coding: utf-8 -*-
__author__ = 'eso'
import sys
sys.path.append('../../')
import re
import pywikibot
def add_zeros(number, digits):
number_str = str(number)
if number < 10:
for members in range(digits-1):
number_str = "0" + number_str
elif number < 100:
for members in range(digits-2):
number_str = "0" + number_str
elif number < 1000:
for members in range(digits-3):
number_str = "0" + number_str
return number_str
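# Illustrative note (not part of the original script): add_zeros left-pads a
# page number to a fixed width, e.g. add_zeros(7, 3) -> "007" and
# add_zeros(42, 3) -> "042"; the built-in str.zfill gives the same result,
# e.g. str(7).zfill(3) == add_zeros(7, 3).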
wiki = pywikibot.Site()
regex = re.compile("\{\{Zitierempfehlung\|Projekt=Karl Zeumer: ''Quellensammlung zur Geschichte der Deutschen Reichsverfassung in Mittelalter und Neuzeit''\.Tübingen: Verlag von J\.C\.B\. Mohr \(Paul Siebeck\), 1913\|Seite=(\d{1,3})\}\}")
for i in range(1, 563):
print(i)
page = pywikibot.Page(wiki, 'Seite:De Zeumer V2 {}.jpg'.format(add_zeros(i, 3)))
temp_text = page.text
if regex.search(temp_text):
if int(regex.search(temp_text).group(1)) != i:
temp_text = regex.sub("{{Zitierempfehlung|Projekt=Karl Zeumer: ''Quellensammlung zur Geschichte der Deutschen Reichsverfassung in Mittelalter und Neuzeit''.Tübingen: Verlag von J.C.B. Mohr (Paul Siebeck), 1913|Seite=" + str(i) +"}}", temp_text)
page.text = temp_text
page.save(summary='Zitierempfehlung korrigiert', botflag=True)
|
mit
| 4,581,316,253,179,235,300 | 39.323529 | 256 | 0.636032 | false | 2.764113 | false | false | false |
mdauphin/pycvnode
|
pycvnode/connector.py
|
1
|
3391
|
import cv2
import numpy as np
class Connector(object):
class Direction:
OUTPUT = 1
INPUT = 2
def __init__(self,node,name,direction,type):
self.node = node
self.name = name
self.direction = direction
self.value = None
self.type = type
self.parser = ConnectorParser(self)
self.render = ConnectorRenderer(self)
def setValue(self,value):
self.value = self.parser.parse(value)
def generate(self):
return None
def evaluate(self):
raise Exception('Connector','Can not evaluate generic Connector')
class ConnectorInput(Connector):
def __init__(self,node,name,type):
self.connection = None
super( ConnectorInput, self ).__init__( node, name,
Connector.Direction.INPUT, type );
def generate(self):
if self.connection != None:
return self.connection.output_connector.generate()
if self.value != None:
if isinstance(self.value, str):
return "'%s'" % self.value
return str(self.value)
def evaluate(self):
if self.connection != None:
return self.connection.output_connector.evaluate()
elif self.value != None:
return self.value
else:
raise Exception('ConnectorInput','No connection no value to evaluate')
class ConnectorOutput(Connector):
_cpt = 0
def __init__(self,node,name,type):
self.varname = self.generate_uniq_var()
self.connections = []
super( ConnectorOutput, self ).__init__( node, name,
Connector.Direction.OUTPUT, type )
def generate_uniq_var(self):
ConnectorOutput._cpt += 1
return "var%d" % ( ConnectorOutput._cpt )
def generate(self):
return self.varname
def evaluate(self):
return self.node.evaluate()
class ConnectorParser(object):
def __init__(self,connector):
self.connector = connector
self.converter = {
'str' : self.toStr,
'int' : self.toInt,
'float' : self.toFloat,
'tuple' : self.toTuple,
}
def parse(self,value):
return self.converter[self.connector.type](value)
def toStr(self,value):
return value
def toInt(self,value):
return int(value)
def toFloat(self,value):
return float(value)
def toTuple(self,value):
return eval(value)
class ConnectorRenderer(object):
def __init__(self,connector):
self.connector = connector
self.converter = {
'str' : self.toStr,
'int' : self.toStr,
'float' : self.toStr,
'tuple' : self.toStr,
'numpy.ndarray' : self.toImg,
}
def render(self):
return self.converter[self.connector.type](self.connector.evaluate())
def toImg(self, value ):
ret, buf = cv2.imencode( '.png', value )
return buf.tobytes()
def toStr(self,value):
return '<p>%s</p>' % value
class ConnectorJson(object):
def __init__(self,connector):
self.connector = connector;
def render(self):
#{ 'dir' : Direction.Input , 'name' : 'conIn' },
dir = 'Input'
if isinstance(self.connector, ConnectorOutput):
dir = 'Output'
return { 'dir': dir, 'name' : self.connector.name }
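# Minimal usage sketch, added for illustration only (not part of the original
# module). ConnectorParser only reads connector.type, so a ConnectorInput with
# no owning node (node=None) is enough to demonstrate how values are parsed.
if __name__ == '__main__':
    pt_in = ConnectorInput(None, 'center', 'tuple')
    pt_in.setValue('(10, 20)')    # parsed via ConnectorParser.toTuple (eval)
    print(pt_in.value)            # -> (10, 20)
    print(pt_in.generate())       # -> '(10, 20)', the literal used in generated code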
|
gpl-2.0
| 670,151,130,487,789,400 | 27.737288 | 82 | 0.576821 | false | 3.961449 | false | false | false |
openatv/enigma2
|
lib/python/Components/VfdSymbols.py
|
2
|
12566
|
# -*- coding: utf-8 -*-
from twisted.internet import threads
from config import config
from enigma import eDBoxLCD, eTimer, iPlayableService, pNavigation, iServiceInformation
import NavigationInstance
from Tools.Directories import fileExists
from Components.ParentalControl import parentalControl
from Components.ServiceEventTracker import ServiceEventTracker
from Components.SystemInfo import SystemInfo
from boxbranding import getBoxType, getMachineBuild
from time import time
import Components.RecordingConfig
POLLTIME = 5 # seconds
def SymbolsCheck(session, **kwargs):
global symbolspoller, POLLTIME
if getBoxType() in ('alien5','osninopro','osnino','osninoplus','tmtwin4k','mbmicrov2','revo4k','force3uhd','wetekplay', 'wetekplay2', 'wetekhub', 'ixussone', 'ixusszero', 'mbmicro', 'e4hd', 'e4hdhybrid', 'dm7020hd', 'dm7020hdv2', '9910lx', '9911lx', '9920lx') or getMachineBuild() in ('dags7362' , 'dags73625', 'dags5','ustym4kpro','beyonwizv2','viper4k','sf8008','sf8008m','gbmv200','cc1'):
POLLTIME = 1
symbolspoller = SymbolsCheckPoller(session)
symbolspoller.start()
class SymbolsCheckPoller:
def __init__(self, session):
self.session = session
self.blink = False
self.led = "0"
self.timer = eTimer()
self.onClose = []
self.__event_tracker = ServiceEventTracker(screen=self,eventmap=
{
iPlayableService.evUpdatedInfo: self.__evUpdatedInfo,
})
def __onClose(self):
pass
def start(self):
if self.symbolscheck not in self.timer.callback:
self.timer.callback.append(self.symbolscheck)
self.timer.startLongTimer(0)
def stop(self):
if self.symbolscheck in self.timer.callback:
self.timer.callback.remove(self.symbolscheck)
self.timer.stop()
def symbolscheck(self):
threads.deferToThread(self.JobTask)
self.timer.startLongTimer(POLLTIME)
def JobTask(self):
self.Recording()
self.PlaySymbol()
self.timer.startLongTimer(POLLTIME)
def __evUpdatedInfo(self):
self.service = self.session.nav.getCurrentService()
if getMachineBuild() == 'u41':
self.Resolution()
self.Audio()
self.Crypted()
self.Teletext()
self.Hbbtv()
self.PauseSymbol()
self.PlaySymbol()
self.PowerSymbol()
self.Timer()
self.Subtitle()
self.ParentalControl()
del self.service
def Recording(self):
if fileExists("/proc/stb/lcd/symbol_circle"):
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
if recordings > 0:
open("/proc/stb/lcd/symbol_circle", "w").write("3")
else:
open("/proc/stb/lcd/symbol_circle", "w").write("0")
elif getBoxType() in ('alphatriple','mixosf5', 'mixoslumi', 'mixosf7', 'gi9196m', 'sf3038') and fileExists("/proc/stb/lcd/symbol_recording"):
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
if recordings > 0:
open("/proc/stb/lcd/symbol_recording", "w").write("1")
else:
open("/proc/stb/lcd/symbol_recording", "w").write("0")
elif getMachineBuild() == 'u41' and fileExists("/proc/stb/lcd/symbol_pvr2"):
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
if recordings > 0:
open("/proc/stb/lcd/symbol_pvr2", "w").write("1")
else:
open("/proc/stb/lcd/symbol_pvr2", "w").write("0")
elif getBoxType() in ('alien5','osninopro','wetekplay', 'wetekplay2', 'wetekhub', 'ixussone', 'ixusszero', '9910lx', '9911lx', 'osnino', 'osninoplus', '9920lx') and fileExists("/proc/stb/lcd/powerled"):
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
self.blink = not self.blink
if recordings > 0:
if self.blink:
open("/proc/stb/lcd/powerled", "w").write("1")
self.led = "1"
else:
open("/proc/stb/lcd/powerled", "w").write("0")
self.led = "0"
elif self.led == "1":
open("/proc/stb/lcd/powerled", "w").write("0")
elif getBoxType() in ('mbmicrov2','mbmicro', 'e4hd', 'e4hdhybrid') and fileExists("/proc/stb/lcd/powerled"):
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
self.blink = not self.blink
if recordings > 0:
if self.blink:
open("/proc/stb/lcd/powerled", "w").write("0")
self.led = "1"
else:
open("/proc/stb/lcd/powerled", "w").write("1")
self.led = "0"
elif self.led == "1":
open("/proc/stb/lcd/powerled", "w").write("1")
elif getBoxType() in ('dm7020hd', 'dm7020hdv2') and fileExists("/proc/stb/fp/led_set"):
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
self.blink = not self.blink
if recordings > 0:
if self.blink:
open("/proc/stb/fp/led_set", "w").write("0x00000000")
self.led = "1"
else:
open("/proc/stb/fp/led_set", "w").write("0xffffffff")
self.led = "0"
else:
open("/proc/stb/fp/led_set", "w").write("0xffffffff")
elif getMachineBuild() in ('dags7362' , 'dags73625', 'dags5') or getBoxType() in ('tmtwin4k','revo4k','force3uhd') and fileExists("/proc/stb/lcd/symbol_rec"):
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
self.blink = not self.blink
if recordings > 0:
if self.blink:
open("/proc/stb/lcd/symbol_rec", "w").write("1")
self.led = "1"
else:
open("/proc/stb/lcd/symbol_rec", "w").write("0")
self.led = "0"
elif self.led == "1":
open("/proc/stb/lcd/symbol_rec", "w").write("0")
elif getMachineBuild() in ('sf8008','sf8008m','cc1','ustym4kpro','beyonwizv2','viper4k') and fileExists("/proc/stb/fp/ledpowercolor"):
import Screens.Standby
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
self.blink = not self.blink
if recordings > 0:
if self.blink:
open("/proc/stb/fp/ledpowercolor", "w").write("0")
self.led = "1"
else:
if Screens.Standby.inStandby:
open("/proc/stb/fp/ledpowercolor", "w").write(config.usage.lcd_ledstandbycolor.value)
else:
open("/proc/stb/fp/ledpowercolor", "w").write(config.usage.lcd_ledpowercolor.value)
self.led = "0"
elif self.led == "1":
if Screens.Standby.inStandby:
open("/proc/stb/fp/ledpowercolor", "w").write(config.usage.lcd_ledstandbycolor.value)
else:
open("/proc/stb/fp/ledpowercolor", "w").write(config.usage.lcd_ledpowercolor.value)
else:
if not fileExists("/proc/stb/lcd/symbol_recording") or not fileExists("/proc/stb/lcd/symbol_record_1") or not fileExists("/proc/stb/lcd/symbol_record_2"):
return
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
if recordings > 0:
open("/proc/stb/lcd/symbol_recording", "w").write("1")
if recordings == 1:
open("/proc/stb/lcd/symbol_record_1", "w").write("1")
open("/proc/stb/lcd/symbol_record_2", "w").write("0")
elif recordings >= 2:
open("/proc/stb/lcd/symbol_record_1", "w").write("1")
open("/proc/stb/lcd/symbol_record_2", "w").write("1")
else:
open("/proc/stb/lcd/symbol_recording", "w").write("0")
open("/proc/stb/lcd/symbol_record_1", "w").write("0")
open("/proc/stb/lcd/symbol_record_2", "w").write("0")
def Subtitle(self):
if not fileExists("/proc/stb/lcd/symbol_smartcard") and not fileExists("/proc/stb/lcd/symbol_subtitle"):
return
subtitle = self.service and self.service.subtitle()
subtitlelist = subtitle and subtitle.getSubtitleList()
if subtitlelist:
subtitles = len(subtitlelist)
if fileExists("/proc/stb/lcd/symbol_subtitle"):
if subtitles > 0:
f = open("/proc/stb/lcd/symbol_subtitle", "w")
f.write("1")
f.close()
else:
f = open("/proc/stb/lcd/symbol_subtitle", "w")
f.write("0")
f.close()
else:
if subtitles > 0:
f = open("/proc/stb/lcd/symbol_smartcard", "w")
f.write("1")
f.close()
else:
f = open("/proc/stb/lcd/symbol_smartcard", "w")
f.write("0")
f.close()
else:
if fileExists("/proc/stb/lcd/symbol_subtitle"):
f = open("/proc/stb/lcd/symbol_subtitle", "w")
f.write("0")
f.close()
else:
f = open("/proc/stb/lcd/symbol_smartcard", "w")
f.write("0")
f.close()
def ParentalControl(self):
if not fileExists("/proc/stb/lcd/symbol_parent_rating"):
return
service = self.session.nav.getCurrentlyPlayingServiceReference()
if service:
if parentalControl.getProtectionLevel(service.toCompareString()) == -1:
open("/proc/stb/lcd/symbol_parent_rating", "w").write("0")
else:
open("/proc/stb/lcd/symbol_parent_rating", "w").write("1")
else:
open("/proc/stb/lcd/symbol_parent_rating", "w").write("0")
def PlaySymbol(self):
if not fileExists("/proc/stb/lcd/symbol_play"):
return
if SystemInfo["SeekStatePlay"]:
file = open("/proc/stb/lcd/symbol_play", "w")
file.write('1')
file.close()
else:
file = open("/proc/stb/lcd/symbol_play", "w")
file.write('0')
file.close()
def PauseSymbol(self):
if not fileExists("/proc/stb/lcd/symbol_pause"):
return
if SystemInfo["StatePlayPause"]:
file = open("/proc/stb/lcd/symbol_pause", "w")
file.write('1')
file.close()
else:
file = open("/proc/stb/lcd/symbol_pause", "w")
file.write('0')
file.close()
def PowerSymbol(self):
if not fileExists("/proc/stb/lcd/symbol_power"):
return
if SystemInfo["StandbyState"]:
file = open("/proc/stb/lcd/symbol_power", "w")
file.write('0')
file.close()
else:
file = open("/proc/stb/lcd/symbol_power", "w")
file.write('1')
file.close()
def Resolution(self):
if not fileExists("/proc/stb/lcd/symbol_hd"):
return
info = self.service and self.service.info()
if not info:
return ""
videosize = int(info.getInfo(iServiceInformation.sVideoWidth))
if videosize >= 1280:
f = open("/proc/stb/lcd/symbol_hd", "w")
f.write("1")
f.close()
else:
f = open("/proc/stb/lcd/symbol_hd", "w")
f.write("0")
f.close()
def Crypted(self):
if not fileExists("/proc/stb/lcd/symbol_scrambled"):
return
info = self.service and self.service.info()
if not info:
return ""
crypted = info.getInfo(iServiceInformation.sIsCrypted)
if crypted == 1:
f = open("/proc/stb/lcd/symbol_scrambled", "w")
f.write("1")
f.close()
else:
f = open("/proc/stb/lcd/symbol_scrambled", "w")
f.write("0")
f.close()
def Teletext(self):
if not fileExists("/proc/stb/lcd/symbol_teletext"):
return
info = self.service and self.service.info()
if not info:
return ""
tpid = int(info.getInfo(iServiceInformation.sTXTPID))
if tpid != -1:
f = open("/proc/stb/lcd/symbol_teletext", "w")
f.write("1")
f.close()
else:
f = open("/proc/stb/lcd/symbol_teletext", "w")
f.write("0")
f.close()
def Hbbtv(self):
if not fileExists("/proc/stb/lcd/symbol_epg"):
return
info = self.service and self.service.info()
if not info:
return ""
hbbtv = info.getInfoString(iServiceInformation.sHBBTVUrl)
if hbbtv != "":
f = open("/proc/stb/lcd/symbol_epg", "w")
f.write("1")
f.close()
else:
f = open("/proc/stb/lcd/symbol_epg", "w")
f.write("0")
f.close()
def Audio(self):
if not fileExists("/proc/stb/lcd/symbol_dolby_audio"):
return
audio = self.service.audioTracks()
if audio:
n = audio.getNumberOfTracks()
idx = 0
while idx < n:
i = audio.getTrackInfo(idx)
description = i.getDescription();
if "AC3" in description or "AC-3" in description or "DTS" in description:
f = open("/proc/stb/lcd/symbol_dolby_audio", "w")
f.write("1")
f.close()
return
idx += 1
f = open("/proc/stb/lcd/symbol_dolby_audio", "w")
f.write("0")
f.close()
def Timer(self):
if fileExists("/proc/stb/lcd/symbol_timer"):
timer = NavigationInstance.instance.RecordTimer.getNextRecordingTime()
if timer > 0:
open("/proc/stb/lcd/symbol_timer", "w").write("1")
else:
open("/proc/stb/lcd/symbol_timer", "w").write("0")
|
gpl-2.0
| -1,859,345,489,814,616,800 | 32.87062 | 393 | 0.668073 | false | 2.799287 | true | false | false |
arenadata/ambari
|
ambari-server/src/main/resources/stacks/ADH/1.0/services/PIG/package/scripts/pig_client.py
|
1
|
1908
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import sys
import os
from resource_management import *
from resource_management.libraries.functions import conf_select
from pig import pig
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
class PigClient(Script):
def configure(self, env):
import params
env.set_params(params)
pig()
def status(self, env):
raise ClientComponentHasNoStatus()
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class PigClientLinux(PigClient):
def get_stack_to_component(self):
return {"HDP": "hadoop-client"}
def pre_upgrade_restart(self, env, upgrade_type=None):
import params
env.set_params(params)
def install(self, env):
self.install_packages(env)
self.configure(env)
Execute(('tar', '-czf', '/usr/lib/pig/pig.tar.gz', '-C', '/usr/lib/pig/lib/', '.'), sudo = True)
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class PigClientWindows(PigClient):
def install(self, env):
import params
if params.pig_home is None:
self.install_packages(env)
self.configure(env)
if __name__ == "__main__":
PigClient().execute()
|
apache-2.0
| -5,231,814,969,549,273,000 | 29.285714 | 100 | 0.745283 | false | 3.648184 | false | false | false |
ARM-software/armnn
|
python/pyarmnn/test/test_supported_backends.py
|
1
|
1398
|
# Copyright © 2020 Arm Ltd. All rights reserved.
# SPDX-License-Identifier: MIT
import os
import platform
import pytest
import pyarmnn as ann
@pytest.fixture()
def get_supported_backends_setup(shared_data_folder):
options = ann.CreationOptions()
runtime = ann.IRuntime(options)
get_device_spec = runtime.GetDeviceSpec()
supported_backends = get_device_spec.GetSupportedBackends()
yield supported_backends
def test_ownership():
options = ann.CreationOptions()
runtime = ann.IRuntime(options)
device_spec = runtime.GetDeviceSpec()
assert not device_spec.thisown
def test_to_string():
options = ann.CreationOptions()
runtime = ann.IRuntime(options)
device_spec = runtime.GetDeviceSpec()
expected_str = "IDeviceSpec {{ supportedBackends: [" \
"{}" \
"]}}".format(', '.join(map(lambda b: str(b), device_spec.GetSupportedBackends())))
assert expected_str == str(device_spec)
def test_get_supported_backends_cpu_ref(get_supported_backends_setup):
assert "CpuRef" in map(lambda b: str(b), get_supported_backends_setup)
@pytest.mark.aarch64
class TestNoneCpuRefBackends:
@pytest.mark.parametrize("backend", ["CpuAcc"])
def test_get_supported_backends_cpu_acc(self, get_supported_backends_setup, backend):
assert backend in map(lambda b: str(b), get_supported_backends_setup)
|
mit
| 1,352,998,812,462,088,700 | 26.94 | 101 | 0.69864 | false | 3.676316 | true | false | false |
mpatacchiola/pyERA
|
examples/ex_nao_head_imitation/head_pose_estimation.py
|
1
|
8515
|
#!/usr/bin/env python
import numpy as np
import tensorflow as tf
import cv2
import os.path
DEBUG = False
class CnnHeadPoseEstimator:
def __init__(self, tf_session):
""" Init the class
@param tf_session An external tensorflow session
"""
self._sess = tf_session
def print_allocated_variables(self):
""" Print all the Tensorflow allocated variables
"""
all_vars = tf.all_variables()
print("[DEEPGAZE] Printing all the Allocated Tensorflow Variables:")
for k in all_vars:
print(k.name)
def load_yaw_variables(self, YawFilePath):
""" Load varibles from a checkpoint file
@param YawFilePath Path to a valid checkpoint
"""
#It is possible to use the checkpoint file
#y_ckpt = tf.train.get_checkpoint_state(YawFilePath)
#.restore(self._sess, y_ckpt.model_checkpoint_path)
#For future use, allocating a fraction of the GPU
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5) #Allocate only half of the GPU memory
if(os.path.isfile(YawFilePath)==False): raise ValueError('[DEEPGAZE] CnnHeadPoseEstimator: the yaw file path is incorrect.')
tf.train.Saver(({"conv1_yaw_w": self.hy_conv1_weights, "conv1_yaw_b": self.hy_conv1_biases,
"conv2_yaw_w": self.hy_conv2_weights, "conv2_yaw_b": self.hy_conv2_biases,
"conv3_yaw_w": self.hy_conv3_weights, "conv3_yaw_b": self.hy_conv3_biases,
"dense1_yaw_w": self.hy_dense1_weights, "dense1_yaw_b": self.hy_dense1_biases,
"out_yaw_w": self.hy_out_weights, "out_yaw_b": self.hy_out_biases
})).restore(self._sess, YawFilePath)
def allocate_yaw_variables(self):
""" Allocate variables in memory
"""
self._num_labels = 1
# Input data [batch_size, image_size, image_size, channels]
self.tf_yaw_input_vector = tf.placeholder(tf.float32, shape=(64, 64, 3))
# Variables.
#Conv layer
#[patch_size, patch_size, num_channels, depth]
self.hy_conv1_weights = tf.Variable(tf.truncated_normal([3, 3, 3, 64], stddev=0.1))
self.hy_conv1_biases = tf.Variable(tf.zeros([64]))
#Conv layer
#[patch_size, patch_size, depth, depth]
self.hy_conv2_weights = tf.Variable(tf.truncated_normal([3, 3, 64, 128], stddev=0.1))
self.hy_conv2_biases = tf.Variable(tf.random_normal(shape=[128]))
#Conv layer
#[patch_size, patch_size, depth, depth]
self.hy_conv3_weights = tf.Variable(tf.truncated_normal([3, 3, 128, 256], stddev=0.1)) #was[3, 3, 128, 256]
self.hy_conv3_biases = tf.Variable(tf.random_normal(shape=[256]))
#Dense layer
#[ 5*5 * previous_layer_out , num_hidden] wd1
#here 5*5 is the size of the image after pool reduction (divide by half 3 times)
self.hy_dense1_weights = tf.Variable(tf.truncated_normal([8 * 8 * 256, 256], stddev=0.1)) #was [5*5*256, 1024]
self.hy_dense1_biases = tf.Variable(tf.random_normal(shape=[256]))
#Dense layer
#[ , num_hidden] wd2
#self.hy_dense2_weights = tf.Variable(tf.truncated_normal([256, 256], stddev=0.01))
#self.hy_dense2_biases = tf.Variable(tf.random_normal(shape=[256]))
#Output layer
self.hy_out_weights = tf.Variable(tf.truncated_normal([256, self._num_labels], stddev=0.1))
self.hy_out_biases = tf.Variable(tf.random_normal(shape=[self._num_labels]))
# dropout (keep probability)
#self.keep_prob = tf.placeholder(tf.float32, name="keep_prob")
# Model.
def model(data):
X = tf.reshape(data, shape=[-1, 64, 64, 3])
if(DEBUG == True): print("SHAPE X: " + str(X.get_shape()))
# Convolution Layer 1
conv1 = tf.tanh(tf.nn.bias_add(tf.nn.conv2d(X, self.hy_conv1_weights, strides=[1, 1, 1, 1], padding='SAME'),self.hy_conv1_biases))
if(DEBUG == True): print("SHAPE conv1: " + str(conv1.get_shape()))
# Max Pooling (down-sampling)
pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
if(DEBUG == True): print("SHAPE pool1: " + str(pool1.get_shape()))
# Apply Normalization
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
# Apply Dropout
#norm1 = tf.nn.dropout(norm1, _dropout)
# Convolution Layer 2
conv2 = tf.tanh(tf.nn.bias_add(tf.nn.conv2d(norm1, self.hy_conv2_weights, strides=[1, 1, 1, 1], padding='SAME'),self.hy_conv2_biases))
if(DEBUG == True): print("SHAPE conv2: " + str(conv2.get_shape()))
# Max Pooling (down-sampling)
pool2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
if(DEBUG == True): print("SHAPE pool2: " + str(pool2.get_shape()))
# Apply Normalization
norm2 = tf.nn.lrn(pool2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
# Apply Dropout
#norm2 = tf.nn.dropout(norm2, _dropout)
# Convolution Layer 3
conv3 = tf.tanh(tf.nn.bias_add(tf.nn.conv2d(norm2, self.hy_conv3_weights, strides=[1, 1, 1, 1], padding='SAME'),self.hy_conv3_biases))
if(DEBUG == True): print("SHAPE conv3: " + str(conv3.get_shape()))
# Max Pooling (down-sampling)
pool3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
if(DEBUG == True): print("SHAPE pool3: " + str(pool3.get_shape()))
# Apply Normalization
norm3 = tf.nn.lrn(pool3, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
# Fully connected layer 4
dense1 = tf.reshape(norm3, [-1, self.hy_dense1_weights.get_shape().as_list()[0]]) # Reshape conv3
if(DEBUG == True): print("SHAPE dense1: " + str(dense1.get_shape()))
dense1 = tf.tanh(tf.matmul(dense1, self.hy_dense1_weights) + self.hy_dense1_biases)
#Fully connected layer 5
#dense2 = tf.tanh(tf.matmul(dense1, self.hy_dense2_weights) + self.hy_dense2_biases)
#if(DEBUG == True): print("SHAPE dense2: " + str(dense2.get_shape()))
#Output layer 6
out = tf.tanh(tf.matmul(dense1, self.hy_out_weights) + self.hy_out_biases)
if(DEBUG == True): print("SHAPE out: " + str(out.get_shape()))
return out
# Get the result from the model
self.cnn_output = model(self.tf_yaw_input_vector)
def return_yaw(self, image):
""" Return the yaw angle associated with the input image.
@param image It is a colour image. It must be >= 64 pixel
"""
#Uncomment if you want to see the image
#cv2.imshow('image',image)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
h, w, d = image.shape
#check if the image has the right shape
if(h == w and h==64 and d==3):
image_normalised = np.add(image, -127) #normalisation of the input
feed_dict = {self.tf_yaw_input_vector : image_normalised}
yaw_raw = self._sess.run([self.cnn_output], feed_dict=feed_dict)
yaw_vector = np.multiply(yaw_raw, 100.0)
#yaw = yaw_raw #* 100 #cnn out is in range [-1, +1] --> [-100, + 100]
return yaw_vector
#If the image is > 64 pixel then resize it
if(h == w and h>64 and d==3):
image_resized = cv2.resize(image, (64, 64), interpolation = cv2.INTER_AREA)
image_normalised = np.add(image_resized, -127) #normalisation of the input
feed_dict = {self.tf_yaw_input_vector : image_normalised}
yaw_raw = self._sess.run([self.cnn_output], feed_dict=feed_dict)
yaw_vector = np.multiply(yaw_raw, 100.0) #cnn-out is in range [-1, +1] --> [-100, + 100]
return yaw_vector
#wrong shape
if(h != w or w<64 or h<64):
raise ValueError('[DEEPGAZE] CnnHeadPoseEstimator: the image given as input has wrong shape. Height and Width must be >= 64 pixel')
#wrong number of channels
if(d!=3):
raise ValueError('[DEEPGAZE] CnnHeadPoseEstimator: the image given as input does not have 3 channels, this function accepts only colour images.')
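# Minimal usage sketch, added for illustration only (not part of the original
# module). The checkpoint and image paths are placeholders, and the snippet
# targets the TF1-style session API used above.
if __name__ == '__main__':
    sess = tf.Session()
    estimator = CnnHeadPoseEstimator(sess)
    estimator.allocate_yaw_variables()                   # build the graph first
    estimator.load_yaw_variables('yaw_checkpoint.tf')    # placeholder checkpoint path
    face = cv2.imread('face_64x64.png')                  # placeholder 64x64 colour image
    print(estimator.return_yaw(face))                    # estimated yaw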
|
mit
| 2,551,760,302,004,005,000 | 45.785714 | 158 | 0.582032 | false | 3.24876 | false | false | false |
jrichte43/ProjectEuler
|
Problem-0345/solutions.py
|
1
|
2222
|
__problem_title__ = "Matrix Sum"
__problem_url___ = "https://projecteuler.net/problem=345"
__problem_description__ = "We define the Matrix Sum of a matrix as the maximum sum of matrix " \
"elements with each element being the only one in his row and column. " \
"For example, the Matrix Sum of the matrix below equals 3315 ( = 863 + " \
"383 + 343 + 959 + 767): 7 53 183 439 497 563 79 973 287 63 169 583 " \
"627 343 773 943 473 103 699 303 Find the Matrix Sum of: 7 53 183 439 " \
"863 497 383 563 79 973 287 63 343 169 583 627 343 773 959 943 767 473 " \
"103 699 303 957 703 583 639 913 447 283 463 29 23 487 463 993 119 883 " \
"327 493 423 159 743 217 623 3 399 853 407 103 983 89 463 290 516 212 " \
"462 350 960 376 682 962 300 780 486 502 912 800 250 346 172 812 350 " \
"870 456 192 162 593 473 915 45 989 873 823 965 425 329 803 973 965 " \
"905 919 133 673 665 235 509 613 673 815 165 992 326 322 148 972 962 " \
"286 255 941 541 265 323 925 281 601 95 973 445 721 11 525 473 65 511 " \
"164 138 672 18 428 154 448 848 414 456 310 312 798 104 566 520 302 " \
"248 694 976 430 392 198 184 829 373 181 631 101 969 613 840 740 778 " \
"458 284 760 390 821 461 843 513 17 901 711 993 293 157 274 94 192 156 " \
"574 34 124 4 878 450 476 712 914 838 669 875 299 823 329 699 815 559 " \
"813 459 522 788 168 586 966 232 308 833 251 631 107 813 883 451 509 " \
"615 77 281 613 459 205 380 274 302 35 805"
import timeit
class Solution():
@staticmethod
def solution1():
pass
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
|
gpl-3.0
| -5,434,478,510,974,057,000 | 51.904762 | 100 | 0.536004 | false | 3.248538 | false | false | false |
skdaccess/skdaccess
|
skdaccess/engineering/la/generic/stream.py
|
2
|
3472
|
# The MIT License (MIT)
# Copyright (c) 2018 Massachusetts Institute of Technology
#
# Author: Cody Rude
# This software has been created in projects supported by the US National
# Science Foundation and NASA (PI: Pankratius)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Standard library imports
from collections import OrderedDict
from io import StringIO
# Scikit Data Access imports
from skdaccess.framework.data_class import DataFetcherStream, TableWrapper
# Third party imports
from six.moves.urllib.parse import urlencode
from six.moves.urllib.request import urlopen
import pandas as pd
class DataFetcher(DataFetcherStream):
"""
Class for handling data requests to data.lacity.org
"""
def __init__(self, endpoint, parameters, label, verbose=False, app_token = None, **pandas_kwargs):
"""
Initialize Data Fetcher for accessing data.lacity.org
@param endpoint: Data endpoint string
@param parameters: Parameters to use when retrieving data
@param label: Label of pandas dataframe
@param verbose: Print out extra information
@param app_token: Application token to use to avoid throttling issues
@param pandas_kwargs: Any additional keyword arguments are passed to pandas.read_csv
"""
self.base_url = 'https://data.lacity.org/resource/'
self.base_url_and_endpoint = self.base_url + endpoint + '.csv?'
self.parameters = parameters
self.label = label
self.app_token = app_token
self.pandas_kwargs = pandas_kwargs
if '$$app_token' in parameters:
raise RuntimeError("Use app_token option in constructor instead of manually " +
"adding it into the the parameters")
if app_token != None:
self.parameters['$$app_token'] = app_token
super(DataFetcher, self).__init__([], verbose)
def output(self):
"""
Retrieve data from data.lacity.org
@return Table wrapper containing the specified data
"""
data_dict = OrderedDict()
url_query = self.base_url_and_endpoint + urlencode(self.parameters)
with urlopen(url_query) as remote_resource:
raw_string = remote_resource.read().decode()
string_data = StringIO(raw_string)
data_dict[self.label] = pd.read_csv(string_data, **self.pandas_kwargs)
return TableWrapper(data_dict)
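# Minimal usage sketch, added for illustration only (not part of the original
# module). The endpoint id below is a placeholder and must be replaced with a
# real data.lacity.org resource id; passing app_token avoids throttling.
if __name__ == '__main__':
    fetcher = DataFetcher(endpoint='abcd-1234',          # placeholder resource id
                          parameters={'$limit': '100'},  # Socrata/SODA query parameter
                          label='la_data')
    wrapper = fetcher.output()   # TableWrapper holding the result as a pandas DataFrame
    print(wrapper)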
|
mit
| -1,057,145,586,396,667,600 | 38.908046 | 102 | 0.702477 | false | 4.345432 | false | false | false |
saullocastro/pyNastran
|
pyNastran/converters/dev/ansys/ansys.py
|
1
|
5748
|
from numpy import zeros, array
class Ansys(object):
def __init__(self, log=None, debug=False):
pass
def read_ansys(self, ansys_filename):
with open(ansys_filename, 'r') as ansys_file:
lines = ansys_file.readlines()
nodes = []
elements = {}
i = 0
nlines = len(lines)
while i < nlines:
line = lines[i].strip()
if line.startswith(r'/nolist'):
i += 4
# line = (1i9,3e20.9e3)
snodes = []
i += 1
line = lines[i]
nnodes = 0
while not line.startswith('-1'):
#print('a =', line)
#snode = [float(val) for val in line.strip().split()[1:]]
snode = line.strip().split()[1:]
if len(snode) != 3:
print(snode)
print(line)
print(lines[i])
print(lines[i-1])
print(lines[i-2])
asdf1
snodes.append(snode)
line = lines[i]
#print(line)
i += 1
nnodes += 1
#print(snodes[:5])
#nodes = array(snodes, dtype='float32')
print('****%r' % line)
# nnodes = 793310
#asdf2
#line = lines[i]
#print(line)
i -= 1
#asdf
elif line.startswith('/wb,elem,start'):
i += 1
line = lines[i]
while line.startswith('/com'):
i += 1
et_line = lines[i].strip()
fmt_line = lines[i+2].strip()
i += 3
line = lines[i]
if fmt_line == '(19i9)':
# eblock,19,solid,,71892
while not line.startswith('-1'):
# 1 1 1 1 0 0 0 0 10 0 697401 1297419 1304724 1297455 1302783 2097856 2097997 2097853 2097855
# 2109421 2097995
# 27 27 27 27 0 0 0 0 10 0 387759 631841 659167 639072 631842 675592 723723 675588 675585
# 675599 675595
line = lines[i].strip() + lines[i+1].strip()
i += 2
print(line)
sline = line.split()
a = sline[0]
b = sline[1]
c = sline[2]
d = sline[3]
assert a == b, 'a=%r b=%r c=%r d=%r' % (a, b, c, d)
assert a == c, 'a=%r b=%r c=%r d=%r' % (a, b, c, d)
assert a == d, 'a=%r b=%r c=%r d=%r' % (a, b, c, d)
e = sline[3]
f = sline[4]
g = sline[5]
h = sline[6]
assert e == f, 'e=%r f=%r g=%r h=%r' % (e, f, g, h)
#asdf
else:
raise NotImplementedError(fmt_line)
print(line)
asdf
else:
if line.startswith('/'):
print(line)
i += 1
def main():
model = Ansys()
ansys_filename = 'ds.dat'
model.read_ansys(ansys_filename)
if __name__ == '__main__': # pragma: no cover
main()
"""
/com,*********** Create Remote Point "Internal Remote Point 39" ***********
! -------- Remote Point Used by "Fixed - Line Body To EndCap 14054021-1 d" --------
*set,_npilot,803315
_npilot474=_npilot
et,332,170
type,332
real,332
mat,332
keyo,332,2,1 ! don't fix pilot node
keyo,332,4,0 ! MPC for all DOF's
tshape,pilo
en,501901,803315 ! create pilot node for rigid link
tshape
en,501902,803315,127827
/com,*********** Create Remote Point "Internal Remote Point 40" ***********
! -------- Remote Point Used by "Fixed - Line Body To EndCap 14054021-1 d" --------
*set,tid,334
*set,cid,333
et,cid,175
et,tid,170
keyo,tid,2,1 ! Don't fix the pilot node
keyo,tid,4,111111
keyo,cid,12,5 ! Bonded Contact
keyo,cid,4,0 ! Rigid CERIG style load
keyo,cid,2,2 ! MPC style contact
mat,333
real,333
type,333
en,501903,418114
en,501904,418115
en,501905,418116
en,501906,418117
en,501907,418118
en,501908,418119
en,501909,418120
en,501910,418121
en,501911,418122
en,501912,418123
en,501913,418124
en,501914,427511
en,501915,427512
en,501916,427518
en,501917,427524
en,501918,427528
en,501919,427533
en,501920,427539
en,501921,427544
en,501922,427551
en,501923,427562
en,501924,427569
*set,_npilot,803316
_npilot475=_npilot
type,tid
mat ,cid
real,cid
tshape,pilo
en,501925,_npilot
tshape
"""
"""
et,2,187
et,27,187 # element, group 27, element_type=187 -> tet10
et,30,188
etype nastran_name
187 tet10
186 hexa20
188 beam
eblock,19,solid,,213
eblock,19,solid,,8
#----------------------------------------------------------------
et,_jid,184
et,tid,170
et,cid,174
keyo,tid,2,1 ! Don't fix the pilot node
keyo,tid,4,111111
keyo,cid,12,5 ! Bonded Contact
keyo,cid,4,2 ! Rigid CERIG style load
keyo,cid,2,2 ! MPC style contact
eblock,10,,,16
"""
|
lgpl-3.0
| -8,193,591,645,404,051,000 | 28.634021 | 200 | 0.428323 | false | 3.456404 | false | false | false |
hyperspy/hyperspyUI
|
hyperspyui/modelwrapper.py
|
1
|
8552
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 The HyperSpyUI developers
#
# This file is part of HyperSpyUI.
#
# HyperSpyUI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpyUI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpyUI. If not, see <http://www.gnu.org/licenses/>.
"""
Created on Tue Nov 04 16:25:54 2014
@author: Vidar Tonaas Fauske
"""
from qtpy import QtCore
#from hyperspy.model import Model
import hyperspy.models.eelsmodel
from .actionable import Actionable
from functools import partial
from hyperspyui.widgets.stringinput import StringInputDialog
# TODO: Add smartfit for EELSModel
def tr(text):
return QtCore.QCoreApplication.translate("ModelWrapper", text)
class ModelWrapper(Actionable):
added = QtCore.Signal((object, object), (object,))
removed = QtCore.Signal((object, object), (object,))
def __init__(self, model, signal_wrapper, name):
super(ModelWrapper, self).__init__()
self.model = model
self.signal = signal_wrapper
self.name = name
if self.signal.signal is not self.model.signal:
raise ValueError("SignalWrapper doesn't match model.signal")
self.components = {}
self.update_components()
self.fine_structure_enabled = False
# Default actions
self.add_action('plot', tr("&Plot"), self.plot)
self.add_action('fit', tr("&Fit"), self.fit)
self.add_action('multifit', tr("&Multifit"), self.multifit)
self.add_action('set_signal_range', tr("Set signal &range"),
self.set_signal_range)
if isinstance(self.model, hyperspy.models.eelsmodel.EELSModel):
self.add_action('lowloss', tr("Set low-loss"), self.set_lowloss)
self.add_action('fine_structure', tr("Enable fine &structure"),
self.toggle_fine_structure)
f = partial(self.signal.remove_model, self)
self.add_action('delete', tr("&Delete"), f)
def plot(self):
self.signal.keep_on_close = True
self.model.plot()
self.signal.keep_on_close = False
self.signal.update_figures()
self.signal.signal_plot.setProperty('hyperspyUI.ModelWrapper', self)
def update_plot(self):
self.model.update_plot()
def record_code(self, code):
self.signal.mainwindow.record_code("model = ui.get_selected_model()")
self.signal.mainwindow.record_code(code)
def _args_for_record(self, args, kwargs):
argstr = str(args)[1:-1]
kwargstr = str(kwargs)[1:-1]
kwargstr = kwargstr.replace(": ", "=")
if argstr and kwargstr:
return ", ".join((argstr, kwargstr))
else:
return argstr + kwargstr
def fit(self, *args, **kwargs):
self.signal.keep_on_close = True
self.model.fit(*args, **kwargs)
self.signal.keep_on_close = False
self.signal.update_figures()
self.record_code("model.fit(%s)" %
self._args_for_record(args, kwargs))
def multifit(self, *args, **kwargs):
self.signal.keep_on_close = True
self.model.multifit(*args, **kwargs)
self.signal.keep_on_close = False
self.signal.update_figures()
self.record_code("model.multifit(%s)" %
self._args_for_record(args, kwargs))
def smartfit(self, *args, **kwargs):
if hasattr(self.model, 'smartfit'):
self.signal.keep_on_close = True
self.model.smartfit(*args, **kwargs)
self.signal.keep_on_close = False
self.signal.update_figures()
self.record_code("model.smartfit(%)" %
self._args_for_record(args, kwargs))
def fit_component(self, component):
# This is a non-blocking call, which means the normal keep_on_close +
# update_figures won't work. To make sure we keep our figures,
# we force a plot first if it is not active already.
if not self.model.signal._plot.is_active:
self.plot()
self.model.fit_component(component)
self.record_code("model.fit_component(%s)" % component.name)
def set_signal_range(self, *args, **kwargs):
self.signal.keep_on_close = True
self.model.set_signal_range(*args, **kwargs)
self.signal.keep_on_close = False
self.signal.update_figures()
self.record_code("model.set_signal_range(%s)" %
self._args_for_record(args, kwargs))
def set_lowloss(self, signal=None):
if signal is None:
signal = self.signal.mainwindow.select_x_signals(
1, ['Select low-loss'])
if signal is None:
return
self.model.lowloss = signal.signal
self.record_code("model.set_lowloss(low_loss_signal)")
def toggle_fine_structure(self):
if not isinstance(self.model, hyperspy.models.eelsmodel.EELSModel):
raise TypeError(
tr("Model is not EELS model. Can not toggle fine structure"))
if self.fine_structure_enabled:
self.model.disable_fine_structure()
self.actions['fine_structure'].setText(
tr("Enable fine &structure"))
self.record_code("model.disable_fine_structure()")
else:
self.model.enable_fine_structure()
self.actions['fine_structure'].setText(
tr("Disable fine &structure"))
self.record_code("model.enable_fine_structure()")
self.fine_structure_enabled = not self.fine_structure_enabled
def update_components(self):
"""
        Updates internal component list to match model's list (called e.g.
after console execute and in constructor)
"""
# Add missing
for c in self.model:
if c.name not in list(self.components.keys()):
self.components[c.name] = c
self.component_added(c)
# Remove lingering
ml = [c.name for c in self.model]
rm = [cn for cn in self.components.keys() if cn not in ml]
for n in rm:
c = self.components.pop(n)
self.component_removed(c)
def add_component(self, component):
if isinstance(component, type):
nec = ['EELSCLEdge', 'Spline', 'ScalableFixedPattern']
if component.__name__ in nec:
raise TypeError(
tr("Component of type %s currently not supported")
% component)
elif component.__name__ == 'Expression':
dlg = StringInputDialog(prompt="Enter expression:")
expression = dlg.prompt_modal(rejection=None)
if expression:
component = component(expression, 'Expression')
else:
return
else:
component = component()
added = False
if component not in self.model:
self.model.append(component)
added = True
self.record_code("model.append(%s)" % component.name)
if component.name not in self.components:
self.components[component.name] = component
added = True
if added:
self.component_added(component)
def remove_component(self, component):
removed = False
if component in self.model:
self.model.remove(component)
self.record_code("model.remove(%s)" % component.name)
removed = True
if component.name in self.components:
self.components.pop(component.name)
removed = True
if removed:
self.component_removed(component)
def component_added(self, component):
self.update_plot()
self.added[object, object].emit(component, self)
self.added[object].emit(component)
def component_removed(self, component):
self.update_plot()
self.removed[object, object].emit(component, self)
self.removed[object].emit(component)
|
gpl-3.0
| 8,070,357,070,856,363,000 | 36.840708 | 77 | 0.605005 | false | 3.99813 | false | false | false |
douglas/toxiproxy-python
|
toxiproxy/server.py
|
1
|
3386
|
# coding: utf-8
from future.utils import raise_with_traceback, viewitems, listvalues
from .api import APIConsumer
from .proxy import Proxy
from .exceptions import ProxyExists
from .utils import can_connect_to
class Toxiproxy(object):
""" Represents a Toxiproxy server """
def proxies(self):
""" Returns all the proxies registered in the server """
proxies = APIConsumer.get("/proxies").json()
proxies_dict = {}
for name, values in viewitems(proxies):
# Lets create a Proxy object to hold all its data
proxy = Proxy(**values)
# Add the new proxy to the toxiproxy proxies collection
proxies_dict.update({name: proxy})
return proxies_dict
def destroy_all(self):
proxies = listvalues(self.proxies())
for proxy in proxies:
self.destroy(proxy)
def get_proxy(self, proxy_name):
""" Retrive a proxy if it exists """
proxies = self.proxies()
if proxy_name in proxies:
return proxies[proxy_name]
else:
return None
def running(self):
""" Test if the toxiproxy server is running """
return can_connect_to(APIConsumer.host, APIConsumer.port)
def version(self):
""" Get the toxiproxy server version """
if self.running() is True:
return APIConsumer.get("/version").content
else:
return None
def reset(self):
""" Re-enables all proxies and disables all toxics. """
return bool(APIConsumer.post("/reset"))
def create(self, upstream, name, listen=None, enabled=None):
""" Create a toxiproxy proxy """
if name in self.proxies():
raise_with_traceback(ProxyExists("This proxy already exists."))
# Lets build a dictionary to send the data to the Toxiproxy server
json = {
"upstream": upstream,
"name": name
}
if listen is not None:
json["listen"] = listen
else:
json["listen"] = "127.0.0.1:0"
if enabled is not None:
json["enabled"] = enabled
proxy_info = APIConsumer.post("/proxies", json=json).json()
proxy_info["api_consumer"] = APIConsumer
# Lets create a Proxy object to hold all its data
proxy = Proxy(**proxy_info)
return proxy
def destroy(self, proxy):
""" Delete a toxiproxy proxy """
if isinstance(proxy, Proxy):
return proxy.destroy()
else:
return False
def populate(self, proxies):
""" Create a list of proxies from an array """
populated_proxies = []
for proxy in proxies:
existing = self.get_proxy(proxy["name"])
if existing is not None and (existing.upstream != proxy["upstream"] or existing.listen != proxy["listen"]):
self.destroy(existing)
existing = None
if existing is None:
proxy_instance = self.create(**proxy)
populated_proxies.append(proxy_instance)
return populated_proxies
def update_api_consumer(self, host, port):
""" Update the APIConsumer host and port """
APIConsumer.host = host
APIConsumer.port = port
APIConsumer.base_url = "http://%s:%s" % (host, port)
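# --- Minimal usage sketch (added for illustration, not part of the original
# --- module). Assumes a toxiproxy server is already listening on the host and
# --- port configured in APIConsumer; the names and addresses below are made up.
if __name__ == "__main__":
    server = Toxiproxy()
    if server.running():
        proxy = server.create(upstream="127.0.0.1:6379",
                              name="redis_proxy",
                              listen="127.0.0.1:26379")
        print(server.get_proxy("redis_proxy"))
        server.destroy(proxy)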
|
mit
| -5,460,193,500,709,897,000 | 27.453782 | 119 | 0.582989 | false | 4.478836 | false | false | false |
robket/BioScripts
|
alignment.py
|
1
|
9138
|
import numpy as np
from matplotlib import pyplot as plt
from scipy.misc import toimage
from collections import defaultdict, Counter
from types import SimpleNamespace
from PIL import ImageDraw
# This color table is sourced from https://github.com/trident01/BioExt-1/blob/master/AlignmentImage.java
LIGHT_GRAY = 196
FIXED_COLOR_TABLE = defaultdict(lambda: [0, 0, 0], {
"A": [255, 0, 0],
"C": [255, 255, 0],
"T": [0, 255, 0],
"G": [190, 0, 95],
"-": [LIGHT_GRAY, LIGHT_GRAY, LIGHT_GRAY]})
GRAY_GAPS_COLOR_TABLE = defaultdict(lambda: [0, 0, 0], {
"-": [LIGHT_GRAY, LIGHT_GRAY, LIGHT_GRAY]})
BLACK_COLOR_TABLE = defaultdict(lambda: [0, 0, 0])
class Alignment:
def __init__(self, query_start, query_seq, target_start, target_seq, sequence_name, target_label, expected_errors):
self.name = sequence_name
self.target_label = target_label
self.expected_errors = expected_errors
self.query_start = int(query_start) - 1
self.query_seq = query_seq
query_gap_count = query_seq.count("-")
self.query_length = len(query_seq) - query_gap_count
self.target_start = int(target_start) - 1
self.target_seq = target_seq
target_gap_count = target_seq.count("-")
self.target_length = len(target_seq) - target_gap_count
self.no_gap_length = len(target_seq) - target_gap_count - query_gap_count
if len(target_seq) != len(query_seq):
raise ValueError("Length of target sequence not equal to length of query sequence")
def alignment_iterator(alignment, ignore_case=True, include_gaps=False):
target_index = 0
target_offset = 0
query_index = 0
while target_index < len(alignment.target_seq) and query_index < len(alignment.query_seq):
if alignment.target_seq[target_index] == "-": # If it is an insertion
target_offset += 1
elif alignment.query_seq[query_index] != "-" or include_gaps:
reference_index = alignment.target_start + target_index - target_offset
query_nucleotide = alignment.query_seq[query_index].upper() if ignore_case else alignment.query_seq[query_index]
target_nucleotide = alignment.target_seq[target_index].upper() if ignore_case else alignment.target_seq[target_index]
yield SimpleNamespace(reference_index=reference_index,
target_nucleotide=target_nucleotide,
query_nucleotide=query_nucleotide)
target_index += 1
query_index += 1
def count_mismatches(alignment, ignore_case=True):
mismatch_count = 0
for position in alignment_iterator(alignment, ignore_case):
if position.target_nucleotide != position.query_nucleotide:
mismatch_count += 1
return mismatch_count
def save_expected_error_rates(alignments, output_file):
expected_error_rates = [a.expected_errors / a.query_length for a in alignments]
plt.cla()
plt.hist(expected_error_rates, 50, log=True)
plt.ylim(ymin=0.9)
plt.xlabel('Expected Error Rate')
plt.ylabel('Number of sequences')
plt.tick_params(which='both', direction='out')
plt.title('Expected Error Rates')
plt.grid(True)
plt.savefig(output_file)
def save_mismatch_rates(alignments, output_file, ignore_case=True):
mismatch_rates = [count_mismatches(a, ignore_case) / a.no_gap_length for a in alignments]
plt.cla()
plt.hist(mismatch_rates, 50, log=True)
plt.ylim(ymin=0.9)
plt.xlabel('Rate of mismatches')
plt.ylabel('Number of sequences')
plt.tick_params(which='both', direction='out')
plt.title('Mismatch Rates')
plt.grid(True)
plt.savefig(output_file)
def gap_distribution(sequence):
dist = Counter()
count_length = 0
for char in sequence:
if char == "-":
count_length += 1
elif count_length > 0:
dist[count_length] += 1
count_length = 0
if count_length > 0:
dist[count_length] += 1
return dist
def save_insertion_or_deletion_dist(alignments, output_file, insertion_not_deletion=True):
size_counter = Counter()
for a in alignments:
size_counter += gap_distribution(a.target_seq if insertion_not_deletion else a.query_seq)
sizes, counts = zip(*size_counter.items())
number_of_bins = max(sizes)
number_of_bins = round(number_of_bins / np.ceil(number_of_bins/50))
plt.cla()
n, bins, patches = plt.hist(sizes, number_of_bins, weights=counts, log=True)
plt.ylim(ymin=0.9)
plt.xlim(xmin=1)
plt.xlabel('Size of insertion' if insertion_not_deletion else 'Size of deletion')
plt.ylabel('Count')
plt.tick_params(which='both', direction='out')
plt.title('Insertion size distribution' if insertion_not_deletion else 'Deletion size distribution')
plt.grid(True)
plt.savefig(output_file)
# Get nucleotide distribution
def nucleotide_distribution(alignments, ignore_case=False, include_gaps=True):
max_index = 0
distribution = defaultdict(Counter)
for a in alignments:
for position in alignment_iterator(a, ignore_case, include_gaps):
distribution[position.reference_index][position.query_nucleotide] += 1
max_index = max(max_index, a.target_start + a.target_length)
return [distribution[i] for i in range(max_index)]
def save_nucleotide_map(alignments, output, ignore_case=True, include_gaps=True):
nucleotides = nucleotide_distribution(alignments, ignore_case, include_gaps)
width = len(nucleotides)
keys = set()
for distribution_at_base in nucleotides:
keys.update(set(distribution_at_base.keys()))
keys = sorted(list(keys), key=lambda x: "ZZZ" if x == "-" else x)
nucleotide_count_array = np.zeros((len(keys), width), dtype=np.uint32)
for i, key in enumerate(keys):
for j, counts in enumerate(nucleotides):
nucleotide_count_array[i, j] = counts[key]
cum_sum = nucleotide_count_array.cumsum(axis=0)
height = cum_sum[-1,].max()
data_matrix = np.full((height, width, 3), 255, dtype=np.uint8)
for x in range(width):
for i, key in enumerate(keys):
start = 0 if i == 0 else cum_sum[i - 1, x]
end = cum_sum[i, x]
data_matrix[start:end, x, 0:3] = FIXED_COLOR_TABLE[key]
img = to_image(data_matrix[::-1,], ruler_underneath=True)
img.save(output)
# Get coverage map
def coverage_map(alignments, include_gaps=False):
max_index = 0
coverage = Counter()
for a in alignments:
for position in alignment_iterator(a, True, include_gaps):
coverage[position.reference_index] += 1
max_index = max(max_index, a.target_start + a.target_length)
return [coverage[i] for i in range(max_index)]
def save_coverage_map(alignments, output):
coverage_with_gaps = coverage_map(alignments, True)
coverage_without_gaps = coverage_map(alignments, False)
width = len(coverage_with_gaps)
height = max(coverage_with_gaps)
data_matrix = np.full((height, width, 3), 255, dtype=np.uint8)
for x in range(width):
y1 = coverage_without_gaps[x]
y2 = coverage_with_gaps[x]
data_matrix[0:y1, x, 0:3] = 0
data_matrix[y1:y2, x, 0:3] = 127
img = to_image(data_matrix[::-1], add_ruler=True, ruler_underneath=True)
img.save(output)
def save_alignment_map(coords, output_file, sort_key=sum, crop=True, no_ruler=False):
if crop:
minimum = min(coords, key=lambda x: x[0])[0]
else:
minimum = 0
maximum = max(coords, key=lambda x: x[1])[1]
dimensions = (len(coords), maximum - minimum)
data_matrix = np.full((dimensions[0], dimensions[1] + 1), 255, dtype=np.uint8)
if sort_key is not None:
coords.sort(key=sort_key)
is_multiple_alignment = len(coords[0]) > 3 and type(coords[0][3]) == list
# Greyscale over the bounds (or black if not multiple alignment)
for i, coord in enumerate(coords):
start = coord[0]
end = coord[1]
# np.put(data_matrix[i], range(start - minimum, end - minimum), 0)
data_matrix[i, (start - minimum):(end - minimum)] = LIGHT_GRAY if is_multiple_alignment else 0
# Black over the subalignments, if any
if is_multiple_alignment:
for i, coord in enumerate(coords):
for subalignment in coord[3]:
start = subalignment[0]
end = subalignment[1]
# np.put(data_matrix[i], range(start - minimum, end - minimum), 0)
data_matrix[i, (start - minimum):(end - minimum)] = 0
img = to_image(data_matrix, not no_ruler, offset=minimum)
img.save(output_file)
def to_image(data_matrix, add_ruler=True, ruler_underneath = False, offset=1):
maximum = offset + data_matrix.shape[1]
if add_ruler:
shape = list(data_matrix.shape)
shape[0] = 12 # Number of rows
ruler_matrix = np.full(shape, 255, dtype=data_matrix.dtype)
# tens ticks
ruler_matrix[0 if ruler_underneath else 11, 10-(offset%10)::10] = 0
# 50s ticks
ruler_matrix[1 if ruler_underneath else 10, 50-(offset%50)::50] = 0
if ruler_underneath:
img = toimage(np.vstack([data_matrix, ruler_matrix]))
else:
img = toimage(np.vstack([ruler_matrix, data_matrix]))
draw = ImageDraw.Draw(img)
# Hundreds words
for i in range((offset//100) + 1, maximum // 100 + 1):
centering = (6 * (int(np.log10(i)) + 3) - 1) // 2
draw.text((i * 100 - centering - offset, (data_matrix.shape[0] + 2) if ruler_underneath else 0), str(i) + "00", fill="black")
else:
img = toimage(data_matrix)
return img
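# --- Minimal usage sketch (added for illustration, not part of the original
# --- script). The sequences below are toy data; real input would come from an
# --- alignment parser elsewhere in the pipeline.
if __name__ == "__main__":
  aln = Alignment(query_start=1, query_seq="ACGT-ACGT",
                  target_start=1, target_seq="ACGTAAC-T",
                  sequence_name="read_1", target_label="ref",
                  expected_errors=0.5)
  print("mismatches:", count_mismatches(aln))
  print("insertion sizes:", gap_distribution(aln.target_seq))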
|
mit
| -6,908,880,567,246,453,000 | 37.075 | 131 | 0.680893 | false | 3.10394 | false | false | false |
sunrenjie/py-windows-tools
|
py_windows_tools/windows/events.py
|
1
|
3139
|
# -*- coding: utf-8 -*-
import re
from oslo_log import log as logging
from py_windows_tools.utilities import misc
LOG = logging.getLogger(__name__)
class WindowsEvents(object):
@staticmethod
def get_command_get_events(category, n):
return ['powershell', 'Get-EventLog %s -newest %d' % (category, n)]
@staticmethod
def get_command_get_parsed_events(category, num_events=None):
if num_events:
return ['powershell', 'Get-EventLog %s -newest %d | Format-List' % (category, num_events)]
else:
return ['powershell', 'Get-EventLog %s | Format-List' % category]
@staticmethod
def get_command_clear_events(category):
return ['powershell', 'Clear-EventLog %s' % category]
@classmethod
def clear_events(cls, category):
cmd = cls.get_command_clear_events(category)
for l in misc.create_process_and_yield_output_lines(cmd):
l = l.rstrip()
if len(l) > 0:
LOG.debug(l)
@staticmethod
def search_string_for_ip_address(s):
search = re.search('[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+', s)
if search:
ip = search.group(0)
if ip != '0.0.0.0':
return ip
return None
@classmethod
def yield_lines_from_event_log_file(cls, f):
with open(f, 'r') as h:
for l in h:
yield misc.decode_string_with_unknown_encoding(l)
@classmethod
def yield_login_failure_ips(cls, num_events=None, data_source=None):
"""
Yield one ip (string) upon each request from the data source
:param num_events:
:param data_source: a yield object that emits one Windows event log
line upon every request; defaults to the Windows
event log system.
:return:
"""
if not data_source:
cmd = cls.get_command_get_parsed_events("Security", num_events)
data_source = misc.create_process_and_yield_output_lines(cmd)
within = False
for l in data_source:
if within:
if re.search('^TimeGenerated', l):
within = False
                elif re.search(u'源网络地址', l):  # TODO: ugly hacking; '源网络地址' is "Source Network Address" in Chinese-locale logs
ip = cls.search_string_for_ip_address(l)
if ip:
yield ip
            elif re.search(u'帐户登录失败。', l):  # '帐户登录失败。' is "An account failed to log on." in Chinese-locale logs
within = True
continue
@classmethod
def get_significant_login_failure_ips_by_count(cls, num_events, num_failures):
addr2count = {}
for ip in cls.yield_login_failure_ips(num_events):
if ip in addr2count:
addr2count[ip] += 1
else:
addr2count[ip] = 1
LOG.debug("login error statistics {IP => count} are: %s" % str(addr2count))
ips = set()
for a, c in addr2count.iteritems():
if c > num_failures:
ips.add(a)
LOG.debug("significant login error IPs are: %s" % ','.join(sorted(ips)))
return ips
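# --- Minimal usage sketch (added for illustration, not part of the original
# --- module). Requires a Windows host with PowerShell; the thresholds below
# --- are arbitrary.
if __name__ == "__main__":
    suspicious = WindowsEvents.get_significant_login_failure_ips_by_count(
        num_events=1000, num_failures=5)
    print(sorted(suspicious))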
|
apache-2.0
| -7,706,443,120,105,495,000 | 34 | 102 | 0.547673 | false | 3.762077 | false | false | false |
bram85/topydo
|
topydo/commands/ListCommand.py
|
1
|
10992
|
# Topydo - A todo.txt client written in Python.
# Copyright (C) 2014 - 2015 Bram Schoenmakers <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import sys
from topydo.lib.Config import config
from topydo.lib.ExpressionCommand import ExpressionCommand
from topydo.lib.Filter import HiddenTagFilter, InstanceFilter
from topydo.lib.ListFormat import ListFormatError
from topydo.lib.prettyprinters.Format import PrettyPrinterFormatFilter
from topydo.lib.printers.PrettyPrinter import pretty_printer_factory
from topydo.lib.Sorter import Sorter
from topydo.lib.TodoListBase import InvalidTodoException
from topydo.lib.Utils import get_terminal_size
from topydo.lib.View import View
class ListCommand(ExpressionCommand):
def __init__(self, p_args, p_todolist, #pragma: no branch
p_out=lambda a: None,
p_err=lambda a: None,
p_prompt=lambda a: None):
super().__init__(
p_args, p_todolist, p_out, p_err, p_prompt)
self.printer = None
self.sort_expression = config().sort_string()
self.group_expression = config().group_string()
self.show_all = False
self.ids = None
self.format = config().list_format()
def _poke_icalendar(self):
"""
Attempts to import the icalendar package. Returns True if it
succeeds, otherwise False.
"""
try:
import icalendar as _
except ImportError: # pragma: no cover
self.error("icalendar package is not installed.")
return False
return True
def _process_flags(self):
opts, args = self.getopt('f:F:g:i:n:Ns:x')
for opt, value in opts:
if opt == '-x':
self.show_all = True
elif opt == '-s':
self.sort_expression = value
elif opt == '-f':
if value == 'json':
from topydo.lib.printers.Json import JsonPrinter
self.printer = JsonPrinter()
elif value == 'ical':
if self._poke_icalendar():
from topydo.lib.printers.Ical import IcalPrinter
self.printer = IcalPrinter(self.todolist)
elif value == 'dot':
from topydo.lib.printers.Dot import DotPrinter
self.printer = DotPrinter(self.todolist)
# a graph without dependencies is not so useful, hence
# show all
self.show_all = True
else:
self.printer = None
elif opt == '-F':
self.format = value
elif opt == '-g':
self.group_expression = value
elif opt == '-N':
# 2 lines are assumed to be taken up by printing the next prompt
# display at least one item
self.limit = ListCommand._N_lines()
elif opt == '-n':
try:
self.limit = int(value)
except ValueError:
pass # use default value in configuration
elif opt == '-i':
self.ids = value.split(',')
# when a user requests a specific ID, it should always be shown
self.show_all = True
self.args = args
def _filters(self):
"""
Additional filters to:
- select particular todo items given with the -i flag,
          - hide appropriately tagged items in the absence of the -x flag.
"""
filters = super()._filters()
if self.ids:
def get_todo(p_id):
"""
Safely obtains a todo item given the user-supplied ID.
Returns None if an invalid ID was entered.
"""
try:
return self.todolist.todo(p_id)
except InvalidTodoException:
return None
todos = [get_todo(i) for i in self.ids]
filters.append(InstanceFilter(todos))
if not self.show_all:
filters.append(HiddenTagFilter())
return filters
def _print(self):
"""
Prints the todos in the right format.
Defaults to normal text output (with possible colors and other pretty
printing). If a format was specified on the commandline, this format is
sent to the output.
"""
if self.printer is None:
# create a standard printer with some filters
indent = config().list_indent()
final_format = ' ' * indent + self.format
filters = []
filters.append(PrettyPrinterFormatFilter(self.todolist, final_format))
self.printer = pretty_printer_factory(self.todolist, filters)
try:
if self.group_expression:
self.out(self.printer.print_groups(self._view().groups))
else:
self.out(self.printer.print_list(self._view().todos))
except ListFormatError:
self.error('Error while parsing format string (list_format config'
' option or -F)')
def _view(self):
sorter = Sorter(self.sort_expression, self.group_expression)
filters = self._filters()
return View(sorter, filters, self.todolist)
@staticmethod
def _N_lines():
''' Determine how many lines to print, such that the number of items
displayed will fit on the terminal (i.e one 'screen-ful' of items)
This looks at the environmental prompt variable, and tries to determine
how many lines it takes up.
On Windows, it does this by looking for the '$_' sequence, which indicates
a new line, in the environmental variable PROMPT.
Otherwise, it looks for a newline ('\n') in the environmental variable
PS1.
'''
lines_in_prompt = 1 # prompt is assumed to take up one line, even
# without any newlines in it
if "win32" in sys.platform:
lines_in_prompt += 1 # Windows will typically print a free line after
# the program output
a = re.findall(r'\$_', os.getenv('PROMPT', ''))
lines_in_prompt += len(a)
else:
a = re.findall('\\n', os.getenv('PS1', ''))
lines_in_prompt += len(a)
n_lines = get_terminal_size().lines - lines_in_prompt
# print a minimum of one item
n_lines = max(n_lines, 1)
return n_lines
def execute(self):
if not super().execute():
return False
try:
self._process_flags()
except SyntaxError: # pragma: no cover
# importing icalendar failed, most likely due to Python 3.2
self.error("icalendar is not supported in this Python version.")
return False
self._print()
return True
def usage(self):
return """Synopsis: ls [-x] [-s <SORT EXPRESSION>]
[-g <GROUP EXPRESSION>] [-f <OUTPUT FORMAT>] [-F <FORMAT STRING>]
[-i <NUMBER 1>[,<NUMBER 2> ...]] [-N | -n <INTEGER>] [EXPRESSION]"""
def help(self):
return """\
Lists all relevant todos. A todo is relevant when:
* has not been completed yet,
* the start date (if present) has passed, and
* there are no subitems that need to be completed.
When an EXPRESSION is given, only the todos matching that EXPRESSION are shown.
-f : Specify the OUTPUT format, being 'text' (default), 'dot' or 'ical' or
'json'.
* 'text' - Text output with colors and indentation if applicable.
* 'dot' - Prints a dependency graph for the selected items in GraphViz
Dot format.
* 'ical' - iCalendar (RFC 2445). Is not supported in Python 3.2. Be aware
that this is not a read-only operation, todo items may obtain
an 'ical' tag with a unique ID. Completed todo items may be
archived.
* 'json' - Javascript Object Notation (JSON)
-F : Specify the format of the text ('text' format), which may contain
placeholders that may be expanded if the todo has such attribute. If such
attribute does not exist, then it expands to an empty string.
%c: Absolute creation date.
%C: Relative creation date.
%d: Absolute due date.
%D: Relative due date.
%h: Relative due and start date (due in 3 days, started 3 days ago)
%H: Like %h with creation date.
%i: Todo number.
%I: Todo number padded with spaces (always 3 characters wide).
%k: List of tags separated by spaces (excluding hidden tags).
%K: List of all tags separated by spaces.
%l: Line number.
%L: Line number padded with spaces (always 3 characters wide).
%p: Priority.
%P: Priority or placeholder space if no priority.
%s: Todo text.
%S: Todo text, truncated such that an item fits on one line.
%t: Absolute creation date.
%T: Relative creation date.
%u: Todo's text-based ID.
%U: Todo's text-based ID padded with spaces.
%x: 'x' followed by absolute completion date.
%X: 'x' followed by relative completion date.
\%: Literal percent sign.
Conditional characters can be added with blocks surrounded by curly
braces, they will only appear when a placeholder expanded to a value.
E.g. %{(}p{)} will print '(C)' when the todo item has priority C, or ''
(empty string) when an item has no priority set.
A tab character serves as a marker to start right alignment.
-g : Group items according to a GROUP EXPRESSION. A group expression is similar
to a sort expression. Defaults to the group expression in the
configuration.
-i : Comma separated list of todo IDs to print.
-n : Number of items to display. Defaults to the value in the configuration.
-N : Limit number of items displayed such that they fit on the terminal.
-s : Sort the list according to a SORT EXPRESSION. Defaults to the sort
expression in the configuration.
-x : Show all todos (i.e. do not filter on dependencies, relevance, or hidden
status).\
"""
|
gpl-3.0
| -670,478,974,875,376,300 | 37.704225 | 86 | 0.593705 | false | 4.317361 | true | false | false |
andrewschaaf/pj-closure
|
js/goog/math.py
|
1
|
3556
|
#<pre>Copyright 2006 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.</pre>
from goog.array import map, reduce
def randomInt(a):
return Math.floor(Math.random() * a)
def uniformRandom(a, b):
'sample from [a, b)'
return a + Math.random() * (b - a)
def clamp(value, min, max):
return Math.min(Math.max(value, min), max)
def modulo(a, b):
r = a % b;
# If r and b differ in sign, add b to wrap the result to the correct sign.
return r + b if (r * b < 0) else r
def lerp(a, b, x):
return a + x * (b - a)
def nearlyEquals(a, b, opt_tolerance):
return Math.abs(a - b) <= (opt_tolerance or 0.000001)
def standardAngle(angle):
return modulo(angle, 360)
def toRadians(angleDegrees):
return angleDegrees * Math.PI / 180
def toDegrees(angleRadians):
return angleRadians * 180 / Math.PI
def angleDx(degrees, radius):
return radius * Math.cos(toRadians(degrees))
def angleDy(degrees, radius):
return radius * Math.sin(toRadians(degrees))
def angle(x1, y1, x2, y2):
return standardAngle(toDegrees(Math.atan2(y2 - y1, x2 - x1)))
def angleDifference(startAngle, endAngle):
d = standardAngle(endAngle) - standardAngle(startAngle)
if (d > 180):
d = d - 360
elif (d <= -180):
d = 360 + d
return d
def sign(x):
return (0 if x == 0 else (
-1 if x < 0 else 1))
def longestCommonSubsequence(array1, array2, opt_compareFn, opt_collectorFn):
compare = opt_compareFn or (lambda a, b: a == b)
collect = opt_collectorFn or (lambda i1, i2: array1[i1])
length1 = array1.length;
length2 = array2.length;
arr = [];
for i in range(length1 + 1):
arr[i] = []
arr[i][0] = 0
for j in range(length2 + 1):
arr[0][j] = 0
for i in range(1, length1 + 1):
    for j in range(1, length2 + 1):
if compare(array1[i - 1], array2[j - 1]):
arr[i][j] = arr[i - 1][j - 1] + 1
else:
arr[i][j] = Math.max(arr[i - 1][j], arr[i][j - 1])
# Backtracking
result = [];
i = length1
j = length2
while i > 0 and j > 0:
if compare(array1[i - 1], array2[j - 1]):
result.unshift(collect(i - 1, j - 1))
i -= 1
j -= 1
else:
if arr[i - 1][j] > arr[i][j - 1]:
i -= 1
else:
j -= 1
return result
def sum(var_args):
return reduce(
arguments,
lambda sum, value: sum + value,
0)
def average(var_args):
return sum.apply(None, arguments) / arguments.length
def standardDeviation(var_args):
sampleSize = arguments.length
if sampleSize < 2:
return 0
mean = average.apply(None, arguments)
variance = (
sum.apply(
None,
map(
arguments,
lambda val: Math.pow(val - mean, 2))) /
(sampleSize - 1))
return Math.sqrt(variance)
def isInt(num):
return isFinite(num) and num % 1 == 0
def isFiniteNumber(num):
return isFinite(num) and not isNaN(num)
|
apache-2.0
| 244,956,752,805,644,600 | 20.950617 | 77 | 0.603487 | false | 3.197842 | false | false | false |
schalkneethling/snippets-service
|
snippets/base/cache.py
|
1
|
1854
|
# FROM https://raw.githubusercontent.com/mozilla/bedrock/master/bedrock/base/cache.py
from django.core.cache.backends.base import DEFAULT_TIMEOUT
from django.core.cache.backends.locmem import LocMemCache
class SimpleDictCache(LocMemCache):
"""A local memory cache that doesn't pickle values.
Only for use with simple immutable data structures that can be
inserted into a dict.
"""
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
with self._lock.writer():
if self._has_expired(key):
self._set(key, value, timeout)
return True
return False
def get(self, key, default=None, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
value = None
with self._lock.reader():
if not self._has_expired(key):
value = self._cache[key]
if value is not None:
return value
with self._lock.writer():
try:
del self._cache[key]
del self._expire_info[key]
except KeyError:
pass
return default
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
with self._lock.writer():
self._set(key, value, timeout)
def incr(self, key, delta=1, version=None):
value = self.get(key, version=version)
if value is None:
raise ValueError("Key '%s' not found" % key)
new_value = value + delta
key = self.make_key(key, version=version)
with self._lock.writer():
self._cache[key] = new_value
return new_value
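# --- Hypothetical usage sketch (added for illustration, not part of the
# --- original module). In a Django settings module this backend would
# --- typically be wired up via CACHES; the alias and LOCATION are made up:
#
#   CACHES = {
#       'in-memory': {
#           'BACKEND': 'snippets.base.cache.SimpleDictCache',
#           'LOCATION': 'snippets',
#       },
#   }
#
#   from django.core.cache import caches
#   cache = caches['in-memory']
#   cache.set('counter', 1, timeout=60)
#   cache.incr('counter')        # -> 2, stored without pickling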
|
mpl-2.0
| -7,616,568,562,176,174,000 | 33.333333 | 85 | 0.591154 | false | 4.012987 | false | false | false |
juniortada/signxml
|
setup.py
|
1
|
1193
|
#!/usr/bin/env python
import os, glob
from setuptools import setup, find_packages
install_requires = [line.rstrip() for line in open(os.path.join(os.path.dirname(__file__), "requirements.txt"))]
setup(
name='signxml',
version='0.4.2',
url='https://github.com/kislyuk/signxml',
license='Apache Software License',
author='Andrey Kislyuk',
author_email='[email protected]',
description='Python XML Signature library',
long_description=open('README.rst').read(),
install_requires=install_requires,
packages = find_packages(exclude=['test']),
platforms=['MacOS X', 'Posix'],
package_data={'signxml': ['schemas/*.xsd']},
zip_safe=False,
include_package_data=True,
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
apache-2.0
| 3,744,242,557,129,585,700 | 34.088235 | 112 | 0.636211 | false | 3.937294 | false | false | false |
FRBs/FRB
|
frb/surveys/panstarrs.py
|
1
|
11891
|
"""
Slurp data from Pan-STARRS catalog using the MAST API.
A lot of this code has been directly taken from
http://ps1images.stsci.edu/ps1_dr2_api.html
"""
import numpy as np
from astropy import units as u,utils as astroutils
from astropy.io import fits
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy.table import Table
from ..galaxies.defs import PanSTARRS_bands
from .images import grab_from_url
import warnings
import requests
try:
from astroquery.vizier import Vizier
except ImportError:
warnings.warn("Warning: You need to install astroquery to use the survey tools...")
from frb.surveys import surveycoord,catalog_utils,images
from IPython import embed
#TODO: It's potentially viable to use the same code for other
#catalogs in the VizieR database. Maybe a generalization wouldn't
#be too bad in the future.
# Define the data model for Pan-STARRS data
photom = {}
photom['Pan-STARRS'] = {}
for band in PanSTARRS_bands:
# Pre 180301 paper
#photom["Pan-STARRS"]["Pan-STARRS"+'_{:s}'.format(band)] = '{:s}PSFmag'.format(band.lower())
#photom["Pan-STARRS"]["Pan-STARRS"+'_{:s}_err'.format(band)] = '{:s}PSFmagErr'.format(band.lower())
photom["Pan-STARRS"]["Pan-STARRS"+'_{:s}'.format(band)] = '{:s}KronMag'.format(band.lower())
photom["Pan-STARRS"]["Pan-STARRS"+'_{:s}_err'.format(band)] = '{:s}KronMagErr'.format(band.lower())
photom["Pan-STARRS"]["Pan-STARRS_ID"] = 'objID'
photom["Pan-STARRS"]['ra'] = 'raStack'
photom["Pan-STARRS"]['dec'] = 'decStack'
photom["Pan-STARRS"]["Pan-STARRS_field"] = 'field'
# Define the default set of query fields
# See: https://outerspace.stsci.edu/display/PANSTARRS/PS1+StackObjectView+table+fields
# for additional Fields
_DEFAULT_query_fields = ['objID','raStack','decStack','objInfoFlag','qualityFlag',
'rKronRad']#, 'rPSFMag', 'rKronMag']
_DEFAULT_query_fields +=['{:s}PSFmag'.format(band) for band in PanSTARRS_bands]
_DEFAULT_query_fields +=['{:s}PSFmagErr'.format(band) for band in PanSTARRS_bands]
_DEFAULT_query_fields +=['{:s}KronMag'.format(band) for band in PanSTARRS_bands]
_DEFAULT_query_fields +=['{:s}KronMagErr'.format(band) for band in PanSTARRS_bands]
class Pan_STARRS_Survey(surveycoord.SurveyCoord):
"""
    A class to query the Pan-STARRS catalogs through the
    MAST API (catalogs.mast.stsci.edu). Inherits from
    SurveyCoord.
"""
def __init__(self,coord,radius,**kwargs):
surveycoord.SurveyCoord.__init__(self,coord,radius,**kwargs)
self.Survey = "Pan_STARRS"
def get_catalog(self,query_fields=None,release="dr2",
table="stack",print_query=False,
use_psf=False):
"""
        Query the Pan-STARRS catalog (MAST API) for
        photometry.
Args:
query_fields: list, optional
A list of query fields to
get in addition to the
default fields.
release: str, optional
"dr1" or "dr2" (default: "dr2").
Data release version.
table: str, optional
"mean","stack" or "detection"
(default: "stack"). The data table to
search within.
use_psf: bool, optional
If True, use PSFmag instead of KronMag
Returns:
catalog: astropy.table.Table
Contains all query results
"""
        assert self.radius <= 0.5*u.deg, "Cone searches have a maximum radius of 0.5 deg"
#Validate table and release input
_check_legal(table,release)
url = "https://catalogs.mast.stsci.edu/api/v0.1/panstarrs/{:s}/{:s}.csv".format(release,table)
if query_fields is None:
query_fields = _DEFAULT_query_fields
else:
query_fields = _DEFAULT_query_fields+query_fields
#Validate columns
_check_columns(query_fields,table,release)
data = {}
data['ra'] = self.coord.ra.value
data['dec'] = self.coord.dec.value
data['radius'] = self.radius.to(u.deg).value
data['columns'] = query_fields
if print_query:
print(url)
ret = requests.get(url,params=data)
ret.raise_for_status()
if len(ret.text)==0:
self.catalog = Table()
self.catalog.meta['radius'] = self.radius
self.catalog.meta['survey'] = self.survey
# Validate
self.validate_catalog()
return self.catalog.copy()
photom_catalog = Table.read(ret.text,format="ascii.csv")
pdict = photom['Pan-STARRS'].copy()
# Allow for PSF
if use_psf:
for band in PanSTARRS_bands:
pdict["Pan-STARRS"+'_{:s}'.format(band)] = '{:s}PSFmag'.format(band.lower())
pdict["Pan-STARRS"+'_{:s}_err'.format(band)] = '{:s}PSFmagErr'.format(band.lower())
photom_catalog = catalog_utils.clean_cat(photom_catalog,pdict)
#Remove bad positions because Pan-STARRS apparently decided
#to flag some positions with large negative numbers. Why even keep
#them?
#import pdb; pdb.set_trace()
bad_ra = (photom_catalog['ra']<0)+(photom_catalog['ra']>360)
bad_dec = (photom_catalog['dec']<-90)+(photom_catalog['dec']>90)
bad_pos = bad_ra+bad_dec # bad_ra OR bad_dec
photom_catalog = photom_catalog[~bad_pos]
#
self.catalog = catalog_utils.sort_by_separation(photom_catalog, self.coord,
radec=('ra','dec'), add_sep=True)
# Meta
self.catalog.meta['radius'] = self.radius
self.catalog.meta['survey'] = self.survey
#Validate
self.validate_catalog()
#Return
return self.catalog.copy()
def get_cutout(self,imsize=30*u.arcsec,filt="irg",output_size=None):
"""
Grab a color cutout (PNG) from Pan-STARRS
Args:
imsize (Quantity): Angular size of image desired
filt (str): A string with the three filters to be used
output_size (int): Output image size in pixels. Defaults
to the original cutout size.
Returns:
PNG image, None (None for the header).
"""
assert len(filt)==3, "Need three filters for a cutout."
#Sort filters from red to blue
filt = filt.lower() #Just in case the user is cheeky about the filter case.
reffilt = "yzirg"
idx = np.argsort([reffilt.find(f) for f in filt])
newfilt = ""
for i in idx:
newfilt += filt[i]
#Get image url
url = _get_url(self.coord,imsize=imsize,filt=newfilt,output_size=output_size,color=True,imgformat='png')
self.cutout = images.grab_from_url(url)
self.cutout_size = imsize
        return self.cutout.copy(), None
def get_image(self,imsize=30*u.arcsec,filt="i",timeout=120):
"""
Grab a fits image from Pan-STARRS in a
specific band.
Args:
imsize (Quantity): Angular size of the image desired
filt (str): One of 'g','r','i','z','y' (default: 'i')
            timeout (int): Number of seconds to wait before the query times out (default: 120 s)
Returns:
hdu: fits header data unit for the downloaded image
"""
assert len(filt)==1 and filt in "grizy", "Filter name must be one of 'g','r','i','z','y'"
url = _get_url(self.coord,imsize=imsize,filt=filt,imgformat='fits')[0]
imagedat = fits.open(astroutils.data.download_file(url,cache=True,show_progress=False,timeout=timeout))[0]
return imagedat
def _get_url(coord,imsize=30*u.arcsec,filt="i",output_size=None,imgformat="fits",color=False):
"""
Returns the url corresponding to the requested image cutout
Args:
coord (astropy SkyCoord): Center of the search area.
imsize (astropy Angle): Length and breadth of the search area.
filt (str): 'g','r','i','z','y'
output_size (int): display image size (length) in pixels
imgformat (str): "fits","png" or "jpg"
"""
assert imgformat in ['jpg','png','fits'], "Image file can be only in the formats 'jpg', 'png' and 'fits'."
if color:
assert len(filt)==3,"Three filters are necessary for a color image"
assert imgformat in ['jpg','png'], "Color image not available in fits format"
pixsize = int(imsize.to(u.arcsec).value/0.25) #0.25 arcsec per pixel
service = "https://ps1images.stsci.edu/cgi-bin/ps1filenames.py"
filetaburl = ("{:s}?ra={:f}&dec={:f}&size={:d}&format=fits"
"&filters={:s}").format(service,coord.ra.value,
coord.dec.value, pixsize,filt)
file_extensions = Table.read(filetaburl, format='ascii')['filename']
url = "https://ps1images.stsci.edu/cgi-bin/fitscut.cgi?ra={:f}&dec={:f}&size={:d}&format={:s}".format(coord.ra.value,coord.dec.value,
pixsize,imgformat)
if output_size:
url += "&output_size={}".format(output_size)
if color:
cols = ['red','green','blue']
for col,extension in zip(cols,file_extensions):
url += "&{}={}".format(col,extension)
else:
urlbase = url + "&red="
url = []
for extensions in file_extensions:
url.append(urlbase+extensions)
return url
def _check_columns(columns,table,release):
"""
Checks if the requested columns are present in the
table from which data is to be pulled. Raises an error
if those columns aren't found.
Args:
columns (list of str): column names to retrieve
table (str): "mean","stack" or "detection"
release (str): "dr1" or "dr2"
"""
dcols = {}
for col in _ps1metadata(table,release)['name']:
dcols[col.lower()] = 1
badcols = []
for col in columns:
if col.lower().strip() not in dcols:
badcols.append(col)
if badcols:
raise ValueError('Some columns not found in table: {}'.format(', '.join(badcols)))
def _check_legal(table,release):
"""
Checks if this combination of table and release is acceptable
    Raises a ValueError exception if there is a problem.
Taken from http://ps1images.stsci.edu/ps1_dr2_api.html
Args:
table (str): "mean","stack" or "detection"
release (str): "dr1" or "dr2"
"""
releaselist = ("dr1", "dr2")
if release not in releaselist:
raise ValueError("Bad value for release (must be one of {})".format(', '.join(releaselist)))
if release=="dr1":
tablelist = ("mean", "stack")
else:
tablelist = ("mean", "stack", "detection")
if table not in tablelist:
raise ValueError("Bad value for table (for {} must be one of {})".format(release, ", ".join(tablelist)))
def _ps1metadata(table="stack",release="dr2",
baseurl="https://catalogs.mast.stsci.edu/api/v0.1/panstarrs"):
"""Return metadata for the specified catalog and table
Args:
table (string): mean, stack, or detection
release (string): dr1 or dr2
baseurl: base URL for the request
Returns an astropy table with columns name, type, description
"""
_check_legal(table,release)
url = "{baseurl}/{release}/{table}/metadata".format(**locals())
r = requests.get(url)
r.raise_for_status()
v = r.json()
# convert to astropy table
tab = Table(rows=[(x['name'],x['type'],x['description']) for x in v],
names=('name','type','description'))
return tab
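# --- Hypothetical usage sketch (added for illustration, not part of the
# --- original module). The coordinates and search radius below are made up:
#
#   from astropy.coordinates import SkyCoord
#   from astropy import units as u
#   coord = SkyCoord(ra=183.979, dec=13.028, unit='deg')
#   survey = Pan_STARRS_Survey(coord, 30 * u.arcsec)
#   catalog = survey.get_catalog(release='dr2', table='stack')
#   png = survey.get_cutout(imsize=30 * u.arcsec, filt='irg')[0]
#   hdu = survey.get_image(imsize=30 * u.arcsec, filt='i')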
|
bsd-3-clause
| -4,824,569,952,313,100,000 | 38.769231 | 137 | 0.600454 | false | 3.530582 | false | false | false |
jledbetter/openhatch
|
mysite/customs/bugimporters/bugzilla.py
|
1
|
14860
|
# This file is part of OpenHatch.
# Copyright (C) 2010, 2011 Jack Grigg
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import lxml.etree
import twisted.web.error
import twisted.web.http
import urlparse
from mysite.base.decorators import cached_property
import mysite.base.helpers
from mysite.customs.bugimporters.base import BugImporter
import mysite.search.models
class BugzillaBugImporter(BugImporter):
def __init__(self, *args, **kwargs):
# Create a list to store bug ids obtained from queries.
self.bug_ids = []
# Call the parent __init__.
super(BugzillaBugImporter, self).__init__(*args, **kwargs)
def process_queries(self, queries):
# Add all the queries to the waiting list.
for query in queries:
# Get the query URL.
query_url = query.get_query_url()
# Get the query type and set the callback.
query_type = query.query_type
if query_type == 'xml':
callback = self.handle_query_html
else:
callback = self.handle_tracking_bug_xml
# Add the query URL and callback.
self.add_url_to_waiting_list(
url=query_url,
callback=callback)
# Update query.last_polled and save it.
query.last_polled = datetime.datetime.utcnow()
query.save()
# URLs are now all prepped, so start pushing them onto the reactor.
self.push_urls_onto_reactor()
def handle_query_html(self, query_html_string):
# Turn the string into an HTML tree that can be parsed to find the list
# of bugs hidden in the 'XML' form.
query_html = lxml.etree.HTML(query_html_string)
# Find all form inputs at the level we want.
# This amounts to around three forms.
query_form_inputs = query_html.xpath('/html/body/div/table/tr/td/form/input')
# Extract from this the inputs corresponding to 'ctype' fields.
ctype_inputs = [i for i in query_form_inputs if 'ctype' in i.values()]
# Limit this to inputs with 'ctype=xml'.
ctype_xml = [i for i in ctype_inputs if 'xml' in i.values()]
if ctype_xml:
# Get the 'XML' form.
xml_form = ctype_xml[0].getparent()
# Get all its children.
xml_inputs = xml_form.getchildren()
# Extract from this all bug id inputs.
bug_id_inputs = [i for i in xml_inputs if 'id' in i.values()]
# Convert this to a list of bug ids.
bug_id_list = [int(i.get('value')) for i in bug_id_inputs]
# Add them to self.bug_ids.
self.bug_ids.extend(bug_id_list)
def handle_tracking_bug_xml(self, tracking_bug_xml_string):
# Turn the string into an XML tree.
tracking_bug_xml = lxml.etree.XML(tracking_bug_xml_string)
# Find all the bugs that this tracking bug depends on.
depends = tracking_bug_xml.findall('bug/dependson')
# Add them to self.bug_ids.
self.bug_ids.extend([int(depend.text) for depend in depends])
def prepare_bug_urls(self):
        # Pull bug_ids out of the internal storage. This is done in case the
# list is simultaneously being written to, in which case just copying
# the entire thing followed by deleting the contents could lead to
# lost IDs.
bug_id_list = []
while self.bug_ids:
bug_id_list.append(self.bug_ids.pop())
# Convert the obtained bug ids to URLs.
bug_url_list = [urlparse.urljoin(self.tm.get_base_url(),
"show_bug.cgi?id=%d" % bug_id) for bug_id in bug_id_list]
# Get the sub-list of URLs that are fresh.
fresh_bug_urls = mysite.search.models.Bug.all_bugs.filter(
canonical_bug_link__in = bug_url_list,
last_polled__lt = datetime.datetime.now() - datetime.timedelta(days = 1)
).values_list('canonical_bug_link', flat=True)
        # Remove the fresh URLs to be left with stale or new URLs.
for bug_url in fresh_bug_urls:
bug_url_list.remove(bug_url)
# Put the bug list in the form required for process_bugs.
# The second entry of the tuple is None as Bugzilla doesn't supply data
# in the queries above (although it does support grabbing data for
        # multiple bugs at once, when all the bug ids are known).
bug_list = [(bug_url, None) for bug_url in bug_url_list]
# And now go on to process the bug list
self.process_bugs(bug_list)
def process_bugs(self, bug_list):
# If there are no bug URLs, finish now.
if not bug_list:
self.determine_if_finished()
return
# Convert the bug URLs into bug ids.
bug_id_list = []
for bug_url, _ in bug_list:
base, num = bug_url.rsplit('=', 1)
bug_id = int(num)
bug_id_list.append(bug_id)
# Create a single URL to fetch all the bug data.
big_url = urlparse.urljoin(
self.tm.get_base_url(),
'show_bug.cgi?ctype=xml&excludefield=attachmentdata')
for bug_id in bug_id_list:
big_url += '&id=%d' % bug_id
# Fetch the bug data.
self.add_url_to_waiting_list(
url=big_url,
callback=self.handle_bug_xml,
c_args={},
errback=self.errback_bug_xml,
e_args={'bug_id_list': bug_id_list})
# URLs are now all prepped, so start pushing them onto the reactor.
self.push_urls_onto_reactor()
def errback_bug_xml(self, failure, bug_id_list):
# Check if the failure was related to the size of the request.
size_related_errors = [
twisted.web.http.REQUEST_ENTITY_TOO_LARGE,
twisted.web.http.REQUEST_TIMEOUT,
twisted.web.http.REQUEST_URI_TOO_LONG
]
if failure.check(twisted.web.error.Error) and failure.value.status in size_related_errors:
big_url_base = urlparse.urljoin(
self.tm.get_base_url(),
'show_bug.cgi?ctype=xml&excludefield=attachmentdata')
# Split bug_id_list into pieces, and turn each piece into a URL.
# Note that (floor division)+1 is used to ensure that for
# odd-numbered lists we don't end up with one bug id left over.
split_bug_id_list = []
num_ids = len(bug_id_list)
step = (num_ids//2)+1
for i in xrange(0, num_ids, step):
bug_id_list_fragment = bug_id_list[i:i+step]
# Check the fragment actually has bug ids in it.
if not bug_id_list_fragment:
# This is our recursive end-point.
continue
# Create the URL for the fragment of bug ids.
big_url = big_url_base
for bug_id in bug_id_list_fragment:
big_url += '&id=%d' % bug_id
# Fetch the reduced bug data.
self.add_url_to_waiting_list(
url=big_url,
callback=self.handle_bug_xml,
c_args={},
errback=self.errback_bug_xml,
e_args={'bug_id_list': bug_id_list_fragment})
else:
# Pass the Failure on.
return failure
def handle_bug_xml(self, bug_list_xml_string):
# Turn the string into an XML tree.
bug_list_xml = lxml.etree.XML(bug_list_xml_string)
for bug_xml in bug_list_xml.xpath('bug'):
# Create a BugzillaBugParser with the XML data.
bbp = BugzillaBugParser(bug_xml)
# Get the parsed data dict from the BugzillaBugParser.
data = bbp.get_parsed_data_dict(self.tm)
# Get or create a Bug object to put the parsed data in.
try:
bug = mysite.search.models.Bug.all_bugs.get(
canonical_bug_link=bbp.bug_url)
except mysite.search.models.Bug.DoesNotExist:
bug = mysite.search.models.Bug(canonical_bug_link=bbp.bug_url)
# Fill the Bug
for key in data:
value = data[key]
setattr(bug, key, value)
# Save the project onto it
# Project name is generated from the bug_project_name_format property
# of the TrackerModel.
project_from_name, _ = mysite.search.models.Project.objects.get_or_create(
name=self.generate_bug_project_name(bbp))
# Manually save() the Project to ensure that if it was created then it has
# a display_name.
project_from_name.save()
bug.project = project_from_name
# Store the tracker that generated the Bug, update last_polled and save it!
bug.tracker = self.tm
bug.last_polled = datetime.datetime.utcnow()
bug.save()
def generate_bug_project_name(self, bbp):
return self.tm.bug_project_name_format.format(
tracker_name=self.tm.tracker_name,
product=bbp.product,
component=bbp.component)
def determine_if_finished(self):
# If we got here then there are no more URLs in the waiting list.
# So if self.bug_ids is also empty then we are done.
if self.bug_ids:
self.prepare_bug_urls()
else:
self.finish_import()
class BugzillaBugParser:
@staticmethod
def get_tag_text_from_xml(xml_doc, tag_name, index = 0):
"""Given an object representing <bug><tag>text</tag></bug>,
and tag_name = 'tag', returns 'text'."""
tags = xml_doc.xpath(tag_name)
try:
return tags[index].text
except IndexError:
return ''
def __init__(self, bug_xml):
self.bug_xml = bug_xml
self.bug_id = self._bug_id_from_bug_data()
self.bug_url = None # This gets filled in the data parser.
def _bug_id_from_bug_data(self):
return int(self.get_tag_text_from_xml(self.bug_xml, 'bug_id'))
@cached_property
def product(self):
return self.get_tag_text_from_xml(self.bug_xml, 'product')
@cached_property
def component(self):
return self.get_tag_text_from_xml(self.bug_xml, 'component')
@staticmethod
def _who_tag_to_username_and_realname(who_tag):
username = who_tag.text
realname = who_tag.attrib.get('name', '')
return username, realname
@staticmethod
def bugzilla_count_people_involved(xml_doc):
"""Strategy: Create a set of all the listed text values
inside a <who ...>(text)</who> tag
Return the length of said set."""
everyone = [tag.text for tag in xml_doc.xpath('.//who')]
return len(set(everyone))
@staticmethod
def bugzilla_date_to_datetime(date_string):
return mysite.base.helpers.string2naive_datetime(date_string)
def get_parsed_data_dict(self, tm):
# Generate the bug_url.
self.bug_url = urlparse.urljoin(
tm.get_base_url(),
'show_bug.cgi?id=%d' % self.bug_id)
xml_data = self.bug_xml
date_reported_text = self.get_tag_text_from_xml(xml_data, 'creation_ts')
last_touched_text = self.get_tag_text_from_xml(xml_data, 'delta_ts')
u, r = self._who_tag_to_username_and_realname(xml_data.xpath('.//reporter')[0])
status = self.get_tag_text_from_xml(xml_data, 'bug_status')
looks_closed = status in ('RESOLVED', 'WONTFIX', 'CLOSED', 'ASSIGNED')
ret_dict = {
'title': self.get_tag_text_from_xml(xml_data, 'short_desc'),
'description': (self.get_tag_text_from_xml(xml_data, 'long_desc/thetext') or
'(Empty description)'),
'status': status,
'importance': self.get_tag_text_from_xml(xml_data, 'bug_severity'),
'people_involved': self.bugzilla_count_people_involved(xml_data),
'date_reported': self.bugzilla_date_to_datetime(date_reported_text),
'last_touched': self.bugzilla_date_to_datetime(last_touched_text),
'submitter_username': u,
'submitter_realname': r,
'canonical_bug_link': self.bug_url,
'looks_closed': looks_closed
}
keywords_text = self.get_tag_text_from_xml(xml_data, 'keywords')
keywords = map(lambda s: s.strip(),
keywords_text.split(','))
# Check for the bitesized keyword
if tm.bitesized_type:
ret_dict['bite_size_tag_name'] = tm.bitesized_text
b_list = tm.bitesized_text.split(',')
if tm.bitesized_type == 'key':
ret_dict['good_for_newcomers'] = any(b in keywords for b in b_list)
elif tm.bitesized_type == 'wboard':
whiteboard_text = self.get_tag_text_from_xml(xml_data, 'status_whiteboard')
ret_dict['good_for_newcomers'] = any(b in whiteboard_text for b in b_list)
else:
ret_dict['good_for_newcomers'] = False
else:
ret_dict['good_for_newcomers'] = False
# Check whether this is a documentation bug.
if tm.documentation_type:
d_list = tm.documentation_text.split(',')
if tm.documentation_type == 'key':
ret_dict['concerns_just_documentation'] = any(d in keywords for d in d_list)
elif tm.documentation_type == 'comp':
ret_dict['concerns_just_documentation'] = any(d == self.component for d in d_list)
elif tm.documentation_type == 'prod':
ret_dict['concerns_just_documentation'] = any(d == self.product for d in d_list)
else:
ret_dict['concerns_just_documentation'] = False
else:
ret_dict['concerns_just_documentation'] = False
# And pass ret_dict on.
return ret_dict
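# Illustration: how the static XML helpers above behave on a tiny hand-written
# bug document (the element names mirror Bugzilla's ctype=xml output, the
# values are made up); this relies on the lxml import this module already uses.
if __name__ == '__main__':
    _sample = lxml.etree.XML(
        '<bug><bug_id>42</bug_id><short_desc>Example</short_desc>'
        '<who name="Jane Doe">jane</who></bug>')
    assert BugzillaBugParser.get_tag_text_from_xml(_sample, 'bug_id') == '42'
    assert BugzillaBugParser.get_tag_text_from_xml(_sample, 'missing') == ''
    assert BugzillaBugParser._who_tag_to_username_and_realname(
        _sample.xpath('.//who')[0]) == ('jane', 'Jane Doe')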
|
agpl-3.0
| -8,360,372,221,718,884,000 | 41.824207 | 98 | 0.584859 | false | 3.794688 | false | false | false |
quattor/aquilon
|
lib/aquilon/worker/formats/network_device.py
|
1
|
6711
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2010,2011,2012,2013,2014,2015,2016,2017,2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NetworkDevice formatter."""
from collections import defaultdict
from operator import attrgetter
from aquilon.aqdb.model import NetworkDevice
from aquilon.worker.formats.formatters import ObjectFormatter
from aquilon.worker.formats.hardware_entity import HardwareEntityFormatter
from aquilon.exceptions_ import ProtocolError
class NetworkDeviceFormatter(HardwareEntityFormatter):
def header_raw(self, device, details, indent="", embedded=True,
indirect_attrs=True):
details.append(indent + " Switch Type: %s" % device.switch_type)
def format_raw(self, device, indent="", embedded=True,
indirect_attrs=True):
details = [super(NetworkDeviceFormatter, self).format_raw(device, indent)]
for slot in device.chassis_slot:
details.append(indent + " {0:c}: {0!s}".format(slot.chassis))
details.append(indent + " Slot: %d" % slot.slot_number)
ports = defaultdict(list)
for om in device.observed_macs:
ports[om.port].append(om)
for port in sorted(ports):
# Show most recent data first, otherwise sort by MAC address. sort()
# is stable so we can call it multiple times
ports[port].sort(key=attrgetter('mac_address'))
ports[port].sort(key=attrgetter('last_seen'), reverse=True)
details.append(indent + " Port: %s" % port)
for om in ports[port]:
details.append(indent + " MAC: %s, created: %s, last seen: %s" %
(om.mac_address, om.creation_date, om.last_seen))
for pg in device.port_groups:
details.append(indent + " VLAN %d: %s" % (pg.network_tag,
pg.network.ip))
details.append(indent + " Created: %s" % pg.creation_date)
if device.host:
details.append(self.redirect_raw_host_details(device.host))
return "\n".join(details)
def csv_fields(self, device):
base_details = [device.fqdn,
device.primary_ip,
device.switch_type,
device.location.rack.name if device.location.rack else None,
device.location.building.name,
device.model.vendor.name,
device.model.name,
device.serial_no]
if not device.interfaces:
yield base_details + [None, None]
else:
for interface in device.interfaces:
yield base_details + [interface.name, interface.mac]
def fill_proto(self, device, skeleton, embedded=True,
indirect_attrs=True):
skeleton.primary_name = str(device.primary_name)
if indirect_attrs:
self._fill_hardware_proto(device, skeleton.hardware)
self._fill_system_proto(device.host, skeleton.system)
def _fill_hardware_proto(self, hwent, skeleton, embedded=True,
indirect_attrs=True):
skeleton.hardware_type = skeleton.NETWORK_DEVICE
skeleton.label = hwent.label
if hwent.serial_no:
skeleton.serial_no = hwent.serial_no
self.redirect_proto(hwent.model, skeleton.model, indirect_attrs=False)
self.redirect_proto(hwent.location, skeleton.location, indirect_attrs=False)
if indirect_attrs:
for iface in sorted(hwent.interfaces, key=attrgetter('name')):
int_msg = skeleton.interfaces.add()
int_msg.device = iface.name
self.redirect_proto(iface, int_msg)
self._fill_address_assignment_proto(iface, int_msg.address_assignments)
def _fill_address_assignment_proto(self, iface, skeleton, embedded=True,
indirect_attrs=True):
for addr in iface.assignments:
addr_msg = skeleton.add()
if addr.assignment_type == 'standard':
addr_msg.assignment_type = addr_msg.STANDARD
elif addr.assignment_type == 'shared':
addr_msg.assignment_type = addr_msg.SHARED
else:
raise ProtocolError("Unknown address assignmment type %s." %
addr.assignment_type)
if addr.label:
addr_msg.label = addr.label
addr_msg.ip = str(addr.ip)
addr_msg.fqdn.extend([str(fqdn) for fqdn in addr.fqdns])
for dns_record in addr.dns_records:
if dns_record.alias_cnt:
addr_msg.aliases.extend([str(a.fqdn) for a in
dns_record.all_aliases])
if hasattr(addr, "priority"):
addr_msg.priority = addr.priority
def _fill_system_proto(self, host, skeleton, embedded=True,
indirect_attrs=True):
self.redirect_proto(host.branch, skeleton.domain)
skeleton.status = host.status.name
self.redirect_proto(host.personality_stage, skeleton.personality)
self.redirect_proto(host.operating_system, skeleton.operating_system)
if host.cluster and not embedded:
skeleton.cluster = host.cluster.name
if host.resholder:
self.redirect_proto(host.resholder.resources, skeleton.resources)
self.redirect_proto(host.services_used, skeleton.services_used,
indirect_attrs=False)
self.redirect_proto([srv.service_instance for srv in host.services_provided],
skeleton.services_provided, indirect_attrs=False)
skeleton.owner_eonid = host.effective_owner_grn.eon_id
for grn_rec in host.grns:
map = skeleton.eonid_maps.add()
map.target = grn_rec.target
map.eonid = grn_rec.eon_id
ObjectFormatter.handlers[NetworkDevice] = NetworkDeviceFormatter()
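# Illustration: the two-pass stable sort used in format_raw above -- sorting by
# the secondary key (mac_address) first and then by last_seen descending keeps
# MAC order among entries seen at the same time; the _Obs records below are
# made-up stand-ins for observed_macs entries.
if __name__ == '__main__':
    from collections import namedtuple
    _Obs = namedtuple('_Obs', ['mac_address', 'last_seen'])
    _seen = [_Obs('aa:02', 2), _Obs('aa:01', 2), _Obs('aa:03', 1)]
    _seen.sort(key=attrgetter('mac_address'))
    _seen.sort(key=attrgetter('last_seen'), reverse=True)
    assert [_o.mac_address for _o in _seen] == ['aa:01', 'aa:02', 'aa:03']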
|
apache-2.0
| 1,171,513,508,783,954,400 | 42.577922 | 87 | 0.606318 | false | 4.178705 | false | false | false |
jdodds/pyrana
|
pyrana/plugins/pidginstatus.py
|
1
|
1213
|
import dbus
from feather import Plugin
class PidginStatus(Plugin):
listeners = set(['songstart', 'songpause', 'songresume'])
messengers = set()
def songstart(self, payload):
        # Hacky: assumes the payload is a path ending in .../Artist/Album/Song.
parts = payload.split('/')
artist = parts[-3]
album = parts[-2]
song = parts[-1]
self.song_msg = "%s (%s): %s" % (artist, album, song)
self.update_status(self.song_msg)
def songpause(self, payload=None):
self.update_status("Paused")
def songresume(self, payload=None):
self.update_status(self.song_msg)
def update_status(self, msg):
bus = dbus.SessionBus()
if "im.pidgin.purple.PurpleService" in bus.list_names():
purple = bus.get_object("im.pidgin.purple.PurpleService",
"/im/pidgin/purple/PurpleObject",
"im.pidgin.purple.PurpleInterface")
current = purple.PurpleSavedstatusGetType(
purple.PurpleSavedstatusGetCurrent())
status = purple.PurpleSavedstatusNew("", current)
purple.PurpleSavedstatusSetMessage(status, msg)
purple.PurpleSavedstatusActivate(status)
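# Illustration: how songstart above turns a payload path into the status text;
# the path is made up, and update_status (which needs a live D-Bus session with
# Pidgin running) is deliberately not called here.
if __name__ == '__main__':
    _payload = '/music/Some Artist/Some Album/01 Some Song.mp3'
    _parts = _payload.split('/')
    assert "%s (%s): %s" % (_parts[-3], _parts[-2], _parts[-1]) == \
        'Some Artist (Some Album): 01 Some Song.mp3'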
|
bsd-3-clause
| 7,194,651,118,975,871,000 | 31.783784 | 71 | 0.591096 | false | 3.790625 | false | false | false |
leesdolphin/rentme
|
api/trademe/enums.py
|
1
|
1262
|
import enum
def named_enum(name, item):
if isinstance(item, str):
item = item.split(' ')
item = list(map(str.strip, item))
return enum.Enum(name, dict(zip(item, item)), module=__name__)
@enum.unique
class AreaOfBusiness(enum.Enum):
All = 0
Marketplace = 1
Property = 2
Motors = 3
Jobs = 4
Services = 5
SearchSortOrder = named_enum('SearchSortOrder',
'Default FeaturedFirst SuperGridFeaturedFirst '
'TitleAsc ExpiryAsc ExpiryDesc PriceAsc PriceDesc '
'BidsMost BuyNowAsc BuyNowDesc ReviewsDesc '
'HighestSalary LowestSalary LowestKilometres '
'HighestKilometres NewestVehicle OldestVehicle '
'BestMatch LargestDiscount')
PhotoSize = named_enum('PhotoSize',
'Thumbnail List Medium Gallery Large FullSize')
PropertyType = named_enum('PropertyType', 'Apartment CarPark House Townhouse Unit')
AllowsPickups = enum.Enum('AllowsPickups', 'None Allow Demand Forbid', start=0)
GeographicLocationAccuracy = enum.Enum('GeographicLocationAccuracy',
'None Address Suburb Street', start=0)
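# Illustration: members built by named_enum carry their own name as the value,
# so they serialize straight back to the strings the API expects, while the
# class-based enums keep explicit integer codes.
if __name__ == '__main__':
    assert PhotoSize.Thumbnail.value == 'Thumbnail'
    assert PhotoSize('Gallery') is PhotoSize.Gallery
    assert SearchSortOrder.PriceAsc.name == SearchSortOrder.PriceAsc.value
    assert AreaOfBusiness.Property.value == 2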
|
agpl-3.0
| -7,150,790,870,608,253,000 | 37.242424 | 83 | 0.606181 | false | 3.981073 | false | false | false |
pareidolic/bharati-braille
|
bottle.py
|
1
|
128654
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2012, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.11.rc1'
__license__ = 'MIT'
# The gevent server adapter needs to patch some modules before they are imported
# This is why we parse the commandline parameters here but handle them later
if __name__ == '__main__':
from optparse import OptionParser
_cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app")
_opt = _cmd_parser.add_option
_opt("--version", action="store_true", help="show version number.")
_opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
_opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
_opt("-p", "--plugin", action="append", help="install additional plugin/s.")
_opt("--debug", action="store_true", help="start server in debug mode.")
_opt("--reload", action="store_true", help="auto-reload on file changes.")
_cmd_options, _cmd_args = _cmd_parser.parse_args()
if _cmd_options.server and _cmd_options.server.startswith('gevent'):
import gevent.monkey; gevent.monkey.patch_all()
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, subprocess, sys, tempfile, threading, time, urllib, warnings
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
try: from json import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try: from simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError("JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.
py = sys.version_info
py3k = py >= (3,0,0)
py25 = py < (2,6,0)
py31 = (3,1,0) <= py < (3,2,0)
# Workaround for the missing "as" keyword in py3k.
def _e(): return sys.exc_info()[1]
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (restricts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
if py25:
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
from UserDict import DictMixin
def next(it): return it.next()
bytes = str
else: # 2.6, 2.7
from collections import MutableMapping as DictMixin
json_loads = json_lds
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
return s.decode(enc, err) if isinstance(s, bytes) else unicode(s)
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
def close(self): pass # Keep wrapped buffer open.
# File uploads (which are implemented as empty FieldStorage instances...)
# have a negative truth value. That makes no sense, here is a fix.
class FieldStorage(cgi.FieldStorage):
def __nonzero__(self): return bool(self.list or self.file)
if py3k: __bool__ = __nonzero__
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try: functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError: pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)): return list(data)
elif data: return [data]
else: return []
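# Illustration: makelist normalizes the "one value or many" arguments accepted
# throughout this module (e.g. the method= and apply= parameters of route()).
if __name__ == '__main__':
    assert makelist('GET') == ['GET']
    assert makelist(('GET', 'POST')) == ['GET', 'POST']
    assert makelist(None) == []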
class DictProperty(object):
''' Property that maps to a key in a local dict-like attribute. '''
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
class cached_property(object):
''' A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. '''
def __init__(self, func):
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
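# Illustration: cached_property computes its value once per instance and then
# shadows itself with a plain instance attribute; _CacheDemo is a throwaway
# class made up for this sketch.
if __name__ == '__main__':
    class _CacheDemo(object):
        calls = 0
        @cached_property
        def answer(self):
            _CacheDemo.calls += 1
            return 42
    _demo = _CacheDemo()
    assert (_demo.answer, _demo.answer) == (42, 42)
    assert _CacheDemo.calls == 1              # the getter ran exactly once
    del _demo.__dict__['answer']              # deleting the cache resets it
    assert _demo.answer == 42 and _CacheDemo.calls == 2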
class lazy_attribute(object):
''' A property that caches itself to the class object. '''
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError): pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router """
class RouteBuildError(RouteError):
""" The route could not been built """
class Router(object):
''' A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
'''
default_pattern = '[^/]+'
default_filter = 're'
#: Sorry for the mess. It works. Trust me.
rule_syntax = re.compile('(\\\\*)'\
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'\
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'\
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def __init__(self, strict=False):
self.rules = {} # A {rule: Rule} mapping
self.builder = {} # A rule/name->build_info mapping
self.static = {} # Cache for static routes: {path: {method: target}}
self.dynamic = [] # Cache for dynamic routes. See _compile()
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {'re': self.re_filter, 'int': self.int_filter,
'float': self.float_filter, 'path': self.path_filter}
def re_filter(self, conf):
return conf or self.default_pattern, None, None
def int_filter(self, conf):
return r'-?\d+', int, lambda x: str(int(x))
def float_filter(self, conf):
return r'-?[\d.]+', float, lambda x: str(float(x))
def path_filter(self, conf):
return r'.+?', None, None
def add_filter(self, name, func):
''' Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. '''
self.filters[name] = func
def parse_rule(self, rule):
''' Parses a rule into a (name, filter, conf) token stream. If mode is
None, name contains a static rule part. '''
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if len(g[0])%2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix: yield prefix, None, None
name, filtr, conf = g[1:4] if not g[2] is None else g[4:7]
if not filtr: filtr = self.default_filter
yield name, filtr, conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix+rule[offset:], None, None
def add(self, rule, method, target, name=None):
''' Add a new route or replace the target for an existing route. '''
if rule in self.rules:
self.rules[rule][method] = target
if name: self.builder[name] = self.builder[rule]
return
target = self.rules[rule] = {method: target}
# Build pattern and other structures for dynamic routes
anons = 0 # Number of anonymous wildcards
pattern = '' # Regular expression pattern
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self.parse_rule(rule):
if mode:
is_static = False
mask, in_filter, out_filter = self.filters[mode](conf)
if key:
pattern += '(?P<%s>%s)' % (key, mask)
else:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons; anons += 1
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static[self.build(rule)] = target
return
def fpat_sub(m):
return m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:'
flat_pattern = re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))', fpat_sub, pattern)
try:
re_match = re.compile('^(%s)$' % pattern).match
except re.error:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e()))
def match(path):
""" Return an url-argument dictionary. """
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
try:
combined = '%s|(^%s$)' % (self.dynamic[-1][0].pattern, flat_pattern)
self.dynamic[-1] = (re.compile(combined), self.dynamic[-1][1])
self.dynamic[-1][1].append((match, target))
except (AssertionError, IndexError): # AssertionError: Too many groups
self.dynamic.append((re.compile('(^%s$)' % flat_pattern),
[(match, target)]))
return match
def build(self, _name, *anons, **query):
''' Build an URL by filling the wildcards in a rule. '''
builder = self.builder.get(_name)
if not builder: raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons): query['anon%d'%i] = value
url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder])
return url if not query else url+'?'+urlencode(query)
except KeyError:
raise RouteBuildError('Missing URL argument: %r' % _e().args[0])
def match(self, environ):
        ''' Return a (target, url_args) tuple or raise HTTPError(400/404/405). '''
path, targets, urlargs = environ['PATH_INFO'] or '/', None, {}
if path in self.static:
targets = self.static[path]
else:
for combined, rules in self.dynamic:
match = combined.match(path)
if not match: continue
getargs, targets = rules[match.lastindex - 1]
urlargs = getargs(path) if getargs else {}
break
if not targets:
raise HTTPError(404, "Not found: " + repr(environ['PATH_INFO']))
method = environ['REQUEST_METHOD'].upper()
if method in targets:
return targets[method], urlargs
if method == 'HEAD' and 'GET' in targets:
return targets['GET'], urlargs
if 'ANY' in targets:
return targets['ANY'], urlargs
allowed = [verb for verb in targets if verb != 'ANY']
if 'GET' in allowed and 'HEAD' not in allowed:
allowed.append('HEAD')
raise HTTPError(405, "Method not allowed.",
header=[('Allow',",".join(allowed))])
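# Illustration: a Router in isolation -- the registered target is just a string
# here (the Bottle class below registers Route objects), and the environ dict is
# hand-built for the sketch.
if __name__ == '__main__':
    _router = Router()
    _router.add('/wiki/<page>', 'GET', 'show_page')
    assert _router.match({'PATH_INFO': '/wiki/Home', 'REQUEST_METHOD': 'GET'}) \
        == ('show_page', {'page': 'Home'})
    assert _router.build('/wiki/<page>', page='Sandbox') == '/wiki/Sandbox'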
class Route(object):
''' This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
        turning an URL path rule into a regular expression usable by the Router.
'''
def __init__(self, app, rule, method, callback, name=None,
plugins=None, skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/:page``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = ConfigDict(config)
def __call__(self, *a, **ka):
depr("Some APIs changed to return Route() instances instead of"\
" callables. Make sure to use the Route.call method and not to"\
" call Route instances directly.")
return self.call(*a, **ka)
@cached_property
def call(self):
''' The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests.'''
return self._make_callback()
def reset(self):
''' Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. '''
self.__dict__.pop('call', None)
def prepare(self):
''' Do all on-demand work immediately (useful for debugging).'''
self.call
@property
def _context(self):
depr('Switch to Plugin API v2 and access the Route object directly.')
return dict(rule=self.rule, method=self.method, callback=self.callback,
name=self.name, app=self.app, config=self.config,
apply=self.plugins, skip=self.skiplist)
def all_plugins(self):
''' Yield all Plugins affecting this route. '''
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
api = getattr(plugin, 'api', 1)
context = self if api > 1 else self._context
callback = plugin.apply(callback, context)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
if not callback is self.callback:
update_wrapper(callback, self.callback)
return callback
def __repr__(self):
return '<%s %r %r>' % (self.method, self.rule, self.callback)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
def __init__(self, catchall=True, autojson=True):
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
self.catchall = catchall
#: A :cls:`ResourceManager` for application files
self.resources = ResourceManager()
#: A :cls:`ConfigDict` for app specific configuration.
self.config = ConfigDict()
self.config.autojson = autojson
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
self.hooks = HooksPlugin()
self.install(self.hooks)
if self.config.autojson:
self.install(JSONPlugin())
self.install(TemplatePlugin())
def mount(self, prefix, app, **options):
''' Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
'''
if isinstance(app, basestring):
prefix, app = app, prefix
depr('Parameter order of Bottle.mount() changed.') # 0.10
segments = [p for p in prefix.split('/') if p]
if not segments: raise ValueError('Empty path prefix.')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = BaseResponse([], 200)
def start_response(status, header):
rs.status = status
for name, value in header: rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
body = itertools.chain(rs.body, body)
return HTTPResponse(body, rs.status_code, rs.headers)
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'ANY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
def merge(self, routes):
''' Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. '''
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
''' Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
'''
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
''' Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. '''
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def run(self, **kwargs):
''' Calls :func:`run` with the same parameters. '''
run(self, **kwargs)
def reset(self, route=None):
''' Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. '''
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes: route.reset()
if DEBUG:
for route in routes: route.prepare()
self.hooks.trigger('app_reset')
def close(self):
''' Close the application and all installed plugins. '''
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
self.stopped = True
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
''' Add a route object, but do not change the :data:`Route.app`
attribute.'''
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self, path=None, method='GET', callback=None, name=None,
apply=None, skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
# TODO: Documentation and tests
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback, name=name,
plugins=plugins, skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. Three hooks
are currently implemented:
- before_request: Executed once before each request
- after_request: Executed once after each request
- app_reset: Called whenever :meth:`reset` is called.
"""
def wrapper(func):
self.hooks.add(name, func)
return func
return wrapper
def handle(self, path, method='GET'):
""" (deprecated) Execute the first matching route callback and return
the result. :exc:`HTTPResponse` exceptions are caught and returned.
If :attr:`Bottle.catchall` is true, other exceptions are caught as
well and returned as :exc:`HTTPError` instances (500).
"""
depr("This method will change semantics in 0.10. Try to avoid it.")
if isinstance(path, dict):
return self._handle(path)
return self._handle({'PATH_INFO': path, 'REQUEST_METHOD': method.upper()})
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res))
def _handle(self, environ):
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind()
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
return route.call(**args)
except HTTPResponse:
return _e()
except RouteReset:
route.reset()
return self._handle(environ)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", _e(), stacktrace)
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code, self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.output)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
out = iter(out)
first = next(out)
while not first:
first = next(out)
except StopIteration:
return self._cast('')
except HTTPResponse:
first = _e()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
if isinstance(first, bytes):
return itertools.chain([first], out)
if isinstance(first, unicode):
return imap(lambda x: x.encode(response.charset),
itertools.chain([first], out))
return self._cast(HTTPError(500, 'Unsupported response type: %s'\
% type(first)))
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(_e())), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers)
return [tob(err)]
def __call__(self, environ, start_response):
''' Each instance of :class:'Bottle' is a WSGI application. '''
return self.wsgi(environ, start_response)
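# Illustration: the decorator-based API exposed by the Bottle class above; the
# route and callback are made up for this sketch, and it is wrapped in a helper
# (_bottle_usage_sketch, also made up) so nothing runs before the plugin and
# config classes used by Bottle.__init__ are defined further down in this file.
# Call it only after the module is fully loaded, e.g. run(_bottle_usage_sketch()).
def _bottle_usage_sketch():
    app = Bottle()
    @app.route('/hello/<name>')
    def hello(name):
        return 'Hello %s' % name
    return app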
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ')
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 1024000
    #: Maximum number of GET or POST parameters per request
MAX_PARAMS = 100
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
''' Bottle application handling this request. '''
raise RuntimeError('This request is not connected to an application.')
@property
def path(self):
''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). '''
return '/' + self.environ.get('PATH_INFO','').lstrip('/')
@property
def method(self):
''' The ``REQUEST_METHOD`` value as an uppercase string. '''
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
''' A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. '''
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
''' Return the value of a request header, or a given default value. '''
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE',''))
cookies = list(cookies.values())[:self.MAX_PARAMS]
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
            values are sometimes called "URL arguments" or "GET parameters", but
            should not be confused with "URL wildcards", which are provided by the
:class:`Router`. '''
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs[:self.MAX_PARAMS]:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
            encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not hasattr(item, 'filename'):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The values are instances of
:class:`cgi.FieldStorage`. The most important attributes are:
filename
The filename, if specified; otherwise None; this is the client
side filename, *not* the file name on which it is stored (that's
a temporary file you don't deal with)
file
The file(-like) object from which you can read the data.
value
The value as a *string*; for file uploads, this transparently
reads the file every time you request the value. Do not do this
on big files.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if hasattr(item, 'filename'):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
''' If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. '''
if 'application/json' in self.environ.get('CONTENT_TYPE', '') \
and 0 < self.content_length < self.MEMFILE_MAX:
return json_loads(self.body.read(self.MEMFILE_MAX))
return None
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
maxread = max(0, self.content_length)
stream = self.environ['wsgi.input']
body = BytesIO() if maxread < self.MEMFILE_MAX else TemporaryFile(mode='w+b')
while maxread > 0:
part = stream.read(min(maxread, self.MEMFILE_MAX))
if not part: break
body.write(part)
maxread -= len(part)
self.environ['wsgi.input'] = body
body.seek(0)
return body
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
maxlen = max(0, min(self.content_length, self.MEMFILE_MAX))
pairs = _parse_qsl(tonat(self.body.read(maxlen), 'latin1'))
for key, value in pairs[:self.MAX_PARAMS]:
post[key] = value
return post
safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py31:
args['fp'] = NCTextIOWrapper(args['fp'], encoding='ISO-8859-1',
newline='\n')
elif py3k:
args['encoding'] = 'ISO-8859-1'
data = FieldStorage(**args)
for item in (data.list or [])[:self.MAX_PARAMS]:
post[item.name] = item if item.filename else item.value
return post
@property
def COOKIES(self):
''' Alias for :attr:`cookies` (deprecated). '''
depr('BaseRequest.COOKIES was renamed to BaseRequest.cookies (lowercase).')
return self.cookies
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. '''
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
''' The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
            called. This script path is returned with leading and trailing
slashes. '''
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
''' Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
'''
script = self.environ.get('SCRIPT_NAME','/')
self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)
@property
def content_length(self):
        ''' The request body length as an integer. The client is responsible for
            setting this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. '''
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
''' The Content-Type header as a lowercase-string (default: empty). '''
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
''' True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). '''
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. '''
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
            the client IP and followed by zero or more proxies. This only works
            if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None): return self.environ.get(value, default)
def __getitem__(self, key): return self.environ[key]
def __delitem__(self, key): self[key] = ""; del(self.environ[key])
def __iter__(self): return iter(self.environ)
def __len__(self): return len(self.environ)
def keys(self): return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.'+key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
''' Search in self.environ for additional user defined attributes. '''
try:
var = self.environ['bottle.request.ext.%s'%name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
self.environ['bottle.request.ext.%s'%name] = value
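# Illustration: BaseRequest is a thin, mostly read-only view over a WSGI environ
# dict; the environ below is hand-built for the sketch, and only properties that
# need nothing defined later in this file are exercised.
if __name__ == '__main__':
    _environ = {'PATH_INFO': 'hello', 'REQUEST_METHOD': 'post',
                'QUERY_STRING': 'name=World', 'CONTENT_TYPE': 'text/plain'}
    _request = BaseRequest(_environ)
    assert _request.path == '/hello'          # missing leading slash is added
    assert _request.method == 'POST'          # always uppercased
    assert _request.query_string == 'name=World'
    assert _request.content_type == 'text/plain'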
def _hkey(s):
return s.title().replace('_','-')
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=str, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, cls):
if obj is None: return self
value = obj.headers.get(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj.headers[self.name] = self.writer(value)
def __delete__(self, obj):
del obj.headers[self.name]
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type',)),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))}
def __init__(self, body='', status=None, **headers):
self._cookies = None
self._headers = {'Content-Type': [self.default_content_type]}
self.body = body
self.status = status or self.default_status
if headers:
for name, value in headers.items():
self[name] = value
def copy(self):
''' Returns a copy of self. '''
copy = Response()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
''' The HTTP status line as a string (e.g. ``404 Not Found``).'''
return self._status_line
@property
def status_code(self):
''' The HTTP status code as an integer (e.g. 404).'''
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999: raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. '''
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name): return _hkey(name) in self._headers
def __delitem__(self, name): del self._headers[_hkey(name)]
def __getitem__(self, name): return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)]
def get_header(self, name, default=None):
''' Return the value of a previously defined header. If there is no
header with that name, return a default value. '''
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
''' Create a new response header, replacing any previously defined
headers with the same name. '''
self._headers[_hkey(name)] = [str(value)]
def add_header(self, name, value):
''' Add an additional response header, not removing duplicates. '''
self._headers.setdefault(_hkey(name), []).append(str(value))
def iter_headers(self):
''' Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. '''
return self.headerlist
def wsgiheader(self):
depr('The wsgiheader method is deprecated. See headerlist.') #0.10
return self.headerlist
@property
def headerlist(self):
''' WSGI conform list of (header, value) tuples. '''
out = []
headers = self._headers.items()
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for name, vals in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', c.OutputString()))
return out
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
@property
def charset(self):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return 'UTF-8'
@property
def COOKIES(self):
""" A dict-like SimpleCookie instance. This should not be used directly.
See :meth:`set_cookie`. """
depr('The COOKIES dict is deprecated. Use `set_cookie()` instead.') # 0.10
if not self._cookies:
self._cookies = SimpleCookie()
return self._cookies
def set_cookie(self, name, value, secret=None, **options):
''' Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
:param httponly: prevents client-side javascript to read this cookie
(default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
cookie). The main intention is to make pickling and unpickling
              safe, not to store secret information at client side.
'''
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
        if len(value) > 4096: raise ValueError('Cookie value too long.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
self._cookies[name][key.replace('_', '-')] = value
def delete_cookie(self, key, **kwargs):
''' Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. '''
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
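# Illustrative usage sketch (not part of the original module): combining the
# header and cookie helpers defined on BaseResponse above. The concrete header
# names and cookie values are made up for demonstration only.
def _example_base_response_usage():
    rv = BaseResponse(body='hello', status=202)
    rv.set_header('Cache-Control', 'no-cache')   # replaces any previous value
    rv.add_header('X-Example', 'a')              # add_header keeps duplicates
    rv.add_header('X-Example', 'b')
    rv.set_cookie('sid', 'abc123', path='/', max_age=3600)
    # headerlist yields WSGI-ready (name, value) tuples, including Set-Cookie.
    return rv.status_line, rv.headerlist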
#: Thread-local storage for :class:`LocalRequest` and :class:`LocalResponse`
#: attributes.
_lctx = threading.local()
def local_property(name):
def fget(self):
try:
return getattr(_lctx, name)
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(self, value): setattr(_lctx, name, value)
def fdel(self): delattr(_lctx, name)
return property(fget, fset, fdel,
'Thread-local property stored in :data:`_lctx.%s`' % name)
class LocalRequest(BaseRequest):
''' A thread-local subclass of :class:`BaseRequest` with a different
        set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). '''
bind = BaseRequest.__init__
environ = local_property('request_environ')
class LocalResponse(BaseResponse):
''' A thread-local subclass of :class:`BaseResponse` with a different
        set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
'''
bind = BaseResponse.__init__
_status_line = local_property('response_status_line')
_status_code = local_property('response_status_code')
_cookies = local_property('response_cookies')
_headers = local_property('response_headers')
body = local_property('response_body')
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, header=None, **headers):
if header or 'output' in headers:
depr('Call signature changed (for the better)')
if header: headers.update(header)
if 'output' in headers: body = headers.pop('output')
super(HTTPResponse, self).__init__(body, status, **headers)
def apply(self, response):
response._status_code = self._status_code
response._status_line = self._status_line
response._headers = self._headers
response._cookies = self._cookies
response.body = self.body
def _output(self, value=None):
depr('Use HTTPResponse.body instead of HTTPResponse.output')
if value is None: return self.body
self.body = value
output = property(_output, _output, doc='Alias for .body')
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self, status=None, body=None, exception=None, traceback=None, header=None, **headers):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, header, **headers)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException): pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, route):
dumps = self.json_dumps
if not dumps: return callback
def wrapper(*a, **ka):
rv = callback(*a, **ka)
if isinstance(rv, dict):
#Attempt to serialize, raises exception on failure
json_response = dumps(rv)
                # Set the content type only if serialization was successful
response.content_type = 'application/json'
return json_response
return rv
return wrapper
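# Illustrative sketch (the callback is hypothetical): JSONPlugin.apply() wraps
# a route callback so that plain dict return values are serialized with
# json_dumps and the Content-Type header is switched to application/json;
# any non-dict return value passes through unchanged.
def _example_jsonplugin_wrapping():
    def fake_callback():
        return {'answer': 42}
    wrapped = JSONPlugin().apply(fake_callback, route=None)
    return wrapped()   # a JSON string, e.g. '{"answer": 42}'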
class HooksPlugin(object):
name = 'hooks'
api = 2
_names = 'before_request', 'after_request', 'app_reset'
def __init__(self):
self.hooks = dict((name, []) for name in self._names)
self.app = None
def _empty(self):
return not (self.hooks['before_request'] or self.hooks['after_request'])
def setup(self, app):
self.app = app
def add(self, name, func):
''' Attach a callback to a hook. '''
was_empty = self._empty()
self.hooks.setdefault(name, []).append(func)
if self.app and was_empty and not self._empty(): self.app.reset()
def remove(self, name, func):
''' Remove a callback from a hook. '''
was_empty = self._empty()
if name in self.hooks and func in self.hooks[name]:
self.hooks[name].remove(func)
if self.app and not was_empty and self._empty(): self.app.reset()
def trigger(self, name, *a, **ka):
''' Trigger a hook and return a list of results. '''
hooks = self.hooks[name]
if ka.pop('reversed', False): hooks = hooks[::-1]
return [hook(*a, **ka) for hook in hooks]
def apply(self, callback, route):
if self._empty(): return callback
def wrapper(*a, **ka):
self.trigger('before_request')
rv = callback(*a, **ka)
self.trigger('after_request', reversed=True)
return rv
return wrapper
class TemplatePlugin(object):
''' This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. '''
name = 'template'
api = 2
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str) and 'template_opts' in route.config:
depr('The `template_opts` parameter is deprecated.') #0.9
return view(conf, **route.config['template_opts'])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
''' Create a virtual package that redirects imports (see PEP 302). '''
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({'__file__': __file__, '__path__': [],
'__all__': [], '__loader__': self})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname, modname = fullname.rsplit('.', 1)
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
packname, modname = fullname.rsplit('.', 1)
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self): return len(self.dict)
def __iter__(self): return iter(self.dict)
def __contains__(self, key): return key in self.dict
def __delitem__(self, key): del self.dict[key]
def __getitem__(self, key): return self.dict[key][-1]
def __setitem__(self, key, value): self.append(key, value)
def keys(self): return self.dict.keys()
if py3k:
def values(self): return (v[-1] for v in self.dict.values())
def items(self): return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self): return [v[-1] for v in self.dict.values()]
def items(self): return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self): return self.dict.iterkeys()
def itervalues(self): return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
''' Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
:param type: If defined, this callable is used to cast the value
                   into a specific type. Exceptions are suppressed and result in
                   the default value being returned.
'''
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
''' Add a new value to the list of values for this key. '''
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
''' Replace the list of values with a single value. '''
self.dict[key] = [value]
def getall(self, key):
''' Return a (possibly empty) list of values for a key. '''
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
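# Illustrative sketch: how the MultiDict above stores every value per key while
# plain item access keeps normal dict semantics (the newest value wins).
def _example_multidict_usage():
    md = MultiDict(a=1)
    md['a'] = 2                                   # __setitem__ appends
    md.append('b', 'x')
    assert md['a'] == 2                           # newest value only
    assert md.getall('a') == [1, 2]               # full history
    assert md.get('missing', default='fallback') == 'fallback'
    assert md.get('a', type=str) == '2'           # optional type casting
    return list(md.allitems())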
class FormsDict(MultiDict):
''' This :class:`MultiDict` subclass is used to store request form data.
Additionally to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. '''
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
s = s.encode('latin1')
if isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
return s
def decode(self, encoding=None):
''' Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. '''
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key): return _hkey(key) in self.dict
def __delitem__(self, key): del self.dict[_hkey(key)]
def __getitem__(self, key): return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value): self.dict[_hkey(key)] = [str(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(str(value))
def replace(self, key, value): self.dict[_hkey(key)] = [str(value)]
def getall(self, key): return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in [_hkey(n) for n in names]:
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
''' This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
'''
#: List of keys that do not have a 'HTTP_' prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
''' Translate header field name to CGI/WSGI environ key. '''
key = key.replace('-','_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
''' Return the header value as is (may be bytes or unicode). '''
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
return tonat(self.environ[self._ekey(key)], 'latin1')
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield key[5:].replace('_', '-').title()
elif key in self.cgikeys:
yield key.replace('_', '-').title()
def keys(self): return [x for x in self]
def __len__(self): return len(self.keys())
def __contains__(self, key): return self._ekey(key) in self.environ
class ConfigDict(dict):
''' A dict-subclass with some extras: You can access keys like attributes.
Uppercase attributes create new ConfigDicts and act as name-spaces.
Other missing attributes return None. Calling a ConfigDict updates its
values and returns itself.
>>> cfg = ConfigDict()
>>> cfg.Namespace.value = 5
>>> cfg.OtherNamespace(a=1, b=2)
>>> cfg
{'Namespace': {'value': 5}, 'OtherNamespace': {'a': 1, 'b': 2}}
'''
def __getattr__(self, key):
if key not in self and key[0].isupper():
self[key] = ConfigDict()
return self.get(key)
def __setattr__(self, key, value):
if hasattr(dict, key):
raise AttributeError('Read-only attribute.')
if key in self and self[key] and isinstance(self[key], ConfigDict):
raise AttributeError('Non-empty namespace attribute.')
self[key] = value
def __delattr__(self, key):
if key in self: del self[key]
def __call__(self, *a, **ka):
for key, value in dict(*a, **ka).items(): setattr(self, key, value)
return self
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self[-1]
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024*64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class ResourceManager(object):
''' This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
'''
def __init__(self, base='./', opener=open, cachemode='all'):
        self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
''' Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
'''
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
''' Iterate over all existing files in all registered paths. '''
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
''' Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
            returned. Symlinks are followed. The result is cached to speed up
future lookups. '''
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
''' Find a resource and return a file object, or raise IOError. '''
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
        return self.opener(fname, mode=mode, *args, **kwargs)
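# Illustrative sketch (the directory and file names are hypothetical): typical
# use of the ResourceManager defined above -- register search paths, then
# resolve and open a resource relative to them.
def _example_resource_manager_usage():
    res = ResourceManager()
    res.add_path('./data/', base=__file__)    # relative to this module's directory
    res.add_path('/etc/myapp/')               # made-up absolute path
    found = res.lookup('config.ini')          # absolute path or None
    if found:
        with res.open('config.ini') as fp:
            return fp.read()
    return None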
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error: Application stopped.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if code is None:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
location = urljoin(request.url, url)
raise HTTPResponse("", status=code, header=dict(Location=location))
def _file_iter_range(fp, offset, bytes, maxread=1024*1024):
''' Yield chunks from a range in a file. No chunk is bigger than maxread.'''
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root, mimetype='auto', download=False):
""" Open a file in a safe way and return :exc:`HTTPResponse` with status
        code 200, 206, 304, 403, 404 or 416. Set Content-Type, Content-Encoding,
Content-Length and Last-Modified header. Obey If-Modified-Since header
and HEAD requests.
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
header = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype == 'auto':
mimetype, encoding = mimetypes.guess_type(filename)
if mimetype: header['Content-Type'] = mimetype
if encoding: header['Content-Encoding'] = encoding
elif mimetype:
header['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download == True else download)
header['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
header['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
header['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
header['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
return HTTPResponse(status=304, header=header)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
header["Accept-Ranges"] = "bytes"
ranges = request.environ.get('HTTP_RANGE')
if 'HTTP_RANGE' in request.environ:
ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
header["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen)
header["Content-Length"] = str(end-offset)
if body: body = _file_iter_range(body, offset, end-offset)
return HTTPResponse(body, header=header, status=206)
return HTTPResponse(body, header=header)
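# Illustrative sketch (the URL prefix and root directory are made up): a route
# that delegates to static_file() above. Range requests, HEAD requests and
# If-Modified-Since handling are covered by static_file() itself.
def _example_static_route():
    @route('/static/<filepath:path>')
    def serve_static(filepath):
        return static_file(filepath, root='./public')
    return serve_static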
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
DEBUG = bool(mode)
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':',1)
return user, pwd
except (KeyError, ValueError):
return None
def parse_range_header(header, maxlen=0):
''' Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive.'''
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen-int(end)), maxlen
            elif not end: # bytes=100- -> everything from byte offset 100 onwards
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end)+1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
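# Illustrative sketch: behaviour of the Range header parser above for a
# 1000-byte resource. End indices are non-inclusive and malformed or
# unsatisfiable parts are skipped silently.
def _example_parse_range_header():
    assert list(parse_range_header('bytes=0-99', 1000)) == [(0, 100)]
    assert list(parse_range_header('bytes=-100', 1000)) == [(900, 1000)]
    assert list(parse_range_header('bytes=900-', 1000)) == [(900, 1000)]
    assert list(parse_range_header('bytes=abc', 1000)) == []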
def _parse_qsl(qs):
r = []
for pair in qs.replace(';','&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
''' Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. '''
return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
''' Encode and sign a pickle-able object. Return a (byte) string '''
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
''' Verify and decode an encoded string. Return an object or None.'''
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
    ''' Return True if the argument looks like an encoded cookie.'''
return bool(data.startswith(tob('!')) and tob('?') in data)
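# Illustrative sketch (the secret is made up): round-tripping a value through
# the signed-cookie helpers above. Decoding with a different key fails the
# signature check and returns None.
def _example_signed_cookie_roundtrip():
    blob = cookie_encode(('session', {'user': 'alice'}), 'not-a-real-secret')
    assert cookie_is_encoded(blob)
    assert cookie_decode(blob, 'not-a-real-secret') == ('session', {'user': 'alice'})
    assert cookie_decode(blob, 'wrong-key') is None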
def html_escape(string):
''' Escape HTML special characters ``&<>`` and quotes ``'"``. '''
return string.replace('&','&').replace('<','<').replace('>','>')\
.replace('"','"').replace("'",''')
def html_quote(string):
''' Escape and quote a string to be used as an HTTP attribute.'''
return '"%s"' % html_escape(string).replace('\n','%#10;')\
.replace('\r',' ').replace('\t','	')
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/:x/:y'
c(x, y=5) -> '/c/:x' and '/c/:x/:y'
d(x=5, y=6) -> '/d' and '/d/:x' and '/d/:x/:y'
"""
import inspect # Expensive module. Only import if necessary.
path = '/' + func.__name__.replace('__','/').lstrip('/')
spec = inspect.getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/:%s' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/:%s' % arg
yield path
def path_shift(script_name, path_info, shift=1):
''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
'''
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if shift > 0 and shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif shift < 0 and shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
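# Illustrative sketch: shifting one path fragment between SCRIPT_NAME and
# PATH_INFO with path_shift(), as done when an application is mounted under a
# prefix. The paths shown are arbitrary examples.
def _example_path_shift():
    assert path_shift('/app', '/admin/users', shift=1) == ('/app/admin', '/users')
    assert path_shift('/app/admin', '/users', shift=-1) == ('/app', '/admin/users')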
def validate(**vkargs):
"""
Validates and manipulates keyword arguments by user defined callables.
Handles ValueError and missing arguments by raising HTTPError(403).
"""
depr('Use route wildcard filters instead.')
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kargs):
for key, value in vkargs.items():
if key not in kargs:
abort(403, 'Missing parameter: %s' % key)
try:
kargs[key] = value(kargs[key])
except ValueError:
abort(403, 'Wrong parameter format for: %s' % key)
return func(*args, **kargs)
return wrapper
return decorator
def auth_basic(check, realm="private", text="Access denied"):
''' Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. '''
def decorator(func):
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
response.headers['WWW-Authenticate'] = 'Basic realm="%s"' % realm
return HTTPError(401, text)
return func(*a, **ka)
return wrapper
return decorator
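# Illustrative sketch (credentials and route are made up): protecting a route
# with the auth_basic decorator above. The check callable receives the user
# name and password from the Authorization header and must return True to
# grant access.
def _example_auth_basic_route():
    def check(user, password):
        return user == 'admin' and password == 'secret'
    @route('/admin')
    @auth_basic(check, realm='example')
    def admin_page():
        return 'restricted content'
    return admin_page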
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **config):
self.options = config
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s'%(k,repr(v)) for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from wsgiref.simple_server import make_server, WSGIRequestHandler
if self.quiet:
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw): pass
self.options['handler_class'] = QuietHandler
srv = make_server(self.host, self.port, handler, **self.options)
srv.serve_forever()
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
server = wsgiserver.CherryPyWSGIServer((self.host, self.port), handler)
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
if not self.quiet:
from paste.translogger import TransLogger
handler = TransLogger(handler)
httpserver.serve(handler, host=self.host, port=str(self.port),
**self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
        # Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* `fast` (default: False) uses libevent's http server, but has some
issues: No streaming, no pipelining, no SSL.
"""
def run(self, handler):
from gevent import wsgi, pywsgi, local
if not isinstance(_lctx, local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if not self.options.get('fast'): wsgi = pywsgi
log = None if self.quiet else 'default'
wsgi.WSGIServer((self.host, self.port), handler, log=log).serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested """
def run(self, handler):
from eventlet import wsgi, listen
try:
wsgi.server(listen((self.host, self.port)), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen((self.host, self.port)), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'rocket': RocketServer,
'bjoern' : BjoernServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
        local variables. Example: ``load('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN; NORUN, nr_old = True, NORUN
try:
tmp = default_app.push() # Create a new "default application"
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old
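# Illustrative sketch: the three target forms accepted by load(). The examples
# use the standard-library os.path module so that no hypothetical package is
# required; load_app() works the same way but pushes a temporary default app.
def _example_load_targets():
    mod = load('os.path')                              # the module object itself
    join = load('os.path:join')                        # an attribute of the module
    joined = load('os.path:join(a, b)', a='x', b='y')  # an evaluated expression
    return mod, join, joined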
_debug = debug
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
interval=1, reloader=False, quiet=False, plugins=None,
debug=False, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
try:
lockfile = None
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
_debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
app.install(plugin)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
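# Illustrative sketch (host, port and route are arbitrary): the typical minimal
# way to declare a route on the default application and start a development
# server with run(). Kept inside an uncalled function so that importing this
# module does not start a server.
def _example_run_dev_server():
    @route('/hello/<name>')
    def hello(name):
        return template('<b>Hello {{name}}</b>!', name=name)
    run(host='localhost', port=8080, debug=True)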
class FileCheckerThread(threading.Thread):
    ''' Interrupt the main thread as soon as a changed module file is detected,
        the lockfile gets deleted, or the lockfile gets too old. '''
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda path: os.stat(path).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl','html','thtml','stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup]
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=[]):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if os.path.isfile(name): return name
for spath in lookup:
fname = os.path.join(spath, name)
if os.path.isfile(fname):
return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
''' This reads or sets the global settings stored in class.settings. '''
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (*args)
or directly, as keywords (**kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding':self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, **kwargs):
from jinja2 import Environment, FunctionLoader
if 'prefix' in kwargs: # TODO: to be removed after a while
raise RuntimeError('The keyword argument `prefix` has been removed. '
'Use the full jinja2 environment name line_statement_prefix instead.')
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTALTemplate(BaseTemplate):
''' Deprecated, do not use. '''
def prepare(self, **options):
depr('The SimpleTAL template handler is deprecated'\
' and will be removed in 0.12')
from simpletal import simpleTAL
if self.source:
self.tpl = simpleTAL.compileHTMLTemplate(self.source)
else:
with open(self.filename, 'rb') as fp:
self.tpl = simpleTAL.compileHTMLTemplate(tonat(fp.read()))
def render(self, *args, **kwargs):
from simpletal import simpleTALES
for dictarg in args: kwargs.update(dictarg)
context = simpleTALES.Context()
for k,v in self.defaults.items():
context.addGlobal(k, v)
for k,v in kwargs.items():
context.addGlobal(k, v)
output = StringIO()
self.tpl.expand(context, output)
return output.getvalue()
class SimpleTemplate(BaseTemplate):
blocks = ('if', 'elif', 'else', 'try', 'except', 'finally', 'for', 'while',
'with', 'def', 'class')
dedent_blocks = ('elif', 'else', 'except', 'finally')
@lazy_attribute
def re_pytokens(cls):
''' This matches comments and all kinds of quoted strings but does
NOT match comments (#...) within quoted strings. (trust me) '''
return re.compile(r'''
(''(?!')|""(?!")|'{6}|"{6} # Empty strings (all 4 types)
|'(?:[^\\']|\\.)+?' # Single quotes (')
|"(?:[^\\"]|\\.)+?" # Double quotes (")
|'{3}(?:[^\\]|\\.|\n)+?'{3} # Triple-quoted strings (')
|"{3}(?:[^\\]|\\.|\n)+?"{3} # Triple-quoted strings (")
|\#.* # Comments
)''', re.VERBOSE)
def prepare(self, escape_func=html_escape, noescape=False, **kwargs):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
if noescape:
self._str, self._escape = self._escape, self._str
@classmethod
def split_comment(cls, code):
""" Removes comments (#...) from python code. """
if '#' not in code: return code
#: Remove comments only (leave quoted strings as they are)
subf = lambda m: '' if m.group(0)[0]=='#' else m.group(0)
return re.sub(cls.re_pytokens, subf, code)
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
stack = [] # Current Code indentation
lineno = 0 # Current line of code
ptrbuffer = [] # Buffer for printable strings and token tuple instances
codebuffer = [] # Buffer for generated python code
multiline = dedent = oneline = False
template = self.source or open(self.filename, 'rb').read()
def yield_tokens(line):
for i, part in enumerate(re.split(r'\{\{(.*?)\}\}', line)):
if i % 2:
if part.startswith('!'): yield 'RAW', part[1:]
else: yield 'CMD', part
else: yield 'TXT', part
def flush(): # Flush the ptrbuffer
if not ptrbuffer: return
cline = ''
for line in ptrbuffer:
for token, value in line:
if token == 'TXT': cline += repr(value)
elif token == 'RAW': cline += '_str(%s)' % value
elif token == 'CMD': cline += '_escape(%s)' % value
cline += ', '
cline = cline[:-2] + '\\\n'
cline = cline[:-2]
if cline[:-1].endswith('\\\\\\\\\\n'):
cline = cline[:-7] + cline[-1] # 'nobr\\\\\n' --> 'nobr'
cline = '_printlist([' + cline + '])'
del ptrbuffer[:] # Do this before calling code() again
code(cline)
def code(stmt):
for line in stmt.splitlines():
codebuffer.append(' ' * len(stack) + line.strip())
for line in template.splitlines(True):
lineno += 1
line = touni(line, self.encoding)
sline = line.lstrip()
if lineno <= 2:
m = re.match(r"%\s*#.*coding[:=]\s*([-\w.]+)", sline)
if m: self.encoding = m.group(1)
if m: line = line.replace('coding','coding (removed)')
if sline and sline[0] == '%' and sline[:2] != '%%':
line = line.split('%',1)[1].lstrip() # Full line following the %
cline = self.split_comment(line).strip()
cmd = re.split(r'[^a-zA-Z0-9_]', cline)[0]
flush() # You are actually reading this? Good luck, it's a mess :)
if cmd in self.blocks or multiline:
cmd = multiline or cmd
dedent = cmd in self.dedent_blocks # "else:"
if dedent and not oneline and not multiline:
cmd = stack.pop()
code(line)
oneline = not cline.endswith(':') # "if 1: pass"
multiline = cmd if cline.endswith('\\') else False
if not oneline and not multiline:
stack.append(cmd)
elif cmd == 'end' and stack:
code('#end(%s) %s' % (stack.pop(), line.strip()[3:]))
elif cmd == 'include':
p = cline.split(None, 2)[1:]
if len(p) == 2:
code("_=_include(%s, _stdout, %s)" % (repr(p[0]), p[1]))
elif p:
code("_=_include(%s, _stdout)" % repr(p[0]))
else: # Empty %include -> reverse of %rebase
code("_printlist(_base)")
elif cmd == 'rebase':
p = cline.split(None, 2)[1:]
if len(p) == 2:
code("globals()['_rebase']=(%s, dict(%s))" % (repr(p[0]), p[1]))
elif p:
code("globals()['_rebase']=(%s, {})" % repr(p[0]))
else:
code(line)
else: # Line starting with text (not '%') or '%%' (escaped)
if line.strip().startswith('%%'):
line = line.replace('%%', '%', 1)
ptrbuffer.append(yield_tokens(line))
flush()
return '\n'.join(codebuffer) + '\n'
def subtemplate(self, _name, _stdout, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(_stdout, kwargs)
def execute(self, _stdout, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
env = self.defaults.copy()
env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
'_include': self.subtemplate, '_str': self._str,
'_escape': self._escape, 'get': env.get,
'setdefault': env.setdefault, 'defined': env.__contains__})
env.update(kwargs)
eval(self.co, env)
if '_rebase' in env:
subtpl, rargs = env['_rebase']
rargs['_base'] = _stdout[:] #copy stdout
del _stdout[:] # clear stdout
return self.subtemplate(subtpl,_stdout,rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
for dictarg in args: kwargs.update(dictarg)
stdout = []
self.execute(stdout, kwargs)
return ''.join(stdout)
def template(*args, **kwargs):
'''
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
'''
tpl = args[0] if args else None
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
    if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]: kwargs.update(dictarg)
return TEMPLATES[tplid].render(kwargs)
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
simpletal_template = functools.partial(template, template_adapter=SimpleTALTemplate)
def view(tpl_name, **defaults):
''' Decorator: renders a template for a handler.
The handler can control its behavior like that:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
'''
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
return result
return wrapper
return decorator
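# Illustrative sketch (the template name is hypothetical): a handler decorated
# with view() returns a dict of template variables; any non-dict return value
# would be passed through unchanged.
def _example_view_decorated_handler():
    @view('hello_template')          # looked up in TEMPLATE_PATH when called
    def show_hello():
        return {'name': 'World'}
    return show_hello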
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
simpletal_view = functools.partial(view, template_adapter=SimpleTALTemplate)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, HTTP_CODES, request, touni
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multithreaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module
if __name__ == '__main__':
opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
if opt.version:
_stdout('Bottle %s\n'%__version__)
sys.exit(0)
if not args:
parser.print_help()
_stderr('\nError: No application specified.\n')
sys.exit(1)
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (opt.bind or 'localhost'), 8080
if ':' in host:
host, port = host.rsplit(':', 1)
run(args[0], host=host, port=port, server=opt.server,
reloader=opt.reload, plugins=opt.plugin, debug=opt.debug)
# THE END
|
agpl-3.0
| 24,366,193,587,758,924 | 38.671292 | 103 | 0.582197 | false | 4.143313 | false | false | false |
moccu/django-markymark
|
tests/test_fields.py
|
1
|
1065
|
from markymark.fields import MarkdownField, MarkdownFormField
from markymark.widgets import MarkdownTextarea
class CustomMarkdownTextarea(MarkdownTextarea):
pass
def test_markdownfield_formfield():
field = MarkdownField()
form_field = field.formfield()
assert isinstance(form_field, MarkdownFormField)
assert isinstance(form_field.widget, MarkdownTextarea)
def test_markdownfield_formfield_no_override():
field = MarkdownField()
form_field = field.formfield(widget=CustomMarkdownTextarea)
assert isinstance(form_field, MarkdownFormField)
assert isinstance(form_field.widget, CustomMarkdownTextarea)
def test_markdownfield_widget_instance():
field = MarkdownField()
widget_instance = MarkdownTextarea(attrs={'rows': 30, 'autofocus': True})
form_field = field.formfield(widget=widget_instance)
assert isinstance(form_field, MarkdownFormField)
assert isinstance(form_field.widget, MarkdownTextarea)
assert form_field.widget.attrs['rows'] == 30
assert form_field.widget.attrs['autofocus'] is True
|
mit
| -8,218,850,503,655,239,000 | 34.5 | 77 | 0.767136 | false | 4.064885 | false | false | false |
zseder/hunmisc
|
hunmisc/liblinear/filter_problem.py
|
1
|
2772
|
import sys
def filter_fs(problem_f_handler, needed_features_list, orig_num_fname,
              needed_labels_list, orig_num_labelname, filtered_name):
    """Write a filtered copy of a liblinear problem file.

    Keeps only the examples whose label number is in needed_labels_list and,
    within them, only the features whose number is in needed_features_list.
    Labels are renumbered consecutively from 0 and features from 1, using the
    original number->name mappings. Writes <filtered_name>.problem,
    <filtered_name>.featureNumbers and <filtered_name>.labelNumbers.
    """
    a = open('{0}.problem'.format(filtered_name), 'w')
orig_new_nums = {}
new_num_fname = {}
orig_new_labelnums = {}
new_num_labelname = {}
max_new_value = 0
max_new_labelvalue = -1
needed_feats = set(needed_features_list)
needed_labels = set(needed_labels_list)
for l in problem_f_handler:
data = l.strip().split(' ')
label_index = str(data[0])
if label_index in needed_labels:
if label_index not in orig_new_labelnums:
max_new_labelvalue += 1
orig_new_labelnums[label_index] = max_new_labelvalue
new_num_labelname[max_new_labelvalue] =\
orig_num_labelname[label_index]
needed_data = []
for d in data[1:]:
index, value = d.split(':')
if index in needed_feats:
if index not in orig_new_nums:
max_new_value += 1
orig_new_nums[index] = max_new_value
new_num_fname[max_new_value] = orig_num_fname[index]
needed_data.append('{0}:{1}'.format(orig_new_nums[index],
value))
needed_data.sort(key=lambda x:int(x.split(':')[0]))
            a.write('{0} {1}\n'.format(
                orig_new_labelnums[label_index], ' '.join(needed_data)))
a.close()
b = open('{0}.featureNumbers'.format(filtered_name), 'w')
for i in new_num_fname:
b.write('{0}\t{1}\n'.format(new_num_fname[i], i))
b.close()
c = open('{0}.labelNumbers'.format(filtered_name), 'w')
for i in new_num_labelname:
c.write('{0}\t{1}\n'.format(new_num_labelname[i], i))
c.close()
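def example_filter(orig_name='toy', filtered_name='toy_filtered'):
    # Illustrative sketch only: the file names are placeholders. It keeps
    # every feature but only the examples labelled "0" or "1" from
    # <orig_name>.problem, reusing the name/number mapping files written
    # next to it (one "<name>\t<number>" entry per line).
    feature_num_to_name = dict(l.strip().split('\t')[::-1]
                               for l in open('{0}.featureNumbers'.format(orig_name)))
    label_num_to_name = dict(l.strip().split('\t')[::-1]
                             for l in open('{0}.labelNumbers'.format(orig_name)))
    filter_fs(open('{0}.problem'.format(orig_name)),
              list(feature_num_to_name.keys()), feature_num_to_name,
              ['0', '1'], label_num_to_name, filtered_name)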
def main():
orig_name = sys.argv[1]
problem_file = '{0}.problem'.format(orig_name)
#nums_of_needed_features_file = sys.argv[2]
orig_feature_name_nums_file = '{0}.featureNumbers'.format(orig_name)
orig_label_nums_file = '{0}.labelNumbers'.format(orig_name)
name_of_resulting = sys.argv[2]
    filter_fs(open(problem_file),
              [l.strip().split('\t')[1]
               for l in open(orig_feature_name_nums_file).readlines()],
              dict([(l.strip().split('\t')[1], l.strip().split('\t')[0])
                    for l in open(orig_feature_name_nums_file)]),
              [str(0), str(9), str(11)],  # needed label numbers
              dict([(l.strip().split('\t')[1], l.strip().split('\t')[0])
                    for l in open(orig_label_nums_file)]),
              name_of_resulting)
if __name__ == "__main__":
main()
|
gpl-3.0
| -7,254,434,017,910,495,000 | 36.459459 | 103 | 0.527778 | false | 3.384615 | false | false | false |
googleapis/googleapis-gen
|
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/ad_group_asset_service/client.py
|
1
|
23429
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.enums.types import asset_field_type
from google.ads.googleads.v8.enums.types import asset_link_status
from google.ads.googleads.v8.resources.types import ad_group_asset
from google.ads.googleads.v8.services.types import ad_group_asset_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import AdGroupAssetServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import AdGroupAssetServiceGrpcTransport
class AdGroupAssetServiceClientMeta(type):
"""Metaclass for the AdGroupAssetService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[AdGroupAssetServiceTransport]]
_transport_registry['grpc'] = AdGroupAssetServiceGrpcTransport
def get_transport_class(cls,
label: str = None,
) -> Type[AdGroupAssetServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class AdGroupAssetServiceClient(metaclass=AdGroupAssetServiceClientMeta):
"""Service to manage ad group assets."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = 'googleads.googleapis.com'
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdGroupAssetServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdGroupAssetServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> AdGroupAssetServiceTransport:
"""Return the transport used by the client instance.
Returns:
AdGroupAssetServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def ad_group_path(customer_id: str,ad_group_id: str,) -> str:
"""Return a fully-qualified ad_group string."""
return "customers/{customer_id}/adGroups/{ad_group_id}".format(customer_id=customer_id, ad_group_id=ad_group_id, )
@staticmethod
def parse_ad_group_path(path: str) -> Dict[str,str]:
"""Parse a ad_group path into its component segments."""
m = re.match(r"^customers/(?P<customer_id>.+?)/adGroups/(?P<ad_group_id>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def ad_group_asset_path(customer_id: str,ad_group_id: str,asset_id: str,field_type: str,) -> str:
"""Return a fully-qualified ad_group_asset string."""
return "customers/{customer_id}/adGroupAssets/{ad_group_id}~{asset_id}~{field_type}".format(customer_id=customer_id, ad_group_id=ad_group_id, asset_id=asset_id, field_type=field_type, )
@staticmethod
def parse_ad_group_asset_path(path: str) -> Dict[str,str]:
"""Parse a ad_group_asset path into its component segments."""
m = re.match(r"^customers/(?P<customer_id>.+?)/adGroupAssets/(?P<ad_group_id>.+?)~(?P<asset_id>.+?)~(?P<field_type>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def asset_path(customer_id: str,asset_id: str,) -> str:
"""Return a fully-qualified asset string."""
return "customers/{customer_id}/assets/{asset_id}".format(customer_id=customer_id, asset_id=asset_id, )
@staticmethod
def parse_asset_path(path: str) -> Dict[str,str]:
"""Parse a asset path into its component segments."""
m = re.match(r"^customers/(?P<customer_id>.+?)/assets/(?P<asset_id>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str,str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str, ) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder, )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str,str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str, ) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization, )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str,str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str, ) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project, )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str,str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str, ) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(project=project, location=location, )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str,str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(self, *,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, AdGroupAssetServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the ad group asset service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.AdGroupAssetServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, AdGroupAssetServiceTransport):
# transport is a AdGroupAssetServiceTransport instance.
if credentials:
raise ValueError('When providing a transport instance, '
'provide its credentials directly.')
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = AdGroupAssetServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_ad_group_asset(self,
request: ad_group_asset_service.GetAdGroupAssetRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ad_group_asset.AdGroupAsset:
r"""Returns the requested ad group asset in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.GetAdGroupAssetRequest`):
The request object. Request message for
[AdGroupAssetService.GetAdGroupAsset][google.ads.googleads.v8.services.AdGroupAssetService.GetAdGroupAsset].
resource_name (:class:`str`):
Required. The resource name of the ad
group asset to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.resources.types.AdGroupAsset:
A link between an ad group and an
asset.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a ad_group_asset_service.GetAdGroupAssetRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, ad_group_asset_service.GetAdGroupAssetRequest):
request = ad_group_asset_service.GetAdGroupAssetRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_ad_group_asset]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('resource_name', request.resource_name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def mutate_ad_group_assets(self,
request: ad_group_asset_service.MutateAdGroupAssetsRequest = None,
*,
customer_id: str = None,
operations: Sequence[ad_group_asset_service.AdGroupAssetOperation] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ad_group_asset_service.MutateAdGroupAssetsResponse:
r"""Creates, updates, or removes ad group assets. Operation statuses
are returned.
List of thrown errors: `AssetLinkError <>`__
`AuthenticationError <>`__ `AuthorizationError <>`__
`ContextError <>`__ `FieldError <>`__ `HeaderError <>`__
`InternalError <>`__ `MutateError <>`__
`NotAllowlistedError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.MutateAdGroupAssetsRequest`):
The request object. Request message for
[AdGroupAssetService.MutateAdGroupAssets][google.ads.googleads.v8.services.AdGroupAssetService.MutateAdGroupAssets].
customer_id (:class:`str`):
Required. The ID of the customer
whose ad group assets are being
modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (:class:`Sequence[google.ads.googleads.v8.services.types.AdGroupAssetOperation]`):
Required. The list of operations to
perform on individual ad group assets.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.services.types.MutateAdGroupAssetsResponse:
Response message for an ad group
asset mutate.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id, operations]):
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a ad_group_asset_service.MutateAdGroupAssetsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, ad_group_asset_service.MutateAdGroupAssetsRequest):
request = ad_group_asset_service.MutateAdGroupAssetsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.mutate_ad_group_assets]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('customer_id', request.customer_id),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
__all__ = (
'AdGroupAssetServiceClient',
)
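# --- Illustrative usage sketch (not generated code). The credentials path,
# customer id, ad group id, asset id and field type below are placeholders.
def _example_get_ad_group_asset():
    client = AdGroupAssetServiceClient.from_service_account_file(
        'service-account.json')
    resource_name = client.ad_group_asset_path(
        customer_id='1234567890', ad_group_id='111',
        asset_id='222', field_type='SITELINK')
    return client.get_ad_group_asset(resource_name=resource_name)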
|
apache-2.0
| 3,013,399,569,031,883,300 | 44.142582 | 193 | 0.622733 | false | 4.43647 | false | false | false |
etkirsch/legends-of-erukar
|
config/world/regions/BarlenRegion.py
|
1
|
3367
|
import erukar
from erukar.system.engine import EnvironmentProfile, OverlandSector, Sector, Region, Location, Chunk, EconomicProfile
def create():
barlen = Region()
barlen.name = "Greater Barlen Region"
barlen.description = "A fertile area, known best for its vast barley and wheat fields. The seat of this region is a large town known as Barlen whose economy consists mostly on agriculture taxes and exports of the barley harvest."
barlen.add_sector(create_barlen_outskirts)
barlen.add_sector(create_razorwoods_camp)
barlen.add_sector(create_izeth_terrace)
barlen.add_sector(create_izeth_citadel_1f)
barlen.sector_limits = acceptable_bounds()
barlen.sector_template = create_sector_template(barlen)
return barlen
def acceptable_bounds():
return [
(0, 0, 0),
(2, -2, 0),
(2, -3, 1),
(1, -1, 0),
(1, -2, 1),
(1, -3, 2),
(0, -1, 1),
(0, -2, 2),
(0, -3, 3),
(-1, 0, 1)
]
def create_barlen_outskirts(region):
def econ_seed(sector):
econ = EconomicProfile()
econ.demand[erukar.IurwoodLumber] = 2000
econ.supply[erukar.IurwoodLumber] = 100
econ.demand[erukar.AshLumber] = 1000
econ.supply[erukar.AshLumber] = 100
return econ
sector = create_sector_template(region, econ_seed)
sector.name = 'Barlen Town Outskirts'
sector.environment_profile = EnvironmentProfile.CityOutdoors()
sector.set_coordinates((0,0,0))
town = Location(sector)
town.is_named = True
town.name = 'Barlen Town Outskirts'
town.dungeon_file_name = 'BarlenOutskirts'
sector.locations.add(town)
return sector
def create_razorwoods_camp(region):
def econ_seed(sector):
econ = EconomicProfile()
econ.demand[erukar.IurwoodLumber] = 10
econ.supply[erukar.IurwoodLumber] = 5000
econ.demand[erukar.AshLumber] = 10
econ.supply[erukar.AshLumber] = 5000
return econ
sector = create_sector_template(region, econ_seed)
sector.name = 'Feriden Razorwoods Camp'
sector.set_coordinates((0,-3,3))
sector.environment_profile = EnvironmentProfile.SnowyWoodlands()
camp = Location(sector)
camp.is_named = True
camp.name = 'Feriden Razorwoods Camp'
camp.dungeon_file_name = 'RazorwoodsCamp'
sector.locations.add(camp)
return sector
def create_izeth_terrace(region):
sector = create_sector_template(region)
sector.name = 'Izeth Citadel Terrace'
sector.set_coordinates((0,-2,2))
sector.environment_profile = EnvironmentProfile.SnowyWoodlands()
terrace = Location(sector)
terrace.is_named = True
terrace.name = 'Izeth Citadel Terrace'
terrace.chunks = [Chunk()]
sector.locations.add(terrace)
return sector
def create_izeth_citadel_1f(region):
sector = Sector(region)
sector.name = 'Izeth Citadel 1F'
sector.set_coordinates("IzethCitadel1F")
citadel_1f = Location(sector)
citadel_1f.is_named = True
citadel_1f.name = 'Izeth Citadel 1F'
citadel_1f.dungeon_file_name = 'IzethCitadel1F'
sector.locations.add(citadel_1f)
return sector
def create_sector_template(region=None, econ_seed_fn=None):
sector = OverlandSector(region, econ_seed_fn)
sector.environment_profile = EnvironmentProfile.SnowyWoodlands()
return sector
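def example_region_summary():
    # Illustrative sketch only (assumes the erukar engine classes behave as
    # they are used above): builds the region and lists its permitted sector
    # coordinates. Not called anywhere by the game itself.
    region = create()
    print(region.name)
    for coordinates in acceptable_bounds():
        print(coordinates)
    return region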
|
agpl-3.0
| -5,317,180,016,940,680,000 | 31.68932 | 233 | 0.674785 | false | 2.992889 | false | false | false |
luk156/brick
|
documenti_acquisto/models.py
|
1
|
3097
|
from django.db import models
from mptt.models import MPTTModel, TreeForeignKey
# Create your models here.
from django import forms
from suit.widgets import SuitDateWidget
from django.contrib.admin import widgets
from ore.models import *
# Create your models here.
class Fornitore(models.Model):
rag = models.TextField('Ragione Sociale', max_length=50)
mail = models.EmailField('E-Mail', blank=True, null=True)
telefono = models.IntegerField('Telefono principale', blank=True, null=True)
indirizzo = models.TextField('Indirizzo', max_length=100, blank=True, null=True)
def __unicode__(self):
return u'%s' % (self.rag)
class Meta:
verbose_name = "Fornitore"
verbose_name_plural = "Fornitori"
class Articolo(models.Model):
descrizione = models.TextField('Descrizione', max_length=50)
class Meta:
verbose_name = "Articolo"
verbose_name_plural = "Articoli"
def __unicode__(self):
return u'%s' % (self.descrizione)
class Documento_acquisto(models.Model):
data_emissione = models.DateField('Data di emissione')
fornitore = models.ForeignKey(Fornitore, related_name='fornitore_ddt')
class Meta:
abstract = False
def importo(self):
i = 0
for b in self.documento_bene.all():
i = i + b.importo()
return i
class Bene(models.Model):
articolo = models.ForeignKey(Articolo, related_name='articolo_bene')
quantita = models.DecimalField('Quantita', max_digits=8, decimal_places=2)
prezzo_unitario = models.DecimalField('Prezzo unitario', max_digits=8, decimal_places=2)
documento = models.ForeignKey('Documento_acquisto', related_name='documento_bene')
cantiere = models.ForeignKey(Cantiere, related_name='cantiere_bene')
class Meta:
verbose_name = "Bene"
verbose_name_plural = "Beni"
def importo(self):
return self.quantita * self.prezzo_unitario
def __unicode__(self):
return u'%s x %s' % (self.articolo,self.quantita)
class Documento_trasporto(Documento_acquisto):
convertito = models.BooleanField(default=False)
class Meta:
verbose_name = 'Documento di trasporto'
verbose_name_plural = 'Documenti di trasporto'
def __unicode__(self):
return u'%s (%s)' % (self.fornitore,self.data_emissione)
class Documento_trasportoForm(forms.ModelForm):
#data = forms.DateField(widget=widgets.AdminDateWidget)
class Meta:
model = Documento_trasporto
exclude = ['convertito']
widgets = {
'data_emissione': SuitDateWidget,
}
class Fattura_acquisto(Documento_acquisto):
data_scadenza = models.DateField('Data di scadenza')
class Meta:
verbose_name = 'Fattura di acquisto'
verbose_name_plural = 'Fatture di acquisto'
class Fattura_acquistoForm(forms.ModelForm):
#data = forms.DateField(widget=widgets.AdminDateWidget)
class Meta:
model = Fattura_acquisto
widgets = {
'data_emissione': SuitDateWidget,
'data_scadenza': SuitDateWidget,
}
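def example_importo_fattura():
    # Illustrative sketch only: assumes a configured Django project with at
    # least one Cantiere already saved; every value below is a placeholder.
    fornitore = Fornitore.objects.create(rag='Fornitore di prova')
    fattura = Fattura_acquisto.objects.create(
        fornitore=fornitore, data_emissione='2014-01-01',
        data_scadenza='2014-02-01')
    articolo = Articolo.objects.create(descrizione='Mattoni')
    cantiere = Cantiere.objects.all()[0]
    Bene.objects.create(articolo=articolo, quantita=10, prezzo_unitario=2,
                        documento=fattura, cantiere=cantiere)
    return fattura.importo()  # 10 * 2 = 20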
|
agpl-3.0
| -7,517,543,585,246,214,000 | 33.422222 | 92 | 0.671295 | false | 3.312299 | false | false | false |
xmendez/wfuzz
|
src/wfuzz/plugins/payloads/hexrange.py
|
1
|
1661
|
from wfuzz.externals.moduleman.plugin import moduleman_plugin
from wfuzz.plugin_api.base import BasePayload
from wfuzz.exception import FuzzExceptBadOptions
from wfuzz.fuzzobjects import FuzzWordType
@moduleman_plugin
class hexrange(BasePayload):
name = "hexrange"
author = (
"Carlos del Ojo",
"Christian Martorella",
"Adapted to newer versions Xavi Mendez (@xmendez)",
)
version = "0.1"
description = ()
summary = "Returns each hex number of the given hex range."
category = ["default"]
priority = 99
parameters = (
("range", "", True, "Range of hex numbers to generate in the form of 00-ff."),
)
default_parameter = "range"
def __init__(self, params):
BasePayload.__init__(self, params)
try:
ran = self.params["range"].split("-")
self.minimum = int(ran[0], 16)
self.maximum = int(ran[1], 16)
self.__count = self.maximum - self.minimum + 1
self.current = self.minimum
self.lgth = max(
len(ran[0]), len(ran[1]), len(hex(self.maximum).replace("0x", ""))
)
except ValueError:
raise FuzzExceptBadOptions('Bad range format (eg. "0-ffa")')
def count(self):
return self.__count
def get_type(self):
return FuzzWordType.WORD
def get_next(self):
if self.current > self.maximum:
raise StopIteration
pl = "%" + str(self.lgth) + "s"
num = hex(self.current).replace("0x", "")
pl = pl % (num)
payl = pl.replace(" ", "0")
self.current += 1
return payl
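# Standalone sketch of the range handling above (illustrative only; it does
# not instantiate the plugin and the "0a-0f" range is an arbitrary example).
def _example_hex_sequence(range_spec="0a-0f"):
    low, high = (int(part, 16) for part in range_spec.split("-"))
    width = max(len(part) for part in range_spec.split("-"))
    # Essentially mirrors get_next(): zero-padded lowercase hex words,
    # inclusive of both ends of the range.
    return ["{0:0{1}x}".format(value, width) for value in range(low, high + 1)]
# _example_hex_sequence() -> ['0a', '0b', '0c', '0d', '0e', '0f']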
|
gpl-2.0
| 7,504,221,438,334,975,000 | 27.152542 | 86 | 0.571945 | false | 3.691111 | false | false | false |
jdmonaco/vmo-feedback-model
|
src/remapping/simulate.py
|
1
|
5749
|
# encoding: utf-8
"""
simulate.py -- Simulate double rotation experiments using VMOModel
Exported namespace: VMOExperiment
Created by Joe Monaco on 2010-02-03.
Copyright (c) 2009-2011 Johns Hopkins University. All rights reserved.
This software is provided AS IS under the terms of the Open Source MIT License.
See http://www.opensource.org/licenses/mit-license.php.
"""
# Library imports
from IPython.kernel.client import MapTask
import numpy as np
import os
# Package imports
from ..core.analysis import BaseAnalysis
from ..double_rotation import VMODoubleRotation
from ..session import VMOSession
# Directory constants
RAT_DIR = "Rat%02d"
def run_session(model_dict, save_dir, get_clone=False):
"""Run a session as part of a double-rotation experiment
"""
success = False
try:
model = VMODoubleRotation(**model_dict)
model.advance()
data = VMOSession(model)
VMOSession.save_session_list([data], save_dir)
except:
raise
else:
success = True
if get_clone:
success = model.clone_dict()
return success
class VMOExperiment(BaseAnalysis):
"""
Run double-rotation experiments using the VMODoubleRotation model class
Convenience methods:
run_mismatch_analyses -- Run mismatch analysis on each simulated mismatch
angle followed by a remapping trends analysis. All data is saved to
the analysis data directory.
"""
label = "Cue Experiment"
def collect_data(self, rats=1, mismatch=None, **kwargs):
"""Run the simulations and collect results data
Simulated experimental data is saved in per-rat directories containing
MIS_XXX.tar.gz archive files of VMOSession objects.
Keyword arguments:
rats -- number of experiments to run with different random networks
mismatch -- list of mismatch angles (in degrees; don't include 0 for
standard session, this is done automatically)
Additional keywords are passed in as model parameters.
"""
# Set the mismatch angles and convert to radians
if mismatch is None:
mismatch = [45, 90, 135, 180]
self.results['mismatch'] = mismatch
self.results['rats'] = rats
mismatch = [(np.pi/180) * angle for angle in mismatch]
# Set up parameter dictionary
pdict = dict(
N_theta=1000,
N_outputs=500,
C_W=0.05,
cue_std=np.pi/24
)
pdict.update(kwargs)
# Set up IPython engines
mec = self.get_multiengine_client()
tc = self.get_task_client()
mec.execute('from vmo_feedback import VMODoubleRotation, VMOSession')
mec.clear_queue()
tc.clear()
# Run STD sessions and save model states
mis_args = []
for rat in xrange(rats):
# Run the standard session to get network, fixed points
self.out('Running standard session for rat %d...'%rat)
if rats == 1:
rat_dir = self.datadir
else:
rat_dir = os.path.join(self.datadir, RAT_DIR%rat)
pdict.update(mismatch=[0])
clone_dict = run_session(pdict, rat_dir, get_clone=True)
if not clone_dict:
self.out('STD session failed', error=True)
continue
mis_args.append((rat_dir, clone_dict))
# Farm out mismatch sessions to task controller
self.out('Now task-farming the mismatch sessions...')
for rat in xrange(rats):
rat_dir, clone_dict = mis_args[rat]
tasks = []
for angle in mismatch:
clone_dict.update(mismatch=[angle])
tasks.append(
tc.run(
MapTask(run_session,
args=(clone_dict, rat_dir),
kwargs={'get_clone':False})))
tc.barrier(tasks)
success = np.all([tc.get_task_result(t_id) for t_id in tasks])
tc.clear()
if success:
self.out('Successfully completed mismatch sessions!')
else:
self.out('Error(s) detected during mismatch sessions',
error=True)
# Good-bye
self.out('All done!')
def run_mismatch_analyses(self):
"""Perform MismatchAnalysis for each mismatch angle in this experiment
and then perform MismatchTrends on those results, saving all the data
and figures in this experiment's data directory.
"""
if not self.finished:
self.out('Analysis has not been completed yet!', error=True)
return
from mismatch import MismatchAnalysis
from trends import MismatchTrends
from pylab import close
self.out('Running mismatch analysis for each angle...')
mismatch = self.results['mismatch']
MA_list = []
for angle in mismatch:
MA_dir = os.path.join(self.datadir, 'mismatch_%03d'%angle)
MA = MismatchAnalysis(desc='mismatch', datadir=MA_dir)
MA(load_dir=self.datadir, mismatch=angle)
MA.view()
MA.save_plots()
close('all')
MA_list.append(MA)
self.out('Running mismatch trends analysis across angles...')
trends_dir = os.path.join(self.datadir, 'trends')
trends = MismatchTrends(desc='experiment', datadir=trends_dir)
trends(*MA_list)
trends.save_data()
trends.view()
trends.save_plots()
close('all')
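def example_experiment():
    """Illustrative sketch only: runs a small double-rotation experiment and
    the follow-up analyses. The description, data directory, rat count and
    mismatch angles are placeholders; an IPython engine/task cluster must be
    available, as required by collect_data().
    """
    experiment = VMOExperiment(desc='demo', datadir='demo_experiment')
    experiment(rats=1, mismatch=[45, 180])
    experiment.run_mismatch_analyses()
    return experiment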
|
mit
| -745,938,006,877,985,200 | 32.823529 | 80 | 0.589494 | false | 4.245938 | false | false | false |
jlyonsmith/pytools
|
Test/test_buckle.py
|
1
|
2894
|
import os
import subprocess
def writeFile(fileName, contents):
with open(fileName, "w") as f:
f.write(contents)
os.makedirs('Scratch', exist_ok=True)
os.chdir('Scratch')
writeFile('test_buckle.resx', '''<?xml version="1.0" encoding="utf-8"?>
<root>
<xsd:schema id="root" xmlns="" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:msdata="urn:schemas-microsoft-com:xml-msdata">
<xsd:import namespace="http://www.w3.org/XML/1998/namespace" />
<xsd:element name="root" msdata:IsDataSet="true">
<xsd:complexType>
<xsd:choice maxOccurs="unbounded">
<xsd:element name="metadata">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" />
</xsd:sequence>
<xsd:attribute name="name" use="required" type="xsd:string" />
<xsd:attribute name="type" type="xsd:string" />
<xsd:attribute name="mimetype" type="xsd:string" />
<xsd:attribute ref="xml:space" />
</xsd:complexType>
</xsd:element>
<xsd:element name="assembly">
<xsd:complexType>
<xsd:attribute name="alias" type="xsd:string" />
<xsd:attribute name="name" type="xsd:string" />
</xsd:complexType>
</xsd:element>
<xsd:element name="data">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
<xsd:element name="comment" type="xsd:string" minOccurs="0" msdata:Ordinal="2" />
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required" msdata:Ordinal="1" />
<xsd:attribute name="type" type="xsd:string" msdata:Ordinal="3" />
<xsd:attribute name="mimetype" type="xsd:string" msdata:Ordinal="4" />
<xsd:attribute ref="xml:space" />
</xsd:complexType>
</xsd:element>
<xsd:element name="resheader">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required" />
</xsd:complexType>
</xsd:element>
</xsd:choice>
</xsd:complexType>
</xsd:element>
</xsd:schema>
<resheader name="resmimetype">
<value>text/microsoft-resx</value>
</resheader>
<resheader name="version">
<value>2.0</value>
</resheader>
<resheader name="reader">
<value>System.Resources.ResXResourceReader, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
</resheader>
<resheader name="writer">
<value>System.Resources.ResXResourceWriter, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
</resheader>
<data name="String" xml:space="preserve">
<value>String</value>
</data>
<data name="StringWithArgs" xml:space="preserve">
<value>String {0} {1}</value>
</data>
</root>''')
# Create .resources and .cs files
subprocess.call(
("/usr/local/bin/python3", "../../buckle.py",
"test_buckle.resx", "-o", "test_buckle.cs", "-r", "test_buckle.resources",
"-n", "ToolBelt", "-c", "TestBuckleResources", "-b", "SillyBaseName"))
os.chdir('..')
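# Illustrative follow-up check (not part of the original script): report
# whether buckle.py produced the output files named in the arguments above.
for expected in ('Scratch/test_buckle.cs', 'Scratch/test_buckle.resources'):
    print(expected, 'exists:', os.path.exists(expected))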
|
mit
| -112,656,978,342,174,060 | 33.047059 | 139 | 0.707326 | false | 2.801549 | false | false | false |
cczhu/baseband
|
baseband/mark4/base.py
|
1
|
21350
|
# Licensed under the GPLv3 - see LICENSE
import numpy as np
from astropy.utils import lazyproperty
import astropy.units as u
from ..vlbi_base.base import (make_opener, VLBIFileBase, VLBIFileReaderBase,
VLBIStreamBase, VLBIStreamReaderBase,
VLBIStreamWriterBase)
from .header import Mark4Header
from .payload import Mark4Payload
from .frame import Mark4Frame
from .file_info import Mark4FileReaderInfo
__all__ = ['Mark4FileReader', 'Mark4FileWriter',
'Mark4StreamBase', 'Mark4StreamReader', 'Mark4StreamWriter',
'open']
# Look-up table for the number of bits in a byte.
nbits = ((np.arange(256)[:, np.newaxis] >> np.arange(8) & 1)
.sum(1).astype(np.int16))
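# For example, nbits[0x00] == 0, nbits[0x0f] == 4 and nbits[0xff] == 8;
# locate_frame() below uses this table to count the set bits in each byte
# while searching for the 0xffffffff sync pattern.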
class Mark4FileReader(VLBIFileReaderBase):
"""Simple reader for Mark 4 files.
Wraps a binary filehandle, providing methods to help interpret the data,
such as `locate_frame`, `read_frame` and `get_frame_rate`.
Parameters
----------
fh_raw : filehandle
Filehandle of the raw binary data file.
ntrack : int or None, optional.
Number of Mark 4 bitstreams. Can be determined automatically as
part of locating the first frame.
decade : int or None
Decade in which the observations were taken. Can instead pass an
approximate ``ref_time``.
ref_time : `~astropy.time.Time` or None
Reference time within 4 years of the observation time. Used only
if ``decade`` is not given.
"""
def __init__(self, fh_raw, ntrack=None, decade=None, ref_time=None):
self.ntrack = ntrack
self.decade = decade
self.ref_time = ref_time
super().__init__(fh_raw)
def __repr__(self):
return ("{name}(fh_raw={s.fh_raw}, ntrack={s.ntrack}, "
"decade={s.decade}, ref_time={s.ref_time})"
.format(name=self.__class__.__name__, s=self))
info = Mark4FileReaderInfo()
def read_header(self):
"""Read a single header from the file.
Returns
-------
header : `~baseband.mark4.Mark4Header`
"""
return Mark4Header.fromfile(self, ntrack=self.ntrack,
decade=self.decade, ref_time=self.ref_time)
def read_frame(self, verify=True):
"""Read a single frame (header plus payload).
Returns
-------
frame : `~baseband.mark4.Mark4Frame`
With ``.header`` and ``.data`` properties that return the
:class:`~baseband.mark4.Mark4Header` and data encoded in the frame,
respectively.
verify : bool, optional
Whether to do basic checks of frame integrity. Default: `True`.
"""
return Mark4Frame.fromfile(self.fh_raw, self.ntrack,
decade=self.decade, ref_time=self.ref_time,
verify=verify)
def get_frame_rate(self):
"""Determine the number of frames per second.
The frame rate is calculated from the time elapsed between the
first two frames, as inferred from their time stamps.
Returns
-------
frame_rate : `~astropy.units.Quantity`
Frames per second.
"""
with self.temporary_offset():
self.seek(0)
self.locate_frame()
header0 = self.read_header()
self.seek(header0.payload_nbytes, 1)
header1 = self.read_header()
# Mark 4 specification states frames-lengths range from 1.25 ms
# to 160 ms.
tdelta = header1.fraction[0] - header0.fraction[0]
return np.round(1 / tdelta) * u.Hz
def locate_frame(self, forward=True, maximum=None):
"""Locate the frame nearest the current position.
The search is for the following pattern:
* 32*tracks bits set at offset bytes
* 1*tracks bits unset before offset
* 32*tracks bits set at offset+2500*tracks bytes
        This reflects the 'sync_pattern' of 0xffffffff for a given header and
        for the one a frame ahead; the pattern sits in word 2, plus the lsb of
        word 1, which is 'system_id'.
        If the file does not have ntrack set, it will be auto-determined.
Parameters
----------
forward : bool, optional
Whether to search forwards or backwards. Default: `True`.
maximum : int, optional
Maximum number of bytes forward to search through.
Default: twice the frame size (``20000 * ntrack // 8``).
Returns
-------
offset : int or `None`
Byte offset of the next frame. `None` if the search was not
successful.
"""
fh = self.fh_raw
file_pos = fh.tell()
# Use initializer value (determines ntrack if not already given).
ntrack = self.ntrack
if ntrack is None:
fh.seek(0)
ntrack = self.determine_ntrack(maximum=maximum)
if ntrack is None:
raise ValueError("cannot determine ntrack automatically. "
"Try passing in an explicit value.")
if forward and fh.tell() >= file_pos:
return fh.tell()
fh.seek(file_pos)
nset = np.ones(32 * ntrack // 8, dtype=np.int16)
nunset = np.ones(ntrack // 8, dtype=np.int16)
frame_nbytes = ntrack * 2500
fh.seek(0, 2)
filesize = fh.tell()
if filesize < frame_nbytes:
fh.seek(file_pos)
return None
if maximum is None:
maximum = 2 * frame_nbytes
# Loop over chunks to try to find the frame marker.
step = frame_nbytes // 2
# Read a bit more at every step to ensure we don't miss a "split"
# header.
block = step + 160 * ntrack // 8
if forward:
iterate = range(max(min(file_pos, filesize - block), 0),
max(min(file_pos + maximum, filesize - block + 1),
1),
step)
else:
iterate = range(min(max(file_pos - step, 0), filesize - block),
min(max(file_pos - step - maximum - 1, -1),
filesize - block),
-step)
for frame in iterate:
fh.seek(frame)
data = np.frombuffer(fh.read(block), dtype=np.uint8)
assert len(data) == block
# Find header pattern.
databits1 = nbits[data]
nosync = np.convolve(databits1[len(nunset):] < 6, nset, 'valid')
nolow = np.convolve(databits1[:-len(nset)] > 1, nunset, 'valid')
wrong = nosync + nolow
possibilities = np.where(wrong == 0)[0]
# Check candidates by seeing whether there is a sync word
# a frame size ahead. (Note: loop can be empty.)
for possibility in possibilities[::1 if forward else -1]:
# Real start of possible header.
frame_start = frame + possibility - 63 * ntrack // 8
if (forward and frame_start < file_pos or
not forward and frame_start > file_pos):
continue
# Check there is a header following this.
check = frame_start + frame_nbytes
if check >= filesize - 32 * 2 * ntrack // 8 - len(nunset):
# But do before this one if we're beyond end of file.
check = frame_start - frame_nbytes
if check < 0: # Assume OK if only one frame fits in file.
if frame_start + frame_nbytes > filesize:
continue
else:
break
fh.seek(check + 32 * 2 * ntrack // 8)
check_data = np.frombuffer(fh.read(len(nunset)),
dtype=np.uint8)
databits2 = nbits[check_data]
if np.all(databits2 >= 6):
break # Got it!
else: # None of them worked, so do next block.
continue
fh.seek(frame_start)
return frame_start
fh.seek(file_pos)
return None
def determine_ntrack(self, maximum=None):
"""Determines the number of tracks, by seeking the next frame.
Uses `locate_frame` to look for the first occurrence of a frame from
the current position for all supported ``ntrack`` values. Returns the
first ``ntrack`` for which `locate_frame` is successful, setting
the file's ``ntrack`` property appropriately, and leaving the
file pointer at the start of the frame.
Parameters
----------
maximum : int, optional
Maximum number of bytes forward to search through.
Default: twice the frame size (``20000 * ntrack // 8``).
Returns
-------
ntrack : int or None
Number of Mark 4 bitstreams. `None` if no frame was found.
"""
# Currently only 16, 32 and 64-track frames supported.
old_ntrack = self.ntrack
for ntrack in 16, 32, 64:
try:
self.ntrack = ntrack
if self.locate_frame(maximum=maximum) is not None:
return ntrack
except Exception:
self.ntrack = old_ntrack
raise
self.ntrack = old_ntrack
return None
def find_header(self, forward=True, maximum=None):
"""Find the nearest header from the current position.
If successful, the file pointer is left at the start of the header.
Parameters
----------
forward : bool, optional
Seek forward if `True` (default), backward if `False`.
maximum : int, optional
Maximum number of bytes forward to search through.
Default: twice the frame size (``20000 * ntrack // 8``).
Returns
-------
header : :class:`~baseband.mark4.Mark4Header` or None
Retrieved Mark 4 header, or `None` if nothing found.
"""
offset = self.locate_frame(forward=forward)
if offset is None:
return None
header = self.read_header()
self.fh_raw.seek(offset)
return header
class Mark4FileWriter(VLBIFileBase):
"""Simple writer for Mark 4 files.
Adds `write_frame` method to the VLBI binary file wrapper.
"""
def write_frame(self, data, header=None, **kwargs):
"""Write a single frame (header plus payload).
Parameters
----------
data : `~numpy.ndarray` or `~baseband.mark4.Mark4Frame`
If an array, a header should be given, which will be used to
get the information needed to encode the array, and to construct
the Mark 4 frame.
header : `~baseband.mark4.Mark4Header`
Can instead give keyword arguments to construct a header. Ignored
if payload is a :class:`~baseband.mark4.Mark4Frame` instance.
**kwargs :
If ``header`` is not given, these are used to initialize one.
"""
if not isinstance(data, Mark4Frame):
data = Mark4Frame.fromdata(data, header, **kwargs)
return data.tofile(self.fh_raw)
class Mark4StreamBase(VLBIStreamBase):
"""Base for Mark 4 streams."""
def __init__(self, fh_raw, header0, sample_rate=None, squeeze=True,
subset=(), fill_value=0., verify=True):
super().__init__(
fh_raw, header0=header0, sample_rate=sample_rate,
samples_per_frame=header0.samples_per_frame,
unsliced_shape=(header0.nchan,),
bps=header0.bps, complex_data=False, squeeze=squeeze,
subset=subset, fill_value=fill_value, verify=verify)
self._frame_rate = int(round((self.sample_rate /
self.samples_per_frame).to_value(u.Hz)))
class Mark4StreamReader(Mark4StreamBase, VLBIStreamReaderBase):
"""VLBI Mark 4 format reader.
Allows access to a Mark 4 file as a continuous series of samples. Parts
of the data stream replaced by header values are filled in.
Parameters
----------
fh_raw : filehandle
Filehandle of the raw Mark 4 stream.
sample_rate : `~astropy.units.Quantity`, optional
Number of complete samples per second, i.e. the rate at which each
channel is sampled. If `None`, will be inferred from scanning two
frames of the file.
ntrack : int or None, optional
Number of Mark 4 bitstreams. If `None` (default), will attempt to
automatically detect it by scanning the file.
decade : int or None
Decade of the observation start time (eg. ``2010`` for 2018), needed to
remove ambiguity in the Mark 4 time stamp. Can instead pass an
approximate ``ref_time``.
ref_time : `~astropy.time.Time` or None
Reference time within 4 years of the start time of the observations.
Used only if ``decade`` is not given.
squeeze : bool, optional
If `True` (default), remove any dimensions of length unity from
decoded data.
subset : indexing object, optional
Specific channels of the complete sample to decode (after possible
squeezing). If an empty tuple (default), all channels are read.
fill_value : float or complex, optional
Value to use for invalid or missing data. Default: 0.
verify : bool, optional
Whether to do basic checks of frame integrity when reading. The first
frame of the stream is always checked. Default: `True`.
"""
_sample_shape_maker = Mark4Payload._sample_shape_maker
def __init__(self, fh_raw, sample_rate=None, ntrack=None, decade=None,
ref_time=None, squeeze=True, subset=(), fill_value=0.,
verify=True):
if decade is None and ref_time is None:
raise TypeError("Mark 4 stream reader requires either decade or "
"ref_time to be passed in.")
# Get binary file reader.
fh_raw = Mark4FileReader(fh_raw, ntrack=ntrack, decade=decade,
ref_time=ref_time)
# Find first header, determining ntrack if needed.
header0 = fh_raw.find_header()
assert header0 is not None, (
"could not find a first frame using ntrack={}. Perhaps "
"try ntrack=None for auto-determination.".format(ntrack))
self._offset0 = fh_raw.tell()
super().__init__(
fh_raw, header0=header0, sample_rate=sample_rate,
squeeze=squeeze, subset=subset, fill_value=fill_value,
verify=verify)
# Use reference time in preference to decade so that a stream wrapping
# a decade will work.
self.fh_raw.decade = None
self.fh_raw.ref_time = self.start_time
@lazyproperty
def _last_header(self):
"""Last header of the file."""
last_header = super()._last_header
# Infer the decade, assuming the end of the file is no more than
# 4 years away from the start.
last_header.infer_decade(self.start_time)
return last_header
def _read_frame(self, index):
self.fh_raw.seek(self._offset0 + index * self.header0.frame_nbytes)
frame = self.fh_raw.read_frame(verify=self.verify)
# Set decoded value for invalid data.
frame.fill_value = self.fill_value
# TODO: add check that we got the right frame.
return frame
class Mark4StreamWriter(Mark4StreamBase, VLBIStreamWriterBase):
"""VLBI Mark 4 format writer.
Encodes and writes sequences of samples to file.
Parameters
----------
raw : filehandle
Which will write filled sets of frames to storage.
header0 : `~baseband.mark4.Mark4Header`
Header for the first frame, holding time information, etc. Can instead
give keyword arguments to construct a header (see ``**kwargs``).
sample_rate : `~astropy.units.Quantity`
Number of complete samples per second, i.e. the rate at which each
channel is sampled. Needed to calculate header timestamps.
squeeze : bool, optional
If `True` (default), `write` accepts squeezed arrays as input, and
adds any dimensions of length unity.
**kwargs
If no header is given, an attempt is made to construct one from these.
For a standard header, this would include the following.
--- Header keywords : (see :meth:`~baseband.mark4.Mark4Header.fromvalues`)
time : `~astropy.time.Time`
Start time of the file. Sets bcd-encoded unit year, day, hour, minute,
second in the header.
ntrack : int
Number of Mark 4 bitstreams (equal to number of channels times
``fanout`` times ``bps``)
bps : int
Bits per elementary sample.
fanout : int
Number of tracks over which a given channel is spread out.
"""
_sample_shape_maker = Mark4Payload._sample_shape_maker
def __init__(self, fh_raw, header0=None, sample_rate=None, squeeze=True,
**kwargs):
if header0 is None:
header0 = Mark4Header.fromvalues(**kwargs)
super().__init__(fh_raw=fh_raw, header0=header0,
sample_rate=sample_rate, squeeze=squeeze)
# Set up initial payload with right shape.
samples_per_payload = (
header0.samples_per_frame * header0.payload_nbytes //
header0.frame_nbytes)
self._payload = Mark4Payload.fromdata(
np.zeros((samples_per_payload, header0.nchan), np.float32),
header0)
def _make_frame(self, frame_index):
header = self.header0.copy()
header.update(time=self.start_time + frame_index /
self._frame_rate * u.s)
# Reuse payload.
return Mark4Frame(header, self._payload)
open = make_opener('Mark4', globals(), doc="""
--- For reading a stream : (see `~baseband.mark4.base.Mark4StreamReader`)
sample_rate : `~astropy.units.Quantity`, optional
Number of complete samples per second, i.e. the rate at which each channel
is sampled. If not given, will be inferred from scanning two frames of
the file.
ntrack : int, optional
Number of Mark 4 bitstreams. If `None` (default), will attempt to
automatically detect it by scanning the file.
decade : int or None
Decade of the observation start time (eg. ``2010`` for 2018), needed to
remove ambiguity in the Mark 4 time stamp (default: `None`). Can instead
pass an approximate ``ref_time``.
ref_time : `~astropy.time.Time` or None
Reference time within 4 years of the start time of the observations. Used
only if ``decade`` is not given.
squeeze : bool, optional
If `True` (default), remove any dimensions of length unity from
decoded data.
subset : indexing object, optional
Specific channels of the complete sample to decode (after possible
squeezing). If an empty tuple (default), all channels are read.
fill_value : float or complex, optional
Value to use for invalid or missing data. Default: 0.
verify : bool, optional
Whether to do basic checks of frame integrity when reading. The first
frame of the stream is always checked. Default: `True`.
--- For writing a stream : (see `~baseband.mark4.base.Mark4StreamWriter`)
header0 : `~baseband.mark4.Mark4Header`
Header for the first frame, holding time information, etc. Can instead
give keyword arguments to construct a header (see ``**kwargs``).
sample_rate : `~astropy.units.Quantity`
Number of complete samples per second, i.e. the rate at which each channel
is sampled. Needed to calculate header timestamps.
squeeze : bool, optional
If `True` (default), writer accepts squeezed arrays as input, and adds
any dimensions of length unity.
file_size : int or None, optional
When writing to a sequence of files, the maximum size of one file in bytes.
If `None` (default), the file size is unlimited, and only the first
file will be written to.
**kwargs
If the header is not given, an attempt will be made to construct one
with any further keyword arguments. See
:class:`~baseband.mark4.base.Mark4StreamWriter`.
Returns
-------
Filehandle
:class:`~baseband.mark4.base.Mark4FileReader` or
:class:`~baseband.mark4.base.Mark4FileWriter` (binary), or
:class:`~baseband.mark4.base.Mark4StreamReader` or
:class:`~baseband.mark4.base.Mark4StreamWriter` (stream)
Notes
-----
Although it is not generally expected to be useful for Mark 4, like for
other formats one can also pass to ``name`` a list, tuple, or subclass of
`~baseband.helpers.sequentialfile.FileNameSequencer`. For writing to multiple
files, the ``file_size`` keyword must be passed or only the first file will be
written to. One may also pass in a `~baseband.helpers.sequentialfile` object
(opened in 'rb' mode for reading or 'w+b' for writing), though for typical use
cases it is practically identical to passing in a list or template.
""")
|
gpl-3.0
| 7,541,743,570,628,958,000 | 39.131579 | 79 | 0.603419 | false | 4.145631 | false | false | false |
openai/cleverhans
|
cleverhans/model_zoo/madry_lab_challenges/cifar10_model.py
|
2
|
10334
|
"""cleverhans.model.Model implementation of cifar10_challenge.model.Model
This re-implementation factors variable creation apart from forward
propagation so it is possible to run forward propagation more than once
in the same model.
based on https://github.com/tensorflow/models/tree/master/resnet
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from cleverhans.serial import NoRefModel
class Layer(object):
def get_output_shape(self):
return self.output_shape
class ResNet(NoRefModel):
"""ResNet model."""
def __init__(self, layers, input_shape, scope=None):
"""ResNet constructor.
:param layers: a list of layers in CleverHans format
each with set_input_shape() and fprop() methods.
:param input_shape: 4-tuple describing input shape (e.g None, 32, 32, 3)
:param scope: string name of scope for Variables
This works in two ways.
If scope is None, the variables are not put in a scope, and the
model is compatible with Saver.restore from the public downloads
for the CIFAR10 Challenge.
If the scope is a string, then Saver.restore won't work, but the
model functions as a picklable NoRefModels that finds its variables
based on the scope.
"""
super(ResNet, self).__init__(scope, 10, {}, scope is not None)
if scope is None:
before = list(tf.trainable_variables())
before_vars = list(tf.global_variables())
self.build(layers, input_shape)
after = list(tf.trainable_variables())
after_vars = list(tf.global_variables())
self.params = [param for param in after if param not in before]
self.vars = [var for var in after_vars if var not in before_vars]
else:
with tf.variable_scope(self.scope):
self.build(layers, input_shape)
def get_vars(self):
if hasattr(self, "vars"):
return self.vars
return super(ResNet, self).get_vars()
def build(self, layers, input_shape):
self.layer_names = []
self.layers = layers
self.input_shape = input_shape
if isinstance(layers[-1], Softmax):
layers[-1].name = 'probs'
layers[-2].name = 'logits'
else:
layers[-1].name = 'logits'
for i, layer in enumerate(self.layers):
if hasattr(layer, 'name'):
name = layer.name
else:
name = layer.__class__.__name__ + str(i)
layer.name = name
self.layer_names.append(name)
layer.set_input_shape(input_shape)
input_shape = layer.get_output_shape()
def make_input_placeholder(self):
return tf.placeholder(tf.float32, (None, 32, 32, 3))
def make_label_placeholder(self):
return tf.placeholder(tf.float32, (None, 10))
def fprop(self, x, set_ref=False):
if self.scope is not None:
with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
return self._fprop(x, set_ref)
    return self._fprop(x, set_ref)
def _fprop(self, x, set_ref=False):
states = []
for layer in self.layers:
if set_ref:
layer.ref = x
x = layer.fprop(x)
assert x is not None
states.append(x)
states = dict(zip(self.layer_names, states))
return states
def add_internal_summaries(self):
pass
def _stride_arr(stride):
"""Map a stride scalar to the stride array for tf.nn.conv2d."""
return [1, stride, stride, 1]
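# For example, _stride_arr(2) returns [1, 2, 2, 1]: only the two spatial
# dimensions are strided, while batch and channel keep stride 1.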
class Input(Layer):
def __init__(self):
pass
def set_input_shape(self, input_shape):
batch_size, rows, cols, input_channels = input_shape
# assert self.mode == 'train' or self.mode == 'eval'
"""Build the core model within the graph."""
input_shape = list(input_shape)
input_shape[0] = 1
dummy_batch = tf.zeros(input_shape)
dummy_output = self.fprop(dummy_batch)
output_shape = [int(e) for e in dummy_output.get_shape()]
output_shape[0] = batch_size
self.output_shape = tuple(output_shape)
def fprop(self, x):
with tf.variable_scope('input', reuse=tf.AUTO_REUSE):
input_standardized = tf.map_fn(
lambda img: tf.image.per_image_standardization(img), x)
return _conv('init_conv', input_standardized,
3, 3, 16, _stride_arr(1))
class Conv2D(Layer):
def __init__(self):
pass
def set_input_shape(self, input_shape):
batch_size, rows, cols, input_channels = input_shape
    # Uncomment the following code to use the w28-10 wide residual network.
    # It is more memory-efficient than a very deep residual network and has
    # comparably good performance.
# https://arxiv.org/pdf/1605.07146v1.pdf
input_shape = list(input_shape)
input_shape[0] = 1
dummy_batch = tf.zeros(input_shape)
dummy_output = self.fprop(dummy_batch)
output_shape = [int(e) for e in dummy_output.get_shape()]
output_shape[0] = batch_size
self.output_shape = tuple(output_shape)
def fprop(self, x):
# Update hps.num_residual_units to 9
strides = [1, 2, 2]
activate_before_residual = [True, False, False]
filters = [16, 160, 320, 640]
res_func = _residual
with tf.variable_scope('unit_1_0', reuse=tf.AUTO_REUSE):
x = res_func(x, filters[0], filters[1], _stride_arr(strides[0]),
activate_before_residual[0])
for i in range(1, 5):
with tf.variable_scope(('unit_1_%d' % i), reuse=tf.AUTO_REUSE):
x = res_func(x, filters[1], filters[1],
_stride_arr(1), False)
with tf.variable_scope(('unit_2_0'), reuse=tf.AUTO_REUSE):
x = res_func(x, filters[1], filters[2], _stride_arr(strides[1]),
activate_before_residual[1])
for i in range(1, 5):
with tf.variable_scope(('unit_2_%d' % i), reuse=tf.AUTO_REUSE):
x = res_func(x, filters[2], filters[2],
_stride_arr(1), False)
with tf.variable_scope(('unit_3_0'), reuse=tf.AUTO_REUSE):
x = res_func(x, filters[2], filters[3], _stride_arr(strides[2]),
activate_before_residual[2])
for i in range(1, 5):
with tf.variable_scope(('unit_3_%d' % i), reuse=tf.AUTO_REUSE):
x = res_func(x, filters[3], filters[3],
_stride_arr(1), False)
with tf.variable_scope(('unit_last'), reuse=tf.AUTO_REUSE):
x = _batch_norm('final_bn', x)
x = _relu(x, 0.1)
x = _global_avg_pool(x)
return x
class Linear(Layer):
def __init__(self, num_hid):
self.num_hid = num_hid
def set_input_shape(self, input_shape):
batch_size, dim = input_shape
self.input_shape = [batch_size, dim]
self.dim = dim
self.output_shape = [batch_size, self.num_hid]
self.make_vars()
def make_vars(self):
with tf.variable_scope('logit', reuse=tf.AUTO_REUSE):
w = tf.get_variable(
'DW', [self.dim, self.num_hid],
initializer=tf.initializers.variance_scaling(
distribution='uniform'))
b = tf.get_variable('biases', [self.num_hid],
initializer=tf.initializers.constant())
return w, b
def fprop(self, x):
w, b = self.make_vars()
return tf.nn.xw_plus_b(x, w, b)
def _batch_norm(name, x):
"""Batch normalization."""
with tf.name_scope(name):
return tf.contrib.layers.batch_norm(
inputs=x,
decay=.9,
center=True,
scale=True,
activation_fn=None,
updates_collections=None,
is_training=False)
def _residual(x, in_filter, out_filter, stride,
activate_before_residual=False):
"""Residual unit with 2 sub layers."""
if activate_before_residual:
with tf.variable_scope('shared_activation'):
x = _batch_norm('init_bn', x)
x = _relu(x, 0.1)
orig_x = x
else:
with tf.variable_scope('residual_only_activation'):
orig_x = x
x = _batch_norm('init_bn', x)
x = _relu(x, 0.1)
with tf.variable_scope('sub1'):
x = _conv('conv1', x, 3, in_filter, out_filter, stride)
with tf.variable_scope('sub2'):
x = _batch_norm('bn2', x)
x = _relu(x, 0.1)
x = _conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
with tf.variable_scope('sub_add'):
if in_filter != out_filter:
orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
orig_x = tf.pad(
orig_x, [[0, 0], [0, 0],
[0, 0], [(out_filter - in_filter) // 2,
(out_filter - in_filter) // 2]])
x += orig_x
tf.logging.debug('image after unit %s', x.get_shape())
return x
def _decay():
"""L2 weight decay loss."""
costs = []
for var in tf.trainable_variables():
if var.op.name.find('DW') > 0:
costs.append(tf.nn.l2_loss(var))
return tf.add_n(costs)
def _conv(name, x, filter_size, in_filters, out_filters, strides):
"""Convolution."""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
n = filter_size * filter_size * out_filters
kernel = tf.get_variable(
'DW', [filter_size, filter_size, in_filters, out_filters],
tf.float32, initializer=tf.random_normal_initializer(
stddev=np.sqrt(2.0 / n)))
return tf.nn.conv2d(x, kernel, strides, padding='SAME')
def _relu(x, leakiness=0.0):
"""Relu, with optional leaky support."""
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
def _global_avg_pool(x):
assert x.get_shape().ndims == 4
return tf.reduce_mean(x, [1, 2])
class Softmax(Layer):
def __init__(self):
pass
def set_input_shape(self, shape):
self.input_shape = shape
self.output_shape = shape
def fprop(self, x):
return tf.nn.softmax(x)
class Flatten(Layer):
def __init__(self):
pass
def set_input_shape(self, shape):
self.input_shape = shape
output_width = 1
for factor in shape[1:]:
output_width *= factor
self.output_width = output_width
self.output_shape = [None, output_width]
def fprop(self, x):
return tf.reshape(x, [-1, self.output_width])
def make_wresnet(nb_classes=10, input_shape=(None, 32, 32, 3), scope=None):
layers = [Input(),
Conv2D(), # the whole ResNet is basically created in this layer
Flatten(),
Linear(nb_classes),
Softmax()]
model = ResNet(layers, input_shape, scope)
return model
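# A minimal usage sketch (illustrative only; a TensorFlow session and input
# feed are assumed to exist elsewhere):
#
#     model = make_wresnet(scope='ResNet')
#     x = model.make_input_placeholder()          # (None, 32, 32, 3)
#     states = model.fprop(x)
#     logits, probs = states['logits'], states['probs']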
|
mit
| 6,172,888,378,718,482,000 | 29.394118 | 76 | 0.613993 | false | 3.290035 | false | false | false |
taigaio/taiga-back
|
taiga/hooks/api.py
|
1
|
2892
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.utils.translation import ugettext as _
from taiga.base import exceptions as exc
from taiga.base import response
from taiga.base.api.viewsets import GenericViewSet
from taiga.base.utils import json
from taiga.projects.models import Project
from .exceptions import ActionSyntaxException
class BaseWebhookApiViewSet(GenericViewSet):
    # We don't want rest framework to parse the request body and transform it
    # into a dict in request.DATA; we need it raw.
parser_classes = ()
# This dict associates the event names we are listening for
# with their responsible classes (extending event_hooks.BaseEventHook)
event_hook_classes = {}
def _validate_signature(self, project, request):
        raise NotImplementedError
def _get_project(self, request):
project_id = request.GET.get("project", None)
try:
project = Project.objects.get(id=project_id)
return project
except (ValueError, Project.DoesNotExist):
return None
def _get_payload(self, request):
try:
payload = json.loads(request.body.decode("utf-8"))
except ValueError:
raise exc.BadRequest(_("The payload is not valid json"))
return payload
def _get_event_name(self, request):
        raise NotImplementedError
def create(self, request, *args, **kwargs):
project = self._get_project(request)
if not project:
raise exc.BadRequest(_("The project doesn't exist"))
if not self._validate_signature(project, request):
raise exc.BadRequest(_("Bad signature"))
if project.blocked_code is not None:
raise exc.Blocked(_("Blocked element"))
event_name = self._get_event_name(request)
payload = self._get_payload(request)
event_hook_class = self.event_hook_classes.get(event_name, None)
if event_hook_class is not None:
event_hook = event_hook_class(project, payload)
try:
event_hook.process_event()
except ActionSyntaxException as e:
raise exc.BadRequest(e)
return response.NoContent()
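# A minimal subclass sketch (illustrative only; ``MyIssueEventHook``, the
# header name and the shared-secret check are invented placeholders):
#
#     class ExampleWebhookApiViewSet(BaseWebhookApiViewSet):
#         event_hook_classes = {"issues": MyIssueEventHook}
#
#         def _validate_signature(self, project, request):
#             # replace with a real HMAC/shared-secret comparison
#             return request.GET.get("key") == "expected-secret"
#
#         def _get_event_name(self, request):
#             return request.META.get("HTTP_X_EXAMPLE_EVENT")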
|
agpl-3.0
| 7,278,701,815,280,412,000 | 34.703704 | 80 | 0.677732 | false | 4.284444 | false | false | false |
saeki-masaki/cinder
|
cinder/tests/unit/test_ibm_flashsystem.py
|
1
|
47040
|
# Copyright 2014 IBM Corp.
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Tests for the IBM FlashSystem volume driver.
"""
import mock
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six
import random
import re
from cinder import context
from cinder import exception
from cinder import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.ibm import flashsystem
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
class FlashSystemManagementSimulator(object):
def __init__(self):
# Default protocol is FC
self._protocol = 'FC'
self._volumes_list = {}
self._hosts_list = {}
self._mappings_list = {}
self._next_cmd_error = {
'lsnode': '',
'lssystem': '',
'lsmdiskgrp': ''
}
self._errors = {
            # CMMVC50000 is a fake error indicating that a command did not
            # produce the expected results. It stands in for various kinds of
            # CLI errors.
'CMMVC50000': ('', 'CMMVC50000 The command can not be executed '
'successfully.')
}
@staticmethod
def _find_unused_id(d):
ids = []
for v in d.values():
ids.append(int(v['id']))
ids.sort()
for index, n in enumerate(ids):
if n > index:
return six.text_type(index)
return six.text_type(len(ids))
@staticmethod
def _is_invalid_name(name):
if re.match(r'^[a-zA-Z_][\w ._-]*$', name):
return False
return True
@staticmethod
def _cmd_to_dict(arg_list):
no_param_args = [
'bytes',
'force'
]
one_param_args = [
'delim',
'hbawwpn',
'host',
'iogrp',
'iscsiname',
'mdiskgrp',
'name',
'scsi',
'size',
'unit'
]
# All commands should begin with svcinfo or svctask
if arg_list[0] not in ('svcinfo', 'svctask') or len(arg_list) < 2:
raise exception.InvalidInput(reason=six.text_type(arg_list))
ret = {'cmd': arg_list[1]}
arg_list.pop(0)
skip = False
for i in range(1, len(arg_list)):
if skip:
skip = False
continue
if arg_list[i][0] == '-':
param = arg_list[i][1:]
if param in no_param_args:
ret[param] = True
elif param in one_param_args:
ret[param] = arg_list[i + 1]
skip = True
else:
raise exception.InvalidInput(
reason=('unrecognized argument %s') % arg_list[i])
else:
ret['obj'] = arg_list[i]
return ret
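    # For example, the parsing rules above turn
    #     _cmd_to_dict(['svctask', 'mkvdisk', '-name', 'vol1',
    #                   '-size', '10', '-unit', 'gb'])
    # into {'cmd': 'mkvdisk', 'name': 'vol1', 'size': '10', 'unit': 'gb'}.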
@staticmethod
def _print_cmd_info(rows, delim=' ', nohdr=False, **kwargs):
"""Generic function for printing information."""
if nohdr:
del rows[0]
for index in range(len(rows)):
rows[index] = delim.join(rows[index])
return ('%s' % '\n'.join(rows), '')
@staticmethod
def _convert_units_bytes(num, unit):
unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
unit_index = 0
while unit.lower() != unit_array[unit_index].lower():
num = num * 1024
unit_index += 1
return six.text_type(num)
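    # For example, _convert_units_bytes(10, 'GB') returns '10737418240'
    # (10 * 1024**3 as a string).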
def _cmd_lshost(self, **kwargs):
"""svcinfo lshost -delim !
svcinfo lshost -delim ! <host>
"""
if 'obj' not in kwargs:
rows = []
rows.append(['id', 'name', 'port_count', 'iogrp_count', 'status'])
for host in self._hosts_list.values():
rows.append([host['id'], host['host_name'], '1', '1',
'degraded'])
if len(rows) > 1:
return self._print_cmd_info(rows=rows, **kwargs)
else:
return ('', '')
else:
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC50000']
host = self._hosts_list[host_name]
rows = []
rows.append(['id', host['id']])
rows.append(['name', host['host_name']])
rows.append(['port_count', '1'])
rows.append(['type', 'generic'])
rows.append(['mask', '1111'])
rows.append(['iogrp_count', '1'])
rows.append(['status', 'degraded'])
for port in host['iscsi_names']:
rows.append(['iscsi_name', port])
rows.append(['node_logged_in_count', '0'])
rows.append(['state', 'offline'])
for port in host['wwpns']:
rows.append(['WWPN', port])
rows.append(['node_logged_in_count', '0'])
rows.append(['state', 'active'])
if 'delim' in kwargs:
for index in range(len(rows)):
rows[index] = kwargs['delim'].join(rows[index])
return ('%s' % '\n'.join(rows), '')
def _cmd_lshostvdiskmap(self, **kwargs):
"""svcinfo lshostvdiskmap -delim ! <host_name>"""
if 'obj' not in kwargs:
return self._errors['CMMVC50000']
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC50000']
rows = []
rows.append(['id', 'name', 'SCSI_id', 'vdisk_id', 'vdisk_name',
'vdisk_UID'])
for mapping in self._mappings_list.values():
if (host_name == '') or (mapping['host'] == host_name):
volume = self._volumes_list[mapping['vol']]
rows.append([mapping['id'], mapping['host'],
mapping['lun'], volume['id'],
volume['name'], volume['vdisk_UID']])
return self._print_cmd_info(rows=rows, **kwargs)
def _cmd_lsmdiskgrp(self, **kwargs):
"""svcinfo lsmdiskgrp -gui -bytes -delim ! <pool>"""
status = 'online'
if self._next_cmd_error['lsmdiskgrp'] == 'error':
self._next_cmd_error['lsmdiskgrp'] = ''
return self._errors['CMMVC50000']
if self._next_cmd_error['lsmdiskgrp'] == 'status=offline':
self._next_cmd_error['lsmdiskgrp'] = ''
status = 'offline'
rows = [None] * 2
rows[0] = ['id', 'status', 'mdisk_count', 'vdisk_count', 'capacity',
'free_capacity', 'virtual_capacity', 'used_capacity',
'real_capacity', 'encrypted', 'type', 'encrypt']
rows[1] = ['0', status, '1', '0', '3573412790272',
'3529432325160', '1693247906775', '277841182',
'38203734097', 'no', 'parent', 'no']
if kwargs['obj'] == 'mdiskgrp0':
row = rows[1]
else:
return self._errors['CMMVC50000']
objrows = []
for idx, val in enumerate(rows[0]):
objrows.append([val, row[idx]])
if 'delim' in kwargs:
for index in range(len(objrows)):
objrows[index] = kwargs['delim'].join(objrows[index])
return ('%s' % '\n'.join(objrows), '')
def _cmd_lsnode(self, **kwargs):
"""svcinfo lsnode -delim !
svcinfo lsnode -delim ! <node>
"""
if self._protocol == 'FC' or self._protocol == 'both':
port_status = 'active'
else:
port_status = 'unconfigured'
rows1 = [None] * 7
rows1[0] = ['name', 'node1']
rows1[1] = ['port_id', '000000000000001']
rows1[2] = ['port_status', port_status]
rows1[3] = ['port_speed', '8Gb']
rows1[4] = ['port_id', '000000000000001']
rows1[5] = ['port_status', port_status]
rows1[6] = ['port_speed', '8Gb']
rows2 = [None] * 7
rows2[0] = ['name', 'node2']
rows2[1] = ['port_id', '000000000000002']
rows2[2] = ['port_status', port_status]
rows2[3] = ['port_speed', '8Gb']
rows2[4] = ['port_id', '000000000000002']
rows2[5] = ['port_status', port_status]
rows2[6] = ['port_speed', 'N/A']
rows3 = [None] * 3
rows3[0] = ['id', 'name', 'UPS_serial_number', 'WWNN', 'status',
'IO_group_id', 'IO_group_name', 'config_node',
'UPS_unique_id', 'hardware', 'iscsi_name', 'iscsi_alias',
'panel_name', 'enclosure_id', 'canister_id',
'enclosure_serial_number']
rows3[1] = ['1', 'node1', '', '0123456789ABCDEF', 'online', '0',
'io_grp0', 'yes', '', 'TR1', 'naa.0123456789ABCDEF', '',
'01-1', '1', '1', 'H441028']
rows3[2] = ['2', 'node2', '', '0123456789ABCDEF', 'online', '0',
'io_grp0', 'no', '', 'TR1', 'naa.0123456789ABCDEF', '',
'01-2', '1', '2', 'H441028']
if self._next_cmd_error['lsnode'] == 'error':
self._next_cmd_error['lsnode'] = ''
return self._errors['CMMVC50000']
rows = None
if 'obj' not in kwargs:
rows = rows3
elif kwargs['obj'] == '1':
rows = rows1
elif kwargs['obj'] == '2':
rows = rows2
else:
return self._errors['CMMVC50000']
if self._next_cmd_error['lsnode'] == 'header_mismatch':
rows[0].pop(2)
self._next_cmd_error['lsnode'] = ''
return self._print_cmd_info(rows=rows, delim=kwargs.get('delim', None))
def _cmd_lssystem(self, **kwargs):
"""svcinfo lssystem -delim !"""
open_access_enabled = 'off'
if self._next_cmd_error['lssystem'] == 'error':
self._next_cmd_error['lssystem'] = ''
return self._errors['CMMVC50000']
if self._next_cmd_error['lssystem'] == 'open_access_enabled=on':
self._next_cmd_error['lssystem'] = ''
open_access_enabled = 'on'
rows = [None] * 3
rows[0] = ['id', '0123456789ABCDEF']
rows[1] = ['name', 'flashsystem_1.2.3.4']
rows[2] = ['open_access_enabled', open_access_enabled]
return self._print_cmd_info(rows=rows, **kwargs)
def _cmd_lsportfc(self, **kwargs):
"""svcinfo lsportfc"""
if self._protocol == 'FC' or self._protocol == 'both':
status = 'active'
else:
status = 'unconfigured'
rows = [None] * 3
rows[0] = ['id', 'canister_id', 'adapter_id', 'port_id', 'type',
'port_speed', 'node_id', 'node_name', 'WWPN',
'nportid', 'status', 'attachment', 'topology']
rows[1] = ['0', '1', '1', '1', 'fc',
'8Gb', '1', 'node_1', 'AABBCCDDEEFF0011',
'000000', status, 'host', 'al']
rows[2] = ['1', '1', '1', '1', 'fc',
'8Gb', '1', 'node_1', 'AABBCCDDEEFF0010',
'000000', status, 'host', 'al']
return self._print_cmd_info(rows=rows, **kwargs)
def _cmd_lsportip(self, **kwargs):
"""svcinfo lsportip"""
if self._protocol == 'iSCSI' or self._protocol == 'both':
IP_address1 = '192.168.1.10'
IP_address2 = '192.168.1.11'
state = 'online'
speed = '8G'
else:
IP_address1 = ''
IP_address2 = ''
state = ''
speed = ''
rows = [None] * 3
rows[0] = ['id', 'node_id', 'node_name', 'canister_id', 'adapter_id',
'port_id', 'IP_address', 'mask', 'gateway', 'IP_address_6',
'prefix_6', 'gateway_6', 'MAC', 'duplex', 'state', 'speed',
'failover', 'link_state', 'host', 'host_6', 'vlan',
'vlan_6', 'adapter_location', 'adapter_port_id']
rows[1] = ['1', '1', 'node1', '0', '0',
'0', IP_address1, '', '', '',
'0', '', '11:22:33:44:55:AA', '', state, speed,
'no', 'active', '', '', '', '', '0', '0']
rows[2] = ['2', '2', 'node2', '0', '0',
'0', IP_address2, '', '', '',
'0', '', '11:22:33:44:55:BB', '', state, speed,
'no', 'active', '', '', '', '', '0', '0']
return self._print_cmd_info(rows=rows, **kwargs)
def _cmd_lsvdisk(self, **kwargs):
"""cmd: svcinfo lsvdisk -gui -bytes -delim ! <vdisk_name>"""
if 'obj' not in kwargs or (
'delim' not in kwargs) or (
'bytes' not in kwargs):
return self._errors['CMMVC50000']
if kwargs['obj'] not in self._volumes_list:
return self._errors['CMMVC50000']
vol = self._volumes_list[kwargs['obj']]
rows = []
rows.append(['id', vol['id']])
rows.append(['name', vol['name']])
rows.append(['status', vol['status']])
rows.append(['capacity', vol['capacity']])
rows.append(['vdisk_UID', vol['vdisk_UID']])
rows.append(['udid', ''])
rows.append(['open_access_scsi_id', '1'])
rows.append(['parent_mdisk_grp_id', '0'])
rows.append(['parent_mdisk_grp_name', 'mdiskgrp0'])
for index in range(len(rows)):
rows[index] = kwargs['delim'].join(rows[index])
return ('%s' % '\n'.join(rows), '')
def _cmd_lsvdiskhostmap(self, **kwargs):
"""svcinfo lsvdiskhostmap -delim ! <vdisk_name>"""
if 'obj' not in kwargs or (
'delim' not in kwargs):
return self._errors['CMMVC50000']
vdisk_name = kwargs['obj']
if vdisk_name not in self._volumes_list:
return self._errors['CMMVC50000']
rows = []
rows.append(['id', 'name', 'SCSI_id', 'host_id', 'host_name',
'vdisk_UID', 'IO_group_id', 'IO_group_name'])
mappings_found = 0
for mapping in self._mappings_list.values():
if (mapping['vol'] == vdisk_name):
mappings_found += 1
volume = self._volumes_list[mapping['vol']]
host = self._hosts_list[mapping['host']]
rows.append([volume['id'], volume['name'], '1', host['id'],
host['host_name'], volume['vdisk_UID'],
'0', 'mdiskgrp0'])
if mappings_found:
return self._print_cmd_info(rows=rows, **kwargs)
else:
return ('', '')
def _cmd_expandvdisksize(self, **kwargs):
"""svctask expandvdisksize -size <size> -unit gb <vdisk_name>"""
if 'obj' not in kwargs:
return self._errors['CMMVC50000']
vol_name = kwargs['obj'].strip('\'\"')
if 'size' not in kwargs:
return self._errors['CMMVC50000']
size = int(kwargs['size'])
if vol_name not in self._volumes_list:
return self._errors['CMMVC50000']
curr_size = int(self._volumes_list[vol_name]['capacity'])
addition = size * units.Gi
self._volumes_list[vol_name]['capacity'] = six.text_type(
curr_size + addition)
return ('', '')
def _cmd_mkvdisk(self, **kwargs):
"""svctask mkvdisk -name <name> -mdiskgrp <mdiskgrp> -iogrp <iogrp>
-size <size> -unit <unit>
"""
if 'name' not in kwargs or (
'size' not in kwargs) or (
'unit' not in kwargs):
return self._errors['CMMVC50000']
vdisk_info = {}
vdisk_info['id'] = self._find_unused_id(self._volumes_list)
vdisk_info['name'] = kwargs['name'].strip('\'\"')
vdisk_info['status'] = 'online'
vdisk_info['capacity'] = self._convert_units_bytes(
int(kwargs['size']), kwargs['unit'])
vdisk_info['vdisk_UID'] = ('60050760') + ('0' * 14) + vdisk_info['id']
if vdisk_info['name'] in self._volumes_list:
return self._errors['CMMVC50000']
else:
self._volumes_list[vdisk_info['name']] = vdisk_info
return ('Virtual Disk, id [%s], successfully created' %
(vdisk_info['id']), '')
def _cmd_rmvdisk(self, **kwargs):
"""svctask rmvdisk -force <vdisk_name>"""
if 'obj' not in kwargs:
return self._errors['CMMVC50000']
vdisk_name = kwargs['obj'].strip('\'\"')
if vdisk_name not in self._volumes_list:
return self._errors['CMMVC50000']
del self._volumes_list[vdisk_name]
return ('', '')
def _add_port_to_host(self, host_info, **kwargs):
if 'iscsiname' in kwargs:
added_key = 'iscsi_names'
added_val = kwargs['iscsiname'].strip('\'\"')
elif 'hbawwpn' in kwargs:
added_key = 'wwpns'
added_val = kwargs['hbawwpn'].strip('\'\"')
else:
return self._errors['CMMVC50000']
host_info[added_key].append(added_val)
for v in self._hosts_list.values():
if v['id'] == host_info['id']:
continue
for port in v[added_key]:
if port == added_val:
return self._errors['CMMVC50000']
return ('', '')
def _cmd_mkhost(self, **kwargs):
"""svctask mkhost -force -hbawwpn <wwpn> -name <host_name>
svctask mkhost -force -iscsiname <initiator> -name <host_name>
"""
if 'name' not in kwargs:
return self._errors['CMMVC50000']
host_name = kwargs['name'].strip('\'\"')
if self._is_invalid_name(host_name):
return self._errors['CMMVC50000']
if host_name in self._hosts_list:
return self._errors['CMMVC50000']
host_info = {}
host_info['id'] = self._find_unused_id(self._hosts_list)
host_info['host_name'] = host_name
host_info['iscsi_names'] = []
host_info['wwpns'] = []
out, err = self._add_port_to_host(host_info, **kwargs)
if not len(err):
self._hosts_list[host_name] = host_info
return ('Host, id [%s], successfully created' %
(host_info['id']), '')
else:
return (out, err)
def _cmd_addhostport(self, **kwargs):
"""svctask addhostport -force -hbawwpn <wwpn> <host>
svctask addhostport -force -iscsiname <initiator> <host>
"""
if 'obj' not in kwargs:
return self._errors['CMMVC50000']
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC50000']
host_info = self._hosts_list[host_name]
return self._add_port_to_host(host_info, **kwargs)
def _cmd_rmhost(self, **kwargs):
"""svctask rmhost <host>"""
if 'obj' not in kwargs:
return self._errors['CMMVC50000']
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC50000']
for v in self._mappings_list.values():
if (v['host'] == host_name):
return self._errors['CMMVC50000']
del self._hosts_list[host_name]
return ('', '')
def _cmd_mkvdiskhostmap(self, **kwargs):
"""svctask mkvdiskhostmap -host <host> -scsi <lun> <vdisk_name>"""
mapping_info = {}
mapping_info['id'] = self._find_unused_id(self._mappings_list)
if 'host' not in kwargs or (
'scsi' not in kwargs) or (
'obj' not in kwargs):
return self._errors['CMMVC50000']
mapping_info['host'] = kwargs['host'].strip('\'\"')
mapping_info['lun'] = kwargs['scsi'].strip('\'\"')
mapping_info['vol'] = kwargs['obj'].strip('\'\"')
if mapping_info['vol'] not in self._volumes_list:
return self._errors['CMMVC50000']
if mapping_info['host'] not in self._hosts_list:
return self._errors['CMMVC50000']
if mapping_info['vol'] in self._mappings_list:
return self._errors['CMMVC50000']
for v in self._mappings_list.values():
if ((v['host'] == mapping_info['host']) and
(v['lun'] == mapping_info['lun'])):
return self._errors['CMMVC50000']
for v in self._mappings_list.values():
if (v['lun'] == mapping_info['lun']) and ('force' not in kwargs):
return self._errors['CMMVC50000']
self._mappings_list[mapping_info['id']] = mapping_info
return ('Virtual Disk to Host map, id [%s], successfully created'
% (mapping_info['id']), '')
def _cmd_rmvdiskhostmap(self, **kwargs):
"""svctask rmvdiskhostmap -host <host> <vdisk_name>"""
if 'host' not in kwargs or 'obj' not in kwargs:
return self._errors['CMMVC50000']
host = kwargs['host'].strip('\'\"')
vdisk = kwargs['obj'].strip('\'\"')
mapping_ids = []
for v in self._mappings_list.values():
if v['vol'] == vdisk:
mapping_ids.append(v['id'])
if not mapping_ids:
return self._errors['CMMVC50000']
this_mapping = None
for mapping_id in mapping_ids:
if self._mappings_list[mapping_id]['host'] == host:
this_mapping = mapping_id
if this_mapping is None:
return self._errors['CMMVC50000']
del self._mappings_list[this_mapping]
return ('', '')
def set_protocol(self, protocol):
self._protocol = protocol
def execute_command(self, cmd, check_exit_code=True):
try:
kwargs = self._cmd_to_dict(cmd)
except exception.InvalidInput:
return self._errors['CMMVC50000']
command = kwargs['cmd']
del kwargs['cmd']
func = getattr(self, '_cmd_' + command)
out, err = func(**kwargs)
if (check_exit_code) and (len(err) != 0):
raise processutils.ProcessExecutionError(exit_code=1,
stdout=out,
stderr=err,
cmd=command)
return (out, err)
def error_injection(self, cmd, error):
self._next_cmd_error[cmd] = error
class FlashSystemFakeDriver(flashsystem.FlashSystemDriver):
def __init__(self, *args, **kwargs):
super(FlashSystemFakeDriver, self).__init__(*args, **kwargs)
def set_fake_storage(self, fake):
self.fake_storage = fake
def _ssh(self, cmd, check_exit_code=True):
try:
LOG.debug('Run CLI command: %s' % cmd)
utils.check_ssh_injection(cmd)
ret = self.fake_storage.execute_command(cmd, check_exit_code)
(stdout, stderr) = ret
LOG.debug('CLI output:\n stdout: %(stdout)s\n stderr: '
'%(stderr)s' % {'stdout': stdout, 'stderr': stderr})
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.debug('CLI Exception output:\n stdout: %(out)s\n '
'stderr: %(err)s' % {'out': e.stdout,
'err': e.stderr})
return ret
class FlashSystemDriverTestCase(test.TestCase):
def _set_flag(self, flag, value):
group = self.driver.configuration.config_group
self.driver.configuration.set_override(flag, value, group)
def _reset_flags(self):
self.driver.configuration.local_conf.reset()
for k, v in self._def_flags.items():
self._set_flag(k, v)
def _generate_vol_info(self,
vol_name,
vol_size=10,
vol_status='available'):
rand_id = six.text_type(random.randint(10000, 99999))
if not vol_name:
vol_name = 'test_volume%s' % rand_id
return {'name': vol_name,
'size': vol_size,
'id': '%s' % rand_id,
'volume_type_id': None,
'status': vol_status,
'mdisk_grp_name': 'mdiskgrp0'}
def _generate_snap_info(self,
vol_name,
vol_id,
vol_size,
vol_status,
snap_status='available'):
rand_id = six.text_type(random.randint(10000, 99999))
return {'name': 'test_snap_%s' % rand_id,
'id': rand_id,
'volume': {'name': vol_name,
'id': vol_id,
'size': vol_size,
'status': vol_status},
'volume_size': vol_size,
'status': snap_status,
'mdisk_grp_name': 'mdiskgrp0'}
def setUp(self):
super(FlashSystemDriverTestCase, self).setUp()
self._def_flags = {'san_ip': 'hostname',
'san_login': 'username',
'san_password': 'password',
'flashsystem_connection_protocol': 'FC',
'flashsystem_multipath_enabled': False,
'flashsystem_multihostmap_enabled': True}
self.connector = {
'host': 'flashsystem',
'wwnns': ['0123456789abcdef', '0123456789abcdeg'],
'wwpns': ['abcd000000000001', 'abcd000000000002'],
'initiator': 'iqn.123456'}
self.sim = FlashSystemManagementSimulator()
self.driver = FlashSystemFakeDriver(
configuration=conf.Configuration(None))
self.driver.set_fake_storage(self.sim)
self._reset_flags()
self.ctxt = context.get_admin_context()
self.driver.do_setup(None)
self.driver.check_for_setup_error()
self.sleeppatch = mock.patch('eventlet.greenthread.sleep')
self.sleeppatch.start()
def tearDown(self):
self.sleeppatch.stop()
super(FlashSystemDriverTestCase, self).tearDown()
def test_flashsystem_do_setup(self):
# case 1: cmd lssystem encounters error
self.sim.error_injection('lssystem', 'error')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.do_setup, None)
# case 2: open_access_enabled is not off
self.sim.error_injection('lssystem', 'open_access_enabled=on')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.do_setup, None)
# case 3: cmd lsmdiskgrp encounters error
self.sim.error_injection('lsmdiskgrp', 'error')
self.assertRaises(exception.InvalidInput,
self.driver.do_setup, None)
# case 4: status is not online
self.sim.error_injection('lsmdiskgrp', 'status=offline')
self.assertRaises(exception.InvalidInput,
self.driver.do_setup, None)
# case 5: cmd lsnode encounters error
self.sim.error_injection('lsnode', 'error')
self.assertRaises(processutils.ProcessExecutionError,
self.driver.do_setup, None)
# case 6: cmd lsnode header does not match
self.sim.error_injection('lsnode', 'header_mismatch')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.do_setup, None)
# case 7: set as FC
self.sim.set_protocol('FC')
self.driver.do_setup(None)
self.assertEqual('FC', self.driver._protocol)
# case 8: no configured nodes available
self.sim.set_protocol('unknown')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.do_setup, None)
# clear environment
self.sim.set_protocol('FC')
self.driver.do_setup(None)
def test_flashsystem_check_for_setup_error(self):
self._set_flag('san_ip', '')
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
self._reset_flags()
self._set_flag('san_ssh_port', '')
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
self._reset_flags()
self._set_flag('san_login', '')
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
self._reset_flags()
self._set_flag('san_password', None)
self._set_flag('san_private_key', None)
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
self._reset_flags()
self._set_flag('flashsystem_connection_protocol', 'foo')
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
self._reset_flags()
# clear environment
self.driver.do_setup(None)
def test_flashsystem_validate_connector(self):
conn_neither = {'host': 'host'}
conn_iscsi = {'host': 'host', 'initiator': 'foo'}
conn_fc = {'host': 'host', 'wwpns': 'bar'}
conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'}
protocol = self.driver._protocol
# case 1: when protocol is FC
self.driver._protocol = 'FC'
self.driver.validate_connector(conn_fc)
self.driver.validate_connector(conn_both)
self.assertRaises(exception.InvalidConnectorException,
self.driver.validate_connector, conn_iscsi)
self.assertRaises(exception.InvalidConnectorException,
self.driver.validate_connector, conn_neither)
# clear environment
self.driver._protocol = protocol
def test_flashsystem_volumes(self):
# case 1: create volume
vol = self._generate_vol_info(None)
self.driver.create_volume(vol)
# Check whether volume is created successfully
attributes = self.driver._get_vdisk_attributes(vol['name'])
attr_size = float(attributes['capacity']) / units.Gi
self.assertEqual(float(vol['size']), attr_size)
# case 2: delete volume
self.driver.delete_volume(vol)
# case 3: delete volume that doesn't exist (expected not fail)
vol_no_exist = self._generate_vol_info(None)
self.driver.delete_volume(vol_no_exist)
def test_flashsystem_extend_volume(self):
vol = self._generate_vol_info(None)
self.driver.create_volume(vol)
self.driver.extend_volume(vol, '200')
attrs = self.driver._get_vdisk_attributes(vol['name'])
vol_size = int(attrs['capacity']) / units.Gi
self.assertAlmostEqual(vol_size, 200)
# clear environment
self.driver.delete_volume(vol)
def test_flashsystem_connection(self):
# case 1: initialize_connection/terminate_connection for good path
vol1 = self._generate_vol_info(None)
self.driver.create_volume(vol1)
self.driver.initialize_connection(vol1, self.connector)
self.driver.terminate_connection(vol1, self.connector)
# case 2: when volume is not existed
vol2 = self._generate_vol_info(None)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
vol2, self.connector)
# case 3: _get_vdisk_map_properties raises exception
with mock.patch.object(flashsystem.FlashSystemDriver,
'_get_vdisk_map_properties') as get_properties:
get_properties.side_effect = exception.VolumeBackendAPIException
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
vol1, self.connector)
# clear environment
self.driver.delete_volume(vol1)
@mock.patch.object(flashsystem.FlashSystemDriver,
'_create_and_copy_vdisk_data')
def test_flashsystem_create_snapshot(self, _create_and_copy_vdisk_data):
# case 1: good path
vol1 = self._generate_vol_info(None)
snap1 = self._generate_snap_info(vol1['name'],
vol1['id'],
vol1['size'],
vol1['status'])
self.driver.create_snapshot(snap1)
# case 2: when volume status is error
vol2 = self._generate_vol_info(None, vol_status='error')
snap2 = self._generate_snap_info(vol2['name'],
vol2['id'],
vol2['size'],
vol2['status'])
self.assertRaises(exception.InvalidVolume,
self.driver.create_snapshot, snap2)
@mock.patch.object(flashsystem.FlashSystemDriver,
'_delete_vdisk')
def test_flashsystem_delete_snapshot(self, _delete_vdisk):
vol1 = self._generate_vol_info(None)
snap1 = self._generate_snap_info(vol1['name'],
vol1['id'],
vol1['size'],
vol1['status'])
self.driver.delete_snapshot(snap1)
@mock.patch.object(flashsystem.FlashSystemDriver,
'_create_and_copy_vdisk_data')
def test_flashsystem_create_volume_from_snapshot(
self, _create_and_copy_vdisk_data):
# case 1: good path
vol = self._generate_vol_info(None)
snap = self._generate_snap_info(vol['name'],
vol['id'],
vol['size'],
vol['status'])
self.driver.create_volume_from_snapshot(vol, snap)
# case 2: when size does not match
vol = self._generate_vol_info(None, vol_size=100)
snap = self._generate_snap_info(vol['name'],
vol['id'],
200,
vol['status'])
self.assertRaises(exception.VolumeDriverException,
self.driver.create_volume_from_snapshot,
vol, snap)
# case 3: when snapshot status is not available
vol = self._generate_vol_info(None)
snap = self._generate_snap_info(vol['name'],
vol['id'],
vol['size'],
vol['status'],
snap_status='error')
self.assertRaises(exception.InvalidSnapshot,
self.driver.create_volume_from_snapshot,
vol, snap)
@mock.patch.object(flashsystem.FlashSystemDriver,
'_create_and_copy_vdisk_data')
def test_flashsystem_create_cloned_volume(
self, _create_and_copy_vdisk_data):
# case 1: good path
vol1 = self._generate_vol_info(None)
vol2 = self._generate_vol_info(None)
self.driver.create_cloned_volume(vol2, vol1)
# case 2: when size does not match
vol1 = self._generate_vol_info(None, vol_size=10)
vol2 = self._generate_vol_info(None, vol_size=20)
self.assertRaises(exception.VolumeDriverException,
self.driver.create_cloned_volume,
vol2, vol1)
def test_flashsystem_get_volume_stats(self):
# case 1: good path
self._set_flag('reserved_percentage', 25)
pool = 'mdiskgrp0'
backend_name = 'flashsystem_1.2.3.4' + '_' + pool
stats = self.driver.get_volume_stats()
self.assertEqual(25, stats['reserved_percentage'])
self.assertEqual('IBM', stats['vendor_name'])
self.assertEqual('FC', stats['storage_protocol'])
self.assertEqual(backend_name, stats['volume_backend_name'])
self._reset_flags()
# case 2: when lsmdiskgrp returns error
self.sim.error_injection('lsmdiskgrp', 'error')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.get_volume_stats, refresh=True)
@mock.patch.object(flashsystem.FlashSystemDriver,
'_copy_vdisk_data')
def test_flashsystem_create_and_copy_vdisk_data(self, _copy_vdisk_data):
# case 1: when volume does not exist
vol1 = self._generate_vol_info(None)
vol2 = self._generate_vol_info(None)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._create_and_copy_vdisk_data,
vol1['name'], vol1['id'], vol2['name'], vol2['id'])
# case 2: good path
self.driver.create_volume(vol1)
self.driver._create_and_copy_vdisk_data(
vol1['name'], vol1['id'], vol2['name'], vol2['id'])
self.driver.delete_volume(vol1)
self.driver.delete_volume(vol2)
# case 3: _copy_vdisk_data raises exception
self.driver.create_volume(vol1)
_copy_vdisk_data.side_effect = exception.VolumeBackendAPIException
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver._create_and_copy_vdisk_data,
vol1['name'], vol1['id'], vol2['name'], vol2['id'])
self.assertEqual(set(), self.driver._vdisk_copy_in_progress)
# clear environment
self.driver.delete_volume(vol1)
self.driver.delete_volume(vol2)
@mock.patch.object(volume_utils, 'copy_volume')
@mock.patch.object(flashsystem.FlashSystemDriver, '_scan_device')
@mock.patch.object(flashsystem.FlashSystemDriver, '_remove_device')
@mock.patch.object(utils, 'brick_get_connector_properties')
def test_flashsystem_copy_vdisk_data(self,
_connector,
_remove_device,
_scan_device,
copy_volume):
connector = _connector.return_value = self.connector
vol1 = self._generate_vol_info(None)
vol2 = self._generate_vol_info(None)
self.driver.create_volume(vol1)
self.driver.create_volume(vol2)
# case 1: no mapped before copy
self.driver._copy_vdisk_data(
vol1['name'], vol1['id'], vol2['name'], vol2['id'])
(v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector)
(v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector)
self.assertFalse(v1_mapped)
self.assertFalse(v2_mapped)
# case 2: mapped before copy
self.driver.initialize_connection(vol1, connector)
self.driver.initialize_connection(vol2, connector)
self.driver._copy_vdisk_data(
vol1['name'], vol1['id'], vol2['name'], vol2['id'])
(v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector)
(v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector)
self.assertTrue(v1_mapped)
self.assertTrue(v2_mapped)
self.driver.terminate_connection(vol1, connector)
self.driver.terminate_connection(vol2, connector)
# case 3: no mapped before copy, raise exception when scan
_scan_device.side_effect = exception.VolumeBackendAPIException
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver._copy_vdisk_data,
vol1['name'], vol1['id'], vol2['name'], vol2['id'])
(v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector)
(v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector)
self.assertFalse(v1_mapped)
self.assertFalse(v2_mapped)
# case 4: no mapped before copy, raise exception when copy
copy_volume.side_effect = exception.VolumeBackendAPIException
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver._copy_vdisk_data,
vol1['name'], vol1['id'], vol2['name'], vol2['id'])
(v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector)
(v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector)
self.assertFalse(v1_mapped)
self.assertFalse(v2_mapped)
# clear environment
self.driver.delete_volume(vol1)
self.driver.delete_volume(vol2)
def test_flashsystem_connector_to_hostname_prefix(self):
# Invalid characters will be translated to '-'
# case 1: host name is unicode with invalid characters
conn = {'host': u'unicode.test}.abc{.abc'}
self.assertEqual(u'unicode.test-.abc-.abc',
self.driver._connector_to_hostname_prefix(conn))
# case 2: host name is string with invalid characters
conn = {'host': 'string.test}.abc{.abc'}
self.assertEqual('string.test-.abc-.abc',
self.driver._connector_to_hostname_prefix(conn))
# case 3: host name is neither unicode nor string
conn = {'host': 12345}
self.assertRaises(exception.NoValidHost,
self.driver._connector_to_hostname_prefix,
conn)
# case 4: host name started with number will be translated
conn = {'host': '192.168.1.1'}
self.assertEqual('_192.168.1.1',
self.driver._connector_to_hostname_prefix(conn))
def test_flashsystem_create_host(self):
# case 1: create host
conn = {
'host': 'flashsystem',
'wwnns': ['0123456789abcdef', '0123456789abcdeg'],
'wwpns': ['abcd000000000001', 'abcd000000000002'],
'initiator': 'iqn.123456'}
host = self.driver._create_host(conn)
# case 2: create host that already exists
self.assertRaises(processutils.ProcessExecutionError,
self.driver._create_host,
conn)
# case 3: delete host
self.driver._delete_host(host)
# case 4: create host with empty ports
conn = {'host': 'flashsystem', 'wwpns': []}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._create_host,
conn)
def test_flashsystem_find_host_exhaustive(self):
# case 1: create host and find it
conn1 = {
'host': 'flashsystem-01',
'wwnns': ['1111111111abcdef', '1111111111abcdeg'],
'wwpns': ['1111111111000001', '1111111111000002'],
'initiator': 'iqn.111111'}
conn2 = {
'host': 'flashsystem-02',
'wwnns': ['2222222222abcdef', '2222222222abcdeg'],
'wwpns': ['2222222222000001', '2222222222000002'],
'initiator': 'iqn.222222'}
conn3 = {
'host': 'flashsystem-03',
'wwnns': ['3333333333abcdef', '3333333333abcdeg'],
'wwpns': ['3333333333000001', '3333333333000002'],
'initiator': 'iqn.333333'}
host1 = self.driver._create_host(conn1)
host2 = self.driver._create_host(conn2)
self.assertEqual(
host2,
self.driver._find_host_exhaustive(conn2, [host1, host2]))
self.assertEqual(
None,
self.driver._find_host_exhaustive(conn3, [host1, host2]))
# clear environment
self.driver._delete_host(host1)
self.driver._delete_host(host2)
def test_flashsystem_get_vdisk_params(self):
# case 1: use default params
self.driver._get_vdisk_params(None)
# case 2: use extra params from type
opts1 = {'storage_protocol': 'FC'}
opts2 = {'capabilities:storage_protocol': 'FC'}
opts3 = {'storage_protocol': 'iSCSI'}
type1 = volume_types.create(self.ctxt, 'opts1', opts1)
type2 = volume_types.create(self.ctxt, 'opts2', opts2)
type3 = volume_types.create(self.ctxt, 'opts3', opts3)
self.assertEqual(
'FC',
self.driver._get_vdisk_params(type1['id'])['protocol'])
self.assertEqual(
'FC',
self.driver._get_vdisk_params(type2['id'])['protocol'])
self.assertRaises(exception.InvalidInput,
self.driver._get_vdisk_params,
type3['id'])
# clear environment
volume_types.destroy(self.ctxt, type1['id'])
volume_types.destroy(self.ctxt, type2['id'])
def test_flashsystem_map_vdisk_to_host(self):
# case 1: no host found
vol1 = self._generate_vol_info(None)
self.driver.create_volume(vol1)
self.assertEqual(
            # lun id should begin with 1
1,
self.driver._map_vdisk_to_host(vol1['name'], self.connector))
# case 2: host already exists
vol2 = self._generate_vol_info(None)
self.driver.create_volume(vol2)
self.assertEqual(
            # lun id should be sequential
2,
self.driver._map_vdisk_to_host(vol2['name'], self.connector))
# case 3: test if already mapped
self.assertEqual(
1,
self.driver._map_vdisk_to_host(vol1['name'], self.connector))
# clean environment
self.driver._unmap_vdisk_from_host(vol1['name'], self.connector)
self.driver._unmap_vdisk_from_host(vol2['name'], self.connector)
self.driver.delete_volume(vol1)
self.driver.delete_volume(vol2)
# case 4: If there is no vdisk mapped to host, host should be removed
self.assertEqual(
None,
self.driver._get_host_from_connector(self.connector))
|
apache-2.0
| -6,044,078,275,345,714,000 | 37.243902 | 79 | 0.531803 | false | 3.864925 | true | false | false |
RDXT/django-userena
|
userena/views.py
|
1
|
38355
|
from django.urls import reverse
from django.shortcuts import redirect, get_object_or_404,render
from django.contrib.auth import authenticate, login, logout, REDIRECT_FIELD_NAME
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.views import logout as Signout
from django.views.generic import TemplateView
from django.views.generic.list import ListView
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext as _
from django.http import Http404, HttpResponseRedirect, HttpResponse
from userena.forms import (SignupForm, SignupFormOnlyEmail, AuthenticationForm,
                           ChangeEmailForm, EditProfileForm, InviteForm)
from userena.models import UserenaSignup
from userena.decorators import secure_required
from userena.utils import signin_redirect, get_profile_model, get_user_profile
from userena import signals as userena_signals
from userena import settings as userena_settings
from guardian.decorators import permission_required_or_403
from django.contrib.auth.decorators import login_required
import warnings
class ExtraContextTemplateView(TemplateView):
""" Add extra context to a simple template view """
extra_context = None
def get_context_data(self, *args, **kwargs):
context = super(ExtraContextTemplateView, self).get_context_data(*args, **kwargs)
if self.extra_context:
context.update(self.extra_context)
return context
# this view is used in POST requests, e.g. signup when the form is not valid
post = TemplateView.get
class InvitedUsersListView(ListView):
""" Lists all profiles """
context_object_name='invited_user_list'
page=1
paginate_by=50
template_name='userena/list_invited_users.html'
extra_context=None
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(InvitedUsersListView, self).get_context_data(**kwargs)
try:
page = int(self.request.GET.get('page', None))
except (TypeError, ValueError):
page = self.page
if not self.extra_context: self.extra_context = dict()
context['page'] = page
context['paginate_by'] = self.paginate_by
context['extra_context'] = self.extra_context
profile_model= get_profile_model()
currentProfile=profile_model.objects.get(user=self.request.user)
context['numOfRemainingInvitationTicket']= currentProfile.get_remaining_invite_tickets_number()
return context
def get_queryset(self):
profile_model= get_profile_model()
currentProfile=profile_model.objects.get(user=self.request.user)
queryset = currentProfile.invited_users.all()
return queryset
class ProfileListView(ListView):
""" Lists all profiles """
context_object_name='profile_list'
page=1
paginate_by=50
template_name=userena_settings.USERENA_PROFILE_LIST_TEMPLATE
extra_context=None
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(ProfileListView, self).get_context_data(**kwargs)
try:
page = int(self.request.GET.get('page', None))
except (TypeError, ValueError):
page = self.page
if userena_settings.USERENA_DISABLE_PROFILE_LIST \
and not self.request.user.is_staff:
raise Http404
if not self.extra_context: self.extra_context = dict()
context['page'] = page
context['paginate_by'] = self.paginate_by
context['extra_context'] = self.extra_context
return context
def get_queryset(self):
profile_model = get_profile_model()
queryset = profile_model.objects.get_visible_profiles(self.request.user).select_related()
return queryset
@secure_required
@login_required
def invite_new_user(request, invite_form=InviteForm,
                    template_name='userena/invite_new_user.html',
                    success_url='userena_list_invited_users',
                    extra_context=None):
if(request.user.has_perm('invite_user')):
if not extra_context:
extra_context = dict()
if request.method == 'POST':
form = invite_form(request.user,request.POST, request.FILES)
if form.is_valid():
result=form.save()
                if result:  # if result is True, everything was OK
return redirect(success_url)
else:
return HttpResponse(status=500)
extra_context['form'] = form
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
form=invite_form(request.user)
extra_context['form'] = form
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
else:
raise PermissionDenied
@secure_required
@login_required
def list_invited_users(request,template_name='userena/list_invited_users.html'):
return InvitedUsersListView.as_view(template_name=template_name)(request)
@secure_required
def signup(request, signup_form=SignupForm,
template_name='userena/signup_form.html', success_url=None,
extra_context=None):
"""
Signup of an account.
Signup requiring a username, email and password. After signup a user gets
an email with an activation link used to activate their account. After
successful signup redirects to ``success_url``.
:param signup_form:
Form that will be used to sign a user. Defaults to userena's
:class:`SignupForm`.
:param template_name:
String containing the template name that will be used to display the
signup form. Defaults to ``userena/signup_form.html``.
:param success_url:
String containing the URI which should be redirected to after a
successful signup. If not supplied will redirect to
``userena_signup_complete`` view.
:param extra_context:
Dictionary containing variables which are added to the template
context. Defaults to a dictionary with a ``form`` key containing the
``signup_form``.
**Context**
``form``
Form supplied by ``signup_form``.
"""
# If signup is disabled, return 403
if userena_settings.USERENA_DISABLE_SIGNUP:
raise PermissionDenied
# If no usernames are wanted and the default form is used, fallback to the
# default form that doesn't display to enter the username.
if userena_settings.USERENA_WITHOUT_USERNAMES and (signup_form == SignupForm):
signup_form = SignupFormOnlyEmail
form = signup_form()
if request.method == 'POST':
form = signup_form(request.POST, request.FILES)
if form.is_valid():
user = form.save()
# Send the signup complete signal
userena_signals.signup_complete.send(sender=None,
user=user)
if success_url: redirect_to = success_url
else: redirect_to = reverse('userena_signup_complete',
kwargs={'username': user.username})
# A new signed user should logout the old one.
if request.user.is_authenticated():
logout(request)
if (userena_settings.USERENA_SIGNIN_AFTER_SIGNUP and
not userena_settings.USERENA_ACTIVATION_REQUIRED):
user = authenticate(identification=user.email, check_password=False)
login(request, user)
return redirect(redirect_to)
if not extra_context: extra_context = dict()
extra_context['form'] = form
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
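# A minimal wiring sketch for a project ``urls.py`` (illustrative only; the
# route and success URL are invented):
#
#     from django.urls import path
#     from userena import views as userena_views
#
#     urlpatterns = [
#         path('accounts/signup/', userena_views.signup,
#              {'success_url': '/accounts/welcome/'}, name='userena_signup'),
#     ]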
@secure_required
def activate(request, activation_key,
template_name='userena/activate_fail.html',
retry_template_name='userena/activate_retry.html',
success_url=None, extra_context=None):
"""
Activate a user with an activation key.
The key is a SHA1 string. When the SHA1 is found with an
:class:`UserenaSignup`, the :class:`User` of that account will be
activated. After a successful activation the view will redirect to
``success_url``. If the SHA1 is not found, the user will be shown the
``template_name`` template displaying a fail message.
If the SHA1 is found but expired, ``retry_template_name`` is used instead,
so the user can proceed to :func:`activate_retry` to get a new activation key.
:param activation_key:
        SHA1 string of 40 characters. A SHA1 is always 160 bits long; with
        4 bits per character this makes it --160/4-- 40 characters long.
:param template_name:
String containing the template name that is used when the
``activation_key`` is invalid and the activation fails. Defaults to
``userena/activate_fail.html``.
:param retry_template_name:
String containing the template name that is used when the
``activation_key`` is expired. Defaults to
``userena/activate_retry.html``.
:param success_url:
String containing the URL where the user should be redirected to after
a successful activation. Will replace ``%(username)s`` with string
formatting if supplied. If ``success_url`` is left empty, will direct
to ``userena_profile_detail`` view.
:param extra_context:
Dictionary containing variables which could be added to the template
context. Default to an empty dictionary.
"""
try:
if (not UserenaSignup.objects.check_expired_activation(activation_key)
or not userena_settings.USERENA_ACTIVATION_RETRY):
user = UserenaSignup.objects.activate_user(activation_key)
if user:
# Sign the user in.
auth_user = authenticate(identification=user.email,
check_password=False)
login(request, auth_user)
if userena_settings.USERENA_USE_MESSAGES:
messages.success(request, _('Your account has been activated and you have been signed in.'),
fail_silently=True)
if success_url: redirect_to = success_url % {'username': user.username }
else: redirect_to = reverse('userena_profile_detail',
kwargs={'username': user.username})
return redirect(redirect_to)
else:
if not extra_context: extra_context = dict()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(
request)
else:
if not extra_context: extra_context = dict()
extra_context['activation_key'] = activation_key
return ExtraContextTemplateView.as_view(template_name=retry_template_name,
extra_context=extra_context)(request)
except UserenaSignup.DoesNotExist:
if not extra_context: extra_context = dict()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
@secure_required
def activate_retry(request, activation_key,
template_name='userena/activate_retry_success.html',
extra_context=None):
"""
Reissue a new ``activation_key`` for the user with the expired
``activation_key``.
    If the ``activation_key`` does not exist, ``USERENA_ACTIVATION_RETRY`` is
    set to False, or any other error condition occurs, the user is redirected
    to :func:`activate` for error message display.
:param activation_key:
        String of a SHA1 hash, 40 characters long. A SHA1 hash is always 160
        bits long; at 4 bits per hex character that makes it 160/4 = 40
        characters long.
:param template_name:
String containing the template name that is used when new
``activation_key`` has been created. Defaults to
``userena/activate_retry_success.html``.
:param extra_context:
Dictionary containing variables which could be added to the template
context. Default to an empty dictionary.
"""
if not userena_settings.USERENA_ACTIVATION_RETRY:
return redirect(reverse('userena_activate', args=(activation_key,)))
try:
if UserenaSignup.objects.check_expired_activation(activation_key):
new_key = UserenaSignup.objects.reissue_activation(activation_key)
if new_key:
if not extra_context: extra_context = dict()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
else:
return redirect(reverse('userena_activate',args=(activation_key,)))
else:
return redirect(reverse('userena_activate',args=(activation_key,)))
except UserenaSignup.DoesNotExist:
return redirect(reverse('userena_activate',args=(activation_key,)))
@secure_required
def activate_invited_user(request, invitation_key,
template_name='userena/invite_fail.html',
retry_template_name='userena/invite_retry.html',
success_url=None, extra_context=None):
"""
Activate an invited user with an invitation key.
The key is a SHA1 string. When the SHA1 is found with an
:class:`UserenaSignup`, the :class:`User` of that account will be
activated. After a successful activation the view will redirect to
``success_url``. If the SHA1 is not found, the user will be shown the
``template_name`` template displaying a fail message.
If the SHA1 is found but expired, ``retry_template_name`` is used instead,
so the user can proceed to :func:`activate_retry` to get a new activation key.
:param invitation_key:
        String of a SHA1 hash, 40 characters long. A SHA1 hash is always 160
        bits long; at 4 bits per hex character that makes it 160/4 = 40
        characters long.
:param template_name:
String containing the template name that is used when the
``activation_key`` is invalid and the activation fails. Defaults to
``userena/activate_fail.html``.
:param retry_template_name:
String containing the template name that is used when the
``activation_key`` is expired. Defaults to
``userena/activate_retry.html``.
:param success_url:
String containing the URL where the user should be redirected to after
a successful activation. Will replace ``%(username)s`` with string
formatting if supplied. If ``success_url`` is left empty, will direct
to ``userena_profile_detail`` view.
:param extra_context:
Dictionary containing variables which could be added to the template
context. Default to an empty dictionary.
"""
try:
if (not UserenaSignup.objects.check_expired_invitation(invitation_key)
or not userena_settings.USERENA_ACTIVATION_RETRY):
user = UserenaSignup.objects.activate_invited_user(invitation_key)
if user:
# Sign the user in.
auth_user = authenticate(identification=user.email,
check_password=False)
login(request, auth_user)
if userena_settings.USERENA_USE_MESSAGES:
messages.success(request, _('Your account has been activated and you have been signed in.'),
fail_silently=True)
if success_url: redirect_to = success_url % {'username': user.username }
else: redirect_to = reverse('userena_profile_detail',
kwargs={'username': user.username})
return redirect(redirect_to)
else:
if not extra_context: extra_context = dict()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(
request)
else:
if not extra_context: extra_context = dict()
extra_context['invitation_key'] = invitation_key
return ExtraContextTemplateView.as_view(template_name=retry_template_name,
extra_context=extra_context)(request)
except UserenaSignup.DoesNotExist:
if not extra_context: extra_context = dict()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
@secure_required
def email_confirm(request, confirmation_key,
template_name='userena/email_confirm_fail.html',
success_url=None, extra_context=None):
"""
Confirms an email address with a confirmation key.
    Confirms a new email address by running the
    :func:`User.objects.confirm_email` method. If the method returns a
    :class:`User`, the new e-mail address is set and the user is redirected to
    ``success_url``. If no ``User`` is returned, the user is presented with a
    fail message from ``template_name``.
:param confirmation_key:
String with a SHA1 representing the confirmation key used to verify a
new email address.
:param template_name:
String containing the template name which should be rendered when
confirmation fails. When confirmation is successful, no template is
needed because the user will be redirected to ``success_url``.
:param success_url:
String containing the URL which is redirected to after a successful
confirmation. Supplied argument must be able to be rendered by
``reverse`` function.
:param extra_context:
Dictionary of variables that are passed on to the template supplied by
``template_name``.
"""
user = UserenaSignup.objects.confirm_email(confirmation_key)
if user:
if userena_settings.USERENA_USE_MESSAGES:
messages.success(request, _('Your email address has been changed.'),
fail_silently=True)
if success_url: redirect_to = success_url
else: redirect_to = reverse('userena_email_confirm_complete',
kwargs={'username': user.username})
return redirect(redirect_to)
else:
if not extra_context: extra_context = dict()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
def direct_to_user_template(request, username, template_name,
extra_context=None):
"""
Simple wrapper for Django's :func:`direct_to_template` view.
This view is used when you want to show a template to a specific user. A
wrapper for :func:`direct_to_template` where the template also has access to
    the user that is found with ``username``. For example, it is used after
    signup, activation, and confirmation of a new e-mail address.
:param username:
String defining the username of the user that made the action.
:param template_name:
String defining the name of the template to use. Defaults to
``userena/signup_complete.html``.
**Keyword arguments**
``extra_context``
A dictionary containing extra variables that should be passed to the
rendered template. The ``account`` key is always the ``User``
that completed the action.
**Extra context**
``viewed_user``
The currently :class:`User` that is viewed.
"""
user = get_object_or_404(get_user_model(), username__iexact=username)
if not extra_context: extra_context = dict()
extra_context['viewed_user'] = user
extra_context['profile'] = get_user_profile(user=user)
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
def disabled_account(request, username, template_name, extra_context=None):
"""
    Checks if the account is disabled; if so, returns the disabled-account template.
:param username:
String defining the username of the user that made the action.
:param template_name:
String defining the name of the template to use. Defaults to
``userena/signup_complete.html``.
**Keyword arguments**
``extra_context``
A dictionary containing extra variables that should be passed to the
rendered template. The ``account`` key is always the ``User``
that completed the action.
**Extra context**
``viewed_user``
The currently :class:`User` that is viewed.
``profile``
Profile of the viewed user.
"""
user = get_object_or_404(get_user_model(), username__iexact=username)
if user.is_active:
raise Http404
if not extra_context: extra_context = dict()
extra_context['viewed_user'] = user
extra_context['profile'] = get_user_profile(user=user)
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
@secure_required
def signin(request, auth_form=AuthenticationForm,
template_name='userena/signin_form.html',
redirect_field_name=REDIRECT_FIELD_NAME,
redirect_signin_function=signin_redirect, extra_context=None):
"""
Signin using email or username with password.
Signs a user in by combining email/username with password. If the
combination is correct and the user :func:`is_active` the
:func:`redirect_signin_function` is called with the arguments
    ``REDIRECT_FIELD_NAME`` and an instance of the :class:`User` who is
    trying to log in. The returned value of the function will be the URL that
is redirected to.
A user can also select to be remembered for ``USERENA_REMEMBER_DAYS``.
:param auth_form:
Form to use for signing the user in. Defaults to the
:class:`AuthenticationForm` supplied by userena.
:param template_name:
String defining the name of the template to use. Defaults to
``userena/signin_form.html``.
:param redirect_field_name:
Form field name which contains the value for a redirect to the
succeeding page. Defaults to ``next`` and is set in
``REDIRECT_FIELD_NAME`` setting.
:param redirect_signin_function:
        Function which handles the redirect. This function gets the value of
``REDIRECT_FIELD_NAME`` and the :class:`User` who has logged in. It
must return a string which specifies the URI to redirect to.
:param extra_context:
A dictionary containing extra variables that should be passed to the
rendered template. The ``form`` key is always the ``auth_form``.
**Context**
``form``
Form used for authentication supplied by ``auth_form``.
"""
form = auth_form()
if request.method == 'POST':
form = auth_form(request.POST, request.FILES)
if form.is_valid():
identification, password, remember_me = (form.cleaned_data['identification'],
form.cleaned_data['password'],
form.cleaned_data['remember_me'])
user = authenticate(identification=identification,
password=password)
if user.is_active:
login(request, user)
if remember_me:
request.session.set_expiry(userena_settings.USERENA_REMEMBER_ME_DAYS[1] * 86400)
else: request.session.set_expiry(0)
if userena_settings.USERENA_USE_MESSAGES:
messages.success(request, _('You have been signed in.'),
fail_silently=True)
#send a signal that a user has signed in
userena_signals.account_signin.send(sender=None, user=user)
# Whereto now?
redirect_to = redirect_signin_function(
request.GET.get(redirect_field_name,
request.POST.get(redirect_field_name)), user)
return redirect(redirect_to)
else:
return redirect(reverse('userena_disabled',
kwargs={'username': user.username}))
if not extra_context: extra_context = dict()
extra_context.update({
'form': form,
'next': request.GET.get(redirect_field_name,
request.POST.get(redirect_field_name)),
})
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
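# A minimal sketch of a custom ``redirect_signin_function`` accepted by the
# signin view above (the fallback policy is an example assumption; any callable
# taking the redirect value and the user and returning a URI string works):
#
#   def profile_or_next(redirect_to, user):
#       if redirect_to:
#           return redirect_to
#       return reverse('userena_profile_detail',
#                      kwargs={'username': user.username})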
@secure_required
def signout(request, next_page=userena_settings.USERENA_REDIRECT_ON_SIGNOUT,
template_name='userena/signout.html', *args, **kwargs):
"""
Signs out the user and adds a success message ``You have been signed
    out.`` If ``next_page`` is defined, the user is redirected to that URI;
    otherwise the template in ``template_name`` is used.
:param next_page:
A string which specifies the URI to redirect to.
:param template_name:
String defining the name of the template to use. Defaults to
``userena/signout.html``.
"""
if request.user.is_authenticated() and userena_settings.USERENA_USE_MESSAGES: # pragma: no cover
messages.success(request, _('You have been signed out.'), fail_silently=True)
userena_signals.account_signout.send(sender=None, user=request.user)
return Signout(request, next_page=next_page, template_name=template_name, *args, **kwargs)
@secure_required
@permission_required_or_403('change_user', (get_user_model(), 'username', 'username'))
def email_change(request, username, email_form=ChangeEmailForm,
template_name='userena/email_form.html', success_url=None,
extra_context=None):
"""
Change email address
:param username:
String of the username which specifies the current account.
:param email_form:
Form that will be used to change the email address. Defaults to
:class:`ChangeEmailForm` supplied by userena.
:param template_name:
String containing the template to be used to display the email form.
Defaults to ``userena/email_form.html``.
:param success_url:
Named URL where the user will get redirected to when successfully
changing their email address. When not supplied will redirect to
``userena_email_complete`` URL.
:param extra_context:
Dictionary containing extra variables that can be used to render the
        template. The ``form`` key is always the form supplied by
        ``email_form`` and the ``user`` key is the user whose email address
        is being changed.
**Context**
``form``
Form that is used to change the email address supplied by ``form``.
``account``
Instance of the ``Account`` whose email address is about to be changed.
**Todo**
Need to have per-object permissions, which enables users with the correct
permissions to alter the email address of others.
"""
user = get_object_or_404(get_user_model(), username__iexact=username)
prev_email = user.email
form = email_form(user)
if request.method == 'POST':
form = email_form(user, request.POST, request.FILES)
if form.is_valid():
form.save()
if success_url:
# Send a signal that the email has changed
userena_signals.email_change.send(sender=None,
user=user,
prev_email=prev_email,
new_email=user.email)
redirect_to = success_url
else: redirect_to = reverse('userena_email_change_complete',
kwargs={'username': user.username})
return redirect(redirect_to)
if not extra_context: extra_context = dict()
extra_context['form'] = form
extra_context['profile'] = get_user_profile(user=user)
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
@secure_required
@permission_required_or_403('change_user', (get_user_model(), 'username', 'username'))
def password_change(request, username, template_name='userena/password_form.html',
pass_form=PasswordChangeForm, success_url=None, extra_context=None):
""" Change password of user.
This view is almost a mirror of the view supplied in
:func:`contrib.auth.views.password_change`, with the minor change that in
this view we also use the username to change the password. This was needed
    to keep our URLs logical (and RESTful) across the entire application, and
    so that at a later stage administrators can also change a user's password
    through the web application itself.
:param username:
        String supplying the username of the user whose password is about to be
changed.
:param template_name:
String of the name of the template that is used to display the password
change form. Defaults to ``userena/password_form.html``.
:param pass_form:
Form used to change password. Default is the form supplied by Django
itself named ``PasswordChangeForm``.
:param success_url:
Named URL that is passed onto a :func:`reverse` function with
``username`` of the active user. Defaults to the
        ``userena_password_change_complete`` URL.
:param extra_context:
Dictionary of extra variables that are passed on to the template. The
        ``form`` key is always the form supplied by ``pass_form``.
**Context**
``form``
Form used to change the password.
"""
user = get_object_or_404(get_user_model(),
username__iexact=username)
form = pass_form(user=user)
if request.method == "POST":
form = pass_form(user=user, data=request.POST)
if form.is_valid():
form.save()
# Send a signal that the password has changed
userena_signals.password_complete.send(sender=None,
user=user)
if success_url: redirect_to = success_url
else: redirect_to = reverse('userena_password_change_complete',
kwargs={'username': user.username})
return redirect(redirect_to)
if not extra_context: extra_context = dict()
extra_context['form'] = form
extra_context['profile'] = get_user_profile(user=user)
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
@secure_required
@permission_required_or_403('change_profile', (get_profile_model(), 'user__username', 'username'))
def profile_edit(request, username, edit_profile_form=EditProfileForm,
template_name='userena/profile_form.html', success_url=None,
extra_context=None, **kwargs):
"""
Edit profile.
    Edits a profile selected by the supplied username. First checks whether
    the user is allowed to edit this profile; if not, a 404 is shown. When
    the profile is successfully edited, the view redirects to
``success_url``.
:param username:
Username of the user which profile should be edited.
:param edit_profile_form:
Form that is used to edit the profile. The :func:`EditProfileForm.save`
method of this form will be called when the form
:func:`EditProfileForm.is_valid`. Defaults to :class:`EditProfileForm`
from userena.
:param template_name:
String of the template that is used to render this view. Defaults to
        ``userena/profile_form.html``.
:param success_url:
Named URL which will be passed on to a django ``reverse`` function after
        the form is successfully saved. Defaults to the ``userena_profile_detail`` URL.
:param extra_context:
Dictionary containing variables that are passed on to the
``template_name`` template. ``form`` key will always be the form used
to edit the profile, and the ``profile`` key is always the edited
profile.
**Context**
``form``
Form that is used to alter the profile.
``profile``
Instance of the ``Profile`` that is edited.
"""
user = get_object_or_404(get_user_model(), username__iexact=username)
profile = get_user_profile(user=user)
user_initial = {'first_name': user.first_name,
'last_name': user.last_name}
form = edit_profile_form(instance=profile, initial=user_initial)
if request.method == 'POST':
form = edit_profile_form(request.POST, request.FILES, instance=profile,
initial=user_initial)
if form.is_valid():
profile = form.save()
if userena_settings.USERENA_USE_MESSAGES:
messages.success(request, _('Your profile has been updated.'),
fail_silently=True)
if success_url:
# Send a signal that the profile has changed
userena_signals.profile_change.send(sender=None,
user=user)
redirect_to = success_url
else: redirect_to = reverse('userena_profile_detail', kwargs={'username': username})
return redirect(redirect_to)
if not extra_context: extra_context = dict()
extra_context['form'] = form
extra_context['profile'] = profile
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
def profile_detail(request, username,
template_name=userena_settings.USERENA_PROFILE_DETAIL_TEMPLATE,
extra_context=None, **kwargs):
"""
    Detailed view of a user.
:param username:
String of the username of which the profile should be viewed.
:param template_name:
String representing the template name that should be used to display
the profile.
:param extra_context:
Dictionary of variables which should be supplied to the template. The
``profile`` key is always the current profile.
**Context**
``profile``
Instance of the currently viewed ``Profile``.
"""
user = get_object_or_404(get_user_model(), username__iexact=username)
profile = get_user_profile(user=user)
if not profile.can_view_profile(request.user):
raise PermissionDenied
if not extra_context: extra_context = dict()
extra_context['profile'] = profile
extra_context['hide_email'] = userena_settings.USERENA_HIDE_EMAIL
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
def profile_list(request, page=1, template_name='userena/profile_list.html',
paginate_by=50, extra_context=None, **kwargs): # pragma: no cover
"""
Returns a list of all profiles that are public.
It's possible to disable this by changing ``USERENA_DISABLE_PROFILE_LIST``
to ``True`` in your settings.
:param page:
Integer of the active page used for pagination. Defaults to the first
page.
:param template_name:
String defining the name of the template that is used to render the
        list of all users. Defaults to ``userena/profile_list.html``.
:param paginate_by:
Integer defining the amount of displayed profiles per page. Defaults to
50 profiles per page.
:param extra_context:
Dictionary of variables that are passed on to the ``template_name``
template.
**Context**
``profile_list``
A list of profiles.
``is_paginated``
A boolean representing whether the results are paginated.
    If the result is paginated, it will also contain the following variables.
``paginator``
An instance of ``django.core.paginator.Paginator``.
``page_obj``
An instance of ``django.core.paginator.Page``.
"""
warnings.warn("views.profile_list is deprecated. Use ProfileListView instead", DeprecationWarning, stacklevel=2)
try:
page = int(request.GET.get('page', None))
except (TypeError, ValueError):
page = page
if userena_settings.USERENA_DISABLE_PROFILE_LIST \
and not request.user.is_staff:
raise Http404
profile_model = get_profile_model()
queryset = profile_model.objects.get_visible_profiles(request.user)
if not extra_context: extra_context = dict()
return ProfileListView.as_view(queryset=queryset,
paginate_by=paginate_by,
page=page,
template_name=template_name,
extra_context=extra_context,
**kwargs)(request)
|
bsd-3-clause
| -7,902,587,174,113,698,000 | 39.673383 | 157 | 0.63087 | false | 4.441292 | false | false | false |
django-extensions/django-extensions
|
django_extensions/management/commands/validate_templates.py
|
1
|
3627
|
# -*- coding: utf-8 -*-
import os
import fnmatch
from django.apps import apps
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import color_style
from django.template.loader import get_template
from django_extensions.compat import get_template_setting
from django_extensions.management.utils import signalcommand
#
# TODO: Render the template with fake request object ?
#
class Command(BaseCommand):
args = ''
help = "Validate templates on syntax and compile errors"
ignores = set([
".DS_Store",
"*.swp",
"*~",
])
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
'--no-apps', action='store_true', dest='no_apps',
default=False, help="Do not automatically include apps.")
parser.add_argument(
'--break', '-b', action='store_true', dest='break',
default=False, help="Break on first error.")
parser.add_argument(
'--include', '-i', action='append', dest='includes',
default=[], help="Append these paths to TEMPLATE DIRS")
parser.add_argument(
'--ignore-app', action='append', dest='ignore_apps',
default=[], help="Ignore these apps")
def ignore_filename(self, filename):
filename = os.path.basename(filename)
for ignore_pattern in self.ignores:
if fnmatch.fnmatch(filename, ignore_pattern):
return True
return False
@signalcommand
def handle(self, *args, **options):
if hasattr(settings, 'VALIDATE_TEMPLATES_IGNORES'):
self.ignores = getattr(settings, 'VALIDATE_TEMPLATES_IGNORES')
style = color_style()
template_dirs = set(get_template_setting('DIRS', []))
template_dirs |= set(options['includes'])
template_dirs |= set(getattr(settings, 'VALIDATE_TEMPLATES_EXTRA_TEMPLATE_DIRS', []))
if not options['no_apps']:
ignore_apps = options['ignore_apps']
if not ignore_apps and hasattr(settings, 'VALIDATE_TEMPLATES_IGNORE_APPS'):
ignore_apps = getattr(settings, 'VALIDATE_TEMPLATES_IGNORE_APPS')
for app in apps.get_app_configs():
if app.name in ignore_apps:
continue
app_template_dir = os.path.join(app.path, 'templates')
if os.path.isdir(app_template_dir):
template_dirs.add(app_template_dir)
settings.TEMPLATES[0]['DIRS'] = list(template_dirs)
settings.TEMPLATE_DEBUG = True
verbosity = options["verbosity"]
errors = 0
for template_dir in template_dirs:
for root, dirs, filenames in os.walk(template_dir):
for filename in filenames:
if self.ignore_filename(filename):
continue
filepath = os.path.join(root, filename)
if verbosity > 1:
self.stdout.write(filepath)
try:
get_template(filepath)
except Exception as e:
errors += 1
self.stdout.write("%s: %s" % (filepath, style.ERROR("%s %s" % (e.__class__.__name__, str(e)))))
if errors and options['break']:
raise CommandError("Errors found")
if errors:
raise CommandError("%s errors found" % errors)
self.stdout.write("%s errors found" % errors)
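# A minimal settings sketch for this command (the values are examples only; the
# setting names are the ones read in handle() above):
#
#   VALIDATE_TEMPLATES_IGNORES = [".DS_Store", "*.swp", "*~", "*.orig"]
#   VALIDATE_TEMPLATES_IGNORE_APPS = ["django.contrib.admin"]
#   VALIDATE_TEMPLATES_EXTRA_TEMPLATE_DIRS = ["/srv/project/extra_templates"]
#
# Example invocation: python manage.py validate_templates --break -i /tmp/templates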
|
mit
| 256,871,737,937,782,240 | 36.78125 | 119 | 0.576234 | false | 4.450307 | false | false | false |
firmadyne/scraper
|
firmware/spiders/openwrt.py
|
1
|
2425
|
from scrapy import Spider
from scrapy.http import Request
from firmware.items import FirmwareImage
from firmware.loader import FirmwareLoader
import urllib.request, urllib.parse, urllib.error
class OpenWRTSpider(Spider):
name = "openwrt"
allowed_domains = ["downloads.openwrt.org"]
start_urls = ["http://downloads.openwrt.org/"]
def parse(self, response):
for link in response.xpath("//a"):
text = link.xpath("text()").extract_first()
href = link.xpath("@href").extract_first()
if text is None and href == "/":
# <a href="/"><em>(root)</em></a>
continue
yield Request(
url=urllib.parse.urljoin(response.url, href),
headers={"Referer": response.url},
meta={"version": FirmwareLoader.find_version_period(text)},
callback=self.parse_url)
def parse_url(self, response):
for link in response.xpath("//a"):
text = link.xpath("text()").extract_first()
href = link.xpath("@href").extract_first()
if text is None and href == "/":
# <a href="/"><em>(root)</em></a>
continue
if ".." in href:
continue
elif href.endswith('/'):
if "package/" not in text:
product = "%s-%s" % (response.meta["product"], text[0: -1]) if "product" in response.meta else text[0: -1]
yield Request(
url=urllib.parse.urljoin(response.url, href),
headers={"Referer": response.url},
meta={"version": response.meta[
"version"], "product": product},
callback=self.parse_url)
elif any(href.endswith(x) for x in [".bin", ".elf", ".fdt", ".imx", ".chk", ".trx"]):
item = FirmwareLoader(
item=FirmwareImage(), response=response, date_fmt=["%d-%b-%Y"])
item.add_value("version", response.meta["version"])
item.add_value("url", href)
item.add_value("date", item.find_date(
link.xpath("following::text()").extract()))
item.add_value("product", response.meta["product"])
item.add_value("vendor", self.name)
yield item.load_item()
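# Example invocation (assuming it is run from a project root containing
# scrapy.cfg; "openwrt" is the spider name declared above):
#
#   scrapy crawl openwrt -o openwrt_images.json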
|
mit
| 7,624,054,713,847,366,000 | 39.416667 | 126 | 0.51299 | false | 4.299645 | false | false | false |
kozistr/Awesome-GANs
|
awesome_gans/discogan/discogan_train.py
|
1
|
4150
|
import sys
import time
import tensorflow as tf
import awesome_gans.discogan.discogan_model as discogan
import awesome_gans.image_utils as iu
from awesome_gans.datasets import Pix2PixDataSet as DataSets
# import numpy as np
sys.path.insert(0, '../')
results = {'sample_output': './gen_img/', 'model': './model/DiscoGAN-model.ckpt'}
paras = {'epoch': 200, 'batch_size': 64, 'logging_interval': 5}
def main():
start_time = time.time() # clocking start
# Dataset
dataset = DataSets(height=64, width=64, channel=3, ds_path='D:/DataSets/pix2pix/', ds_name="vangogh2photo")
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as s:
# DiscoGAN model
model = discogan.DiscoGAN(s)
# load model & graph & weight
global_step = 0
ckpt = tf.train.get_checkpoint_state('./model/')
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
model.saver.restore(s, ckpt.model_checkpoint_path)
            global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])  # cast so later %d formatting and add_summary accept it
print("[+] global step : %s" % global_step, " successfully loaded")
else:
print('[-] No checkpoint file found')
# initializing variables
tf.global_variables_initializer().run()
d_overpowered = False # G loss > D loss * 2
for epoch in range(paras['epoch']):
for step in range(1000):
offset_a = (step * paras['batch_size']) % (dataset.images_a.shape[0] - paras['batch_size'])
offset_b = (step * paras['batch_size']) % (dataset.images_b.shape[0] - paras['batch_size'])
# batch data set
batch_a = dataset.images_a[offset_a : (offset_a + paras['batch_size']), :]
batch_b = dataset.images_b[offset_b : (offset_b + paras['batch_size']), :]
# update D network
if not d_overpowered:
s.run(model.d_op, feed_dict={model.A: batch_a})
# update G network
s.run(model.g_op, feed_dict={model.B: batch_b})
if epoch % paras['logging_interval'] == 0:
d_loss, g_loss, summary = s.run(
[model.d_loss, model.g_loss, model.merged], feed_dict={model.A: batch_a, model.B: batch_b}
)
# print loss
print(
"[+] Epoch %03d Step %04d => " % (epoch, global_step),
" D loss : {:.8f}".format(d_loss),
" G loss : {:.8f}".format(g_loss),
)
# update overpowered
d_overpowered = d_loss < g_loss / 2.0
# training G model with sample image and noise
ab_samples = s.run(model.G_s2b, feed_dict={model.A: batch_a})
ba_samples = s.run(model.G_b2s, feed_dict={model.B: batch_b})
# summary saver
model.writer.add_summary(summary, global_step=global_step)
# export image generated by model G
sample_image_height = model.sample_size
sample_image_width = model.sample_size
sample_ab_dir = results['sample_output'] + 'train_A_{0}_{1}.png'.format(epoch, global_step)
sample_ba_dir = results['sample_output'] + 'train_B_{0}_{1}.png'.format(epoch, global_step)
# Generated image save
iu.save_images(ab_samples, size=[sample_image_height, sample_image_width], image_path=sample_ab_dir)
iu.save_images(ba_samples, size=[sample_image_height, sample_image_width], image_path=sample_ba_dir)
# model save
model.saver.save(s, results['model'], global_step=global_step)
end_time = time.time() - start_time
# elapsed time
print("[+] Elapsed time {:.8f}s".format(end_time))
# close tf.Session
s.close()
if __name__ == '__main__':
main()
|
mit
| -2,482,674,350,346,778,000 | 37.425926 | 120 | 0.536145 | false | 3.649956 | false | false | false |
speksofdust/BeyondDreams
|
beyonddreams/char/attribs.py
|
1
|
3234
|
# ---------------------------------------------------------------------------- #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# ---------------------------------------------------------------------------- #
from pdict import ChildPDict
from .. import bd
class CharAttrib:
__slots__ = ("_parent",)
"""Base class for all character attributes."""
@property
def char(self):
"""The char this attribute belongs to."""
try: return self._parent.char
        except AttributeError: return self._parent # parent is char
@property
def base(self):
"""Defaults for the char which this attribute belongs to."""
return self._parent.base
class CharAttrDict(ChildPDict, CharAttrib):
"""CharAttrib dict type for use with Char attributes and sub attributes."""
__slots__ = ChildPDict.__slots__
class Equip(CharAttrib):
__slots__ = CharAttrib.__slots__ + "_slots"
def __init__(self, char, slots):
self._char = char
self._slots = {}
def __bool__(self): return len(self._slots) > 0
def __len__(self): return len(self._slots)
def __iter__(self): return iter(self._slots)
def __contains__(self, i): return i in self._slots
def __getitem__(self, i): return self._slots[i]
class Body(CharAttrib):
__slots__ = CharAttrib.__slots__ + "_subparts", "_attribs", "_mesh"
def __init__(self, char):
self._char = char
self._subparts = {}
self._attribs = {}
self._mesh = None
#bd.datapath() TODO
@property
def subparts(self):
return self._subparts
@property
def attribs(self):
return self._attribs
class Stats(CharAttrib):
__slots__ = CharAttrib.__slots__
def __init__(self, char):
        self._parent = char
def base_stats(self):
"""The base stats."""
        return self._parent._base._stats
class StatusEffects(CharAttrib):
__slots__ = CharAttrib.__slots__
def __init__(self, char):
        self._parent = char
def base_statuseffects(self):
"""The base status effects."""
        return self._parent._base._statuseffects
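# A minimal usage sketch (``some_char`` is assumed to be an instance of the
# game's Char class, defined elsewhere, exposing the ``char``/``base``/``_base``
# attributes these wrappers expect):
#
#   body = Body(some_char)
#   body.subparts          # -> {} until populated
#   stats = Stats(some_char)
#   stats.base_stats()     # -> the defaults stored on the char's base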
|
gpl-3.0
| 6,986,478,758,046,898,000 | 33.774194 | 80 | 0.497217 | false | 4.67341 | false | false | false |
romeric/Fastor
|
benchmark/external/benchmark_inverse/benchmark_plot.py
|
1
|
1615
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Palatino'],'size':14})
rc('text', usetex=True)
def read_results():
ms, ns, times_eigen, times_fastor = [], [], [], []
with open("benchmark_results.txt", "r") as f:
lines = f.readlines()
for line in lines:
sline = line.split(' ')
if len(sline) == 4:
times_eigen.append(float(sline[1]))
times_fastor.append(float(sline[2]))
elif len(sline) == 7 and "size" in sline[1]:
ms.append(int(sline[4]))
ns.append(int(sline[5]))
return np.array(ms), np.array(ns), np.array(times_eigen), np.array(times_fastor)
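# Input sketch for read_results() (reconstructed from the parsing logic above,
# not from the actual Fastor benchmark writer): a size line has 7
# space-separated tokens with "size" as the 2nd token and M, N as the 5th and
# 6th tokens; a timing line has 4 tokens with the Eigen and Fastor times as the
# 2nd and 3rd, e.g.
#
#   Matrix size benchmark of 4 4 dims
#   inverse 1.25e-07 9.80e-08 secs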
def main():
ms, ns, times_eigen, times_fastor = read_results()
fig, ax = plt.subplots()
index = np.arange(len(ms))
bar_width = 0.2
opacity = 0.8
rects1 = plt.bar(index, times_eigen/1e-6, bar_width,
alpha=opacity,
color='#C03B22',
label='Eigen')
rects3 = plt.bar(index + bar_width, times_fastor/1e-6, bar_width,
alpha=opacity,
color='#E98604',
label='Fastor')
xticks = [str(dim[0]) + 'x' + str(dim[1]) for dim in zip(ms,ns)]
plt.xlabel('(M,M)')
plt.ylabel('Time ($\mu$sec)')
plt.title("B = inv(A)")
plt.xticks(index, xticks, rotation=45)
plt.legend()
plt.tight_layout()
plt.grid(True)
# plt.savefig('benchmark_inverse_single.png', format='png', dpi=300)
# plt.savefig('benchmark_inverse_single.png', format='png', dpi=300)
plt.show()
if __name__ == "__main__":
main()
|
mit
| -533,837,418,136,971,400 | 26.389831 | 84 | 0.573994 | false | 3.05293 | false | false | false |
Bitcoin-ABC/bitcoin-abc
|
test/functional/abc-replay-protection.py
|
1
|
11337
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
This test checks activation of UAHF and the different consensus
related to this activation.
It is derived from the much more complex p2p-fullblocktest.
"""
import time
from test_framework.blocktools import (
create_block,
create_coinbase,
create_tx_with_script,
make_conform_to_ctor,
)
from test_framework.key import ECKey
from test_framework.messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
ToHex,
)
from test_framework.p2p import P2PDataStore
from test_framework.script import (
CScript,
OP_CHECKSIG,
OP_TRUE,
SIGHASH_ALL,
SIGHASH_FORKID,
SignatureHashForkId,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
# far into the future
REPLAY_PROTECTION_START_TIME = 2000000000
# Error due to invalid signature
RPC_INVALID_SIGNATURE_ERROR = "mandatory-script-verify-flag-failed (Signature must be zero for failed CHECK(MULTI)SIG operation)"
class PreviousSpendableOutput(object):
def __init__(self, tx=CTransaction(), n=-1):
self.tx = tx
self.n = n
class ReplayProtectionTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.block_heights = {}
self.tip = None
self.blocks = {}
self.extra_args = [['[email protected]',
"-replayprotectionactivationtime={}".format(
REPLAY_PROTECTION_START_TIME),
"-acceptnonstdtxn=1"]]
def next_block(self, number):
if self.tip is None:
base_block_hash = self.genesis_hash
block_time = int(time.time()) + 1
else:
base_block_hash = self.tip.sha256
block_time = self.tip.nTime + 1
# First create the coinbase
height = self.block_heights[base_block_hash] + 1
coinbase = create_coinbase(height)
coinbase.rehash()
block = create_block(base_block_hash, coinbase, block_time)
        # Do PoW, which is cheap on regtest
block.solve()
self.tip = block
self.block_heights[block.sha256] = height
assert number not in self.blocks
self.blocks[number] = block
return block
def run_test(self):
node = self.nodes[0]
node.add_p2p_connection(P2PDataStore())
node.setmocktime(REPLAY_PROTECTION_START_TIME)
self.genesis_hash = int(node.getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
spendable_outputs = []
# save the current tip so it can be spent by a later block
def save_spendable_output():
spendable_outputs.append(self.tip)
# get an output that we previously marked as spendable
def get_spendable_output():
return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
# move the tip back to a previous block
def tip(number):
self.tip = self.blocks[number]
# adds transactions to the block and updates state
def update_block(block_number, new_transactions):
block = self.blocks[block_number]
block.vtx.extend(new_transactions)
old_sha256 = block.sha256
make_conform_to_ctor(block)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Update the internal state just like in next_block
self.tip = block
if block.sha256 != old_sha256:
self.block_heights[
block.sha256] = self.block_heights[old_sha256]
del self.block_heights[old_sha256]
self.blocks[block_number] = block
return block
# shorthand
block = self.next_block
# Create a new block
block(0)
save_spendable_output()
node.p2p.send_blocks_and_test([self.tip], node)
# Now we need that block to mature so we can spend the coinbase.
maturity_blocks = []
for i in range(99):
block(5000 + i)
maturity_blocks.append(self.tip)
save_spendable_output()
node.p2p.send_blocks_and_test(maturity_blocks, node)
# collect spendable outputs now to avoid cluttering the code later on
out = []
for i in range(100):
out.append(get_spendable_output())
# Generate a key pair to test P2SH sigops count
private_key = ECKey()
private_key.generate()
public_key = private_key.get_pubkey().get_bytes()
# This is a little handier to use than the version in blocktools.py
def create_fund_and_spend_tx(spend, forkvalue=0):
# Fund transaction
script = CScript([public_key, OP_CHECKSIG])
txfund = create_tx_with_script(
spend.tx, spend.n, b'', amount=50 * COIN - 1000, script_pub_key=script)
txfund.rehash()
# Spend transaction
txspend = CTransaction()
txspend.vout.append(CTxOut(50 * COIN - 2000, CScript([OP_TRUE])))
txspend.vin.append(CTxIn(COutPoint(txfund.sha256, 0), b''))
# Sign the transaction
sighashtype = (forkvalue << 8) | SIGHASH_ALL | SIGHASH_FORKID
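            # Worked example (assuming the framework's SIGHASH_ALL == 0x01 and
            # SIGHASH_FORKID == 0x40): forkvalue=0xffdead gives
            # (0xffdead << 8) | 0x41 == 0xffdead41, the replay-protected type.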
sighash = SignatureHashForkId(
script, txspend, 0, sighashtype, 50 * COIN - 1000)
sig = private_key.sign_ecdsa(sighash) + \
bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
txspend.vin[0].scriptSig = CScript([sig])
txspend.rehash()
return [txfund, txspend]
def send_transaction_to_mempool(tx):
tx_id = node.sendrawtransaction(ToHex(tx))
assert tx_id in set(node.getrawmempool())
return tx_id
# Before the fork, no replay protection required to get in the mempool.
txns = create_fund_and_spend_tx(out[0])
send_transaction_to_mempool(txns[0])
send_transaction_to_mempool(txns[1])
# And txns get mined in a block properly.
block(1)
update_block(1, txns)
node.p2p.send_blocks_and_test([self.tip], node)
# Replay protected transactions are rejected.
replay_txns = create_fund_and_spend_tx(out[1], 0xffdead)
send_transaction_to_mempool(replay_txns[0])
assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR,
node.sendrawtransaction, ToHex(replay_txns[1]))
# And block containing them are rejected as well.
block(2)
update_block(2, replay_txns)
node.p2p.send_blocks_and_test(
[self.tip], node, success=False, reject_reason='blk-bad-inputs')
# Rewind bad block
tip(1)
# Create a block that would activate the replay protection.
bfork = block(5555)
bfork.nTime = REPLAY_PROTECTION_START_TIME - 1
update_block(5555, [])
node.p2p.send_blocks_and_test([self.tip], node)
activation_blocks = []
for i in range(5):
block(5100 + i)
activation_blocks.append(self.tip)
node.p2p.send_blocks_and_test(activation_blocks, node)
# Check we are just before the activation time
assert_equal(
node.getblockchaininfo()['mediantime'],
REPLAY_PROTECTION_START_TIME - 1)
# We are just before the fork, replay protected txns still are rejected
assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR,
node.sendrawtransaction, ToHex(replay_txns[1]))
block(3)
update_block(3, replay_txns)
node.p2p.send_blocks_and_test(
[self.tip], node, success=False, reject_reason='blk-bad-inputs')
# Rewind bad block
tip(5104)
# Send some non replay protected txns in the mempool to check
# they get cleaned at activation.
txns = create_fund_and_spend_tx(out[2])
send_transaction_to_mempool(txns[0])
tx_id = send_transaction_to_mempool(txns[1])
# Activate the replay protection
block(5556)
node.p2p.send_blocks_and_test([self.tip], node)
# Check we just activated the replay protection
assert_equal(
node.getblockchaininfo()['mediantime'],
REPLAY_PROTECTION_START_TIME)
# Non replay protected transactions are not valid anymore,
# so they should be removed from the mempool.
assert tx_id not in set(node.getrawmempool())
# Good old transactions are now invalid.
send_transaction_to_mempool(txns[0])
assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR,
node.sendrawtransaction, ToHex(txns[1]))
# They also cannot be mined
block(4)
update_block(4, txns)
node.p2p.send_blocks_and_test(
[self.tip], node, success=False, reject_reason='blk-bad-inputs')
# Rewind bad block
tip(5556)
# The replay protected transaction is now valid
replay_tx0_id = send_transaction_to_mempool(replay_txns[0])
replay_tx1_id = send_transaction_to_mempool(replay_txns[1])
# Make sure the transaction are ready to be mined.
tmpl = node.getblocktemplate()
found_id0 = False
found_id1 = False
for txn in tmpl['transactions']:
txid = txn['txid']
if txid == replay_tx0_id:
found_id0 = True
elif txid == replay_tx1_id:
found_id1 = True
assert found_id0 and found_id1
# And the mempool is still in good shape.
assert replay_tx0_id in set(node.getrawmempool())
assert replay_tx1_id in set(node.getrawmempool())
# They also can also be mined
block(5)
update_block(5, replay_txns)
node.p2p.send_blocks_and_test([self.tip], node)
# Ok, now we check if a reorg work properly across the activation.
postforkblockid = node.getbestblockhash()
node.invalidateblock(postforkblockid)
assert replay_tx0_id in set(node.getrawmempool())
assert replay_tx1_id in set(node.getrawmempool())
# Deactivating replay protection.
forkblockid = node.getbestblockhash()
node.invalidateblock(forkblockid)
# The funding tx is not evicted from the mempool, since it's valid in
# both sides of the fork
assert replay_tx0_id in set(node.getrawmempool())
assert replay_tx1_id not in set(node.getrawmempool())
# Check that we also do it properly on deeper reorg.
node.reconsiderblock(forkblockid)
node.reconsiderblock(postforkblockid)
node.invalidateblock(forkblockid)
assert replay_tx0_id in set(node.getrawmempool())
assert replay_tx1_id not in set(node.getrawmempool())
if __name__ == '__main__':
ReplayProtectionTest().main()
|
mit
| 787,828,342,569,320,100 | 34.428125 | 129 | 0.61242 | false | 3.770203 | true | false | false |
scality/ScalitySproxydSwift
|
test/scenario/multi-backend/fabfile/saio.py
|
1
|
3713
|
import os
import os.path
import fabric.contrib.files
from fabric.api import sudo
from utils import build_object_ring, render
def disk_setup(swift_user):
# Setup a loopdevice to act as disk for swift
sudo('mkdir -p /srv')
sudo('truncate -s 1GB /srv/swift-disk')
sudo('mkfs.xfs /srv/swift-disk')
fabric.contrib.files.append(
filename='/etc/fstab',
text='/srv/swift-disk /mnt/sdb1 xfs loop,noatime 0 0',
use_sudo=True
)
sudo('mkdir /mnt/sdb1')
sudo('mount /mnt/sdb1')
# Prepare directory structure for 4 swift nodes, with two "partitions" each
node_mkdir = 'mkdir -p /mnt/sdb1/{0:d}/node/sdb{1:d}'
num_nodes = 4
for i in range(1, num_nodes + 1):
sudo(node_mkdir.format(i, i))
sudo(node_mkdir.format(i, i + num_nodes))
sudo('ln -s /mnt/sdb1/{0:d} /srv/{1:d}'.format(i, i))
sudo('mkdir /var/cache/swift{0:d}'.format(i))
sudo('chown -R {0:s}: /mnt/sdb1'.format(swift_user))
sudo('mkdir /var/run/swift')
sudo('chown {0:s}: /var/run/swift /var/cache/swift*'.format(swift_user))
render(
directory='assets/saio/phase1/etc',
filenames=['rc.local'],
local_path_prefix='assets/saio/phase1',
content={'user': swift_user},
)
sudo('chmod 755 /etc/rc.local')
sudo('chown root: /etc/rc.local')
def install(swift_user):
sudo('pip install '
'git+https://github.com/openstack/[email protected]')
sudo('pip install git+https://github.com/openstack/[email protected]')
content = {
'user': swift_user,
'group': swift_user,
}
for path, _, filenames in os.walk('assets/saio/phase1/etc/swift'):
render(path, filenames, 'assets/saio/phase1', content)
sudo('chown -R {0:s}: /etc/swift'.format(swift_user))
def build_rings(swift_user):
# Account ring
build_object_ring(
swift_user=swift_user,
name='account.builder',
devices=[
'r1z1-127.0.0.1:6012/sdb1',
'r1z2-127.0.0.1:6022/sdb2',
'r1z3-127.0.0.1:6032/sdb3',
'r1z4-127.0.0.1:6042/sdb4',
],
)
# Container ring
build_object_ring(
swift_user=swift_user,
name='container.builder',
devices=[
'r1z1-127.0.0.1:6011/sdb1',
'r1z2-127.0.0.1:6021/sdb2',
'r1z3-127.0.0.1:6031/sdb3',
'r1z4-127.0.0.1:6041/sdb4',
],
)
# Object ring
build_object_ring(
swift_user=swift_user,
name='object.builder',
devices=[
'r1z1-127.0.0.1:6010/sdb1',
'r1z1-127.0.0.1:6010/sdb5',
'r1z2-127.0.0.1:6020/sdb2',
'r1z2-127.0.0.1:6020/sdb6',
'r1z3-127.0.0.1:6030/sdb3',
'r1z3-127.0.0.1:6030/sdb7',
'r1z4-127.0.0.1:6040/sdb4',
'r1z4-127.0.0.1:6040/sdb8',
],
)
def setup_rsync(swift_user):
render(
directory='assets/saio/phase1/etc',
filenames=['rsyncd.conf'],
local_path_prefix='assets/saio/phase1',
content={'user': swift_user, 'group': swift_user},
)
fabric.contrib.files.sed(
filename='/etc/default/rsync',
before='RSYNC_ENABLE=false',
after='RSYNC_ENABLE=true',
use_sudo=True,
)
sudo('sudo service rsync restart')
def install_scality_swift():
sudo('pip install '
'git+https://github.com/scality/scality-sproxyd-client.git')
sudo('pip install git+https://github.com/scality/ScalitySproxydSwift.git')
def start(swift_user):
sudo('swift-init main start', user=swift_user)
def stop(swift_user):
sudo('swift-init main stop', user=swift_user)
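# Example invocation (assumption: these tasks are exposed through the package's
# fabfile and the swift user is literally named "swift"; host flags omitted):
#
#   fab disk_setup:swift_user=swift install:swift_user=swift build_rings:swift_user=swift
#   fab setup_rsync:swift_user=swift install_scality_swift start:swift_user=swift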
|
apache-2.0
| 453,750,375,810,706,200 | 27.128788 | 79 | 0.578508 | false | 2.900781 | false | false | false |
gwu-libraries/sfm-ui
|
sfm/sfm/settings/test_settings.py
|
1
|
1817
|
from sfm.settings.common import *
import tempfile
import os
DATABASES = {
# for unit tests
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'testdb'
}
}
SFM_DB_DATA_DIR = os.path.join(tempfile.gettempdir(), "test-data")
SFM_MQ_DATA_DIR = os.path.join(tempfile.gettempdir(), "test-data")
SFM_EXPORT_DATA_DIR = os.path.join(tempfile.gettempdir(), "test-data")
SFM_CONTAINERS_DATA_DIR = os.path.join(tempfile.gettempdir(), "test-data")
SFM_COLLECTION_SET_DATA_DIR = os.path.join(tempfile.gettempdir(), "test-data")
SCHEDULER_DB_URL = "sqlite:///testdb"
SCHEDULE_HARVESTS = False
PERFORM_EXPORTS = False
PERFORM_EMAILS = False
PERFORM_USER_HARVEST_EMAILS = False
PERFORM_SERIALIZE = False
ADMINS = [("sfmadmin", "[email protected]")]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(process)d %(name)s %(message)s'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'INFO',
'propagate': True,
},
'django.request': {
'handlers': ['console'],
'level': 'INFO',
'propagate': True,
},
'apscheduler': {
'handlers': ['console'],
'level': 'INFO',
'propagate': True,
},
'ui': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
'message_consumer': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
},
}
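# Example invocation (assumption: run next to manage.py; the dotted path
# follows this file's location under sfm/settings/):
#
#   python manage.py test --settings=sfm.settings.test_settings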
|
mit
| -1,005,177,759,540,445,600 | 23.226667 | 82 | 0.519538 | false | 3.555773 | true | false | false |
mikexine/tweetset
|
tweetset/collect/views.py
|
1
|
6251
|
from django.shortcuts import render, get_object_or_404, redirect
from django.http import Http404, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.contrib import auth
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.views import login as login_view
from django.contrib.auth.models import User
from django.contrib import messages
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from collect.models import Collection
from django.db.models import Count
from collect.utils import pagination_helper
from collect.forms import CollectionForm
from django.utils.text import slugify
import json
import gzip
import csv
from collect.utils import flatten
def encode_if_string(s):
try:
return s.encode('utf-8')
except:
return s
@login_required
def download_csv(request, collection_id):
c = get_object_or_404(Collection,pk=collection_id,user=request.user)
response = HttpResponse(content_type='application/gzip')
response['Content-Disposition'] = 'attachment; filename="'+slugify(c.name)+'.csv.gz"'
with gzip.GzipFile(fileobj=response, mode="w") as f:
list_of_tweets = []
for t in c.tweets.all():
list_of_tweets.append(flatten(t.data))
if len(list_of_tweets) > 0:
writer = csv.DictWriter(f,
['id', 'text', 'retweeted', 'created_at',
'user_id', 'user_screen_name'],
extrasaction='ignore', dialect='excel')
writer.writeheader()
for t in list_of_tweets:
writer.writerow({k: encode_if_string(v) for k, v in t.items()})
return response
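# Sketch of the flattened row shape the DictWriter above relies on (the real
# keys come from collect.utils.flatten, which is not shown here, so this is an
# assumption based on the chosen fieldnames; extra keys are ignored):
#
#   {"id": 123, "text": "...", "retweeted": False, "created_at": "...",
#    "user_id": 456, "user_screen_name": "someone", ...}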
@login_required
def download_json(request, collection_id):
c = get_object_or_404(Collection, pk=collection_id, user=request.user)
print c.tweets.all()
response = HttpResponse(content_type='application/gzip')
response['Content-Disposition'] = 'attachment; filename="' + slugify(c.name) + '.json.gz"'
list_of_tweets = []
for t in c.tweets.all():
list_of_tweets.append(t.data)
with gzip.GzipFile(fileobj=response, mode="w") as f:
f.write(json.dumps(list_of_tweets, indent=4))
return response
@login_required
def map(request, collection_id):
c = get_object_or_404(Collection, pk=collection_id, user=request.user)
return render(request, "collect/stats/map.html", {
'collection': c,
'collection_id': collection_id
})
@login_required
def time_chart(request, collection_id):
c = get_object_or_404(Collection, pk=collection_id, user=request.user)
return render(request, "collect/stats/time_chart.html", {
'collection': c,
'collection_id': collection_id
})
@login_required
def frequencies(request, collection_id):
c = get_object_or_404(Collection, pk=collection_id, user=request.user)
return render(request, "collect/stats/frequencies.html", {
'collection': c,
'collection_id': collection_id
})
@login_required
def tweets(request, collection_id):
c = get_object_or_404(Collection, pk=collection_id, user=request.user)
page = request.GET.get('page', 1)
tweets, show_first, show_last, page_numbers = pagination_helper(object_list=c.tweets.all(), page=page, per_page=25, allow_empty_first_page=True)
return render(request, 'collect/tweets.html', {
'collection': c,
'tweets': tweets,
'show_first': show_first,
'show_last': show_last,
'page_numbers': page_numbers, })
@login_required
def edit_collection(request, collection_id):
c = get_object_or_404(Collection, pk=collection_id, user=request.user)
if request.method == 'POST':
form = CollectionForm(request.POST, instance=c)
if form.is_valid():
new_collection = form.save(commit=False)
new_collection.save()
return redirect('dashboard')
else:
form = CollectionForm(instance=c)
return render(request, 'collect/edit_collection.html',
{'collection': c, 'form': form, })
@login_required
def new_collection(request):
if request.method == 'POST':
form = CollectionForm(request.POST)
if form.is_valid():
new_collection = form.save(commit=False)
new_collection.user = request.user
new_collection.save()
return redirect('dashboard')
else:
form = CollectionForm()
return render(request, 'collect/new_collection.html', {'form': form, })
@login_required
def stop_collection(request, collection_id):
c = get_object_or_404(Collection, pk=collection_id, user=request.user)
c.stop()
return redirect('dashboard')
@login_required
def start_collection(request, collection_id):
c = get_object_or_404(Collection, pk=collection_id, user=request.user)
if c.start():
messages.success(request, "Collection successfully started!")
else:
messages.error(request, "Collection could not be started.")
return redirect('dashboard')
@login_required
def make_stats(request, collection_id):
c = get_object_or_404(Collection, pk=collection_id, user=request.user)
a = c.mstats()
print a
if a:
messages.success(request, "Stats will be available soon!")
else:
messages.error(request, "Err.. Stats could not be started.")
return redirect('dashboard')
@login_required
def delete_collection(request, collection_id):
c = get_object_or_404(Collection, pk=collection_id, user=request.user)
c.delete()
return redirect('dashboard')
@login_required
def dashboard(request):
collections = Collection.objects.filter(user=request.user).annotate(num_tweets=Count('tweets'))
return render(request, 'collect/dashboard.html',
{'collections': collections, })
def index(request):
return render(request, 'collect/index.html')
def contacts(request):
return render(request, 'collect/contacts.html')
def collect_login(request, *args, **kwargs):
return login_view(request, *args, **kwargs)
|
mit
| -1,877,005,152,931,094,800 | 32.25 | 148 | 0.666773 | false | 3.705394 | false | false | false |
HardLight/denyhosts
|
DenyHosts/sync.py
|
1
|
7559
|
import logging
import os
import time
import sys
import socket
import requests
if sys.version_info < (3, 0):
from xmlrpclib import ServerProxy
else:
from xmlrpc.client import ServerProxy, Transport, ProtocolError
from .constants import SYNC_TIMESTAMP, SYNC_HOSTS, SYNC_HOSTS_TMP, SYNC_RECEIVED_HOSTS, SOCKET_TIMEOUT
logger = logging.getLogger("sync")
debug, info, error, exception = logger.debug, logger.info, logger.error, logger.exception
def get_plural(items):
if len(items) != 1:
return "s"
else:
return ""
if sys.version_info >= (3, 0):
class RequestsTransport(Transport):
def request(self, host, handler, data, verbose=False):
# set the headers, including the user-agent
headers = {"User-Agent": "my-user-agent",
"Content-Type": "text/xml",
"Accept-Encoding": "gzip"}
url = "http://%s%s" % (host, handler)
response = None
try:
response = requests.post(url, data=data, headers=headers, timeout=SOCKET_TIMEOUT)
response.raise_for_status()
except requests.RequestException as e:
if response is None:
exception(ProtocolError(url, 500, str(e), ""))
else:
exception(ProtocolError(
url,
response.status_code,
str(e),
response.headers
))
if response is not None:
return self.parse_response(response)
return response
def parse_response(self, resp):
"""
Parse the xmlrpc response.
"""
p, u = self.getparser()
p.feed(resp.text)
p.close()
return u.close()
class Sync(object):
def __init__(self, prefs):
self.__prefs = prefs
self.__work_dir = prefs.get('WORK_DIR')
self.__connected = False
self.__hosts_added = []
self.__server = None
self.__default_timeout = socket.getdefaulttimeout()
self.__pymajor_version = sys.version_info[0]
self.__sync_server = self.__prefs.get('SYNC_SERVER')
def xmlrpc_connect(self):
debug("xmlrpc_conect()")
# python 2
if self.__pymajor_version == 2:
socket.setdefaulttimeout(SOCKET_TIMEOUT) # set global socket timeout
for i in range(0, 3):
debug("XMLRPC Connection attempt: %d" % i)
try:
# python 2
if self.__pymajor_version == 2:
self.__server = ServerProxy(self.__sync_server)
else:
self.__server = ServerProxy(self.__sync_server, transport=RequestsTransport())
debug("Connected To SYNC Server")
self.__connected = True
break
except Exception as e:
error(str(e))
self.__connected = False
time.sleep(30)
if not self.__connected:
error('Failed to connect to %s after 3 attempts' % self.__sync_server)
# python 2
if self.__pymajor_version == 2:
socket.setdefaulttimeout(self.__default_timeout) # set timeout back to the default
return self.__connected
def xmlrpc_disconnect(self):
if self.__connected:
try:
# self.__server.close()
self.__server = None
except Exception:
pass
self.__connected = False
def get_sync_timestamp(self):
timestamp = 0
try:
with open(os.path.join(self.__work_dir, SYNC_TIMESTAMP)) as fp:
line = fp.readline().strip()
if len(line) > 0:
timestamp = int(line)
return timestamp
return timestamp
except Exception as e:
error(str(e))
return 0
def set_sync_timestamp(self, timestamp):
try:
with open(os.path.join(self.__work_dir, SYNC_TIMESTAMP), "w") as fp:
fp.write(timestamp)
except Exception as e:
error(e)
def send_new_hosts(self):
debug("send_new_hosts()")
self.__hosts_added = []
try:
src_file = os.path.join(self.__work_dir, SYNC_HOSTS)
dest_file = os.path.join(self.__work_dir, SYNC_HOSTS_TMP)
os.rename(src_file, dest_file)
except OSError:
return False
hosts = []
with open(dest_file, 'r') as fp:
# less memory usage than using readlines()
for line in fp:
hosts.append(line.strip())
try:
self.__send_new_hosts(hosts)
info("sent %d new host%s", len(hosts), get_plural(hosts))
self.__hosts_added = hosts
except Exception:
os.rename(dest_file, src_file)
return False
try:
os.unlink(dest_file)
except OSError:
pass
return True
def __send_new_hosts(self, hosts):
debug("__send_new_hosts()")
if not self.__connected and not self.xmlrpc_connect():
error("Could not initiate xmlrpc connection")
return
for i in range(0, 3):
try:
self.__server.add_hosts(hosts)
break
except Exception as e:
exception(e)
time.sleep(30)
def receive_new_hosts(self):
debug("receive_new_hosts()")
data = self.__receive_new_hosts()
if data is None:
return None
try:
timestamp = data['timestamp']
self.set_sync_timestamp(timestamp)
hosts = data['hosts']
info("received %d new host%s", len(hosts), get_plural(hosts))
debug("hosts added %s", hosts)
self.__save_received_hosts(hosts, timestamp)
return hosts
except Exception as e:
exception(e)
return None
def __receive_new_hosts(self):
debug("__receive_new_hosts()")
if not self.__connected and not self.xmlrpc_connect():
error("Could not initiate xmlrpc connection")
return
timestamp = self.get_sync_timestamp()
sync_dl_threshold = self.__prefs.get("SYNC_DOWNLOAD_THRESHOLD")
sync_dl_resiliency = self.__prefs.get("SYNC_DOWNLOAD_RESILIENCY")
data = None
for i in range(0, 3):
try:
data = self.__server.get_new_hosts(
timestamp,
sync_dl_threshold,
self.__hosts_added,
sync_dl_resiliency
)
break
except Exception as e:
exception(e)
pass
time.sleep(30)
if data is None:
error('Unable to retrieve data from the sync server')
return data
def __save_received_hosts(self, hosts, timestamp):
debug('__save_received_hosts()')
try:
timestr = time.ctime(float(timestamp))
with open(os.path.join(self.__work_dir, SYNC_RECEIVED_HOSTS), "a") as fp:
for host in hosts:
fp.write("%s:%s\n" % (host, timestr))
        except IOError as e:
            error(e)
            return
        # no explicit fp.close() is needed here: the 'with' block above closes
        # the file, and fp would be unbound if open() itself had failed
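# Illustrative only (not part of DenyHosts itself): a minimal driver for the
# Sync class above, assuming a preferences object whose .get() supports the
# keys read in __init__ (WORK_DIR, SYNC_SERVER, SYNC_DOWNLOAD_THRESHOLD, ...):
#
#     sync = Sync(prefs)
#     if sync.xmlrpc_connect():
#         sync.send_new_hosts()
#         sync.receive_new_hosts()
#         sync.xmlrpc_disconnect()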
|
gpl-2.0
| 7,353,463,990,728,281,000 | 31.165957 | 102 | 0.513957 | false | 4.436033 | false | false | false |
susi/angya
|
widgets/nav.py
|
1
|
1162
|
"""This module defines the left navigation and its buttons."""
import flask
from google.appengine.api import users
class Navigation(object):
"""The Navigation returns information to render the nav menu buttons."""
def __init__(self, app):
self.app = app
def render(self):
"""Returns a json map of the buttons for the navigation bar."""
buttons = [
{'name': 'close',
'url': 'javascript:closeNavigation()',
'hint': 'close navigation'},
{'name': 'list',
'url': 'javascript:tripmanager.listTrips()',
'hint': 'my trips list'},
{'name': 'edit',
'url': 'javascript:tripmanager.createTrip()',
'hint': 'create trip'},
{'name': 'marker',
'url': 'javascript:tripmanager.addPlace()',
'hint': 'add place to trip'},
{'name': 'map-type',
'url': 'javascript:swapMapType()',
'hint': 'change map type'},
]
widget = {
'name': 'left-nav',
'buttons': buttons,
'js': flask.url_for('static', filename='js/widgets/nav.js'),
'css': flask.url_for('static', filename='css/widgets/nav.css')
}
return flask.jsonify(**widget)
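# Illustrative only (not part of this module): one way the widget above might
# be wired into the Flask app; the route and handler names are assumptions.
#
#     @app.route('/widgets/nav')
#     def nav_widget():
#         return Navigation(app).render()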
|
apache-2.0
| 3,088,551,322,639,043,600 | 28.794872 | 74 | 0.583477 | false | 3.785016 | false | false | false |
CodeReclaimers/neat-python
|
examples/xor/visualize.py
|
1
|
5915
|
from __future__ import print_function
import copy
import warnings
# graphviz, matplotlib and numpy are treated as optional dependencies: fall
# back to None so the helpers below can warn instead of failing at import time
try:
    import graphviz
except ImportError:
    graphviz = None
try:
    import matplotlib.pyplot as plt
    import numpy as np
except ImportError:
    plt = np = None
def plot_stats(statistics, ylog=False, view=False, filename='avg_fitness.svg'):
""" Plots the population's average and best fitness. """
if plt is None:
warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
return
generation = range(len(statistics.most_fit_genomes))
best_fitness = [c.fitness for c in statistics.most_fit_genomes]
avg_fitness = np.array(statistics.get_fitness_mean())
stdev_fitness = np.array(statistics.get_fitness_stdev())
plt.plot(generation, avg_fitness, 'b-', label="average")
plt.plot(generation, avg_fitness - stdev_fitness, 'g-.', label="-1 sd")
plt.plot(generation, avg_fitness + stdev_fitness, 'g-.', label="+1 sd")
plt.plot(generation, best_fitness, 'r-', label="best")
plt.title("Population's average and best fitness")
plt.xlabel("Generations")
plt.ylabel("Fitness")
plt.grid()
plt.legend(loc="best")
if ylog:
plt.gca().set_yscale('symlog')
plt.savefig(filename)
if view:
plt.show()
plt.close()
def plot_spikes(spikes, view=False, filename=None, title=None):
""" Plots the trains for a single spiking neuron. """
t_values = [t for t, I, v, u, f in spikes]
v_values = [v for t, I, v, u, f in spikes]
u_values = [u for t, I, v, u, f in spikes]
I_values = [I for t, I, v, u, f in spikes]
f_values = [f for t, I, v, u, f in spikes]
fig = plt.figure()
plt.subplot(4, 1, 1)
plt.ylabel("Potential (mv)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, v_values, "g-")
if title is None:
plt.title("Izhikevich's spiking neuron model")
else:
plt.title("Izhikevich's spiking neuron model ({0!s})".format(title))
plt.subplot(4, 1, 2)
plt.ylabel("Fired")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, f_values, "r-")
plt.subplot(4, 1, 3)
plt.ylabel("Recovery (u)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, u_values, "r-")
plt.subplot(4, 1, 4)
plt.ylabel("Current (I)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, I_values, "r-o")
if filename is not None:
plt.savefig(filename)
if view:
plt.show()
plt.close()
fig = None
return fig
def plot_species(statistics, view=False, filename='speciation.svg'):
""" Visualizes speciation throughout evolution. """
if plt is None:
warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
return
species_sizes = statistics.get_species_sizes()
num_generations = len(species_sizes)
curves = np.array(species_sizes).T
fig, ax = plt.subplots()
ax.stackplot(range(num_generations), *curves)
plt.title("Speciation")
plt.ylabel("Size per Species")
plt.xlabel("Generations")
plt.savefig(filename)
if view:
plt.show()
plt.close()
def draw_net(config, genome, view=False, filename=None, node_names=None, show_disabled=True, prune_unused=False,
node_colors=None, fmt='svg'):
""" Receives a genome and draws a neural network with arbitrary topology. """
# Attributes for network nodes.
if graphviz is None:
warnings.warn("This display is not available due to a missing optional dependency (graphviz)")
return
if node_names is None:
node_names = {}
assert type(node_names) is dict
if node_colors is None:
node_colors = {}
assert type(node_colors) is dict
node_attrs = {
'shape': 'circle',
'fontsize': '9',
'height': '0.2',
'width': '0.2'}
dot = graphviz.Digraph(format=fmt, node_attr=node_attrs)
inputs = set()
for k in config.genome_config.input_keys:
inputs.add(k)
name = node_names.get(k, str(k))
input_attrs = {'style': 'filled', 'shape': 'box', 'fillcolor': node_colors.get(k, 'lightgray')}
dot.node(name, _attributes=input_attrs)
outputs = set()
for k in config.genome_config.output_keys:
outputs.add(k)
name = node_names.get(k, str(k))
node_attrs = {'style': 'filled', 'fillcolor': node_colors.get(k, 'lightblue')}
dot.node(name, _attributes=node_attrs)
if prune_unused:
connections = set()
for cg in genome.connections.values():
if cg.enabled or show_disabled:
connections.add((cg.in_node_id, cg.out_node_id))
used_nodes = copy.copy(outputs)
pending = copy.copy(outputs)
while pending:
new_pending = set()
for a, b in connections:
if b in pending and a not in used_nodes:
new_pending.add(a)
used_nodes.add(a)
pending = new_pending
else:
used_nodes = set(genome.nodes.keys())
for n in used_nodes:
if n in inputs or n in outputs:
continue
attrs = {'style': 'filled',
'fillcolor': node_colors.get(n, 'white')}
dot.node(str(n), _attributes=attrs)
for cg in genome.connections.values():
if cg.enabled or show_disabled:
#if cg.input not in used_nodes or cg.output not in used_nodes:
# continue
input, output = cg.key
a = node_names.get(input, str(input))
b = node_names.get(output, str(output))
style = 'solid' if cg.enabled else 'dotted'
color = 'green' if cg.weight > 0 else 'red'
width = str(0.1 + abs(cg.weight / 5.0))
dot.edge(a, b, _attributes={'style': style, 'color': color, 'penwidth': width})
dot.render(filename, view=view)
return dot
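# Illustrative only: these helpers are typically driven from the accompanying
# evolve script roughly as below; config/winner/stats come from a NEAT run and
# the node_names mapping is just an example for the XOR inputs/output.
#
#     node_names = {-1: 'A', -2: 'B', 0: 'A XOR B'}
#     draw_net(config, winner, view=True, node_names=node_names)
#     plot_stats(stats, ylog=False, view=True)
#     plot_species(stats, view=True)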
|
bsd-3-clause
| 8,867,354,121,167,928,000 | 29.025381 | 112 | 0.597464 | false | 3.432966 | false | false | false |
jdgwartney/boundary-einstein-python
|
bootstrap.py
|
1
|
2028
|
#!/usr/bin/env python
import os
import shutil
import sys
import subprocess
import tarfile
import urllib
class Bootstrap:
def __init__(self,
version="12.0.4",
base='http://pypi.python.org/packages/source/v/virtualenv',
python="python2",
env="py",
requirements="requirements.txt"):
self.version = version
self.base = base
self.python = python
self.env = env
self.dirname = 'virtualenv-' + self.version
self.tgz_file = self.dirname + '.tar.gz'
self.venv_url = self.base + '/' + self.tgz_file
self.requirements=requirements
def shellcmd(self,cmd,echo=False):
""" Run 'cmd' in the shell and return its standard out.
"""
if echo: print '[cmd] {0}'.format(cmd)
out = subprocess.check_output(cmd,stderr=sys.stderr,shell=True)
if echo: print out
return out
def download(self):
""" Fetch virtualenv from PyPI
"""
urllib.urlretrieve(self.venv_url,self.tgz_file)
def extract(self):
""" Untar
"""
tar = tarfile.open(self.tgz_file,"r:gz")
tar.extractall()
def create(self):
""" Create the initial env
"""
self.shellcmd('{0} {1}/virtualenv.py {2}'.format(self.python,self.dirname,self.env))
def install(self):
"""Install the virtualenv package itself into the initial env
"""
self.shellcmd('{0}/bin/pip install {1}'.format(self.env,self.tgz_file))
def install_libs(self):
"""Install the virtualenv package itself into the initial env
"""
self.shellcmd('{0}/bin/pip install -r {1}'.format(self.env,self.requirements))
def cleanup(self):
""" Cleanup
"""
os.remove(self.tgz_file)
shutil.rmtree(self.dirname)
def setup(self):
"""Bootraps a python environment
"""
self.download()
self.extract()
self.create()
self.install()
self.cleanup()
if os.path.isfile(self.requirements):
self.install_libs()
if __name__ == "__main__":
bootstrap = Bootstrap()
bootstrap.setup()
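# Illustrative only: Bootstrap also accepts non-default settings through the
# constructor parameters defined above; the values here are examples.
#
#     Bootstrap(env="venv", requirements="dev-requirements.txt").setup()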
|
apache-2.0
| -8,298,653,270,635,625,000 | 24.670886 | 88 | 0.622288 | false | 3.66065 | false | false | false |
Praxyk/Praxyk-DevOps
|
server/unittest/unit_test.py
|
1
|
2481
|
#!/usr/bin/env python
import _fix_path_
import sys
import datetime
class UnitTest :
def maintest(self,name, desc, f) :
return self.logger.log_event(self.logclient, 'UNIT TEST', ("s" if f else "f"),
['Test Name', 'Description'],
(str(name), desc) )
    # this is used for asserting actions are true that don't constitute the main purpose of
    # a test, but still need to be logged and verified. E.g. a test that tries to update items
    # in a database might need to log in to the DB first; it would pass the result of the login
    # attempt to this function, but the result of the updates to the maintest() function.
def subtest(self,name, desc, f) :
return self.logger.log_event(self.logclient, 'SUB-TEST', ("s" if f else "f"),
['Test Name', 'Description'],
(str(name), desc) )
def logteststart(self, name, info="") :
return self.logger.log_event(self.logclient, 'UNIT TEST', 'a', ['Test Name', 'Info'], (name, info))
def loginfo(self, name, info) :
return self.logger.log_event(self.logclient, 'TEST-INFO', "i", ['Message'], str(info))
def loghead(self) :
title = self.title + ' UNIT TEST START '
exchar = '-'
logstr = '\n' + 30*exchar + title + 30*exchar + '\n'
logstr += '''Start Time : ''' + str(datetime.datetime.now()).split(' ')[1] + '\n'
for data in self.head_data :
logstr += 3*exchar+' [%s] \n' % data
logstr += 30*exchar + len(title)*exchar + 30*exchar + '\n'
self.logger.logblock(self.logclient, logstr)
def logtail(self, result) :
title = self.title + ' UNIT TEST FINISH '
exchar = '-'
logstr = '\n' + 30*exchar + title + 30*exchar + '\n'
logstr += 'End Time : ' + str(datetime.datetime.now()).split(' ')[1] + '\n'
logstr += 'Result : ' + str(result) + '\n'
for data in self.tail_data :
logstr += 3*exchar+' [%s] \n' % data
logstr += 30*exchar + len(title)*exchar + 30*exchar + '\n'
self.logger.logblock(self.logclient, logstr)
    def __init__(self, testargs):  # logftest, testtbl, schema
        self.passed = False
        # subclasses are expected to set these before calling this __init__;
        # default them here so loghead()/logtail() have values to work with
        self.head_data = getattr(self, 'head_data', [])
        self.tail_data = getattr(self, 'tail_data', [])
        self.title = getattr(self, 'title', self.__class__.__name__)
self.logclient = testargs['logclient']
self.logger = testargs['logutil']
self.loghead()
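# Illustrative only: a sketch of a concrete subclass, showing the attributes
# loghead()/logtail() expect and the maintest()/subtest() reporting pattern;
# the DatabaseTest name and its checks are assumptions, not part of this file.
#
#     class DatabaseTest(UnitTest):
#         def __init__(self, testargs):
#             self.title = 'DATABASE'
#             self.head_data = ['verifies schema', 'verifies row updates']
#             self.tail_data = []
#             UnitTest.__init__(self, testargs)
#         def run(self):
#             self.subtest('db-login', 'connect to the database', True)
#             self.passed = self.maintest('db-update', 'update rows', True)
#             self.logtail(self.passed)
#             return self.passed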
|
gpl-2.0
| 1,886,044,161,479,319,300 | 43.303571 | 107 | 0.554212 | false | 3.504237 | true | false | false |
yugangw-msft/azure-cli
|
src/azure-cli/azure/cli/command_modules/storage/tests/latest/test_storage_account_scenarios.py
|
1
|
114261
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import time
import unittest
from azure.cli.testsdk import (ScenarioTest, LocalContextScenarioTest, JMESPathCheck, ResourceGroupPreparer,
StorageAccountPreparer, api_version_constraint, live_only, LiveScenarioTest,
record_only)
from azure.cli.testsdk.decorators import serial_test
from azure.cli.core.profiles import ResourceType
from ..storage_test_util import StorageScenarioMixin
from knack.util import CLIError
from datetime import datetime, timedelta
from azure_devtools.scenario_tests import AllowLargeResponse
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2016-12-01')
class StorageAccountTests(StorageScenarioMixin, ScenarioTest):
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2017-06-01')
@ResourceGroupPreparer(name_prefix='cli_test_storage_service_endpoints')
def test_storage_account_service_endpoints(self, resource_group):
kwargs = {
'rg': resource_group,
'acc': self.create_random_name(prefix='cli', length=24),
'vnet': 'vnet1',
'subnet': 'subnet1'
}
self.cmd('storage account create -g {rg} -n {acc} --bypass Metrics --default-action Deny --https-only'.format(**kwargs),
checks=[
JMESPathCheck('networkRuleSet.bypass', 'Metrics'),
JMESPathCheck('networkRuleSet.defaultAction', 'Deny')])
self.cmd('storage account update -g {rg} -n {acc} --bypass Logging --default-action Allow'.format(**kwargs),
checks=[
JMESPathCheck('networkRuleSet.bypass', 'Logging'),
JMESPathCheck('networkRuleSet.defaultAction', 'Allow')])
self.cmd('storage account update -g {rg} -n {acc} --set networkRuleSet.default_action=deny'.format(**kwargs),
checks=[
JMESPathCheck('networkRuleSet.bypass', 'Logging'),
JMESPathCheck('networkRuleSet.defaultAction', 'Deny')])
self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name {subnet}'.format(**kwargs))
self.cmd(
'network vnet subnet update -g {rg} --vnet-name {vnet} -n {subnet} --service-endpoints Microsoft.Storage'.format(
**kwargs))
self.cmd('storage account network-rule add -g {rg} --account-name {acc} --ip-address 25.1.2.3'.format(**kwargs))
# test network-rule add idempotent
self.cmd('storage account network-rule add -g {rg} --account-name {acc} --ip-address 25.1.2.3'.format(**kwargs))
self.cmd(
'storage account network-rule add -g {rg} --account-name {acc} --ip-address 25.2.0.0/24'.format(**kwargs))
self.cmd(
'storage account network-rule add -g {rg} --account-name {acc} --vnet-name {vnet} --subnet {subnet}'.format(
**kwargs))
self.cmd('storage account network-rule list -g {rg} --account-name {acc}'.format(**kwargs), checks=[
JMESPathCheck('length(ipRules)', 2),
JMESPathCheck('length(virtualNetworkRules)', 1)
])
# test network-rule add idempotent
self.cmd(
'storage account network-rule add -g {rg} --account-name {acc} --vnet-name {vnet} --subnet {subnet}'.format(
**kwargs))
self.cmd('storage account network-rule list -g {rg} --account-name {acc}'.format(**kwargs), checks=[
JMESPathCheck('length(ipRules)', 2),
JMESPathCheck('length(virtualNetworkRules)', 1)
])
self.cmd(
'storage account network-rule remove -g {rg} --account-name {acc} --ip-address 25.1.2.3'.format(**kwargs))
self.cmd(
'storage account network-rule remove -g {rg} --account-name {acc} --vnet-name {vnet} --subnet {subnet}'.format(
**kwargs))
self.cmd('storage account network-rule list -g {rg} --account-name {acc}'.format(**kwargs), checks=[
JMESPathCheck('length(ipRules)', 1),
JMESPathCheck('length(virtualNetworkRules)', 0)
])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2020-08-01-preview')
@ResourceGroupPreparer(name_prefix='cli_test_storage_service_endpoints')
@StorageAccountPreparer()
def test_storage_account_resource_access_rules(self, resource_group, storage_account):
self.kwargs = {
'rg': resource_group,
'sa': storage_account,
'rid1': "/subscriptions/a7e99807-abbf-4642-bdec-2c809a96a8bc/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1",
'rid2': "/subscriptions/a7e99807-abbf-4642-bdec-2c809a96a8bc/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace2",
'rid3': "/subscriptions/a7e99807-abbf-4642-bdec-2c809a96a8bc/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace3",
'tid1': "72f988bf-86f1-41af-91ab-2d7cd011db47",
'tid2': "72f988bf-86f1-41af-91ab-2d7cd011db47"
}
self.cmd(
'storage account network-rule add -g {rg} --account-name {sa} --resource-id {rid1} --tenant-id {tid1}')
self.cmd('storage account network-rule list -g {rg} --account-name {sa}', checks=[
JMESPathCheck('length(resourceAccessRules)', 1)
])
# test network-rule add idempotent
self.cmd(
'storage account network-rule add -g {rg} --account-name {sa} --resource-id {rid1} --tenant-id {tid1}')
self.cmd('storage account network-rule list -g {rg} --account-name {sa}', checks=[
JMESPathCheck('length(resourceAccessRules)', 1)
])
# test network-rule add more
self.cmd(
'storage account network-rule add -g {rg} --account-name {sa} --resource-id {rid2} --tenant-id {tid1}')
self.cmd('storage account network-rule list -g {rg} --account-name {sa}', checks=[
JMESPathCheck('length(resourceAccessRules)', 2)
])
self.cmd(
'storage account network-rule add -g {rg} --account-name {sa} --resource-id {rid3} --tenant-id {tid2}')
self.cmd('storage account network-rule list -g {rg} --account-name {sa}', checks=[
JMESPathCheck('length(resourceAccessRules)', 3)
])
# remove network-rule
self.cmd(
'storage account network-rule remove -g {rg} --account-name {sa} --resource-id {rid1} --tenant-id {tid1}')
self.cmd('storage account network-rule list -g {rg} --account-name {sa}', checks=[
JMESPathCheck('length(resourceAccessRules)', 2)
])
self.cmd(
'storage account network-rule remove -g {rg} --account-name {sa} --resource-id {rid2} --tenant-id {tid2}')
self.cmd('storage account network-rule list -g {rg} --account-name {sa}', checks=[
JMESPathCheck('length(resourceAccessRules)', 1)
])
@serial_test()
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2017-06-01')
@ResourceGroupPreparer(location='southcentralus')
def test_create_storage_account_with_assigned_identity(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
cmd = 'az storage account create -n {} -g {} --sku Standard_LRS --assign-identity'.format(name, resource_group)
result = self.cmd(cmd).get_output_in_json()
self.assertIn('identity', result)
self.assertTrue(result['identity']['principalId'])
self.assertTrue(result['identity']['tenantId'])
@serial_test()
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2017-06-01')
@ResourceGroupPreparer(location='southcentralus')
def test_update_storage_account_with_assigned_identity(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} --sku Standard_LRS'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('identity', None)])
update_cmd = 'az storage account update -n {} -g {} --assign-identity'.format(name, resource_group)
result = self.cmd(update_cmd).get_output_in_json()
self.assertIn('identity', result)
self.assertTrue(result['identity']['principalId'])
self.assertTrue(result['identity']['tenantId'])
@AllowLargeResponse()
@ResourceGroupPreparer(parameter_name_for_location='location')
def test_create_storage_account(self, resource_group, location):
name = self.create_random_name(prefix='cli', length=24)
self.cmd('az storage account create -n {} -g {} --sku {} -l {}'.format(
name, resource_group, 'Standard_LRS', location))
self.cmd('storage account check-name --name {}'.format(name), checks=[
JMESPathCheck('nameAvailable', False),
JMESPathCheck('reason', 'AlreadyExists')
])
self.cmd('storage account list -g {}'.format(resource_group), checks=[
JMESPathCheck('[0].location', 'westus'),
JMESPathCheck('[0].sku.name', 'Standard_LRS'),
JMESPathCheck('[0].resourceGroup', resource_group)
])
self.cmd('az storage account show -n {} -g {}'.format(name, resource_group), checks=[
JMESPathCheck('name', name),
JMESPathCheck('location', location),
JMESPathCheck('sku.name', 'Standard_LRS'),
JMESPathCheck('kind', 'StorageV2')
])
self.cmd('az storage account show -n {}'.format(name), checks=[
JMESPathCheck('name', name),
JMESPathCheck('location', location),
JMESPathCheck('sku.name', 'Standard_LRS'),
JMESPathCheck('kind', 'StorageV2')
])
self.cmd('storage account show-connection-string -g {} -n {} --protocol http'.format(
resource_group, name), checks=[
JMESPathCheck("contains(connectionString, 'https')", False),
JMESPathCheck("contains(connectionString, '{}')".format(name), True)])
self.cmd('storage account update -g {} -n {} --tags foo=bar cat'
.format(resource_group, name),
checks=JMESPathCheck('tags', {'cat': '', 'foo': 'bar'}))
self.cmd('storage account update -g {} -n {} --sku Standard_GRS --tags'
.format(resource_group, name),
checks=[JMESPathCheck('tags', {}),
JMESPathCheck('sku.name', 'Standard_GRS')])
self.cmd('storage account update -g {} -n {} --set tags.test=success'
.format(resource_group, name),
checks=JMESPathCheck('tags', {'test': 'success'}))
self.cmd('storage account delete -g {} -n {} --yes'.format(resource_group, name))
self.cmd('storage account check-name --name {}'.format(name),
checks=JMESPathCheck('nameAvailable', True))
large_file_name = self.create_random_name(prefix='cli', length=24)
self.cmd('storage account create -g {} -n {} --sku {} --enable-large-file-share'.format(
resource_group, large_file_name, 'Standard_LRS'))
self.cmd('az storage account show -n {} -g {}'.format(large_file_name, resource_group), checks=[
JMESPathCheck('name', large_file_name),
JMESPathCheck('sku.name', 'Standard_LRS'),
JMESPathCheck('largeFileSharesState', 'Enabled')
])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-06-01')
@ResourceGroupPreparer(location='eastus2euap')
def test_create_storage_account_with_double_encryption(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
self.cmd('az storage account create -n {} -g {} --require-infrastructure-encryption'.format(
name, resource_group), checks=[
JMESPathCheck('name', name),
JMESPathCheck('encryption.requireInfrastructureEncryption', True)
])
self.cmd('az storage account show -n {} -g {}'.format(name, resource_group), checks=[
JMESPathCheck('name', name),
JMESPathCheck('encryption.requireInfrastructureEncryption', True)
])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2017-10-01')
@ResourceGroupPreparer(parameter_name_for_location='location', location='southcentralus')
def test_create_storage_account_v2(self, resource_group, location):
self.kwargs.update({
'name': self.create_random_name(prefix='cli', length=24),
'loc': location
})
self.cmd('storage account create -n {name} -g {rg} -l {loc} --kind StorageV2',
checks=[JMESPathCheck('kind', 'StorageV2')])
self.cmd('storage account check-name --name {name}', checks=[
JMESPathCheck('nameAvailable', False),
JMESPathCheck('reason', 'AlreadyExists')
])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2016-01-01')
@ResourceGroupPreparer(location='southcentralus')
def test_storage_create_default_sku(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {}'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('sku.name', 'Standard_RAGRS')])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2017-10-01')
@ResourceGroupPreparer(location='southcentralus')
def test_storage_create_default_kind(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {}'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('kind', 'StorageV2')])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2018-02-01')
@ResourceGroupPreparer(location='southcentralus', name_prefix='cli_storage_account_hns')
def test_storage_create_with_hns(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} --kind StorageV2 --hns'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('isHnsEnabled', True)])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2018-02-01')
@ResourceGroupPreparer(location='southcentralus', name_prefix='cli_storage_account_hns')
def test_storage_create_with_hns_true(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} --kind StorageV2 --hns true'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('isHnsEnabled', True)])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2018-02-01')
@ResourceGroupPreparer(location='southcentralus', name_prefix='cli_storage_account_hns')
def test_storage_create_with_hns_false(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} --kind StorageV2 --hns false'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('isHnsEnabled', False)])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-06-01')
@ResourceGroupPreparer(location='eastus2euap', name_prefix='cli_storage_account_encryption')
def test_storage_create_with_encryption_key_type(self, resource_group):
name = self.create_random_name(prefix='cliencryption', length=24)
create_cmd = 'az storage account create -n {} -g {} --kind StorageV2 -t Account -q Service'.format(
name, resource_group)
self.cmd(create_cmd, checks=[
JMESPathCheck('encryption.services.queue', None),
JMESPathCheck('encryption.services.table.enabled', True),
JMESPathCheck('encryption.services.table.keyType', 'Account'),
])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer(location='eastus', name_prefix='cli_storage_account')
def test_storage_create_with_public_access(self, resource_group):
name1 = self.create_random_name(prefix='cli', length=24)
name2 = self.create_random_name(prefix='cli', length=24)
name3 = self.create_random_name(prefix='cli', length=24)
self.cmd('az storage account create -n {} -g {} --allow-blob-public-access'.format(name1, resource_group),
checks=[JMESPathCheck('allowBlobPublicAccess', True)])
self.cmd('az storage account create -n {} -g {} --allow-blob-public-access true'.format(name2, resource_group),
checks=[JMESPathCheck('allowBlobPublicAccess', True)])
self.cmd('az storage account create -n {} -g {} --allow-blob-public-access false'.format(name3, resource_group),
checks=[JMESPathCheck('allowBlobPublicAccess', False)])
@AllowLargeResponse()
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer(location='eastus', name_prefix='cli_storage_account')
@StorageAccountPreparer(name_prefix='blob')
def test_storage_update_with_public_access(self, storage_account):
self.cmd('az storage account update -n {} --allow-blob-public-access'.format(storage_account),
checks=[JMESPathCheck('allowBlobPublicAccess', True)])
self.cmd('az storage account update -n {} --allow-blob-public-access true'.format(storage_account),
checks=[JMESPathCheck('allowBlobPublicAccess', True)])
self.cmd('az storage account update -n {} --allow-blob-public-access false'.format(storage_account),
checks=[JMESPathCheck('allowBlobPublicAccess', False)])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer(location='eastus', name_prefix='cli_storage_account')
def test_storage_create_with_min_tls(self, resource_group):
name1 = self.create_random_name(prefix='cli', length=24)
name2 = self.create_random_name(prefix='cli', length=24)
name3 = self.create_random_name(prefix='cli', length=24)
name4 = self.create_random_name(prefix='cli', length=24)
self.cmd('az storage account create -n {} -g {}'.format(name1, resource_group),
checks=[JMESPathCheck('minimumTlsVersion', None)])
self.cmd('az storage account create -n {} -g {} --min-tls-version TLS1_0'.format(name2, resource_group),
checks=[JMESPathCheck('minimumTlsVersion', 'TLS1_0')])
self.cmd('az storage account create -n {} -g {} --min-tls-version TLS1_1'.format(name3, resource_group),
checks=[JMESPathCheck('minimumTlsVersion', 'TLS1_1')])
self.cmd('az storage account create -n {} -g {} --min-tls-version TLS1_2'.format(name4, resource_group),
checks=[JMESPathCheck('minimumTlsVersion', 'TLS1_2')])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer(location='eastus', name_prefix='cli_storage_account')
@StorageAccountPreparer(name_prefix='tls')
def test_storage_update_with_min_tls(self, storage_account, resource_group):
self.cmd('az storage account show -n {} -g {}'.format(storage_account, resource_group),
checks=[JMESPathCheck('minimumTlsVersion', None)])
self.cmd('az storage account update -n {} -g {} --min-tls-version TLS1_0'.format(
storage_account, resource_group), checks=[JMESPathCheck('minimumTlsVersion', 'TLS1_0')])
self.cmd('az storage account update -n {} -g {} --min-tls-version TLS1_1'.format(
storage_account, resource_group), checks=[JMESPathCheck('minimumTlsVersion', 'TLS1_1')])
self.cmd('az storage account update -n {} -g {} --min-tls-version TLS1_2'.format(
storage_account, resource_group), checks=[JMESPathCheck('minimumTlsVersion', 'TLS1_2')])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-06-01')
@ResourceGroupPreparer(location='eastus', name_prefix='cli_storage_account_routing')
def test_storage_account_with_routing_preference(self, resource_group):
# Create Storage Account with Publish MicrosoftEndpoint, choose MicrosoftRouting
name1 = self.create_random_name(prefix='clirouting', length=24)
create_cmd1 = 'az storage account create -n {} -g {} --routing-choice MicrosoftRouting --publish-microsoft-endpoint true'.format(
name1, resource_group)
self.cmd(create_cmd1, checks=[
JMESPathCheck('routingPreference.publishInternetEndpoints', None),
JMESPathCheck('routingPreference.publishMicrosoftEndpoints', True),
JMESPathCheck('routingPreference.routingChoice', 'MicrosoftRouting'),
])
# Update Storage Account with Publish InternetEndpoint
update_cmd1 = 'az storage account update -n {} -g {} --routing-choice InternetRouting --publish-microsoft-endpoint false --publish-internet-endpoint true'.format(
name1, resource_group)
self.cmd(update_cmd1, checks=[
JMESPathCheck('routingPreference.publishInternetEndpoints', True),
JMESPathCheck('routingPreference.publishMicrosoftEndpoints', False),
JMESPathCheck('routingPreference.routingChoice', 'InternetRouting'),
])
# Create Storage Account with Publish InternetEndpoint, choose InternetRouting
name2 = self.create_random_name(prefix='clirouting', length=24)
create_cmd2 = 'az storage account create -n {} -g {} --routing-choice InternetRouting --publish-internet-endpoints true --publish-microsoft-endpoints false'.format(
name2, resource_group)
self.cmd(create_cmd2, checks=[
JMESPathCheck('routingPreference.publishInternetEndpoints', True),
JMESPathCheck('routingPreference.publishMicrosoftEndpoints', False),
JMESPathCheck('routingPreference.routingChoice', 'InternetRouting'),
])
# Update Storage Account with MicrosoftRouting routing choice
update_cmd2 = 'az storage account update -n {} -g {} --routing-choice MicrosoftRouting'\
.format(name2, resource_group)
self.cmd(update_cmd2, checks=[
JMESPathCheck('routingPreference.routingChoice', 'MicrosoftRouting'),
])
# Create without any routing preference
name3 = self.create_random_name(prefix='clirouting', length=24)
create_cmd3 = 'az storage account create -n {} -g {}'.format(
name3, resource_group)
self.cmd(create_cmd3, checks=[
JMESPathCheck('routingPreference', None),
])
# Update Storage Account with Publish MicrosoftEndpoint, choose MicrosoftRouting
update_cmd3 = 'az storage account update -n {} -g {} --routing-choice MicrosoftRouting --publish-internet-endpoints false --publish-microsoft-endpoints true'\
.format(name3, resource_group)
self.cmd(update_cmd3, checks=[
JMESPathCheck('routingPreference.publishInternetEndpoints', False),
JMESPathCheck('routingPreference.publishMicrosoftEndpoints', True),
JMESPathCheck('routingPreference.routingChoice', 'MicrosoftRouting'),
])
@AllowLargeResponse()
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer(location='eastus', name_prefix='cli_storage_account')
def test_storage_account_with_shared_key_access(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
self.cmd('az storage account create -n {} -g {} --allow-shared-key-access'.format(name, resource_group),
checks=[JMESPathCheck('allowSharedKeyAccess', True)])
self.cmd('az storage account create -n {} -g {} --allow-shared-key-access false'.format(name, resource_group),
checks=[JMESPathCheck('allowSharedKeyAccess', False)])
self.cmd('az storage account create -n {} -g {} --allow-shared-key-access true'.format(name, resource_group),
checks=[JMESPathCheck('allowSharedKeyAccess', True)])
self.cmd('az storage account update -n {} --allow-shared-key-access false'.format(name),
checks=[JMESPathCheck('allowSharedKeyAccess', False)])
self.cmd('az storage account update -n {} --allow-shared-key-access true'.format(name),
checks=[JMESPathCheck('allowSharedKeyAccess', True)])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2021-02-01')
@ResourceGroupPreparer(location='eastus', name_prefix='cli_storage_account')
def test_storage_account_with_key_and_sas_policy(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
self.cmd('az storage account create -n {} -g {}'.format(name, resource_group),
checks=[JMESPathCheck('keyPolicy', None),
JMESPathCheck('sasPolicy', None)])
self.cmd('az storage account create -n {} -g {} --key-exp-days 3'.format(name, resource_group),
checks=[JMESPathCheck('keyPolicy.keyExpirationPeriodInDays', 3),
JMESPathCheck('sasPolicy', None)])
self.cmd('az storage account create -n {} -g {} --sas-exp 1.23:59:59'.format(name, resource_group),
checks=[JMESPathCheck('keyPolicy.keyExpirationPeriodInDays', 3),
JMESPathCheck('sasPolicy.sasExpirationPeriod', '1.23:59:59')])
self.cmd('az storage account update -n {} -g {} --key-exp-days 100000'.format(name, resource_group),
checks=[JMESPathCheck('keyPolicy.keyExpirationPeriodInDays', 100000),
JMESPathCheck('sasPolicy.sasExpirationPeriod', '1.23:59:59')])
self.cmd('az storage account update -n {} -g {} --sas-exp 100000.00:00:00'.format(name, resource_group),
checks=[JMESPathCheck('keyPolicy.keyExpirationPeriodInDays', 100000),
JMESPathCheck('sasPolicy.sasExpirationPeriod', '100000.00:00:00')])
def test_show_usage(self):
self.cmd('storage account show-usage -l westus', checks=JMESPathCheck('name.value', 'StorageAccounts'))
def test_show_usage_no_location(self):
with self.assertRaises(SystemExit):
self.cmd('storage account show-usage')
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_logging_operations(self, resource_group, storage_account):
connection_string = self.cmd(
'storage account show-connection-string -g {} -n {} -otsv'.format(resource_group, storage_account)).output
self.cmd('storage logging show --connection-string {}'.format(connection_string), checks=[
JMESPathCheck('blob.read', False),
JMESPathCheck('blob.retentionPolicy.enabled', False)
])
self.cmd('storage logging update --services b --log r --retention 1 '
'--service b --connection-string {}'.format(connection_string))
self.cmd('storage logging show --connection-string {}'.format(connection_string), checks=[
JMESPathCheck('blob.read', True),
JMESPathCheck('blob.retentionPolicy.enabled', True),
JMESPathCheck('blob.retentionPolicy.days', 1)
])
self.cmd('storage logging off --connection-string {}'.format(connection_string))
self.cmd('storage logging show --connection-string {}'.format(connection_string), checks=[
JMESPathCheck('blob.delete', False),
JMESPathCheck('blob.write', False),
JMESPathCheck('blob.read', False),
JMESPathCheck('blob.retentionPolicy.enabled', False),
JMESPathCheck('blob.retentionPolicy.days', None),
JMESPathCheck('queue.delete', False),
JMESPathCheck('queue.write', False),
JMESPathCheck('queue.read', False),
JMESPathCheck('queue.retentionPolicy.enabled', False),
JMESPathCheck('queue.retentionPolicy.days', None),
JMESPathCheck('table.delete', False),
JMESPathCheck('table.write', False),
JMESPathCheck('table.read', False),
JMESPathCheck('table.retentionPolicy.enabled', False),
JMESPathCheck('table.retentionPolicy.days', None)
])
# Table service
with self.assertRaisesRegexp(CLIError, "incorrect usage: for table service, the supported version for logging is `1.0`"):
self.cmd('storage logging update --services t --log r --retention 1 '
'--version 2.0 --connection-string {}'.format(connection_string))
# Set version to 1.0
self.cmd('storage logging update --services t --log r --retention 1 --version 1.0 --connection-string {} '
.format(connection_string))
time.sleep(10)
self.cmd('storage logging show --connection-string {}'.format(connection_string), checks=[
JMESPathCheck('table.version', '1.0'),
JMESPathCheck('table.delete', False),
JMESPathCheck('table.write', False),
JMESPathCheck('table.read', True),
JMESPathCheck('table.retentionPolicy.enabled', True),
JMESPathCheck('table.retentionPolicy.days', 1)
])
# Use default version
self.cmd('storage logging update --services t --log r --retention 1 --connection-string {}'.format(
connection_string))
time.sleep(10)
self.cmd('storage logging show --connection-string {}'.format(connection_string), checks=[
JMESPathCheck('table.version', '1.0'),
JMESPathCheck('table.delete', False),
JMESPathCheck('table.write', False),
JMESPathCheck('table.read', True),
JMESPathCheck('table.retentionPolicy.enabled', True),
JMESPathCheck('table.retentionPolicy.days', 1)
])
@live_only()
@ResourceGroupPreparer()
def test_logging_error_operations(self, resource_group):
# BlobStorage doesn't support logging for some services
blob_storage = self.create_random_name(prefix='blob', length=24)
self.cmd('storage account create -g {} -n {} --kind BlobStorage --access-tier hot --https-only'.format(
resource_group, blob_storage))
blob_connection_string = self.cmd(
'storage account show-connection-string -g {} -n {} -otsv'.format(resource_group, blob_storage)).output
with self.assertRaisesRegexp(CLIError, "Your storage account doesn't support logging"):
self.cmd('storage logging show --services q --connection-string {}'.format(blob_connection_string))
# PremiumStorage doesn't support logging for some services
premium_storage = self.create_random_name(prefix='premium', length=24)
self.cmd('storage account create -g {} -n {} --sku Premium_LRS --https-only'.format(
resource_group, premium_storage))
premium_connection_string = self.cmd(
'storage account show-connection-string -g {} -n {} -otsv'.format(resource_group, premium_storage)).output
with self.assertRaisesRegexp(CLIError, "Your storage account doesn't support logging"):
self.cmd('storage logging show --services q --connection-string {}'.format(premium_connection_string))
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_metrics_operations(self, resource_group, storage_account_info):
self.storage_cmd('storage metrics show', storage_account_info) \
.assert_with_checks(JMESPathCheck('file.hour.enabled', True),
JMESPathCheck('file.minute.enabled', False))
self.storage_cmd('storage metrics update --services f --api true --hour true --minute true --retention 1 ',
storage_account_info)
self.storage_cmd('storage metrics show', storage_account_info).assert_with_checks(
JMESPathCheck('file.hour.enabled', True),
JMESPathCheck('file.minute.enabled', True))
@AllowLargeResponse()
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='account_1')
@StorageAccountPreparer(parameter_name='account_2')
def test_list_storage_accounts(self, account_1, account_2):
accounts_list = self.cmd('az storage account list').get_output_in_json()
assert len(accounts_list) >= 2
assert next(acc for acc in accounts_list if acc['name'] == account_1)
assert next(acc for acc in accounts_list if acc['name'] == account_2)
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_renew_account_key(self, resource_group, storage_account):
original_keys = self.cmd('storage account keys list -g {} -n {}'
.format(resource_group, storage_account)).get_output_in_json()
# key1 = keys_result[0]
# key2 = keys_result[1]
assert original_keys[0] and original_keys[1]
renewed_keys = self.cmd('storage account keys renew -g {} -n {} --key primary'
.format(resource_group, storage_account)).get_output_in_json()
print(renewed_keys)
print(original_keys)
assert renewed_keys[0] != original_keys[0]
assert renewed_keys[1] == original_keys[1]
original_keys = renewed_keys
renewed_keys = self.cmd('storage account keys renew -g {} -n {} --key secondary'
.format(resource_group, storage_account)).get_output_in_json()
assert renewed_keys[0] == original_keys[0]
assert renewed_keys[1] != original_keys[1]
@record_only() # Need to configure domain service first
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_renew_account_kerb_key(self, resource_group):
name = self.create_random_name(prefix='clistoragekerbkey', length=24)
self.kwargs = {'sc': name, 'rg': resource_group}
self.cmd('storage account create -g {rg} -n {sc} -l eastus2euap --enable-files-aadds')
self.cmd('storage account keys list -g {rg} -n {sc}', checks=JMESPathCheck('length(@)', 4))
original_keys = self.cmd('storage account keys list -g {rg} -n {sc} --expand-key-type kerb',
checks=JMESPathCheck('length(@)', 4)).get_output_in_json()
renewed_access_keys = self.cmd('storage account keys renew -g {rg} -n {sc} --key secondary').get_output_in_json()
assert renewed_access_keys[0] == original_keys[0]
assert renewed_access_keys[1] != original_keys[1]
renewed_kerb_keys = self.cmd(
'storage account keys renew -g {rg} -n {sc} --key primary --key-type kerb').get_output_in_json()
assert renewed_kerb_keys[2] != original_keys[2]
assert renewed_kerb_keys[3] == original_keys[3]
@AllowLargeResponse()
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_create_account_sas(self, storage_account):
from azure.cli.core.azclierror import RequiredArgumentMissingError
with self.assertRaises(RequiredArgumentMissingError):
self.cmd('storage account generate-sas --resource-types o --services b --expiry 2000-01-01 '
'--permissions r --account-name ""')
invalid_connection_string = "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;"
with self.assertRaises(RequiredArgumentMissingError):
self.cmd('storage account generate-sas --resource-types o --services b --expiry 2000-01-01 '
'--permissions r --connection-string {}'.format(invalid_connection_string))
sas = self.cmd('storage account generate-sas --resource-types o --services b '
'--expiry 2046-12-31T08:23Z --permissions r --https-only --account-name {}'
.format(storage_account)).output
self.assertIn('sig=', sas, 'SAS token {} does not contain sig segment'.format(sas))
self.assertIn('se=', sas, 'SAS token {} does not contain se segment'.format(sas))
def test_list_locations(self):
self.cmd('az account list-locations',
checks=[JMESPathCheck("[?name=='westus'].displayName | [0]", 'West US')])
@ResourceGroupPreparer(location='southcentralus')
@StorageAccountPreparer(location='southcentralus')
def test_customer_managed_key(self, resource_group, storage_account):
self.kwargs = {'rg': resource_group, 'sa': storage_account, 'vt': self.create_random_name('clitest', 24)}
self.kwargs['vid'] = self.cmd('az keyvault create -n {vt} -g {rg} '
'-otsv --query id').output.rstrip('\n')
self.kwargs['vtn'] = self.cmd('az keyvault show -n {vt} -g {rg} '
'-otsv --query properties.vaultUri').output.strip('\n')
self.kwargs['ver'] = self.cmd("az keyvault key create -n testkey -p software --vault-name {vt} "
"-otsv --query 'key.kid'").output.rsplit('/', 1)[1].rstrip('\n')
self.kwargs['oid'] = self.cmd("az storage account update -n {sa} -g {rg} --assign-identity "
"-otsv --query 'identity.principalId'").output.strip('\n')
self.cmd('az keyvault set-policy -n {vt} --object-id {oid} -g {rg} '
'--key-permissions get wrapKey unwrapKey recover')
self.cmd('az keyvault update -n {vt} -g {rg} --set properties.enableSoftDelete=true')
self.cmd('az resource update --id {vid} --set properties.enablePurgeProtection=true')
# Enable key auto-rotation
result = self.cmd('az storage account update -n {sa} -g {rg} '
'--encryption-key-source Microsoft.Keyvault '
'--encryption-key-vault {vtn} '
'--encryption-key-name testkey ').get_output_in_json()
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# Pin to a version and opt out for key auto-rotation
result = self.cmd('az storage account update -n {sa} -g {rg} '
'--encryption-key-version {ver}').get_output_in_json()
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], self.kwargs['ver'])
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# Enable key auto-rotation again
result = self.cmd('az storage account update -n {sa} -g {rg} '
'--encryption-key-version ""').get_output_in_json()
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], "")
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# Change Key name
self.cmd("az keyvault key create -n newkey -p software --vault-name {vt} ")
result = self.cmd('az storage account update -n {sa} -g {rg} '
'--encryption-key-vault {vtn} '
'--encryption-key-name "newkey"').get_output_in_json()
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'newkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], "")
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# Change Key source
result = self.cmd('az storage account update -n {sa} -g {rg} '
'--encryption-key-source Microsoft.Storage').get_output_in_json()
self.assertEqual(result['encryption']['keySource'], "Microsoft.Storage")
@ResourceGroupPreparer(location='eastus2euap')
def test_user_assigned_identity(self, resource_group):
self.kwargs = {
'rg': resource_group,
'sa1': self.create_random_name(prefix='sa1', length=24),
'sa2': self.create_random_name(prefix='sa2', length=24),
'sa3': self.create_random_name(prefix='sa3', length=24),
'identity': self.create_random_name(prefix='id', length=24),
'vt': self.create_random_name('clitest', 24)
}
# Prepare managed identity
identity = self.cmd('az identity create -n {identity} -g {rg}').get_output_in_json()
self.kwargs['iid'] = identity['id']
self.kwargs['oid'] = identity['principalId']
# Prepare key vault
keyvault = self.cmd('az keyvault create -n {vt} -g {rg} ').get_output_in_json()
self.kwargs['vid'] = keyvault['id']
self.kwargs['vtn'] = keyvault['properties']['vaultUri']
self.kwargs['ver'] = self.cmd("az keyvault key create -n testkey -p software --vault-name {vt} "
"-otsv --query 'key.kid'").output.rsplit('/', 1)[1].rstrip('\n')
# Make UAI access to keyvault
self.cmd('az keyvault set-policy -n {vt} --object-id {oid} -g {rg} '
'--key-permissions get wrapKey unwrapKey recover')
self.cmd('az keyvault update -n {vt} -g {rg} --set properties.enableSoftDelete=true')
self.cmd('az resource update --id {vid} --set properties.enablePurgeProtection=true')
# CMK at create with UAI
result = self.cmd('az storage account create -n {sa1} -g {rg} '
'--encryption-key-source Microsoft.Keyvault '
'--encryption-key-vault {vtn} '
'--encryption-key-name testkey '
'--key-vault-user-identity-id {iid} '
'--identity-type UserAssigned '
'--user-identity-id {iid}').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa1'])
self.assertEqual(result['identity']['type'], 'UserAssigned')
self.assertIn(self.kwargs['iid'], result['identity']['userAssignedIdentities'])
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'], self.kwargs['iid'])
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# Clear a UserAssigned identity when in use with CMK will break access to the account
result = self.cmd('az storage account update -n {sa1} -g {rg} --identity-type None ').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa1'])
self.assertEqual(result['identity']['type'], 'None')
self.assertEqual(result['identity']['userAssignedIdentities'], None)
# Recover from Identity clear
result = self.cmd('az storage account update -n {sa1} -g {rg} --identity-type UserAssigned --user-identity-id {iid}').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa1'])
self.assertEqual(result['identity']['type'], 'UserAssigned')
self.assertIn(self.kwargs['iid'], result['identity']['userAssignedIdentities'])
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'], self.kwargs['iid'])
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# CMK with UAI -> CMK with SAI
# 1. Add System Assigned Identity if it does not exist.
result = self.cmd('az storage account update -n {sa1} -g {rg} '
'--encryption-key-source Microsoft.Keyvault '
'--encryption-key-vault {vtn} '
'--encryption-key-name testkey '
'--identity-type SystemAssigned,UserAssigned ').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa1'])
self.assertEqual(result['identity']['type'], 'SystemAssigned,UserAssigned')
# 2. Add GET/WRAP/UNWRAP permissions on $KeyVaultUri for System Assigned identity.
self.kwargs['oid'] = self.cmd("az storage account update -n {sa1} -g {rg} --assign-identity "
"-otsv --query 'identity.principalId'").output.strip('\n')
self.cmd('az keyvault set-policy -n {vt} --object-id {oid} -g {rg} '
'--key-permissions get wrapKey unwrapKey recover')
# 3. Update encryption.identity to use the SystemAssigned identity. SAI must have access to existing KeyVault.
result = self.cmd('az storage account update -n {sa1} -g {rg} '
'--key-vault-user-identity-id "" ').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa1'])
self.assertEqual(result['identity']['type'], 'SystemAssigned')
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'], "")
# CMK with SAI -> MMK
result = self.cmd('az storage account update -n {sa1} -g {rg} '
'--encryption-key-source Microsoft.Storage ').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa1'])
self.assertEqual(result['encryption']['keySource'], "Microsoft.Storage")
self.assertEqual(result['encryption']['keyVaultProperties'], None)
# MMK at create
result = self.cmd('az storage account create -n {sa2} -g {rg} --encryption-key-source Microsoft.Storage')\
.get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['encryption']['keySource'], "Microsoft.Storage")
self.assertEqual(result['encryption']['keyVaultProperties'], None)
# CMK with UAI and add SAI at create
result = self.cmd('az storage account create -n {sa3} -g {rg} '
'--encryption-key-source Microsoft.Keyvault '
'--encryption-key-vault {vtn} '
'--encryption-key-name testkey '
'--key-vault-user-identity-id {iid} '
'--identity-type SystemAssigned,UserAssigned '
'--user-identity-id {iid}').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa3'])
self.assertEqual(result['identity']['type'], 'SystemAssigned,UserAssigned')
self.assertIn(self.kwargs['iid'], result['identity']['userAssignedIdentities'])
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'],
self.kwargs['iid'])
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# MMK -> CMK wth UAI
self.kwargs['sid'] = self.cmd("az storage account update -n {sa2} -g {rg} --assign-identity "
"-otsv --query 'identity.principalId'").output.strip('\n')
self.cmd('az keyvault set-policy -n {vt} --object-id {sid} -g {rg} '
'--key-permissions get wrapKey unwrapKey recover')
result = self.cmd('az storage account update -n {sa2} -g {rg} '
'--encryption-key-source Microsoft.Keyvault '
'--encryption-key-vault {vtn} '
'--encryption-key-name testkey '
'--identity-type SystemAssigned ').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'SystemAssigned')
self.assertEqual(result['identity']['principalId'], self.kwargs['sid'])
self.assertEqual(result['encryption']['encryptionIdentity'], None)
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
        # CMK with UAI -> MMK
result = self.cmd('az storage account create -n {sa2} -g {rg} --encryption-key-source Microsoft.Storage')\
.get_output_in_json()
self.assertEqual(result['encryption']['keySource'], "Microsoft.Storage")
self.assertEqual(result['encryption']['keyVaultProperties'], None)
        # MMK -> CMK with SAI
result = self.cmd('az storage account update -n {sa2} -g {rg} '
'--encryption-key-source Microsoft.Keyvault '
'--encryption-key-vault {vtn} '
'--encryption-key-name testkey '
'--identity-type SystemAssigned ').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'SystemAssigned')
self.assertEqual(result['identity']['principalId'], self.kwargs['sid'])
self.assertEqual(result['encryption']['encryptionIdentity'], None)
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
        # Clearing a SystemAssigned identity while it is in use with CMK will break access to the account
result = self.cmd('az storage account update -n {sa2} -g {rg} --identity-type None ').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'None')
self.assertEqual(result['identity']['userAssignedIdentities'], None)
# Recover account if SystemAssignedIdentity used for CMK is cleared
# 1. Create a new $UserAssignedIdentity that has access to $KeyVaultUri and update the account to use the new $UserAssignedIdentity for encryption (if not present already).
result = self.cmd('az storage account update -n {sa2} -g {rg} '
'--identity-type UserAssigned '
'--user-identity-id {iid} '
'--key-vault-user-identity-id {iid} ').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'UserAssigned')
self.assertIn(self.kwargs['iid'], result['identity']['userAssignedIdentities'])
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'], self.kwargs['iid'])
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# 2. Update account to use SAI,UAI identity.
result = self.cmd('az storage account update -n {sa2} -g {rg} --identity-type SystemAssigned,UserAssigned')\
.get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'SystemAssigned,UserAssigned')
self.assertIn(self.kwargs['iid'], result['identity']['userAssignedIdentities'])
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'], self.kwargs['iid'])
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# 3. Clear the $UserAssignedIdentity used for encryption.
result = self.cmd('az storage account update -n {sa2} -g {rg} --key-vault-user-identity-id ""')\
.get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'SystemAssigned,UserAssigned')
self.assertIn(self.kwargs['iid'], result['identity']['userAssignedIdentities'])
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'], '')
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# 4. Remove $UserAssignedIdentity from the top level identity bag.
result = self.cmd('az storage account update -n {sa2} -g {rg} --identity-type SystemAssigned')\
.get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'SystemAssigned')
self.assertEqual(result['identity']['userAssignedIdentities'], None)
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'], '')
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
self.kwargs['sid'] = result['identity']['principalId']
# CMK with SAI -> CMK with UAI
result = self.cmd('az storage account update -n {sa2} -g {rg} '
'--encryption-key-source Microsoft.Keyvault '
'--encryption-key-vault {vtn} '
'--encryption-key-name testkey '
'--key-vault-user-identity-id {iid} '
'--identity-type SystemAssigned,UserAssigned '
'--user-identity-id {iid}').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'SystemAssigned,UserAssigned')
self.assertIn(self.kwargs['iid'], result['identity']['userAssignedIdentities'])
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'], self.kwargs['iid'])
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# CMK with UAI1 -> CMK with UAI2
self.kwargs['new_id'] = self.create_random_name(prefix='newid', length=24)
identity = self.cmd('az identity create -n {new_id} -g {rg}').get_output_in_json()
self.kwargs['new_iid'] = identity['id']
self.kwargs['new_oid'] = identity['principalId']
self.cmd('az keyvault set-policy -n {vt} --object-id {new_oid} -g {rg} '
'--key-permissions get wrapKey unwrapKey recover')
result = self.cmd('az storage account update -n {sa2} -g {rg} '
'--encryption-key-source Microsoft.Keyvault '
'--encryption-key-vault {vtn} '
'--encryption-key-name testkey '
'--key-vault-user-identity-id {new_iid} '
'--identity-type UserAssigned '
'--user-identity-id {new_iid}').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'UserAssigned')
self.assertIn(self.kwargs['new_iid'], result['identity']['userAssignedIdentities'])
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'], self.kwargs['new_iid'])
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
        # Clearing a UserAssigned identity while it is in use with CMK will break access to the account
result = self.cmd('az storage account update -n {sa2} -g {rg} '
'--identity-type None ').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'None')
self.assertEqual(result['identity']['userAssignedIdentities'], None)
# Recover from Identity clear
result = self.cmd('az storage account update -n {sa2} -g {rg} '
'--identity-type UserAssigned '
'--user-identity-id {new_iid} '
'--key-vault-user-identity-id {new_iid} ').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'UserAssigned')
self.assertIn(self.kwargs['new_iid'], result['identity']['userAssignedIdentities'])
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'], self.kwargs['new_iid'])
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_storage_account_show_exit_codes(self, resource_group, storage_account):
self.kwargs = {'rg': resource_group, 'sa': storage_account}
self.assertEqual(self.cmd('storage account show -g {rg} -n {sa}').exit_code, 0)
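        # Exit code 2 comes from argument parsing errors; exit code 3 from missing resources.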
with self.assertRaises(SystemExit) as ex:
self.cmd('storage account show text_causing_parsing_error')
self.assertEqual(ex.exception.code, 2)
with self.assertRaises(SystemExit) as ex:
self.cmd('storage account show -g fake_group -n {sa}')
self.assertEqual(ex.exception.code, 3)
with self.assertRaises(SystemExit) as ex:
self.cmd('storage account show -g {rg} -n fake_account')
self.assertEqual(ex.exception.code, 3)
@ResourceGroupPreparer()
@StorageAccountPreparer(kind='StorageV2')
def test_management_policy(self, resource_group, storage_account):
import os
curr_dir = os.path.dirname(os.path.realpath(__file__))
policy_file = os.path.join(curr_dir, 'mgmt_policy.json').replace('\\', '\\\\')
self.kwargs = {'rg': resource_group, 'sa': storage_account, 'policy': policy_file}
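        # Create the lifecycle management policy from the local JSON file and verify each rule action.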
self.cmd('storage account management-policy create --account-name {sa} -g {rg} --policy @"{policy}"',
checks=[JMESPathCheck('policy.rules[0].name', 'olcmtest'),
JMESPathCheck('policy.rules[0].enabled', True),
JMESPathCheck('policy.rules[0].definition.actions.baseBlob.tierToCool.daysAfterModificationGreaterThan', 30),
JMESPathCheck('policy.rules[0].definition.actions.baseBlob.tierToArchive.daysAfterModificationGreaterThan', 90),
JMESPathCheck('policy.rules[0].definition.actions.baseBlob.delete.daysAfterModificationGreaterThan', 1000),
JMESPathCheck('policy.rules[0].definition.actions.snapshot.tierToCool.daysAfterCreationGreaterThan', 30),
JMESPathCheck('policy.rules[0].definition.actions.snapshot.tierToArchive.daysAfterCreationGreaterThan', 90),
JMESPathCheck('policy.rules[0].definition.actions.snapshot.delete.daysAfterCreationGreaterThan', 1000),
JMESPathCheck('policy.rules[0].definition.actions.version.tierToCool.daysAfterCreationGreaterThan', 30),
JMESPathCheck('policy.rules[0].definition.actions.version.tierToArchive.daysAfterCreationGreaterThan', 90),
JMESPathCheck('policy.rules[0].definition.actions.version.delete.daysAfterCreationGreaterThan', 1000),
JMESPathCheck('policy.rules[0].definition.filters.blobTypes[0]', "blockBlob"),
JMESPathCheck('policy.rules[0].definition.filters.prefixMatch[0]', "olcmtestcontainer1")])
self.cmd('storage account management-policy update --account-name {sa} -g {rg}'
' --set "policy.rules[0].name=newname"')
self.cmd('storage account management-policy show --account-name {sa} -g {rg}',
checks=JMESPathCheck('policy.rules[0].name', 'newname'))
self.cmd('storage account management-policy delete --account-name {sa} -g {rg}')
self.cmd('storage account management-policy show --account-name {sa} -g {rg}', expect_failure=True)
@record_only() # Need to configure domain service first
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_update_storage_account_with_files_aadds(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {}'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('azureFilesIdentityBasedAuthentication', None)])
update_cmd = 'az storage account update -n {} -g {} --enable-files-aadds'.format(name, resource_group)
result = self.cmd(update_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'AADDS')
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_update_storage_account_with_files_aadds_false(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {}'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('azureFilesIdentityBasedAuthentication', None)])
update_cmd = 'az storage account update -n {} -g {} --enable-files-aadds false'.format(name, resource_group)
result = self.cmd(update_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'None')
@record_only() # Need to configure domain service first
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_update_storage_account_with_files_aadds_true(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {}'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('azureFilesIdentityBasedAuthentication', None)])
update_cmd = 'az storage account update -n {} -g {} --enable-files-aadds true'.format(name, resource_group)
result = self.cmd(update_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'AADDS')
@record_only() # Need to configure domain service first
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_create_storage_account_with_files_aadds(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} --enable-files-aadds'.format(name, resource_group)
result = self.cmd(create_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'AADDS')
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_create_storage_account_with_files_aadds_false(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} --enable-files-aadds false'.format(name, resource_group)
result = self.cmd(create_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'None')
@record_only() # Need to configure domain service first
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_create_storage_account_with_files_aadds_true(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} --enable-files-aadds true'.format(name, resource_group)
result = self.cmd(create_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'AADDS')
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_create_storage_account_with_files_adds(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
self.kwargs.update({
'rg': resource_group,
'sc': name,
'domain_name': 'mydomain.com',
'net_bios_domain_name': 'mydomain.com',
'forest_name': 'mydomain.com',
'domain_guid': '12345678-1234-1234-1234-123456789012',
'domain_sid': 'S-1-5-21-1234567890-1234567890-1234567890',
'azure_storage_sid': 'S-1-5-21-1234567890-1234567890-1234567890-1234'
})
create_cmd = """storage account create -n {sc} -g {rg} -l eastus2euap --enable-files-adds --domain-name
{domain_name} --net-bios-domain-name {net_bios_domain_name} --forest-name {forest_name} --domain-guid
{domain_guid} --domain-sid {domain_sid} --azure-storage-sid {azure_storage_sid}"""
result = self.cmd(create_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'AD')
activeDirectoryProperties = result['azureFilesIdentityBasedAuthentication']['activeDirectoryProperties']
self.assertEqual(activeDirectoryProperties['azureStorageSid'], self.kwargs['azure_storage_sid'])
self.assertEqual(activeDirectoryProperties['domainGuid'], self.kwargs['domain_guid'])
self.assertEqual(activeDirectoryProperties['domainName'], self.kwargs['domain_name'])
self.assertEqual(activeDirectoryProperties['domainSid'], self.kwargs['domain_sid'])
self.assertEqual(activeDirectoryProperties['forestName'], self.kwargs['forest_name'])
self.assertEqual(activeDirectoryProperties['netBiosDomainName'], self.kwargs['net_bios_domain_name'])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_create_storage_account_with_files_adds_false(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
self.kwargs.update({
'rg': resource_group,
'sc': name
})
result = self.cmd("storage account create -n {sc} -g {rg} -l eastus2euap --enable-files-adds false").get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'None')
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_create_storage_account_with_files_adds_true(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
self.kwargs.update({
'rg': resource_group,
'sc': name,
'domain_name': 'mydomain.com',
'net_bios_domain_name': 'mydomain.com',
'forest_name': 'mydomain.com',
'domain_guid': '12345678-1234-1234-1234-123456789012',
'domain_sid': 'S-1-5-21-1234567890-1234567890-1234567890',
'azure_storage_sid': 'S-1-5-21-1234567890-1234567890-1234567890-1234'
})
create_cmd = """storage account create -n {sc} -g {rg} -l eastus2euap --enable-files-adds true --domain-name
{domain_name} --net-bios-domain-name {net_bios_domain_name} --forest-name {forest_name} --domain-guid
{domain_guid} --domain-sid {domain_sid} --azure-storage-sid {azure_storage_sid}"""
result = self.cmd(create_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'AD')
activeDirectoryProperties = result['azureFilesIdentityBasedAuthentication']['activeDirectoryProperties']
self.assertEqual(activeDirectoryProperties['azureStorageSid'], self.kwargs['azure_storage_sid'])
self.assertEqual(activeDirectoryProperties['domainGuid'], self.kwargs['domain_guid'])
self.assertEqual(activeDirectoryProperties['domainName'], self.kwargs['domain_name'])
self.assertEqual(activeDirectoryProperties['domainSid'], self.kwargs['domain_sid'])
self.assertEqual(activeDirectoryProperties['forestName'], self.kwargs['forest_name'])
self.assertEqual(activeDirectoryProperties['netBiosDomainName'], self.kwargs['net_bios_domain_name'])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_update_storage_account_with_files_adds(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} -l eastus2euap'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('azureFilesIdentityBasedAuthentication', None)])
self.kwargs.update({
'rg': resource_group,
'sc': name,
'domain_name': 'mydomain.com',
'net_bios_domain_name': 'mydomain.com',
'forest_name': 'mydomain.com',
'domain_guid': '12345678-1234-1234-1234-123456789012',
'domain_sid': 'S-1-5-21-1234567890-1234567890-1234567890',
'azure_storage_sid': 'S-1-5-21-1234567890-1234567890-1234567890-1234'
})
update_cmd = """storage account update -n {sc} -g {rg} --enable-files-adds --domain-name {domain_name}
--net-bios-domain-name {net_bios_domain_name} --forest-name {forest_name} --domain-guid {domain_guid}
--domain-sid {domain_sid} --azure-storage-sid {azure_storage_sid}"""
result = self.cmd(update_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'AD')
activeDirectoryProperties = result['azureFilesIdentityBasedAuthentication']['activeDirectoryProperties']
self.assertEqual(activeDirectoryProperties['azureStorageSid'], self.kwargs['azure_storage_sid'])
self.assertEqual(activeDirectoryProperties['domainGuid'], self.kwargs['domain_guid'])
self.assertEqual(activeDirectoryProperties['domainName'], self.kwargs['domain_name'])
self.assertEqual(activeDirectoryProperties['domainSid'], self.kwargs['domain_sid'])
self.assertEqual(activeDirectoryProperties['forestName'], self.kwargs['forest_name'])
self.assertEqual(activeDirectoryProperties['netBiosDomainName'], self.kwargs['net_bios_domain_name'])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_update_storage_account_with_files_adds_false(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} -l eastus2euap'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('azureFilesIdentityBasedAuthentication', None)])
update_cmd = 'az storage account update -n {} -g {} --enable-files-adds false'.format(name, resource_group)
result = self.cmd(update_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'None')
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_update_storage_account_with_files_adds_true(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} -l eastus2euap'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('azureFilesIdentityBasedAuthentication', None)])
self.kwargs.update({
'rg': resource_group,
'sc': name,
'domain_name': 'mydomain.com',
'net_bios_domain_name': 'mydomain.com',
'forest_name': 'mydomain.com',
'domain_guid': '12345678-1234-1234-1234-123456789012',
'domain_sid': 'S-1-5-21-1234567890-1234567890-1234567890',
'azure_storage_sid': 'S-1-5-21-1234567890-1234567890-1234567890-1234'
})
update_cmd = """storage account update -n {sc} -g {rg} --enable-files-adds true --domain-name {domain_name}
--net-bios-domain-name {net_bios_domain_name} --forest-name {forest_name} --domain-guid {domain_guid}
--domain-sid {domain_sid} --azure-storage-sid {azure_storage_sid}"""
result = self.cmd(update_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'AD')
activeDirectoryProperties = result['azureFilesIdentityBasedAuthentication']['activeDirectoryProperties']
self.assertEqual(activeDirectoryProperties['azureStorageSid'], self.kwargs['azure_storage_sid'])
self.assertEqual(activeDirectoryProperties['domainGuid'], self.kwargs['domain_guid'])
self.assertEqual(activeDirectoryProperties['domainName'], self.kwargs['domain_name'])
self.assertEqual(activeDirectoryProperties['domainSid'], self.kwargs['domain_sid'])
self.assertEqual(activeDirectoryProperties['forestName'], self.kwargs['forest_name'])
self.assertEqual(activeDirectoryProperties['netBiosDomainName'], self.kwargs['net_bios_domain_name'])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2020-08-01-preview')
@ResourceGroupPreparer(location='westus', name_prefix='cliedgezone')
def test_storage_account_extended_location(self, resource_group):
self.kwargs = {
'sa1': self.create_random_name(prefix='edge1', length=12),
'sa2': self.create_random_name(prefix='edge2', length=12),
'rg': resource_group
}
self.cmd('storage account create -n {sa1} -g {rg} --edge-zone microsoftrrdclab1 -l eastus2euap --sku Premium_LRS',
checks=[
JMESPathCheck('extendedLocation.name', 'microsoftrrdclab1'),
JMESPathCheck('extendedLocation.type', 'EdgeZone')
])
self.cmd('storage account create -n {sa2} -g {rg} --edge-zone microsoftlosangeles1 --sku Premium_LRS',
checks=[
JMESPathCheck('extendedLocation.name', 'microsoftlosangeles1'),
JMESPathCheck('extendedLocation.type', 'EdgeZone')
])
class RoleScenarioTest(LiveScenarioTest):
def run_under_service_principal(self):
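        # True when the CLI is logged in with a service principal rather than a user account.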
account_info = self.cmd('account show').get_output_in_json()
return account_info['user']['type'] == 'servicePrincipal'
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
class RevokeStorageAccountTests(StorageScenarioMixin, RoleScenarioTest, LiveScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_storage_revoke_keys')
@StorageAccountPreparer()
def test_storage_account_revoke_delegation_keys(self, resource_group, storage_account):
if self.run_under_service_principal():
            return  # this test deletes users, which is beyond a SP's capacity, so quit...
from datetime import datetime, timedelta
import time
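        # SAS tokens generated below expire one hour from now.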
expiry = (datetime.utcnow() + timedelta(hours=1)).strftime('%Y-%m-%dT%H:%MZ')
account_info = self.get_account_info(resource_group, storage_account)
c = self.create_container(account_info)
b = self.create_random_name('blob', 24)
local_file = self.create_temp_file(128, full_random=False)
self.kwargs.update({
'expiry': expiry,
'account': storage_account,
'container': c,
'local_file': local_file,
'blob': b,
'rg': resource_group
})
result = self.cmd('storage account show -n {account} -g {rg}').get_output_in_json()
self.kwargs['sc_id'] = result['id']
user = self.create_random_name('testuser', 15)
self.kwargs['upn'] = user + '@azuresdkteam.onmicrosoft.com'
self.cmd('ad user create --display-name tester123 --password Test123456789 --user-principal-name {upn}')
        time.sleep(15)  # By design, it takes some time for the RBAC system to propagate the graph object change
self.cmd('role assignment create --assignee {upn} --role "Storage Blob Data Contributor" --scope {sc_id}')
container_sas = self.cmd('storage blob generate-sas --account-name {account} -n {blob} -c {container} --expiry {expiry} --permissions '
'rw --https-only --as-user --auth-mode login -otsv').output
self.kwargs['container_sas'] = container_sas
self.cmd('storage blob upload -c {container} -n {blob} -f "{local_file}" --account-name {account} --sas-token {container_sas}')
blob_sas = self.cmd('storage blob generate-sas --account-name {account} -n {blob} -c {container} --expiry {expiry} --permissions '
'r --https-only --as-user --auth-mode login -otsv').output
self.kwargs['blob_sas'] = blob_sas
self.cmd('storage blob show -c {container} -n {blob} --account-name {account} --sas-token {blob_sas}') \
.assert_with_checks(JMESPathCheck('name', b))
self.cmd('storage account revoke-delegation-keys -n {account} -g {rg}')
        time.sleep(60)  # By design, it takes some time for the RBAC system to propagate the graph object change
self.cmd('storage blob show -c {container} -n {blob} --account-name {account} --sas-token {blob_sas}', expect_failure=True)
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
class BlobServicePropertiesTests(StorageScenarioMixin, ScenarioTest):
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-06-01')
@ResourceGroupPreparer(name_prefix='cli_storage_account_update_change_feed')
@StorageAccountPreparer(kind='StorageV2', name_prefix='clitest', location="eastus2euap")
def test_storage_account_update_change_feed(self, resource_group, storage_account):
self.kwargs.update({
'sa': storage_account,
'rg': resource_group,
'cmd': 'storage account blob-service-properties update'
})
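        # --change-feed-retention-days requires --enable-change-feed true and must fall within the allowed
        # range; the cases below reject -1, 0 and 146001 and accept 1 through 14600.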
from azure.cli.core.azclierror import InvalidArgumentValueError
with self.assertRaises(InvalidArgumentValueError):
self.cmd('{cmd} --enable-change-feed false --change-feed-retention-days 14600 -n {sa} -g {rg}')
with self.assertRaises(InvalidArgumentValueError):
self.cmd('{cmd} --change-feed-retention-days 1 -n {sa} -g {rg}')
with self.assertRaises(InvalidArgumentValueError):
self.cmd('{cmd} --enable-change-feed true --change-feed-retention-days -1 -n {sa} -g {rg}')
with self.assertRaises(InvalidArgumentValueError):
self.cmd('{cmd} --enable-change-feed true --change-feed-retention-days 0 -n {sa} -g {rg}')
with self.assertRaises(InvalidArgumentValueError):
self.cmd('{cmd} --enable-change-feed true --change-feed-retention-days 146001 -n {sa} -g {rg}')
result = self.cmd('{cmd} --enable-change-feed true --change-feed-retention-days 1 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['changeFeed']['enabled'], True)
self.assertEqual(result['changeFeed']['retentionInDays'], 1)
result = self.cmd('{cmd} --enable-change-feed true --change-feed-retention-days 100 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['changeFeed']['enabled'], True)
self.assertEqual(result['changeFeed']['retentionInDays'], 100)
result = self.cmd('{cmd} --enable-change-feed true --change-feed-retention-days 14600 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['changeFeed']['enabled'], True)
self.assertEqual(result['changeFeed']['retentionInDays'], 14600)
result = self.cmd('{cmd} --enable-change-feed false -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['changeFeed']['enabled'], False)
self.assertEqual(result['changeFeed']['retentionInDays'], None)
@ResourceGroupPreparer(name_prefix='cli_storage_account_update_delete_retention_policy')
@StorageAccountPreparer()
def test_storage_account_update_delete_retention_policy(self, resource_group, storage_account):
self.kwargs.update({
'sa': storage_account,
'rg': resource_group,
'cmd': 'storage account blob-service-properties update'
})
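        # Delete retention days must be within 1-365 and always paired with --enable-delete-retention true.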
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-delete-retention true -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-delete-retention false --delete-retention-days 365 -n {sa} -g {rg}').get_output_in_json()
with self.assertRaises(SystemExit):
self.cmd('{cmd} --delete-retention-days 1 -n {sa} -g {rg}').get_output_in_json()
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-delete-retention true --delete-retention-days -1 -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-delete-retention true --delete-retention-days 0 -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-delete-retention true --delete-retention-days 366 -n {sa} -g {rg}')
result = self.cmd('{cmd} --enable-delete-retention true --delete-retention-days 1 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['deleteRetentionPolicy']['enabled'], True)
self.assertEqual(result['deleteRetentionPolicy']['days'], 1)
result = self.cmd('{cmd} --enable-delete-retention true --delete-retention-days 100 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['deleteRetentionPolicy']['enabled'], True)
self.assertEqual(result['deleteRetentionPolicy']['days'], 100)
result = self.cmd('{cmd} --enable-delete-retention true --delete-retention-days 365 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['deleteRetentionPolicy']['enabled'], True)
self.assertEqual(result['deleteRetentionPolicy']['days'], 365)
result = self.cmd('{cmd} --enable-delete-retention false -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['deleteRetentionPolicy']['enabled'], False)
self.assertEqual(result['deleteRetentionPolicy']['days'], None)
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-06-01')
@ResourceGroupPreparer(name_prefix="cli_test_sa_versioning")
@StorageAccountPreparer(location="eastus2euap", kind="StorageV2")
def test_storage_account_update_versioning(self):
result = self.cmd('storage account blob-service-properties update --enable-versioning true -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['isVersioningEnabled'], True)
result = self.cmd('storage account blob-service-properties update --enable-versioning false -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['isVersioningEnabled'], False)
result = self.cmd('storage account blob-service-properties update --enable-versioning -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['isVersioningEnabled'], True)
result = self.cmd('storage account blob-service-properties show -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['isVersioningEnabled'], True)
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-06-01')
@ResourceGroupPreparer(name_prefix='cli_storage_account_update_delete_retention_policy')
@StorageAccountPreparer(kind='StorageV2', name_prefix='clitest', location='eastus2euap')
def test_storage_account_update_container_delete_retention_policy(self, resource_group, storage_account):
self.kwargs.update({
'sa': storage_account,
'rg': resource_group,
'cmd': 'storage account blob-service-properties update'
})
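        # Container delete retention uses the same 1-365 day bounds and flag pairing as blob delete retention.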
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-container-delete-retention true -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-container-delete-retention false --container-delete-retention-days 365 -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --container-delete-retention-days 1 -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-container-delete-retention true --container-delete-retention-days -1 -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-container-delete-retention true --container-delete-retention-days 0 -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-container-delete-retention true --container-delete-retention-days 366 -n {sa} -g {rg}')
result = self.cmd('{cmd} --enable-container-delete-retention true --container-delete-retention-days 1 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['containerDeleteRetentionPolicy']['enabled'], True)
self.assertEqual(result['containerDeleteRetentionPolicy']['days'], 1)
result = self.cmd('{cmd} --enable-container-delete-retention true --container-delete-retention-days 100 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['containerDeleteRetentionPolicy']['enabled'], True)
self.assertEqual(result['containerDeleteRetentionPolicy']['days'], 100)
result = self.cmd('{cmd} --enable-container-delete-retention true --container-delete-retention-days 365 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['containerDeleteRetentionPolicy']['enabled'], True)
self.assertEqual(result['containerDeleteRetentionPolicy']['days'], 365)
result = self.cmd('{cmd} --enable-container-delete-retention false -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['containerDeleteRetentionPolicy']['enabled'], False)
self.assertEqual(result['containerDeleteRetentionPolicy']['days'], None)
@ResourceGroupPreparer()
@StorageAccountPreparer(kind="StorageV2")
def test_storage_account_default_service_properties(self):
from azure.cli.core.azclierror import InvalidArgumentValueError
self.cmd('storage account blob-service-properties show -n {sa} -g {rg}', checks=[
self.check('defaultServiceVersion', None)])
        with self.assertRaisesRegex(InvalidArgumentValueError, 'Valid example: 2008-10-27'):
self.cmd('storage account blob-service-properties update --default-service-version 2018 -n {sa} -g {rg}')
self.cmd('storage account blob-service-properties update --default-service-version 2018-11-09 -n {sa} -g {rg}',
checks=[self.check('defaultServiceVersion', '2018-11-09')])
self.cmd('storage account blob-service-properties show -n {sa} -g {rg}',
checks=[self.check('defaultServiceVersion', '2018-11-09')])
class FileServicePropertiesTests(StorageScenarioMixin, ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_file_soft_delete')
@StorageAccountPreparer(name_prefix='filesoftdelete', kind='StorageV2', location='eastus2euap')
def test_storage_account_file_delete_retention_policy(self, resource_group, storage_account):
from azure.cli.core.azclierror import ValidationError
self.kwargs.update({
'sa': storage_account,
'rg': resource_group,
'cmd': 'storage account file-service-properties'
})
self.cmd('{cmd} show --account-name {sa} -g {rg}').assert_with_checks(
JMESPathCheck('shareDeleteRetentionPolicy.enabled', True),
JMESPathCheck('shareDeleteRetentionPolicy.days', 7))
# Test update without properties
self.cmd('{cmd} update --account-name {sa} -g {rg}').assert_with_checks(
JMESPathCheck('shareDeleteRetentionPolicy.enabled', True),
JMESPathCheck('shareDeleteRetentionPolicy.days', 7))
self.cmd('{cmd} update --enable-delete-retention false -n {sa} -g {rg}').assert_with_checks(
JMESPathCheck('shareDeleteRetentionPolicy.enabled', False),
JMESPathCheck('shareDeleteRetentionPolicy.days', None))
self.cmd('{cmd} show -n {sa} -g {rg}').assert_with_checks(
JMESPathCheck('shareDeleteRetentionPolicy.enabled', False),
JMESPathCheck('shareDeleteRetentionPolicy.days', 0))
# Test update without properties
self.cmd('{cmd} update --account-name {sa} -g {rg}').assert_with_checks(
JMESPathCheck('shareDeleteRetentionPolicy.enabled', False),
JMESPathCheck('shareDeleteRetentionPolicy.days', None))
with self.assertRaises(ValidationError):
self.cmd('{cmd} update --enable-delete-retention true -n {sa} -g {rg}')
        with self.assertRaisesRegex(ValidationError, "Delete Retention Policy hasn't been enabled,"):
            self.cmd('{cmd} update --delete-retention-days 1 -n {sa} -g {rg}')
with self.assertRaises(ValidationError):
self.cmd('{cmd} update --enable-delete-retention false --delete-retention-days 1 -n {sa} -g {rg}')
self.cmd(
'{cmd} update --enable-delete-retention true --delete-retention-days 10 -n {sa} -g {rg}').assert_with_checks(
JMESPathCheck('shareDeleteRetentionPolicy.enabled', True),
JMESPathCheck('shareDeleteRetentionPolicy.days', 10))
self.cmd('{cmd} update --delete-retention-days 1 -n {sa} -g {rg}').assert_with_checks(
JMESPathCheck('shareDeleteRetentionPolicy.enabled', True),
JMESPathCheck('shareDeleteRetentionPolicy.days', 1))
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2020-08-01-preview')
@ResourceGroupPreparer(name_prefix='cli_file_smb')
@StorageAccountPreparer(parameter_name='storage_account1', name_prefix='filesmb1', kind='FileStorage',
sku='Premium_LRS', location='centraluseuap')
@StorageAccountPreparer(parameter_name='storage_account2', name_prefix='filesmb2', kind='StorageV2')
def test_storage_account_file_smb_multichannel(self, resource_group, storage_account1, storage_account2):
from azure.core.exceptions import ResourceExistsError
self.kwargs.update({
'sa': storage_account1,
'sa2': storage_account2,
'rg': resource_group,
'cmd': 'storage account file-service-properties'
})
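        # SMB Multichannel is only available on premium FileStorage accounts, so the StorageV2 account is expected to fail.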
        with self.assertRaisesRegex(ResourceExistsError, "SMB Multichannel is not supported for the account."):
self.cmd('{cmd} update --mc -n {sa2} -g {rg}')
self.cmd('{cmd} show -n {sa} -g {rg}').assert_with_checks(
JMESPathCheck('shareDeleteRetentionPolicy.enabled', True),
JMESPathCheck('shareDeleteRetentionPolicy.days', 7),
JMESPathCheck('protocolSettings.smb.multichannel.enabled', False))
self.cmd('{cmd} show -n {sa2} -g {rg}').assert_with_checks(
JMESPathCheck('shareDeleteRetentionPolicy.enabled', True),
JMESPathCheck('shareDeleteRetentionPolicy.days', 7),
JMESPathCheck('protocolSettings.smb.multichannel', None))
self.cmd(
'{cmd} update --enable-smb-multichannel -n {sa} -g {rg}').assert_with_checks(
JMESPathCheck('protocolSettings.smb.multichannel.enabled', True))
self.cmd(
'{cmd} update --enable-smb-multichannel false -n {sa} -g {rg}').assert_with_checks(
JMESPathCheck('protocolSettings.smb.multichannel.enabled', False))
self.cmd(
'{cmd} update --enable-smb-multichannel true -n {sa} -g {rg}').assert_with_checks(
JMESPathCheck('protocolSettings.smb.multichannel.enabled', True))
class StorageAccountPrivateLinkScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_sa_plr')
@StorageAccountPreparer(name_prefix='saplr', kind='StorageV2', sku='Standard_LRS')
def test_storage_account_private_link(self, storage_account):
self.kwargs.update({
'sa': storage_account
})
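        # A StorageV2 account exposes six private-link sub-resources (blob, table, queue, file, web, dfs).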
self.cmd('storage account private-link-resource list --account-name {sa} -g {rg}', checks=[
self.check('length(@)', 6)])
class StorageAccountPrivateEndpointScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_sa_pe')
@StorageAccountPreparer(name_prefix='saplr', kind='StorageV2')
def test_storage_account_private_endpoint(self, storage_account):
self.kwargs.update({
'sa': storage_account,
'loc': 'eastus',
'vnet': self.create_random_name('cli-vnet-', 24),
'subnet': self.create_random_name('cli-subnet-', 24),
'pe': self.create_random_name('cli-pe-', 24),
'pe_connection': self.create_random_name('cli-pec-', 24),
})
# Prepare network
self.cmd('network vnet create -n {vnet} -g {rg} -l {loc} --subnet-name {subnet}',
checks=self.check('length(newVNet.subnets)', 1))
self.cmd('network vnet subnet update -n {subnet} --vnet-name {vnet} -g {rg} '
'--disable-private-endpoint-network-policies true',
checks=self.check('privateEndpointNetworkPolicies', 'Disabled'))
# Create a private endpoint connection
pr = self.cmd('storage account private-link-resource list --account-name {sa} -g {rg}').get_output_in_json()
self.kwargs['group_id'] = pr[0]['groupId']
storage = self.cmd('storage account show -n {sa} -g {rg}').get_output_in_json()
self.kwargs['sa_id'] = storage['id']
private_endpoint = self.cmd(
'network private-endpoint create -g {rg} -n {pe} --vnet-name {vnet} --subnet {subnet} -l {loc} '
'--connection-name {pe_connection} --private-connection-resource-id {sa_id} '
'--group-ids blob').get_output_in_json()
self.assertEqual(private_endpoint['name'], self.kwargs['pe'])
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['name'], self.kwargs['pe_connection'])
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['privateLinkServiceConnectionState']['status'], 'Approved')
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['provisioningState'], 'Succeeded')
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['groupIds'][0], self.kwargs['group_id'])
self.kwargs['pe_id'] = private_endpoint['privateLinkServiceConnections'][0]['id']
# Show the connection at storage account
storage = self.cmd('storage account show -n {sa} -g {rg}').get_output_in_json()
self.assertIn('privateEndpointConnections', storage)
self.assertEqual(len(storage['privateEndpointConnections']), 1)
self.assertEqual(storage['privateEndpointConnections'][0]['privateLinkServiceConnectionState']['status'],
'Approved')
self.kwargs['sa_pec_id'] = storage['privateEndpointConnections'][0]['id']
self.kwargs['sa_pec_name'] = storage['privateEndpointConnections'][0]['name']
self.cmd('storage account private-endpoint-connection show --account-name {sa} -g {rg} --name {sa_pec_name}',
checks=self.check('id', '{sa_pec_id}'))
        with self.assertRaisesRegex(CLIError, 'Your connection is already approved. No need to approve again.'):
self.cmd('storage account private-endpoint-connection approve --account-name {sa} -g {rg} --name {sa_pec_name}')
self.cmd('storage account private-endpoint-connection reject --account-name {sa} -g {rg} --name {sa_pec_name}',
checks=[self.check('privateLinkServiceConnectionState.status', 'Rejected')])
        with self.assertRaisesRegex(CLIError, 'You cannot approve the connection request after rejection.'):
self.cmd('storage account private-endpoint-connection approve --account-name {sa} -g {rg} --name {sa_pec_name}')
self.cmd('storage account private-endpoint-connection delete --id {sa_pec_id} -y')
class StorageAccountSkuScenarioTest(ScenarioTest):
@unittest.skip('Storage account type Standard_ZRS cannot be changed to Standard_GZRS')
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer(name_prefix='clistorage', location='eastus2')
@StorageAccountPreparer(name_prefix='clistoragesku', location='eastus2euap', kind='StorageV2', sku='Standard_ZRS')
def test_storage_account_sku(self, resource_group, storage_account):
self.kwargs = {
'gzrs_sa': self.create_random_name(prefix='cligzrs', length=24),
'GZRS': 'Standard_GZRS',
'rg': resource_group,
'sa': storage_account
}
# Create storage account with GZRS
self.cmd('az storage account create -n {gzrs_sa} -g {rg} --sku {GZRS} --https-only --kind StorageV2', checks=[
self.check('sku.name', '{GZRS}'),
self.check('name', '{gzrs_sa}')
])
# Convert ZRS to GZRS
self.cmd('az storage account show -n {sa} -g {rg}', checks=[
self.check('sku.name', 'Standard_ZRS'),
self.check('name', '{sa}')
])
self.cmd('az storage account update -n {sa} -g {rg} --sku {GZRS}', checks=[
self.check('sku.name', '{GZRS}'),
self.check('name', '{sa}'),
])
self.cmd('az storage account show -n {sa} -g {rg}', checks=[
self.check('sku.name', '{GZRS}'),
self.check('name', '{sa}')
])
self.cmd('az storage account delete -n {gzrs_sa} -g {rg} -y')
class StorageAccountFailoverScenarioTest(ScenarioTest):
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer(name_prefix='clistorage', location='westus2')
def test_storage_account_failover(self, resource_group):
self.kwargs = {
'sa': self.create_random_name(prefix="storagegrzs", length=24),
'rg': resource_group
}
self.cmd('storage account create -n {sa} -g {rg} -l eastus2euap --kind StorageV2 --sku Standard_RAGRS --https-only',
checks=[self.check('name', '{sa}'),
self.check('sku.name', 'Standard_RAGRS')])
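        # Poll until the secondary region is caught up and the account reports it can fail over.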
while True:
can_failover = self.cmd('storage account show -n {sa} -g {rg} --expand geoReplicationStats --query '
'geoReplicationStats.canFailover -o tsv').output.strip('\n')
if can_failover == 'true':
break
time.sleep(10)
self.cmd('storage account show -n {sa} -g {rg} --expand geoReplicationStats', checks=[
self.check('geoReplicationStats.canFailover', True),
self.check('failoverInProgress', None)
])
time.sleep(900)
self.cmd('storage account failover -n {sa} -g {rg} --no-wait -y')
self.cmd('storage account show -n {sa} -g {rg} --expand geoReplicationStats', checks=[
self.check('name', '{sa}'),
self.check('failoverInProgress', True)
])
class StorageAccountLocalContextScenarioTest(LocalContextScenarioTest):
@ResourceGroupPreparer(name_prefix='clistorage', location='westus2')
def test_storage_account_local_context(self):
self.kwargs.update({
'account_name': self.create_random_name(prefix='cli', length=24)
})
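        # With local context enabled, the account name created above is remembered, so later commands can omit -n.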
self.cmd('storage account create -g {rg} -n {account_name} --https-only',
checks=[self.check('name', self.kwargs['account_name'])])
self.cmd('storage account show',
checks=[self.check('name', self.kwargs['account_name'])])
with self.assertRaises(CLIError):
self.cmd('storage account delete')
self.cmd('storage account delete -n {account_name} -y')
class StorageAccountORScenarioTest(StorageScenarioMixin, ScenarioTest):
@AllowLargeResponse()
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-06-01')
@ResourceGroupPreparer(name_prefix='cli_test_storage_account_ors', location='eastus2')
@StorageAccountPreparer(parameter_name='source_account', location='eastus2', kind='StorageV2')
@StorageAccountPreparer(parameter_name='destination_account', location='eastus2', kind='StorageV2')
@StorageAccountPreparer(parameter_name='new_account', location='eastus2', kind='StorageV2')
def test_storage_account_or_policy(self, resource_group, source_account, destination_account, new_account):
src_account_info = self.get_account_info(resource_group, source_account)
src_container = self.create_container(src_account_info)
dest_account_info = self.get_account_info(resource_group, destination_account)
dest_container = self.create_container(dest_account_info)
self.kwargs.update({
'rg': resource_group,
'src_sc': source_account,
'dest_sc': destination_account,
'new_sc': new_account,
'scont': src_container,
'dcont': dest_container,
})
# Enable ChangeFeed for Source Storage Accounts
self.cmd('storage account blob-service-properties update -n {src_sc} -g {rg} --enable-change-feed', checks=[
JMESPathCheck('changeFeed.enabled', True)])
# Enable Versioning for two Storage Accounts
self.cmd('storage account blob-service-properties update -n {src_sc} -g {rg} --enable-versioning', checks=[
JMESPathCheck('isVersioningEnabled', True)])
self.cmd('storage account blob-service-properties update -n {dest_sc} -g {rg} --enable-versioning', checks=[
JMESPathCheck('isVersioningEnabled', True)])
# Create ORS policy on destination account
result = self.cmd('storage account or-policy create -n {dest_sc} -s {src_sc} --dcont {dcont} '
'--scont {scont} -t "2020-02-19T16:05:00Z"').get_output_in_json()
self.assertIn('policyId', result)
self.assertIn('ruleId', result['rules'][0])
self.assertEqual(result["rules"][0]["filters"]["minCreationTime"], "2020-02-19T16:05:00Z")
self.kwargs.update({
'policy_id': result["policyId"],
'rule_id': result["rules"][0]["ruleId"]
})
# Get policy properties from destination account
self.cmd('storage account or-policy show -g {rg} -n {dest_sc} --policy-id {policy_id}') \
.assert_with_checks(JMESPathCheck('type', "Microsoft.Storage/storageAccounts/objectReplicationPolicies")) \
.assert_with_checks(JMESPathCheck('sourceAccount', source_account)) \
.assert_with_checks(JMESPathCheck('destinationAccount', destination_account)) \
.assert_with_checks(JMESPathCheck('rules[0].sourceContainer', src_container)) \
.assert_with_checks(JMESPathCheck('rules[0].destinationContainer', dest_container))
# Add rules
src_container1 = self.create_container(src_account_info)
dest_container1 = self.create_container(dest_account_info)
self.cmd('storage account or-policy rule list -g {rg} -n {dest_sc} --policy-id {policy_id}')\
.assert_with_checks(JMESPathCheck('length(@)', 1))
self.cmd('storage account or-policy rule show -g {rg} -n {dest_sc} --rule-id {rule_id} --policy-id {policy_id}')\
.assert_with_checks(JMESPathCheck('ruleId', result["rules"][0]["ruleId"])) \
.assert_with_checks(JMESPathCheck('sourceContainer', src_container)) \
.assert_with_checks(JMESPathCheck('destinationContainer', dest_container))
result = self.cmd('storage account or-policy rule add -g {} -n {} --policy-id {} -d {} -s {} -t "2020-02-19T16:05:00Z"'.format(
resource_group, destination_account, self.kwargs["policy_id"], dest_container1, src_container1)).get_output_in_json()
self.assertEqual(result["rules"][0]["filters"]["minCreationTime"], "2020-02-19T16:05:00Z")
self.cmd('storage account or-policy rule list -g {rg} -n {dest_sc} --policy-id {policy_id}')\
.assert_with_checks(JMESPathCheck('length(@)', 2))
# Update rules
self.cmd('storage account or-policy rule update -g {} -n {} --policy-id {} --rule-id {} --prefix-match blobA blobB -t "2020-02-20T16:05:00Z"'.format(
resource_group, destination_account, result['policyId'], result['rules'][1]['ruleId'])) \
.assert_with_checks(JMESPathCheck('filters.prefixMatch[0]', 'blobA')) \
.assert_with_checks(JMESPathCheck('filters.prefixMatch[1]', 'blobB')) \
.assert_with_checks(JMESPathCheck('filters.minCreationTime', '2020-02-20T16:05:00Z'))
self.cmd('storage account or-policy rule show -g {} -n {} --policy-id {} --rule-id {}'.format(
resource_group, destination_account, result['policyId'], result['rules'][1]['ruleId'])) \
.assert_with_checks(JMESPathCheck('filters.prefixMatch[0]', 'blobA')) \
.assert_with_checks(JMESPathCheck('filters.prefixMatch[1]', 'blobB')) \
.assert_with_checks(JMESPathCheck('filters.minCreationTime', '2020-02-20T16:05:00Z'))
# Remove rules
self.cmd('storage account or-policy rule remove -g {} -n {} --policy-id {} --rule-id {}'.format(
resource_group, destination_account, result['policyId'], result['rules'][1]['ruleId']))
self.cmd('storage account or-policy rule list -g {rg} -n {dest_sc} --policy-id {policy_id}') \
.assert_with_checks(JMESPathCheck('length(@)', 1))
# Set ORS policy to source account
with self.assertRaisesRegex(CLIError, 'ValueError: Please specify --policy-id with auto-generated policy id'):
self.cmd('storage account or-policy create -g {rg} -n {src_sc} -d {dest_sc} -s {src_sc} --dcont {dcont} --scont {scont}')
import json
temp_dir = self.create_temp_dir()
policy_file = os.path.join(temp_dir, "policy.json")
with open(policy_file, "w") as f:
policy = self.cmd('storage account or-policy show -g {rg} -n {dest_sc} --policy-id {policy_id}')\
.get_output_in_json()
json.dump(policy, f)
self.kwargs['policy'] = policy_file
self.cmd('storage account or-policy create -g {rg} -n {src_sc} -p @"{policy}"')\
.assert_with_checks(JMESPathCheck('type', "Microsoft.Storage/storageAccounts/objectReplicationPolicies")) \
.assert_with_checks(JMESPathCheck('sourceAccount', source_account)) \
.assert_with_checks(JMESPathCheck('destinationAccount', destination_account)) \
.assert_with_checks(JMESPathCheck('rules[0].sourceContainer', src_container)) \
.assert_with_checks(JMESPathCheck('rules[0].destinationContainer', dest_container)) \
.assert_with_checks(JMESPathCheck('rules[0].filters.minCreationTime', '2020-02-19T16:05:00Z'))
# Update ORS policy
self.cmd('storage account or-policy update -g {} -n {} --policy-id {} --source-account {}'.format(
resource_group, destination_account, self.kwargs["policy_id"], new_account)) \
.assert_with_checks(JMESPathCheck('sourceAccount', new_account))
# Delete policy from destination and source account
self.cmd('storage account or-policy delete -g {rg} -n {src_sc} --policy-id {policy_id}')
self.cmd('storage account or-policy list -g {rg} -n {src_sc}') \
.assert_with_checks(JMESPathCheck('length(@)', 0))
self.cmd('storage account or-policy delete -g {rg} -n {dest_sc} --policy-id {policy_id}')
self.cmd('storage account or-policy list -g {rg} -n {dest_sc}') \
.assert_with_checks(JMESPathCheck('length(@)', 0))
|
mit
| 306,901,749,235,503,800 | 58.635177 | 180 | 0.645382 | false | 3.887354 | true | false | false |
sekikn/ambari
|
ambari-agent/src/test/python/ambari_agent/TestCommandHooksOrchestrator.py
|
2
|
2534
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from unittest import TestCase
from ambari_agent.models.hooks import HookPrefix
from mock.mock import patch
from ambari_agent.CommandHooksOrchestrator import HookSequenceBuilder, ResolvedHooks, HooksOrchestrator
class TestCommandHooksOrchestrator(TestCase):
def setUp(self):
def injector():
pass
def file_cache():
pass
file_cache.__setattr__("get_hook_base_dir", lambda x: os.path.join("tmp"))
injector.__setattr__("file_cache", file_cache)
self._orchestrator = HooksOrchestrator(injector)
@patch("os.path.isfile")
def test_check_orchestrator(self, is_file_mock):
is_file_mock.return_value = True
ret = self._orchestrator.resolve_hooks({
"commandType": "EXECUTION_COMMAND",
"serviceName": "ZOOKEEPER",
"role": "ZOOKEEPER_SERVER"
}, "START")
self.assertTrue(ret)
self.assertEquals(len(ret.post_hooks), 3)
self.assertEquals(len(ret.pre_hooks), 3)
def test_hook_seq_builder(self):
seq = list(HookSequenceBuilder().build(HookPrefix.pre, "cmd", "srv", "role"))
seq_rev = list(HookSequenceBuilder().build(HookPrefix.post, "cmd", "srv", "role"))
# testing base default sequence definition
check_list = [
"before-cmd",
"before-cmd-srv",
"before-cmd-srv-role"
]
check_list_1 = [
"after-cmd-srv-role",
"after-cmd-srv",
"after-cmd"
]
self.assertEquals(seq, check_list)
self.assertEquals(seq_rev, check_list_1)
def test_hook_resolved(self):
def pre():
for i in range(1, 5):
yield i
def post():
for i in range(1, 3):
yield i
ret = ResolvedHooks(pre(), post())
self.assertEqual(ret.pre_hooks, list(pre()))
self.assertEqual(ret.post_hooks, list(post()))
|
apache-2.0
| 833,573,844,282,085,100 | 27.47191 | 103 | 0.693765 | false | 3.604552 | true | false | false |
theDrake/python-experiments
|
YouFace/page.py
|
1
|
5613
|
from html import *
TITLE = 'YouFace'
SUBTITLE = "A billion dollars and it's yours!"
STYLESHEET = '/youface.css'
LINKLIST = [('http://cit.cs.dixie.edu/cs/1410/', 'CS 1410'), \
('http://new.dixie.edu/reg/syllabus/', 'College calendar'),]
class Form(BlockContainer):
def __init__(self, action):
BlockContainer.__init__(self, 'form')
self.addAttr('method', 'post')
self.addAttr('action', action)
class Label(Container):
def __init__(self, forAttr):
Container.__init__(self, 'label')
self.addAttr('for', forAttr)
class Input(Tag):
def __init__(self, inputType, name, value=None):
Tag.__init__(self, 'input')
self.addAttr('type', inputType)
self.addAttr('name', name)
if value:
self.addAttr('value', value)
class Box(Div):
def __init__(self, title):
Div.__init__(self)
self.addClass('box')
titleTag = H(1)
titleTag.addText(title)
self.addTag(titleTag)
class StatusBox(Box):
def __init__(self, userName):
Box.__init__(self, 'Welcome, ' + userName)
p1 = P()
p1.addTag(Label('status').addText('Change your status:'))
p1.addTag(Input('text', 'status'))
p2 = P()
p2.addTag(Input('submit', 'change', 'Change'))
self.addTag(Form('/status').addTag(p1).addTag(p2))
class RecentActivityBox(Box):
def __init__(self, activities):
Box.__init__(self, 'Recent status updates')
activityList = Ul()
for a in activities:
activityList.addTag(Li().addText(str(a)))
self.addTag(activityList)
class UnFriendBox(Box):
def __init__(self, friendName):
Box.__init__(self, 'You are currently friends with ' + friendName)
f = Form('/unfriend')
f.addTag(Input('hidden', 'name', friendName))
p = P()
p.addTag(Input('submit', 'unfriend', 'Unfriend'))
f.addTag(p)
self.addTag(P().addTag(f))
class LoginBox(Box):
def __init__(self):
Box.__init__(self, 'Login')
p1 = P()
p1.addTag(Label('name').addText('Name:'))
p1.addTag(Input('text', 'name'))
p2 = P()
p2.addTag(Label('password').addText('Password:'))
p2.addTag(Input('password', 'password'))
p3 = P()
p3.addTag(Input('submit', 'type', 'Login'))
p3.addTag(Input('submit', 'type', 'Create'))
p3.addTag(Input('submit', 'type', 'Delete'))
self.addTag(Form('/login').addTag(p1).addTag(p2).addTag(p3))
class Gadget(Div):
def __init__(self, title):
Div.__init__(self)
self.addClass('gadget')
self.addTag(H(1).addText(title))
class LinksGadget(Gadget):
def __init__(self, links=LINKLIST):
Gadget.__init__(self, 'Links')
linkList = Ul()
for link in links:
linkList.addTag(Li().addTag(A(link[0]).addText(str(link[1]))))
self.addTag(linkList)
class FriendsGadget(Gadget):
def __init__(self, friends):
Gadget.__init__(self, 'Friends')
friendList = Ul()
for name in friends:
listItem = Li().addTag(A('/friend/' + name).addText(name))
friendList.addTag(listItem)
self.addTag(friendList)
p = P()
p.addTag(Input('text', 'name'))
p.addTag(Input('submit', 'addfriend', 'Add Friend'))
self.addTag(Form('/addfriend').addTag(p))
class LogoutGadget(Gadget):
def __init__(self):
Gadget.__init__(self, 'Logout')
p = P().addTag(Input('submit', 'logout', 'Logout'))
self.addTag(Form('/logout').addTag(p))
class Page:
def __init__(self):
self.boxList = []
self.gadgetList = []
self.head = Head().addTag(Meta()).addTag(Title().addText(TITLE))
self.head.addTag(Stylesheet(STYLESHEET))
self.header = Div().setId('header')
self.header.addTag(H(1).addTag(A('/').addText(TITLE)))
self.header.addTag(H(2).addText(SUBTITLE))
def addBox(self, box):
self.boxList.append(box)
return self
def addGadget(self, gadget):
self.gadgetList.append(gadget)
return self
def __str__(self):
mainColumn = Div().setId('maincolumn')
for b in self.boxList:
mainColumn.addTag(b)
sidebar = Div().setId('sidebar')
for g in self.gadgetList:
sidebar.addTag(g)
mainContainer = Div().setId('maincontainer').addTag(self.header)
mainContainer.addTag(mainColumn).addTag(sidebar)
body = Body().addTag(mainContainer)
html = Html().addTag(self.head).addTag(body)
return str(html)
def __repr__(self):
return self.__str__()
class LoginPage(Page):
def __init__(self, linkList=LINKLIST):
Page.__init__(self)
self.addBox(LoginBox()).addGadget(LinksGadget(linkList))
class UserPage(Page):
def __init__(self, friends, linkList=LINKLIST):
Page.__init__(self)
self.addGadget(LogoutGadget()).addGadget(FriendsGadget(friends))
self.addGadget(LinksGadget(linkList))
class FeedPage(UserPage):
def __init__(self, name, recentStatusUpdates, friends):
UserPage.__init__(self, friends)
self.addBox(StatusBox(name))
self.addBox(RecentActivityBox(recentStatusUpdates))
class FriendPage(UserPage):
def __init__(self, name, recentStatusUpdates, friends):
UserPage.__init__(self, friends)
self.addBox(UnFriendBox(name))
self.addBox(RecentActivityBox(recentStatusUpdates))
def main():
print 'page.py'
if __name__ == '__main__':
main()
|
mit
| -4,225,685,870,735,543,000 | 31.445087 | 74 | 0.582576 | false | 3.319338 | false | false | false |
ProfHoekstra/bluesky
|
bluesky/traffic/windfield.py
|
1
|
9404
|
""" Wind implementation for BlueSky."""
from numpy import array, sin, cos, arange, radians, ones, append, ndarray, \
amin, minimum, repeat, delete, zeros, around, maximum, floor, \
interp, pi
from bluesky.tools.aero import ft
class Windfield():
""" Windfield class:
Methods:
clear() = clear windfield, no wind vectors defined
addpoint(lat,lon,winddir,windspd,windalt=None)
= add a wind vector to a position,
windvector can be arrays for altitudes (optional)
returns index of vector (0,1,2,3,..)
all units are SI units, angles in degrees
get(lat,lon,alt=0)
= get wind vector for given position and optional
altitude, all can be arrays,
vnorth and veast will be returned in same dimension
remove(idx) = remove a defined profile using the index
Members:
lat(nvec) = latitudes of wind definitions
lon(nvec) = longitudes of wind definitions
altaxis(nalt) = altitude axis (fixed, 250 m resolution)
vnorth(nalt,nvec) = wind north component [m/s]
veast(nalt,nvec) = wind east component [m/s]
winddim = Windfield dimension, will automatically be detected:
0 = no wind
1 = constant wind
2 = 2D field (no alt profiles),
3 = 3D field (alt dependent wind at some points)
"""
def __init__(self):
# For altitude use fixed axis to allow vectorisation later
self.altmax = 45000. * ft # [m]
self.altstep = 100. * ft # [m]
# Axis
self.altaxis = arange(0., self.altmax + self.altstep, self.altstep)
self.idxalt = arange(0, len(self.altaxis), 1.)
self.nalt = len(self.altaxis)
# List of indices of points with an altitude profile (for 3D check)
self.iprof = []
# Clear actual field
self.clear()
return
def clear(self): #Clear actual field
# Windfield dimension will automatically be detected:
# 0 = no wind, 1 = constant wind, 2 = 2D field (no alt profiles),
# 3 = 3D field (alt matters), used to speed up interpolation
self.winddim = 0
self.lat = array([])
self.lon = array([])
self.vnorth = array([[]])
self.veast = array([[]])
self.nvec = 0
return
def addpoint(self,lat,lon,winddir,windspd,windalt=None):
""" addpoint: adds a lat,lon position with a wind direction [deg]
and wind speed [m/s]
Optionally an array of altitudes can be given, in which case winddir
and windspd need to have the same dimension
"""
# If scalar, copy into table for altitude axis
if not(type(windalt) in [ndarray,list]) and windalt == None: # scalar to array
prof3D = False # no wind profile, just one value
wspd = ones(self.nalt)*windspd
wdir = ones(self.nalt)*winddir
vnaxis = wspd*cos(radians(wdir)+pi)
veaxis = wspd*sin(radians(wdir)+pi)
# if list or array, convert to alt axis of wind field
else:
prof3D = True # switch on 3D parameter as an altitude array is given
wspd = array(windspd)
wdir = array(winddir)
altvn = wspd*cos(radians(wdir)+pi)
altve = wspd*sin(radians(wdir)+pi)
alttab = windalt
vnaxis = interp(self.altaxis, alttab, altvn)
veaxis = interp(self.altaxis, alttab, altve)
# print array([vnaxis]).transpose()
self.lat = append(self.lat,lat)
self.lon = append(self.lon,lon)
idx = len(self.lat)-1
if self.nvec==0:
self.vnorth = array([vnaxis]).transpose()
self.veast = array([veaxis]).transpose()
else:
self.vnorth = append(self.vnorth,array([vnaxis]).transpose(),axis=1)
self.veast = append(self.veast, array([veaxis]).transpose(),axis=1)
if self.winddim<3: # No 3D => set dim to 0,1 or 2 dep on nr of points
self.winddim = min(2,len(self.lat))
if prof3D:
self.winddim = 3
self.iprof.append(idx)
self.nvec = self.nvec+1
return idx # return index of added point
def getdata(self,userlat,userlon,useralt=0.0): # in case no altitude specified and field is 3D, use sea level wind
eps = 1e-20 # [m2] to avoid division by zero when using exactly the same points
swvector = (type(userlat)==list or type(userlat)==ndarray)
if swvector:
npos = len(userlat)
else:
npos = 1
# Convert user input to right shape: columns for positions
lat = array(userlat).reshape((1,npos))
lon = array(userlon).reshape((1,npos))
# Make altitude into an array, with zero or float value broadcast over npos
if type(useralt)==ndarray:
alt = useralt
elif type(useralt)==list:
alt = array(useralt)
elif type(useralt)==float:
alt = useralt*ones(npos)
else:
alt = zeros(npos)
# Check dimension of wind field
if self.winddim == 0: # None = no wind
vnorth = zeros(npos)
veast = zeros(npos)
elif self.winddim == 1: # Constant = one point defined, so constant wind
vnorth = ones(npos)*self.vnorth[0,0]
veast = ones(npos)*self.veast[0,0]
elif self.winddim >= 2: # 2D/3D field = more points defined but no altitude profile
#---- Get horizontal weight factors
# Average cosine for flat-earth approximation
cavelat = cos(radians(0.5*(lat+array([self.lat]).transpose())))
# Lat and lon distance in 60 nm units (1 lat degree)
dy = lat - array([self.lat]).transpose() #(nvec,npos)
dx = cavelat*(lon - array([self.lon]).transpose())
# Calculate inverse distance squared
invd2 = 1./(eps+dx*dx+dy*dy) # inverse of distance squared
# Normalize weights
sumsid2 = ones((1,self.nvec)).dot(invd2) # totals to normalize weights
totals = repeat(sumsid2,self.nvec,axis=0) # scale up dims to (nvec,npos)
horfact = invd2/totals # rows x col = nvec x npos, weight factors
#---- Altitude interpolation
# No altitude profiles used: do 2D planar interpolation only
if self.winddim == 2 or ((type(useralt) not in (list,ndarray)) and useralt==0.0): # 2D field no altitude interpolation
vnorth = self.vnorth[0,:].dot(horfact)
veast = self.veast[0,:].dot(horfact)
# 3D interpolation as one or more points contain altitude profile
else:
# Get altitude index as float for alt interpolation
idxalt = maximum(0., minimum(self.altaxis[-1]-eps, alt) / self.altstep) # find right index
# Convert to index and factor
ialt = floor(idxalt).astype(int) # index array for lower altitude
falt = idxalt-ialt # factor for upper value
# Altitude interpolation combined with horizontal
nvec = len(self.lon) # Get number of definition points
# North wind (y-direction or lat direction)
vn0 = (self.vnorth[ialt,:]*horfact.T).dot(ones((nvec,1))) # hor interpolate lower alt (npos x)
vn1 = (self.vnorth[ialt+1,:]*horfact.T).dot(ones((nvec,1))) # hor interpolate upper alts (npos x)
vnorth = (1.-falt)*(vn0.reshape(npos)) + falt*(vn1.reshape(npos)) # As 1D array
# East wind (x-direction or lon direction)
ve0 = (self.veast[ialt,:]*horfact.T).dot(ones((nvec,1)))
ve1 = (self.veast[ialt+1,:]*horfact.T).dot(ones((nvec,1)))
veast = (1.-falt)*(ve0.reshape(npos)) + falt*(ve1.reshape(npos)) # As 1D array
# Return same type as positions were given
if type(userlat)==ndarray:
return vnorth,veast
elif type(userlat)==list:
return list(vnorth),list(veast)
else:
return float(vnorth),float(veast)
def remove(self,idx): # remove a point using the returned index when it was added
if idx<len(self.lat):
self.lat = delete(self.lat,idx)
self.lon = delete(self.lon,idx)
self.vnorth = delete(self.vnorth,idx,axis=1)
self.veast = delete(self.veast ,idx,axis=1)
if idx in self.iprof:
self.iprof.remove(idx)
if self.winddim<3 or len(self.iprof)==0 or len(self.lat)==0:
self.winddim = min(2,len(self.lat)) # Check for 0, 1D, 2D or 3D
return
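# A minimal usage sketch (illustrative, not part of BlueSky itself): a single
# definition point yields a constant wind field everywhere.
if __name__ == "__main__":
    wf = Windfield()
    wf.addpoint(52.0, 4.0, winddir=270.0, windspd=10.0)  # 10 m/s wind from the west
    vn, ve = wf.getdata(52.1, 4.3, 1500.0)               # query at 1500 m altitude
    print("vnorth = %.2f m/s, veast = %.2f m/s" % (vn, ve))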
|
gpl-3.0
| -166,896,459,923,706,270 | 39.245614 | 130 | 0.542003 | false | 3.82899 | false | false | false |
mdworks2016/work_development
|
Python/20_Third_Certification/venv/lib/python3.7/site-packages/celery/contrib/sphinx.py
|
1
|
3587
|
# -*- coding: utf-8 -*-
"""Sphinx documentation plugin used to document tasks.
Introduction
============
Usage
-----
Add the extension to your :file:`docs/conf.py` configuration module:
.. code-block:: python
extensions = (...,
'celery.contrib.sphinx')
If you'd like to change the prefix for tasks in reference documentation
then you can change the ``celery_task_prefix`` configuration value:
.. code-block:: python
celery_task_prefix = '(task)' # < default
With the extension installed `autodoc` will automatically find
task decorated objects (e.g. when using the automodule directive)
and generate the correct (as well as add a ``(task)`` prefix),
and you can also refer to the tasks using `:task:proj.tasks.add`
syntax.
Use ``.. autotask::`` to alternatively manually document a task.
"""
from __future__ import absolute_import, unicode_literals
from sphinx.domains.python import PyModulelevel
from sphinx.ext.autodoc import FunctionDocumenter
from celery.app.task import BaseTask
try: # pragma: no cover
from inspect import formatargspec, getfullargspec
except ImportError: # Py2
from inspect import formatargspec, getargspec as getfullargspec # noqa
class TaskDocumenter(FunctionDocumenter):
"""Document task definitions."""
objtype = 'task'
member_order = 11
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return isinstance(member, BaseTask) and getattr(member, '__wrapped__')
def format_args(self):
wrapped = getattr(self.object, '__wrapped__', None)
if wrapped is not None:
argspec = getfullargspec(wrapped)
if argspec[0] and argspec[0][0] in ('cls', 'self'):
del argspec[0][0]
fmt = formatargspec(*argspec)
fmt = fmt.replace('\\', '\\\\')
return fmt
return ''
def document_members(self, all_members=False):
pass
def check_module(self):
# Normally checks if *self.object* is really defined in the module
# given by *self.modname*. But since functions decorated with the @task
# decorator are instances living in the celery.local, we have to check
# the wrapped function instead.
wrapped = getattr(self.object, '__wrapped__', None)
if wrapped and getattr(wrapped, '__module__') == self.modname:
return True
return super(TaskDocumenter, self).check_module()
class TaskDirective(PyModulelevel):
"""Sphinx task directive."""
def get_signature_prefix(self, sig):
return self.env.config.celery_task_prefix
def autodoc_skip_member_handler(app, what, name, obj, skip, options):
"""Handler for autodoc-skip-member event."""
# Celery tasks created with the @task decorator have the property
# that *obj.__doc__* and *obj.__class__.__doc__* are equal, which
# trips up the logic in sphinx.ext.autodoc that is supposed to
# suppress repetition of class documentation in an instance of the
# class. This overrides that behavior.
if isinstance(obj, BaseTask) and getattr(obj, '__wrapped__'):
if skip:
return False
return None
def setup(app):
"""Setup Sphinx extension."""
app.setup_extension('sphinx.ext.autodoc')
app.add_autodocumenter(TaskDocumenter)
app.add_directive_to_domain('py', 'task', TaskDirective)
app.add_config_value('celery_task_prefix', '(task)', True)
app.connect('autodoc-skip-member', autodoc_skip_member_handler)
return {
'parallel_read_safe': True
}
|
apache-2.0
| 4,783,719,491,929,786,000 | 31.609091 | 79 | 0.666574 | false | 4.057692 | false | false | false |
guildai/guild
|
guild/serving_util.py
|
1
|
3478
|
# Copyright 2017-2021 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import json
import logging
import socket
from werkzeug import routing
from werkzeug import serving
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.middleware.shared_data import SharedDataMiddleware
from werkzeug.wrappers import Request, Response
log = logging.getLogger("guild")
class QuietRequestHandler(serving.WSGIRequestHandler):
def log(self, type, message, *args):
if type != 'info':
super(QuietRequestHandler, self).log(type, message, *args)
class StaticBase(object):
def __init__(self, exports):
self._app = SharedDataMiddleware(self._not_found, exports)
def handle(self, _req):
return self._app
@staticmethod
def _not_found(_env, _start_resp):
raise NotFound()
class StaticDir(StaticBase):
def __init__(self, dir):
super(StaticDir, self).__init__({"/": dir})
def handle_index(self, _req):
def app(env, start_resp):
env["PATH_INFO"] = "/index.html"
return self._app(env, start_resp)
return app
def make_server(host, port, app, logging=True):
if host is None:
raise RuntimeError("host cannot be None")
if port is None:
raise RuntimeError("port cannot be None")
if logging:
request_handler = serving.WSGIRequestHandler
else:
request_handler = QuietRequestHandler
try:
return serving.make_server(
host, port, app, threaded=True, request_handler=request_handler
)
except socket.error as e:
if host:
raise
log.debug(
"error starting server on %s:%s (%s), " "trying IPv6 default host '::'",
host,
port,
e,
)
return serving.make_server("::", port, app, threaded=True)
def json_resp(data, status=200):
return Response(
json.dumps(data),
status=status,
content_type="application/json",
headers=[("Access-Control-Allow-Origin", "*")],
)
def Rule(path, handler, *args):
return routing.Rule(path, endpoint=(handler, args))
def Map(rules):
return routing.Map([Rule(path, handler, *args) for path, handler, args, in rules])
def dispatch(routes, env, start_resp):
urls = routes.bind_to_environ(env)
try:
(handler, args), kw = urls.match()
except HTTPException as e:
return e(env, start_resp)
else:
args = (Request(env),) + args
kw = _del_underscore_vars(kw)
try:
return handler(*args, **kw)(env, start_resp)
except HTTPException as e:
return e(env, start_resp)
def _del_underscore_vars(kw):
return {k: kw[k] for k in kw if k[0] != "_"}
def App(routes):
def app(env, start_resp):
return dispatch(routes, env, start_resp)
return app
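# A minimal usage sketch (hypothetical, not part of Guild itself): wire a JSON
# handler into the helpers above and serve it locally.
if __name__ == "__main__":
    def _hello(_req, name):
        # `name` is filled in from the URL rule below.
        return json_resp({"hello": name})
    _routes = Map([("/hello/<name>", _hello, [])])
    _httpd = make_server("127.0.0.1", 8000, App(_routes))
    _httpd.serve_forever()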
|
apache-2.0
| 3,184,719,521,864,172,500 | 26.603175 | 86 | 0.642036 | false | 3.907865 | false | false | false |
omelkonian/cds
|
cds/modules/webhooks/views.py
|
1
|
3801
|
# -*- coding: utf-8 -*-
#
# This file is part of CERN Document Server.
# Copyright (C) 2016, 2017 CERN.
#
# CERN Document Server is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Document Server is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Document Server; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Task status manipulation."""
from __future__ import absolute_import
from invenio_db import db
from flask.views import MethodView
from invenio_webhooks.views import blueprint, error_handler
from invenio_oauth2server import require_api_auth, require_oauth_scopes
from invenio_webhooks.views import ReceiverEventResource
from .receivers import build_task_payload
from .status import iterate_result, collect_info, ResultEncoder
class TaskResource(MethodView):
"""Task Endpoint."""
@require_api_auth()
@require_oauth_scopes('webhooks:event')
@error_handler
def put(self, receiver_id, event_id, task_id):
"""Handle PUT request: restart a task."""
event = ReceiverEventResource._get_event(receiver_id, event_id)
payload = build_task_payload(event, task_id)
if payload:
event.receiver.rerun_task(**payload)
db.session.commit()
return '', 204
return '', 400
@require_api_auth()
@require_oauth_scopes('webhooks:event')
@error_handler
def delete(self, receiver_id, event_id, task_id):
"""Handle DELETE request: stop and clean a task."""
event = ReceiverEventResource._get_event(receiver_id, event_id)
payload = build_task_payload(event, task_id)
if payload:
event.receiver.clean_task(**payload)
db.session.commit()
return '', 204
return '', 400
class EventFeedbackResource(MethodView):
"""Event informations."""
@require_api_auth()
@require_oauth_scopes('webhooks:event')
@error_handler
def get(self, receiver_id, event_id):
"""Handle GET request: get more informations."""
event = ReceiverEventResource._get_event(receiver_id, event_id)
raw_info = event.receiver._raw_info(event=event)
def collect(task_name, result):
if isinstance(result.info, Exception):
(args,) = result.info.args
return {
'id': result.id,
'status': result.status,
'info': args,
'name': task_name
}
else:
return collect_info(task_name, result)
result = iterate_result(
raw_info=raw_info, fun=collect)
return ResultEncoder().encode(result), 200
task_item = TaskResource.as_view('task_item')
event_feedback_item = EventFeedbackResource.as_view('event_feedback_item')
blueprint.add_url_rule(
'/hooks/receivers/<string:receiver_id>/events/<string:event_id>'
'/tasks/<string:task_id>',
view_func=task_item,
)
blueprint.add_url_rule(
'/hooks/receivers/<string:receiver_id>/events/<string:event_id>/feedback',
view_func=event_feedback_item,
)
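# Illustrative calls (the receiver id "avc" is made up); all of them require an
# OAuth token carrying the ``webhooks:event`` scope:
#
#   PUT /hooks/receivers/avc/events/<event_id>/tasks/<task_id>
#       -> 204 when the task was found and restarted, 400 otherwise
#   DELETE /hooks/receivers/avc/events/<event_id>/tasks/<task_id>
#       -> 204 when the task was stopped and cleaned, 400 otherwise
#   GET /hooks/receivers/avc/events/<event_id>/feedback
#       -> 200 with a JSON-encoded status tree of the event's tasks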
|
gpl-2.0
| -581,411,640,366,413,000 | 33.87156 | 78 | 0.665614 | false | 3.963504 | false | false | false |
mrakitin/sirepo
|
sirepo/template/elegant_command_parser.py
|
1
|
4455
|
# -*- coding: utf-8 -*-
u"""elegant command parser.
:copyright: Copyright (c) 2016 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdlog, pkdp
from sirepo.template.line_parser import LineParser
import re
_SKIP_COMMANDS = ['subprocess']
def parse_file(command_text):
parser = LineParser(0)
lines = command_text.replace('\r', '').split('\n')
prev_line = ''
commands = []
for line in lines:
parser.increment_line_number()
if re.search(r'^#', line):
continue
line = re.sub(r'\!.*$', '', line)
if not line:
continue
if re.search(r'\&end', line):
if not _parse_line(parser, prev_line + ' ' + line, commands):
break
prev_line = ''
elif re.search(r'\&', line) or len(prev_line):
prev_line += ' ' + line
else:
# ignoring lines between command markers
pass
if prev_line and re.search(r'\&', prev_line):
parser.raise_error('missing &end for command: {}'.format(prev_line))
_update_lattice_names(commands)
return commands
def _parse_array_value(parser):
# read off the end of the array value list
# parse values until a "&end" or "value =" is reached
#
# response[2] = %s.vhrm, %s.hvrm,
# distribution_type[0] = "gaussian", "gaussian",
# enforce_rms_values[0] = 1,1,1,
# distribution_type[0] = gaussian, gaussian, hard-edge,
# distribution_type[0] = 3*"gaussian",
# distribution_cutoff[0] = 3*3,
res = ''
index = parser.get_index()
while True:
value = parser.parse_value()
if value == '&end':
parser.reset_index(index)
break
parser.ignore_whitespace()
if parser.peek_char() == '=':
parser.reset_index(index)
break
if value:
res += value
else:
if parser.peek_char() == ',':
parser.assert_char(',')
res += ','
elif parser.peek_char() == '*':
parser.assert_char('*')
res += '*'
else:
parser.raise_error('expecting an array value')
index = parser.get_index()
if not res:
parser.raise_error('missing array value')
res = re.sub(r',$', '', res)
return res
def _parse_line(parser, line, commands):
parser.set_line(line)
parser.ignore_whitespace()
parser.assert_char('&')
command = PKDict(
_id=parser.next_id(),
_type=parser.parse_value(r'\s+'),
)
if command['_type'] == 'stop':
return False
parser.ignore_whitespace()
while True:
value = parser.parse_value()
if not value:
if parser.peek_char() == ',':
parser.assert_char(',')
continue
parser.raise_error('expecting a command element')
if value == '&end':
break
if parser.peek_char() == '=':
parser.assert_char('=')
if re.search(r'\[', value):
command[value] = _parse_array_value(parser)
else:
command[value] = parser.parse_value(r'[\s,=\!)]')
else:
parser.raise_error('trailing input: {}'.format(value))
parser.assert_end_of_line()
if not command['_type'] in _SKIP_COMMANDS:
commands.append(command)
return True
def _update_lattice_names(commands):
# preserve the name of the first run_setup.lattice
# others may map to previous save_lattice names
is_first_run_setup = True
save_lattices = []
for cmd in commands:
if cmd['_type'] == 'save_lattice':
name = re.sub(r'\%s', '', cmd['filename'])
save_lattices.append(name)
if cmd['_type'] == 'run_setup':
if is_first_run_setup:
is_first_run_setup = False
continue
for index in reversed(range(len(save_lattices))):
if re.search(re.escape(save_lattices[index]), cmd['lattice'], re.IGNORECASE):
cmd['lattice'] = 'save_lattice' if index == 0 else 'save_lattice{}'.format(index + 1)
break
else:
cmd['lattice'] = 'Lattice'
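# Rough sketch of input and output (values are illustrative): a command block
# such as
#
#   &run_setup
#       lattice = "my.lte",
#       p_central_mev = 1001,
#   &end
#
# is returned by parse_file() as a PKDict with _id, _type='run_setup' and one
# string-valued entry per parsed field.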
|
apache-2.0
| -7,543,986,354,780,968,000 | 31.757353 | 105 | 0.54119 | false | 3.843831 | false | false | false |
rohithkodali/numword
|
numword/numword_en_gb.py
|
1
|
1036
|
#This file is part of numword. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
'''
numword for EN_GB
'''
from .numword_en import NumWordEN
class NumWordENGB(NumWordEN):
'''
NumWord EN_GB
'''
def currency(self, val, longval=True):
'''
Convert to currency
'''
return self._split(val, hightxt=u"pound/s", lowtxt=u"pence",
jointxt=u"and", longval=longval)
_NW = NumWordENGB()
def cardinal(value):
'''
Convert to cardinal
'''
return _NW.cardinal(value)
def ordinal(value):
'''
Convert to ordinal
'''
return _NW.ordinal(value)
def ordinal_number(value):
'''
Convert to ordinal number
'''
return _NW.ordinal_number(value)
def currency(value, longval=True):
'''
Convert to currency
'''
return _NW.currency(value, longval=longval)
def year(value, longval=True):
'''
Convert to year
'''
return _NW.year(value, longval=longval)
|
lgpl-2.1
| 786,346,942,397,314,000 | 17.175439 | 71 | 0.617761 | false | 3.453333 | false | false | false |
JensRantil/http-trigger-trigger
|
release.py
|
1
|
1521
|
"""Script for creating releases."""
import os
import sys
import shutil
if len(sys.argv) != 2:
print "Usage: ./" + sys.argv[0] + " <tag/version>"
sys.exit(1)
version = sys.argv[1]
if 'GOPATH' not in os.environ:
print "GOPATH not set."
sys.exit(1)
VARIANTS = [('linux', ['386', 'amd64', 'arm']),
('darwin', ['amd64', '386'])]
releasepath = 'releases'
for opsystem, variants in VARIANTS:
for variant in variants:
variantdir = "http-trigger-trigger-{0}-{1}".format(opsystem, variant)
print "Building release for {0}...".format(variantdir)
variantpath = os.path.join(releasepath, variantdir)
os.makedirs(variantpath)
os.environ['GOOS'] = opsystem
os.environ['GOARCH'] = variant
exitcode = os.system('go build http-trigger-trigger.go')
if exitcode != 0:
print "Error building {0}. Exitting...".format(variantdir)
sys.exit(1)
shutil.move('http-trigger-trigger', variantpath)
shutil.copy('README.rst', variantpath)
shutil.copy('setup.ini.example', variantpath)
#os.system('tar czf {0}.tar.gz {1}'.format(variantdir, variantpath))
tarfile = os.path.join(releasepath,
variantdir + "-" + version + '.tar.gz')
os.system('tar -C {0} -czf {1} {2}'.format(releasepath,
tarfile,
variantdir))
shutil.rmtree(variantpath)
|
mit
| -53,057,553,182,827,090 | 32.8 | 77 | 0.5595 | false | 3.682809 | false | false | false |
NeilBryant/check_mk
|
web/htdocs/metrics.py
|
1
|
47008
|
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# Frequently used variable names:
# perf_data_string: Raw performance data as sent by the core, e.g "foo=17M;1;2;4;5"
# perf_data: Split performance data, e.g. [("foo", "17", "M", "1", "2", "4", "5")]
# translated_metrics: Completely parsed and translated into metrics, e.g. { "foo" : { "value" : 17.0, "unit" : { "render" : ... }, ... } }
# color: RGB color representation ala HTML, e.g. "#ffbbc3" or "#FFBBC3", len() is always 7!
# color_rgb: RGB color split into triple (r, g, b), where r,b,g in (0.0 .. 1.0)
# unit_name: The ID of a unit, e.g. "%"
# unit: The definition-dict of a unit like in unit_info
# graph_template: Template for a graph. Essentially a dict with the key "metrics"
import math, time, colorsys
import config, defaults, pagetypes, table
from lib import *
from valuespec import *
import livestatus
# .--Plugins-------------------------------------------------------------.
# | ____ _ _ |
# | | _ \| |_ _ __ _(_)_ __ ___ |
# | | |_) | | | | |/ _` | | '_ \/ __| |
# | | __/| | |_| | (_| | | | | \__ \ |
# | |_| |_|\__,_|\__, |_|_| |_|___/ |
# | |___/ |
# +----------------------------------------------------------------------+
# | Typical code for loading Multisite plugins of this module |
# '----------------------------------------------------------------------'
# Datastructures and functions needed before plugins can be loaded
loaded_with_language = False
def load_plugins():
global loaded_with_language
if loaded_with_language == current_language:
return
global unit_info ; unit_info = {}
global metric_info ; metric_info = {}
global check_metrics ; check_metrics = {}
global perfometer_info ; perfometer_info = []
global graph_info ; graph_info = []
load_web_plugins("metrics", globals())
loaded_with_language = current_language
#.
# .--Constants-----------------------------------------------------------.
# | ____ _ _ |
# | / ___|___ _ __ ___| |_ __ _ _ __ | |_ ___ |
# | | | / _ \| '_ \/ __| __/ _` | '_ \| __/ __| |
# | | |__| (_) | | | \__ \ || (_| | | | | |_\__ \ |
# | \____\___/|_| |_|___/\__\__,_|_| |_|\__|___/ |
# | |
# +----------------------------------------------------------------------+
# | Various constants to be used by the declarations of the plugins. |
# '----------------------------------------------------------------------'
KB = 1024
MB = KB * 1024
GB = MB * 1024
TB = GB * 1024
PB = TB * 1024
m = 0.001
K = 1000
M = K * 1000
G = M * 1000
T = G * 1000
P = T * 1000
scale_symbols = {
m : "m",
1 : "",
KB : "k",
MB : "M",
GB : "G",
TB : "T",
PB : "P",
K : "k",
M : "M",
G : "G",
T : "T",
P : "P",
}
scalar_colors = {
"warn" : "#ffff00",
"crit" : "#ff0000",
}
#.
# .--Helpers-------------------------------------------------------------.
# | _ _ _ |
# | | | | | ___| |_ __ ___ _ __ ___ |
# | | |_| |/ _ \ | '_ \ / _ \ '__/ __| |
# | | _ | __/ | |_) | __/ | \__ \ |
# | |_| |_|\___|_| .__/ \___|_| |___/ |
# | |_| |
# +----------------------------------------------------------------------+
# | Various helper functions |
# '----------------------------------------------------------------------'
# "45.0" -> 45.0, "45" -> 45
def float_or_int(v):
try:
return int(v)
except:
return float(v)
def metric_to_text(metric, value=None):
if value == None:
value = metric["value"]
return metric["unit"]["render"](value)
# A few helper function to be used by the definitions
#.
# .--Colors--------------------------------------------------------------.
# | ____ _ |
# | / ___|___ | | ___ _ __ ___ |
# | | | / _ \| |/ _ \| '__/ __| |
# | | |__| (_) | | (_) | | \__ \ |
# | \____\___/|_|\___/|_| |___/ |
# | |
# +----------------------------------------------------------------------+
# | Functions and constants dealing with colors |
# '----------------------------------------------------------------------'
cmk_color_palette = {
# do not use:
# "0" : (0.33, 1, 1), # green
# "1" : (0.167, 1, 1), # yellow
# "2" : (0, 1, 1), # red
# red area
"11" : (0.775, 1, 1),
"12" : (0.8, 1, 1),
"13" : (0.83, 1, 1),
"14" : (0.05, 1, 1),
"15" : (0.08, 1, 1),
"16" : (0.105, 1, 1),
# yellow area
"21" : (0.13, 1, 1),
"22" : (0.14, 1, 1),
"23" : (0.155, 1, 1),
"24" : (0.185, 1, 1),
"25" : (0.21, 1, 1),
"26" : (0.25, 1, 1),
# green area
"31" : (0.45, 1, 1),
"32" : (0.5, 1, 1),
"33" : (0.515, 1, 1),
"34" : (0.53, 1, 1),
"35" : (0.55, 1, 1),
"36" : (0.57, 1, 1),
# blue area
"41" : (0.59, 1, 1),
"42" : (0.62, 1, 1),
"43" : (0.66, 1, 1),
"44" : (0.71, 1, 1),
"45" : (0.73, 1, 1),
"46" : (0.75, 1, 1),
# special colors
"51" : (0, 0, 0.5), # grey_50
"52" : (0.067, 0.7, 0.5), # brown 1
"53" : (0.083, 0.8, 0.55), # brown 2
}
def get_palette_color_by_index(i, shading='a'):
color_key = sorted(cmk_color_palette.keys())[i % len(cmk_color_palette)]
return "%s/%s" % (color_key, shading)
# 23/c -> #ff8040
# #ff8040 -> #ff8040
def parse_color_into_hexrgb(color_string):
if color_string[0] == "#":
return color_string
elif "/" in color_string:
cmk_color_index, color_shading = color_string.split("/")
hsv = list(cmk_color_palette[cmk_color_index])
# Colors of the yellow ("2") and green ("3") area need to be darkened (in third place of the hsv tuple),
# colors of the red and blue area need to be brightened (in second place of the hsv tuple).
# For both shadings we need different factors.
cmk_color_nuance_index = 1
cmk_color_nuance_factor = 0.6
if cmk_color_index[0] in ["2", "3"]:
cmk_color_nuance_index = 2
cmk_color_nuance_factor = 0.8
if color_shading == 'b':
hsv[cmk_color_nuance_index] *= cmk_color_nuance_factor
color_hexrgb = hsv_to_hexrgb(hsv)
return color_hexrgb
else:
return "#808080"
def hsv_to_hexrgb(hsv):
return render_color(colorsys.hsv_to_rgb(*hsv))
# "#ff0080" -> (1.0, 0.0, 0.5)
def parse_color(color):
try:
return tuple([ int(color[a:a+2], 16) / 255.0 for a in (1,3,5) ])
except Exception, e:
raise MKGeneralException(_("Invalid color specification '%s'") % color)
def render_color(color_rgb):
return "#%02x%02x%02x" % (
int(color_rgb[0] * 255),
int(color_rgb[1] * 255),
int(color_rgb[2] * 255),)
# Make a color darker. v ranges from 0 (not darker) to 1 (black)
def darken_color(rgb, v):
def darken(x, v):
return x * (1.0 - v)
return tuple([ darken(x, v) for x in rgb ])
# Make a color lighter. v ranges from 0 (not lighter) to 1 (white)
def lighten_color(rgb, v):
def lighten(x, v):
return x + ((1.0 - x) * v)
return tuple([ lighten(x, v) for x in rgb ])
def mix_colors(a, b):
return tuple([
(ca + cb) / 2.0
for (ca, cb)
in zip(a, b)
])
#.
# .--Evaluation----------------------------------------------------------.
# | _____ _ _ _ |
# | | ____|_ ____ _| |_ _ __ _| |_(_) ___ _ __ |
# | | _| \ \ / / _` | | | | |/ _` | __| |/ _ \| '_ \ |
# | | |___ \ V / (_| | | |_| | (_| | |_| | (_) | | | | |
# | |_____| \_/ \__,_|_|\__,_|\__,_|\__|_|\___/|_| |_| |
# | |
# +----------------------------------------------------------------------+
# | Parsing of performance data into metrics, evaluation of expressions |
# '----------------------------------------------------------------------'
# Convert perf_data_string into perf_data, extract check_command
def parse_perf_data(perf_data_string, check_command=None):
# Strip away arguments like in "check_http!-H mathias-kettner.de"
check_command = check_command.split("!")[0]
if not perf_data_string:
return None, check_command
parts = perf_data_string.split()
# Try if check command is appended to performance data
# in a PNP like style
if parts[-1].startswith("[") and parts[-1].endswith("]"):
check_command = parts[-1][1:-1]
del parts[-1]
# Python's isdigit() works only on str. We deal with unicode since
# we deal with data coming from Livestatus
def isdigit(x):
return x in [ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9' ]
# Parse performance data, at least try
try:
perf_data = []
for part in parts:
varname, values = part.split("=")
value_parts = values.split(";")
while len(value_parts) < 5:
value_parts.append(None)
value_text, warn, crit, min, max = value_parts[0:5]
if value_text == "":
continue # ignore useless empty variable
# separate value from unit
i = 0
while i < len(value_text) and (isdigit(value_text[i]) or value_text[i] in ['.', ',', '-']):
i += 1
unit_name = value_text[i:]
value = value_text[:i]
perf_data.append((varname, value, unit_name, warn, crit, min, max))
except:
if config.debug:
raise
perf_data = None
return perf_data, check_command
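# Informal example: parse_perf_data("load1=1.62;10;20;0;8", "check_mk-cpu.loads")
# returns ([("load1", "1.62", "", "10", "20", "0", "8")], "check_mk-cpu.loads").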
# Convert ASCII-based performance data as output from a check plugin
# into floating point numbers, do scaling if necessary.
# Simple example for perf_data: [(u'temp', u'48.1', u'', u'70', u'80', u'', u'')]
# Result for this example:
# { "temp" : "value" : 48.1, "warn" : 70, "crit" : 80, "unit" : { ... } }
def translate_metrics(perf_data, check_command):
cm = check_metrics.get(check_command, {})
translated_metrics = {}
color_index = 0
for nr, entry in enumerate(perf_data):
varname = entry[0]
value_text = entry[1]
translation_entry = {} # Default: no translation necessary
if varname in cm:
translation_entry = cm[varname]
else:
for orig_varname, te in cm.items():
if orig_varname[0] == "~" and regex(orig_varname[1:]).match(varname): # Regex entry
translation_entry = te
break
# Translate name
metric_name = translation_entry.get("name", varname)
if metric_name in translated_metrics:
continue # ignore duplicate value
if metric_name not in metric_info:
color_index += 1
palette_color = get_palette_color_by_index(color_index)
mi = {
"title" : metric_name.title(),
"unit" : "",
"color" : parse_color_into_hexrgb(palette_color),
}
else:
mi = metric_info[metric_name].copy()
mi["color"] = parse_color_into_hexrgb(mi["color"])
# Optional scaling
scale = translation_entry.get("scale", 1.0)
new_entry = {
"value" : float_or_int(value_text) * scale,
"orig_name" : varname,
"scale" : scale, # needed for graph definitions
"scalar" : {},
}
# Do not create graphs for ungraphed metrics if listed here
new_entry["auto_graph"] = translation_entry.get("auto_graph", True)
# Add warn, crit, min, max
for index, key in [ (3, "warn"), (4, "crit"), (5, "min"), (6, "max") ]:
if len(entry) < index + 1:
break
elif entry[index]:
try:
value = float_or_int(entry[index])
new_entry["scalar"][key] = value * scale
except:
if config.debug:
raise
pass # empty or invalid number
new_entry.update(mi)
new_entry["unit"] = unit_info[new_entry["unit"]]
translated_metrics[metric_name] = new_entry
# TODO: warn, crit, min, max
# if entry[2]:
# # TODO: lower and upper levels
# translated_metrics[metric_name]["warn"] = float(entry[2])
return translated_metrics
# Evaluates an expression, returns a triple of value, unit and color.
# e.g. "fs_used:max" -> 12.455, "b", "#00ffc6",
# e.g. "fs_used(%)" -> 17.5, "%", "#00ffc6",
# e.g. "fs_used:max(%)" -> 100.0, "%", "#00ffc6",
# e.g. 123.4 -> 123.4, "", None
# e.g. "123.4#ff0000" -> 123.4, "", "#ff0000",
# Note:
# "fs_growth.max" is the same as fs_growth. The .max is just
# relevant when fetching RRD data and is used for selecting
# the consolidation function MAX.
def evaluate(expression, translated_metrics):
if type(expression) in (float, int):
return evaluate_literal(expression, translated_metrics)
else:
if "#" in expression:
expression, explicit_color = expression.rsplit("#", 1) # drop appended color information
else:
explicit_color = None
if "@" in expression:
expression, explicit_unit_name = expression.rsplit("@", 1) # appended unit name
else:
explicit_unit_name = None
value, unit, color = evaluate_rpn(expression, translated_metrics)
if explicit_color:
color = "#" + explicit_color
if explicit_unit_name:
unit = unit_info[explicit_unit_name]
return value, unit, color
# TODO: real unit computation!
def unit_mult(u1, u2):
if u1 == unit_info[""] or u1 == unit_info["count"]:
return u2
else:
return u1
unit_div = unit_mult
unit_add = unit_mult
unit_sub = unit_mult
def operator_minmax(a, b, func):
v = func(a[0], b[0])
# Use unit and color of the winner. If the winner
# has none (e.g. it is a scalar like 0), then take
# unit and color of the loser.
if v == a[0]:
winner = a
loser = b
else:
winner = b
loser = a
if winner[1] != unit_info[""]:
unit = winner[1]
else:
unit = loser[1]
return v, unit, winner[2] or loser[2]
# TODO: Do real unit computation, detect non-matching units
rpn_operators = {
"+" : lambda a, b: ((a[0] + b[0]), unit_mult(a[1], b[1]), choose_operator_color(a[2], b[2])),
"-" : lambda a, b: ((a[0] - b[0]), unit_sub(a[1], b[1]), choose_operator_color(a[2], b[2])),
"*" : lambda a, b: ((a[0] * b[0]), unit_add(a[1], b[1]), choose_operator_color(a[2], b[2])),
"/" : lambda a, b: ((a[0] / b[0]), unit_div(a[1], b[1]), choose_operator_color(a[2], b[2])),
">" : lambda a, b: ((a[0] > b[0] and 1.0 or 0.0), unit_info[""], "#000000"),
"<" : lambda a, b: ((a[0] < b[0] and 1.0 or 0.0), unit_info[""], "#000000"),
">=" : lambda a, b: ((a[0] >= b[0] and 1.0 or 0.0), unit_info[""], "#000000"),
"<=" : lambda a, b: ((a[0] <= b[0] and 1.0 or 0.0), unit_info[""], "#000000"),
"MIN" : lambda a, b: operator_minmax(a, b, min),
"MAX" : lambda a, b: operator_minmax(a, b, max),
}
def choose_operator_color(a, b):
if a == None:
return b
elif b == None:
return a
else:
return render_color(mix_colors(parse_color(a), parse_color(b)))
def evaluate_rpn(expression, translated_metrics):
parts = expression.split(",")
stack = [] # stack tuples of (value, unit, color)
while parts:
operator_name = parts[0]
parts = parts[1:]
if operator_name in rpn_operators:
if len(stack) < 2:
raise MKGeneralException("Syntax error in expression '%s': too few operands" % expression)
op1 = stack[-2]
op2 = stack[-1]
result = rpn_operators[operator_name](op1, op2)
stack = stack[:-2] + [ result ]
else:
stack.append(evaluate_literal(operator_name, translated_metrics))
if len(stack) != 1:
raise MKGeneralException("Syntax error in expression '%s': too many operands left" % expression)
return stack[0]
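# Informal walk-through: for the expression "fs_used,fs_size,/" both metric
# names are pushed on the stack as (value, unit, color) triples and "/" pops
# them again, yielding fs_used divided by fs_size with the mixed color and the
# unit chosen by unit_div().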
def evaluate_literal(expression, translated_metrics):
if type(expression) == int:
return float(expression), unit_info["count"], None
elif type(expression) == float:
return expression, unit_info[""], None
elif expression[0].isdigit() or expression[0] == '-':
return float(expression), unit_info[""], None
if expression.endswith(".max") or expression.endswith(".min") or expression.endswith(".average"):
expression = expression.rsplit(".", 1)[0]
color = None
# TODO: Error handling with useful exceptions
if expression.endswith("(%)"):
percent = True
expression = expression[:-3]
else:
percent = False
if ":" in expression:
varname, scalarname = expression.split(":")
value = translated_metrics[varname]["scalar"].get(scalarname)
color = scalar_colors.get(scalarname)
else:
varname = expression
value = translated_metrics[varname]["value"]
if percent:
maxvalue = translated_metrics[varname]["scalar"]["max"]
if maxvalue != 0:
value = 100.0 * float(value) / maxvalue
else:
value = 0.0
unit = unit_info["%"]
else:
unit = translated_metrics[varname]["unit"]
if color == None:
color = parse_color_into_hexrgb(metric_info[varname]["color"])
return value, unit, color
# Replace expressions in strings like CPU Load - %(load1:max@count) CPU Cores"
def replace_expressions(text, translated_metrics):
def eval_to_string(match):
expression = match.group()[2:-1]
unit_name = None
if "@" in expression:
expression, unit_name = expression.split("@")
value, unit, color = evaluate(expression, translated_metrics)
if unit_name:
unit = unit_info[unit_name]
if value != None:
return unit["render"](value)
else:
return _("n/a")
r = regex(r"%\([^)]*\)")
return r.sub(eval_to_string, text)
#.
# .--Perf-O-Meters-------------------------------------------------------.
# | ____ __ ___ __ __ _ |
# | | _ \ ___ _ __ / _| / _ \ | \/ | ___| |_ ___ _ __ ___ |
# | | |_) / _ \ '__| |_ _____| | | |_____| |\/| |/ _ \ __/ _ \ '__/ __| |
# | | __/ __/ | | _|_____| |_| |_____| | | | __/ || __/ | \__ \ |
# | |_| \___|_| |_| \___/ |_| |_|\___|\__\___|_| |___/ |
# | |
# +----------------------------------------------------------------------+
# | Implementation of Perf-O-Meters |
# '----------------------------------------------------------------------'
def get_perfometers(translated_metrics):
for perfometer in perfometer_info:
if perfometer_possible(perfometer, translated_metrics):
yield perfometer
# TODO: We will run into a performance problem here when we
# have more and more Perf-O-Meter definitions.
# TODO: remove all tuple-perfometers and use dicts
def perfometer_possible(perfometer, translated_metrics):
if type(perfometer) == dict:
if perfometer["type"] == "linear":
required = perfometer["segments"][:]
elif perfometer["type"] == "logarithmic":
required = [ perfometer["metric"] ]
else:
pass # TODO: dual, stacked?
if "label" in perfometer and perfometer["label"] != None:
required.append(perfometer["label"][0])
if "total" in perfometer:
required.append(perfometer["total"])
for req in required:
try:
evaluate(req, translated_metrics)
except:
return False
if "condition" in perfometer:
try:
value, color, unit = evaluate(perfometer["condition"], translated_metrics)
if value == 0.0:
return False
except:
return False
return True
perf_type, perf_args = perfometer
if perf_type == "logarithmic":
required = [ perf_args[0] ]
elif perf_type == "linear":
required = perf_args[0]
if perf_args[1]:
required = required + [perf_args[1]] # Reference value for 100%
if perf_args[2]:
required = required + [perf_args[2]] # Labelling value
elif perf_type in ("stacked", "dual"):
for sub_perf in perf_args:
if not perfometer_possible(sub_perf, translated_metrics):
return False
return True
else:
raise MKInternalError(_("Undefined Perf-O-Meter type '%s'") % perf_type)
for req in required:
try:
evaluate(req, translated_metrics)
except:
return False
return True
def metricometer_logarithmic(value, half_value, base, color):
# Negative values are printed like positive ones (e.g. time offset)
value = abs(float(value))
if value == 0.0:
pos = 0
else:
half_value = float(half_value)
h = math.log(half_value, base) # value to be displayed at 50%
pos = 50 + 10.0 * (math.log(value, base) - h)
if pos < 2:
pos = 2
if pos > 98:
pos = 98
return [ (pos, color), (100 - pos, "#ffffff") ]
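# Worked example: with half_value=10 and base=10 a value of 10 is drawn at 50%,
# 100 at 60% and 1 at 40%; every factor of 10 shifts the bar by 10 percentage
# points, clamped to the 2..98 range.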
def build_perfometer(perfometer, translated_metrics):
# TODO: convert all non-dict perfometers
if type(perfometer) == dict:
if perfometer["type"] == "logarithmic":
value, unit, color = evaluate(perfometer["metric"], translated_metrics)
label = unit["render"](value)
stack = [ metricometer_logarithmic(value, perfometer["half_value"], perfometer["exponent"], color) ]
elif perfometer["type"] == "linear":
entry = []
stack = [entry]
summed = 0.0
for ex in perfometer["segments"]:
value, unit, color = evaluate(ex, translated_metrics)
summed += value
if "total" in perfometer:
total, unit, color = evaluate(perfometer["total"], translated_metrics)
else:
total = summed
if total == 0:
entry.append((100.0, "#ffffff"))
else:
for ex in perfometer["segments"]:
value, unit, color = evaluate(ex, translated_metrics)
entry.append((100.0 * value / total, color))
# Paint the rest only if it is positive and larger than one promille
if total - summed > 0.001:
entry.append((100.0 * (total - summed) / total, "#ffffff"))
# Use unit of the first metric for output of the sum. We assume that all
# stacked metrics have the same unit anyway
value, unit, color = evaluate(perfometer["segments"][0], translated_metrics)
label = unit["render"](summed)
# "label" option in all Perf-O-Meters overrides automatic label
if "label" in perfometer:
if perfometer["label"] == None:
label = ""
else:
expr, unit_name = perfometer["label"]
value, unit, color = evaluate(expr, translated_metrics)
if unit_name:
unit = unit_info[unit_name]
label = unit["render"](value)
return label, stack
# This stuff is deprecated and will be removed soon. Watch out!
perfometer_type, definition = perfometer
if perfometer_type == "logarithmic":
expression, median, exponent = definition
value, unit, color = evaluate(expression, translated_metrics)
label = unit["render"](value)
stack = [ metricometer_logarithmic(value, median, exponent, color) ]
# TODO: this part will be removed
elif perfometer_type == "linear":
entry = []
stack = [entry]
# NOTE: This might be converted to a dict later.
metrics_expressions, total_spec, label_expression = definition
summed = 0.0
for ex in metrics_expressions:
value, unit_name, color = evaluate(ex, translated_metrics)
summed += value
if total_spec == None:
total = summed
else:
total, unit_name, color = evaluate(total_spec, translated_metrics)
if total == 0:
entry.append((100.0, "#ffffff"))
else:
for ex in metrics_expressions:
value, unit_name, color = evaluate(ex, translated_metrics)
entry.append((100.0 * value / total, color))
# Paint the rest only if it is positive and larger than one promille
if total - summed > 0.001:
entry.append((100.0 * (total - summed) / total, "#ffffff"))
# Use unit of the first metric for output of the sum. We assume that all
# stacked metrics have the same unit anyway
if label_expression:
expr, unit_name = label_expression
value, unit, color = evaluate(expr, translated_metrics)
if unit_name:
unit = unit_info[unit_name]
label = unit["render"](summed)
else: # absolute
value, unit, color = evaluate(metrics_expressions[0], translated_metrics)
label = unit["render"](summed)
elif perfometer_type == "stacked":
stack = []
labels = []
for sub_perf in definition:
sub_label, sub_stack = build_perfometer(sub_perf, translated_metrics)
stack.append(sub_stack[0])
if sub_label:
labels.append(sub_label)
if labels:
label = " / ".join(labels)
else:
label = ""
return label, stack
elif perfometer_type == "dual":
labels = []
if len(definition) != 2:
raise MKInternalError(_("Perf-O-Meter of type 'dual' must contain exactly two definitions, not %d") % len(definition))
content = []
for nr, sub_perf in enumerate(definition):
sub_label, sub_stack = build_perfometer(sub_perf, translated_metrics)
if len(sub_stack) != 1:
raise MKInternalError(_("Perf-O-Meter of type 'dual' must only contain plain Perf-O-Meters"))
half_stack = [ (value/2, color) for (value, color) in sub_stack[0] ]
if nr == 0:
half_stack.reverse()
content += half_stack
if sub_label:
labels.append(sub_label)
if labels:
label = " / ".join(labels)
else:
label = ""
return label, [ content ]
else:
raise MKInternalError(_("Unsupported Perf-O-Meter type '%s'") % perfometer_type)
return label, stack
#.
# .--Graphs--------------------------------------------------------------.
# | ____ _ |
# | / ___|_ __ __ _ _ __ | |__ ___ |
# | | | _| '__/ _` | '_ \| '_ \/ __| |
# | | |_| | | | (_| | |_) | | | \__ \ |
# | \____|_| \__,_| .__/|_| |_|___/ |
# | |_| |
# +----------------------------------------------------------------------+
# | Implementation of time graphs - basic code, not the rendering |
# | Rendering of the graphs is done by PNP4Nagios, we just create PHP |
# | templates for PNP here.
# '----------------------------------------------------------------------'
def get_graph_templates(translated_metrics):
if not translated_metrics:
return []
explicit_templates = get_explicit_graph_templates(translated_metrics)
already_graphed_metrics = get_graphed_metrics(explicit_templates)
implicit_templates = get_implicit_graph_templates(translated_metrics, already_graphed_metrics)
return explicit_templates + implicit_templates
def get_explicit_graph_templates(translated_metrics):
templates = []
for graph_template in graph_info:
if graph_possible(graph_template, translated_metrics):
templates.append(graph_template)
elif graph_possible_without_optional_metrics(graph_template, translated_metrics):
templates.append(graph_without_missing_optional_metrics(graph_template, translated_metrics))
return templates
def get_implicit_graph_templates(translated_metrics, already_graphed_metrics):
templates = []
for metric_name, metric_entry in sorted(translated_metrics.items()):
if metric_entry["auto_graph"] and metric_name not in already_graphed_metrics:
templates.append(generic_graph_template(metric_name))
return templates
def get_graphed_metrics(graph_templates):
graphed_metrics = set([])
for graph_template in graph_templates:
graphed_metrics.update(metrics_used_by_graph(graph_template))
return graphed_metrics
def metrics_used_by_graph(graph_template):
used_metrics = []
for metric_definition in graph_template["metrics"]:
used_metrics += list(metrics_used_in_definition(metric_definition[0]))
return used_metrics
def metrics_used_in_definition(metric_definition):
without_unit = metric_definition.split("@")[0]
without_color = metric_definition.split("#")[0]
parts = without_color.split(",")
for part in parts:
metric_name = part.split(".")[0] # drop .min, .max, .average
if metric_name in metric_info:
yield metric_name
def graph_possible(graph_template, translated_metrics):
for metric_definition in graph_template["metrics"]:
try:
evaluate(metric_definition[0], translated_metrics)
except Exception, e:
return False
# Allow graphs to be disabled if certain (better) metrics
# are available
if "conflicting_metrics" in graph_template:
for var in graph_template["conflicting_metrics"]:
if var in translated_metrics:
return False
return True
def graph_possible_without_optional_metrics(graph_template, translated_metrics):
if "optional_metrics" in graph_template:
return graph_possible(graph_template,
add_fake_metrics(translated_metrics, graph_template["optional_metrics"]))
def graph_without_missing_optional_metrics(graph_template, translated_metrics):
working_metrics = []
for metric_definition in graph_template["metrics"]:
try:
evaluate(metric_definition[0], translated_metrics)
working_metrics.append(metric_definition)
except:
pass
reduced_graph_template = graph_template.copy()
reduced_graph_template["metrics"] = working_metrics
return reduced_graph_template
def add_fake_metrics(translated_metrics, metric_names):
with_fake = translated_metrics.copy()
for metric_name in metric_names:
with_fake[metric_name] = {
"value" : 1.0,
"scale" : 1.0,
"unit" : unit_info[""],
"color" : "#888888",
}
return with_fake
def generic_graph_template(metric_name):
return {
"metrics" : [
( metric_name, "area" ),
],
"scalars" : [
metric_name + ":warn",
metric_name + ":crit",
]
}
def get_graph_range(graph_template, translated_metrics):
if "range" in graph_template:
min_value, max_value = [
evaluate(r, translated_metrics)[0]
for r in graph_template["range"]
]
else:
# Compute range of displayed data points
max_value = None
min_value = None
return min_value, max_value
# Called with exactly one variable: the template ID. Example:
# "check_mk-kernel.util:guest,steal,system,user,wait".
def page_pnp_template():
template_id = html.var("id")
check_command, perf_var_string = template_id.split(":", 1)
perf_var_names = perf_var_string.split(",")
# Fake performance values in order to be able to find possible graphs
perf_data = [ ( varname, 1, "", 1, 1, 1, 1 ) for varname in perf_var_names ]
translated_metrics = translate_metrics(perf_data, check_command)
if not translated_metrics:
return # check not supported
    # Collect output in a string. In case of an exception, do not output
    # any definitions.
output = ""
for graph_template in get_graph_templates(translated_metrics):
graph_code = render_graph_pnp(graph_template, translated_metrics)
output += graph_code
html.write(output)
# TODO: some_value.max not yet working
def render_graph_pnp(graph_template, translated_metrics):
graph_title = None
vertical_label = None
rrdgraph_commands = ""
legend_precision = graph_template.get("legend_precision", 2)
legend_scale = graph_template.get("legend_scale", 1)
legend_scale_symbol = scale_symbols[legend_scale]
# Define one RRD variable for each of the available metrics.
# Note: We need to use the original name, not the translated one.
for var_name, metrics in translated_metrics.items():
rrd = "$RRDBASE$_" + pnp_cleanup(metrics["orig_name"]) + ".rrd"
scale = metrics["scale"]
unit = metrics["unit"]
render_scale = unit.get("render_scale", 1)
if scale != 1.0 or render_scale != 1.0:
rrdgraph_commands += "DEF:%s_UNSCALED=%s:1:MAX " % (var_name, rrd)
rrdgraph_commands += "CDEF:%s=%s_UNSCALED,%f,* " % (var_name, var_name, scale * render_scale)
else:
rrdgraph_commands += "DEF:%s=%s:1:MAX " % (var_name, rrd)
# Scaling for legend
rrdgraph_commands += "CDEF:%s_LEGSCALED=%s,%f,/ " % (var_name, var_name, legend_scale)
# Prepare negative variants for upside-down graph
rrdgraph_commands += "CDEF:%s_NEG=%s,-1,* " % (var_name, var_name)
rrdgraph_commands += "CDEF:%s_LEGSCALED_NEG=%s_LEGSCALED,-1,* " % (var_name, var_name)
# Compute width of columns in case of mirrored legend
total_width = 89 # characters
left_width = max([len(_("Average")), len(_("Maximum")), len(_("Last"))]) + 2
column_width = (total_width - left_width) / len(graph_template["metrics"]) - 2
# Now add areas and lines to the graph
graph_metrics = []
# Graph with upside down metrics? (e.g. for Disk IO)
have_upside_down = False
# Compute width of the right column of the legend
max_title_length = 0
for nr, metric_definition in enumerate(graph_template["metrics"]):
if len(metric_definition) >= 3:
title = metric_definition[2]
        elif not "," in metric_definition[0]:
metric_name = metric_definition[0].split("#")[0]
mi = translated_metrics[metric_name]
title = mi["title"]
else:
title = ""
max_title_length = max(max_title_length, len(title))
for nr, metric_definition in enumerate(graph_template["metrics"]):
metric_name = metric_definition[0]
line_type = metric_definition[1] # "line", "area", "stack"
# Optional title, especially for derived values
if len(metric_definition) >= 3:
title = metric_definition[2]
else:
title = ""
# Prefixed minus renders the metrics in negative direction
if line_type[0] == '-':
have_upside_down = True
upside_down = True
upside_down_factor = -1
line_type = line_type[1:]
upside_down_suffix = "_NEG"
else:
upside_down = False
upside_down_factor = 1
upside_down_suffix = ""
if line_type == "line":
draw_type = "LINE"
draw_stack = ""
elif line_type == "area":
draw_type = "AREA"
draw_stack = ""
elif line_type == "stack":
draw_type = "AREA"
draw_stack = ":STACK"
# User can specify alternative color using a suffixed #aabbcc
if '#' in metric_name:
metric_name, custom_color = metric_name.split("#", 1)
else:
custom_color = None
commands = ""
        # Derived value with RPN syntax (evaluated by RRDTool!).
if "," in metric_name:
# We evaluate just in order to get color and unit.
# TODO: beware of division by zero. All metrics are set to 1 here.
value, unit, color = evaluate(metric_name, translated_metrics)
# Choose a unique name for the derived variable and compute it
commands += "CDEF:DERIVED%d=%s " % (nr , metric_name)
if upside_down:
commands += "CDEF:DERIVED%d_NEG=DERIVED%d,-1,* " % (nr, nr)
metric_name = "DERIVED%d" % nr
# Scaling and upsidedown handling for legend
commands += "CDEF:%s_LEGSCALED%s=%s,%f,/ " % (metric_name, upside_down_suffix, metric_name, legend_scale * upside_down_factor)
else:
mi = translated_metrics[metric_name]
if not title:
title = mi["title"]
color = parse_color_into_hexrgb(mi["color"])
unit = mi["unit"]
if custom_color:
color = "#" + custom_color
# Paint the graph itself
        # TODO: Compute the title width intelligently. With legend = "mirrored" the
        # available width has to be determined and split across all titles.
right_pad = " " * (max_title_length - len(title))
commands += "%s:%s%s%s:\"%s%s\"%s " % (draw_type, metric_name, upside_down_suffix, color, title.replace(":", "\\:"), right_pad, draw_stack)
if line_type == "area":
commands += "LINE:%s%s%s " % (metric_name, upside_down_suffix, render_color(darken_color(parse_color(color), 0.2)))
unit_symbol = unit["symbol"]
if unit_symbol == "%":
unit_symbol = "%%"
else:
unit_symbol = " " + unit_symbol
graph_metrics.append((metric_name, unit_symbol, commands))
        # Use the title and label of this metric as defaults for the graph
if title and not graph_title:
graph_title = title
if not vertical_label:
vertical_label = unit["title"]
    # Now create the rrdgraph commands for all metrics - according to the chosen layout
for metric_name, unit_symbol, commands in graph_metrics:
rrdgraph_commands += commands
legend_symbol = unit_symbol
if unit_symbol and unit_symbol[0] == " ":
legend_symbol = " %s%s" % (legend_scale_symbol, unit_symbol[1:])
for what, what_title in [ ("AVERAGE", _("average")), ("MAX", _("max")), ("LAST", _("last")) ]:
rrdgraph_commands += "GPRINT:%%s_LEGSCALED:%%s:\"%%%%8.%dlf%%s %%s\" " % legend_precision % \
(metric_name, what, legend_symbol, what_title)
rrdgraph_commands += "COMMENT:\"\\n\" "
# For graphs with both up and down, paint a gray rule at 0
if have_upside_down:
rrdgraph_commands += "HRULE:0#c0c0c0 "
# Now compute the arguments for the command line of rrdgraph
rrdgraph_arguments = ""
graph_title = graph_template.get("title", graph_title)
vertical_label = graph_template.get("vertical_label", vertical_label)
rrdgraph_arguments += " --vertical-label %s --title %s " % (
quote_shell_string(vertical_label or " "),
quote_shell_string(graph_title))
min_value, max_value = get_graph_range(graph_template, translated_metrics)
if min_value != None and max_value != None:
rrdgraph_arguments += " -l %f -u %f" % (min_value, max_value)
else:
rrdgraph_arguments += " -l 0"
return graph_title + "\n" + rrdgraph_arguments + "\n" + rrdgraph_commands + "\n"
#.
# .--Hover-Graph---------------------------------------------------------.
# | _ _ ____ _ |
# | | | | | _____ _____ _ __ / ___|_ __ __ _ _ __ | |__ |
# | | |_| |/ _ \ \ / / _ \ '__|____| | _| '__/ _` | '_ \| '_ \ |
# | | _ | (_) \ V / __/ | |_____| |_| | | | (_| | |_) | | | | |
# | |_| |_|\___/ \_/ \___|_| \____|_| \__,_| .__/|_| |_| |
# | |_| |
# '----------------------------------------------------------------------'
def new_style_graphs_possible():
return browser_supports_canvas() and not html.is_mobile()
def browser_supports_canvas():
user_agent = html.get_user_agent()
if 'MSIE' in user_agent:
matches = regex('MSIE ([0-9]{1,}[\.0-9]{0,})').search(user_agent)
return not matches or float(matches.group(1)) >= 9.0
else:
return True
def page_show_graph():
site = html.var('site')
host_name = html.var('host_name')
service = html.var('service')
if new_style_graphs_possible():
        # FIXME HACK TODO We don't have the current perfdata and check command
        # here, but we only need them until metrics.render_svc_time_graph() no
        # longer needs this information.
if service == "_HOST_":
query = "GET hosts\n" \
"Filter: host_name = %s\n" \
"Columns: perf_data metrics check_command\n" % host_name
else:
query = "GET services\n" \
"Filter: host_name = %s\n" \
"Filter: service_description = %s\n" \
"Columns: perf_data metrics check_command\n" % (host_name, service)
html.live.set_only_sites([site])
try:
data = html.live.query_row(query)
except livestatus.MKLivestatusNotFoundError:
html.write('<div class="error">%s</div>' %
_('Failed to fetch data for graph. Maybe the site is not reachable?'))
return
html.live.set_only_sites(None)
if service == "_HOST_":
row = {
'site' : site,
'host_name' : host_name,
'host_perf_data' : data[0],
'host_metrics' : data[1],
'host_check_command' : data[2],
}
else:
row = {
'site' : site,
'host_name' : host_name,
'service_description' : service,
'service_perf_data' : data[0],
'service_metrics' : data[1],
'service_check_command' : data[2],
}
# now try to render the graph with our graphing. If it is not possible,
# add JS code to let browser fetch the PNP graph
try:
            # Currently always displaying the last 8 hours
end_time = time.time()
start_time = end_time - 8 * 3600
htmlcode = render_time_graph(row, start_time, end_time, size=(30, 10), font_size=8, show_legend=False, graph_id_prefix="hover")
if htmlcode:
html.write(htmlcode)
return
except NameError:
if config.debug:
raise
pass
# Fallback to PNP graph rendering
host = pnp_cleanup(host_name)
svc = pnp_cleanup(service)
site = html.site_status[site]["site"]
if html.mobile:
url = site["url_prefix"] + ("pnp4nagios/index.php?kohana_uri=/mobile/popup/%s/%s" % \
(html.urlencode(host), html.urlencode(svc)))
else:
url = site["url_prefix"] + ("pnp4nagios/index.php/popup?host=%s&srv=%s" % \
(html.urlencode(host), html.urlencode(svc)))
html.write(url)
|
gpl-2.0
| 7,823,285,372,340,483,000 | 35.666927 | 147 | 0.495054 | false | 3.662122 | false | false | false |
sostenibilidad-unam/posgrado
|
posgradmin/posgradmin/management/commands/exporta_cursos.py
|
1
|
3459
|
# coding: utf-8
from django.core.management.base import BaseCommand
from posgradmin.models import Curso
from django.template.loader import render_to_string
from os import path
from datetime import datetime
import random
from sh import mkdir
from django.utils.text import slugify
import argparse
class Command(BaseCommand):
help = u'Exporta cursos a formato markdown para la página'
def add_arguments(self, parser):
parser.add_argument('--cursos',
type=argparse.FileType('r'),
help='path a la pagina principal de cursos')
parser.add_argument('--outdir',
required=True,
help='path al directorio donde escribir')
def handle(self, *args, **options):
export(options['cursos'], options['outdir'])
def export(cursos, outdir):
mkdir('-p', outdir)
intersemestral = [True, False]
tipos = [
(u"Cursos obligatorios", 'Obligatoria'),
(u"Cursos obligatorios por campo", 'Obligatorias por campo'),
(u"Cursos optativos", 'Optativa'),
(u"Seminarios de Doctorado", u"Seminario de Doctorado")
]
sedes = [
'en línea',
'CDMX',
'Morelia',
u'León'
]
index = cursos.read()
cursos_path = cursos.name
cursos.close()
cursos_md = ""
for inter in intersemestral:
for tipo in tipos:
for sede in sedes:
cursos = Curso.objects.filter(
status='publicado').filter(
intersemestral=inter).filter(
asignatura__tipo=tipo[1]).filter(
sede=sede).order_by('asignatura__asignatura')
if cursos:
if inter:
cursos_md += "\n\n\n# Cursos Intersemestrales\n\n"
else:
cursos_md += "\n\n\n# Cursos Semestrales\n\n"
cursos_md += u"\n\n## %s %s\n\n" % (tipo[0], sede)
for c in cursos:
curso_slug = slugify(c.asignatura.asignatura
+ '_'
+ c.sede)
cursos_md += " - [%s](/cursos/%s/)\n" % (c.asignatura.asignatura, curso_slug)
index = index.replace("<!-- " + slugify("%s %s" % (tipo[0], sede)) + " -->",
cursos_md)
index = index.replace("<!-- cursos-siges -->", cursos_md)
with open(cursos_path, 'w') as f:
f.write(index)
    # create one page per course
for c in Curso.objects.filter(status='publicado'):
# mkdir('-p', path.join(outdir, ''))
if c.sede is None:
sede = ""
else:
sede = c.sede
curso_slug = slugify(c.asignatura.asignatura
+ '_'
+ sede)
c_md = path.join(outdir,
'%s.md' % curso_slug)
with open(c_md, 'w') as f:
f.write(render_to_string(
'posgradmin/curso.md',
{'curso': c,
'curso_slug': curso_slug,
'academicos': ["<a href='mailto:%s'>%s</a>" % (p.user.email, p) for p in c.academicos.all()],
'pleca': random.randint(0, 19)
}))
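# Illustrative invocation (assumed paths; this is a Django management command, so it is
# run through manage.py):
#   python manage.py exporta_cursos --cursos content/cursos.md --outdir content/cursos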
|
gpl-3.0
| 7,672,059,377,457,798,000 | 31.904762 | 110 | 0.4822 | false | 3.679446 | false | false | false |
balanced/balanced-python
|
setup.py
|
1
|
2182
|
"""
Balanced Python client library.
See ``README.md`` for usage advice.
"""
import os
import re
try:
import setuptools
except ImportError:
import distutils.core
setup = distutils.core.setup
else:
setup = setuptools.setup
def _get_version():
path = os.path.join(PATH_TO_FILE, 'balanced', '__init__.py')
version_re = r".*__version__ = '(.*?)'"
fo = open(path)
try:
return re.compile(version_re, re.S).match(fo.read()).group(1)
finally:
fo.close()
def _get_long_description():
path = os.path.join(PATH_TO_FILE, 'README.md')
fo = open(path)
try:
return fo.read()
finally:
fo.close()
def parse_requirements(file_name):
requirements = []
for line in open(file_name, 'r').read().split('\n'):
if re.match(r'(\s*#)|(\s*$)', line):
continue
if re.match(r'\s*-e\s+', line):
requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1', line))
elif re.match(r'\s*-f\s+', line):
pass
else:
requirements.append(line)
return requirements
def parse_dependency_links(file_name):
dependency_links = []
for line in open(file_name, 'r').read().split('\n'):
if re.match(r'\s*-[ef]\s+', line):
dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
return dependency_links
PATH_TO_FILE = os.path.dirname(__file__)
VERSION = _get_version()
LONG_DESCRIPTION = _get_long_description()
setup(
name='balanced',
version=VERSION,
url='https://balancedpayments.com/',
license='MIT License',
author='Balanced',
author_email='[email protected]',
description='Payments platform for marketplaces',
long_description=LONG_DESCRIPTION,
packages=['balanced'],
test_suite='nose.collector',
install_requires=parse_requirements('requirements.txt'),
dependency_links=parse_dependency_links('requirements.txt'),
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
mit
| 4,966,423,551,292,434,000 | 24.670588 | 77 | 0.600825 | false | 3.588816 | false | false | false |
zengchunyun/s12
|
day10/homework/twisted_fram/EchoServer.py
|
1
|
2339
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: zengchunyun
"""
from twisted.internet import protocol
from twisted.internet import reactor
class EchoServer(protocol.Protocol):  # Create EchoServer as a subclass of Protocol
    def dataReceived(self, data):  # Override the parent class's dataReceived method; it is called whenever data arrives from a client, with the received data passed in
        self.transport.write(bytes(str(data), "utf8"))  # Send the received user input back to the client in full via the parent transport's write method;
        # because Python 3.x requires converting the outgoing data type yourself, bytes() is used here to convert it to a bytes object
def main():  # Define the program's main function
    factory = protocol.ServerFactory()  # Instantiate the ServerFactory class; ServerFactory inherits from Factory
    factory.protocol = EchoServer  # Override the factory's protocol attribute, assigning the EchoServer class to protocol
reactor.listenTCP(8000, factory, interface="127.0.0.1")
    # print(type(reactor))  # printing type(reactor) shows the reactor's concrete class
    # twisted.internet.selectreactor.SelectReactor
    # Digging further, SelectReactor's parent class twisted.internet.posixbase.PosixReactorBase provides the
    # listenTCP(port, factory, backlog=50, interface='') method; backlog means the maximum listen queue is 50.
    # listenTCP internally uses the twisted.internet.tcp.Port class.
    # PosixReactorBase in turn inherits from twisted.internet.base._SignalReactorMixin and runs that parent's run method.
reactor.run()
    # The run method calls this class's own startRunning method, which in turn calls ReactorBase's startRunning method.
    # run then executes the class's own mainLoop method.
    # mainLoop keeps looping over SelectReactor.doIteration(t), which uses the select.select event mechanism to poll for events.
    # When a readable event occurs, self._doReadOrWrite is executed, which dispatches to twisted.internet.tcp.Connection's doRead method;
    # that returns self._dataReceived(data), which invokes self.protocol.dataReceived(data). This self.protocol is the
    # protocol.ServerFactory().protocol we defined here, so dataReceived(data) is executed - the method we overrode - i.e. the factory passed to listenTCP
    # executes factory.protocol.dataReceived(data), which is equivalent to calling EchoServer().dataReceived(data).
if __name__ == "__main__":
main()
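# Illustrative client check (assumption: the server above is running on 127.0.0.1:8000):
#   python3 -c "import socket; s = socket.create_connection(('127.0.0.1', 8000)); s.sendall(b'hi'); print(s.recv(1024))"
# Note: the handler sends back str(data) encoded as UTF-8, i.e. the textual representation of the received bytes.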
|
gpl-2.0
| -6,804,515,873,428,824,000 | 44.394737 | 105 | 0.78087 | false | 2.017544 | false | false | false |
AhmedHani/acmASCIS-ML-Hack-2017
|
Session_1/dev/server/datasets_processing/manager.py
|
1
|
7728
|
__author__ = 'acmASCIS'
'''
by ahani at {9/22/2016}
'''
import random
from dev.server.datasets_generator._sort import Sort
from dev.server.datasets_generator._matmul import Matmul
from dev.server.datasets_processing.validator import Validator
class Manager(object):
def __init__(self):
super(Manager, self).__init__()
@staticmethod
def make_default_sorting_dataset(dataset_size=10, array_length=5):
"""
Make a random generated dataset for checking the sorting algorithm correctness
:param dataset_size: (int) Number of arrays that would be created
:param array_length: (int) The array length
        :return: the input and output file paths; the input file contains the dataset and the output file starts empty
"""
file_index = random.Random().randint(0, 10)
input_file_path = "./datasets/sorting/sort" + str(file_index) + ".in"
output_file_path = "./datasets/sorting/sort" + str(file_index) + ".out"
_sort = Sort(array_length)
        with open(input_file_path, "w") as writer:
for i in range(dataset_size):
array_sample = _sort.generate_data()
                array_string = ' '.join(map(str, array_sample))
                writer.write(array_string + "\n")
return input_file_path, output_file_path
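    # Illustrative sort<k>.in content: one space-separated array per line,
    # e.g. "3 1 4 1 5" (values made up for illustration).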
@staticmethod
def make_custom_sorting_dataset(arrays):
"""
Establish the target dataset from the user.
:param arrays: (array of arrays) each array contains integer elements
        :return: the input and output file paths; the input file contains the dataset and the output file starts empty
"""
Validator.validate_custom_sorting_dataset(arrays)
file_index = random.Random().randint(0, 10)
input_file_path = "./datasets/sorting/sort" + str(file_index) + ".in"
output_file_path = "./datasets/sorting/sort" + str(file_index) + ".out"
        with open(input_file_path, "w") as writer:
for i in range(len(arrays)):
_sort = Sort(len(arrays[i]))
_sort.set_data(arrays[i])
                array_string = ' '.join(map(str, arrays[i]))
                writer.write(array_string + "\n")
return input_file_path, output_file_path
@staticmethod
def make_default_freq_dataset(dataset_size=10, array_length=5):
"""
Make a random generated dataset for checking the frequency calculation algorithm correctness
:param dataset_size: (int) Number of arrays that would be created
:param array_length: (int) The array length
        :return: the input and output file paths; the input file contains the dataset and the output file starts empty
"""
file_index = random.Random().randint(0, 10)
input_file_path = "./datasets/freq/freq" + str(file_index) + ".in"
output_file_path = "./datasets/freq/freq" + str(file_index) + ".out"
_sort = Sort(array_length)
        with open(input_file_path, "w") as writer:
for i in range(dataset_size):
array_sample = _sort.generate_data()
                array_string = ' '.join(map(str, array_sample))
                writer.write(array_string + "\n")
return input_file_path, output_file_path
@staticmethod
def make_custom_freq_dataset(arrays):
"""
Establish the target dataset from the user.
:param arrays: (array of arrays) each array contains integer elements
        :return: the input and output file paths; the input file contains the dataset and the output file starts empty
"""
Validator.validate_custom_freq_dataset(arrays)
file_index = random.Random().randint(0, 10)
input_file_path = "./datasets/freq/freq" + str(file_index) + ".in"
output_file_path = "./datasets/freq/freq" + str(file_index) + ".out"
        with open(input_file_path, "w") as writer:
for i in range(len(arrays)):
_sort = Sort(len(arrays[i]))
_sort.set_data(arrays[i])
                array_string = ' '.join(map(str, arrays[i]))
                writer.write(array_string + "\n")
return input_file_path, output_file_path
@staticmethod
def make_default_matmul_dataset(dataset_size=10, matrix_a_size=(3, 3), matrix_b_size=(3, 3)):
"""
Make a random generated dataset for checking the matrix multiplication algorithm correctness
:param dataset_size: (int) an integer that specifies the number of test cases
:param matrix_a_size: (tuple) that specifies the first matrix size
:param matrix_b_size: (tuple) that specifies the second matrix size
        :return: the input and output file paths; the input file contains the dataset and the output file starts empty
"""
file_index = random.Random().randint(0, 10)
input_file_path = "./datasets/arrays_multipliction/matmul" + str(file_index) + ".in"
output_file_path = "./datasets/freq/matmul" + str(file_index) + ".out"
        with open(input_file_path, "w") as writer:
writer.write(str(dataset_size))
for i in range(dataset_size):
matmul = Matmul(matrix_a_size, matrix_b_size)
matrix_a, matrix_b = matmul.get_matrices()
writer.write(str(matrix_a_size[0]) + " " + str(matrix_a_size[1]) + " " + str(matrix_b_size[0]) + " " + str(matrix_b_size[1]))
for i in range(len(matrix_a)):
for j in range(len(matrix_a[0])):
if j < len(matrix_a[0]) - 1:
writer.write(str(matrix_a[i][j]) + " ")
else:
writer.write(str(matrix_a[i][j]))
writer.write("")
for i in range(len(matrix_b)):
for j in range(len(matrix_b[0])):
if j < len(matrix_b[0]) - 1:
writer.write(str(matrix_b[i][j]) + " ")
else:
writer.write(str(matrix_b[i][j]))
return input_file_path, output_file_path
@staticmethod
def make_custom_matmul_dataset(matrices_list):
"""
Establish the target dataset from the user.
        :param matrices_list: (list of tuples) each element is a tuple (matrix_a, matrix_b) holding the first and second matrix
        :return: the input and output file paths; the input file contains the dataset and the output file starts empty
"""
Validator.validate_custom_matmul_dataset(matrices_list)
file_index = random.Random().randint(0, 10)
input_file_path = "./datasets/arrays_multipliction/matmul" + str(file_index) + ".in"
output_file_path = "./datasets/freq/matmul" + str(file_index) + ".out"
        with open(input_file_path, "w") as writer:
writer.write(str(len(matrices_list)))
for item in matrices_list:
writer.write(str(len(item[0])) + " " + str(len(item[0][0])) + " " + str(len(item[1])) + " " + str(len(item[1][0])))
matrix_a = item[0]
matrix_b = item[1]
for i in range(len(matrix_a)):
for j in range(len(matrix_a[0])):
if j < len(matrix_a[0]) - 1:
writer.write(str(matrix_a[i][j]) + " ")
else:
writer.write(str(matrix_a[i][j]))
writer.write("")
for i in range(len(matrix_b)):
for j in range(len(matrix_b[0])):
if j < len(matrix_b[0]) - 1:
writer.write(str(matrix_b[i][j]) + " ")
else:
writer.write(str(matrix_b[i][j]))
return input_file_path, output_file_path
|
mit
| -4,601,814,935,475,399,700 | 42.421348 | 154 | 0.566123 | false | 3.906977 | false | false | false |
sam-m888/gprime
|
gprime/filters/rules/citation/_matchespagesubstringof.py
|
1
|
1934
|
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2011 Helge Herz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from .. import Rule
#-------------------------------------------------------------------------
# "Citations having a Volume/Page that contains a substring"
#-------------------------------------------------------------------------
class MatchesPageSubstringOf(Rule):
"""Citation Volume/Page title containing <substring>"""
labels = [ _('Text:')]
name = _('Citations with Volume/Page containing <text>')
description = _("Matches citations whose Volume/Page contains a "
"certain substring")
category = _('General filters')
allow_regex = True
def apply(self, db, object):
""" Apply the filter """
return self.match_substring(0, object.get_page())
|
gpl-2.0
| 2,667,130,032,348,129,000 | 36.921569 | 79 | 0.537229 | false | 5.076115 | false | false | false |
Hitachi-Data-Systems/org-chart-builder
|
pptx/dml/line.py
|
1
|
2249
|
# encoding: utf-8
"""
DrawingML objects related to line formatting
"""
from __future__ import absolute_import, print_function, unicode_literals
from ..enum.dml import MSO_FILL
from .fill import FillFormat
from ..util import Emu, lazyproperty
class LineFormat(object):
"""
Provides access to line properties such as line color, style, and width.
Typically accessed via the ``.line`` property of a shape such as |Shape|
or |Picture|.
"""
def __init__(self, parent):
super(LineFormat, self).__init__()
self._parent = parent
@lazyproperty
def color(self):
"""
The |ColorFormat| instance that provides access to the color settings
for this line. Essentially a shortcut for ``line.fill.fore_color``.
As a side-effect, accessing this property causes the line fill type
to be set to ``MSO_FILL.SOLID``. If this sounds risky for your use
case, use ``line.fill.type`` to non-destructively discover the
existing fill type.
"""
if self.fill.type != MSO_FILL.SOLID:
self.fill.solid()
return self.fill.fore_color
@lazyproperty
def fill(self):
"""
|FillFormat| instance for this line, providing access to fill
properties such as foreground color.
"""
ln = self._get_or_add_ln()
return FillFormat.from_fill_parent(ln)
@property
def width(self):
"""
The width of the line expressed as an integer number of :ref:`English
Metric Units <EMU>`. The returned value is an instance of
|BaseLength|, a value class having properties such as `.inches`,
`.cm`, and `.pt` for converting the value into convenient units.
"""
ln = self._ln
if ln is None:
return Emu(0)
return ln.w
@width.setter
def width(self, emu):
if emu is None:
emu = 0
ln = self._get_or_add_ln()
ln.w = emu
def _get_or_add_ln(self):
"""
Return the ``<a:ln>`` element containing the line format properties
in the XML.
"""
return self._parent.get_or_add_ln()
@property
def _ln(self):
return self._parent.ln
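# Illustrative usage sketch (assumes `shape` is a python-pptx shape exposing a .line property):
#   line = shape.line
#   line.width = Emu(12700)  # 12700 EMU == 1 pt
#   line.color               # note: accessing .color forces the fill type to MSO_FILL.SOLID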
|
apache-2.0
| -2,358,455,342,825,472,500 | 28.592105 | 77 | 0.600711 | false | 3.987589 | false | false | false |
tf198/pycart
|
pycart/git_repo.py
|
1
|
7009
|
from jinja2 import Environment, FileSystemLoader
import os
from datetime import datetime
import git, renderer, utils, settings
import web, logging
import cPickle as pickle
from cache import cache
logger = logging.getLogger(__name__)
def render_template(template_name, **context):
extensions = context.pop('extensions', [])
g = context.pop('globals', {})
jinja_env = Environment(
loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
extensions=extensions,
)
jinja_env.globals.update(g)
# jinja_env.update_template_context(context)
return jinja_env.get_template(template_name).render(context)
ACTION_ICONS = {'add': 'file',
'modify': 'align-left',
'delete': 'trash'}
# explicitly defined repos
repos = settings.REPOS.copy()
# add repo directories
logger.info("Searching for repos")
for d in getattr(settings, "REPO_DIRS", []):
for directory, subdirs, files in os.walk(d):
root, ext = os.path.splitext(directory)
if ext == '.git':
repos[root[len(d) + 1:]] = directory
# remove excluded repos
for x in getattr(settings, "REPO_EXCLUDE", []):
if x in repos:
del(repos[x])
logger.info("{0} repos found".format(len(repos)))
class RepoMixin(object):
template = None
sha_type = None
def GET(self, *args):
self.cache_key = str(':'.join(args))
d = self.get_context(*args)
helpers = {'author_link': utils.author_link,
'author_gravatar': utils.author_gravatar,
'timesince': utils.timesince}
return render_template(self.template, globals=helpers, **d)
def get_repo(self, repo):
try:
repo_path = repos[repo]
except KeyError:
raise web.notfound("No repo named {0}".format(repo))
return git.repo(repo_path)
def get_base_context(self, repo, sha, path):
d = {}
self.repo = self.get_repo(repo)
try:
if sha in self.repo: # explicit sha
d['ref_name'] = sha[:10]
d['ref_link'] = sha
self.sha = self.repo.get_object(sha)
else:
d['ref_name'] = d['ref_link'] = sha
self.sha = git.get_branch(self.repo, sha)
except KeyError:
logger.exception("Failed to find sha: {0}".format(sha))
raise web.notfound('Bad SHA: {0}'.format(sha))
d['repo'] = repo
d['sha'] = self.sha.id
d['branches'] = git.branches(self.repo)
d['tags'] = git.tags(self.repo)
d['sha_type'] = self.sha_type
d['path'] = path.strip('/')
d['breadcrumbs'] = d['path'].split('/') if path else []
return d
class ListView(object):
def GET(self):
return render_template('list.html', repos=repos.keys())
class TreeView(RepoMixin):
template = "tree.html"
sha_type = 'branch'
def get_listing(self, node, path):
listing_key = self.cache_key + ':listing'
if self.cache_key in cache:
if cache[self.cache_key] == self.sha.id:
logger.info("Using cached data for /%s", path)
d = pickle.loads(cache[listing_key])
d['commit'] = self.repo.get_object(d['commit'])
return d
else:
logger.info("Expiring cache for /%s", path)
try:
del(cache[listing_key])
except KeyError:
pass
d = {'data': None,
'filename': None,
'listing': [],
'commit': None}
last_commit = None
for e in node.items():
commit = git.get_commit(self.repo, self.sha, os.path.join(path, e.path))
is_file = e.mode & 0100000
icon = 'file' if is_file else 'folder-open'
mode = utils.filemode(e.mode) if is_file else ""
d['listing'].append((icon,
e.path,
commit.message ,
mode,
datetime.fromtimestamp(commit.commit_time)))
if last_commit is None or commit.commit_time > last_commit.commit_time:
last_commit = commit
if e.path.lower().startswith('readme'):
d['data'] = e.sha
d['filename'] = "{0}/{1}".format(path, e.path)
d['commit'] = last_commit.id
cache[self.cache_key] = self.sha.id
cache[listing_key] = pickle.dumps(d)
d['commit'] = last_commit
return d
def get_context(self, repo, sha, path):
d = self.get_base_context(repo, sha, path)
path = d['path']
try:
node = git.get_by_path(self.repo, self.sha, d['breadcrumbs'])
except IndexError:
d['error'] = "{0} does not exist in this branch".format(path)
return d
if hasattr(node, 'items'): # is directory
d.update(self.get_listing(node, path))
else: # is a file
d['data'] = node.id
d['commit'] = git.get_commit(self.repo, self.sha, path)
d['filename'] = path
if d['data'] is not None:
text, meta = renderer.render_file(d['filename'], self.repo.get_object(d['data']).data)
d['data'] = text
d['language'] = meta.get('language', 'Unknown')
d['inline_style'] = renderer.get_style()
d['cache_trigger'] = d['commit'].id
return d
class CommitView(RepoMixin):
template = "commit.html"
sha_type = 'commit'
def get_context(self, repo, sha):
d = self.get_base_context(repo, sha, "")
try:
commit = self.repo.get_object(sha)
except KeyError:
raise web.notfound("No such commit")
if commit.__class__.__name__ != "Commit":
raise web.notfound("Not a valid commit")
files = []
for change in git.get_changes(self.repo, commit):
if change.type == 'delete':
files.append((ACTION_ICONS.get('delete'), change.old.path, commit.parents[0], 'Deleted'))
else:
diff = git.unified_diff(self.repo, change.old, change.new)
html = renderer.render_diff(diff)
files.append((ACTION_ICONS.get(change.type, 'fire'), change.new.path, commit.id, html))
d['inline_style'] = renderer.get_style()
d['files'] = files
d['branch'] = commit.id
d['commit'] = commit
d['branch_name'] = commit.id[:10]
return d
class HistoryView(RepoMixin):
template = "history.html"
sha_type = 'commit'
def get_context(self, repo, sha, path):
d = self.get_base_context(repo, sha, path)
walker = self.repo.get_walker(include=[self.sha.id], paths=[d['path']])
d['history'] = [ entry.commit for entry in walker ]
return d
|
gpl-3.0
| 3,639,235,360,990,462,000 | 29.081545 | 105 | 0.54002 | false | 3.792749 | false | false | false |
reclaro/castel
|
castel/advcounter.py
|
1
|
5902
|
""" Advanced counter.
This script is used to get some statistics from a text file.
The script parses a file and returns the number of words, the number of lines,
the most common letter and the average number of letters per word.
The script has a mandatory argument which is the file to parse.
It is possible to pass different options to set a different
configuration file, the number of decimal digits returned in the
calculations and the encoding of the file.
"""
import argparse
import logging
import sys
from configparser import SafeConfigParser
from configparser import NoOptionError
from configparser import NoSectionError
from stevedore import driver
def get_config_value(config_file, section, key):
"""
Parse a configuration file and return the value associated
to the given key.
args:
config_file: name of the configuration file
        section: name of the section in the configuration file
where the key is defined
        key: the name of the key to look up in the configuration
file
ret:
the value corresponding to the associated given key
"""
try:
config = SafeConfigParser()
config.read(config_file)
return config.get(section, key)
except NoOptionError:
print("No Option %s in the section %s" % (key, section))
sys.exit(1)
except NoSectionError:
print("No section %s defined " % (section))
sys.exit(1)
def get_driver(config_file):
"""
Load the backend driver according to the value specified in the
configuration file.
args:
config_file: The name of the configuration file
ret:
The class to use as defined in the configuration file
"""
driver_name = get_config_value(config_file, 'default', 'driver')
mgr = driver.DriverManager(namespace='advcounter.plugin',
name=driver_name,
invoke_on_load=True,
)
return mgr.driver
def get_iowrapper(engine_driver, stream_name, encoding):
"""
Call the open method of the configured engine driver to
open the input file specifying the encoding type of the file
args:
engine_driver: the class of the configured driver used to
perform the statistics
stream_name: the name of the file to open
encoding: the encoding to use for reading the file
ret:
The TextIOWrapper returned by the open file
"""
try:
return engine_driver.open_file(stream_name, encoding=encoding)
except FileNotFoundError:
print("File \'%s\' not found" % stream_name)
sys.exit(1)
def configure_logging(config_file):
"""
Configure the logging details according to the values
defined in the configuration file.
args:
config_file: the name of the configuration file
"""
debug_levels = {'debug': logging.DEBUG,
'error': logging.ERROR,
'critical': logging.CRITICAL,
'fatal': logging.FATAL,
'info': logging.INFO,
'warning': logging.WARNING}
log_file = get_config_value(config_file, 'default', 'log_file')
log_level = get_config_value(config_file, 'default', 'log_level')
logging.basicConfig(filename=log_file, level=debug_levels[log_level])
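# A minimal advcounter.conf sketch (assumed layout; the section and keys are inferred from
# the lookups above, and the driver value must match an entry point registered under the
# 'advcounter.plugin' namespace):
#   [default]
#   driver = <plugin-name>
#   log_file = advcounter.log
#   log_level = info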
def parse_options():
    """ This function manages the options passed to the script
The method uses the argparse library to parse the input
options defined for the script
"""
parser = argparse.ArgumentParser()
parser.add_argument("file", help="Name of the file to parse")
parser.add_argument("-d",
"--decimal",
metavar="integer",
default=1,
type=int,
help="Number of decimal digits returned by"
" calculations, default is 1")
parser.add_argument("-c",
"--config",
default="advcounter.conf",
help="Path for the config file, default"
" is advcounter.conf")
parser.add_argument("-e",
"--encoding",
default="utf-8",
help="Encoding of the input file")
return parser.parse_args()
def get_and_print_results(engine_driver, file_obj):
"""Call the engine to get and print the results
This method call the different method exposed by the driver
engine to get back the results.
The results are printed to the standard output
args:
engine_driver: the driver configured to parse the file
file_obj: the TextIoWrapper to pass to the engine methods
"""
print("number of lines",
engine_driver.get_total_lines(file_obj))
file_obj.seek(0)
print("number of words",
engine_driver.get_total_words(file_obj))
file_obj.seek(0)
print("most common letter",
engine_driver.most_common_letter(file_obj))
file_obj.seek(0)
print("average letter per word",
engine_driver.get_avg_letters_per_word(file_obj))
def main():
"""
    Main function which parses the options defined and calls the
    methods of the configured engine driver to get the statistics
results
"""
args = parse_options()
engine_driver = get_driver(args.config)
engine_driver.precision = args.decimal
configure_logging(args.config)
file_obj = get_iowrapper(engine_driver, args.file, args.encoding)
try:
get_and_print_results(engine_driver, file_obj)
except UnicodeDecodeError:
print("File \'%s\' is not in the %s format" %
(args.file, args.encoding))
sys.exit(1)
finally:
file_obj.close()
if __name__ == '__main__':
main()
|
gpl-2.0
| -5,990,204,540,903,453,000 | 32.344633 | 73 | 0.618096 | false | 4.481397 | true | false | false |
NicolasHug/Surprise
|
surprise/prediction_algorithms/random_pred.py
|
1
|
1319
|
""" Algorithm predicting a random rating.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from .algo_base import AlgoBase
class NormalPredictor(AlgoBase):
"""Algorithm predicting a random rating based on the distribution of the
training set, which is assumed to be normal.
The prediction :math:`\hat{r}_{ui}` is generated from a normal distribution
:math:`\mathcal{N}(\hat{\mu}, \hat{\sigma}^2)` where :math:`\hat{\mu}` and
:math:`\hat{\sigma}` are estimated from the training data using Maximum
Likelihood Estimation:
.. math::
\\hat{\mu} &= \\frac{1}{|R_{train}|} \\sum_{r_{ui} \\in R_{train}}
r_{ui}\\\\\\\\\
\\hat{\sigma} &= \\sqrt{\\sum_{r_{ui} \\in R_{train}}
\\frac{(r_{ui} - \\hat{\mu})^2}{|R_{train}|}}
"""
def __init__(self):
AlgoBase.__init__(self)
def fit(self, trainset):
AlgoBase.fit(self, trainset)
num = sum((r - self.trainset.global_mean)**2
for (_, _, r) in self.trainset.all_ratings())
denum = self.trainset.n_ratings
self.sigma = np.sqrt(num / denum)
return self
def estimate(self, *_):
return np.random.normal(self.trainset.global_mean, self.sigma)
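# Illustrative usage sketch (assumes a Surprise trainset built elsewhere, e.g. with
# Dataset.load_builtin('ml-100k').build_full_trainset()):
#   algo = NormalPredictor()
#   algo.fit(trainset)
#   algo.estimate()  # draws a prediction from N(global_mean, sigma^2)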
|
bsd-3-clause
| 7,401,184,946,207,124,000 | 28.311111 | 79 | 0.582259 | false | 3.443864 | false | false | false |