repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
edx/edx-oauth2-provider | edx_oauth2_provider/tests/test_credentials.py | 1 | 2833 | """ Test email as username authentication """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import ddt
from django.core.urlresolvers import reverse
from provider.constants import CONFIDENTIAL, PUBLIC
from .base import OAuth2TestCase
from .factories import ClientFactory
USERNAME = 'some_username'
EMAIL = '[email protected]'
PASSWORD = 'some_password'
CLIENT_ID = 'some_client_id'
CLIENT_SECRET = 'some_secret'
# Data to generate login tests. Missing fields default to the values
# in the class variables below. 'success' defaults to False.
# 'client_secret' is not used unless specified.
AUTHENTICATION_TEST_DATA = [
{
'success': True
},
{
'username': EMAIL,
'success': True
},
{
'password': PASSWORD + '_bad',
},
{
'username': USERNAME + '_bad',
},
{
'client_id': CLIENT_ID + '_bad'
},
{
'username': EMAIL,
'password': PASSWORD + '_bad',
},
{
'client_secret': CLIENT_SECRET,
'success': True
},
{
'client_type': CONFIDENTIAL,
'client_secret': CLIENT_SECRET,
'success': True
},
{
'client_secret': CLIENT_SECRET + '_bad',
'success': True # public clients should ignore the client_secret field
},
{
'client_type': CONFIDENTIAL,
'client_secret': CLIENT_SECRET + '_bad',
},
]
@ddt.ddt
class AuthenticationTest(OAuth2TestCase):
"""
Authentication test class.
"""
def setUp(self):
super(AuthenticationTest, self).setUp()
user = self.user_factory.create(
username=USERNAME,
password=PASSWORD,
email=EMAIL
)
self.set_user(user)
self.auth_client = ClientFactory.create(
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET
)
self.url = reverse('oauth2:access_token')
@ddt.data(*AUTHENTICATION_TEST_DATA)
def test_password_grants(self, data):
self.auth_client.client_type = data.get('client_type', PUBLIC)
self.auth_client.save()
values = {
'grant_type': 'password',
'client_id': data.get('client_id', CLIENT_ID),
'username': data.get('username', USERNAME),
'password': data.get('password', PASSWORD),
}
client_secret = data.get('client_secret')
if client_secret:
values.update({'client_secret': client_secret})
response = self.client.post(self.url, values)
if data.get('success', False):
self.assertEqual(200, response.status_code)
self.assertIn('access_token', json.loads(response.content.decode('utf-8')))
else:
self.assertEqual(400, response.status_code)
| agpl-3.0 | -2,953,483,770,073,217,000 | 25.476636 | 87 | 0.591246 | false |
timmartin/ScanManager | setup.py | 1 | 1198 | from setuptools import setup
from codecs import open
from os import path
import glob
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name="scan_manager",
version="0.0.1",
description="GUI app for collating images produced by a document scanner",
long_description=long_description,
url="https://github.com/timmartin/ScanManager",
author="Tim Martin",
author_email="[email protected]",
license="MIT",
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: End Users/Desktop',
'Topic :: Office/Business',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
keywords="scanner",
packages=['scan_manager'],
install_requires=['PySide~=1.2.4',
'fpdf~=1.7.2'],
data_files=[('images', glob.glob('images/*.svg'))],
entry_points={
'console_scripts': [
'scan_manager=scan_manager:main'
]
}
)
| mit | -2,133,402,301,166,609,200 | 21.603774 | 78 | 0.606845 | false |
pculture/unisubs | apps/teams/workflows/teamworkflows.py | 1 | 14051 | # Amara, universalsubtitles.org
#
# Copyright (C) 2014 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
"""
Team Workflows
==============
Team workflows are ways for teams to get their subtitling work done. Team
workflows complement the :doc:`subtitle-workflows` and add team-specific
features.
Team workflows are responsible for:
- Providing a SubtitleWorkflow for team videos
- Handling the workflow settings page
- Handling the dashboard page
- Creating extra tabs or the teams section
.. autoclass:: TeamWorkflow
:members: label, dashboard_view, workflow_settings_view,
setup_team, get_subtitle_workflow, extra_pages,
extra_settings_pages
.. autoclass:: TeamPage
.. autoclass:: teams.workflows.old.workflow.OldTeamWorkflow
"""
from collections import namedtuple
from django.urls import reverse
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.utils.translation import ungettext, ugettext as _, ugettext_lazy
from activity.models import ActivityRecord
from videos.models import Video
from subtitles.models import SubtitleLanguage
from teams import experience
from utils.behaviors import DONT_OVERRIDE
from utils.pagination import AmaraPaginatorFuture
from utils.text import fmt
class TeamWorkflow(object):
label = NotImplemented
"""Human-friendly name for this workflow. This is what appears on the
team creation form.
"""
dashboard_view = NotImplemented
member_view = NotImplemented
"""
view function for the dashboard page.
"""
user_dashboard_extra = None
"""
Team-specific extra data to render in user dashboard page.
"""
workflow_settings_view = NotImplemented
"""
view function for the workflow settings page.
"""
has_workflow_settings_page = False
has_subtitle_visibility_setting = False
def __init__(self, team):
self.team = team
def setup_team(self):
"""Do any additional setup for newly created teams."""
pass
def get_subtitle_workflow(self, team_video):
"""Get the SubtitleWorkflow for a video with this workflow. """
raise NotImplementedError()
def extra_pages(self, user):
"""Get extra team pages to handle this workflow.
These pages will be listed as tabs in the team section. Workflows
will typically use this for things like dashboard pages.
Args:
user -- user viewing the page
Returns:
list of :class:`TeamPage` objects
"""
return []
def extra_settings_pages(self, user):
"""Get extra team settings pages to handle this workflow.
This works just like extra_pages(), but the pages will show up as
tabs under the settings section.
Args:
user -- user viewing the page
Returns:
list of :class:`TeamPage` objects
"""
return []
def team_page(self, name, title, view_name):
"""Convenience function to create an TeamPage object
This method automatically generates the URL from view_name using
reverse()
"""
url = reverse(view_name, kwargs={'slug': self.team.slug})
return TeamPage(name, title, url)
def video_page_customize(self, request, video):
"""Add extra content to the video page when viewing from the context
of a team."""
return DONT_OVERRIDE
def subtitles_page_customize(self, request, video, subtitle_language):
"""Add extra content to the subtitles page when viewing from the context
of a team."""
return DONT_OVERRIDE
def team_video_page_extra_tabs(self, request):
"""Add extra sub tabs to the team video page.
These appear near the top of the page.
"""
return []
def management_page_extra_tabs(self, user, *args, **kwargs):
"""Add extra sub tabs to the team management page.
These appear near the top of the page.
"""
return []
def team_video_page_default(self, request):
extra_tabs = self.team_video_page_extra_tabs(request)
if extra_tabs:
return extra_tabs[0].url
else:
return reverse("teams:videos", kwargs={
'slug': self.team.slug,
})
def management_page_default(self, user):
extra_tabs = self.management_page_extra_tabs(user)
if extra_tabs:
return extra_tabs[0].url
else:
return reverse("teams:manage_videos", kwargs={
'slug': self.team.slug,
})
def video_management_add_counts(self, videos):
"""Add the subtitle counts for the videos management page
By default we add the number of completed subtitles, but other
workflows may want to add other/different counts.
For each video you can set the counts attribute to a list of strings.
Each string should describe a count of something, like the number of
completed subtitles. The number should be wrapped in a <strong> tag
(and the whole thing should be wrapped in a mark_safe() call).
You can also set the counts2 attribute to create a
second line of counts.
Args:
videos -- List of Video instances.
"""
counts = SubtitleLanguage.count_completed_subtitles(videos)
for v in videos:
incomplete_count, completed_count = counts[v.id]
v.counts = []
if completed_count > 0:
msg = ungettext(
(u'<strong>%(count)s</strong> subtitle completed'),
(u'<strong>%(count)s</strong> subtitles completed'),
completed_count)
v.counts.append(mark_safe(fmt(msg, count=completed_count)))
if incomplete_count > 0:
msg = ungettext(
(u'<strong>%(count)s</strong> subtitle started'),
(u'<strong>%(count)s</strong> subtitles started'),
incomplete_count)
v.counts.append(mark_safe(fmt(msg, count=incomplete_count)))
def video_management_alter_context_menu(self, video, menu):
"""Alter the context menu for the video management page."""
def video_management_extra_forms(self):
"""Add extra forms to the video management page """
return []
def activity_type_filter_options(self):
"""
Get possible activity type filter values
This is used on the activity page to populate the type dropdown.
"""
return [
'video-added',
'comment-added',
'version-added',
'video-url-added',
'member-joined',
'member-left',
'video-title-changed',
'video-deleted',
'video-url-edited',
'video-url-deleted',
'video-moved-from-team',
'video-moved-to-team',
'team-settings-changed',
'language-changed',
]
def customize_permissions_table(self, team, form, permissions_table):
"""
Customize the table show on the permissions settings page
"""
pass
# these can be used to customize the content in the project/language
# manager pages
def render_project_page(self, request, team, project, page_data):
page_data['videos'] = (team.videos
.filter(teamvideo__project=project)
.order_by('-id'))[:5]
return render(request, 'new-teams/project-page.html', page_data)
def render_all_languages_page(self, request, team, page_data):
return render(request, 'new-teams/all-languages-page.html', page_data)
def render_language_page(self, request, team, language_code, page_data):
qs = (self.team.videos
.filter(primary_audio_language_code=language_code)
.order_by('-id'))
page_data['videos']= qs[:5]
return render(request, 'new-teams/language-page.html', page_data)
def fetch_member_history(self, user, query=None):
"""
Fetch the member subtitling history data for the dashboard/profile
page
This method should return a queryset of items to display. The items
can be any django model. They will get rendered by the template in
the member_history_template attribute.
"""
qs = (ActivityRecord.objects
.for_team(self.team)
.filter(user=user)
.order_by('-created'))
if query:
qs = qs.filter(video__in=Video.objects.search(query))
return qs
member_history_template = 'future/teams/member-subtitling-history.html'
"""
Template that can render the results from member_history. It will be
passed the following variables:
- team: Team the history is for
- user: User the history is for
- member_history: Single page from the queryset returned by fetch_member_history()
"""
member_history_header = ugettext_lazy('Recent activity')
def get_experience_column_label(self):
"""
Team members page label for the experience coluumn.
"""
return _('Subtitles completed')
def add_experience_to_members(self, page):
"""
Add experience attributes to a list of members
We call this for the team members page to populate the experience
column (usually subtitles completed). This method should:
- Set the experience attribute to each member to a TeamExperience object
- Optionally, set the experience_extra attribute, which is a list of
extra experience to show in the expanded view.
"""
subtitles_completed = experience.get_subtitles_completed(page)
for member, count in zip(page, subtitles_completed):
member.experience_count = count
# map type codes to subclasses
_type_code_map = {}
# map API codes to type codes
_api_code_map = {}
@classmethod
def get_workflow(cls, team):
"""Get a TeamWorkflow subclass for a team."""
klass = cls._type_code_map[team.workflow_type]
return klass(team)
@classmethod
def get_choices(cls):
choices = [(type_code, subclass.label)
for (type_code, subclass) in cls._type_code_map.items()]
cls._sort_choices(choices)
return choices
@classmethod
def get_api_choices(cls):
choices = [
(type_code, api_code)
for (api_code, type_code) in cls._api_code_map.items()
]
cls._sort_choices(choices)
return choices
@classmethod
def _sort_choices(cls, choices):
"""Sort workflow type choices
We sort choices so that:
- unisubs choices are first, then extensions (unisubs choices are
1-char)
- after that it's sorted alphabetically by code
"""
choices.sort(key=lambda (code, _): (len(code), code))
@classmethod
def register(cls, type_code, api_code=None):
"""Register a TeamWorkflow subclass.
Calling this class method will enable it for teams whose
workflow_type value is type_code
Args:
type_code: string code value for this workflow. Workflows in the
unisubs repository should be 1 char long. Workflows on other
repositories should be 2 chars with the first char being
unique to the repository.
api_code: API code value for this workflow. Pass in a non-None
value to enable creating this workflow via the API
"""
TeamWorkflow._type_code_map[type_code] = cls
if api_code is not None:
TeamWorkflow._api_code_map[api_code] = type_code
TeamPage = namedtuple('TeamPage', 'name title url')
"""Represents a page in the team's section
Attributes:
name: machine-name for this tuple. This is value to use for current in
the _teams/tabs.html template
title: human friendly tab title
url: URL for the page
"""
TeamExperience = namedtuple('TeamExperience', 'label icon count')
"""Used to list experience counts on the members directory
By default, we show subtitles completed, but other workflows might want to
display different things, like assignments completed, etc.
"""
class TeamPermissionsRow(object):
"""
Used to display the checks/Xs on the permissions settings page
"""
def __init__(self, label, admins, managers, contributors,
setting_name=None):
self.label = label
self.admins = admins
self.managers = managers
self.contributors = contributors
self.setting_name = setting_name
@classmethod
def from_setting(cls, label, form, setting_name):
value = form[setting_name].value()
permissions = form[setting_name].field.widget.decompress(value)
# some fields only have settings for admins/managers. Make sure to
# extend permissions to 3 items in that case
permissions.extend([False] * (3 - len(permissions)))
return cls(label, *permissions, setting_name=setting_name)
| agpl-3.0 | 7,985,709,267,886,165,000 | 34.1275 | 90 | 0.632197 | false |
commonsense/divisi | csc/divisi/recycling_set.py | 1 | 3241 | from ordered_set import OrderedSet
from priodict import priorityDictionary
class RecyclingSet(OrderedSet):
__slots__ = ['items', 'indices', 'index', 'indexFor', '__contains__',
'__getitem__', '__len__', 'count', 'maxsize',
'drop_listeners', 'priority']
def __init__(self, maxsize, origitems=None):
self.count = 0
self.maxsize = maxsize
self.priority = priorityDictionary()
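# priority maps slot index -> last-use counter; its smallest entry is the LRU slot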
self.drop_listeners = []
OrderedSet.__init__(self, origitems)
def __getstate__(self):
return (self.items, self.priority, self.maxsize, self.count)
def __setstate__(self, state):
items, self.priority, self.maxsize, self.count = state
OrderedSet.__setstate__(self, items)
def add(self, key):
"""
Add an item to the set (unless it's already there),
returning its index. Drop an old item if necessary.
``None`` is never an element of an OrderedSet.
"""
if key in self.indices:
self.touch(key)
return self.indices[key]
n = len(self.items)
if n < self.maxsize:
self.items.append(key)
if key is not None:
self.indices[key] = n
self.touch(key)
return n
else:
newindex = self.drop_oldest()
self.items[newindex] = key
self.indices[key] = newindex
self.touch(key)
return newindex
append = add
def __delitem__(self, n):
"""
Deletes an item from the RecyclingSet.
"""
oldkey = self.items[n]
del self.indices[oldkey]
self.items[n] = None
self.announce_drop(n, oldkey)
def drop_oldest(self):
"""
Drop the least recently used item, to make room for a new one. Return
the number of the slot that just became free.
"""
slot = self.priority.smallest()
oldest = self.items[slot]
del self[slot]
return slot
def listen_for_drops(self, callback):
"""
If an object needs to know when a slot becomes invalid because its
key gets dropped, it should register a callback with listen_for_drops.
"""
self.drop_listeners.append(callback)
def announce_drop(self, index, key):
"""
Tell all registered listeners that we dropped a key.
"""
print "dropping key:", key
for listener in self.drop_listeners:
listener(index, key)
def touch(self, key):
"""
Remember that this key is useful.
"""
if key not in self: raise IndexError
else:
self.count += 1
self.priority[self.index(key, False)] = self.count
def index(self, key, touch=True):
if touch: self.touch(key)
return self.indices[key]
indexFor = index
def __contains__(self, key):
return key in self.indices
def __getitem__(self, key):
if key < self.maxsize and key >= len(self.items):
return None
return self.items[key]
def __len__(self):
return len(self.indices)
def _setup_quick_lookup_methods(self):
pass
| gpl-3.0 | -7,574,904,923,457,729,000 | 29.28972 | 78 | 0.558161 | false |
epuzanov/ZenPacks.community.HPMon | ZenPacks/community/HPMon/modeler/plugins/community/snmp/HPFanMap.py | 1 | 3236 | ################################################################################
#
# This program is part of the HPMon Zenpack for Zenoss.
# Copyright (C) 2008, 2009, 2010, 2011 Egor Puzanov.
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
################################################################################
__doc__="""HPFanMap
HPFanMap maps the cpqHeFltTolFanTable table to fan objects
$Id: HPFanMap.py,v 1.3 2011/01/02 19:01:17 egor Exp $"""
__version__ = '$Revision: 1.3 $'[11:-2]
from Products.DataCollector.plugins.CollectorPlugin import SnmpPlugin, GetTableMap
class HPFanMap(SnmpPlugin):
"""Map HP/Compaq insight manager Fans table to model."""
maptype = "HPFanMap"
modname = "ZenPacks.community.HPMon.HPFan"
relname = "fans"
compname = "hw"
snmpGetTableMaps = (
GetTableMap('cpqHeFltTolFanTable',
'.1.3.6.1.4.1.232.6.2.6.7.1',
{
'.3': '_locale',
'.4': '_present',
'.5': 'type',
'.9': 'status',
'.12': '_rpm',
}
),
)
typemap = {1: 'other',
2: 'Tach Output',
3: 'Spin Detect',
}
localemap = {1: 'other',
2: 'unknown',
3: 'system',
4: 'systemBoard',
5: 'ioBoard',
6: 'cpu',
7: 'memory',
8: 'storage',
9: 'removableMedia',
10: 'powerSupply',
11: 'ambient',
12: 'chassis',
13: 'bridgeCard',
14: 'managementBoard',
15: 'backplane',
16: 'networkSlot',
17: 'bladeSlot',
18: 'virtual',
}
def process(self, device, results, log):
"""collect snmp information from this device"""
log.info('processing %s for device %s', self.name(), device.id)
getdata, tabledata = results
rm = self.relMap()
localecounter = {}
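# count fans per locale so generated ids (e.g. systemBoard1, systemBoard2) stay unique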
for oid, fan in tabledata.get('cpqHeFltTolFanTable', {}).iteritems():
try:
om = self.objectMap(fan)
if om._present < 3: continue
if not hasattr(om, '_rpm'):
om.modname = "ZenPacks.community.HPMon.HPsdFan"
om.snmpindex = oid.strip('.')
om.type = self.typemap.get(getattr(om,'type',1),self.typemap[1])
if om._locale in localecounter:
localecounter[om._locale] = localecounter[om._locale] + 1
else:
localecounter[om._locale] = 1
om.id = self.prepId("%s%d" % (self.localemap.get(
getattr(om, '_locale', 1),
self.localemap[1]),
localecounter[om._locale]))
except AttributeError:
continue
rm.append(om)
return rm
| gpl-2.0 | -2,512,895,411,321,758,700 | 33.425532 | 82 | 0.439122 | false |
hmcl/storm-apache | storm-client/src/py/storm/DistributedRPC.py | 1 | 10668 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Autogenerated by Thrift Compiler (0.12.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:utf8strings
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
class Iface(object):
def execute(self, functionName, funcArgs):
"""
Parameters:
- functionName
- funcArgs
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def execute(self, functionName, funcArgs):
"""
Parameters:
- functionName
- funcArgs
"""
self.send_execute(functionName, funcArgs)
return self.recv_execute()
def send_execute(self, functionName, funcArgs):
self._oprot.writeMessageBegin('execute', TMessageType.CALL, self._seqid)
args = execute_args()
args.functionName = functionName
args.funcArgs = funcArgs
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_execute(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = execute_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
if result.aze is not None:
raise result.aze
raise TApplicationException(TApplicationException.MISSING_RESULT, "execute failed: unknown result")
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["execute"] = Processor.process_execute
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_execute(self, seqid, iprot, oprot):
args = execute_args()
args.read(iprot)
iprot.readMessageEnd()
result = execute_result()
try:
result.success = self._handler.execute(args.functionName, args.funcArgs)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except DRPCExecutionException as e:
msg_type = TMessageType.REPLY
result.e = e
except AuthorizationException as aze:
msg_type = TMessageType.REPLY
result.aze = aze
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("execute", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class execute_args(object):
"""
Attributes:
- functionName
- funcArgs
"""
def __init__(self, functionName=None, funcArgs=None,):
self.functionName = functionName
self.funcArgs = funcArgs
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.functionName = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.funcArgs = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('execute_args')
if self.functionName is not None:
oprot.writeFieldBegin('functionName', TType.STRING, 1)
oprot.writeString(self.functionName.encode('utf-8') if sys.version_info[0] == 2 else self.functionName)
oprot.writeFieldEnd()
if self.funcArgs is not None:
oprot.writeFieldBegin('funcArgs', TType.STRING, 2)
oprot.writeString(self.funcArgs.encode('utf-8') if sys.version_info[0] == 2 else self.funcArgs)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(execute_args)
execute_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'functionName', 'UTF8', None, ), # 1
(2, TType.STRING, 'funcArgs', 'UTF8', None, ), # 2
)
class execute_result(object):
"""
Attributes:
- success
- e
- aze
"""
def __init__(self, success=None, e=None, aze=None,):
self.success = success
self.e = e
self.aze = aze
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = DRPCExecutionException()
self.e.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.aze = AuthorizationException()
self.aze.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('execute_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
if self.aze is not None:
oprot.writeFieldBegin('aze', TType.STRUCT, 2)
self.aze.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(execute_result)
execute_result.thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
(1, TType.STRUCT, 'e', [DRPCExecutionException, None], None, ), # 1
(2, TType.STRUCT, 'aze', [AuthorizationException, None], None, ), # 2
)
fix_spec(all_structs)
del all_structs
| apache-2.0 | -3,341,553,864,955,557,000 | 33.412903 | 134 | 0.591207 | false |
juvvadi/keystone | keystone/logic/types/auth.py | 1 | 5621 | # Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
from datetime import datetime
import json
from lxml import etree
import keystone.logic.types.fault as fault
class PasswordCredentials(object):
"""Credentials based on username, password, and (optional) tenant_id.
To handle multiple token for a user depending on tenants,
tenant_id is mandatory.
"""
def __init__(self, username, password, tenant_id):
self.username = username
self.password = password
self.tenant_id = tenant_id
@staticmethod
def from_xml(xml_str):
try:
dom = etree.Element("root")
dom.append(etree.fromstring(xml_str))
root = dom.find("{http://docs.openstack.org/idm/api/v1.0}"
"passwordCredentials")
if root == None:
raise fault.BadRequestFault("Expecting passwordCredentials")
username = root.get("username")
if username == None:
raise fault.BadRequestFault("Expecting a username")
password = root.get("password")
if password == None:
raise fault.BadRequestFault("Expecting a password")
tenant_id = root.get("tenantId")
#--for multi-token handling--
if tenant_id == None:
raise fault.BadRequestFault("Expecting tenant")
# ----
return PasswordCredentials(username, password, tenant_id)
except etree.LxmlError as e:
raise fault.BadRequestFault("Cannot parse password credentials",
str(e))
@staticmethod
def from_json(json_str):
try:
obj = json.loads(json_str)
if not "passwordCredentials" in obj:
raise fault.BadRequestFault("Expecting passwordCredentials")
cred = obj["passwordCredentials"]
if not "username" in cred:
raise fault.BadRequestFault("Expecting a username")
username = cred["username"]
if not "password" in cred:
raise fault.BadRequestFault("Expecting a password")
password = cred["password"]
if "tenantId" in cred:
tenant_id = cred["tenantId"]
else:
tenant_id = None
#--for multi-token handling--
if tenant_id == None:
raise fault.BadRequestFault("Expecting a tenant")
# ---
return PasswordCredentials(username, password, tenant_id)
except (ValueError, TypeError) as e:
raise fault.BadRequestFault("Cannot parse password credentials",
str(e))
class Token(object):
"An auth token."
def __init__(self, expires, token_id):
self.expires = expires
self.token_id = token_id
class Group(object):
"A group, optionally belonging to a tenant."
def __init__(self, group_id, tenant_id):
self.tenant_id = tenant_id
self.group_id = group_id
class Groups(object):
"A collection of groups."
def __init__(self, values, links):
self.values = values
self.links = links
class User(object):
"A user."
def __init__(self, username, tenant_id, groups):
self.username = username
self.tenant_id = tenant_id
self.groups = groups
class AuthData(object):
"Authentation Infor returned upon successful login."
def __init__(self, token, user):
self.token = token
self.user = user
def to_xml(self):
dom = etree.Element("auth",
xmlns="http://docs.openstack.org/idm/api/v1.0")
token = etree.Element("token",
expires=self.token.expires.isoformat())
token.set("id", self.token.token_id)
user = etree.Element("user",
username=self.user.username,
tenantId=str(self.user.tenant_id))
groups = etree.Element("groups")
for group in self.user.groups.values:
g = etree.Element("group",
tenantId=group.tenant_id)
g.set("id", group.group_id)
groups.append(g)
user.append(groups)
dom.append(token)
dom.append(user)
return etree.tostring(dom)
def to_json(self):
token = {}
token["id"] = self.token.token_id
token["expires"] = self.token.expires.isoformat()
user = {}
user["username"] = self.user.username
user["tenantId"] = self.user.tenant_id
group = []
for g in self.user.groups.values:
grp = {}
grp["tenantId"] = g.tenant_id
grp["id"] = g.group_id
group.append(grp)
groups = {}
groups["group"] = group
user["groups"] = groups
auth = {}
auth["token"] = token
auth["user"] = user
ret = {}
ret["auth"] = auth
return json.dumps(ret)
| apache-2.0 | 4,376,412,600,012,934,700 | 32.064706 | 76 | 0.569294 | false |
nickmarton/Paxos-Distributed-Calendar | Classes/Node.py | 1 | 24814 | """Node (User) Class for Paxos Calendar."""
import os
import sys
import time
import thread
import pickle
import socket
import logging
from Bully import bully_algorithm
from Appointment import Appointment
from Calendar import Calendar
from Proposer import Proposer
from Acceptor import Acceptor
class Node(object):
"""
Node class.
node_id: Unique ID used for Node identification as well as for
unique proposal number generation; int.
calendar: Calendar object which contains Appointment objects.
proposer: Proposer object used in Synod Algorithm; passed node_id so
it can create unique proposal numbers.
acceptor: Acceptor object used in Synod Algorithm.
log: Dictionary of Calendar objects used in Paxos Algorithm;
intially empty, Synod Algorithm is used to fill each entry
of log where integer keys represents slots and the values
being the Calendar agreed upon via conscensus.
leader: The current leader elected via the bully algorithm;
initially None and updated every ~6 seconds.
"""
_ip_filename = "./IP_translations.txt"
def __init__(self, node_id):
"""Construct a Node object."""
if type(node_id) != int:
raise TypeError("node_id must be an int")
if node_id < 0:
raise ValueError("node id must be a nonnegative integer")
try:
Node._ip_table = Node._make_ip_table()
except IOError:
raise IOError("Node-to-IP translation file: " + Node._ip_filename + " not found.")
self._node_id = node_id
self._calendar = Calendar()
self._proposer = Proposer(node_id,self._ip_table)
self._acceptor = Acceptor(self._ip_table)
self._log = {}
self._leader = None
self._terminate = False
self._is_Node = True
def insert(self, appointment):
"""Insert an Appointment into this Node's Calendar."""
#First create new Calendar with new appointment
from copy import deepcopy
new_calendar = deepcopy(self._calendar)
new_calendar += appointment
if self._log.keys():
next_log_slot = max(self._log.keys()) + 1
else:
next_log_slot = 0
#Then ask leader to propose the new Calendar
try:
leader_IP, leader_TCP, leader_UDP = self._ip_table[self._leader]
proposal_message = pickle.dumps(
("propose", Calendar.serialize(new_calendar), next_log_slot))
udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_socket.sendto(proposal_message, (leader_IP, leader_UDP))
udp_socket.close()
except KeyError as excinfo:
print "Unable to find leader, waiting until one is selected..."
while self._leader == None:
pass
print "Found leader, continuing...\n"
self.insert(appointment)
def delete(self, appointment):
"""Delete an Appointment in this Node's Calendar."""
#First create new Calendar without appointment
from copy import deepcopy
new_calendar = Calendar()
for self_appointment in self._calendar:
if self_appointment != appointment:
new_calendar += deepcopy(self_appointment)
if self._log.keys():
next_log_slot = max(self._log.keys()) + 1
else:
next_log_slot = 0
#Then ask leader to propose the new Calendar
try:
leader_IP, leader_TCP, leader_UDP = self._ip_table[self._leader]
proposal_message = pickle.dumps(
("propose", Calendar.serialize(new_calendar), next_log_slot))
udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_socket.sendto(proposal_message, (leader_IP, leader_UDP))
udp_socket.close()
except KeyError as excinfo:
print "Unable to find leader, waiting until one is selected..."
while self._leader == None:
pass
print "Found leader, continuing...\n"
self.delete(appointment)
def paxos(self):
"""Engage this Node in Paxos algorithm."""
def _parse_message(message):
"""
Parse UDP pickled tuple message.
Self is available from closure.
"""
valid_message_types = [
"propose", "prepare", "promise", "accept", "ack", "commit"]
message_type, message_args = message[0], message[1:]
#syntactic checking
if message_type not in valid_message_types:
logging.error("Invalid message type")
return
if 3 <= len(message_args) <= 4:
arg_0_is_int = type(message_args[0]) == int
arg_0_is_calendar = hasattr(message_args[0], "_is_Calendar")
arg_1_is_calendar = hasattr(message_args[1], "_is_Calendar")
if not arg_0_is_calendar:
arg_0_is_None = message_args[0] == None
else:
arg_0_is_None = False
if not arg_1_is_calendar:
arg_1_is_None = message_args[1] == None
else:
arg_1_is_None = False
#handle prepare messages
if message_type == "propose":
if arg_0_is_calendar:
#If in this conditional, we are the leader.
#First we have to fill any empty log slots
#'''
log_slots = self._log.keys()
proposed_slot = message[2]
for i in range(proposed_slot):
if i not in self._log.keys():
#dummy_message = ("propose", Calendar(), i, self._node_id)
#self._proposer._command_queue.append(dummy_message)
#time.sleep(.1)
slot_calendar = self._acceptor._accVals[i]
self._log[i] = slot_calendar
#'''
#Then we can add this new proposal
self._proposer._command_queue.append(message)
else:
logging.error(
"Propose message must be of form "
"'propose' Calendar")
#handle prepare messages
elif message_type == "prepare":
if arg_0_is_int:
self._acceptor._command_queue.append(message)
else:
logging.error(
"Prepare message must be of form 'prepare' int")
#handle promise messages
elif message_type == "promise":
if (arg_0_is_int and arg_1_is_calendar) or (arg_0_is_None and arg_1_is_None):
self._proposer._command_queue.append(message)
else:
logging.error(
"Promise message must be of form "
"'promise' int Calendar")
#handle accept messages
elif message_type == "accept":
if arg_0_is_int and arg_1_is_calendar:
self._acceptor._command_queue.append(message)
else:
print ' '.join([str(i) for i in message])
logging.error(
"Accept message must be of form "
"'accept' int Calendar")
#handle ack messages
elif message_type == "ack":
if arg_0_is_int and arg_1_is_calendar:
self._proposer._command_queue.append(message)
else:
logging.error(
"Ack message must be of form "
"'ack' int Calendar")
#handle commit messages
elif message_type == "commit":
if arg_0_is_calendar:
self._acceptor._command_queue.append(message)
else:
logging.error(
"Commit message must be of form 'commit' Calendar")
else:
logging.error("Invalid message parameters")
return
def _learner(self):
"""Poll the Acceptor commits queue to update Node's log."""
while True:
if self._acceptor._commits_queue:
(log_slot, v) = self._acceptor._commits_queue.pop()
self._log[log_slot] = v
self._calendar = self._log[max(self._log.keys())]
if self._terminate:
break
time.sleep(.001)
def _shut_down(self):
"""."""
while True:
if self._terminate:
self._proposer._terminate = True
self._acceptor._terminate = True
break
def _do_paxos(self):
"""Do Paxos algorithm for this Node."""
#Begin running the Acceptor and Proposer in the background
thread.start_new_thread(self._proposer.start, ())
thread.start_new_thread(self._acceptor.start, ())
thread.start_new_thread(_learner, (self,))
thread.start_new_thread(_shut_down, (self,))
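# Listen for incoming Paxos messages over UDP and hand each one to _parse_message for routing.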
IP, UDP_PORT = '0.0.0.0', self._ip_table[self._node_id][2]
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((IP, UDP_PORT))
while True:
data, addr = sock.recvfrom(4096) # buffer size is 1024 bytes
if data == "terminate":
sock.close()
break
#Quick lookup of ID of sender from IP received
sender_ID = filter(
lambda row: row[1][0] == addr[0],
self._ip_table.items())[0][0]
message = pickle.loads(data)
#bind sender_ID to message
message = message + (sender_ID,)
#construct deserailized version of message
new_message = []
for field in message:
if type(field) == str:
try:
deserialized_calendar = Calendar.deserialize(field)
new_message.append(deserialized_calendar)
except:
new_message.append(field)
else:
new_message.append(field)
new_message = tuple(new_message)
_parse_message(new_message)
thread.start_new_thread(_do_paxos, (self,))
def elect_leader(self, poll_time=6, timeout=3):
"""Engage this Node in leader selection."""
def _do_leader_election(self, poll_time, timeout):
"""Do leader election as new thread."""
IP, TCP_PORT = "0.0.0.0", self._ip_table[self._node_id][1]
recv_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
recv_socket.bind((IP, TCP_PORT))
#backlog; 1 for each Node besides self
recv_socket.listen(4)
prev_leader = None
while True:
thread.start_new_thread(bully_algorithm, (self, recv_socket, timeout))
time.sleep(poll_time)
if self._leader != prev_leader:
logging.debug("NEW LEADER IS: " + str(self._leader))
prev_leader = self._leader
if self._terminate:
break
recv_socket.close()
thread.start_new_thread(_do_leader_election, (self, poll_time, timeout))
def terminate(self):
"""Initiate termination protocol; close all threads."""
#Set termination field
self._terminate = True
#Send special termination message to self
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
my_ip_info = self._ip_table[self._node_id]
my_IP, my_UDP_PORT = my_ip_info[0], my_ip_info[2]
s.sendto("terminate", (my_IP, my_UDP_PORT))
s.close()
#Sleep for a second to ensure everything closes before main
time.sleep(1)
@staticmethod
def save(Node, path="./", filename="state.pkl"):
"""Save this Node's log and Acceptor to stable storage."""
if not hasattr(Node, "_is_Node"):
raise TypeError("Node parameter must be a Node object")
if type(filename) != str or type(path) != str:
raise TypeError("path and filename must be strings")
if filename[-4:] != ".pkl":
raise ValueError("filename must have .pkl extension")
if not os.path.exists(path):
raise ValueError("path provided does not exist")
import pickle
with open(path + filename, 'w') as f:
state = (Node._node_id, Node._log, Node._acceptor)
pickle.dump(state, f)
@staticmethod
def load(path="./", filename="state.pkl"):
"""
Load log and Acceptor from stable storage if path and filename exist.
"""
def _rebuild_calendar(node, log):
"""Rebuild the calendar of node by reconstructing it from log."""
#Get the latest entry in the log for most up-to-date Calendar
node._calendar = log[max(log.keys())]
if type(filename) != str or type(path) != str:
raise TypeError("path and filename must be strings")
if filename[-4:] != ".pkl":
raise ValueError("filename must have .pkl extension")
if not os.path.exists(path+filename):
raise ValueError("path provided does not exist")
with open(path + filename, 'r') as f:
state = pickle.load(f)
node_id, log, acceptor = state
node = Node(node_id)
node._log = log
node._acceptor = acceptor
_rebuild_calendar(node, log)
return node
@staticmethod
def _make_ip_table():
"""Create the ID-to-IP translation table used for socket connection."""
table = {}
import re
pattern = r"^\d+,\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3},\d{4},\d{5}$"
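# each translation line must look like: ID,IP,TCP_PORT,UDP_PORT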
with open(Node._ip_filename, "r") as f:
for translation in f:
match = re.match(pattern, translation.strip())
if not match:
raise ValueError(
"Every line in IP_translations.txt must be of "
"form ID,IP")
ID, IP, TCP_PORT, UDP_PORT, = translation.strip().split(',')
table[int(ID)] = [IP, int(TCP_PORT), int(UDP_PORT)]
return table
@staticmethod
def _parse_command(command, node):
"""Parse command provided, possibly involving provided node."""
def _do_show(argv, node):
"""Perform show command for debugging/user information."""
if len(argv) == 1:
raise ValueError(
"Invalid show argument; show needs argument "
"{calendar,log,acceptor,proposer,all}")
#Handle showing the calendar
if argv[1] == "calendar":
print node._calendar
#Handle showing the log
elif argv[1] == "log":
print "Log:"
#copy the log into a list ordered by slot number
ordered_slots = sorted(node._log.items(), key=lambda x: x[0])
#if -short flag not thrown, print entire log
if len(argv) == 2:
for slot in ordered_slots:
print "Slot " + str(slot[0]) + ' ' + str(slot[1])
#Short flag is thrown, just print names of Appointments in each
#Calendar slot
elif len(argv) == 3:
if argv[2] == "-s":
for slot in ordered_slots:
log_string = "Slot " + str(slot[0]) + " Calendar: \t"
log_string += ', '.join(
slot[1].get_appointment_names())
print log_string
print
else:
raise ValueError(
"Invalid show arguments; Only flags \"-s\" "
"permitted")
#Bad number of arguments to show log
else:
raise ValueError(
"Invalid show arguments; show log supports only a "
"single optional flag argument \"-s\"")
#Handle showing Node's Acceptor object
elif argv[1] == "acceptor":
print str(node._acceptor) + '\n'
#Handle showing Node's Proposer object
elif argv[1] == "proposer":
print str(node._proposer) + '\n'
#Handle printing entire state of Node
elif argv[1] == "all":
print "-" * 100
print "Node ID: " + str(node._node_id)
_do_show(['show', 'calendar'], node)
_do_show(['show', 'log', '-s'], node)
_do_show(['show', 'acceptor'], node)
_do_show(['show', 'proposer'], node)
print "-" * 100
else:
raise ValueError(
"Invalid show argument; show needs argument "
"{calendar,log,acceptor,proposer,all}")
def _parse_appointment(argv):
"""Try to parse an Appointment object from given argv."""
generic_error_msg = "Invalid command; Schedule and cancel " + \
"commands must be of form: \n" + \
"{schedule,cancel} [Appointment name] " + \
"(user1,user2,...usern) (start_time,end_time) [day]"
if len(argv) != 5:
raise ValueError(generic_error_msg)
name, participants, times, day = argv[1:]
participants = participants[1:-1].split(",")
try:
participants = [int(user[4:]) for user in participants]
except ValueError:
raise ValueError(
"Invalid command; participants must be of form "
"(user1,user2,...,usern)")
try:
start, end = times[1:-1].split(',')
except ValueError:
raise ValueError(
"Invalid command; times must be of form "
"(start_time,end_time)")
try:
return Appointment(name, day, start, end, participants)
except ValueError as excinfo:
raise ValueError("Invalid command; " + excinfo.message)
def _do_clear():
"""Perform clear command via ASCI escape code."""
print(chr(27) + "[2J")
argv = command.split()
if not argv:
return
#If command is to clear, clear the screen
if argv[0] == "clear":
_do_clear()
return
#If command was to show something, do show
if argv[0] == "show":
try:
_do_show(argv, node)
except ValueError as excinfo:
print excinfo
print
finally:
return
#If command is to schedule or cancel an Appointment, parse then
#initiate Synod algorithm
if argv[0] == "schedule":
try:
appointment = _parse_appointment(argv)
for user in appointment._participants:
node._ip_table[user]
#determine if the Appointment the user is trying to schedule
#is already in their Calendar or in conflict with some
#Appointment in their Calendar
conflict_cond = node._calendar._is_appointment_conflicting(
appointment)
in_cond = appointment in node._calendar
#if it's not already in the Calendar and not in conflict with
#any Appointment in it, begin Synod
if not conflict_cond and not in_cond:
node.insert(appointment)
else:
print "User scheduled appointment already in their " + \
"own Calendar or in conflict with their own " + \
"Calendar; ignoring.\n"
except KeyError:
print "User id is not in the IP table."
except ValueError as excinfo:
print excinfo
print
#fail-safe catch in case something fucks up and we don't know what
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()[:]
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
finally:
return
if argv[0] == "cancel":
try:
appointment = _parse_appointment(argv)
if appointment in node._calendar:
node.delete(appointment)
else:
print "User cancelled appointment not in their own " + \
"Calendar; ignoring.\n"
except ValueError as excinfo:
print excinfo
print
finally:
return
print "Invalid command; supported commands = {clear,show,schedule,cancel}"
print
def set_verbosity(verbose_level=3):
"""Set the level of verbosity of the Preprocessing."""
if not type(verbose_level) == int:
raise TypeError("verbose_level must be an int")
if verbose_level < 0 or verbose_level > 4:
raise ValueError("verbose_level must be between 0 and 4")
verbosity = [
logging.CRITICAL,
logging.ERROR,
logging.WARNING,
logging.INFO,
logging.DEBUG]
logging.basicConfig(
format='%(asctime)s:\t %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
level=verbosity[verbose_level])
def main():
"""Quick tests."""
"schedule yaboi (user0,user1,user2,user3) (4:00pm,6:00pm) Friday"
"schedule xxboi (user1,user3,user4) (1:30am,11:30am) Wednesday"
"schedule beez (user0,user1,user2,user3) (4:00pm,6:00pm) Saturday"
"schedule beez2 (user0,user1,user2,user3) (3:00pm,4:00pm) Saturday"
"schedule zo (user1,user2,user3) (12:30pm,1:30pm) Friday"
"schedule hamma (user1,user2,user3) (1:00am,1:30am) Friday"
"cancel yaboi (user0,user1,user2,user3) (4:00pm,6:00pm) Friday"
"cancel xxboi (user1,user3,user4) (1:30am,11:30am) Wednesday"
a1 = Appointment("zo","Friday","12:30pm","1:30pm", [1, 2, 8])
a2 = Appointment("xxboi","Wednesday","1:30am","11:30am", [1, 4, 5])
a3 = Appointment("lol","saturday","11:30am","12:30pm", [1])
a4 = Appointment("yeee","MondAy","11:30am","12:30pm", [1])
a5 = Appointment("lolololol","Thursday","11:30am","12:30pm", [1])
c = Calendar()
c1 = Calendar(a1)
c2 = Calendar(a1, a2)
c3 = Calendar(a1, a2, a3)
c4 = Calendar(a1, a2, a3, a4)
c5 = Calendar(a1, a2, a3, a4, a5)
set_verbosity(4)
N = Node(int(sys.argv[1]))
'''
N._log[0] = c1
N._log[1] = c2
N._log[2] = c3
N._log[3] = c4
N._log[4] = c5
'''
N._calendar = c
#try to load a previous state of this Node
#'''
try:
N = Node.load()
except ValueError:
pass
except IOError:
pass
#'''
N.elect_leader(poll_time=6, timeout=3)
N.paxos()
print("@> Node Started")
while True:
message = raw_input('')
if message == "quit":
Node.save(N)
N.terminate()
break
else:
Node._parse_command(message, N)
if __name__ == "__main__":
main() | mit | -1,191,132,476,585,437,400 | 37.472868 | 97 | 0.503264 | false |
mizhi/mimecat | docs/conf.py | 1 | 7984 | # -*- coding: utf-8 -*-
#
# mimecat documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 11 12:01:01 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'mimecat'
copyright = u'2013, Mitchell Peabody'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mimecatdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'mimecat.tex', u'mimecat Documentation',
u'Mitchell Peabody', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mimecat', u'mimecat Documentation',
[u'Mitchell Peabody'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'mimecat', u'mimecat Documentation',
u'Mitchell Peabody', 'mimecat', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | -8,834,379,199,522,356,000 | 31.193548 | 80 | 0.70516 | false |
uw-it-aca/course-dashboards | coursedashboards/migrations/0014_auto_20200911_2040.py | 1 | 1189 | # Generated by Django 2.1.15 on 2020-09-11 20:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('coursedashboards', '0013_auto_20190108_2238'),
]
operations = [
migrations.CreateModel(
name='CourseGradeAverage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('curriculum', models.CharField(max_length=20)),
('course_number', models.PositiveSmallIntegerField()),
('grade', models.CharField(max_length=5, null=True)),
],
options={
'db_table': 'CourseGradeAverage',
},
),
migrations.AlterField(
model_name='course',
name='course_title',
field=models.CharField(default='', max_length=64),
),
migrations.AlterField(
model_name='term',
name='quarter',
field=models.CharField(choices=[('winter', 'Winter'), ('spring', 'Spring'), ('summer', 'Summer'), ('autumn', 'Autumn')], max_length=6),
),
]
| apache-2.0 | 5,090,606,726,105,432,000 | 32.971429 | 147 | 0.544996 | false |
kdar/rconsoft | setup.py | 1 | 2919 | #!/usr/bin/env python
# Read LICENSE for licensing details.
import sys
import textwrap
import glob
import shutil
import os
app_name = 'rconsoft'
#-----------------------------
# Do some checks
if sys.version_info < (2, 4, 0):
sys.stderr.write(app_name+' requires Python 2.4 or newer.\n')
sys.exit(-1)
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
#-----------------------------
# Get all of our packages
#plugin_names = find_packages('plugins')
#plugins = [app_name+'.plugins.'+p for p in plugin_names]
#packages = find_packages(exclude=['ez_setup', 'tests', 'tests.*', 'plugins', 'plugins.*'])+[app_name+'.plugins']+plugins
#package_dir = {app_name+'.plugins': 'plugins'}
#for name in plugin_names:
# package_dir[app_name+'.plugins.' + name] = 'plugins/' + name
packages = find_packages(exclude=['ez_setup', 'tests', 'tests.*'])
package_dir = {}
version = '0.1'
setup(
# Metadata
name=app_name,
version=version,
author='Kevin Darlington',
url='',
author_email='[email protected]',
download_url='',
description='A program to interact with HL servers.',
install_requires=[
'configobj', 'twisted', 'mechanize'
],
#install_requires=[
# "Routes>=1.10.1", "WebHelpers>=0.6.3", "Beaker>=1.1.3",
# "Paste>=1.7.2", "PasteDeploy>=1.3.2", "PasteScript>=1.7.3",
# "FormEncode>=1.2.1", "simplejson>=2.0.6", "decorator>=2.3.2",
# "nose>=0.10.4", "Mako>=0.2.4", "WebOb>=0.9.5", "WebError>=0.10.1",
# "WebTest>=1.1", "Tempita>=0.2",
# ],
# dependency_links=[
# "http://www.pylonshq.com/download/0.9.7"
# ],
# classifiers=[
# "Development Status :: 5 - Production/Stable",
# "Intended Audience :: Developers",
# "License :: OSI Approved :: BSD License",
# "Framework :: Pylons",
# "Programming Language :: Python",
# "Topic :: Internet :: WWW/HTTP",
# "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
# "Topic :: Internet :: WWW/HTTP :: WSGI",
# "Topic :: Software Development :: Libraries :: Python Modules",
# ],
# extras_require = {
# 'cheetah': ["Cheetah>=1.0", "TurboCheetah>=0.9.5"],
# 'myghty': ["Myghty>=1.1"],
# 'kid': ["kid>=0.9", "TurboKid>=0.9.1"],
# 'genshi': ["Genshi>=0.4.4"],
# 'jinja2': ['Jinja2'],
# 'full': [
# "docutils>=0.4", "elementtree>=1.2.6",
# "Pygments>=0.7", "Cheetah>=1.0",
# "TurboCheetah>=0.9.5", "kid>=0.9", "TurboKid>=0.9.1",
# 'Genshi>=0.4.4',
# ],
# },
# Installation data
packages=packages,
package_dir=package_dir,
include_package_data=True,
#scripts=['scripts/'+app_name],
entry_points = {
'console_scripts': [
'%s = %s.app:main_func' % (app_name, app_name)
]
}
)
| mit | 1,058,570,883,171,243,500 | 28.484848 | 121 | 0.564919 | false |
vim-python/python-syntax | tests/test.py | 1 | 4201 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Above the run-comment and file encoding comment.
# Comments.
# TODO FIXME XXX
# Keywords.
with break continue del return pass raise global assert lambda yield
for while if elif else import as try except finally
self cls mcs
from test import var as name
raise Exception from ex
yield from
def functionname
functionname()
functionname ()
functionname ()
test.functionname()
test.functionname ()
class Classname
class classname
class classname_cls
def функция
функция()
class Класс
class класс
# Keywords: Python 2
exec
print
# Keywords: Python 3
await
async def Test
async with
async for
# Builtin objects.
True False Ellipsis None NotImplemented
__debug__ __doc__ __file__ __name__ __package__ __loader__ __spec__ __path__ __cached__
# Bultin types
bool bytearray dict float frozenset int list object set str tuple
# Builtin functions
__import__()
abs()
all()
any()
bin()
bool()
breakpoint()
bytearray()
callable()
chr()
classmethod()
compile()
complex()
delattr()
dict()
dir()
divmod()
enumerate()
eval()
filter()
float()
format()
frozenset()
getattr()
globals()
hasattr()
hash()
help()
hex()
id()
input()
int()
isinstance()
issubclass()
iter()
len()
list()
locals()
map()
max()
memoryview()
min()
next()
object()
oct()
open()
ord()
pow()
property()
range()
repr()
reversed()
round()
set()
setattr()
slice()
sorted()
staticmethod()
str()
sum()
super()
tuple()
type()
vars()
zip()
# Builtin functions: Python 2
apply()
basestring()
buffer()
cmp()
coerce()
execfile()
file()
intern()
long()
raw_input()
reduce()
reload()
unichr()
unicode()
xrange()
print()
# Builtin functions: Python 3
ascii()
bytes()
exec()
print()
# Builtin exceptions and warnings.
BaseException Exception StandardError ArithmeticError LookupError
EnvironmentError
AssertionError AttributeError EOFError FloatingPointError GeneratorExit IOError
ImportError IndexError KeyError KeyboardInterrupt MemoryError NameError
NotImplementedError OSError OverflowError ReferenceError RuntimeError
StopIteration SyntaxError IndentationError TabError SystemError SystemExit
TypeError UnboundLocalError UnicodeError UnicodeEncodeError UnicodeDecodeError
UnicodeTranslateError ValueError WindowsError ZeroDivisionError
Warning UserWarning DeprecationWarning PendingDeprecationWarning SyntaxWarning
RuntimeWarning FutureWarning ImportWarning UnicodeWarning
# Decorators.
@ decoratorname
@ object.__init__(arg1, arg2)
@ декоратор
@ декоратор.décorateur
# Operators
and or in is not
- + * ** **- **+ **~ @ / // %
& | ^ ~ << >>
< <= == != >= >
= =- =+ =~
-= += *= **= @= /= //= %= :=
&= |= ^= ~= <<= >>=
->
# Erroneous operators
$ ?
===
-- ++ *** @@ /// %%
&& || ^^ ~~ <<< >>>
<== !== !!= >==
%- +- -+
# Numbers
0 1 2 9 10 0x1f 1. .3 12.34 0j 124j 34.2E-3 0b10 0o77 1023434 0x0
1_1 1_1.2_2 1_2j 0x_1f 0x1_f 34_56e-3 34_56e+3_1 0o7_7
# Erroneous numbers
077 100L 0xfffffffL 0L 08 0xk 0x 0b102 0o78 0o123LaB
0_ 0_1 0_x1f 0x1f_ 0_b77 0b77_ .2_ 1_j
# Strings
" test " ' test '
"test\
test"
'test\
test'
"""
test
\""""
'''
test
\''''
" \a\b\c\"\'\n\r \x34\077 \08 \xag"
r" \" \' "
"testтест"
b"test"
b"test\r\n\xffff"
b"тестtest"
br"test"
br"\a\b\n\r"
# Formattings
" %f "
b" %f "
"{0.name!r:b} {0[n]} {name!s: } {{test}} {{}} {} {.__len__:s}"
b"{0.name!r:b} {0[n]} {name!s: } {{test}} {{}} {} {.__len__:s}"
"${test} ${test ${test}aname $$$ $test+nope"
b"${test} ${test ${test}aname $$$ $test+nope"
f"{var}...{arr[123]} normal {var['{'] // 0xff} \"xzcb\" 'xzcb' {var['}'] + 1} text"
f"{expr1 if True or False else expr2} {None} wow {','.join(c.lower() for c in 'asdf')}"
f"hello {expr:.2f} yes {(lambda: 0b1)():#03x} lol {var!r}"
f'brackets: {{{ 1 + 2 }}} and {{{{ 3 + 4 }}}}'
fr'this {that}'
f"{f'{1+1}'}"
'{{ }}'
f"{"{test}"}" # FIXME: syntax error that should not be highlighted
f'{self.__name__}
# Doctests.
"""
Test:
>>> a = 5
>>> a
5
Test
"""
'''
Test:
>>> a = 5
>>> a
5
Test
'''
# Erroneous variable names
6xav
# Indentation errors.
break
# Trailing space errors.
break
"""
test
"""
| mit | -3,624,663,839,608,986,000 | 13.02027 | 87 | 0.63253 | false |
jbenden/ansible | lib/ansible/modules/cloud/misc/serverless.py | 1 | 6837 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Ryan Scott Brown <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: serverless
short_description: Manages a Serverless Framework project
description:
- Provides support for managing Serverless Framework (https://serverless.com/) project deployments and stacks.
version_added: "2.3"
options:
state:
choices: ['present', 'absent']
description:
- Goal state of given stage/project
required: false
default: present
serverless_bin_path:
description:
      - The path of a serverless framework binary relative to the 'service_path', e.g. node_modules/.bin/serverless
required: false
version_added: "2.4"
service_path:
description:
- The path to the root of the Serverless Service to be operated on.
required: true
stage:
description:
- The name of the serverless framework project stage to deploy to. This uses the serverless framework default "dev".
required: false
functions:
description:
- A list of specific functions to deploy. If this is not provided, all functions in the service will be deployed.
required: false
default: []
region:
description:
- AWS region to deploy the service to
required: false
default: us-east-1
deploy:
description:
- Whether or not to deploy artifacts after building them. When this option is `false` all the functions will be built, but no stack update will be
run to send them out. This is mostly useful for generating artifacts to be stored/deployed elsewhere.
required: false
default: true
notes:
- Currently, the `serverless` command must be in the path of the node executing the task. In the future this may be a flag.
requirements: [ "serverless", "yaml" ]
author: "Ryan Scott Brown @ryansb"
'''
EXAMPLES = """
# Basic deploy of a service
- serverless:
service_path: '{{ project_dir }}'
state: present
# Deploy specific functions
- serverless:
service_path: '{{ project_dir }}'
functions:
- my_func_one
- my_func_two
# deploy a project, then pull its resource list back into Ansible
- serverless:
stage: dev
region: us-east-1
service_path: '{{ project_dir }}'
register: sls
# The cloudformation stack is always named the same as the full service, so the
# cloudformation_facts module can get a full list of the stack resources, as
# well as stack events and outputs
- cloudformation_facts:
region: us-east-1
stack_name: '{{ sls.service_name }}'
stack_resources: true
# Deploy a project but use a locally installed serverless binary instead of the global serverless binary
- serverless:
stage: dev
region: us-east-1
service_path: '{{ project_dir }}'
serverless_bin_path: node_modules/.bin/serverless
"""
RETURN = """
service_name:
type: string
    description: The service name (including stage suffix) from serverless.yml that the command operated on.
returned: always
sample: my-fancy-service-dev
state:
type: string
description: Whether the stack for the serverless project is present/absent.
returned: always
command:
type: string
description: Full `serverless` command run by this module, in case you want to re-run the command outside the module.
returned: always
sample: serverless deploy --stage production
"""
import os
import traceback
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
from ansible.module_utils.basic import AnsibleModule
def read_serverless_config(module):
path = module.params.get('service_path')
try:
with open(os.path.join(path, 'serverless.yml')) as sls_config:
config = yaml.safe_load(sls_config.read())
return config
except IOError as e:
module.fail_json(msg="Could not open serverless.yml in {}. err: {}".format(path, str(e)), exception=traceback.format_exc())
module.fail_json(msg="Failed to open serverless config at {}".format(
os.path.join(path, 'serverless.yml')))
def get_service_name(module, stage):
config = read_serverless_config(module)
if config.get('service') is None:
module.fail_json(msg="Could not read `service` key from serverless.yml file")
if stage:
return "{}-{}".format(config['service'], stage)
return "{}-{}".format(config['service'], config.get('stage', 'dev'))
def main():
module = AnsibleModule(
argument_spec=dict(
service_path = dict(required=True, type='path'),
state = dict(default='present', choices=['present', 'absent'], required=False),
functions = dict(type='list', required=False),
region = dict(default='', required=False),
stage = dict(default='', required=False),
deploy = dict(default=True, type='bool', required=False),
serverless_bin_path = dict(required=False, type='path')
),
)
if not HAS_YAML:
module.fail_json(msg='yaml is required for this module')
service_path = module.params.get('service_path')
state = module.params.get('state')
functions = module.params.get('functions')
region = module.params.get('region')
stage = module.params.get('stage')
deploy = module.params.get('deploy', True)
serverless_bin_path = module.params.get('serverless_bin_path')
if serverless_bin_path is not None:
command = serverless_bin_path + " "
else:
command = "serverless "
if state == 'present':
command += 'deploy '
elif state == 'absent':
command += 'remove '
else:
module.fail_json(msg="State must either be 'present' or 'absent'. Received: {}".format(state))
if not deploy and state == 'present':
command += '--noDeploy '
if region:
command += '--region {} '.format(region)
if stage:
command += '--stage {} '.format(stage)
rc, out, err = module.run_command(command, cwd=service_path)
if rc != 0:
if state == 'absent' and "-{}' does not exist".format(stage) in out:
module.exit_json(changed=False, state='absent', command=command,
out=out, service_name=get_service_name(module, stage))
module.fail_json(msg="Failure when executing Serverless command. Exited {}.\nstdout: {}\nstderr: {}".format(rc, out, err))
# gather some facts about the deployment
module.exit_json(changed=True, state='present', out=out, command=command,
service_name=get_service_name(module, stage))
if __name__ == '__main__':
main()
| gpl-3.0 | 280,174,094,090,039,230 | 31.402844 | 152 | 0.655404 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/models/virtual_machine_scale_set_update_os_disk.py | 1 | 2121 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineScaleSetUpdateOSDisk(Model):
"""Describes virtual machine scale set operating system disk Update Object.
This should be used for Updating VMSS OS Disk.
:param caching: The caching type. Possible values include: 'None',
'ReadOnly', 'ReadWrite'
:type caching: str or ~azure.mgmt.compute.v2017_03_30.models.CachingTypes
:param image: The Source User Image VirtualHardDisk. This VirtualHardDisk
will be copied before using it to attach to the Virtual Machine. If
SourceImage is provided, the destination VirtualHardDisk should not exist.
:type image: ~azure.mgmt.compute.v2017_03_30.models.VirtualHardDisk
:param vhd_containers: The list of virtual hard disk container uris.
:type vhd_containers: list[str]
:param managed_disk: The managed disk parameters.
:type managed_disk:
~azure.mgmt.compute.v2017_03_30.models.VirtualMachineScaleSetManagedDiskParameters
"""
_attribute_map = {
'caching': {'key': 'caching', 'type': 'CachingTypes'},
'image': {'key': 'image', 'type': 'VirtualHardDisk'},
'vhd_containers': {'key': 'vhdContainers', 'type': '[str]'},
'managed_disk': {'key': 'managedDisk', 'type': 'VirtualMachineScaleSetManagedDiskParameters'},
}
def __init__(self, **kwargs):
super(VirtualMachineScaleSetUpdateOSDisk, self).__init__(**kwargs)
self.caching = kwargs.get('caching', None)
self.image = kwargs.get('image', None)
self.vhd_containers = kwargs.get('vhd_containers', None)
self.managed_disk = kwargs.get('managed_disk', None)
| mit | -2,492,063,701,815,970,000 | 46.133333 | 102 | 0.654408 | false |
mjsauvinen/P4UL | pyRaster/tif2NumpyTile.py | 1 | 1956 | #!/usr/bin/env python3
import sys
import argparse
import numpy as np
from mapTools import *
from utilities import filesFromList, writeLog
from plotTools import addImagePlot
import matplotlib.pyplot as plt
'''
Author: Mikko Auvinen
[email protected]
University of Helsinki &
Finnish Meteorological Institute
'''
#==========================================================#
parser = argparse.ArgumentParser(prog='tif2NumpyTile.py')
parser.add_argument("-f", "--filename",type=str, help="Input tif-image file name.")
parser.add_argument("-fo", "--fileout",type=str, help="Output npz file name.")
parser.add_argument("-r", "--reso",type=float, help="Resolution of the tif-image.")
parser.add_argument("-xo", "--xorig",type=float, nargs=2,default=[0.,0.],\
help="Coords [N,E] of the tif-images top-left corner. Default=[0,0]")
parser.add_argument("-p", "--printOn", help="Print the numpy array data.",\
action="store_true", default=False)
parser.add_argument("-pp", "--printOnly", help="Only print the numpy array data. Don't save.",\
action="store_true", default=False)
parser.add_argument("-s", "--scale",type=float, default=1.,\
help="Scale factor for the output. Default=1.")
args = parser.parse_args()
writeLog( parser, args, args.printOnly )
#==========================================================#
# Renaming, nothing more.
filename = args.filename
fileout = args.fileout
reso = args.reso
ROrig = args.xorig
printOn = args.printOn
printOnly = args.printOnly
sc = args.scale
R = openTifAsNumpy(filename)
dPx = np.array([sc*reso, sc*reso])
Rdict = {'R' : R, 'GlobOrig' : ROrig, 'gridRot' : 0., 'dPx' : dPx}
if( not printOnly ):
print(' Writing file {} ... '.format(fileout) )
saveTileAsNumpyZ( fileout, Rdict)
print(' ... done! ')
if( printOn or printOnly ):
pfig = plt.figure(num=1, figsize=(10.,10.))
pfig = addImagePlot( pfig, R, fileout, gridOn=True )
plt.show()
| mit | 4,073,113,538,880,150,500 | 34.563636 | 95 | 0.641616 | false |
Kronopt/pipUpdateAll | pipUpdateAll.py | 1 | 3682 | #!python2
# coding: utf-8
"""
PIP UPDATE ALL
Updates outdated python modules using pip
Checks outdated modules using "pip list --outdated --format columns", parses that column to only show relevant
information (name, current version, new version) and then updates all detected modules using "pip install -U" followed
by each module's name
DEPENDENCIES:
- Python 2.7
- pip
HOW TO RUN:
- Directly, by double clicking the script.
"""
import subprocess
import sys
from time import sleep
__author__ = 'Pedro HC David, https://github.com/Kronopt'
__credits__ = ['Pedro HC David']
__version__ = '1.0'
__date__ = '02:40h, 16/12/2016'
__status__ = 'Finished'
def pip_list_columns_parser(pip_list_columns_format_output):
"""
Parses the output of "pip list --outdated --format columns" into a dictionary
PARAMETERS:
pip_list_columns_format_output : str
output of "pip list --outdated --format columns"
    RETURNS: {module_name : (current_version, new_version)}
Module_name associated with its current_version and new_version
"""
# Column format:
#
# Package Version Latest Type
# ------------- --------- --------- ----
# module_1_name version_1 version_2 type
# module_2_name version_1 version_2 type
final_dictionary = {}
# removes "Package", "Version", etc and "----"
modules_and_versions = pip_list_columns_format_output.split()[8:]
number_of_modules = len(modules_and_versions)/4
# parses list
for module_number in xrange(number_of_modules):
list_position = module_number*4
final_dictionary[modules_and_versions[list_position]] = (modules_and_versions[list_position+1],
modules_and_versions[list_position+2])
return final_dictionary
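# Illustrative example (hypothetical pip output, not real data): given column
# output such as
#   Package    Version   Latest    Type
#   ---------- --------- --------- ----
#   requests   2.18.4    2.19.1    wheel
# pip_list_columns_parser() would return
#   {"requests": ("2.18.4", "2.19.1")}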
if __name__ == '__main__':
# location of python executable, avoids dependency on windows PATH
python_executable = sys.executable
# checking if pip is installed
try:
pip_version_output = subprocess.check_output([python_executable, "-m", "pip", "--version"])
pip_version = pip_version_output.split()[1]
except subprocess.CalledProcessError:
print "Python cannot locate pip..."
sys.exit()
print "Modules to be updated using pip version", pip_version + ":"
# Get modules out of date
modules_to_update_columns = subprocess.check_output(
[python_executable, "-m", "pip", "list", "--outdated", "--format", "columns"])
# dictionary in the format {module_name : (current_version, new_version)}
modules_to_update = pip_list_columns_parser(modules_to_update_columns)
if len(modules_to_update) > 0:
module_names = []
# shows modules out of date and each respective current versions and new versions
for module_name, (current_version, new_version) in sorted(modules_to_update.iteritems()):
print module_name + ":", current_version, "->", new_version
module_names.append(module_name)
print
no_correct_answer_given_yet = True
while no_correct_answer_given_yet:
answer = raw_input("Do you wish to continue (y/n)? ")
if answer == "y":
# call "pip install -U" with every outdated module name as parameters
subprocess.call([python_executable, "-m", "pip", "install", "--upgrade"] + module_names)
no_correct_answer_given_yet = False
elif answer == "n":
print "Update canceled"
no_correct_answer_given_yet = False
    else:
        print "All modules are up to date"
    sleep(2)
| mit | 5,579,989,667,137,161,000 | 33.735849 | 118 | 0.625204 | false |
leebird/legonlp | utils/runner.py | 1 | 1644 | import sys
import os
import codecs
class Runner(object):
runnerName = None
def __init__(self):
'''
read input files and process
run directly on input files
run directly on input dir
process output
'''
pass
def run(self, args):
'''
inputs: a list of (dir, suffix) pairs
outputs: a list of (dir, suffix) pairs
Note that dir should be an absolute path
'''
raise NotImplementedError
def read_file(self, filepath):
if not os.path.isfile(filepath):
print >> sys.stderr, 'file not found: ' + filepath
return None
f = codecs.open(filepath, 'r', 'utf-8')
text = f.read().strip()
f.close()
return text
def write_file(self, content, filepath):
f = codecs.open(filepath, 'w', 'utf-8')
f.write(content)
f.close()
def get_files(self, dirname, sux, docList):
'''
get a list of path for the docList
'''
return [os.path.join(dirname, doc + sux) for doc in docList]
def get_io_files(self, dirsux, docList):
"""
get a zipped list of paths for all the dirs and the docList
:param dirsux: a list of (dir, suffix) pairs
:type dirsux: list
:param docList: a list of doc name
:type docList: list
:return: a zipped list of dir+file+suffix tuples
:rtype: list
"""
res = []
for ds in dirsux:
dirname, sux = ds[:2]
res.append(self.get_files(dirname, sux, docList))
return zip(*res)
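    # Illustrative example (hypothetical paths): with
    #   dirsux  = [('/data/txt', '.txt'), ('/data/ann', '.ann')]
    #   docList = ['doc1', 'doc2']
    # get_io_files() returns
    #   [('/data/txt/doc1.txt', '/data/ann/doc1.ann'),
    #    ('/data/txt/doc2.txt', '/data/ann/doc2.ann')]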
| gpl-2.0 | 7,072,275,004,045,526,000 | 23.909091 | 68 | 0.544404 | false |
stlcoin/stl | contrib/spendfrom/spendfrom.py | 1 | 10053 | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a stlcoind or Stlcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting STL values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_stlcoin_config(dbdir):
"""Read the stlcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
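        """File-like wrapper that prepends a fake "[all]" section header so
        SafeConfigParser can parse the section-less stlcoin.conf; anything
        after a '#' on a line is stripped as a comment."""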
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "stlcoin.conf"))))
return dict(config_parser.items("all"))
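# Illustrative example (hypothetical stlcoin.conf contents):
#   rpcuser=someuser
#   rpcpassword=somepassword
#   testnet=1
# read_stlcoin_config() would return
#   {'rpcuser': 'someuser', 'rpcpassword': 'somepassword', 'testnet': '1'}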
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 17069 if testnet else 7069
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the stlcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(stlcoind):
info = stlcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
stlcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = stlcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(stlcoind):
address_summary = dict()
address_to_account = dict()
for info in stlcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = stlcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = stlcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
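# Illustrative example (hypothetical amounts): with needed = Decimal("1.5") and
# two unspent inputs of 1.0 STL each, select_coins() returns both outputs and a
# change amount of Decimal("0.5").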
def create_tx(stlcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(stlcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f STL available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to stlcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = stlcoind.createrawtransaction(inputs, outputs)
signed_rawtx = stlcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(stlcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = stlcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(stlcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = stlcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(stlcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # fee actually paid by this transaction
        fee = total_in - total_out
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of stlcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
    config = read_stlcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
stlcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(stlcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(stlcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(stlcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(stlcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = stlcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| mit | 7,158,991,375,966,233,000 | 36.651685 | 111 | 0.619815 | false |
aarpon/obit_microscopy_core_technology | core-plugins/microscopy/1/dss/drop-boxes/MicroscopyDropbox/LeicaTIFFSeriesCompositeDatasetConfig.py | 1 | 12298 | # -*- coding: utf-8 -*-
"""
Created on Feb 20, 2014
@author: Aaron Ponti
"""
import re
import random
from MicroscopyCompositeDatasetConfig import MicroscopyCompositeDatasetConfig
from LeicaTIFFSeriesMaximumIntensityProjectionGenerationAlgorithm import LeicaTIFFSeriesMaximumIntensityProjectionGenerationAlgorithm
from ch.systemsx.cisd.openbis.dss.etl.dto.api import ChannelColor
from ch.systemsx.cisd.openbis.dss.etl.dto.api import ImageIdentifier
from ch.systemsx.cisd.openbis.dss.etl.dto.api import ImageMetadata
from ch.systemsx.cisd.openbis.dss.etl.dto.api import OriginalDataStorageFormat
from ch.systemsx.cisd.openbis.dss.etl.dto.api import ChannelColorRGB
from ch.systemsx.cisd.openbis.dss.etl.dto.api import Channel
import xml.etree.ElementTree as ET
from GlobalSettings import GlobalSettings
class LeicaTIFFSeriesCompositeDatasetConfig(MicroscopyCompositeDatasetConfig):
"""Image data configuration class for Leica TIFF series."""
_DEBUG = False
# List of metadata attributes obtained either from the settings XML
# file generated by the Annotation Tool or returned by
# BioFormatsProcessor.getMetadata(asXML=False)
# (for all series in the file, sorted by series).
_allSeriesMetadata = None
# Number of the series to register (for a multi-series dataset).
_seriesNum = 0
# Series indices (since they might not always start from zero and
# grow monotonically.
_seriesIndices = []
# Logger
_logger = None
# Dataset base name
_basename = ""
# Metadata folder
_metadataFolder = ""
# Maintain a metadata array
_metadata = []
# Regular expression pattern
_pattern = re.compile("^(.*?)" + \
"((_Series|_s)(\d.*?))?" + \
"(_t(\d.*?))?" + \
"_z(\d.*?)" + \
"_ch(\d.*?)" + \
"\.tif{1,2}$", re.IGNORECASE)
def __init__(self, allSeriesMetadata, seriesIndices, logger, seriesNum=0):
"""Constructor.
@param allSeriesMetadata: list of metadata attributes generated either
by the Annotation Tool and parsed from the
settings XML file, or from BioFormatsProcessor
and returned via:
BioFormatsProcessor.getMetadataXML(asXML=False)
@param seriesIndices: list of known series indices (do not
necessarily need to start at 0 and increase
monotonically by one; could be [22, 30, 32]
@param seriesNum: Int Number of the series to register. All
other series in the file will be ignored.
seriesNum MUST BE CONTAINED in seriesIndices.
@param logger: logger object
"""
# Store the logger
self._logger = logger
# Store the series metadata
self._allSeriesMetadata = allSeriesMetadata
# Store the seriesIndices
if type(seriesIndices) == str:
seriesIndices = seriesIndices.split(",")
self._seriesIndices = map(int, seriesIndices)
# Store the series number: make sure that it belongs to seriesIndices
self._seriesNum = int(seriesNum)
try:
self._seriesIndices.index(self._seriesNum)
except:
raise("seriesNum (" + str(self._seriesNum) + ") MUST be contained " + \
"in seriesIndices " + str(self._seriesIndices) + "!")
# This is microscopy data
self.setMicroscopyData(True)
# Store raw data in original form
self.setOriginalDataStorageFormat(OriginalDataStorageFormat.UNCHANGED)
# Set the image library
self.setImageLibrary("BioFormats")
# Disable thumbnail generation by ImageMagick
self.setUseImageMagicToGenerateThumbnails(False)
# Specify resolution of image representations explicitly
resolutions = GlobalSettings.ImageResolutions
if not resolutions:
self._logger.info("Skipping thumbnails generation.")
self.setGenerateThumbnails(False)
else:
self._logger.info("Creating thumbnails at resolutions: " + str(resolutions))
self.setGenerateImageRepresentationsUsingImageResolutions(resolutions)
self.setGenerateThumbnails(True)
# Set the recognized extensions -- currently just tif(f)
self.setRecognizedImageExtensions(["tif", "tiff"])
# Set the dataset type
self.setDataSetType("MICROSCOPY_IMG")
# Create representative image (MIP) for the first series only
if self._seriesIndices.index(self._seriesNum) == 0:
self.setImageGenerationAlgorithm(
LeicaTIFFSeriesMaximumIntensityProjectionGenerationAlgorithm(
"MICROSCOPY_IMG_THUMBNAIL", 256, 256, "thumbnail.png"))
def createChannel(self, channelCode):
"""Create a channel from the channelCode with the name as read from
the file via the MetadataReader and the color (RGB) as read.
@param channelCode Code of the channel as generated by extractImagesMetadata().
"""
# Get the indices of series and channel from the channel code
(seriesIndx, channelIndx) = self._getSeriesAndChannelNumbers(channelCode)
# Get the channel name
name = self._getChannelName(seriesIndx, channelIndx)
# Get the channel color (RGB)
colorRGB = self._getChannelColor(seriesIndx, channelIndx)
if self._DEBUG:
self._logger.info("LEICATIFFSERIESCOMPOSITEDATASETCONFIG::createChannel(): " +
"channel (s = " + str(seriesIndx) + ", c = " +
str(channelIndx) + ") has code " + channelCode +
", color (" + str(colorRGB) + " and name " + name)
# Return the channel with given name and color (the code is set to
# be the same as the channel name).
return Channel(channelCode, name, colorRGB)
def extractImagesMetadata(self, imagePath, imageIdentifiers):
"""Overrides extractImageMetadata method making sure to store
both series and channel indices in the channel code to be reused
later to extract color information and other metadata.
The channel code is in the form SERIES-(\d+)_CHANNEL-(\d+).
Only metadata for the relevant series number is returned!
@param imagePath Full path to the file to process
@param imageIdentifiers Array of ImageIdentifier's
@see constructor.
"""
# Extract the relevant information from the file name - the image
# identifiers in this case do not carry any useful information.
m = self._pattern.match(imagePath)
if m is None:
err = "MICROSCOPYCOMPOSITEDATASETCONFIG::extractImageMetadata(): " + \
"unexpected file name " + str(imagePath)
self._logger.error(err)
raise Exception(err)
# Get and store the base name
basename = m.group(1)
if self._basename == "" or self._basename != basename:
self._basename = basename
# The series number is not always defined in the file name.
# In the regex, the group(2) optionally matches _s{digits};
# in case group(2) is not None, the actual series number is
# stored in group(4).
if m.group(2) is None:
series = 0
else:
series = int(m.group(4))
# Make sure to process only the relevant series
if series != self._seriesNum:
return []
# The time index is also not always specified.
if m.group(5) is None:
timepoint = 0
else:
timepoint = int(m.group(6))
# Plane number is always specified
plane = int(m.group(7))
# Channel number is always specified
ch = int(m.group(8))
# Build the channel code
channelCode = "SERIES-" + str(series) + "_CHANNEL-" + str(ch)
if self._DEBUG:
msg = "Current file = " + imagePath + " has series = " + \
str(series) + " timepoint = " + str(timepoint) + " plane = " + \
str(plane) + " channel = " + str(ch) + "; channelCode = " + \
str(channelCode)
self._logger.info(msg)
# Initialize Metadata array
Metadata = []
# Initialize a new ImageMetadata object
imageMetadata = ImageMetadata();
# Fill in all information
imageMetadata.imageIdentifier = imageIdentifiers.get(0)
imageMetadata.seriesNumber = series
imageMetadata.timepoint = timepoint
imageMetadata.depth = plane
imageMetadata.channelCode = channelCode
imageMetadata.tileNumber = 1 # + self._seriesNum
imageMetadata.well = "IGNORED"
# Now return the image metadata object in an array
Metadata.append(imageMetadata)
return Metadata
def _getChannelName(self, seriesIndx, channelIndx):
"""Returns the channel name (from the parsed metadata) for
a given channel in a given series."
"""
# TODO: Get the real channel name from the metadata!
# Build name of the channel from series and channel indices
name = "SERIES_" + str(seriesIndx) + "_CHANNEL_" + str(channelIndx)
return name
def _getChannelColor(self, seriesIndx, channelIndx):
"""Returns the channel color (from the parsed metadata) for
a given channel in a given series."
"""
if self._DEBUG:
self._logger.info("Trying to find seriesIndx = " + \
str(seriesIndx) + " in seriesIndices = " + \
str(self._seriesIndices))
# Get the position in the seriesIndices list
indx = self._seriesIndices.index(int(seriesIndx))
# Get the metadata for the requested series
metadata = self._allSeriesMetadata[indx]
# Get the metadata
key = "channelColor" + str(channelIndx)
color = metadata[key]
if color is not None:
color = color.split(",")
R = int(255 * float(color[0]))
G = int(255 * float(color[1]))
B = int(255 * float(color[2]))
else:
if channelIndx == 0:
R = 255
G = 0
B = 0
elif channelIndx == 1:
R = 0
G = 255
B = 0
elif channelIndx == 2:
R = 0
G = 0
B = 255
else:
                R = random.randint(0, 255)
                G = random.randint(0, 255)
                B = random.randint(0, 255)
# Create the ChannelColorRGB object
colorRGB = ChannelColorRGB(R, G, B)
# Return it
return colorRGB
def _getSeriesAndChannelNumbers(self, channelCode):
"""Extract series and channel number from channel code in
the form SERIES-(\d+)_CHANNEL-(\d+) to a tuple
(seriesIndx, channelIndx).
@param channelCode Code of the channel as generated by extractImagesMetadata().
"""
# Get the indices of series and channel from the channel code
p = re.compile("SERIES-(\d+)_CHANNEL-(\d+)")
m = p.match(channelCode)
if m is None or len(m.groups()) != 2:
err = "MICROSCOPYCOMPOSITEDATASETCONFIG::_getSeriesAndChannelNumbers(): " + \
"Could not extract series and channel number!"
self._logger.error(err)
raise Exception(err)
# Now assign the indices
seriesIndx = int(m.group(1))
channelIndx = int(m.group(2))
if self._DEBUG:
self._logger.info("Current channel code " + channelCode + \
"corresponds to series = " + str(seriesIndx) + \
" and channel = " + str(channelIndx))
# Return them
return seriesIndx, channelIndx
| apache-2.0 | -3,528,645,985,797,396,500 | 35.930931 | 133 | 0.594406 | false |
crossbario/crossbar-fabric-cli | cbsh/idl/loader.py | 1 | 16965 | #####################################################################################
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Unless a separate license agreement exists between you and Crossbar.io GmbH (e.g.
# you have purchased a commercial license), the license terms below apply.
#
# Should you enter into a separate license agreement after having received a copy of
# this software, then the terms of such license agreement replace the terms below at
# the time at which such license agreement becomes effective.
#
# In case a separate license agreement ends, and such agreement ends without being
# replaced by another separate license agreement, the license terms below apply
# from the time at which said agreement ends.
#
# LICENSE TERMS
#
# This program is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License, version 3, as published by the
# Free Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <https://www.gnu.org/licenses/gpl-3.0.en.html>.
#
#####################################################################################
import os
import json
import argparse
import hashlib
import pprint
from typing import Dict, Any # noqa
import six
import click
from cbsh.util import hl
from cbsh.reflection import Schema
import txaio
txaio.use_asyncio()
def extract_attributes(item, allowed_attributes=None):
num_attrs = item.AttributesLength()
attrs = [item.Attributes(i) for i in range(num_attrs)]
attrs_dict = {
x.Key().decode('utf8'): x.Value().decode('utf8')
if x.Value().decode('utf8') not in ['0'] else None
for x in attrs
}
if allowed_attributes:
for attr in attrs_dict:
if attr not in allowed_attributes:
raise Exception(
'invalid XBR attribute "{}" - must be one of {}'.format(
attr, allowed_attributes))
return attrs_dict
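# Illustrative example (hypothetical attributes): for an item carrying the
# FlatBuffers attributes (type="interface", uuid="1234-..."), extract_attributes()
# returns {'type': 'interface', 'uuid': '1234-...'}; passing
# allowed_attributes=['type'] would raise because "uuid" is not allowed.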
def extract_docs(item):
num_docs = item.DocumentationLength()
item_docs = [
item.Documentation(i).decode('utf8').strip() for i in range(num_docs)
]
return item_docs
INTERFACE_ATTRS = ['type', 'uuid']
INTERFACE_MEMBER_ATTRS = ['type', 'stream']
INTERFACE_MEMBER_TYPES = ['procedure', 'topic']
INTERFACE_MEMBER_STREAM_VALUES = [None, 'in', 'out', 'inout']
EXTRACT_ATTRS_RAW = False
_BASETYPE_ID2NAME = {
None: 'Unknown',
0: 'none',
1: 'utype',
2: 'bool',
3: 'int8',
4: 'uint8',
5: 'int16',
6: 'uint16',
7: 'int32',
8: 'uint32',
9: 'int64',
10: 'uint64',
11: 'float',
12: 'double',
13: 'string',
14: 'vector',
15: 'object',
16: 'union',
}
def read_reflection_schema(buf, log=None):
"""
Read a binary FlatBuffers buffer that is typed according to the FlatBuffers
reflection schema.
The function returns extracted information in a plain, JSON serializable dict.
"""
if not log:
log = txaio.make_logger()
_schema = Schema.GetRootAsSchema(buf, 0)
_root = _schema.RootTable()
if _root:
root_name = _root.Name().decode('utf8').strip()
else:
root_name = None
_file_ident = _schema.FileIdent().decode('utf8').strip()
if _file_ident == '':
_file_ident = None
_file_ext = _schema.FileExt().decode('utf8').strip()
if _file_ext == '':
_file_ext = None
m = hashlib.sha256()
m.update(buf)
schema_meta = {
'bfbs_size': len(buf),
'bfbs_sha256': m.hexdigest(),
'file_ident': _file_ident,
'file_ext': _file_ext,
'root': root_name,
}
schema = None # type: dict
schema = {
'meta': schema_meta,
'tables': [],
'enums': [],
'services': [],
}
schema_by_uri = None # type: dict
schema_by_uri = {
'meta': schema_meta,
'types': {},
}
enums = []
objects = []
services = []
fqn2type = dict() # type: Dict[str, Any]
enum_cnt = 0
object_cnt = 0
service_cnt = 0
typerefs_cnt = 0
typerefs_error_cnt = 0
for i in range(_schema.EnumsLength()):
item = _schema.Enums(i)
name = item.Name().decode('utf8')
if name in fqn2type:
            raise Exception('duplicate name "{}"'.format(name))
        fqn2type[name] = item
        enum_cnt += 1
for i in range(_schema.ObjectsLength()):
item = _schema.Objects(i)
name = item.Name().decode('utf8')
if name in fqn2type:
            raise Exception('duplicate name "{}"'.format(name))
        fqn2type[name] = item
        object_cnt += 1
for i in range(_schema.ServicesLength()):
item = _schema.Services(i)
name = item.Name().decode('utf8')
if name in fqn2type:
            raise Exception('duplicate name "{}"'.format(name))
        fqn2type[name] = item
        service_cnt += 1
log.info('Processing schema with {} enums, {} objects and {} services ...'.
format(enum_cnt, object_cnt, service_cnt))
# enums
#
num_enums = _schema.EnumsLength()
for i in range(num_enums):
# extract enum base information
#
_enum = _schema.Enums(i)
enum_name = _enum.Name().decode('utf8')
log.debug('processing enum {} ("{}")'.format(i, enum_name))
enum = {
# '_index': i,
'type': 'enum',
'name': enum_name,
'docs': extract_docs(_enum),
}
if EXTRACT_ATTRS_RAW:
enum['attr'] = extract_attributes(_enum)
# extract enum values
#
enum_values_dict = dict() # type: Dict[str, Any]
for j in range(_enum.ValuesLength()):
_enum_value = _enum.Values(j)
enum_value_name = _enum_value.Name().decode('utf8')
enum_value = {
'docs': extract_docs(_enum_value),
# enum values cannot have attributes
}
if enum_value_name in enum_values_dict:
raise Exception(
'duplicate enum value "{}"'.format(enum_value_name))
enum_values_dict[enum_value_name] = enum_value
enum['values'] = enum_values_dict
if enum_name in schema_by_uri['types']:
raise Exception(
'unexpected duplicate definition for qualified name "{}"'.
format(enum_name))
enums.append(enum)
schema_by_uri['types'][enum_name] = enum
# objects (tables/structs)
#
for i in range(_schema.ObjectsLength()):
_obj = _schema.Objects(i)
obj_name = _obj.Name().decode('utf8')
object_type = 'struct' if _obj.IsStruct() else 'table'
obj = {
# '_index': i,
'type': object_type,
'name': obj_name,
'docs': extract_docs(_obj),
}
if EXTRACT_ATTRS_RAW:
obj['attr'] = extract_attributes(_obj)
# extract fields
num_fields = _obj.FieldsLength()
fields = []
fields_by_name = {}
for j in range(num_fields):
_field = _obj.Fields(j)
field_name = _field.Name().decode('utf8')
log.debug('processing field {} ("{}")'.format(i, field_name))
_field_type = _field.Type()
_field_index = int(_field_type.Index())
_field_base_type = _BASETYPE_ID2NAME.get(_field_type.BaseType(),
None)
_field_element = _BASETYPE_ID2NAME.get(_field_type.Element(), None)
if _field_element == 'none':
_field_element = None
# FIXME
# if _field_element == 'object':
# el = _schema.Objects(_field_type.Element())
# if isinstance(el, reflection.Type) and hasattr(el, 'IsStruct'):
# _field_element = 'struct' if el.Element().IsStruct(
# ) else 'table'
field = {
# '_index': j,
'name': field_name,
'id': int(_field.Id()),
'offset': int(_field.Offset()),
'base_type': _field_base_type,
}
if _field_element:
# vector
field['element_type'] = _field_element
if _field_index != -1:
# field['field_index'] = _field_index
if _field_base_type in [
'object', 'struct'
] or _field_element in ['object', 'struct']:
# obj/struct
if _field_index < _schema.ObjectsLength():
l_obj = _schema.Objects(_field_index)
l_obj_ref = _obj.Name().decode('utf8')
field['ref_category'] = 'struct' if l_obj.IsStruct(
) else 'table'
field['ref_type'] = l_obj_ref
typerefs_cnt += 1
else:
log.info(
'WARNING - referenced table/struct for index {} ("{}.{}") not found'.
format(_field_index, obj_name, field_name))
field['ref_category'] = 'object'
field['ref_type'] = None
typerefs_error_cnt += 1
elif _field_base_type in [
'utype', 'bool', 'int8', 'uint8', 'int16', 'uint16',
'int32', 'uint32', 'int64', 'uint64', 'float',
'double', 'string'
]:
# enum
field['ref_category'] = 'enum'
if _field_index < _schema.EnumsLength():
_enum_ref = _schema.Enums(_field_index).Name().decode(
'utf8')
field['ref_type'] = _enum_ref
typerefs_cnt += 1
else:
log.info('WARNING - referenced enum not found')
field['ref_type'] = None
typerefs_error_cnt += 1
else:
raise Exception('unhandled field type: {} {} {} {}'.format(
field_name, _field_base_type, _field_element,
_field_index))
field_docs = extract_docs(_field)
if field_docs:
field['docs'] = field_docs
if EXTRACT_ATTRS_RAW:
_field_attrs = extract_attributes(_field)
if _field_attrs:
field['attr'] = _field_attrs
fields.append(field)
fields_by_name[field_name] = field
obj['fields'] = fields_by_name
if obj['name'] in schema_by_uri['types']:
raise Exception(
'unexpected duplicate definition for qualified name "{}"'.
format(field['name']))
# always append the object here, so we can dereference indexes
# correctly
objects.append(obj)
# skip our "void marker"
if False and obj_name in ['Void']:
pass
else:
schema_by_uri['types'][obj['name']] = obj
# iterate over services
#
num_services = _schema.ServicesLength()
for i in range(num_services):
_service = _schema.Services(i)
service_name = _service.Name().decode('utf8')
service_attrs_dict = extract_attributes(_service, INTERFACE_ATTRS)
service_type = service_attrs_dict.get('type', None)
if service_type != 'interface':
raise Exception(
'invalid value "{}" for attribute "type" in XBR interface'.
format(service_type))
service = {
# '_index': i,
'type': service_type,
'name': service_name,
'docs': extract_docs(_service),
}
if EXTRACT_ATTRS_RAW:
service['attrs'] = service_attrs_dict
else:
service['uuid'] = service_attrs_dict.get('uuid', None)
num_calls = _service.CallsLength()
calls = []
calls_by_name = {}
for j in range(num_calls):
_call = _service.Calls(j)
_call_name = _call.Name().decode('utf8')
call_attrs_dict = extract_attributes(_call)
call_type = call_attrs_dict.get('type', None)
if call_type not in INTERFACE_MEMBER_TYPES:
raise Exception(
'invalid XBR interface member type "{}" - must be one of {}'.
format(call_type, INTERFACE_MEMBER_TYPES))
call_stream = call_attrs_dict.get('stream', None)
if call_stream in ['none', 'None', 'null', 'Null']:
call_stream = None
if call_stream not in INTERFACE_MEMBER_STREAM_VALUES:
raise Exception(
'invalid XBR interface member stream modifier "{}" - must be one of {}'.
format(call_stream, INTERFACE_MEMBER_STREAM_VALUES))
def _decode_type(x):
res = x.Name().decode('utf8')
if res in ['Void', 'wamp.Void']:
res = None
return res
call = {
'type': call_type,
'name': _call_name,
'in': _decode_type(_call.Request()),
'out': _decode_type(_call.Response()),
'stream': call_stream,
# 'id': int(_call.Id()),
# 'offset': int(_call.Offset()),
}
# call['attrs'] = call_attrs_dict
call['docs'] = extract_docs(_call)
calls.append(call)
calls_by_name[_call_name] = call
# service['calls'] = sorted(calls, key=lambda field: field['id'])
service['slots'] = calls_by_name
services.append(service)
if service_name in schema_by_uri['types']:
raise Exception(
'unexpected duplicate definition for qualified name "{}"'.
format(service_name))
else:
schema_by_uri['types'][service_name] = service
if typerefs_error_cnt:
raise Exception(
'{} unresolved type references encountered in schema'.format(
typerefs_error_cnt))
schema['enums'] = sorted(enums, key=lambda enum: enum['name'])
schema['tables'] = sorted(objects, key=lambda obj: obj['name'])
schema['services'] = sorted(services, key=lambda service: service['name'])
return schema_by_uri
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'infile', help='FlatBuffers binary schema input file (.bfbs)')
parser.add_argument(
'-o', '--outfile', help='FlatBuffers JSON schema output (.json)')
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='Enable verbose processing output.')
parser.add_argument(
'-d', '--debug', action='store_true', help='Enable debug output.')
options = parser.parse_args()
log = txaio.make_logger()
txaio.start_logging(level='debug' if options.debug else 'info')
infile_path = os.path.abspath(options.infile)
with open(infile_path, 'rb') as f:
buf = f.read()
log.info('Loading FlatBuffers binary schema ({} bytes) ...'.format(
len(buf)))
try:
schema = read_reflection_schema(buf, log=log)
except Exception as e:
log.error(e)
if True:
schema['meta']['file_name'] = os.path.basename(options.infile)
schema['meta']['file_path'] = infile_path
with open(options.outfile, 'wb') as f:
outdata = json.dumps(
schema,
ensure_ascii=False,
sort_keys=False,
indent=4,
separators=(', ', ': ')).encode('utf8')
f.write(outdata)
cnt_bytes = len(outdata)
cnt_defs = len(schema['types'].keys())
log.info(
'FlatBuffers JSON schema data written ({} bytes, {} defs).'.format(
cnt_bytes, cnt_defs))
if options.verbose:
log.info('Schema metadata:')
schema_meta_str = pprint.pformat(schema['meta'])
# log.info(schema_meta_str)
# log.info('{}'.format(schema_meta_str))
print(schema_meta_str)
for o in schema['types'].values():
if o['type'] == 'interface':
log.info('interface: {}'.format(hl(o['name'], bold=True)))
for s in o['slots'].values():
log.info('{:>12}: {}'.format(s['type'], hl(s['name'])))
| mit | 3,829,342,878,639,213,000 | 31.009434 | 97 | 0.519776 | false |
catapult-project/catapult | dashboard/dashboard/file_bug_test.py | 3 | 43198 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=too-many-lines
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import datetime
import json
import mock
import sys
import webapp2
import webtest
# Importing mock_oauth2_decorator before file_bug mocks out
# OAuth2Decorator usage in that file.
# pylint: disable=unused-import
from dashboard import mock_oauth2_decorator
# pylint: enable=unused-import
from dashboard import file_bug
from dashboard.common import namespaced_stored_object
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import bug_label_patterns
from dashboard.models import histogram
from dashboard.models.subscription import Subscription
from tracing.value.diagnostics import generic_set
from tracing.value.diagnostics import reserved_infos
class FileBugTest(testing_common.TestCase):
def setUp(self):
super(FileBugTest, self).setUp()
testing_common.SetSheriffDomains(['chromium.org'])
testing_common.SetIsInternalUser('[email protected]', True)
testing_common.SetIsInternalUser('[email protected]', False)
self.SetCurrentUser('[email protected]')
self._issue_tracker_service = testing_common.FakeIssueTrackerService()
self.PatchObject(file_bug.file_bug.issue_tracker_service,
'IssueTrackerService',
lambda *_: self._issue_tracker_service)
app = webapp2.WSGIApplication([('/file_bug', file_bug.FileBugHandler)])
self.testapp = webtest.TestApp(app)
def tearDown(self):
super(FileBugTest, self).tearDown()
self.UnsetCurrentUser()
def _AddSampleAlerts(self, master='ChromiumPerf', has_commit_positions=True):
"""Adds sample data and returns a dict of rev to anomaly key."""
# Add sample sheriff, masters, bots, and tests.
subscription = Subscription(
name='Sheriff',
bug_labels=['Performance-Sheriff'],
bug_components=['Blink>Javascript'],
)
testing_common.AddTests(
[master], ['linux'],
{'scrolling': {
'first_paint': {},
'mean_frame_time': {},
}})
test_path1 = '%s/linux/scrolling/first_paint' % master
test_path2 = '%s/linux/scrolling/mean_frame_time' % master
test_key1 = utils.TestKey(test_path1)
test_key2 = utils.TestKey(test_path2)
anomaly_key1 = self._AddAnomaly(111995, 112005, test_key1, subscription)
anomaly_key2 = self._AddAnomaly(112000, 112010, test_key2, subscription)
anomaly_key3 = self._AddAnomaly(112015, 112015, test_key2, subscription)
rows_1 = testing_common.AddRows(test_path1, [112005])
rows_2 = testing_common.AddRows(test_path2, [112010])
rows_2 = testing_common.AddRows(test_path2, [112015])
if has_commit_positions:
rows_1[0].r_commit_pos = 112005
rows_2[0].r_commit_pos = 112010
return (anomaly_key1, anomaly_key2, anomaly_key3)
def _AddSampleClankAlerts(self):
"""Adds sample data and returns a dict of rev to anomaly key.
The biggest difference here is that the start/end revs aren't chromium
commit positions. This tests the _MilestoneLabel function to make sure
it will update the end_revision if r_commit_pos is found.
"""
# Add sample sheriff, masters, bots, and tests. Doesn't need to be Clank.
subscription = Subscription(
name='Sheriff',
bug_labels=['Performance-Sheriff'],
bug_components=['Blink>Javascript'],
)
testing_common.AddTests(
['ChromiumPerf'], ['linux'],
{'scrolling': {
'first_paint': {},
'mean_frame_time': {},
}})
test_path1 = 'ChromiumPerf/linux/scrolling/first_paint'
test_path2 = 'ChromiumPerf/linux/scrolling/mean_frame_time'
test_key1 = utils.TestKey(test_path1)
test_key2 = utils.TestKey(test_path2)
anomaly_key1 = self._AddAnomaly(1476193324, 1476201840, test_key1,
subscription)
anomaly_key2 = self._AddAnomaly(1476193320, 1476201870, test_key2,
subscription)
anomaly_key3 = self._AddAnomaly(1476193390, 1476193390, test_key2,
subscription)
rows_1 = testing_common.AddRows(test_path1, [1476201840])
rows_2 = testing_common.AddRows(test_path2, [1476201870])
rows_3 = testing_common.AddRows(test_path2, [1476193390])
# These will be the revisions used to determine label.
rows_1[0].r_commit_pos = 112005
rows_2[0].r_commit_pos = 112010
rows_3[0].r_commit_pos = 112015
return (anomaly_key1, anomaly_key2, anomaly_key3)
def _AddAnomaly(self, start_rev, end_rev, test_key, subscription):
return anomaly.Anomaly(
start_revision=start_rev,
end_revision=end_rev,
test=test_key,
median_before_anomaly=100,
median_after_anomaly=200,
subscription_names=[subscription.name],
subscriptions=[subscription]).put()
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch('google.appengine.api.app_identity.get_default_version_hostname',
mock.MagicMock(return_value='chromeperf.appspot.com'))
def testBisectDisabled(self):
http = utils.ServiceAccountHttp()
owner = ''
cc = '[email protected]'
summary = 'test'
description = 'Test test.'
project_id = None
labels = []
components = []
test_path = 'ChromiumPerf/linux/scrolling/first_paint'
test_key = utils.TestKey(test_path)
subscription = Subscription(name='Sheriff',)
keys = [self._AddAnomaly(10, 20, test_key, subscription).urlsafe()]
bisect = False
result = file_bug.file_bug.FileBug(http, owner, cc, summary, description,
project_id, labels, components, keys,
bisect)
self.assertNotIn('bisect_error', result)
self.assertNotIn('jobId', result)
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch('google.appengine.api.app_identity.get_default_version_hostname',
mock.MagicMock(return_value='chromeperf.appspot.com'))
def testSupportsCCList(self):
http = utils.ServiceAccountHttp()
owner = ''
cc = '[email protected],[email protected],[email protected],,'
summary = 'test'
description = 'Test test.'
project_id = None
labels = []
components = []
test_path = 'ChromiumPerf/linux/scrolling/first_paint'
test_key = utils.TestKey(test_path)
subscription = Subscription(name='Sheriff',)
keys = [self._AddAnomaly(10, 20, test_key, subscription).urlsafe()]
bisect = False
result = file_bug.file_bug.FileBug(http, owner, cc, summary, description,
project_id, labels, components, keys,
bisect)
self.assertNotIn('bisect_error', result)
self.assertNotIn('jobId', result)
def testGet_WithNoKeys_ShowsError(self):
# When a request is made and no keys parameter is given,
# an error message is shown in the reply.
response = self.testapp.get('/file_bug?summary=s&description=d&finish=true')
self.assertIn('<div class="error">', response.body)
self.assertIn('No alerts specified', response.body)
def testGet_WithNoFinish_ShowsForm(self):
# When a GET request is sent with keys specified but the finish parameter
# is not given, the response should contain a form for the sheriff to fill
# in bug details (summary, description, etc).
alert_keys = self._AddSampleAlerts()
response = self.testapp.get('/file_bug?summary=s&description=d&keys=%s' %
alert_keys[0].urlsafe())
self.assertEqual(1, len(response.html('form')))
self.assertIn('<input name="cc" type="text" value="[email protected]">',
str(response.html('form')[0]))
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
def testInternalBugLabel(self):
# If any of the alerts are marked as internal-only, which should happen
# when the corresponding test is internal-only, then the create bug dialog
# should suggest adding a Restrict-View-Google label.
self.SetCurrentUser('[email protected]')
alert_keys = self._AddSampleAlerts()
anomaly_entity = alert_keys[0].get()
anomaly_entity.internal_only = True
anomaly_entity.put()
response = self.testapp.get('/file_bug?summary=s&description=d&keys=%s' %
alert_keys[0].urlsafe())
self.assertIn('Restrict-View-Google', response.body)
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
def testGet_SetsBugLabelsComponents(self):
self.SetCurrentUser('[email protected]')
alert_keys = self._AddSampleAlerts()
bug_label_patterns.AddBugLabelPattern('label1-foo', '*/*/*/first_paint')
bug_label_patterns.AddBugLabelPattern('Cr-Performance-Blink',
'*/*/*/mean_frame_time')
response = self.testapp.get(
'/file_bug?summary=s&description=d&keys=%s,%s' %
(alert_keys[0].urlsafe(), alert_keys[1].urlsafe()))
self.assertIn('label1-foo', response.body)
self.assertIn('Performance>Blink', response.body)
self.assertIn('Performance-Sheriff', response.body)
self.assertIn('Blink>Javascript', response.body)
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch('google.appengine.api.app_identity.get_default_version_hostname',
mock.MagicMock(return_value='chromeperf.appspot.com'))
@mock.patch.object(file_bug.file_bug.auto_bisect, 'StartNewBisectForBug',
mock.MagicMock(return_value={
'issue_id': 123,
'issue_url': 'foo.com'
}))
def _PostSampleBug(self,
has_commit_positions=True,
master='ChromiumPerf',
is_single_rev=False):
if master == 'ClankInternal':
alert_keys = self._AddSampleClankAlerts()
else:
alert_keys = self._AddSampleAlerts(master, has_commit_positions)
if is_single_rev:
alert_keys = alert_keys[2].urlsafe()
else:
alert_keys = '%s,%s' % (alert_keys[0].urlsafe(), alert_keys[1].urlsafe())
response = self.testapp.post('/file_bug', [
('keys', alert_keys),
('summary', 's'),
('description', 'd\n'),
('finish', 'true'),
('label', 'one'),
('label', 'two'),
('component', 'Foo>Bar'),
])
return response
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch.object(file_bug.file_bug, '_GetAllCurrentVersionsFromOmahaProxy',
mock.MagicMock(return_value=[]))
@mock.patch.object(file_bug.file_bug.auto_bisect, 'StartNewBisectForBug',
mock.MagicMock(return_value={
'issue_id': 123,
'issue_url': 'foo.com'
}))
def testGet_WithFinish_CreatesBug(self):
# When a POST request is sent with keys specified and with the finish
# parameter given, an issue will be created using the issue tracker
# API, and the anomalies will be updated, and a response page will
# be sent which indicates success.
self._issue_tracker_service._bug_id_counter = 277761
response = self._PostSampleBug()
# The response page should have a bug number.
self.assertIn('277761', response.body)
# The anomaly entities should be updated.
for anomaly_entity in anomaly.Anomaly.query().fetch():
if anomaly_entity.end_revision in [112005, 112010]:
self.assertEqual(277761, anomaly_entity.bug_id)
else:
self.assertIsNone(anomaly_entity.bug_id)
# Two HTTP requests are made when filing a bug; only test 2nd request.
comment = self._issue_tracker_service.add_comment_args[1]
self.assertIn('https://chromeperf.appspot.com/group_report?bug_id=277761',
comment)
self.assertIn('https://chromeperf.appspot.com/group_report?sid=', comment)
self.assertIn('\n\n\nBot(s) for this bug\'s original alert(s):\n\nlinux',
comment)
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch.object(file_bug.file_bug, '_GetAllCurrentVersionsFromOmahaProxy',
mock.MagicMock(return_value=[]))
@mock.patch.object(file_bug.file_bug.auto_bisect, 'StartNewBisectForBug',
mock.MagicMock(return_value={
'issue_id': 123,
'issue_url': 'foo.com'
}))
def testGet_WithFinish_CreatesBug_WithDocs(self):
diag_dict = generic_set.GenericSet([[u'Benchmark doc link',
u'http://docs']])
diag = histogram.SparseDiagnostic(
data=diag_dict.AsDict(),
start_revision=1,
end_revision=sys.maxsize,
name=reserved_infos.DOCUMENTATION_URLS.name,
test=utils.TestKey('ChromiumPerf/linux/scrolling'))
diag.put()
# When a POST request is sent with keys specified and with the finish
# parameter given, an issue will be created using the issue tracker
# API, and the anomalies will be updated, and a response page will
# be sent which indicates success.
self._issue_tracker_service._bug_id_counter = 277761
response = self._PostSampleBug()
# The response page should have a bug number.
self.assertIn('277761', response.body)
# The anomaly entities should be updated.
for anomaly_entity in anomaly.Anomaly.query().fetch():
if anomaly_entity.end_revision in [112005, 112010]:
self.assertEqual(277761, anomaly_entity.bug_id)
else:
self.assertIsNone(anomaly_entity.bug_id)
# Two HTTP requests are made when filing a bug; only test 2nd request.
comment = self._issue_tracker_service.add_comment_args[1]
self.assertIn('https://chromeperf.appspot.com/group_report?bug_id=277761',
comment)
self.assertIn('https://chromeperf.appspot.com/group_report?sid=', comment)
self.assertIn('\n\n\nBot(s) for this bug\'s original alert(s):\n\nlinux',
comment)
self.assertIn('scrolling - Benchmark doc link:', comment)
self.assertIn('http://docs', comment)
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch.object(file_bug.file_bug, '_GetAllCurrentVersionsFromOmahaProxy',
mock.MagicMock(return_value=[]))
@mock.patch.object(
file_bug.file_bug.crrev_service, 'GetNumbering',
mock.MagicMock(
return_value={'git_sha': '852ba7672ce02911e9f8f2a22363283adc80940e'}))
@mock.patch('dashboard.services.gitiles_service.CommitInfo',
mock.MagicMock(return_value={
'author': {
'email': '[email protected]'
},
'message': 'My first commit!'
}))
def testGet_WithFinish_CreatesBugSingleRevOwner(self):
# When a POST request is sent with keys specified and with the finish
# parameter given, an issue will be created using the issue tracker
# API, and the anomalies will be updated, and a response page will
# be sent which indicates success.
namespaced_stored_object.Set(
'repositories', {
"chromium": {
"repository_url":
"https://chromium.googlesource.com/chromium/src"
}
})
self._issue_tracker_service._bug_id_counter = 277761
response = self._PostSampleBug(is_single_rev=True)
# The response page should have a bug number.
self.assertIn('277761', response.body)
# Three HTTP requests are made when filing a bug with owner; test third
    # request for owner name.
comment = self._issue_tracker_service.add_comment_args[1]
self.assertIn(
'Assigning to [email protected] because this is the only CL in range',
comment)
self.assertIn('My first commit', comment)
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch.object(file_bug.file_bug, '_GetAllCurrentVersionsFromOmahaProxy',
mock.MagicMock(return_value=[]))
@mock.patch.object(
file_bug.file_bug.crrev_service, 'GetNumbering',
mock.MagicMock(
return_value={'git_sha': '852ba7672ce02911e9f8f2a22363283adc80940e'}))
@mock.patch('dashboard.services.gitiles_service.CommitInfo',
mock.MagicMock(
return_value={
'author': {
'email': 'v8-ci-autoroll-builder@chops-service-'
'accounts.iam.gserviceaccount.com'
},
'message': 'This is a roll\n\[email protected]'
}))
def testGet_WithFinish_CreatesBugSingleRevAutorollOwner(self):
# When a POST request is sent with keys specified and with the finish
# parameter given, an issue will be created using the issue tracker
# API, and the anomalies will be updated, and a response page will
# be sent which indicates success.
namespaced_stored_object.Set(
'repositories', {
"chromium": {
"repository_url":
"https://chromium.googlesource.com/chromium/src"
}
})
self._issue_tracker_service._bug_id_counter = 277761
response = self._PostSampleBug(is_single_rev=True)
# The response page should have a bug number.
self.assertIn('277761', response.body)
# Two HTTP requests are made when filing a bug; only test 2nd request.
comment = self._issue_tracker_service.add_comment_args[1]
self.assertIn('Assigning to [email protected]', comment)
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch.object(file_bug.file_bug, '_GetAllCurrentVersionsFromOmahaProxy',
mock.MagicMock(return_value=[]))
def testGet_WithFinish_SingleRevOwner_Clank_Skips(self):
# When a POST request is sent with keys specified and with the finish
# parameter given, an issue will be created using the issue tracker
# API, and the anomalies will be updated, and a response page will
# be sent which indicates success.
namespaced_stored_object.Set(
'repositories', {
"chromium": {
"repository_url":
"https://chromium.googlesource.com/chromium/src"
}
})
self._issue_tracker_service._bug_id_counter = 277761
response = self._PostSampleBug(is_single_rev=True, master='ClankInternal')
# The response page should have a bug number.
self.assertIn('277761', response.body)
# Three HTTP requests are made when filing a bug with owner; test third
    # request for owner name.
comment = self._issue_tracker_service.add_comment_args[1]
self.assertNotIn(
'Assigning to [email protected] because this is the only CL in range',
comment)
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch.object(file_bug.file_bug, '_GetAllCurrentVersionsFromOmahaProxy',
mock.MagicMock(return_value=[]))
def testGet_WithFinish_SingleRevOwner_InvalidRepository_Skips(self):
# When a POST request is sent with keys specified and with the finish
# parameter given, an issue will be created using the issue tracker
# API, and the anomalies will be updated, and a response page will
# be sent which indicates success.
namespaced_stored_object.Set(
'repositories', {
"chromium": {
"repository_url":
"https://chromium.googlesource.com/chromium/src"
}
})
self._issue_tracker_service._bug_id_counter = 277761
response = self._PostSampleBug(is_single_rev=True, master='FakeMaster')
# The response page should have a bug number.
self.assertIn('277761', response.body)
# Three HTTP requests are made when filing a bug with owner; test third
    # request for owner name.
comment = self._issue_tracker_service.add_comment_args[1]
self.assertNotIn(
'Assigning to [email protected] because this is the only CL in range',
comment)
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch.object(file_bug.file_bug, '_GetAllCurrentVersionsFromOmahaProxy',
mock.MagicMock(return_value=[]))
@mock.patch.object(
file_bug.file_bug.crrev_service, 'GetNumbering',
mock.MagicMock(
return_value={'git_sha': '852ba7672ce02911e9f8f2a22363283adc80940e'}))
@mock.patch('dashboard.services.gitiles_service.CommitInfo',
mock.MagicMock(return_value={
'author': {
'email': '[email protected]'
},
'message': 'My first commit!'
}))
def testGet_WithFinish_CreatesBugSingleRevDifferentMasterOwner(self):
# When a POST request is sent with keys specified and with the finish
# parameter given, an issue will be created using the issue tracker
# API, and the anomalies will be updated, and a response page will
# be sent which indicates success.
namespaced_stored_object.Set(
'repositories', {
"chromium": {
"repository_url":
"https://chromium.googlesource.com/chromium/src"
}
})
self._issue_tracker_service._bug_id_counter = 277761
response = self._PostSampleBug(is_single_rev=True, master='Foo')
# The response page should have a bug number.
self.assertIn('277761', response.body)
# Three HTTP requests are made when filing a bug with owner; test third
    # request for owner name.
comment = self._issue_tracker_service.add_comment_args[1]
self.assertNotIn(
'Assigning to [email protected] because this is the only CL in range',
comment)
self.assertNotIn('My first commit', comment)
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch.object(file_bug.file_bug, '_GetAllCurrentVersionsFromOmahaProxy',
mock.MagicMock(return_value=[]))
@mock.patch.object(
file_bug.file_bug.crrev_service, 'GetNumbering',
mock.MagicMock(
return_value={'git_sha': '852ba7672ce02911e9f8f2a22363283adc80940e'}))
@mock.patch('dashboard.services.gitiles_service.CommitInfo',
mock.MagicMock(return_value={
'author': {
'email': 'robot@chops-service-accounts'
'.iam.gserviceaccount.com'
},
'message': 'This is an autoroll\n\[email protected]',
}))
def testGet_WithFinish_CreatesBugSingleRevAutorollSheriff(self):
# When a POST request is sent with keys specified and with the finish
# parameter given, an issue will be created using the issue tracker
# API, and the anomalies will be updated, and a response page will
# be sent which indicates success.
namespaced_stored_object.Set(
'repositories', {
"chromium": {
"repository_url":
"https://chromium.googlesource.com/chromium/src"
}
})
self._issue_tracker_service._bug_id_counter = 277761
response = self._PostSampleBug(is_single_rev=True)
# The response page should have a bug number.
self.assertIn('277761', response.body)
# Three HTTP requests are made when filing a bug with owner; test third
    # request for owner name.
comment = self._issue_tracker_service.add_comment_args[1]
self.assertIn(
'Assigning to [email protected] because this is the only CL in range',
comment)
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch.object(file_bug.file_bug, '_GetAllCurrentVersionsFromOmahaProxy',
mock.MagicMock(return_value=[{
'versions': [{
'branch_base_position': '112000',
'current_version': '2.0'
}, {
'branch_base_position': '111990',
'current_version': '1.0'
}]
}]))
@mock.patch.object(file_bug.file_bug.auto_bisect, 'StartNewBisectForBug',
mock.MagicMock(return_value={
'issue_id': 123,
'issue_url': 'foo.com'
}))
def testGet_WithFinish_LabelsBugWithMilestone(self):
# Here, we expect the bug to have the following end revisions:
# [112005, 112010] and the milestones are M-1 for rev 111990 and
    # M-2 for 112000. Hence the expected behavior is to label the bug
# M-2 since 111995 (lowest possible revision introducing regression)
# is less than 112010 (revision for M-2).
self._PostSampleBug()
self.assertIn('M-2', self._issue_tracker_service.new_bug_kwargs['labels'])
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch.object(file_bug.file_bug, '_GetAllCurrentVersionsFromOmahaProxy',
mock.MagicMock(return_value=[{
'versions': [{
'branch_base_position': '112000',
'current_version': '2.0'
}, {
'branch_base_position': '111990',
'current_version': '1.0'
}]
}]))
@mock.patch.object(file_bug.file_bug.auto_bisect, 'StartNewBisectForBug',
mock.MagicMock(return_value={
'issue_id': 123,
'issue_url': 'foo.com'
}))
def testGet_WithFinish_LabelsBugWithNoMilestoneBecauseNoCommitPos(self):
# Here, we expect to return no Milestone label because the alerts do not
# contain r_commit_pos (and therefore aren't chromium). Assuming
# testGet_WithFinish_LabelsBugWithMilestone passes, M-2
# would be the label that it would get if the alert was Chromium.
self._PostSampleBug(has_commit_positions=False)
labels = self._issue_tracker_service.new_bug_kwargs['labels']
self.assertEqual(0, len([x for x in labels if x.startswith(u'M-')]))
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch.object(file_bug.file_bug, '_GetAllCurrentVersionsFromOmahaProxy',
mock.MagicMock(return_value=[{
'versions': [{
'branch_base_position': '113000',
'current_version': '2.0'
}, {
'branch_base_position': '112000',
'current_version': '2.0'
}, {
'branch_base_position': '111990',
'current_version': '1.0'
}]
}]))
@mock.patch.object(file_bug.file_bug.auto_bisect, 'StartNewBisectForBug',
mock.MagicMock(return_value={
'issue_id': 123,
'issue_url': 'foo.com'
}))
def testGet_WithFinish_LabelsBugForClank(self):
# Here, we expect to return M-2 even though the alert revisions aren't
    # even close to the branching points. We use r_commit_pos to determine
# which revision to check. There are 3 branching points to ensure we are
# actually changing the revision that is checked to r_commit_pos instead
# of just displaying the highest one (previous behavior).
self._PostSampleBug(master='ClankInternal')
self.assertIn('M-2', self._issue_tracker_service.new_bug_kwargs['labels'])
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch(
'google.appengine.api.urlfetch.fetch',
mock.MagicMock(return_value=testing_common.FakeResponseObject(200, '[]')))
def testGet_WithFinish_SucceedsWithNoVersions(self):
# Here, we test that we don't label the bug with an unexpected value when
# there is no version information from omahaproxy (for whatever reason)
self._PostSampleBug()
labels = self._issue_tracker_service.new_bug_kwargs['labels']
self.assertEqual(0, len([x for x in labels if x.startswith(u'M-')]))
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch(
'google.appengine.api.urlfetch.fetch',
mock.MagicMock(return_value=testing_common.FakeResponseObject(200, '[]')))
def testGet_WithFinish_SucceedsWithComponents(self):
# Here, we test that components are posted separately from labels.
self._PostSampleBug()
self.assertIn('Foo>Bar',
self._issue_tracker_service.new_bug_kwargs['components'])
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch('google.appengine.api.urlfetch.fetch',
mock.MagicMock(
return_value=testing_common.FakeResponseObject(
200,
json.dumps([{
'versions': [{
'branch_base_position': '0',
'current_version': '1.0'
}]
}]))))
def testGet_WithFinish_SucceedsWithRevisionOutOfRange(self):
# Here, we test that we label the bug with the highest milestone when the
# revision introducing regression is beyond all milestones in the list.
self._PostSampleBug()
self.assertIn('M-1', self._issue_tracker_service.new_bug_kwargs['labels'])
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch('google.appengine.api.urlfetch.fetch',
mock.MagicMock(
return_value=testing_common.FakeResponseObject(
200,
json.dumps([{
'versions': [{
'branch_base_position': 'N/A',
'current_version': 'N/A'
}]
}]))))
@mock.patch('logging.warn')
def testGet_WithFinish_SucceedsWithNAAndLogsWarning(self, mock_warn):
self._PostSampleBug()
labels = self._issue_tracker_service.new_bug_kwargs['labels']
self.assertEqual(0, len([x for x in labels if x.startswith(u'M-')]))
self.assertEqual(1, mock_warn.call_count)
def testGet_OwnersAreEmptyEvenWithOwnership(self):
ownership_samples = [{
'type': 'Ownership',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'emails': ['[email protected]']
}, {
'type': 'Ownership',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
'emails': ['[email protected]']
}]
test_paths = [
'ChromiumPerf/linux/scrolling/first_paint',
'ChromiumPerf/linux/scrolling/mean_frame_time'
]
test_keys = [utils.TestKey(test_path) for test_path in test_paths]
subscription = Subscription(
name='Sheriff',
bug_labels=['Performance-Sheriff', 'Cr-Blink-Javascript'])
anomaly_1 = anomaly.Anomaly(
start_revision=1476193324,
end_revision=1476201840,
test=test_keys[0],
median_before_anomaly=100,
median_after_anomaly=200,
subscriptions=[subscription],
subscription_names=[subscription.name],
ownership=ownership_samples[0]).put()
anomaly_2 = anomaly.Anomaly(
start_revision=1476193320,
end_revision=1476201870,
test=test_keys[1],
median_before_anomaly=100,
median_after_anomaly=200,
subscriptions=[subscription],
subscription_names=[subscription.name],
ownership=ownership_samples[1]).put()
response = self.testapp.post('/file_bug', [
('keys', '%s,%s' % (anomaly_1.urlsafe(), anomaly_2.urlsafe())),
('summary', 's'),
('description', 'd\n'),
('label', 'one'),
('label', 'two'),
('component', 'Foo>Bar'),
])
self.assertIn('<input type="text" name="owner" value="">', response.body)
response_changed_order = self.testapp.post('/file_bug', [
('keys', '%s,%s' % (anomaly_2.urlsafe(), anomaly_1.urlsafe())),
('summary', 's'),
('description', 'd\n'),
('label', 'one'),
('label', 'two'),
('component', 'Foo>Bar'),
])
self.assertIn('<input type="text" name="owner" value="">',
response_changed_order.body)
def testGet_OwnersNotFilledWhenNoOwnership(self):
test_key = utils.TestKey('ChromiumPerf/linux/scrolling/first_paint')
subscription = Subscription(
name='Sheriff',
bug_labels=['Performance-Sheriff', 'Cr-Blink-Javascript'])
anomaly_entity = anomaly.Anomaly(
start_revision=1476193324,
end_revision=1476201840,
test=test_key,
median_before_anomaly=100,
median_after_anomaly=200,
subscriptions=[subscription],
subscription_names=[subscription.name],
).put()
response = self.testapp.post('/file_bug', [
('keys', '%s' % (anomaly_entity.urlsafe())),
('summary', 's'),
('description', 'd\n'),
('label', 'one'),
('label', 'two'),
('component', 'Foo>Bar'),
])
self.assertIn('<input type="text" name="owner" value="">', response.body)
def testGet_WithAllOwnershipComponents(self):
ownership_samples = [{
'type': 'Ownership',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'component': 'Abc>Xyz'
}, {
'type': 'Ownership',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
'component': 'Def>123'
}]
test_paths = [
'ChromiumPerf/linux/scrolling/first_paint',
'ChromiumPerf/linux/scrolling/mean_frame_time'
]
test_keys = [utils.TestKey(test_path) for test_path in test_paths]
subscription = Subscription(
name='Sheriff',
bug_labels=['Performance-Sheriff', 'Cr-Blink-Javascript'])
anomaly_1 = anomaly.Anomaly(
start_revision=1476193324,
end_revision=1476201840,
test=test_keys[0],
median_before_anomaly=100,
median_after_anomaly=200,
subscriptions=[subscription],
subscription_names=[subscription.name],
ownership=ownership_samples[0]).put()
anomaly_2 = anomaly.Anomaly(
start_revision=1476193320,
end_revision=1476201870,
test=test_keys[1],
median_before_anomaly=100,
median_after_anomaly=200,
subscriptions=[subscription],
subscription_names=[subscription.name],
ownership=ownership_samples[1]).put()
response = self.testapp.post('/file_bug', [
('keys', '%s' % (anomaly_1.urlsafe())),
('summary', 's'),
('description', 'd\n'),
('label', 'one'),
('label', 'two'),
('component', 'Foo>Bar'),
])
self.assertIn(
'<input type="checkbox" checked name="component" value="Abc>Xyz">',
response.body)
response_with_both_anomalies = self.testapp.post('/file_bug', [
('keys', '%s,%s' % (anomaly_1.urlsafe(), anomaly_2.urlsafe())),
('summary', 's'),
('description', 'd\n'),
('label', 'one'),
('label', 'two'),
('component', 'Foo>Bar'),
])
self.assertIn(
'<input type="checkbox" checked name="component" value="Abc>Xyz">',
response_with_both_anomalies.body)
self.assertIn(
'<input type="checkbox" checked name="component" value="Def>123">',
response_with_both_anomalies.body)
def testGet_UsesOnlyMostRecentComponents(self):
ownership_samples = [
{
'type': 'Ownership',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
'component': 'Abc>Def'
},
{
'type': 'Ownership',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'component': '123>456'
},
]
subscription = Subscription(
name='Sheriff',
bug_labels=['Performance-Sheriff', 'Cr-Blink-Javascript'])
test_key = utils.TestKey('ChromiumPerf/linux/scrolling/first_paint')
now_datetime = datetime.datetime.now()
older_alert = anomaly.Anomaly(
start_revision=1476193320,
end_revision=1476201870,
test=test_key,
median_before_anomaly=100,
median_after_anomaly=200,
subscriptions=[subscription],
subscription_names=[subscription.name],
ownership=ownership_samples[0],
timestamp=now_datetime).put()
newer_alert = anomaly.Anomaly(
start_revision=1476193320,
end_revision=1476201870,
test=test_key,
median_before_anomaly=100,
median_after_anomaly=200,
subscriptions=[subscription],
subscription_names=[subscription.name],
ownership=ownership_samples[1],
timestamp=now_datetime + datetime.timedelta(10)).put()
response = self.testapp.post('/file_bug', [
('keys', '%s,%s' % (older_alert.urlsafe(), newer_alert.urlsafe())),
('summary', 's'),
('description', 'd\n'),
('label', 'one'),
('label', 'two'),
('component', 'Foo>Bar'),
])
self.assertNotIn(
'<input type="checkbox" checked name="component" value="Abc>Def">',
response.body)
self.assertIn(
'<input type="checkbox" checked name="component" value="123>456">',
response.body)
response_inverted_order = self.testapp.post('/file_bug', [
('keys', '%s,%s' % (newer_alert.urlsafe(), older_alert.urlsafe())),
('summary', 's'),
('description', 'd\n'),
('label', 'one'),
('label', 'two'),
('component', 'Foo>Bar'),
])
self.assertNotIn(
'<input type="checkbox" checked name="component" value="Abc>Def">',
response_inverted_order.body)
self.assertIn(
'<input type="checkbox" checked name="component" value="123>456">',
response_inverted_order.body)
def testGet_ComponentsChosenPerTest(self):
ownership_samples = [
{
'type': 'Ownership',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
'component': 'Abc>Def'
},
{
'type': 'Ownership',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'component': '123>456'
},
]
subscription = Subscription(
name='Sheriff',
bug_labels=['Performance-Sheriff', 'Cr-Blink-Javascript'])
test_paths = [
'ChromiumPerf/linux/scrolling/first_paint',
'ChromiumPerf/linux/scrolling/mean_frame_time'
]
test_keys = [utils.TestKey(test_path) for test_path in test_paths]
now_datetime = datetime.datetime.now()
alert_test_key_0 = anomaly.Anomaly(
start_revision=1476193320,
end_revision=1476201870,
test=test_keys[0],
median_before_anomaly=100,
median_after_anomaly=200,
subscriptions=[subscription],
subscription_names=[subscription.name],
ownership=ownership_samples[0],
timestamp=now_datetime).put()
alert_test_key_1 = anomaly.Anomaly(
start_revision=1476193320,
end_revision=1476201870,
test=test_keys[1],
median_before_anomaly=100,
median_after_anomaly=200,
subscriptions=[subscription],
subscription_names=[subscription.name],
ownership=ownership_samples[1],
timestamp=now_datetime + datetime.timedelta(10)).put()
response = self.testapp.post('/file_bug', [
('keys', '%s,%s' %
(alert_test_key_0.urlsafe(), alert_test_key_1.urlsafe())),
('summary', 's'),
('description', 'd\n'),
('label', 'one'),
('label', 'two'),
('component', 'Foo>Bar'),
])
self.assertIn(
'<input type="checkbox" checked name="component" value="Abc>Def">',
response.body)
self.assertIn(
'<input type="checkbox" checked name="component" value="123>456">',
response.body)
def testGet_UsesFirstDefinedComponent(self):
ownership_samples = [{
'type': 'Ownership',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
}, {
'type': 'Ownership',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'component': ''
}, {
'type': 'Ownership',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'component': 'Abc>Def'
}]
now_datetime = datetime.datetime.now()
test_key = utils.TestKey('ChromiumPerf/linux/scrolling/first_paint')
subscription = Subscription(
name='Sheriff',
bug_labels=['Performance-Sheriff', 'Cr-Blink-Javascript'])
alert_without_ownership = anomaly.Anomaly(
start_revision=1476193320,
end_revision=1476201870,
test=test_key,
median_before_anomaly=100,
median_after_anomaly=200,
subscriptions=[subscription],
subscription_names=[subscription.name],
timestamp=now_datetime).put()
alert_without_component = anomaly.Anomaly(
start_revision=1476193320,
end_revision=1476201870,
test=test_key,
median_before_anomaly=100,
median_after_anomaly=200,
subscriptions=[subscription],
subscription_names=[subscription.name],
ownership=ownership_samples[0],
timestamp=now_datetime + datetime.timedelta(10)).put()
alert_with_empty_component = anomaly.Anomaly(
start_revision=1476193320,
end_revision=1476201870,
test=test_key,
median_before_anomaly=100,
median_after_anomaly=200,
subscriptions=[subscription],
subscription_names=[subscription.name],
ownership=ownership_samples[1],
timestamp=now_datetime + datetime.timedelta(20)).put()
alert_with_component = anomaly.Anomaly(
start_revision=1476193320,
end_revision=1476201870,
test=test_key,
median_before_anomaly=100,
median_after_anomaly=200,
subscriptions=[subscription],
subscription_names=[subscription.name],
ownership=ownership_samples[2],
timestamp=now_datetime + datetime.timedelta(30)).put()
response = self.testapp.post('/file_bug', [
('keys', '%s,%s,%s,%s' %
(alert_without_ownership.urlsafe(), alert_without_component.urlsafe(),
alert_with_empty_component.urlsafe(),
alert_with_component.urlsafe())),
('summary', 's'),
('description', 'd\n'),
('label', 'one'),
('label', 'two'),
('component', 'Foo>Bar'),
])
self.assertIn(
'<input type="checkbox" checked name="component" value="Abc>Def">',
response.body)
| bsd-3-clause | 6,918,287,774,553,883,000 | 41.3095 | 80 | 0.616209 | false |
vi4m/django-dedal | runtests.py | 1 | 1164 | import sys
try:
from django.conf import settings
settings.configure(
DEBUG=True,
USE_TZ=True,
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
}
},
ROOT_URLCONF="dedal.urls",
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"dedal",
],
SITE_ID=1,
NOSE_ARGS=['-s'],
MIDDLEWARE_CLASSES=(),
)
try:
import django
setup = django.setup
except AttributeError:
pass
else:
setup()
from django_nose import NoseTestSuiteRunner
except ImportError:
import traceback
traceback.print_exc()
raise ImportError("To fix this error, run: pip install -r requirements-test.txt")
def run_tests(*test_args):
if not test_args:
test_args = ['tests']
# Run tests
test_runner = NoseTestSuiteRunner(verbosity=1)
failures = test_runner.run_tests(test_args)
if failures:
sys.exit(failures)
if __name__ == '__main__':
run_tests(*sys.argv[1:])
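# Typical invocation (illustrative only): "python runtests.py" runs the default
# 'tests' label, while "python runtests.py some_app.tests.SomeTest" would run a
# single dotted test label, assuming such a label exists in the project.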
| bsd-3-clause | 4,278,701,809,169,851,000 | 20.163636 | 85 | 0.552405 | false |
omargammoh/rpislave | website/processing.py | 1 | 6269 | from bson import json_util
import multiprocessing
from website.models import Conf
from time import time, sleep
import inspect
import subprocess
import json
try:
import signal
except:
print "signal cannot be imported"
def execute(cmd, daemon=False):
if daemon:
_ = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return None
else:
return subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read()
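# Quick reference (illustrative, not part of the original code): execute("ls -l")
# blocks and returns the command's combined stdout/stderr as a string, while
# execute("sudo reboot", daemon=True) fires the command off and returns None
# immediately without waiting for it to finish.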
def fix_corrupt_db():
conf = get_conf()
write_json_file(conf, "/home/pi/data/conf")
execute('sudo rm /home/pi/rpislave/db.sqlite3')
execute('sudo reboot')
return None
def read_json_file(fp):
try:
f = file(fp, "r")
s = f.read()
f.close()
js = json.loads(s)
except:
js = None
return js
def write_json_file(js, fp):
f = file(fp, "w")
f.write(json.dumps(js))
f.close()
class Timeout:
def __init__(self, seconds=1, error_message='Timeout'):
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self, signum, frame):
raise BaseException(self.error_message)
def __enter__(self):
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.setitimer(signal.ITIMER_REAL, self.seconds)
def __exit__(self, type, value, traceback):
signal.alarm(0)
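# Example usage (illustrative sketch, not part of the original module). The
# context manager above relies on SIGALRM, so it only works on Unix and only
# in the main thread:
#
#     with Timeout(seconds=5, error_message='ps aux timed out'):
#         listing = execute("ps aux")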
def filter_kwargs(func, kwargs_input):
"""
creates the kwargs of func from kwargs_input
func: function to inspect
"""
argnames,_,_,defaults = inspect.getargspec(func)
if defaults is None: defaults=[]
required_args = set(argnames[:len(argnames)-len(defaults)])
optional_args = set(argnames[len(argnames)-len(defaults):])
kwargs_needed = {k:v for (k,v) in kwargs_input.iteritems() if k in required_args.union(optional_args) }
return kwargs_needed
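# Illustrative example ('capture' is a made-up function, not part of this
# project): given
#
#     def capture(interval, label='cam'):
#         pass
#
# filter_kwargs(capture, {'interval': 5, 'label': 'a', 'extra': 1}) returns
# {'interval': 5, 'label': 'a'}, silently dropping keys the callable does not
# accept.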
def get_pid(command):
"""
gets the pid of the process using the command column in the ps aux table
"""
s = subprocess.Popen("ps aux", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read()
lines = [line.split(None, 10) for line in s.split("\n") if line.lstrip() != ""]
matches = [line for line in lines if line[-1] == command]
if len(matches)==0:
print "no maches found"
return None
elif len(matches)>1:
print "multiple matches found"
return None
else:
pid = matches[0][1]
return pid
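# For example (hypothetical command string),
# get_pid('python /home/pi/rpislave/manage.py runserver') returns the pid of
# the single "ps aux" row whose COMMAND column equals that string exactly, or
# None if there is no match or more than one.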
def get_conf():
for ob in Conf.objects.all():
try:
js = json_util.loads(ob.data)
if not ("label" in js):
raise BaseException('no label in conf')
return js
except:
print "!!was not able to parse and get label of a configuration row, skipping"
pass
return None
def fix_malformed_db():
try:
#get conf
print 'fix_malformed_db >> getting conf'
conf_x = get_conf()
#save it on a text file
print 'fix_malformed_db >> saving conf as text'
f = file('/home/pi/rpislave/conf.json', 'w')
f.write(json_util.dumps(conf_x))
f.close()
#remove db
import os
print 'fix_malformed_db >> deleting db'
os.remove('/home/pi/rpislave/db.sqlite3')
#keep a note as a file
print 'fix_malformed_db >> saving log as text'
from datetime import datetime
now = datetime.utcnow()
f = file('/home/pi/data/dbdelete-' + now.strftime('%Y%m%d%H%M%S'),'w')
f.write('we have taken a copy of conf, saved it on disk, deleted the database and restarted. %s' %str(now))
f.close()
#restart
print 'fix_malformed_db >> rebooting'
os.system('sudo reboot')
except:
print "error while trying to fix malformed db"
class MP():
def __init__(self, name, target, request, cmd=None):
self.t1 = time()
self.name = name
self.target = target
self.request = request
self.cmd = cmd if cmd else request.GET.get("cmd", None)
self.dic = {}
def start(self):
app_conf = get_conf()['apps'][self.name]
p = multiprocessing.Process(name=self.name, target=self.target, kwargs=filter_kwargs(func=self.target, kwargs_input=app_conf))
p.start()
def ison(self):
ac = [m for m in multiprocessing.active_children() if m.name == self.name ]
if len(ac) == 0:
return False
else:
#return ac[0].is_alive() #this line does not work when switching to uwsgi and gives the error: can only test a child process, this is due to the fact that uwsgi has many workers
return True
def stop(self):
ac = [m for m in multiprocessing.active_children() if self.name == m.name][0]
if ac:
if ac.pid:
kill_command = "sudo kill -INT %s" % ac.pid
print "stopping process in the good way: %s" % kill_command
s = subprocess.Popen(kill_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read()
else:
                print "stopping process in the hard way"
ac.terminate()
sleep(0.5)
return True
else:
return False
def process_command(self):
lis = []
print "%s" %(self.name)
ison_at_start = self.ison()
if self.cmd is None:
lis.append('no cmd has provided')
elif self.cmd == 'start':
if ison_at_start:
lis.append('process was already running')
else:
self.start()
lis.append('process has been started')
elif self.cmd == 'stop':
if self.stop():
lis.append('terminated process')
else:
lis.append('process was not running')
elif self.cmd == 'status':
self.dic["%s" %self.name] = get_conf()['apps'][self.name]
else:
lis.append("we didnt understand your cmd")
#respond with some info
self.dic['log'] = lis
self.dic['ison'] = self.ison()
self.dic['took'] = "%s seconds" %(time()-self.t1)
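# Rough usage sketch (assumption - the actual wiring lives in this project's
# views, not here). A Django view could drive a named background app like:
#
#     def control(request):
#         mp = MP(name='acquisition', target=acquisition_main, request=request)
#         mp.process_command()  # interprets request.GET['cmd']: start/stop/status
#         return HttpResponse(json_util.dumps(mp.dic))
#
# 'acquisition' and acquisition_main are placeholders; real app names and
# targets come from the 'apps' section of the configuration read by get_conf().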
| gpl-2.0 | 3,806,615,012,987,262,500 | 31.148718 | 189 | 0.584304 | false |
ilastikdev/ilastik | ilastik/applets/thresholdTwoLevels/_OpObjectsSegment.py | 1 | 10889 | ###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
##############################################################################
# basic python modules
import functools
import logging
logger = logging.getLogger(__name__)
from threading import Lock as ThreadLock
# required numerical modules
import numpy as np
import vigra
import opengm
# basic lazyflow types
from lazyflow.operator import Operator
from lazyflow.slot import InputSlot, OutputSlot
from lazyflow.rtype import SubRegion
from lazyflow.stype import Opaque
from lazyflow.request import Request, RequestPool
# required lazyflow operators
from lazyflow.operators.opLabelVolume import OpLabelVolume
from lazyflow.operators.valueProviders import OpArrayCache
from lazyflow.operators.opCompressedCache import OpCompressedCache
from lazyflow.operators.opReorderAxes import OpReorderAxes
from _OpGraphCut import segmentGC, OpGraphCut
## segment predictions with pre-thresholding
#
# This operator segments an image into foreground and background and makes use
# of a preceding thresholding step. After thresholding, connected components
# are computed and are then considered to be "cores" of objects to be segmented.
# The Graph Cut optimization (see _OpGraphCut.OpGraphCut) is then applied to
# the bounding boxes of the object "cores", enlarged by a user-specified margin.
# The pre-thresholding operation allows to apply Graph Cut segmentation on
# large data volumes, in case the segmented foreground consists of sparse objects
# of limited size and the probability map of the unaries is of high recall, but
# possibly low precision. One particular application for this setup is
# segmentation of synapses in anisotropic 3D Electron Microscopy image stacks.
#
#
# The slot CachedOutput guarantees consistent results, the slot Output computes
# the roi on demand.
#
# The operator inherits from OpGraphCut because they share some details:
# * output meta
# * dirtiness propagation
# * input slots
#
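# A minimal wiring sketch (illustrative only - slot names come from the class
# below, everything else such as graph setup and array shapes is assumed):
#
#   from lazyflow.graph import Graph
#   op = OpObjectsSegment(graph=Graph())
#   op.Prediction.setValue(prediction_txyzc)      # float probability map, txyzc
#   op.LabelImage.setValue(labeled_cores_txyzc)   # thresholded + labeled cores
#   op.Margin.setValue(np.asarray((20, 20, 20)))
#   op.Beta.setValue(0.2)
#   segmentation = op.CachedOutput[:].wait()
#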
class OpObjectsSegment(OpGraphCut):
name = "OpObjectsSegment"
# thresholded predictions, or otherwise obtained ROI indicators
# (a value of 0 is assumed to be background and ignored)
LabelImage = InputSlot()
# margin around each object (always xyz!)
Margin = InputSlot(value=np.asarray((20, 20, 20)))
# bounding boxes of the labeled objects
# this slot returns an array of dicts with shape (t, c)
BoundingBoxes = OutputSlot(stype=Opaque)
### slots from OpGraphCut ###
## prediction maps
#Prediction = InputSlot()
## graph cut parameter
#Beta = InputSlot(value=.2)
## labeled segmentation image
#Output = OutputSlot()
#CachedOutput = OutputSlot()
def __init__(self, *args, **kwargs):
super(OpObjectsSegment, self).__init__(*args, **kwargs)
def setupOutputs(self):
super(OpObjectsSegment, self).setupOutputs()
# sanity checks
shape = self.LabelImage.meta.shape
        assert len(shape) == 5,\
            "Label image must be a full 5d volume (txyzc)"
        tags = self.LabelImage.meta.getAxisKeys()
        tags = "".join(tags)
        assert tags == 'txyzc',\
            "Label image has wrong axes order "\
            "(expected: txyzc, got: {})".format(tags)
# bounding boxes are just one element arrays of type object, but we
# want to request boxes from a specific region, therefore BoundingBoxes
# needs a shape
shape = self.Prediction.meta.shape
self.BoundingBoxes.meta.shape = shape
self.BoundingBoxes.meta.dtype = np.object
self.BoundingBoxes.meta.axistags = vigra.defaultAxistags('txyzc')
def execute(self, slot, subindex, roi, result):
# check the axes - cannot do this in setupOutputs because we could be
# in some invalid intermediate state where the dimensions do not agree
shape = self.LabelImage.meta.shape
agree = [i == j for i, j in zip(self.Prediction.meta.shape, shape)]
assert all(agree),\
"shape mismatch: {} vs. {}".format(self.Prediction.meta.shape,
shape)
if slot == self.BoundingBoxes:
return self._execute_bbox(roi, result)
elif slot == self.Output:
self._execute_graphcut(roi, result)
else:
raise NotImplementedError(
"execute() is not implemented for slot {}".format(str(slot)))
def _execute_bbox(self, roi, result):
cc = self.LabelImage.get(roi).wait()
cc = vigra.taggedView(cc, axistags=self.LabelImage.meta.axistags)
cc = cc.withAxes(*'xyz')
logger.debug("computing bboxes...")
feats = vigra.analysis.extractRegionFeatures(
cc.astype(np.float32),
cc.astype(np.uint32),
features=["Count", "Coord<Minimum>", "Coord<Maximum>"])
feats_dict = {}
feats_dict["Coord<Minimum>"] = feats["Coord<Minimum>"]
feats_dict["Coord<Maximum>"] = feats["Coord<Maximum>"]
feats_dict["Count"] = feats["Count"]
return feats_dict
def _execute_graphcut(self, roi, result):
for i in (0, 4):
assert roi.stop[i] - roi.start[i] == 1,\
"Invalid roi for graph-cut: {}".format(str(roi))
t = roi.start[0]
c = roi.start[4]
margin = self.Margin.value
beta = self.Beta.value
MAXBOXSIZE = 10000000 # FIXME justification??
## request the bounding box coordinates ##
# the trailing index brackets give us the dictionary (instead of an
# array of size 1)
feats = self.BoundingBoxes.get(roi).wait()
mins = feats["Coord<Minimum>"]
maxs = feats["Coord<Maximum>"]
nobj = mins.shape[0]
# these are indices, so they should have an index datatype
mins = mins.astype(np.uint32)
maxs = maxs.astype(np.uint32)
## request the prediction image ##
pred = self.Prediction.get(roi).wait()
pred = vigra.taggedView(pred, axistags=self.Prediction.meta.axistags)
pred = pred.withAxes(*'xyz')
## request the connected components image ##
cc = self.LabelImage.get(roi).wait()
cc = vigra.taggedView(cc, axistags=self.LabelImage.meta.axistags)
cc = cc.withAxes(*'xyz')
# provide xyz view for the output (just need 8bit for segmentation
resultXYZ = vigra.taggedView(np.zeros(cc.shape, dtype=np.uint8),
axistags='xyz')
def processSingleObject(i):
logger.debug("processing object {}".format(i))
# maxs are inclusive, so we need to add 1
xmin = max(mins[i][0]-margin[0], 0)
ymin = max(mins[i][1]-margin[1], 0)
zmin = max(mins[i][2]-margin[2], 0)
xmax = min(maxs[i][0]+margin[0]+1, cc.shape[0])
ymax = min(maxs[i][1]+margin[1]+1, cc.shape[1])
zmax = min(maxs[i][2]+margin[2]+1, cc.shape[2])
ccbox = cc[xmin:xmax, ymin:ymax, zmin:zmax]
resbox = resultXYZ[xmin:xmax, ymin:ymax, zmin:zmax]
nVoxels = ccbox.size
if nVoxels > MAXBOXSIZE:
#problem too large to run graph cut, assign to seed
logger.warn("Object {} too large for graph cut.".format(i))
resbox[ccbox == i] = 1
return
probbox = pred[xmin:xmax, ymin:ymax, zmin:zmax]
gcsegm = segmentGC(probbox, beta)
gcsegm = vigra.taggedView(gcsegm, axistags='xyz')
ccsegm = vigra.analysis.labelVolumeWithBackground(
gcsegm.astype(np.uint8))
# Extended bboxes of different objects might overlap.
# To avoid conflicting segmentations, we find all connected
# components in the results and only take the one, which
# overlaps with the object "core" or "seed", defined by the
# pre-thresholding
seed = ccbox == i
filtered = seed*ccsegm
passed = np.unique(filtered)
assert len(passed.shape) == 1
if passed.size > 2:
logger.warn("ambiguous label assignment for region {}".format(
(xmin, xmax, ymin, ymax, zmin, zmax)))
resbox[ccbox == i] = 1
elif passed.size <= 1:
logger.warn(
"box {} segmented out with beta {}".format(i, beta))
else:
# assign to the overlap region
label = passed[1] # 0 is background
resbox[ccsegm == label] = 1
pool = RequestPool()
#FIXME make sure that the parallel computations fit into memory
for i in range(1, nobj):
req = Request(functools.partial(processSingleObject, i))
pool.add(req)
logger.info("Processing {} objects ...".format(nobj-1))
pool.wait()
pool.clean()
logger.info("object loop done")
# prepare result
resView = vigra.taggedView(result, axistags=self.Output.meta.axistags)
resView = resView.withAxes(*'xyz')
# some labels could have been removed => relabel
vigra.analysis.labelVolumeWithBackground(resultXYZ, out=resView)
def propagateDirty(self, slot, subindex, roi):
super(OpObjectsSegment, self).propagateDirty(slot, subindex, roi)
if slot == self.LabelImage:
# time-channel slices are pairwise independent
# determine t, c from input volume
t_ind = 0
c_ind = 4
t = (roi.start[t_ind], roi.stop[t_ind])
c = (roi.start[c_ind], roi.stop[c_ind])
# set output dirty
start = t[0:1] + (0,)*3 + c[0:1]
stop = t[1:2] + self.Output.meta.shape[1:4] + c[1:2]
roi = SubRegion(self.Output, start=start, stop=stop)
self.Output.setDirty(roi)
elif slot == self.Margin:
# margin affects the whole volume
self.Output.setDirty(slice(None))
| gpl-3.0 | -4,906,129,953,672,029,000 | 38.740876 | 81 | 0.619616 | false |
asweigart/pygcurse | examples/shadowtest.py | 1 | 1621 | # Simplified BSD License, Copyright 2011 Al Sweigart
import sys
import os
sys.path.append(os.path.abspath('..'))
import pygcurse, pygame
from pygame.locals import *
win = pygcurse.PygcurseWindow(40, 25)
win.autoblit = False
xoffset = 1
yoffset = 1
mousex = mousey = 0
while True:
for event in pygame.event.get(): # the event loop
if event.type == QUIT or event.type == KEYDOWN and event.key == K_ESCAPE:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_UP:
yoffset -= 1
elif event.key == K_DOWN:
yoffset += 1
elif event.key == K_LEFT:
xoffset -= 1
elif event.key == K_RIGHT:
xoffset += 1
elif event.key == K_p:
win.fullscreen = not win.fullscreen
elif event.key == K_d:
win._debugchars()
elif event.type == MOUSEMOTION:
mousex, mousey = win.getcoordinatesatpixel(event.pos, onscreen=False)
win.setscreencolors('white', 'blue', clear=True)
win.fill(bgcolor='red', region=(15, 10, 5, 5))
win.addshadow(51, (15, 10, 5, 5), xoffset=xoffset, yoffset=yoffset)
#win.drawline((6,6), (mousex, mousey), bgcolor='red')
win.drawline((6,6), (mousex, mousey), char='+', fgcolor='yellow', bgcolor='green')
win.cursor = 0, win.height-3
win.write('Use mouse to move line, arrow keys to move shadow, p to switch to fullscreen.')
win.cursor = 0, win.height-1
win.putchars('xoffset=%s, yoffset=%s ' % (xoffset, yoffset))
win.blittowindow()
| bsd-3-clause | -364,671,775,039,068,860 | 33.489362 | 94 | 0.584207 | false |
koreiklein/fantasia | ui/render/text/colors.py | 1 | 1104 | # Copyright (C) 2013 Korei Klein <[email protected]>
genericColor = None
variableColor = None
symbolColor = None
andColor = None
orColor = None
callColor = None
quantifierDividerColor = None
notColor = None
alwaysBackgroundColor = None
maybeBackgroundColor = None
relationColor = None
iffColor = None
applyColor = None
hiddenColor = None
symbolVariablePairBorderColor = None
injectionSymbolBackgroundColor = None
injectionVariableBackgroundColor = None
projectionSymbolBackgroundColor = None
projectionVariableBackgroundColor = None
callSymbolBackgroundColor = None
callVariableBackgroundColor = None
_colorPairs = [(None, None), (None, None), (None, None)]
def productPairsColor(i):
return _colorPairs[i % len(_colorPairs)]
symbolBackgroundColor = None
symbolForegroundColor = None
def exponentialColor(isAlways):
if isAlways:
return alwaysBackgroundColor
else:
return maybeBackgroundColor
projectDotColor = None
injectDotColor = None
trueColor = None
falseColor = None
| gpl-2.0 | 6,253,670,935,183,310,000 | 16.806452 | 57 | 0.724638 | false |
hiviah/perspectives-observatory | utilities/cert_client.py | 1 | 3009 | # This file is part of the Perspectives Notary Server
#
# Copyright (C) 2011 Dan Wendlandt
# Copyright (C) 2011 Ondrej Mikle, CZ.NIC Labs
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Testing script for get_certs feature. Little copy-pasta from simple_client.
import sys
import traceback
import base64
import urllib
import struct
from M2Crypto import BIO, RSA, EVP
from xml.dom.minidom import parseString
def fetch_certs_xml(notary_server, notary_port, service_id):
host, port = service_id.split(":")
url = "http://%s:%s/get_certs?host=%s&port=%s" % (notary_server, notary_port, host,port)
url_file = urllib.urlopen(url)
xml_text = url_file.read()
code = url_file.getcode()
return (code,xml_text)
def verify_certs_signature(service_id, xml_text, notary_pub_key_text):
doc = parseString(xml_text)
root = doc.documentElement
sig_to_verify = base64.standard_b64decode(root.getAttribute("sig"))
to_verify = service_id
cert_elements = root.getElementsByTagName("certificate")
for cert_elem in cert_elements:
cert = base64.standard_b64decode(cert_elem.getAttribute("body"))
to_verify += cert
start_ts = int(cert_elem.getAttribute("start"))
end_ts = int(cert_elem.getAttribute("end"))
to_verify += struct.pack("!2I", start_ts, end_ts)
bio = BIO.MemoryBuffer(notary_pub_key_text)
rsa_pub = RSA.load_pub_key_bio(bio)
pubkey = EVP.PKey()
pubkey.assign_rsa(rsa_pub)
pubkey.reset_context(md='sha256')
pubkey.verify_init()
pubkey.verify_update(to_verify)
return pubkey.verify_final(sig_to_verify)
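# For reference, the checks above imply a response document of roughly this
# shape (the root tag name is never inspected by this client, so the names
# used here are illustrative only):
#
#   <notary_reply sig="base64(RSA-SHA256 signature)">
#     <certificate body="base64(certificate bytes)" start="1300000000" end="1310000000"/>
#     ...
#   </notary_reply>
#
# The signed blob is service_id followed by each certificate's raw bytes and
# its start/end timestamps packed as two big-endian unsigned 32-bit integers
# (struct.pack("!2I", start, end)).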
if len(sys.argv) not in [4,5]:
print "usage: %s <service-id> <notary-server> <notary-port> [notary-pubkey]" % sys.argv[0]
exit(1)
notary_pub_key = None
if len(sys.argv) == 5:
notary_pub_key_file = sys.argv[4]
notary_pub_key = open(notary_pub_key_file,'r').read()
try:
code, xml_text = fetch_certs_xml(sys.argv[2],int(sys.argv[3]), sys.argv[1])
if code == 404:
print "Notary has no results"
elif code != 200:
print "Notary server returned error code: %s" % code
except Exception, e:
print "Exception contacting notary server:"
traceback.print_exc(e)
exit(1)
print 50 * "-"
print "XML Response:"
print xml_text
print 50 * "-"
if notary_pub_key:
if not verify_certs_signature(sys.argv[1].lower(), xml_text, notary_pub_key):
print "Signature verify failed. Results are not valid"
exit(1)
else:
print "Warning: no public key specified, not verifying notary signature"
| gpl-3.0 | 7,373,507,836,183,155,000 | 30.020619 | 91 | 0.713526 | false |
frostyfrog/mark2 | mk2/events/server.py | 1 | 2945 | import re
from . import Event, get_timestamp
# input/output
output_exp = re.compile(
r'(\d{4}-\d{2}-\d{2} |)(\d{2}:\d{2}:\d{2}) \[([A-Z]+)\] (?:%s)?(.*)' % '|'.join((re.escape(x) for x in (
'[Minecraft] ',
'[Minecraft-Server] '
))))
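# Example lines the pattern above is meant to match (timestamps and levels are
# illustrative):
#
#   2013-06-01 12:34:56 [INFO] [Minecraft-Server] Done (3.145s)! For help, type "help"
#   12:34:56 [SEVERE] java.lang.OutOfMemoryError: Java heap space
#
# Group 1 is the optional date, group 2 the time, group 3 the level and
# group 4 the message with any leading "[Minecraft...]" tag stripped.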
class ServerInput(Event):
"""Send data to the server's stdin. In plugins, a shortcut
is available: self.send("say hello")"""
line = Event.Arg(required=True)
class ServerOutput(Event):
"""Issued when the server gives us a line on stdout. Note
that to handle this, you must specify both the 'level'
(e.g. INFO or SEVERE) and a regex pattern to match"""
line = Event.Arg(required=True)
time = Event.Arg()
level = Event.Arg()
data = Event.Arg()
def setup(self):
m = output_exp.match(self.line)
if m:
g = m.groups()
self.time = g[0]+g[1]
self.level= g[2]
self.data = g[3]
else:
self.level= "???"
self.data = self.line.strip()
self.time = get_timestamp(self.time)
def prefilter(self, pattern, level=None):
if level and level != self.level:
return False
m = re.match(pattern, self.data)
if not m:
return False
self.match = m
return True
# start
class ServerStart(Event):
"""Issue this event to start the server"""
pass
class ServerStarting(Event):
"""Issued by the ServerStart handler to alert listening plugins
that the server process has started"""
pid = Event.Arg()
class ServerStarted(Event):
"""Issued when we see the "Done! (1.23s)" line from the server
This event has a helper method in plugins - just overwrite
the server_started method.
"""
time = Event.Arg()
#stop
class ServerStop(Event):
"""Issue this event to stop the server."""
reason = Event.Arg(required=True)
respawn = Event.Arg(required=True)
kill = Event.Arg(default=False)
announce = Event.Arg(default=True)
dispatch_once = True
class ServerStopping(Event):
"""Issued by the ServerStop handler to alert listening plugins
that the server is going for a shutdown
This event has a helper method in plugins - just overwrite
the server_started method."""
reason = Event.Arg(required=True)
respawn = Event.Arg(required=True)
kill = Event.Arg(default=False)
class ServerStopped(Event):
"""When the server process finally dies, this event is raised"""
pass
class ServerEvent(Event):
"""Tell plugins about something happening to the server"""
cause = Event.Arg(required=True)
friendly = Event.Arg()
data = Event.Arg(required=True)
priority = Event.Arg(default=0)
def setup(self):
if not self.friendly:
self.friendly = self.cause
| mit | 5,955,427,389,456,440,000 | 23.541667 | 108 | 0.591171 | false |
Times-0/Timeline | Timeline/PacketHandler/UserHandler.py | 1 | 3401 | from Timeline.Server.Constants import TIMELINE_LOGGER, PACKET_TYPE, PACKET_DELIMITER, LOGIN_SERVER, WORLD_SERVER, LOGIN_SERVER_ALLOWED
from Timeline.Utils.Events import PacketEventHandler
from twisted.internet import threads
from twisted.internet.defer import Deferred
from collections import deque
import logging
'''
AS2 and AS3 Compatible
'''
@PacketEventHandler.XTPacketRule('s', 'u#pbi', WORLD_SERVER)
@PacketEventHandler.XTPacketRule('s', 'u#sf', WORLD_SERVER)
@PacketEventHandler.XTPacketRule('s', 'u#sa', WORLD_SERVER)
@PacketEventHandler.XTPacketRule('s', 'u#se', WORLD_SERVER)
@PacketEventHandler.XTPacketRule('s', 'u#ss', WORLD_SERVER)
@PacketEventHandler.XTPacketRule('s', 'u#gp', WORLD_SERVER)
@PacketEventHandler.XTPacketRule('s', 'u#bf', WORLD_SERVER)
@PacketEventHandler.XTPacketRule('s', 's#upc', WORLD_SERVER)
@PacketEventHandler.XTPacketRule('s', 's#uph', WORLD_SERVER)
@PacketEventHandler.XTPacketRule('s', 's#upf', WORLD_SERVER)
@PacketEventHandler.XTPacketRule('s', 's#upn', WORLD_SERVER)
@PacketEventHandler.XTPacketRule('s', 's#upb', WORLD_SERVER)
@PacketEventHandler.XTPacketRule('s', 's#upa', WORLD_SERVER)
@PacketEventHandler.XTPacketRule('s', 's#upe', WORLD_SERVER)
@PacketEventHandler.XTPacketRule('s', 's#upp', WORLD_SERVER)
@PacketEventHandler.XTPacketRule('s', 's#upl', WORLD_SERVER)
@PacketEventHandler.XTPacketRule('s', 'u#followpath', WORLD_SERVER)
@PacketEventHandler.XTPacketRule_AS2('s', 'u#sf', WORLD_SERVER)
@PacketEventHandler.XTPacketRule_AS2('s', 'u#sa', WORLD_SERVER)
@PacketEventHandler.XTPacketRule_AS2('s', 'u#se', WORLD_SERVER)
@PacketEventHandler.XTPacketRule_AS2('s', 'u#ss', WORLD_SERVER)
@PacketEventHandler.XTPacketRule_AS2('s', 'u#gp', WORLD_SERVER)
@PacketEventHandler.XTPacketRule_AS2('s', 's#upc', WORLD_SERVER)
@PacketEventHandler.XTPacketRule_AS2('s', 's#uph', WORLD_SERVER)
@PacketEventHandler.XTPacketRule_AS2('s', 's#upf', WORLD_SERVER)
@PacketEventHandler.XTPacketRule_AS2('s', 's#upn', WORLD_SERVER)
@PacketEventHandler.XTPacketRule_AS2('s', 's#upb', WORLD_SERVER)
@PacketEventHandler.XTPacketRule_AS2('s', 's#upa', WORLD_SERVER)
@PacketEventHandler.XTPacketRule_AS2('s', 's#upe', WORLD_SERVER)
@PacketEventHandler.XTPacketRule_AS2('s', 's#upp', WORLD_SERVER)
@PacketEventHandler.XTPacketRule_AS2('s', 's#upl', WORLD_SERVER)
def UserRules(data):
return [[int(data[2][0])], {}]
'''
AS2 and AS3 Compatible
'''
@PacketEventHandler.XTPacketRule('s', 'u#sp', WORLD_SERVER)
@PacketEventHandler.XTPacketRule('s', 'u#sb', WORLD_SERVER)
@PacketEventHandler.XTPacketRule_AS2('s', 'u#sp', WORLD_SERVER)
@PacketEventHandler.XTPacketRule_AS2('s', 'u#sb', WORLD_SERVER)
def SendCoordinatesRule(data):
return [[int(data[2][0]), int(data[2][1])], {}]
'''
AS2 and AS3 Compatible
'''
@PacketEventHandler.XTPacketRule('s', 'u#pbn', WORLD_SERVER)
@PacketEventHandler.XTPacketRule_AS2('s', 'u#pbn', WORLD_SERVER)
def UsernameRule(data):
username = data[2][0].strip()
u_w_s = username.replace(' ', '')
if not u_w_s.isalnum():
raise Exception("[TE012] Invalid characters found in username : {}".format(username))
return [[username], {}]
'''
AS2 and AS3 Compatible
'''
@PacketEventHandler.XTPacketRule('s', 'u#pbsu', WORLD_SERVER)
@PacketEventHandler.XTPacketRule_AS2('s', 'u#pbsu', WORLD_SERVER)
def SWIDListRule(data):
return [[map(str, data[2][0].split(','))], {}] | gpl-3.0 | 2,546,374,903,917,814,300 | 41.628205 | 134 | 0.723023 | false |
amitjamadagni/sympy | sympy/functions/special/error_functions.py | 2 | 32620 | """ This module contains various functions that are special cases
of incomplete gamma functions. It should probably be renamed. """
from sympy.core import Add, S, C, sympify, cacheit, pi, I
from sympy.core.function import Function, ArgumentIndexError
from sympy.functions.elementary.miscellaneous import sqrt, root
from sympy.functions.elementary.complexes import polar_lift
from sympy.functions.special.hyper import hyper, meijerg
# TODO series expansions
# TODO see the "Note:" in Ei
###############################################################################
################################ ERROR FUNCTION ###############################
###############################################################################
class erf(Function):
"""
The Gauss error function.
This function is defined as:
:math:`\\mathrm{erf}(x)=\\frac{2}{\\sqrt{\\pi}} \\int_0^x e^{-t^2} \\, \\mathrm{d}x`
Or, in ASCII::
x
/
|
| 2
| -t
2* | e dt
|
/
0
-------------
____
\/ pi
Examples
========
>>> from sympy import I, oo, erf
>>> from sympy.abc import z
Several special values are known:
>>> erf(0)
0
>>> erf(oo)
1
>>> erf(-oo)
-1
>>> erf(I*oo)
oo*I
>>> erf(-I*oo)
-oo*I
In general one can pull out factors of -1 and I from the argument:
>>> erf(-z)
-erf(z)
The error function obeys the mirror symmetry:
>>> from sympy import conjugate
>>> conjugate(erf(z))
erf(conjugate(z))
Differentiation with respect to z is supported:
>>> from sympy import diff
>>> diff(erf(z), z)
2*exp(-z**2)/sqrt(pi)
We can numerically evaluate the error function to arbitrary precision
on the whole complex plane:
>>> erf(4).evalf(30)
0.999999984582742099719981147840
>>> erf(-4*I).evalf(30)
-1296959.73071763923152794095062*I
References
==========
.. [1] http://en.wikipedia.org/wiki/Error_function
.. [2] http://dlmf.nist.gov/7
.. [3] http://mathworld.wolfram.com/Erf.html
.. [4] http://functions.wolfram.com/GammaBetaErf/Erf
"""
nargs = 1
unbranched = True
def fdiff(self, argindex=1):
if argindex == 1:
return 2*C.exp(-self.args[0]**2)/sqrt(S.Pi)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.One
elif arg is S.NegativeInfinity:
return S.NegativeOne
elif arg is S.Zero:
return S.Zero
t = arg.extract_multiplicatively(S.ImaginaryUnit)
if t == S.Infinity or t == S.NegativeInfinity:
return arg
if arg.could_extract_minus_sign():
return -cls(-arg)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
k = C.floor((n - 1)/S(2))
if len(previous_terms) > 2:
return -previous_terms[-2] * x**2 * (n - 2)/(n*k)
else:
return 2*(-1)**k * x**n/(n*C.factorial(k)*sqrt(S.Pi))
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def _eval_is_real(self):
return self.args[0].is_real
def _eval_rewrite_as_uppergamma(self, z):
return sqrt(z**2)/z*(S.One - C.uppergamma(S.Half, z**2)/sqrt(S.Pi))
def _eval_rewrite_as_tractable(self, z):
return S.One - _erfs(z)*C.exp(-z**2)
def _eval_as_leading_term(self, x):
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and C.Order(1, x).contains(arg):
return 2*x/sqrt(pi)
else:
return self.func(arg)
###############################################################################
#################### EXPONENTIAL INTEGRALS ####################################
###############################################################################
class Ei(Function):
r"""
The classical exponential integral.
For the use in SymPy, this function is defined as
.. math:: \operatorname{Ei}(x) = \sum_{n=1}^\infty \frac{x^n}{n\, n!}
+ \log(x) + \gamma,
where :math:`\gamma` is the Euler-Mascheroni constant.
If :math:`x` is a polar number, this defines an analytic function on the
riemann surface of the logarithm. Otherwise this defines an analytic
function in the cut plane :math:`\mathbb{C} \setminus (-\infty, 0]`.
**Background**
The name 'exponential integral' comes from the following statement:
.. math:: \operatorname{Ei}(x) = \int_{-\infty}^x \frac{e^t}{t} \mathrm{d}t
If the integral is interpreted as a Cauchy principal value, this statement
holds for :math:`x > 0` and :math:`\operatorname{Ei}(x)` as defined above.
Note that we carefully avoided defining :math:`\operatorname{Ei}(x)` for
negative real x. This is because above integral formula does not hold for
any polar lift of such :math:`x`, indeed all branches of
:math:`\operatorname{Ei}(x)` above the negative reals are imaginary.
However, the following statement holds for all :math:`x \in \mathbb{R}^*`:
.. math:: \int_{-\infty}^x \frac{e^t}{t} \mathrm{d}t =
\frac{\operatorname{Ei}\left(|x|e^{i \arg(x)}\right) +
\operatorname{Ei}\left(|x|e^{- i \arg(x)}\right)}{2},
where the integral is again understood to be a principal value if
:math:`x > 0`, and :math:`|x|e^{i \arg(x)}`,
:math:`|x|e^{- i \arg(x)}` denote two conjugate polar lifts of :math:`x`.
See Also
========
expint, sympy.functions.special.gamma_functions.uppergamma
References
==========
- Abramowitz & Stegun, section 5: http://www.math.sfu.ca/~cbm/aands/page_228.htm
- http://en.wikipedia.org/wiki/Exponential_integral
Examples
========
>>> from sympy import Ei, polar_lift, exp_polar, I, pi
>>> from sympy.abc import x
The exponential integral in SymPy is strictly undefined for negative values
of the argument. For convenience, exponential integrals with negative
arguments are immediately converted into an expression that agrees with
the classical integral definition:
>>> Ei(-1)
-I*pi + Ei(exp_polar(I*pi))
This yields a real value:
>>> Ei(-1).n(chop=True)
-0.219383934395520
On the other hand the analytic continuation is not real:
>>> Ei(polar_lift(-1)).n(chop=True)
-0.21938393439552 + 3.14159265358979*I
The exponential integral has a logarithmic branch point at the origin:
>>> Ei(x*exp_polar(2*I*pi))
Ei(x) + 2*I*pi
Differentiation is supported:
>>> Ei(x).diff(x)
exp(x)/x
The exponential integral is related to many other special functions.
For example:
>>> from sympy import uppergamma, expint, Shi
>>> Ei(x).rewrite(expint)
-expint(1, x*exp_polar(I*pi)) - I*pi
>>> Ei(x).rewrite(Shi)
Chi(x) + Shi(x)
"""
nargs = 1
@classmethod
def eval(cls, z):
if not z.is_polar and z.is_negative:
# Note: is this a good idea?
return Ei(polar_lift(z)) - pi*I
nz, n = z.extract_branch_factor()
if n:
return Ei(nz) + 2*I*pi*n
def fdiff(self, argindex=1):
from sympy import unpolarify
arg = unpolarify(self.args[0])
if argindex == 1:
return C.exp(arg)/arg
else:
raise ArgumentIndexError(self, argindex)
def _eval_evalf(self, prec):
if (self.args[0]/polar_lift(-1)).is_positive:
return Function._eval_evalf(self, prec) + (I*pi)._eval_evalf(prec)
return Function._eval_evalf(self, prec)
def _eval_rewrite_as_uppergamma(self, z):
from sympy import uppergamma
# XXX this does not currently work usefully because uppergamma
# immediately turns into expint
return -uppergamma(0, polar_lift(-1)*z) - I*pi
def _eval_rewrite_as_expint(self, z):
return -expint(1, polar_lift(-1)*z) - I*pi
def _eval_rewrite_as_Si(self, z):
return Shi(z) + Chi(z)
_eval_rewrite_as_Ci = _eval_rewrite_as_Si
_eval_rewrite_as_Chi = _eval_rewrite_as_Si
_eval_rewrite_as_Shi = _eval_rewrite_as_Si
class expint(Function):
r"""
Generalized exponential integral.
This function is defined as
.. math:: \operatorname{E}_\nu(z) = z^{\nu - 1} \Gamma(1 - \nu, z),
where `\Gamma(1 - \nu, z)` is the upper incomplete gamma function
(``uppergamma``).
Hence for :math:`z` with positive real part we have
.. math:: \operatorname{E}_\nu(z)
= \int_1^\infty \frac{e^{-zt}}{z^\nu} \mathrm{d}t,
which explains the name.
The representation as an incomplete gamma function provides an analytic
continuation for :math:`\operatorname{E}_\nu(z)`. If :math:`\nu` is a
non-positive integer the exponential integral is thus an unbranched
function of :math:`z`, otherwise there is a branch point at the origin.
Refer to the incomplete gamma function documentation for details of the
branching behavior.
See Also
========
E1: The classical case, returns expint(1, z).
Ei: Another related function called exponential integral.
sympy.functions.special.gamma_functions.uppergamma
References
==========
- http://dlmf.nist.gov/8.19
- http://functions.wolfram.com/GammaBetaErf/ExpIntegralE/
- http://en.wikipedia.org/wiki/Exponential_integral
Examples
========
>>> from sympy import expint, S
>>> from sympy.abc import nu, z
Differentiation is supported. Differentiation with respect to z explains
further the name: for integral orders, the exponential integral is an
iterated integral of the exponential function.
>>> expint(nu, z).diff(z)
-expint(nu - 1, z)
Differentiation with respect to nu has no classical expression:
>>> expint(nu, z).diff(nu)
-z**(nu - 1)*meijerg(((), (1, 1)), ((0, 0, -nu + 1), ()), z)
At non-postive integer orders, the exponential integral reduces to the
exponential function:
>>> expint(0, z)
exp(-z)/z
>>> expint(-1, z)
exp(-z)/z + exp(-z)/z**2
At half-integers it reduces to error functions:
>>> expint(S(1)/2, z)
-sqrt(pi)*erf(sqrt(z))/sqrt(z) + sqrt(pi)/sqrt(z)
At positive integer orders it can be rewritten in terms of exponentials
and expint(1, z). Use expand_func() to do this:
>>> from sympy import expand_func
>>> expand_func(expint(5, z))
z**4*expint(1, z)/24 + (-z**3 + z**2 - 2*z + 6)*exp(-z)/24
The generalised exponential integral is essentially equivalent to the
incomplete gamma function:
>>> from sympy import uppergamma
>>> expint(nu, z).rewrite(uppergamma)
z**(nu - 1)*uppergamma(-nu + 1, z)
As such it is branched at the origin:
>>> from sympy import exp_polar, pi, I
>>> expint(4, z*exp_polar(2*pi*I))
I*pi*z**3/3 + expint(4, z)
>>> expint(nu, z*exp_polar(2*pi*I))
z**(nu - 1)*(exp(2*I*pi*nu) - 1)*gamma(-nu + 1) + expint(nu, z)
"""
nargs = 2
@classmethod
def eval(cls, nu, z):
from sympy import (unpolarify, expand_mul, uppergamma, exp, gamma,
factorial)
nu2 = unpolarify(nu)
if nu != nu2:
return expint(nu2, z)
if nu.is_Integer and nu <= 0 or (not nu.is_Integer and (2*nu).is_Integer):
return unpolarify(expand_mul(z**(nu - 1)*uppergamma(1 - nu, z)))
# Extract branching information. This can be deduced from what is
# explained in lowergamma.eval().
z, n = z.extract_branch_factor()
if n == 0:
return
if nu.is_integer:
if (nu > 0) is not True:
return
return expint(nu, z) \
- 2*pi*I*n*(-1)**(nu - 1)/factorial(nu - 1)*unpolarify(z)**(nu - 1)
else:
return (exp(2*I*pi*nu*n) - 1)*z**(nu - 1)*gamma(1 - nu) + expint(nu, z)
def fdiff(self, argindex):
from sympy import meijerg
nu, z = self.args
if argindex == 1:
return -z**(nu - 1)*meijerg([], [1, 1], [0, 0, 1 - nu], [], z)
elif argindex == 2:
return -expint(nu - 1, z)
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_uppergamma(self, nu, z):
from sympy import uppergamma
return z**(nu - 1)*uppergamma(1 - nu, z)
def _eval_rewrite_as_Ei(self, nu, z):
from sympy import exp_polar, unpolarify, exp, factorial
if nu == 1:
return -Ei(z*exp_polar(-I*pi)) - I*pi
elif nu.is_Integer and nu > 1:
# DLMF, 8.19.7
x = -unpolarify(z)
return x**(nu - 1)/factorial(nu - 1)*E1(z).rewrite(Ei) + \
exp(x)/factorial(nu - 1) * \
Add(*[factorial(nu - k - 2)*x**k for k in range(nu - 1)])
else:
return self
def _eval_expand_func(self, **hints):
return self.rewrite(Ei).rewrite(expint, **hints)
def _eval_rewrite_as_Si(self, nu, z):
if nu != 1:
return self
return Shi(z) - Chi(z)
_eval_rewrite_as_Ci = _eval_rewrite_as_Si
_eval_rewrite_as_Chi = _eval_rewrite_as_Si
_eval_rewrite_as_Shi = _eval_rewrite_as_Si
def E1(z):
"""
Classical case of the generalized exponential integral.
This is equivalent to ``expint(1, z)``.
"""
return expint(1, z)
###############################################################################
#################### TRIGONOMETRIC INTEGRALS ##################################
###############################################################################
class TrigonometricIntegral(Function):
""" Base class for trigonometric integrals. """
nargs = 1
@classmethod
def eval(cls, z):
if z == 0:
return cls._atzero
elif z is S.Infinity:
return cls._atinf
elif z is S.NegativeInfinity:
return cls._atneginf
nz = z.extract_multiplicatively(polar_lift(I))
if nz is None and cls._trigfunc(0) == 0:
nz = z.extract_multiplicatively(I)
if nz is not None:
return cls._Ifactor(nz, 1)
nz = z.extract_multiplicatively(polar_lift(-I))
if nz is not None:
return cls._Ifactor(nz, -1)
nz = z.extract_multiplicatively(polar_lift(-1))
if nz is None and cls._trigfunc(0) == 0:
nz = z.extract_multiplicatively(-1)
if nz is not None:
return cls._minusfactor(nz)
nz, n = z.extract_branch_factor()
if n == 0 and nz == z:
return
return 2*pi*I*n*cls._trigfunc(0) + cls(nz)
def fdiff(self, argindex=1):
from sympy import unpolarify
arg = unpolarify(self.args[0])
if argindex == 1:
return self._trigfunc(arg)/arg
def _eval_rewrite_as_Ei(self, z):
return self._eval_rewrite_as_expint(z).rewrite(Ei)
def _eval_rewrite_as_uppergamma(self, z):
from sympy import uppergamma
return self._eval_rewrite_as_expint(z).rewrite(uppergamma)
def _eval_nseries(self, x, n, logx):
# NOTE this is fairly inefficient
from sympy import log, EulerGamma, Pow
n += 1
if self.args[0].subs(x, 0) != 0:
return super(TrigonometricIntegral, self)._eval_nseries(x, n, logx)
baseseries = self._trigfunc(x)._eval_nseries(x, n, logx)
if self._trigfunc(0) != 0:
baseseries -= 1
baseseries = baseseries.replace(Pow, lambda t, n: t**n/n)
if self._trigfunc(0) != 0:
baseseries += EulerGamma + log(x)
return baseseries.subs(x, self.args[0])._eval_nseries(x, n, logx)
class Si(TrigonometricIntegral):
r"""
Sine integral.
This function is defined by
.. math:: \operatorname{Si}(z) = \int_0^z \frac{\sin{t}}{t} \mathrm{d}t.
It is an entire function.
See Also
========
Ci: Cosine integral.
Shi: Sinh integral.
Chi: Cosh integral.
expint: The generalised exponential integral.
References
==========
- http://en.wikipedia.org/wiki/Trigonometric_integral
Examples
========
>>> from sympy import Si
>>> from sympy.abc import z
The sine integral is an antiderivative of sin(z)/z:
>>> Si(z).diff(z)
sin(z)/z
It is unbranched:
>>> from sympy import exp_polar, I, pi
>>> Si(z*exp_polar(2*I*pi))
Si(z)
Sine integral behaves much like ordinary sine under multiplication by I:
>>> Si(I*z)
I*Shi(z)
>>> Si(-z)
-Si(z)
It can also be expressed in terms of exponential integrals, but beware
that the latter is branched:
>>> from sympy import expint
>>> Si(z).rewrite(expint)
-I*(-expint(1, z*exp_polar(-I*pi/2))/2 +
expint(1, z*exp_polar(I*pi/2))/2) + pi/2
"""
_trigfunc = C.sin
_atzero = S(0)
_atinf = pi*S.Half
_atneginf = -pi*S.Half
@classmethod
def _minusfactor(cls, z):
return -Si(z)
@classmethod
def _Ifactor(cls, z, sign):
return I*Shi(z)*sign
def _eval_rewrite_as_expint(self, z):
# XXX should we polarify z?
return pi/2 + (E1(polar_lift(I)*z) - E1(polar_lift(-I)*z))/2/I
class Ci(TrigonometricIntegral):
r"""
Cosine integral.
This function is defined for positive :math:`x` by
.. math:: \operatorname{Ci}(x) = \gamma + \log{x}
+ \int_0^x \frac{\cos{t} - 1}{t} \mathrm{d}t
= -\int_x^\infty \frac{\cos{t}}{t} \mathrm{d}t,
where :math:`\gamma` is the Euler-Mascheroni constant.
We have
.. math:: \operatorname{Ci}(z) =
-\frac{\operatorname{E}_1\left(e^{i\pi/2} z\right)
+ \operatorname{E}_1\left(e^{-i \pi/2} z\right)}{2}
which holds for all polar :math:`z` and thus provides an analytic
continuation to the Riemann surface of the logarithm.
The formula also holds as stated
for :math:`z \in \mathbb{C}` with :math:`Re(z) > 0`.
By lifting to the principal branch we obtain an analytic function on the
cut complex plane.
See Also
========
Si: Sine integral.
Shi: Sinh integral.
Chi: Cosh integral.
expint: The generalised exponential integral.
References
==========
- http://en.wikipedia.org/wiki/Trigonometric_integral
Examples
========
>>> from sympy import Ci
>>> from sympy.abc import z
The cosine integral is a primitive of cos(z)/z:
>>> Ci(z).diff(z)
cos(z)/z
It has a logarithmic branch point at the origin:
>>> from sympy import exp_polar, I, pi
>>> Ci(z*exp_polar(2*I*pi))
Ci(z) + 2*I*pi
Cosine integral behaves somewhat like ordinary cos under multiplication by I:
>>> from sympy import polar_lift
>>> Ci(polar_lift(I)*z)
Chi(z) + I*pi/2
>>> Ci(polar_lift(-1)*z)
Ci(z) + I*pi
It can also be expressed in terms of exponential integrals:
>>> from sympy import expint
>>> Ci(z).rewrite(expint)
-expint(1, z*exp_polar(-I*pi/2))/2 - expint(1, z*exp_polar(I*pi/2))/2
"""
_trigfunc = C.cos
_atzero = S.ComplexInfinity
_atinf = S.Zero
_atneginf = I*pi
@classmethod
def _minusfactor(cls, z):
return Ci(z) + I*pi
@classmethod
def _Ifactor(cls, z, sign):
return Chi(z) + I*pi/2*sign
def _eval_rewrite_as_expint(self, z):
return -(E1(polar_lift(I)*z) + E1(polar_lift(-I)*z))/2
class Shi(TrigonometricIntegral):
r"""
Sinh integral.
This function is defined by
.. math:: \operatorname{Shi}(z) = \int_0^z \frac{\sinh{t}}{t} \mathrm{d}t.
It is an entire function.
See Also
========
Si: Sine integral.
Ci: Cosine integral.
Chi: Cosh integral.
expint: The generalised exponential integral.
References
==========
- http://en.wikipedia.org/wiki/Trigonometric_integral
Examples
========
>>> from sympy import Shi
>>> from sympy.abc import z
The Sinh integral is a primitive of sinh(z)/z:
>>> Shi(z).diff(z)
sinh(z)/z
It is unbranched:
>>> from sympy import exp_polar, I, pi
>>> Shi(z*exp_polar(2*I*pi))
Shi(z)
Sinh integral behaves much like ordinary sinh under multiplication by I:
>>> Shi(I*z)
I*Si(z)
>>> Shi(-z)
-Shi(z)
It can also be expressed in terms of exponential integrals, but beware
that the latter is branched:
>>> from sympy import expint
>>> Shi(z).rewrite(expint)
expint(1, z)/2 - expint(1, z*exp_polar(I*pi))/2 - I*pi/2
"""
_trigfunc = C.sinh
_atzero = S(0)
_atinf = S.Infinity
_atneginf = S.NegativeInfinity
@classmethod
def _minusfactor(cls, z):
return -Shi(z)
@classmethod
def _Ifactor(cls, z, sign):
return I*Si(z)*sign
def _eval_rewrite_as_expint(self, z):
from sympy import exp_polar
# XXX should we polarify z?
return (E1(z) - E1(exp_polar(I*pi)*z))/2 - I*pi/2
class Chi(TrigonometricIntegral):
r"""
Cosh integral.
This function is defined for positive :math:`x` by
.. math:: \operatorname{Chi}(x) = \gamma + \log{x}
+ \int_0^x \frac{\cosh{t} - 1}{t} \mathrm{d}t,
where :math:`\gamma` is the Euler-Mascheroni constant.
We have
.. math:: \operatorname{Chi}(z) = \operatorname{Ci}\left(e^{i \pi/2}z\right)
- i\frac{\pi}{2},
which holds for all polar :math:`z` and thus provides an analytic
continuation to the Riemann surface of the logarithm.
By lifting to the principal branch we obtain an analytic function on the
cut complex plane.
See Also
========
Si: Sine integral.
Ci: Cosine integral.
Shi: Sinh integral.
expint: The generalised exponential integral.
References
==========
- http://en.wikipedia.org/wiki/Trigonometric_integral
Examples
========
>>> from sympy import Chi
>>> from sympy.abc import z
The cosh integral is a primitive of cosh(z)/z:
>>> Chi(z).diff(z)
cosh(z)/z
It has a logarithmic branch point at the origin:
>>> from sympy import exp_polar, I, pi
>>> Chi(z*exp_polar(2*I*pi))
Chi(z) + 2*I*pi
Cosh integral behaves somewhat like ordinary cosh under multiplication by I:
>>> from sympy import polar_lift
>>> Chi(polar_lift(I)*z)
Ci(z) + I*pi/2
>>> Chi(polar_lift(-1)*z)
Chi(z) + I*pi
It can also be expressed in terms of exponential integrals:
>>> from sympy import expint
>>> Chi(z).rewrite(expint)
-expint(1, z)/2 - expint(1, z*exp_polar(I*pi))/2 - I*pi/2
"""
_trigfunc = C.cosh
_atzero = S.ComplexInfinity
_atinf = S.Infinity
_atneginf = S.Infinity
@classmethod
def _minusfactor(cls, z):
return Chi(z) + I*pi
@classmethod
def _Ifactor(cls, z, sign):
return Ci(z) + I*pi/2*sign
def _eval_rewrite_as_expint(self, z):
from sympy import exp_polar
return -I*pi/2 - (E1(z) + E1(exp_polar(I*pi)*z))/2
###############################################################################
#################### FRESNEL INTEGRALS ########################################
###############################################################################
class FresnelIntegral(Function):
""" Base class for the Fresnel integrals."""
nargs = 1
unbranched = True
@classmethod
def eval(cls, z):
# Value at zero
if z is S.Zero:
return S(0)
# Try to pull out factors of -1 and I
prefact = S.One
newarg = z
changed = False
nz = newarg.extract_multiplicatively(-1)
if nz is not None:
prefact = -prefact
newarg = nz
changed = True
nz = newarg.extract_multiplicatively(I)
if nz is not None:
prefact = cls._sign*I*prefact
newarg = nz
changed = True
if changed:
return prefact*cls(newarg)
# Values at positive infinities signs
# if any were extracted automatically
if z is S.Infinity:
return S.Half
elif z is I*S.Infinity:
return cls._sign*I*S.Half
def fdiff(self, argindex=1):
if argindex == 1:
return self._trigfunc(S.Half*pi*self.args[0]**2)
else:
raise ArgumentIndexError(self, argindex)
def _eval_is_real(self):
return self.args[0].is_real
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def _as_real_imag(self, deep=True, **hints):
if self.args[0].is_real:
if deep:
hints['complex'] = False
return (self.expand(deep, **hints), S.Zero)
else:
return (self, S.Zero)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
return (re, im)
def as_real_imag(self, deep=True, **hints):
# Fresnel S
# http://functions.wolfram.com/06.32.19.0003.01
# http://functions.wolfram.com/06.32.19.0006.01
# Fresnel C
# http://functions.wolfram.com/06.33.19.0003.01
# http://functions.wolfram.com/06.33.19.0006.01
x, y = self._as_real_imag(deep=deep, **hints)
sq = -y**2/x**2
re = S.Half*(self.func(x + x*sqrt(sq)) + self.func(x - x*sqrt(sq)))
im = x/(2*y) * sqrt(sq) * (self.func(x - x*sqrt(sq)) -
self.func(x + x*sqrt(sq)))
return (re, im)
class fresnels(FresnelIntegral):
r"""
Fresnel integral S.
This function is defined by
.. math:: \operatorname{S}(z) = \int_0^z \sin{\frac{\pi}{2} t^2} \mathrm{d}t.
It is an entire function.
Examples
========
>>> from sympy import I, oo, fresnels
>>> from sympy.abc import z
Several special values are known:
>>> fresnels(0)
0
>>> fresnels(oo)
1/2
>>> fresnels(-oo)
-1/2
>>> fresnels(I*oo)
-I/2
>>> fresnels(-I*oo)
I/2
In general one can pull out factors of -1 and I from the argument:
>>> fresnels(-z)
-fresnels(z)
>>> fresnels(I*z)
-I*fresnels(z)
The Fresnel S integral obeys the mirror symmetry:
>>> from sympy import conjugate
>>> conjugate(fresnels(z))
fresnels(conjugate(z))
Differentiation with respect to z is supported:
>>> from sympy import diff
>>> diff(fresnels(z), z)
sin(pi*z**2/2)
Defining the Fresnel functions via an integral
>>> from sympy import integrate, pi, sin, gamma, expand_func
>>> integrate(sin(pi*z**2/2), z)
3*fresnels(z)*gamma(3/4)/(4*gamma(7/4))
>>> expand_func(integrate(sin(pi*z**2/2), z))
fresnels(z)
We can numerically evaluate the Fresnel integral to arbitrary precision
on the whole complex plane:
>>> fresnels(2).evalf(30)
0.343415678363698242195300815958
>>> fresnels(-2*I).evalf(30)
0.343415678363698242195300815958*I
See Also
========
fresnelc
References
==========
.. [1] http://en.wikipedia.org/wiki/Fresnel_integral
.. [2] http://dlmf.nist.gov/7
.. [3] http://mathworld.wolfram.com/FresnelIntegrals.html
.. [4] http://functions.wolfram.com/GammaBetaErf/FresnelS
"""
_trigfunc = C.sin
_sign = -S.One
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) > 1:
p = previous_terms[-1]
return (-pi**2*x**4*(4*n - 1)/(8*n*(2*n + 1)*(4*n + 3))) * p
else:
return x**3 * (-x**4)**n * (S(2)**(-2*n - 1)*pi**(2*n + 1)) / ((4*n + 3)*C.factorial(2*n + 1))
def _eval_rewrite_as_erf(self, z):
return (S.One + I)/4 * (erf((S.One + I)/2*sqrt(pi)*z) - I*erf((S.One - I)/2*sqrt(pi)*z))
def _eval_rewrite_as_hyper(self, z):
return pi*z**3/6 * hyper([S(3)/4], [S(3)/2, S(7)/4], -pi**2*z**4/16)
def _eval_rewrite_as_meijerg(self, z):
return (pi*z**(S(9)/4) / (sqrt(2)*(z**2)**(S(3)/4)*(-z)**(S(3)/4))
* meijerg([], [1], [S(3)/4], [S(1)/4, 0], -pi**2*z**4/16))
class fresnelc(FresnelIntegral):
r"""
Fresnel integral C.
This function is defined by
.. math:: \operatorname{C}(z) = \int_0^z \cos{\frac{\pi}{2} t^2} \mathrm{d}t.
It is an entire function.
Examples
========
>>> from sympy import I, oo, fresnelc
>>> from sympy.abc import z
Several special values are known:
>>> fresnelc(0)
0
>>> fresnelc(oo)
1/2
>>> fresnelc(-oo)
-1/2
>>> fresnelc(I*oo)
I/2
>>> fresnelc(-I*oo)
-I/2
In general one can pull out factors of -1 and I from the argument:
>>> fresnelc(-z)
-fresnelc(z)
>>> fresnelc(I*z)
I*fresnelc(z)
The Fresnel C integral obeys the mirror symmetry:
>>> from sympy import conjugate
>>> conjugate(fresnelc(z))
fresnelc(conjugate(z))
Differentiation with respect to z is supported:
>>> from sympy import diff
>>> diff(fresnelc(z), z)
cos(pi*z**2/2)
Defining the Fresnel functions via an integral
>>> from sympy import integrate, pi, cos, gamma, expand_func
>>> integrate(cos(pi*z**2/2), z)
fresnelc(z)*gamma(1/4)/(4*gamma(5/4))
>>> expand_func(integrate(cos(pi*z**2/2), z))
fresnelc(z)
We can numerically evaluate the Fresnel integral to arbitrary precision
on the whole complex plane:
>>> fresnelc(2).evalf(30)
0.488253406075340754500223503357
>>> fresnelc(-2*I).evalf(30)
-0.488253406075340754500223503357*I
See Also
========
fresnels
References
==========
.. [1] http://en.wikipedia.org/wiki/Fresnel_integral
.. [2] http://dlmf.nist.gov/7
.. [3] http://mathworld.wolfram.com/FresnelIntegrals.html
.. [4] http://functions.wolfram.com/GammaBetaErf/FresnelC
"""
_trigfunc = C.cos
_sign = S.One
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) > 1:
p = previous_terms[-1]
return (-pi**2*x**4*(4*n - 3)/(8*n*(2*n - 1)*(4*n + 1))) * p
else:
return x * (-x**4)**n * (S(2)**(-2*n)*pi**(2*n)) / ((4*n + 1)*C.factorial(2*n))
def _eval_rewrite_as_erf(self, z):
return (S.One - I)/4 * (erf((S.One + I)/2*sqrt(pi)*z) + I*erf((S.One - I)/2*sqrt(pi)*z))
def _eval_rewrite_as_hyper(self, z):
return z * hyper([S.One/4], [S.One/2, S(5)/4], -pi**2*z**4/16)
def _eval_rewrite_as_meijerg(self, z):
return (pi*z**(S(3)/4) / (sqrt(2)*root(z**2, 4)*root(-z, 4))
* meijerg([], [1], [S(1)/4], [S(3)/4, 0], -pi**2*z**4/16))
###############################################################################
#################### HELPER FUNCTIONS #########################################
###############################################################################
class _erfs(Function):
"""
Helper function to make the :math:`erf(z)` function
tractable for the Gruntz algorithm.
"""
nargs = 1
def _eval_aseries(self, n, args0, x, logx):
if args0[0] != S.Infinity:
return super(_erfs, self)._eval_aseries(n, args0, x, logx)
z = self.args[0]
l = [ 1/sqrt(S.Pi) * C.factorial(2*k)*(-S(
4))**(-k)/C.factorial(k) * (1/z)**(2*k + 1) for k in xrange(0, n) ]
o = C.Order(1/z**(2*n + 1), x)
# It is very inefficient to first add the order and then do the nseries
return (Add(*l))._eval_nseries(x, n, logx) + o
def fdiff(self, argindex=1):
if argindex == 1:
z = self.args[0]
return -2/sqrt(S.Pi) + 2*z*_erfs(z)
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_intractable(self, z):
return (S.One - erf(z))*C.exp(z**2)
| bsd-3-clause | 4,165,216,953,519,735,300 | 26.644068 | 110 | 0.549203 | false |
pshchelo/heat | heat/tests/openstack/test_volume.py | 1 | 42764 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import json
from cinderclient import exceptions as cinder_exp
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import cinder
from heat.engine.clients.os import glance
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.objects import resource_data as resource_data_object
from heat.tests.nova import fakes as fakes_nova
from heat.tests import test_volume_utils as vt_base
from heat.tests import utils
cinder_volume_template = '''
heat_template_version: 2013-05-23
description: Cinder volumes and attachments.
resources:
volume:
type: OS::Cinder::Volume
properties:
availability_zone: nova
size: 1
name: test_name
description: test_description
metadata:
key: value
volume2:
type: OS::Cinder::Volume
properties:
availability_zone: nova
size: 2
volume3:
type: OS::Cinder::Volume
properties:
availability_zone: nova
size: 1
name: test_name
scheduler_hints: {"hint1": "good_advice"}
attachment:
type: OS::Cinder::VolumeAttachment
properties:
instance_uuid: WikiDatabase
volume_id: { get_resource: volume }
mountpoint: /dev/vdc
'''
single_cinder_volume_template = '''
heat_template_version: 2013-05-23
description: Cinder volume
resources:
volume:
type: OS::Cinder::Volume
properties:
size: 1
name: test_name
description: test_description
'''
class CinderVolumeTest(vt_base.BaseVolumeTest):
def setUp(self):
super(CinderVolumeTest, self).setUp()
self.t = template_format.parse(cinder_volume_template)
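        # make the shared BaseVolumeTest helpers exercise the Cinder resources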
self.use_cinder = True
def _mock_create_volume(self, fv, stack_name, size=1,
final_status='available'):
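        """Register mox expectations for creating a Cinder volume.

        Sets up the client lookup, the volumes.create() call and the
        status polling via volumes.get(), then returns the fake volume
        in its final status.
        """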
cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=size, availability_zone='nova',
description='test_description',
name='test_name',
metadata={'key': 'value'}).AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume(final_status, id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
return fv_ready
def test_cinder_volume_size_constraint(self):
self.t['resources']['volume']['properties']['size'] = 0
stack = utils.parse_stack(self.t)
error = self.assertRaises(exception.StackValidationFailed,
self.create_volume,
self.t, stack, 'volume')
self.assertEqual(
"Property error : resources.volume.properties.size: "
"0 is out of range (min: 1, max: None)", six.text_type(error))
def test_cinder_create(self):
fv = vt_base.FakeVolume('creating')
stack_name = 'test_cvolume_stack'
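        # stub out the custom constraint validators used by the properties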
self.stub_SnapshotConstraint_validate()
self.stub_VolumeConstraint_validate()
self.stub_VolumeTypeConstraint_validate()
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone='nova',
description='test_description',
name='test_name',
metadata={'key': 'value'},
volume_type='lvm').AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
self.t['resources']['volume']['properties'].update({
'volume_type': 'lvm',
})
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
self.m.VerifyAll()
def test_cinder_create_from_image(self):
fv = vt_base.FakeVolume('downloading')
stack_name = 'test_cvolume_create_from_img_stack'
image_id = '46988116-6703-4623-9dbc-2bc6d284021b'
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
self.m.StubOutWithMock(glance.GlanceClientPlugin, 'get_image_id')
glance.GlanceClientPlugin.get_image_id(
image_id).MultipleTimes().AndReturn(image_id)
self.cinder_fc.volumes.create(
size=1, availability_zone='nova',
description='ImageVolumeDescription',
name='ImageVolume',
imageRef=image_id).AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
self.t['resources']['volume']['properties'] = {
'size': '1',
'name': 'ImageVolume',
'description': 'ImageVolumeDescription',
'availability_zone': 'nova',
'image': image_id,
}
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
self.m.VerifyAll()
def test_cinder_create_with_read_only(self):
fv = vt_base.FakeVolume('with_read_only_access_mode')
stack_name = 'test_create_with_read_only'
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone='nova',
description='ImageVolumeDescription',
name='ImageVolume').AndReturn(fv)
update_readonly_mock = self.patchobject(self.cinder_fc.volumes,
'update_readonly_flag')
        update_readonly_mock.return_value = None
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
self.t['resources']['volume']['properties'] = {
'size': '1',
'name': 'ImageVolume',
'description': 'ImageVolumeDescription',
'availability_zone': 'nova',
'read_only': False,
}
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
self.m.VerifyAll()
def test_cinder_default(self):
fv = vt_base.FakeVolume('creating')
stack_name = 'test_cvolume_default_stack'
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
vol_name = utils.PhysName(stack_name, 'volume')
self.cinder_fc.volumes.create(
size=1, availability_zone='nova',
description=None,
name=vol_name).AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
self.t['resources']['volume']['properties'] = {
'size': '1',
'availability_zone': 'nova',
}
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
self.m.VerifyAll()
def test_cinder_fn_getatt(self):
stack_name = 'test_cvolume_fngetatt_stack'
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
fv = vt_base.FakeVolume(
'available', availability_zone='zone1',
size=1, snapshot_id='snap-123', name='name',
description='desc', volume_type='lvm',
metadata={'key': 'value'}, source_volid=None,
bootable=False, created_at='2013-02-25T02:40:21.000000',
encrypted=False, attachments=[])
self.cinder_fc.volumes.get('vol-123').MultipleTimes().AndReturn(fv)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
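        # each attribute resolves to the matching fake volume field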
self.assertEqual(u'zone1', rsrc.FnGetAtt('availability_zone'))
self.assertEqual(u'1', rsrc.FnGetAtt('size'))
self.assertEqual(u'snap-123', rsrc.FnGetAtt('snapshot_id'))
self.assertEqual(u'name', rsrc.FnGetAtt('display_name'))
self.assertEqual(u'desc', rsrc.FnGetAtt('display_description'))
self.assertEqual(u'lvm', rsrc.FnGetAtt('volume_type'))
self.assertEqual(json.dumps({'key': 'value'}),
rsrc.FnGetAtt('metadata'))
self.assertEqual({'key': 'value'},
rsrc.FnGetAtt('metadata_values'))
self.assertEqual(u'None', rsrc.FnGetAtt('source_volid'))
self.assertEqual(u'available', rsrc.FnGetAtt('status'))
self.assertEqual(u'2013-02-25T02:40:21.000000',
rsrc.FnGetAtt('created_at'))
self.assertEqual(u'False', rsrc.FnGetAtt('bootable'))
self.assertEqual(u'False', rsrc.FnGetAtt('encrypted'))
self.assertEqual(u'[]', rsrc.FnGetAtt('attachments'))
error = self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.FnGetAtt, 'unknown')
self.assertEqual(
'The Referenced Attribute (volume unknown) is incorrect.',
six.text_type(error))
self.m.VerifyAll()
def test_cinder_attachment(self):
stack_name = 'test_cvolume_attach_stack'
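        # create script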
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'))
self.stub_VolumeConstraint_validate()
# delete script
fva = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('available'))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
rsrc = self.create_attachment(self.t, stack, 'attachment')
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_cinder_volume_shrink_fails(self):
stack_name = 'test_cvolume_shrink_fail_stack'
# create script
self._mock_create_volume(vt_base.FakeVolume('creating'),
stack_name, size=2)
# update script
fv = vt_base.FakeVolume('available', size=2)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.m.ReplayAll()
self.t['resources']['volume']['properties']['size'] = 2
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 1
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
ex = self.assertRaises(exception.ResourceFailure, update_task)
self.assertEqual('NotSupported: Shrinking volume is not supported.',
six.text_type(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_extend_detached(self):
stack_name = 'test_cvolume_extend_det_stack'
# create script
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
# update script
fv = vt_base.FakeVolume('available',
size=1, attachments=[])
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.cinder_fc.volumes.extend(fv.id, 2)
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('available'))
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 2
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
self.assertIsNone(update_task())
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_extend_fails_to_start(self):
stack_name = 'test_cvolume_extend_fail_start_stack'
# create script
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
# update script
fv = vt_base.FakeVolume('available',
size=1, attachments=[])
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.cinder_fc.volumes.extend(fv.id, 2).AndRaise(
cinder_exp.OverLimit(413))
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 2
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
ex = self.assertRaises(exception.ResourceFailure, update_task)
self.assertIn('Over limit', six.text_type(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_extend_fails_to_complete(self):
stack_name = 'test_cvolume_extend_fail_compl_stack'
# create script
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
# update script
fv = vt_base.FakeVolume('available',
size=1, attachments=[])
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.cinder_fc.volumes.extend(fv.id, 2)
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('error_extending'))
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 2
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
ex = self.assertRaises(exception.ResourceFailure, update_task)
self.assertIn("Volume resize failed - Unknown status error_extending",
six.text_type(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_extend_attached(self):
stack_name = 'test_cvolume_extend_att_stack'
# create script
self.stub_VolumeConstraint_validate()
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'))
# update script
attachments = [{'id': 'vol-123',
'device': '/dev/vdc',
'server_id': u'WikiDatabase'}]
fv2 = vt_base.FakeVolume('in-use',
attachments=attachments, size=1)
self.cinder_fc.volumes.get(fv2.id).AndReturn(fv2)
# detach script
fvd = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fvd)
self.cinder_fc.volumes.get(fvd.id).AndReturn(fvd)
self.fc.volumes.delete_server_volume('WikiDatabase', 'vol-123')
self.cinder_fc.volumes.get(fvd.id).AndReturn(
vt_base.FakeVolume('available'))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fvd)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
# resize script
self.cinder_fc.volumes.extend(fvd.id, 2)
self.cinder_fc.volumes.get(fvd.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fvd.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fvd.id).AndReturn(
vt_base.FakeVolume('available'))
# attach script
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'),
update=True)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
self.create_attachment(self.t, stack, 'attachment')
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 2
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
self.assertIsNone(update_task())
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_extend_created_from_backup_with_same_size(self):
stack_name = 'test_cvolume_extend_snapsht_stack'
# create script
fvbr = vt_base.FakeBackupRestore('vol-123')
cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
self.cinder_fc)
self.m.StubOutWithMock(self.cinder_fc.restores, 'restore')
self.cinder_fc.restores.restore('backup-123').AndReturn(fvbr)
self.cinder_fc.volumes.get('vol-123').AndReturn(
vt_base.FakeVolume('restoring-backup'))
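        # the restored volume is renamed to the resource's physical name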
vol_name = utils.PhysName(stack_name, 'volume')
self.cinder_fc.volumes.update('vol-123', description=None,
name=vol_name).AndReturn(None)
self.cinder_fc.volumes.get('vol-123').AndReturn(
vt_base.FakeVolume('available'))
# update script
fv = vt_base.FakeVolume('available', size=2)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.m.ReplayAll()
self.t['resources']['volume']['properties'] = {
'availability_zone': 'nova',
'backup_id': 'backup-123'
}
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual('available', fv.status)
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 2
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
self.assertIsNone(update_task())
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_retype(self):
fv = vt_base.FakeVolume('available',
size=1, name='my_vol',
description='test')
stack_name = 'test_cvolume_retype'
new_vol_type = 'new_type'
self.patchobject(cinder.CinderClientPlugin, '_create',
return_value=self.cinder_fc)
self.patchobject(self.cinder_fc.volumes, 'create', return_value=fv)
self.patchobject(self.cinder_fc.volumes, 'get', return_value=fv)
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume2')
props = copy.deepcopy(rsrc.properties.data)
props['volume_type'] = new_vol_type
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
self.patchobject(cinder.CinderClientPlugin, 'get_volume_type',
return_value=new_vol_type)
self.patchobject(self.cinder_fc.volumes, 'retype')
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual(1, self.cinder_fc.volumes.retype.call_count)
self.cinder_fc.volume_api_version = 1
new_vol_type_1 = 'new_type_1'
props = copy.deepcopy(rsrc.properties.data)
props['volume_type'] = new_vol_type_1
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
        # volume API v1 does not support retype, so the update must fail
update_task = scheduler.TaskRunner(rsrc.update, after)
ex = self.assertRaises(exception.ResourceFailure, update_task)
self.assertEqual('NotSupported: Using Cinder API V1, '
'volume_type update is not supported.',
six.text_type(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.assertEqual(1, self.cinder_fc.volumes.retype.call_count)
def test_cinder_volume_update_name_and_metadata(self):
# update the name, description and metadata
fv = vt_base.FakeVolume('creating',
size=1, name='my_vol',
description='test')
stack_name = 'test_cvolume_updname_stack'
update_name = 'update_name'
meta = {'Key': 'New Value'}
update_description = 'update_description'
kwargs = {
'name': update_name,
'description': update_description
}
fv = self._mock_create_volume(fv, stack_name)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.cinder_fc.volumes.update(fv, **kwargs).AndReturn(None)
self.cinder_fc.volumes.update_all_metadata(fv, meta).AndReturn(None)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['name'] = update_name
props['description'] = update_description
props['metadata'] = meta
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
scheduler.TaskRunner(rsrc.update, after)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)

        self.m.VerifyAll()
def test_cinder_volume_update_read_only(self):
# update read only access mode
fv = vt_base.FakeVolume('update_read_only_access_mode')
stack_name = 'test_update_read_only'
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone='nova',
description='test_description',
name='test_name',
metadata={u'key': u'value'}).AndReturn(fv)
update_readonly_mock = self.patchobject(self.cinder_fc.volumes,
'update_readonly_flag')
        update_readonly_mock.return_value = None
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['read_only'] = True
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
scheduler.TaskRunner(rsrc.update, after)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)

        self.m.VerifyAll()
def test_cinder_snapshot(self):
stack_name = 'test_cvolume_snpsht_stack'
cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone=None,
description='test_description',
name='test_name'
).AndReturn(vt_base.FakeVolume('creating'))
fv = vt_base.FakeVolume('available')
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
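        # snapshotting creates a cinder backup, polled until it is available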
fb = vt_base.FakeBackup('creating')
self.m.StubOutWithMock(self.cinder_fc.backups, 'create')
self.cinder_fc.backups.create(fv.id).AndReturn(fb)
self.m.StubOutWithMock(self.cinder_fc.backups, 'get')
self.cinder_fc.backups.get(fb.id).AndReturn(
vt_base.FakeBackup('available'))
self.m.ReplayAll()
t = template_format.parse(single_cinder_volume_template)
stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = stack['volume']
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.snapshot)()
self.assertEqual((rsrc.SNAPSHOT, rsrc.COMPLETE), rsrc.state)
self.assertEqual({'backup_id': 'backup-123'},
resource_data_object.ResourceData.get_all(rsrc))
self.m.VerifyAll()
def test_cinder_snapshot_error(self):
stack_name = 'test_cvolume_snpsht_err_stack'
cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone=None,
description='test_description',
name='test_name'
).AndReturn(vt_base.FakeVolume('creating'))
fv = vt_base.FakeVolume('available')
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fb = vt_base.FakeBackup('creating')
self.m.StubOutWithMock(self.cinder_fc.backups, 'create')
self.cinder_fc.backups.create(fv.id).AndReturn(fb)
self.m.StubOutWithMock(self.cinder_fc.backups, 'get')
fail_reason = 'Could not determine which Swift endpoint to use'
self.cinder_fc.backups.get(fb.id).AndReturn(
vt_base.FakeBackup('error', fail_reason=fail_reason))
self.m.ReplayAll()
t = template_format.parse(single_cinder_volume_template)
stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = stack['volume']
scheduler.TaskRunner(rsrc.create)()
self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.snapshot))
self.assertEqual((rsrc.SNAPSHOT, rsrc.FAILED), rsrc.state)
self.assertIn(fail_reason, rsrc.status_reason)
self.assertEqual({u'backup_id': u'backup-123'},
resource_data_object.ResourceData.get_all(rsrc))
self.m.VerifyAll()
def test_cinder_volume_attachment_update_device(self):
stack_name = 'test_cvolume_attach_udev_stack'
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
self._mock_create_server_volume_script(
vt_base.FakeVolume('attaching'))
self.stub_VolumeConstraint_validate()
# delete script
fva = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('available'))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
# attach script
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'),
device=u'/dev/vdd',
update=True)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
rsrc = self.create_attachment(self.t, stack, 'attachment')
props = copy.deepcopy(rsrc.properties.data)
props['mountpoint'] = '/dev/vdd'
props['volume_id'] = 'vol-123'
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_attachment_update_volume(self):
stack_name = 'test_cvolume_attach_uvol_stack'
self.stub_VolumeConstraint_validate()
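        # create scripts: both volumes and the initial attachment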
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
fv2 = vt_base.FakeVolume('creating', id='vol-456')
vol2_name = utils.PhysName(stack_name, 'volume2')
self.cinder_fc.volumes.create(
size=2, availability_zone='nova',
description=None,
name=vol2_name).AndReturn(fv2)
self.cinder_fc.volumes.get(fv2.id).AndReturn(fv2)
fv2 = vt_base.FakeVolume('available', id=fv2.id)
self.cinder_fc.volumes.get(fv2.id).AndReturn(fv2)
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'))
# delete script
fva = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('available'))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
# attach script
fv2a = vt_base.FakeVolume('attaching', id='vol-456')
self._mock_create_server_volume_script(fv2a, volume='vol-456',
update=True)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
self.create_volume(self.t, stack, 'volume2')
rsrc = self.create_attachment(self.t, stack, 'attachment')
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
props = copy.deepcopy(rsrc.properties.data)
props['volume_id'] = 'vol-456'
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual(fv2a.id, rsrc.resource_id)
self.m.VerifyAll()
def test_cinder_volume_attachment_update_server(self):
stack_name = 'test_cvolume_attach_usrv_stack'
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
self._mock_create_server_volume_script(
vt_base.FakeVolume('attaching'))
self.stub_VolumeConstraint_validate()
# delete script
fva = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('available'))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
# attach script
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'),
server=u'AnotherServer',
update=True)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
rsrc = self.create_attachment(self.t, stack, 'attachment')
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
props = copy.deepcopy(rsrc.properties.data)
props['instance_uuid'] = 'AnotherServer'
props['volume_id'] = 'vol-123'
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_cinder_create_with_scheduler_hints(self):
fv = vt_base.FakeVolume('creating')
cinder.CinderClientPlugin._create().AndReturn(self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, name='test_name', description=None,
availability_zone='nova',
scheduler_hints={'hint1': 'good_advice'}).AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
stack_name = 'test_cvolume_scheduler_hints_stack'
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume3')
self.m.VerifyAll()
def test_cinder_create_with_scheduler_hints_and_cinder_api_v1(self):
cinder.CinderClientPlugin._create().AndReturn(self.cinder_fc)
self.cinder_fc.volume_api_version = 1
self.m.ReplayAll()
stack_name = 'test_cvolume_scheduler_hints_api_v1_stack'
stack = utils.parse_stack(self.t, stack_name=stack_name)
ex = self.assertRaises(exception.StackValidationFailed,
self.create_volume, self.t, stack, 'volume3')
self.assertIn('Scheduler hints are not supported by the current '
'volume API.', six.text_type(ex))
self.m.VerifyAll()
def _test_cinder_create_invalid_property_combinations(
self, stack_name, combinations, err_msg, exc):
stack = utils.parse_stack(self.t, stack_name=stack_name)
vp = stack.t['Resources']['volume2']['Properties']
vp.pop('size')
vp.update(combinations)
rsrc = stack['volume2']
ex = self.assertRaises(exc, rsrc.validate)
self.assertEqual(err_msg, six.text_type(ex))
def test_cinder_create_with_image_and_imageRef(self):
stack_name = 'test_create_with_image_and_imageRef'
combinations = {'imageRef': 'image-456', 'image': 'image-123'}
err_msg = ("Cannot define the following properties at the same "
"time: image, imageRef.")
self.stub_ImageConstraint_validate()
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.ResourcePropertyConflict)
def test_cinder_create_with_size_snapshot_and_image(self):
stack_name = 'test_create_with_size_snapshot_and_image'
combinations = {
'size': 1,
'image': 'image-123',
'snapshot_id': 'snapshot-123'}
self.stub_ImageConstraint_validate()
self.stub_SnapshotConstraint_validate()
err_msg = ('If "size" is provided, only one of "image", "imageRef", '
'"source_volid", "snapshot_id" can be specified, but '
'currently specified options: '
'[\'snapshot_id\', \'image\'].')
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_cinder_create_with_size_snapshot_and_imageRef(self):
stack_name = 'test_create_with_size_snapshot_and_imageRef'
combinations = {
'size': 1,
'imageRef': 'image-123',
'snapshot_id': 'snapshot-123'}
self.stub_ImageConstraint_validate()
self.stub_SnapshotConstraint_validate()
err_msg = ('If "size" is provided, only one of "image", "imageRef", '
'"source_volid", "snapshot_id" can be specified, but '
'currently specified options: '
'[\'snapshot_id\', \'imageRef\'].')
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_cinder_create_with_size_snapshot_and_sourcevol(self):
stack_name = 'test_create_with_size_snapshot_and_sourcevol'
combinations = {
'size': 1,
'source_volid': 'volume-123',
'snapshot_id': 'snapshot-123'}
self.stub_VolumeConstraint_validate()
self.stub_SnapshotConstraint_validate()
err_msg = ('If "size" is provided, only one of "image", "imageRef", '
'"source_volid", "snapshot_id" can be specified, but '
'currently specified options: '
'[\'snapshot_id\', \'source_volid\'].')
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_cinder_create_with_snapshot_and_source_volume(self):
stack_name = 'test_create_with_snapshot_and_source_volume'
combinations = {
'source_volid': 'source_volume-123',
'snapshot_id': 'snapshot-123'}
err_msg = ('If neither "backup_id" nor "size" is provided, one and '
'only one of "image", "imageRef", "source_volid", '
'"snapshot_id" must be specified, but currently '
'specified options: [\'snapshot_id\', \'source_volid\'].')
self.stub_VolumeConstraint_validate()
self.stub_SnapshotConstraint_validate()
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_cinder_create_with_image_and_source_volume(self):
stack_name = 'test_create_with_image_and_source_volume'
combinations = {
'source_volid': 'source_volume-123',
'image': 'image-123'}
err_msg = ('If neither "backup_id" nor "size" is provided, one and '
'only one of "image", "imageRef", "source_volid", '
'"snapshot_id" must be specified, but currently '
'specified options: [\'source_volid\', \'image\'].')
self.stub_VolumeConstraint_validate()
self.stub_ImageConstraint_validate()
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_cinder_create_no_size_no_combinations(self):
stack_name = 'test_create_no_size_no_options'
combinations = {}
err_msg = ('If neither "backup_id" nor "size" is provided, one and '
'only one of "image", "imageRef", "source_volid", '
'"snapshot_id" must be specified, but currently '
'specified options: [].')
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_volume_restore(self):
stack_name = 'test_cvolume_restore_stack'
# create script
cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone=None,
description='test_description',
name='test_name'
).AndReturn(vt_base.FakeVolume('creating'))
fv = vt_base.FakeVolume('available')
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
# snapshot script
fb = vt_base.FakeBackup('creating')
self.m.StubOutWithMock(self.cinder_fc.backups, 'create')
self.cinder_fc.backups.create(fv.id).AndReturn(fb)
self.m.StubOutWithMock(self.cinder_fc.backups, 'get')
self.cinder_fc.backups.get(fb.id).AndReturn(
vt_base.FakeBackup('available'))
# restore script
fvbr = vt_base.FakeBackupRestore('vol-123')
self.m.StubOutWithMock(self.cinder_fc.restores, 'restore')
self.cinder_fc.restores.restore('backup-123').AndReturn(fvbr)
self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
self.cinder_fc.volumes.update('vol-123',
description='test_description',
name='test_name')
self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
self.m.ReplayAll()
t = template_format.parse(single_cinder_volume_template)
stack = utils.parse_stack(t, stack_name=stack_name)
scheduler.TaskRunner(stack.create)()
self.assertEqual((stack.CREATE, stack.COMPLETE), stack.state)
scheduler.TaskRunner(stack.snapshot)()
self.assertEqual((stack.SNAPSHOT, stack.COMPLETE), stack.state)
data = stack.prepare_abandon()
fake_snapshot = collections.namedtuple(
'Snapshot', ('data', 'stack_id'))(data, stack.id)
stack.restore(fake_snapshot)
self.assertEqual((stack.RESTORE, stack.COMPLETE), stack.state)
self.m.VerifyAll()
| apache-2.0 | -4,979,401,692,813,748,000 | 39.883365 | 79 | 0.607637 | false |
arruda/rmr | rmr/apps/books/migrations/0006_books_to_userbooks.py | 1 | 8533 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
Book = orm['books.Book']
UserBook = orm['books.UserBook']
for book in Book.objects.all():
user = book.user
            userBook = UserBook(user=user, book=book)
userBook.desired = book.desired
userBook.purchase_store = book.purchase_store
userBook.purchased = book.purchased
userBook.purchase_value = book.purchase_value
userBook.purchase_date = book.purchase_date
userBook.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'authors.author': {
'Meta': {'ordering': "['name']", 'unique_together': "(('user', 'name'),)", 'object_name': 'Author'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'books.book': {
'Meta': {'ordering': "['name']", 'unique_together': "(('user', 'name'),)", 'object_name': 'Book'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'books'", 'to': "orm['authors.Author']"}),
'desired': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'genres': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['books.Genre']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'books'", 'to': "orm['publishers.Publisher']"}),
'purchase_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'purchase_store': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'books_old'", 'null': 'True', 'to': "orm['stores.Store']"}),
'purchase_value': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'purchased': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'release_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today', 'null': 'True', 'blank': 'True'}),
'synopsis': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'books.genre': {
'Meta': {'ordering': "['name']", 'unique_together': "(('user', 'name'),)", 'object_name': 'Genre'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'books.userbook': {
'Meta': {'object_name': 'UserBook'},
'book': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'users'", 'to': "orm['books.Book']"}),
'desired': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'purchase_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'purchase_store': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'books'", 'null': 'True', 'to': "orm['stores.Store']"}),
'purchase_value': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'purchased': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'books'", 'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'publishers.publisher': {
'Meta': {'ordering': "['name']", 'unique_together': "(('user', 'name'),)", 'object_name': 'Publisher'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'stores.store': {
'Meta': {'ordering': "['name']", 'unique_together': "(('user', 'name'),)", 'object_name': 'Store'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['books']
symmetrical = True
| mit | -4,910,646,489,504,296,000 | 70.108333 | 182 | 0.545881 | false |
az0/entity-metadata | code/etl_openlibrary.py | 1 | 2492 | #!/usr/bin/python3
#
# Copyright (C) 2019 by Compassion International. All rights reserved.
# License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>.
# This is free software: you are free to change and redistribute it.
# There is NO WARRANTY, to the extent permitted by law.
"""
This program ETLs the Open Library authors dump file.
The input is a tab-delimited file with JSON in one column.
The output is a simpler file, which is a CSV with basic biographical
information plus unique identifiers.
Get the dump from here
https://openlibrary.org/developers/dumps
Do not decompress the dump file.
"""
import csv
import sys
import gzip
import json
csv.field_size_limit(sys.maxsize)
# The value id_wikidata (not nested under remote_ids) is defined
# exactly once out of 6.9M records, and in that case it's redundant
# to the value nested under remote_ids. It seems to be a mistake,
# so we'll ignore it.
retain_keys = ['key', 'id_wikidata', 'entity_type', 'name', 'fuller_name', 'personal_name', 'alternate_names',
'birth_date', 'death_date']
def process_json(j, writer):
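    # Sketch of the transformation this performs (JSON abridged, values
    # hypothetical):
    #     {"key": "/authors/OL1A", "name": "Ada Lovelace",
    #      "alternate_names": ["A. Lovelace"],
    #      "remote_ids": {"wikidata": "Q7259"}, ...}
    # becomes a CSV row with key=/authors/OL1A, name=Ada Lovelace,
    # alternate_names=A. Lovelace and id_wikidata=Q7259.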
author = json.loads(j)
author_retain = {}
for retain_key in retain_keys:
if retain_key in author:
author_retain[retain_key] = author[retain_key]
if 'remote_ids' in author and 'wikidata' in author['remote_ids']:
# extract nested value
author_retain['id_wikidata'] = author['remote_ids']['wikidata']
if 'alternate_names' in author:
# reformat multiple items from JSON list to pipe delimited
author_retain['alternate_names'] = '|'.join(author['alternate_names'])
writer.writerow(author_retain)
def go():
if len(sys.argv) != 3:
print(
'Usage: %s (path to OpenLibrary authors .txt.gz) (path to output .csv)' % sys.argv[0])
sys.exit(1)
txt_gz_fn = sys.argv[1]
csv_out_fn = sys.argv[2]
with gzip.open(txt_gz_fn, 'rt') as inf: # inf= IN File
reader = csv.reader(inf, delimiter='\t')
with open(csv_out_fn, 'w') as outf:
writer = csv.DictWriter(outf, fieldnames=retain_keys)
writer.writeheader()
print('Processing...')
count = 0
for row in reader:
process_json(row[4], writer)
count += 1
if (count % 10000) == 0:
# progress indicator
print('.', end='', flush=True)
print('\nDone.')
go()
| gpl-3.0 | -2,435,090,541,281,703,000 | 30.15 | 110 | 0.630016 | false |
jimporter/bfg9000 | test/unit/shell/test_shell.py | 1 | 5850 | import sys
from unittest import mock
from .. import *
from bfg9000.path import Root
from bfg9000.safe_str import jbos
from bfg9000.shell import (CalledProcessError, convert_args, execute, Mode,
split_paths, which)
base_dirs = {
Root.srcdir: '$(srcdir)',
Root.builddir: None,
}
class TestSplitPaths(TestCase):
def test_empty(self):
self.assertEqual(split_paths(''), [])
def test_single(self):
self.assertEqual(split_paths('foo'), ['foo'])
def test_multiple(self):
self.assertEqual(split_paths('foo:bar', ':'), ['foo', 'bar'])
class TestWhich(TestCase):
def setUp(self):
self.env = {'PATH': '/usr/bin{}/usr/local/bin'.format(os.pathsep)}
def test_simple(self):
with mock.patch('os.path.exists', return_value=True):
self.assertEqual(which('python', env=self.env), ['python'])
def test_multiword(self):
with mock.patch('os.path.exists', return_value=True):
self.assertEqual(which('python --foo', env=self.env),
['python', '--foo'])
def test_abs(self):
with mock.patch('os.path.exists', return_value=True):
self.assertEqual(which('/path/to/python', env=self.env),
['/path/to/python'])
def test_multiple(self):
with mock.patch('os.path.exists', side_effect=[False, False, True]):
self.assertEqual(which(['python', 'python3'], env=self.env),
['python3'])
def test_multiple_args(self):
with mock.patch('os.path.exists', side_effect=[False, False, True]):
self.assertEqual(which(['python', ['python3', '--foo']],
env=self.env), ['python3', '--foo'])
def test_resolve(self):
with mock.patch('os.path.exists', side_effect=[False, True]):
self.assertEqual(which('python', env=self.env, resolve=True),
[os.path.normpath('/usr/local/bin/python')])
def test_path_ext(self):
class MockInfo:
has_path_ext = True
env = {'PATH': '/usr/bin', 'PATHEXT': '.exe'}
with mock.patch('bfg9000.shell.platform_info', MockInfo), \
mock.patch('os.path.exists', side_effect=[False, True]): # noqa
self.assertEqual(which('python', env=env), ['python'])
with mock.patch('bfg9000.shell.platform_info', MockInfo), \
mock.patch('os.path.exists', side_effect=[False, True]): # noqa
self.assertEqual(which('python', env=env, resolve=True),
[os.path.normpath('/usr/bin/python.exe')])
with mock.patch('bfg9000.shell.platform_info', MockInfo), \
mock.patch('os.path.exists', side_effect=[False, True]): # noqa
self.assertEqual(
which([['python', '--foo']], env=env, resolve=True),
[os.path.normpath('/usr/bin/python.exe'), '--foo']
)
def test_not_found(self):
with mock.patch('os.path.exists', return_value=False):
self.assertRaises(IOError, which, 'python')
def test_empty(self):
self.assertRaises(TypeError, which, [])
class TestConvertArgs(PathTestCase):
def test_string(self):
self.assertEqual(convert_args(['foo', 'bar']), ['foo', 'bar'])
def test_path(self):
self.assertEqual(convert_args([self.Path('/foo')]),
[self.ospath.sep + 'foo'])
self.assertEqual(convert_args([self.Path('foo')], base_dirs), ['foo'])
self.assertEqual(convert_args([self.Path('foo', Root.srcdir)],
base_dirs),
[self.ospath.join('$(srcdir)', 'foo')])
self.assertRaises(TypeError, convert_args, [self.Path('foo')])
def test_jbos(self):
self.assertEqual(convert_args([jbos('foo', 'bar')]), ['foobar'])
self.assertEqual(convert_args([jbos('foo', self.Path('/bar'))]),
['foo' + self.ospath.sep + 'bar'])
class TestExecute(TestCase):
def test_no_output(self):
self.assertEqual(execute([sys.executable, '-c', 'exit()']), None)
def test_stdout(self):
self.assertEqual(execute([sys.executable, '-c', 'print("hello")'],
stdout=Mode.pipe), 'hello\n')
def test_stderr(self):
self.assertEqual(execute(
[sys.executable, '-c', 'import sys; sys.stderr.write("hello\\n")'],
stderr=Mode.pipe
), 'hello\n')
def test_stdout_stderr(self):
self.assertEqual(execute(
[sys.executable, '-c',
'import sys; sys.stdout.write("stdout\\n"); ' +
'sys.stderr.write("stderr\\n");'],
stdout=Mode.pipe, stderr=Mode.pipe
), ('stdout\n', 'stderr\n'))
def test_returncode(self):
self.assertEqual(execute([sys.executable, '-c', 'exit(1)'],
returncode=1), None)
self.assertEqual(execute([sys.executable, '-c', 'exit(1)'],
returncode=[1, 2]), None)
self.assertEqual(execute([sys.executable, '-c', 'exit(1)'],
returncode='any'), None)
self.assertEqual(execute([sys.executable, '-c', 'exit(1)'],
returncode='fail'), None)
with self.assertRaises(CalledProcessError):
self.assertEqual(execute([sys.executable, '-c', 'exit(1)']), None)
with self.assertRaises(CalledProcessError):
self.assertEqual(execute([sys.executable, '-c', 'exit(0)'],
returncode='fail'), None)
def test_shell(self):
self.assertEqual(execute('echo hello', shell=True, stdout=Mode.pipe),
'hello\n')
| bsd-3-clause | -537,182,260,989,453,700 | 38 | 79 | 0.547521 | false |
18F/github-issue-lifecycle | app/models.py | 1 | 15856 | import itertools
import os
from collections import OrderedDict
from datetime import date, datetime, timedelta
import requests
from requests.auth import HTTPBasicAuth
from . import db
from .app import app
from .utils import to_py_datetime
GH_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
BEGINNING_OF_TIME = '1970-01-01T00:00:00Z'
BEGINNING_DATETIME = datetime.strptime(BEGINNING_OF_TIME, GH_DATE_FORMAT)
def authorization():
try:
auth = HTTPBasicAuth(os.environ['GITHUB_USER'],
os.environ['GITHUB_AUTH'])
return auth
except KeyError:
app.logger.warning(
'Environment variables GITHUB_USER and GITHUB_AUTH not set')
app.logger.warning('Skipping authentication...')
return None
class Repo(db.Model):
id = db.Column(db.Integer, primary_key=True)
owner = db.Column(db.Text, nullable=False)
name = db.Column(db.Text, nullable=False)
synched_at = db.Column(db.DateTime(),
nullable=False,
default=BEGINNING_DATETIME)
issues = db.relationship('Issue',
cascade='all, delete-orphan',
order_by='Issue.created_at',
backref='repo')
ISSUES_PAGE_SIZE = 100
@classmethod
def get_fresh(cls, owner_name, repo_name, refresh_threshhold_seconds=None):
"""For a repo ``repo_name`` owned by ``owner_name``:
1. Fetches or creates the Repo model instance
2. Refreshes the data from Github if necessary"""
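        # Example use (owner/repo names are illustrative):
        #     repo = Repo.get_fresh('18f', 'github-issue-lifecycle')
        # Issues already in the database are reused unless more than
        # REFRESH_THRESHHOLD_SECONDS have elapsed since the last synch.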
if refresh_threshhold_seconds is None:
refresh_threshhold_seconds = app.config[
'REFRESH_THRESHHOLD_SECONDS']
(owner_name, repo_name) = (owner_name.lower(), repo_name.lower())
repo = (cls.query.filter_by(owner=owner_name,
name=repo_name).first() or
cls(owner=owner_name,
name=repo_name,
synched_at=BEGINNING_DATETIME))
if (datetime.now() - repo.synched_at) > timedelta(
seconds=int(refresh_threshhold_seconds)):
repo.fetch_issues()
db.session.add(repo)
db.session.commit()
repo.set_milestone_color_map()
return repo
def url(self):
return 'https://api.github.com/repos/{}/{}/'.format(self.owner,
self.name)
@classmethod
def _latest_update(cls, items, field_name='updated_at'):
"Returns latest `field_name` in `items`"
updates = [datetime.strptime(
i.get(field_name, BEGINNING_OF_TIME), GH_DATE_FORMAT)
for i in items]
return max(updates).strftime(GH_DATE_FORMAT)
def raw_issue_data(self):
params = {
'since': self.synched_at.strftime(GH_DATE_FORMAT),
'per_page': self.ISSUES_PAGE_SIZE,
'sort': 'updated',
'direction': 'asc',
'state': 'all' # include closed issues
}
auth = authorization()
issues = requests.get(self.url() + 'issues', params=params, auth=auth)
if issues.ok:
result = {}
new_issues = [i for i in issues.json()
if i['number'] not in result]
while new_issues:
result.update({i['number']: i for i in new_issues})
# Github seems to be ignoring `sort` parameter, have to
# check all results, alas
params['since'] = self._latest_update(new_issues)
issues = requests.get(self.url() + 'issues',
params=params,
auth=authorization())
new_issues = [i
for i in issues.json()
if i['number'] not in result]
return result.values()
else:
err_msg = 'Could not fetch issues for repo {}/{}: {}'.format(
self.owner, self.name, issues.text)
if not auth:
err_msg += '\nNOTE: Environment variables GITHUB_USER and GITHUB_AUTH not set'
raise FileNotFoundError(err_msg)
def fetch_issues(self):
"""Refresh the database's store of issues for this repo from github."""
for issue_data in self.raw_issue_data():
issue = Issue.query.filter_by(
number=issue_data.get('number')).first()
if issue:
db.session.delete(issue)
db.session.commit()
issue = Issue.from_raw(issue_data)
issue.repo = self
issue.fetch_events()
self.synched_at = datetime.now()
db.session.commit()
def json_summary(self):
result = dict(name=self.name,
owner=self.owner,
issues=[iss.json_summary() for iss in self.issues])
return result
def json_summary_flattened(self):
spans = list(self.spans())
result = dict(spans=spans,
stones=(self.stones()),
colors=[self.milestone_colors[s['span']['milestones'][
-1]] for s in spans], )
return result
def spans(self):
for (idx, iss) in enumerate(self.issues):
lifecycle = iss.lifecycle()
for span in lifecycle['spans']:
yield {'issue': iss,
'index': idx,
'span': span,
'final': lifecycle['final']}
def stones(self):
for (idx, iss) in enumerate(self.issues):
lifecycle = iss.lifecycle()
for stone in lifecycle['points']:
yield {'issue': iss, 'index': idx, 'stone': stone}
def milestones(self):
"List of milestones in all issues, in rough order of first appearance"
nested = [[e.milestone for e in i.events] for i in self.issues]
all_milestones = list(OrderedDict.fromkeys(
itertools.chain.from_iterable(nested)))
if None in all_milestones:
all_milestones.remove(None)
return all_milestones
_PALLETTE = ('greenyellow',
'cornflowerblue',
'hotpink',
'indigo',
                 'fuchsia',
'green',
'lightskyblue',
'firebrick',
'gray',
'lightcoral',
'darkslategray',
'darkorange',
'darkolivegreen',
'cyan',
'chocolate',
'blueviolet',
'burlywood',
'aquamarine', )
def set_milestone_color_map(self):
"Decide a color to correspond to each type of milestone used in the repo"
colors = itertools.cycle(self._PALLETTE
) # reuse colors if too many milestones
self.milestone_colors = {}
for milestone in self.milestones():
self.milestone_colors[milestone] = colors.__next__()
self.milestone_colors.update({'opened': 'gold',
'reopened': 'gold',
'closed': 'black'})
labels_issues = db.Table(
'labels_issues',
db.Column('label_id', db.Integer, db.ForeignKey('label.id')),
db.Column('issue_id', db.Integer, db.ForeignKey('issue.id')))
class Issue(db.Model):
id = db.Column(db.Integer, primary_key=True)
repo_id = db.Column(db.Integer(), db.ForeignKey(Repo.id))
number = db.Column(db.Integer)
title = db.Column(db.String())
body = db.Column(db.String())
state = db.Column(db.String())
creator_login = db.Column(db.String(),
db.ForeignKey('person.login'),
nullable=False)
assignee_login = db.Column(db.String(),
db.ForeignKey('person.login'),
nullable=True)
comments = db.Column(db.String())
locked = db.Column(db.Boolean)
url = db.Column(db.String(), nullable=True)
events_url = db.Column(db.String(), nullable=True)
labels_url = db.Column(db.String(), nullable=True)
comments_url = db.Column(db.String(), nullable=True)
html_url = db.Column(db.String(), nullable=True)
created_at = db.Column(db.DateTime(), default=date.today)
updated_at = db.Column(db.DateTime(), default=date.today)
closed_at = db.Column(db.DateTime(), nullable=True)
labels = db.relationship('Label',
secondary=labels_issues,
backref=db.backref('issues',
lazy='dynamic'))
events = db.relationship('Event',
cascade='all, delete-orphan',
order_by='Event.created_at',
backref='issue')
@classmethod
def from_raw(cls, issue_data):
insertable = {
'id': issue_data.get('id'),
'number': issue_data.get('number'),
'title': issue_data.get('title'),
'state': issue_data.get('state'),
'body': issue_data.get('body'),
'locked': issue_data.get('locked'),
'url': issue_data.get('url'),
'labels_url': issue_data.get('labels_url'),
'html_url': issue_data.get('html_url'),
'events_url': issue_data.get('events_url'),
'updated_at': to_py_datetime(issue_data['updated_at']),
'created_at': to_py_datetime(issue_data['created_at']),
'closed_at': to_py_datetime(issue_data['closed_at']),
}
creator = Person.from_raw(issue_data['user'])
insertable['creator_login'] = creator.login
if issue_data.get('assignee'):
assignee = Person.from_raw(issue_data['assignee'])
insertable['assignee_login'] = assignee.login
issue = cls(**insertable)
for label_data in issue_data['labels']:
issue.labels.append(Label.get_or_create(label_data))
db.session.add(issue)
return issue
def fetch_events(self):
response = requests.get('{}?per_page=100'.format(self.events_url),
auth=authorization())
if self.number in (4, 17):
from pprint import pprint
with open('events{}.json'.format(self.number), 'w') as outfile:
pprint(response.json(), outfile)
# todo: if > 100 events?
if response.ok:
for raw_event in response.json():
self.events.append(Event.from_raw(raw_event))
def json_summary(self):
lifecycle = self.lifecycle()
return {
'number': self.number,
'title': self.title,
'html_url': self.html_url,
'created_at': self.created_at,
'updated_at': self.updated_at,
'closed_at': self.closed_at,
'spans': lifecycle['spans'],
'points': lifecycle['points'],
}
def lifecycle(self):
"""Description of the events of this issue's lifecycle.
Returns dict with:
final: Last milestone marked
points: (name, date) of milestones and open/close events
spans: ([statuses], start date, end date) describing each time period
in the issue's lifecycle.
[statuses] is the list of milestones in effect. The last in the list
will generally be the one of interest.
"""
statuses = ['opened', ]
result = {'spans': [], 'final': 'opened', 'points': []}
start_date = self.created_at
for event in self.events:
if event.event in ('milestoned', 'demilestoned', 'closed',
'reopened'):
if event.milestone and event.milestone in statuses:
continue
result['spans'].append({'milestones': statuses[:],
'start': start_date,
'end': event.created_at})
if event.event == 'demilestoned':
try:
statuses.remove(event.milestone)
except ValueError:
pass # sometimes they demilestone a nonexistent milestone!
elif event.event == 'milestoned':
statuses.append(event.milestone)
elif event.event in ('closed', 'reopened'):
statuses.append(event.event)
result['points'].append({'status': statuses[-1],
'at': event.created_at})
start_date = event.created_at
if self.closed_at:
if statuses[-1] != 'closed':
if self.closed_at > start_date:
result['spans'].append({'milestones': statuses[:],
'start': start_date,
'end': self.closed_at})
result['points'].append({'status': 'closed',
'at': self.closed_at})
else:
result['spans'].append({'milestones': statuses[:],
'start': start_date,
'end': datetime.now()})
result['final'] = [s for s in statuses
if s not in ('closed', 'reopened')][-1]
return result
class Person(db.Model):
login = db.Column(db.String(), primary_key=True)
url = db.Column(db.String(), nullable=True)
created = db.relationship('Issue',
foreign_keys=[Issue.creator_login, ],
backref='author')
assigned = db.relationship('Issue',
foreign_keys=[Issue.assignee_login, ],
backref='assignee')
@classmethod
def from_raw(cls, raw_data):
person = cls.query.filter_by(login=raw_data['login']).first()
if person:
person.url = raw_data.get('url')
else:
person = cls(login=raw_data['login'], url=raw_data.get('url'))
db.session.add(person)
db.session.flush() # TODO: ugh, all this flushing
return person
class Label(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String())
url = db.Column(db.String())
color = db.Column(db.String(), nullable=True)
@classmethod
def get_or_create(cls, label_data):
label = cls.query.filter_by(name=label_data['name']).first() \
or cls(**label_data)
return label
class Event(db.Model):
id = db.Column(db.Integer, primary_key=True)
commit_id = db.Column(db.String())
url = db.Column(db.String())
actor = db.Column(db.String())
event = db.Column(db.String())
milestone = db.Column(db.String())
created_at = db.Column(db.DateTime())
issue_id = db.Column(db.Integer, db.ForeignKey('issue.id'))
@classmethod
def from_raw(cls, event_data):
"Given dict of event data fetched from GitHub API, return instance"
insertable = dict(
id=event_data['id'],
commit_id=event_data['commit_id'],
url=event_data['url'],
actor=event_data['actor'].get('login') if event_data[
'actor'] else None,
milestone=event_data.get('milestone') and event_data['milestone'][
'title'],
event=event_data['event'],
created_at=to_py_datetime(event_data.get('created_at')), )
return cls(**insertable)
| cc0-1.0 | 6,412,924,456,398,388,000 | 38.739348 | 94 | 0.521443 | false |
cscutcher/naruto-aufs-layers | naruto/cli.py | 1 | 8374 | # -*- coding: utf-8 -*-
"""
Main group for naruto cli
"""
import io
import logging
import os
import pathlib
import shutil
import click
from naruto import NarutoLayer, LayerNotFound
DEV_LOGGER = logging.getLogger(__name__)
DEFAULT_NARUTO_HOME = pathlib.Path(os.path.expanduser('~/.naruto'))
DEFAULT_LOG_LEVEL = logging.INFO
class CLIContext(object):
'''
Context for CLI
'''
def __init__(self):
self.naruto_home = DEFAULT_NARUTO_HOME
cli_context = click.make_pass_decorator(CLIContext, ensure=True)
@click.group()
@click.option(
'--naruto-home',
default=str(DEFAULT_NARUTO_HOME),
type=click.Path(
file_okay=False,
dir_okay=True,
writable=True,
readable=True,
resolve_path=True,
exists=False),
help=(
'Set default config directory used to store and retrieve layers. Default: {}'.format(
DEFAULT_NARUTO_HOME)))
@click.option(
'--verbosity',
'-V',
help='Set verbosity level explicitly (int or CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET)',
default=DEFAULT_LOG_LEVEL,
type=str)
@cli_context
def naruto_cli(ctx, naruto_home, verbosity):
'''
CLI for naruto
'''
try:
verbosity = int(verbosity)
except ValueError:
#Ints and strings are ok
pass
logging.basicConfig(level=verbosity)
DEV_LOGGER.debug('Set log level to %s', verbosity)
ctx.naruto_home = pathlib.Path(naruto_home)
DEV_LOGGER.debug('Home path is %r', ctx.naruto_home)
class _LayerLookup(click.ParamType):
'''
Type which loads naruto dir
'''
name = 'NarutoDir'
def __init__(self, allow_discovery=True):
self._allow_discovery = allow_discovery
def convert(self, value, param, local_context):
'''
Parse Naruto argument
'''
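        # Accepted forms of ``value`` (names below are illustrative):
        #     ''                 -> auto-discover the layer mounted at the CWD
        #     'work'             -> the lone layer root inside <naruto-home>/work
        #                           (default home: ~/.naruto)
        #     '/srv/layers/work' -> an explicit root directory (contains os.sep)
        #     'work:LAYERSPEC'   -> as above, then resolved via find_layer()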
DEV_LOGGER.debug('Trying to find root layer for value %r', value)
root_spec, _, layer_spec = value.partition(':')
cli_context = local_context.ensure_object(CLIContext)
if not root_spec and self._allow_discovery:
try:
layer = NarutoLayer.find_layer_mounted_at_dest(pathlib.Path(os.getcwd()))
except LayerNotFound:
self.fail(
'Couldn\'t auto-discover layer. '
                    'You must be in a directory which is a mounted layer for auto-discovery to work')
else:
if os.sep in root_spec:
naruto_root = pathlib.Path(root_spec)
else:
naruto_root = cli_context.naruto_home / root_spec
try:
naruto_root, = tuple(naruto_root.iterdir())
except FileNotFoundError:
self.fail('Directory {} does not exist'.format(naruto_root))
except ValueError:
self.fail('Unexpected number of folders in {}'.format(naruto_root))
try:
layer = NarutoLayer(naruto_root)
except LayerNotFound:
self.fail('{!s} is not a layer.'.format(naruto_root))
if layer_spec:
layer = layer.find_layer(layer_spec)
DEV_LOGGER.debug('Parsed layer at %r from cli', layer)
return layer
@naruto_cli.command()
@click.argument('name_or_path')
@click.option('--description', help='Add description to new naruto layer')
@cli_context
def create(ctx, name_or_path, description):
'''
Create new NarutoLayer
'''
if os.sep in name_or_path:
path = pathlib.Path(name_or_path)
DEV_LOGGER.info('Creating at raw path %r', path)
else:
home_naruto_dir = ctx.naruto_home
if not home_naruto_dir.is_dir():
home_naruto_dir.mkdir()
home_naruto_dir = home_naruto_dir.resolve()
path = home_naruto_dir / name_or_path
if not path.is_dir():
path.mkdir()
# Check nothing nasty from user
assert path.parent == home_naruto_dir
DEV_LOGGER.info('Creating %r in naruto home %r', home_naruto_dir, name_or_path)
if len(tuple(path.iterdir())) != 0:
raise Exception('Expected create directory {!s} to be empty'.format(path))
NarutoLayer.create(path, description=description)
@naruto_cli.command()
@cli_context
def list_home_layers(ctx):
'''
List layers stored in home directory
'''
for path in ctx.naruto_home.iterdir():
click.echo(str(path))
#################################################################################################
## Commands that modify or inspect existing layers
#################################################################################################
def _modification_command(fn):
'''
Add common options for modification
'''
fn = naruto_cli.command()(fn)
layer_lookup_help = (
'This specifies the layer you want to act upon. '
'If not specified we will try and discover the layer you have currently mounted.')
fn = click.option('-l', '--layer', type=_LayerLookup(), default='', help=layer_lookup_help)(fn)
return fn
class InfoNodeAdapter(object):
'''
Adapt NarutoLayer for info printout
'''
def __init__(self, layer):
self._layer = layer
def output(self, io_stream, level, highlight):
io_stream.write('{indent}+-- {highlight}{layer!s}{highlight}\n'.format(
indent=' ' * level,
layer=self._layer,
highlight='!!!!' if self._layer in highlight else ''))
for child in self._layer:
self.__class__(child).output(io_stream, level + 1, highlight)
@_modification_command
def info(layer):
'''
Get info about a layer
'''
io_stream = io.StringIO()
InfoNodeAdapter(layer.get_root()).output(io_stream, 0, highlight=(layer,))
click.echo(io_stream.getvalue())
@_modification_command
@click.argument('mount_dest')
def mount(layer, mount_dest):
'''
Mount a layer
'''
layer.mount(mount_dest)
@_modification_command
@click.argument('mount_dest')
@click.option('--description', help='Add description to new naruto layer')
def branch_and_mount(layer, mount_dest, description):
'''
Branch a layer and mount at new dest
'''
layer.create_child(description=description).mount(mount_dest)
@_modification_command
def unmount_all(layer):
'''
Unmount all uses of this layer
'''
layer.unmount_all()
@_modification_command
def find_mounts(layer):
'''
Find where layer is mounted
'''
for branch in layer.find_mounted_branches_iter():
click.echo('{branch.path}={branch.permission} at {branch.mount_point}'.format(
branch=branch))
@_modification_command
@click.option('--no-prompt', default=False, is_flag=True)
def delete(layer, no_prompt):
'''
Delete a layer
'''
if no_prompt:
confirm = click.echo
else:
confirm = lambda message: click.confirm(message, abort=True)
if layer.has_children:
click.secho(
'WARNING: This layer has {} direct children and a further {} descendants.'.format(
len(layer.children),
len(layer.descendants)),
fg='red')
if layer.mounted:
confirm(
'{} is currently mounted. Must unmount first. Continue?'.format(layer))
layer.unmount_all()
confirm(
click.style(
            'This will irreversibly delete {} and all {} descendants. Continue?'.format(
layer, len(layer.descendants)),
fg='red'))
shutil.rmtree(str(layer.path.resolve()))
@_modification_command
@click.argument('description', default='')
def description(layer, description):
'''
    Get or set the layer description
'''
if description:
layer.description = description
else:
click.echo(layer.description)
@_modification_command
@click.argument('tags', nargs=-1)
def tags(layer, tags):
'''
    Get or set tags
'''
if tags:
layer.tags = tags
else:
click.echo(', '.join(layer.tags))
@_modification_command
@click.argument('tags', nargs=-1)
def add_tags(layer, tags):
''' Add tag to layer'''
layer.tags = layer.tags.union(tags)
@_modification_command
@click.argument('tags', nargs=-1)
def remove_tags(layer, tags):
''' Remove tag from layer'''
layer.tags = layer.tags.difference(tags)
| gpl-3.0 | -2,532,855,138,760,252,400 | 26.455738 | 99 | 0.597683 | false |
zhangg/trove | integration/tests/integration/tests/volumes/driver.py | 1 | 21014 | # Copyright (c) 2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from numbers import Number
import os
import re
import shutil
import six
import socket
import time
import unittest
import pexpect
from proboscis import test
from proboscis.asserts import assert_raises
from proboscis.decorators import expect_exception
from proboscis.decorators import time_out
from trove.tests.config import CONFIG
from trove.common.utils import poll_until
from trove.tests.util import process
from trove.common.utils import import_class
from tests import initialize
WHITE_BOX = CONFIG.white_box
VOLUMES_DRIVER = "trove.volumes.driver"
if WHITE_BOX:
# TODO(tim.simpson): Restore this once white box functionality can be
# added back to this test module.
pass
# from nova import context
# from nova import exception
# from nova import flags
# from nova import utils
# from trove import exception as trove_exception
# from trove.utils import poll_until
# from trove import volume
# from trove.tests.volume import driver as test_driver
# FLAGS = flags.FLAGS
UUID_PATTERN = re.compile('^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-'
'[0-9a-f]{4}-[0-9a-f]{12}$')
HUGE_VOLUME = 5000
def is_uuid(text):
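    # e.g. is_uuid('3f2b9c1e-8a4d-4f6b-9e2a-1c5d7e9f0a2b') -> True
    #      is_uuid('not-a-uuid') -> False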
return UUID_PATTERN.search(text) is not None
class StoryDetails(object):
def __init__(self):
self.api = volume.API()
self.client = volume.Client()
self.context = context.get_admin_context()
self.device_path = None
self.volume_desc = None
self.volume_id = None
self.volume_name = None
self.volume = None
self.host = socket.gethostname()
self.original_uuid = None
self.original_device_info = None
self.resize_volume_size = 2
def get_volume(self):
return self.api.get(self.context, self.volume_id)
@property
def mount_point(self):
return "%s/%s" % (LOCAL_MOUNT_PATH, self.volume_id)
@property
def test_mount_file_path(self):
return "%s/test.txt" % self.mount_point
story = None
storyFail = None
LOCAL_MOUNT_PATH = "/testsmnt"
class VolumeTest(unittest.TestCase):
"""This test tells the story of a volume, from cradle to grave."""
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
def setUp(self):
global story, storyFail
self.story = story
self.storyFail = storyFail
def assert_volume_as_expected(self, volume):
self.assertIsInstance(volume["id"], Number)
self.assertEqual(self.story.volume_name, volume["display_name"])
self.assertEqual(self.story.volume_desc, volume["display_description"])
self.assertEqual(1, volume["size"])
self.assertEqual(self.story.context.user_id, volume["user_id"])
self.assertEqual(self.story.context.project_id, volume["project_id"])
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[initialize.start_volume])
class SetUp(VolumeTest):
def test_05_create_story(self):
"""Creating 'story' vars used by the rest of these tests."""
global story, storyFail
story = StoryDetails()
storyFail = StoryDetails()
@time_out(60)
def test_10_wait_for_topics(self):
"""Wait until the volume topic is up before proceeding."""
topics = ["volume"]
from tests.util.topics import hosts_up
while not all(hosts_up(topic) for topic in topics):
pass
def test_20_refresh_local_folders(self):
"""Delete the local folders used as mount locations if they exist."""
if os.path.exists(LOCAL_MOUNT_PATH):
#TODO(rnirmal): Also need to remove any existing mounts.
shutil.rmtree(LOCAL_MOUNT_PATH)
os.mkdir(LOCAL_MOUNT_PATH)
        # Give some time for the services to start up
time.sleep(10)
@time_out(60)
def test_30_mgmt_volume_check(self):
"""Get the volume information from the mgmt API"""
story_context = self.story.context
device_info = self.story.api.get_storage_device_info(story_context)
print("device_info : %r" % device_info)
self.assertNotEqual(device_info, None,
"the storage device information should exist")
self.story.original_device_info = device_info
@time_out(60)
def test_31_mgmt_volume_info(self):
"""Check the available space against the mgmt API info."""
story_context = self.story.context
device_info = self.story.api.get_storage_device_info(story_context)
print("device_info : %r" % device_info)
info = {'spaceTotal': device_info['raw_total'],
'spaceAvail': device_info['raw_avail']}
self._assert_available_space(info)
def _assert_available_space(self, device_info, fail=False):
"""
        Given the SAN device_info (fake or not), run the free-space assertions.
"""
print("DEVICE_INFO on SAN : %r" % device_info)
# Calculate the GBs; Divide by 2 for the FLAGS.san_network_raid_factor
gbs = 1.0 / 1024 / 1024 / 1024 / 2
total = int(device_info['spaceTotal']) * gbs
free = int(device_info['spaceAvail']) * gbs
used = total - free
usable = total * (FLAGS.san_max_provision_percent * 0.01)
real_free = float(int(usable - used))
print("total : %r" % total)
print("free : %r" % free)
print("used : %r" % used)
print("usable : %r" % usable)
print("real_free : %r" % real_free)
check_space = self.story.api.check_for_available_space
self.assertFalse(check_space(self.story.context, HUGE_VOLUME))
self.assertFalse(check_space(self.story.context, real_free + 1))
if fail:
self.assertFalse(check_space(self.story.context, real_free))
self.assertFalse(check_space(self.story.context, real_free - 1))
self.assertFalse(check_space(self.story.context, 1))
else:
self.assertTrue(check_space(self.story.context, real_free))
self.assertTrue(check_space(self.story.context, real_free - 1))
self.assertTrue(check_space(self.story.context, 1))
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[SetUp])
class AddVolumeFailure(VolumeTest):
@time_out(60)
def test_add(self):
"""
        Provision a volume that is expected to FAIL and assert that the
        return value reflects the FAILURE.
"""
self.assertIsNone(self.storyFail.volume_id)
name = "TestVolume"
desc = "A volume that was created for testing."
self.storyFail.volume_name = name
self.storyFail.volume_desc = desc
volume = self.storyFail.api.create(self.storyFail.context,
size=HUGE_VOLUME,
snapshot_id=None, name=name,
description=desc)
self.assertEqual(HUGE_VOLUME, volume["size"])
self.assertTrue("creating", volume["status"])
self.assertTrue("detached", volume["attach_status"])
self.storyFail.volume = volume
self.storyFail.volume_id = volume["id"]
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[AddVolumeFailure])
class AfterVolumeFailureIsAdded(VolumeTest):
"""Check that the volume can be retrieved via the API, and setup.
All we want to see returned is a list-like with an initial string.
"""
@time_out(120)
def test_api_get(self):
"""Wait until the volume is a FAILURE."""
volume = poll_until(lambda: self.storyFail.get_volume(),
lambda volume: volume["status"] != "creating")
self.assertEqual(volume["status"], "error")
self.assertTrue(volume["attach_status"], "detached")
@time_out(60)
def test_mgmt_volume_check(self):
"""Get the volume information from the mgmt API"""
info = self.story.api.get_storage_device_info(self.story.context)
print("device_info : %r" % info)
self.assertNotEqual(info, None,
"the storage device information should exist")
self.assertEqual(self.story.original_device_info['raw_total'],
info['raw_total'])
self.assertEqual(self.story.original_device_info['raw_avail'],
info['raw_avail'])
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[SetUp])
class AddVolume(VolumeTest):
@time_out(60)
def test_add(self):
"""Make call to prov. a volume and assert the return value is OK."""
self.assertIsNone(self.story.volume_id)
name = "TestVolume"
desc = "A volume that was created for testing."
self.story.volume_name = name
self.story.volume_desc = desc
volume = self.story.api.create(self.story.context, size=1,
snapshot_id=None, name=name,
description=desc)
self.assert_volume_as_expected(volume)
self.assertTrue("creating", volume["status"])
self.assertTrue("detached", volume["attach_status"])
self.story.volume = volume
self.story.volume_id = volume["id"]
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[AddVolume])
class AfterVolumeIsAdded(VolumeTest):
"""Check that the volume can be retrieved via the API, and setup.
All we want to see returned is a list-like with an initial string.
"""
@time_out(120)
def test_api_get(self):
"""Wait until the volume is finished provisioning."""
volume = poll_until(lambda: self.story.get_volume(),
lambda volume: volume["status"] != "creating")
self.assertEqual(volume["status"], "available")
self.assert_volume_as_expected(volume)
self.assertTrue(volume["attach_status"], "detached")
@time_out(60)
def test_mgmt_volume_check(self):
"""Get the volume information from the mgmt API"""
print("self.story.original_device_info : %r" %
self.story.original_device_info)
info = self.story.api.get_storage_device_info(self.story.context)
print("device_info : %r" % info)
self.assertNotEqual(info, None,
"the storage device information should exist")
self.assertEqual(self.story.original_device_info['raw_total'],
info['raw_total'])
volume_size = int(self.story.volume['size']) * (1024 ** 3) * 2
print("volume_size: %r" % volume_size)
print("self.story.volume['size']: %r" % self.story.volume['size'])
avail = int(self.story.original_device_info['raw_avail']) - volume_size
print("avail space: %r" % avail)
self.assertEqual(int(info['raw_avail']), avail)
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[AfterVolumeIsAdded])
class SetupVolume(VolumeTest):
@time_out(60)
def test_assign_volume(self):
"""Tell the volume it belongs to this host node."""
#TODO(tim.simpson) If this is important, could we add a test to
# make sure some kind of exception is thrown if it
# isn't added to certain drivers?
self.assertNotEqual(None, self.story.volume_id)
self.story.api.assign_to_compute(self.story.context,
self.story.volume_id,
self.story.host)
@time_out(60)
def test_setup_volume(self):
"""Set up the volume on this host. AKA discovery."""
self.assertNotEqual(None, self.story.volume_id)
device = self.story.client._setup_volume(self.story.context,
self.story.volume_id,
self.story.host)
if not isinstance(device, six.string_types):
self.fail("Expected device to be a string, but instead it was " +
str(type(device)) + ".")
self.story.device_path = device
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[SetupVolume])
class FormatVolume(VolumeTest):
@expect_exception(IOError)
@time_out(60)
def test_10_should_raise_IOError_if_format_fails(self):
"""
Tests that if the driver's _format method fails, its
public format method will perform an assertion properly, discover
it failed, and raise an exception.
"""
volume_driver_cls = import_class(FLAGS.volume_driver)
class BadFormatter(volume_driver_cls):
def _format(self, device_path):
pass
bad_client = volume.Client(volume_driver=BadFormatter())
bad_client._format(self.story.device_path)
@time_out(60)
def test_20_format(self):
self.assertNotEqual(None, self.story.device_path)
self.story.client._format(self.story.device_path)
def test_30_check_options(self):
cmd = ("sudo dumpe2fs -h %s 2> /dev/null | "
"awk -F ':' '{ if($1 == \"Reserved block count\") "
"{ rescnt=$2 } } { if($1 == \"Block count\") "
"{ blkcnt=$2 } } END { print (rescnt/blkcnt)*100 }'")
cmd = cmd % self.story.device_path
out, err = process(cmd)
self.assertEqual(float(5), round(float(out)), msg=out)
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[FormatVolume])
class MountVolume(VolumeTest):
@time_out(60)
def test_mount(self):
self.story.client._mount(self.story.device_path,
self.story.mount_point)
with open(self.story.test_mount_file_path, 'w') as file:
file.write("Yep, it's mounted alright.")
self.assertTrue(os.path.exists(self.story.test_mount_file_path))
def test_mount_options(self):
cmd = "mount -l | awk '/%s.*noatime/ { print $1 }'"
cmd %= LOCAL_MOUNT_PATH.replace('/', '')
out, err = process(cmd)
self.assertEqual(os.path.realpath(self.story.device_path), out.strip(),
msg=out)
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[MountVolume])
class ResizeVolume(VolumeTest):
@time_out(300)
def test_resize(self):
self.story.api.resize(self.story.context, self.story.volume_id,
self.story.resize_volume_size)
volume = poll_until(lambda: self.story.get_volume(),
lambda volume: volume["status"] == "resized")
self.assertEqual(volume["status"], "resized")
self.assertTrue(volume["attach_status"], "attached")
self.assertTrue(volume['size'], self.story.resize_volume_size)
@time_out(300)
def test_resizefs_rescan(self):
self.story.client.resize_fs(self.story.context,
self.story.volume_id)
expected = "trove.tests.volume.driver.ISCSITestDriver"
        if FLAGS.volume_driver == expected:
size = self.story.resize_volume_size * \
test_driver.TESTS_VOLUME_SIZE_MULTIPLIER * 1024 * 1024
else:
size = self.story.resize_volume_size * 1024 * 1024
out, err = process('sudo blockdev --getsize64 %s' %
os.path.realpath(self.story.device_path))
if int(out) < (size * 0.8):
self.fail("Size %s is not more or less %s" % (out, size))
# Reset the volume status to available
self.story.api.update(self.story.context, self.story.volume_id,
{'status': 'available'})
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[MountVolume])
class UnmountVolume(VolumeTest):
@time_out(60)
def test_unmount(self):
self.story.client._unmount(self.story.mount_point)
child = pexpect.spawn("sudo mount %s" % self.story.mount_point)
child.expect("mount: can't find %s in" % self.story.mount_point)
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[UnmountVolume])
class GrabUuid(VolumeTest):
@time_out(60)
def test_uuid_must_match_pattern(self):
"""UUID must be hex chars in the form 8-4-4-4-12."""
client = self.story.client # volume.Client()
device_path = self.story.device_path # '/dev/sda5'
uuid = client.get_uuid(device_path)
self.story.original_uuid = uuid
self.assertTrue(is_uuid(uuid), "uuid must match regex")
@time_out(60)
def test_get_invalid_uuid(self):
"""DevicePathInvalidForUuid is raised if device_path is wrong."""
client = self.story.client
device_path = "gdfjghsfjkhggrsyiyerreygghdsghsdfjhf"
self.assertRaises(trove_exception.DevicePathInvalidForUuid,
client.get_uuid, device_path)
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[GrabUuid])
class RemoveVolume(VolumeTest):
@time_out(60)
def test_remove(self):
self.story.client.remove_volume(self.story.context,
self.story.volume_id,
self.story.host)
self.assertRaises(Exception,
self.story.client._format, self.story.device_path)
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[GrabUuid])
class Initialize(VolumeTest):
@time_out(300)
def test_10_initialize_will_format(self):
"""initialize will setup, format, and store the UUID of a volume"""
self.assertTrue(self.story.get_volume()['uuid'] is None)
self.story.client.initialize(self.story.context, self.story.volume_id,
self.story.host)
volume = self.story.get_volume()
self.assertTrue(is_uuid(volume['uuid']), "uuid must match regex")
self.assertNotEqual(self.story.original_uuid, volume['uuid'],
"Validate our assumption that the volume UUID "
"will change when the volume is formatted.")
self.story.client.remove_volume(self.story.context,
self.story.volume_id,
self.story.host)
@time_out(60)
def test_20_initialize_the_second_time_will_not_format(self):
"""If initialize is called but a UUID exists, it should not format."""
old_uuid = self.story.get_volume()['uuid']
self.assertTrue(old_uuid is not None)
class VolumeClientNoFmt(volume.Client):
def _format(self, device_path):
raise RuntimeError("_format should not be called!")
no_fmt_client = VolumeClientNoFmt()
no_fmt_client.initialize(self.story.context, self.story.volume_id,
self.story.host)
self.assertEqual(old_uuid, self.story.get_volume()['uuid'],
"UUID should be the same as no formatting occurred.")
self.story.client.remove_volume(self.story.context,
self.story.volume_id,
self.story.host)
def test_30_check_device_exists(self):
assert_raises(exception.InvalidDevicePath, self.story.client._format,
self.story.device_path)
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[Initialize])
class DeleteVolume(VolumeTest):
@time_out(60)
def test_delete(self):
self.story.api.delete(self.story.context, self.story.volume_id)
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[DeleteVolume])
class ConfirmMissing(VolumeTest):
@time_out(60)
def test_discover_should_fail(self):
try:
self.story.client.driver.discover_volume(self.story.context,
self.story.volume)
self.fail("Expecting an error but did not get one.")
except exception.Error:
pass
except trove_exception.ISCSITargetNotDiscoverable:
pass
@time_out(60)
def test_get_missing_volume(self):
try:
volume = poll_until(lambda: self.story.api.get(self.story.context,
self.story.volume_id),
lambda volume: volume["status"] != "deleted")
self.assertEqual(volume["deleted"], False)
except exception.VolumeNotFound:
pass
| apache-2.0 | 7,793,867,989,542,511,000 | 37.416819 | 79 | 0.607452 | false |
pas256/troposphere | troposphere/servicecatalog.py | 1 | 4652 | # Copyright (c) 2012-2018, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
from .validators import boolean
class AcceptedPortfolioShare(AWSObject):
resource_type = "AWS::ServiceCatalog::AcceptedPortfolioShare"
props = {
'AcceptLanguage': (basestring, False),
'PortfolioId': (basestring, True),
}
class ProvisioningArtifactProperties(AWSProperty):
props = {
'Description': (basestring, False),
'Info': (dict, True),
'Name': (basestring, False),
}
class CloudFormationProduct(AWSObject):
resource_type = "AWS::ServiceCatalog::CloudFormationProduct"
props = {
'AcceptLanguage': (basestring, False),
'Description': (basestring, False),
'Distributor': (basestring, False),
'Name': (basestring, True),
'Owner': (basestring, True),
'ProvisioningArtifactParameters':
([ProvisioningArtifactProperties], True),
'SupportDescription': (basestring, False),
'SupportEmail': (basestring, False),
'SupportUrl': (basestring, False),
'Tags': (Tags, False),
}
class ProvisioningParameter(AWSProperty):
props = {
'Key': (basestring, False),
'Value': (basestring, False),
}
class CloudFormationProvisionedProduct(AWSObject):
resource_type = "AWS::ServiceCatalog::CloudFormationProvisionedProduct"
props = {
'AcceptLanguage': (basestring, False),
'NotificationArns': ([basestring], False),
'PathId': (basestring, False),
'ProductId': (basestring, False),
'ProductName': (basestring, False),
'ProvisionedProductName': (basestring, False),
'ProvisioningArtifactId': (basestring, False),
'ProvisioningArtifactName': (basestring, False),
'ProvisioningParameters': ([ProvisioningParameter], False),
'Tags': (Tags, False),
}
class LaunchNotificationConstraint(AWSObject):
resource_type = "AWS::ServiceCatalog::LaunchNotificationConstraint"
props = {
'AcceptLanguage': (basestring, False),
'Description': (basestring, False),
'NotificationArns': ([basestring], True),
'PortfolioId': (basestring, True),
'ProductId': (basestring, True),
}
class LaunchRoleConstraint(AWSObject):
resource_type = "AWS::ServiceCatalog::LaunchRoleConstraint"
props = {
'AcceptLanguage': (basestring, False),
'Description': (basestring, False),
'PortfolioId': (basestring, True),
'ProductId': (basestring, True),
'RoleArn': (basestring, True),
}
class LaunchTemplateConstraint(AWSObject):
resource_type = "AWS::ServiceCatalog::LaunchTemplateConstraint"
props = {
'AcceptLanguage': (basestring, False),
'Description': (basestring, False),
'PortfolioId': (basestring, True),
'ProductId': (basestring, True),
'Rules': (basestring, True),
}
class Portfolio(AWSObject):
resource_type = "AWS::ServiceCatalog::Portfolio"
props = {
'AcceptLanguage': (basestring, False),
'Description': (basestring, False),
'DisplayName': (basestring, True),
'ProviderName': (basestring, True),
'Tags': (Tags, False),
}
class PortfolioPrincipalAssociation(AWSObject):
resource_type = "AWS::ServiceCatalog::PortfolioPrincipalAssociation"
props = {
'AcceptLanguage': (basestring, False),
'PortfolioId': (basestring, True),
'PrincipalARN': (basestring, True),
'PrincipalType': (basestring, True),
}
class PortfolioProductAssociation(AWSObject):
resource_type = "AWS::ServiceCatalog::PortfolioProductAssociation"
props = {
'AcceptLanguage': (basestring, False),
'PortfolioId': (basestring, True),
'ProductId': (basestring, True),
'SourcePortfolioId': (basestring, False),
}
class PortfolioShare(AWSObject):
resource_type = "AWS::ServiceCatalog::PortfolioShare"
props = {
'AcceptLanguage': (basestring, False),
'AccountId': (basestring, True),
'PortfolioId': (basestring, True),
}
class TagOption(AWSObject):
resource_type = "AWS::ServiceCatalog::TagOption"
props = {
'Active': (boolean, False),
'Key': (basestring, True),
'Value': (basestring, True),
}
class TagOptionAssociation(AWSObject):
resource_type = "AWS::ServiceCatalog::TagOptionAssociation"
props = {
'ResourceId': (basestring, True),
'TagOptionId': (basestring, True),
}
| bsd-2-clause | -6,446,356,054,065,016,000 | 27.193939 | 75 | 0.634136 | false |
robertmattmueller/sdac-compiler | tests/test_normalization.py | 1 | 1628 | try:
# Python 2
from StringIO import StringIO
except ImportError:
# Python 3
from io import StringIO
import pddl
from pddl_to_prolog import Rule, PrologProgram
def test_normalization():
prog = PrologProgram()
prog.add_fact(pddl.Atom("at", ["foo", "bar"]))
prog.add_fact(pddl.Atom("truck", ["bollerwagen"]))
prog.add_fact(pddl.Atom("truck", ["segway"]))
prog.add_rule(Rule([pddl.Atom("truck", ["?X"])], pddl.Atom("at", ["?X", "?Y"])))
prog.add_rule(Rule([pddl.Atom("truck", ["X"]), pddl.Atom("location", ["?Y"])],
pddl.Atom("at", ["?X", "?Y"])))
prog.add_rule(Rule([pddl.Atom("truck", ["?X"]), pddl.Atom("location", ["?Y"])],
pddl.Atom("at", ["?X", "?X"])))
prog.add_rule(Rule([pddl.Atom("p", ["?Y", "?Z", "?Y", "?Z"])],
pddl.Atom("q", ["?Y", "?Y"])))
prog.add_rule(Rule([], pddl.Atom("foo", [])))
prog.add_rule(Rule([], pddl.Atom("bar", ["X"])))
prog.normalize()
output = StringIO()
prog.dump(file=output)
sorted_output = "\n".join(sorted(output.getvalue().splitlines()))
assert sorted_output == """\
Atom @object(bar).
Atom @object(bollerwagen).
Atom @object(foo).
Atom @object(segway).
Atom at(foo, bar).
Atom bar(X).
Atom foo().
Atom truck(bollerwagen).
Atom truck(segway).
none Atom at(?X, ?X@0) :- Atom truck(?X), Atom location(?Y), Atom =(?X, ?X@0).
none Atom at(?X, ?Y) :- Atom truck(?X), Atom @object(?Y).
none Atom at(?X, ?Y) :- Atom truck(X), Atom location(?Y), Atom @object(?X).
none Atom q(?Y, ?Y@0) :- Atom p(?Y, ?Z, ?Y, ?Z), Atom =(?Y, ?Y@0), Atom =(?Y, ?Y@1), Atom =(?Z, ?Z@2)."""
| gpl-3.0 | -3,731,900,976,421,474,300 | 37.761905 | 105 | 0.556511 | false |
jacobajit/ion | intranet/middleware/ldap_db.py | 1 | 1702 | # -*- coding: utf-8 -*-
import logging
from django.contrib import messages
from ..db.ldap_db import LDAPConnection
logger = logging.getLogger(__name__)
class CheckLDAPBindMiddleware:
def process_response(self, request, response):
if not hasattr(request, "user") or "_auth_user_backend" not in request.session or not request.user.is_authenticated():
# Nothing to check if user isn't already logged in
return response
auth_backend = request.session["_auth_user_backend"]
kerberos_backend = "KerberosAuthenticationBackend"
if LDAPConnection().did_use_simple_bind() and auth_backend.startswith(kerberos_backend):
# if request.user.is_eighth_admin:
# logger.info("Simple bind being used: staying logged in because eighth admin.")
# return response
logger.info("LDAP simple bind being used for {}".format(request.user if request.user else None))
messages.error(request, "Access to directory information may be limited: LDAP issue. Try logging out and back in.")
"""
logger.info("Simple bind being used: Destroying kerberos cache and logging out")
try:
kerberos_cache = request.session["KRB5CCNAME"]
os.system("/usr/bin/kdestroy -c " + kerberos_cache)
except KeyError:
pass
logout(request)
response = redirect("login")
url = response["Location"]
response["Location"] = urls.add_get_parameters(
url, {"next": request.path}, percent_encode=False)
return response
"""
return response
| gpl-2.0 | -8,821,804,357,833,348,000 | 38.581395 | 127 | 0.619271 | false |
nwjs/chromium.src | third_party/blink/tools/blinkpy/web_tests/layout_package/bot_test_expectations.py | 1 | 13338 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Generates a fake TestExpectations file consisting of flaky tests from the bot
corresponding to the give port.
"""
import json
import logging
import os.path
import urllib
import urllib2
from blinkpy.web_tests.models.typ_types import Expectation, ResultType
_log = logging.getLogger(__name__)
class ResultsJSON(object):
"""Contains the contents of a results.json file.
results.json v4 format:
{
'version': 4,
'builder name' : {
'blinkRevision': [],
'tests': {
'directory' { # Each path component is a dictionary.
'testname.html': {
'expected' : 'FAIL', # Expectation name.
'results': [], # Run-length encoded result.
'times': [],
'bugs': [], # Bug URLs.
}
}
}
}
'buildNumbers': [],
'secondsSinceEpoch': [],
'chromeRevision': [],
'failure_map': {} # Map from letter code to expectation name.
}
"""
TESTS_KEY = 'tests'
FAILURE_MAP_KEY = 'failure_map'
RESULTS_KEY = 'results'
EXPECTATIONS_KEY = 'expected'
BUGS_KEY = 'bugs'
RLE_LENGTH = 0
RLE_VALUE = 1
# results.json was originally designed to support
# multiple builders in one json file, so the builder_name
# is needed to figure out which builder this json file
# refers to (and thus where the results are stored)
def __init__(self, builder_name, json_dict):
self.builder_name = builder_name
self._json = json_dict
def _walk_trie(self, trie, parent_path):
for name, value in trie.items():
full_path = os.path.join(parent_path, name)
# FIXME: If we ever have a test directory self.RESULTS_KEY
# ("results"), this logic will break!
if self.RESULTS_KEY not in value:
for path, results in self._walk_trie(value, full_path):
yield path, results
else:
yield full_path, value
def walk_results(self, full_path=''):
tests_trie = self._json[self.builder_name][self.TESTS_KEY]
return self._walk_trie(tests_trie, parent_path='')
def expectation_for_type(self, type_char):
return self._json[self.builder_name][self.FAILURE_MAP_KEY][type_char]
# Knowing how to parse the run-length-encoded values in results.json
# is a detail of this class.
def occurances_and_type_from_result_item(self, item):
return item[self.RLE_LENGTH], item[self.RLE_VALUE]
class BotTestExpectationsFactory(object):
RESULTS_URL_FORMAT = (
'https://test-results.appspot.com/testfile?testtype=webkit_layout_tests'
'&name=results-small.json&master=%s&builder=%s')
def __init__(self, builders):
self.builders = builders
def _results_json_for_port(self, port_name, builder_category):
builder = self.builders.builder_name_for_port_name(port_name)
if not builder:
return None
return self._results_json_for_builder(builder)
def _results_url_for_builder(self, builder):
return self.RESULTS_URL_FORMAT % (
urllib.quote(self.builders.master_for_builder(builder)), urllib.quote(builder))
def _results_json_for_builder(self, builder):
results_url = self._results_url_for_builder(builder)
try:
_log.debug('Fetching flakiness data from appengine: %s', results_url)
return ResultsJSON(builder, json.load(urllib2.urlopen(results_url)))
except urllib2.URLError as error:
_log.warning('Could not retrieve flakiness data from the bot. url: %s', results_url)
_log.warning(error)
def expectations_for_port(self, port_name, builder_category='layout'):
# FIXME: This only grabs release builder's flakiness data. If we're running debug,
# when we should grab the debug builder's data.
# FIXME: What should this do if there is no debug builder for a port, e.g. we have
# no debug XP builder? Should it use the release bot or another Windows debug bot?
# At the very least, it should log an error.
results_json = self._results_json_for_port(port_name, builder_category)
if not results_json:
return None
return BotTestExpectations(results_json, self.builders)
def expectations_for_builder(self, builder):
results_json = self._results_json_for_builder(builder)
if not results_json:
return None
return BotTestExpectations(results_json, self.builders)
class BotTestExpectations(object):
# FIXME: Get this from the json instead of hard-coding it.
RESULT_TYPES_TO_IGNORE = ['N', 'X', 'Y'] # NO_DATA, SKIP, NOTRUN
# TODO(ojan): Remove this once crbug.com/514378 is fixed.
# The JSON can contain results for expectations, not just actual result types.
NON_RESULT_TYPES = ['S', 'X'] # SLOW, SKIP
# specifiers arg is used in unittests to avoid the static dependency on builders.
def __init__(self, results_json, builders, specifiers=None):
self.results_json = results_json
self.specifiers = specifiers or set(builders.specifiers_for_builder(results_json.builder_name))
def flakes_by_path(self, only_ignore_very_flaky):
"""Sets test expectations to bot results if there are at least two distinct results."""
flakes_by_path = {}
for test_path, entry in self.results_json.walk_results():
flaky_types = self._flaky_types_in_results(entry, only_ignore_very_flaky)
if len(flaky_types) <= 1:
continue
flakes_by_path[test_path] = flaky_types
return flakes_by_path
def unexpected_results_by_path(self):
unexpected_results_by_path = {}
for test_path, entry in self.results_json.walk_results():
# Expectations for this test. No expectation defaults to PASS.
exp_string = entry.get(self.results_json.EXPECTATIONS_KEY, ResultType.Pass)
# All run-length-encoded results for this test.
results_dict = entry.get(self.results_json.RESULTS_KEY, {})
# Set of distinct results for this test.
result_types = self._all_types_in_results(results_dict)
# Distinct results as non-encoded strings.
results = map(self.results_json.expectation_for_type, result_types)
# Get test expectations
expectations = exp_string.split(' ')
# Unexpected results will become additional expectations
additional_expectations = [res for res in results if res not in expectations]
if not additional_expectations:
continue
# Get typ expectation result tags
unexpected_results_by_path[test_path] = set(expectations + additional_expectations)
return unexpected_results_by_path
def all_results_by_path(self):
"""Returns all seen result types for each test.
Returns a dictionary from each test path that has a result to a list of distinct, sorted result
strings. For example, if the test results are as follows:
a.html IMAGE IMAGE PASS PASS PASS TIMEOUT PASS TEXT
b.html PASS PASS PASS PASS PASS PASS PASS PASS
c.html
This method will return:
{
'a.html': ['IMAGE', 'TEXT', 'TIMEOUT', 'PASS'],
'b.html': ['PASS'],
}
"""
results_by_path = {}
for test_path, entry in self.results_json.walk_results():
results_dict = entry.get(self.results_json.RESULTS_KEY, {})
result_types = self._all_types_in_results(results_dict)
if not result_types:
continue
# Distinct results as non-encoded strings.
result_strings = map(self.results_json.expectation_for_type, result_types)
results_by_path[test_path] = sorted(result_strings)
return results_by_path
def expectation_lines(self, only_ignore_very_flaky):
lines = []
for test_path, entry in self.results_json.walk_results():
flaky_types = self._flaky_types_in_results(entry, only_ignore_very_flaky)
if len(flaky_types) > 1:
line = self._line_from_test_and_flaky_types(test_path, flaky_types)
lines.append(line)
return lines
def _line_from_test_and_flaky_types(self, test_name, flaky_types):
return Expectation(tags=self.specifiers, test=test_name, results=flaky_types)
def _all_types_in_results(self, run_length_encoded_results):
results = set()
for result_item in run_length_encoded_results:
_, result_types = self.results_json.occurances_and_type_from_result_item(result_item)
for result_type in result_types:
if result_type not in self.RESULT_TYPES_TO_IGNORE:
results.add(result_type)
return results
def _flaky_types_in_results(self, results_entry, only_ignore_very_flaky):
flaky_results = set()
# Always include pass as an expected result. Passes will never turn the bot red.
# This fixes cases where the expectations have an implicit Pass, e.g. [ Slow ].
latest_expectations = [ResultType.Pass]
if self.results_json.EXPECTATIONS_KEY in results_entry:
expectations_list = results_entry[self.results_json.EXPECTATIONS_KEY].split(' ')
latest_expectations.extend(expectations_list)
for result_item in results_entry[self.results_json.RESULTS_KEY]:
_, result_types_str = self.results_json.occurances_and_type_from_result_item(result_item)
result_types = []
for result_type in result_types_str:
# TODO(ojan): Remove this if-statement once crbug.com/514378 is fixed.
if result_type not in self.NON_RESULT_TYPES:
result_types.append(self.results_json.expectation_for_type(result_type))
# It didn't flake if it didn't retry.
if len(result_types) <= 1:
continue
# If the test ran as expected after only one retry, it's not very flaky.
# It's only very flaky if it failed the first run and the first retry
# and then ran as expected in one of the subsequent retries.
# If there are only two entries, then that means it failed on the first
# try and ran as expected on the second because otherwise we'd have
# a third entry from the next try.
if only_ignore_very_flaky and len(result_types) == 2:
continue
has_unexpected_results = False
for result_type in result_types:
# TODO(ojan): We really should be grabbing the expected results from the time
# of the run instead of looking at the latest expected results. That's a lot
# more complicated though. So far we've been looking at the aggregated
# results_small.json off test_results.appspot, which has all the information
# for the last 100 runs. In order to do this, we'd need to look at the
# individual runs' full_results.json, which would be slow and more complicated.
# The only thing we lose by not fixing this is that a test that was flaky
# and got fixed will still get printed out until 100 runs have passed.
if result_type not in latest_expectations:
has_unexpected_results = True
break
if has_unexpected_results:
flaky_results = flaky_results.union(set(result_types))
return flaky_results
| bsd-3-clause | 5,242,296,614,790,762,000 | 41.88746 | 103 | 0.64185 | false |
amarfurt/arr | remote.py | 1 | 1112 | """
Starts the remote control worker.
"""
import os
import logging
import argparse
from workers.controller import Controller
def parse_args():
parser = argparse.ArgumentParser(description='Starts the remote control worker.')
parser.add_argument('--logpath', default=os.path.expanduser('~/logs/arr.log'),
help='Path to logfile.')
parser.add_argument('--loglevel', default='INFO', help='Logging level.')
return parser.parse_args()
def main(args):
# configure logging
logformat = '[%(asctime)s][%(name)s][%(levelname)s] %(message)s'
loglevel = logging.getLevelName(args.loglevel)
logging.basicConfig(filename=args.logpath, format=logformat, level=loglevel)
logging.getLogger('pika').setLevel(logging.WARNING)
log = logging.getLogger('main')
log.info('Starting system...')
# start control worker
log.info('Starting control worker...')
c = Controller('localhost', 'control')
c.start()
c.add_cpu()
log.info('System started')
c.join()
log.info('System stopped')
if __name__ == '__main__':
main(parse_args())
| mit | -6,781,648,225,161,846,000 | 28.263158 | 85 | 0.66277 | false |
mpdavis/python-jose | tests/test_jwk.py | 1 | 5083 | import pytest
from jose import jwk
from jose.backends import AESKey, ECKey, HMACKey, RSAKey
from jose.backends.base import Key
from jose.exceptions import JWKError
hmac_key = {
"kty": "oct",
"kid": "018c0ae5-4d9b-471b-bfd6-eef314bc7037",
"use": "sig",
"alg": "HS256",
"k": "hJtXIZ2uSN5kbQfbtTNWbpdmhkV8FJG-Onbc6mxCcYg",
}
rsa_key = {
"kty": "RSA",
"kid": "[email protected]",
"use": "sig",
"n": "n4EPtAOCc9AlkeQHPzHStgAbgs7bTZLwUBZdR8_KuKPEHLd4rHVTeT-O-XV2jRojdNhxJWTDvNd7nqQ0VEiZQHz_AJmSCpMaJMRBSFKrKb2wqVwGU_NsYOYL-QtiWN2lbzcEe6XC0dApr5ydQLrHqkHHig3RBordaZ6Aj-oBHqFEHYpPe7Tpe-OfVfHd1E6cS6M1FZcD1NNLYD5lFHpPI9bTwJlsde3uhGqC0ZCuEHg8lhzwOHrtIQbS0FVbb9k3-tVTU4fg_3L_vniUFAKwuCLqKnS2BYwdq_mzSnbLY7h_qixoR7jig3__kRhuaxwUkRz5iaiQkqgc5gHdrNP5zw",
"e": "AQAB",
}
ec_key = {
"kty": "EC",
"kid": "[email protected]",
"use": "sig",
"crv": "P-521",
"x": "AHKZLLOsCOzz5cY97ewNUajB957y-C-U88c3v13nmGZx6sYl_oJXu9A5RkTKqjqvjyekWF-7ytDyRXYgCF5cj0Kt",
"y": "AdymlHvOiLxXkEhayXQnNCvDX4h9htZaCJN34kfmC6pV5OhQHiraVySsUdaQkAgDPrwQrJmbnX9cwlGfP-HqHZR1",
}
class TestJWK:
def test_interface(self):
key = jwk.Key("key", "ALG")
with pytest.raises(NotImplementedError):
key.sign("")
with pytest.raises(NotImplementedError):
key.verify("", "")
@pytest.mark.skipif(RSAKey is None, reason="RSA is not available")
def test_invalid_hash_alg(self):
with pytest.raises(JWKError):
key = HMACKey(hmac_key, "RS512")
with pytest.raises(JWKError):
key = RSAKey(rsa_key, "HS512")
with pytest.raises(JWKError):
key = ECKey(ec_key, "RS512") # noqa: F841
@pytest.mark.skipif(RSAKey is None, reason="RSA is not available")
def test_invalid_jwk(self):
with pytest.raises(JWKError):
key = HMACKey(rsa_key, "HS256")
with pytest.raises(JWKError):
key = RSAKey(hmac_key, "RS256")
with pytest.raises(JWKError):
key = ECKey(rsa_key, "ES256") # noqa: F841
@pytest.mark.skipif(RSAKey is None, reason="RSA is not available")
def test_RSAKey_errors(self):
rsa_key = {
"kty": "RSA",
"kid": "[email protected]",
"use": "sig",
"n": "n4EPtAOCc9AlkeQHPzHStgAbgs7bTZLwUBZdR8_KuKPEHLd4rHVTeT-O-XV2jRojdNhxJWTDvNd7nqQ0VEiZQHz_AJmSCpMaJMRBSFKrKb2wqVwGU_NsYOYL-QtiWN2lbzcEe6XC0dApr5ydQLrHqkHHig3RBordaZ6Aj-oBHqFEHYpPe7Tpe-OfVfHd1E6cS6M1FZcD1NNLYD5lFHpPI9bTwJlsde3uhGqC0ZCuEHg8lhzwOHrtIQbS0FVbb9k3-tVTU4fg_3L_vniUFAKwuCLqKnS2BYwdq_mzSnbLY7h_qixoR7jig3__kRhuaxwUkRz5iaiQkqgc5gHdrNP5zw",
"e": "AQAB",
}
with pytest.raises(JWKError):
key = RSAKey(rsa_key, "HS256")
rsa_key = {
"kty": "oct",
"kid": "[email protected]",
"use": "sig",
"n": "n4EPtAOCc9AlkeQHPzHStgAbgs7bTZLwUBZdR8_KuKPEHLd4rHVTeT-O-XV2jRojdNhxJWTDvNd7nqQ0VEiZQHz_AJmSCpMaJMRBSFKrKb2wqVwGU_NsYOYL-QtiWN2lbzcEe6XC0dApr5ydQLrHqkHHig3RBordaZ6Aj-oBHqFEHYpPe7Tpe-OfVfHd1E6cS6M1FZcD1NNLYD5lFHpPI9bTwJlsde3uhGqC0ZCuEHg8lhzwOHrtIQbS0FVbb9k3-tVTU4fg_3L_vniUFAKwuCLqKnS2BYwdq_mzSnbLY7h_qixoR7jig3__kRhuaxwUkRz5iaiQkqgc5gHdrNP5zw",
"e": "AQAB",
}
with pytest.raises(JWKError):
key = RSAKey(rsa_key, "RS256") # noqa: F841
def test_construct_from_jwk(self):
hmac_key = {
"kty": "oct",
"kid": "018c0ae5-4d9b-471b-bfd6-eef314bc7037",
"use": "sig",
"alg": "HS256",
"k": "hJtXIZ2uSN5kbQfbtTNWbpdmhkV8FJG-Onbc6mxCcYg",
}
key = jwk.construct(hmac_key)
assert isinstance(key, jwk.Key)
def test_construct_EC_from_jwk(self):
key = ECKey(ec_key, algorithm="ES512")
assert isinstance(key, jwk.Key)
def test_construct_from_jwk_missing_alg(self):
hmac_key = {
"kty": "oct",
"kid": "018c0ae5-4d9b-471b-bfd6-eef314bc7037",
"use": "sig",
"k": "hJtXIZ2uSN5kbQfbtTNWbpdmhkV8FJG-Onbc6mxCcYg",
}
with pytest.raises(JWKError):
key = jwk.construct(hmac_key)
with pytest.raises(JWKError):
key = jwk.construct("key", algorithm="NONEXISTENT") # noqa: F841
def test_get_key(self):
hs_key = jwk.get_key("HS256")
assert hs_key == HMACKey
assert issubclass(hs_key, Key)
if RSAKey is not None:
assert issubclass(jwk.get_key("RS256"), Key)
assert issubclass(jwk.get_key("ES256"), Key)
assert jwk.get_key("NONEXISTENT") is None
@pytest.mark.skipif(AESKey is None, reason="No AES provider")
def test_get_aes_key(self):
assert issubclass(jwk.get_key("A256CBC-HS512"), Key)
def test_register_key(self):
assert jwk.register_key("ALG", jwk.Key)
assert jwk.get_key("ALG") == jwk.Key
with pytest.raises(TypeError):
assert jwk.register_key("ALG", object)
| mit | -2,887,430,949,809,869,000 | 34.298611 | 362 | 0.645485 | false |
migasfree/migasfree | migasfree/catalog/migrations/0003_4_14_packages_by_project.py | 1 | 2349 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import migasfree.server.models.common
class Migration(migrations.Migration):
dependencies = [
('server', '0022_4_14_computers'),
('catalog', '0002_4_14_versions'),
]
operations = [
migrations.CreateModel(
name='PackagesByProject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('packages_to_install', models.TextField(blank=True, verbose_name='packages to install')),
],
options={
'verbose_name': 'Packages by Project',
'verbose_name_plural': 'Packages by Projects',
'permissions': (('can_save_packagesbyproject', 'Can save packages by project'),),
},
bases=(models.Model, migasfree.server.models.common.MigasLink),
),
migrations.AlterField(
model_name='application',
name='name',
field=models.CharField(max_length=50, unique=True, verbose_name='name'),
),
migrations.AlterUniqueTogether(
name='application',
unique_together=set([]),
),
migrations.AddField(
model_name='packagesbyproject',
name='application',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='catalog.Application', verbose_name='application',
related_name='packages_by_project'
),
),
migrations.AddField(
model_name='packagesbyproject',
name='project',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='server.Project', verbose_name='project'
),
),
migrations.RemoveField(
model_name='application',
name='packages_to_install',
),
migrations.RemoveField(
model_name='application',
name='project',
),
migrations.AlterUniqueTogether(
name='packagesbyproject',
unique_together={('application', 'project')},
),
]
| gpl-3.0 | -6,319,951,951,391,928,000 | 33.043478 | 114 | 0.553427 | false |
robertostling/bnas | bnas/model.py | 1 | 50808 | """Network models and submodels.
The :class:`Model` class is used to encapsulate a set of Theano shared
variables (model parameters), and can create symbolic expressions for model
outputs and loss functions.
This module also contains subclasses, such as :class:`Linear`, that function
as building blocks for more complex networks.
"""
from collections import OrderedDict
import pickle
import sys
import numpy as np
import theano
from theano.ifelse import ifelse
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano import tensor as T
from . import init
from . import search
from .fun import train_mode, function
from .utils import expand_to_batch, softmax_masked, softmax_3d, softmax_4d
class Model:
"""Base class for neural network models.
Attributes
----------
name : str
Name of the model.
params : OrderedDict of str -> :class:`theano.compile.sharedvalue.SharedVariable`
Mapping from parameter names to Theano shared variables. Note that
submodel parameters are not included, so this should normally not be
accessed directly, rather use `self.parameters()`.
regularization : list of Theano symbolic expressions
These expressions should all be added to the loss function when
optimizing. Use `self.regularize()` to modify.
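    Examples
    --------
    A minimal sketch (illustrative only, all names and sizes made up) of
    composing a model from the building blocks in this module::

        class MLP(Model):
            def __init__(self, name, n_in, n_hidden, n_out):
                super().__init__(name)
                self.add(Linear('hidden', n_in, n_hidden))
                self.add(Linear('output', n_hidden, n_out))
            def __call__(self, x):
                return T.nnet.softmax(self.output(T.tanh(self.hidden(x))))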
"""
def __init__(self, name):
"""Initialize an empty model.
Parameters
----------
name : str
Name of the model.
"""
self.name = name
self.params = OrderedDict()
self.regularization = []
self.submodels = OrderedDict()
def loss(self):
"""Part of the loss function that is independent of inputs."""
terms = [submodel.loss() for submodel in self.submodels.values()] \
+ self.regularization
return sum(terms, T.as_tensor_variable(0.0))
def parameters(self, include_submodels=True):
"""Iterate over the parameters of this model and its submodels.
Each value produced by the iterator is a tuple (name, value), where
the name is a tuple of strings describing the hierarchy of submodels,
e.g. ('hidden', 'b'), and the value is a Theano shared variable.
Parameters
----------
include_submodels : bool
If ``True`` (default), also iterate over submodel parameters.
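        Examples
        --------
        Sketch (illustrative only)::

            for name, p in model.parameters():
                print('.'.join(name), p.get_value(borrow=True).shape)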
"""
for name, p in self.params.items():
yield ((name,), p)
if include_submodels:
for submodel in self.submodels.values():
for name, p in submodel.parameters():
yield ((submodel.name,) + name, p)
def summarize(self, grads, f=sys.stdout):
def tensor_stats(m):
return ', '.join([
'norm = %g' % np.sqrt((m*m).sum()),
'maxabs = %g' % np.abs(m).max(),
'minabs = %g' % np.abs(m).min()])
def summarize_parameter(name, p, g):
p_stats = tensor_stats(p)
g_stats = tensor_stats(g)
print('%s\n parameter %s\n gradient %s' % (
name, p_stats, g_stats),
file=f)
params = list(self.parameters())
assert len(grads) == len(params)
for (name, p), grad in zip(params, grads):
summarize_parameter('.'.join(name), p.get_value(), grad)
f.flush()
def parameters_list(self, include_submodels=True):
"""Return a list with parameters, without their names."""
return list(p for name, p in
self.parameters(include_submodels=include_submodels))
def parameter(self, name):
"""Return the parameter with the given name.
Parameters
----------
name : tuple of str
Path to variable, e.g. ('hidden', 'b') to find the parameter 'b'
in the submodel 'hidden'.
Returns
-------
value : :class:`theano.compile.sharedvalue.SharedVariable`
"""
if not isinstance(name, tuple):
raise TypeError('Expected tuple, got %s' % type(name))
if len(name) == 1:
return self.params[name[0]]
elif len(name) >= 2:
return self.submodels[name[0]].parameter(name[1:])
else:
raise ValueError('Name tuple must not be empty!')
def parameter_count(self):
"""Return the total number of parameters of the model."""
return sum(p.get_value(borrow=True).size for _,p in self.parameters())
def param(self, name, dims, init_f=None,
value=None, dtype=theano.config.floatX):
"""Create a new parameter, or share an existing one.
Parameters
----------
name : str
            Name of the parameter; this will be used directly in `self.params`
            and to create the shortcut attribute `self._<name>`.
dims : tuple
Shape of the parameter vector.
value : :class:`theano.compile.sharedvalue.SharedVariable`, optional
If this parameter should be shared, a SharedVariable instance can
be passed here.
init_f : (tuple => numpy.ndarray)
Function used to initialize the parameter vector.
dtype : str or numpy.dtype
Data type (default is `theano.config.floatX`)
Returns
-------
p : :class:`theano.compile.sharedvalue.SharedVariable`
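        Examples
        --------
        Typical use from a submodel's constructor (sketch, sizes made up)::

            self.param('w', (input_dims, output_dims),
                       init_f=init.Gaussian(fan_in=input_dims))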
"""
if name in self.params:
if not value is None:
raise ValueError('Trying to add a shared parameter (%s), '
'but a parameter with the same name already '
'exists in %s!' % (name, self.name))
return self.params[name]
if value is None:
if init_f is None:
raise ValueError('Creating new parameter, but no '
'initialization specified!')
p = theano.shared(init_f(dims, dtype=dtype), name=name)
self.params[name] = p
else:
p = value
setattr(self, '_'+name, p)
return p
def regularize(self, p, regularizer):
"""Add regularization to a parameter.
Parameters
----------
p : :class:`theano.compile.sharedvalue.SharedVariable`
Parameter to apply regularization
regularizer : function
Regularization function, which should return a symbolic
expression.
"""
if not regularizer is None:
self.regularization.append(regularizer(p))
def add(self, submodel):
"""Import parameters from a submodel.
If a submodel named "hidden" has a parameter "b", it will be imported
as "hidden_b", also accessible as `self._hidden_b`.
Parameters
----------
submodel : :class:`.Model`
Returns
-------
        submodel : :class:`.Model`
            The same model instance that was passed in, for convenience.
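        Examples
        --------
        Sketch (dimensions made up)::

            self.add(Linear('hidden', 100, 50))
            hidden_outputs = self.hidden(inputs)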
"""
if submodel.name in self.submodels:
raise ValueError('Submodel with name %s already exists in %s!' % (
submodel.name, self.name))
self.submodels[submodel.name] = submodel
setattr(self, submodel.name, submodel)
return submodel
def save(self, f, include_submodels=True):
"""Save the parameter values of this model to a file object.
Parameters
----------
f : file
File object to write to, assumed to be opened in 'wb' mode.
include_submodels : bool
If ``True`` (default), also save submodel parameters.
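        Examples
        --------
        Sketch of a save/load round trip (the file name is hypothetical)::

            with open('model.pkl', 'wb') as f:
                model.save(f)
            with open('model.pkl', 'rb') as f:
                model.load(f)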
"""
pickle.dump({name: p.get_value(borrow=True)
for name, p in self.parameters(
include_submodels=include_submodels)},
f, -1)
def load(self, f, allow_incomplete=False, allow_unused=False):
"""Load (some) weights of this model from a file object.
Parameters
----------
f : file
            File object to read from, assumed to be opened in 'rb' mode.
allow_incomplete : bool
If ``False``, throw a `ValueError` if some model parameters are
missing in the file.
allow_unused : bool
If ``False``, throw a `ValueError` if the file contains model
parameters that are not used in this model.
"""
data = pickle.load(f)
parameters = dict(self.parameters())
names = frozenset(data.keys()) & frozenset(parameters.keys())
if not allow_incomplete and len(names) < len(parameters):
diff = sorted(frozenset(parameters.keys()) - names)
raise ValueError(
'The following parameters are missing: %s' % ', '.join(
'.'.join(t) for t in diff))
if not allow_unused and len(names) < len(data):
diff = sorted(frozenset(data.keys()) - names)
raise ValueError(
'The following parameters are unused: %s' % ', '.join(
'.'.join(t) for t in diff))
for name in names:
value = data[name]
old_value = parameters[name].get_value(borrow=True)
if value.shape != old_value.shape:
raise ValueError(
'Loaded shape is %s but %s expected' % (
value.shape, old_value.shape))
parameters[name].set_value(value)
def compile(self, *args):
return function(list(args), self(*args))
class Linear(Model):
"""Fully connected linear layer.
    This layer creates one shared parameter, `w`, of shape
    `(input_dims, output_dims)`. If `use_bias` is ``True`` (the default), it
    also creates a bias vector `b` of shape `(output_dims,)`.
Parameters
----------
name : str
Name of layer.
input_dims : int
Number of inputs.
output_dims : int
Number of outputs.
w : :class:`theano.compile.sharedvalue.SharedVariable`
        Weight matrix to use, or pass ``None`` (default) to create a new
        one.
    w_init : :class:`.init.InitializationFunction`
        Initialization for the weight matrix, in case `w` is ``None``.
w_regularizer : :class:`.regularize.Regularizer`, optional
Regularization for weight matrix.
b : :class:`theano.compile.sharedvalue.SharedVariable`
Bias vector to use, or pass ``None`` (default) to create a new
one.
b_init : :class:`.init.InitializationFunction`
Initialization for bias vector, in case `b` is ``None``.
b_regularizer : :class:`.regularize.Regularizer`, optional
Regularization for biases.
use_bias : bool
If ``False``, no bias is used and the `b` and `b_init` parameters
are ignored.
dropout : float
Dropout factor (the default value of 0 means dropout is not used).
layernorm : bool
If ``True``, layer normalization is used on the activations.
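    Examples
    --------
    Sketch (dimensions made up)::

        hidden = Linear('hidden', 100, 50, dropout=0.2, layernorm=True)
        outputs = hidden(inputs)    # (batch_size, 100) -> (batch_size, 50)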
"""
def __init__(self, name, input_dims, output_dims,
w=None, w_init=None, w_regularizer=None,
b=None, b_init=None, b_regularizer=None,
use_bias=True, dropout=0, layernorm=False):
super().__init__(name)
self.input_dims = input_dims
self.output_dims = output_dims
self.use_bias = use_bias
self.dropout = dropout
self.layernorm = layernorm
if w_init is None: w_init = init.Gaussian(fan_in=input_dims)
if b_init is None: b_init = init.Constant(0.0)
self.param('w', (input_dims, output_dims), init_f=w_init, value=w)
self.regularize(self._w, w_regularizer)
if use_bias:
self.param('b', (output_dims,), init_f=b_init, value=b)
self.regularize(self._b, b_regularizer)
if dropout:
self.add(Dropout('dropout', dropout))
if layernorm:
self.add(LayerNormalization('ln', (None, output_dims)))
def __call__(self, inputs):
outputs = T.dot(inputs, self._w)
if self.layernorm: outputs = self.ln(outputs)
if self.use_bias: outputs = outputs + self._b
if self.dropout: outputs = self.dropout(outputs)
return outputs
class Embeddings(Model):
"""Embeddings layer.
This layer creates one shared parameter, `w` of shape
`(alphabet_size, embedding_dims)`.
Parameters
----------
name : str
Name of layer.
alphabet_size : int
Size of symbol alphabet.
embedding_dims : int
Dimensionality of embeddings.
w : :class:`theano.compile.sharedvalue.SharedVariable`
        Embedding matrix to use, or pass ``None`` (default) to create a new
        one.
    w_init : :class:`.init.InitializationFunction`
        Initialization for the embedding matrix, in case `w` is ``None``.
w_regularizer : :class:`.regularize.Regularizer`, optional
Regularization for weight matrix.
dropout : float
Dropout factor (the default value of 0 means dropout is not used).
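    Examples
    --------
    Sketch (sizes made up)::

        embeddings = Embeddings('embeddings', 1000, 64)
        # `symbols` is an integer tensor of symbol indices; the output has an
        # extra trailing dimension of size 64.
        vectors = embeddings(symbols)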
"""
def __init__(self, name, alphabet_size, embedding_dims,
w=None, w_init=None, w_regularizer=None,
dropout=0):
super().__init__(name)
self.embedding_dims = embedding_dims
self.alphabet_size = alphabet_size
self.dropout = dropout
if w_init is None: w_init = init.Gaussian(fan_in=embedding_dims)
self.param('w',
(alphabet_size, embedding_dims), init_f=w_init, value=w)
self.regularize(self._w, w_regularizer)
if dropout:
self.add(Dropout('dropout', dropout, sequence=True))
def __call__(self, inputs):
outputs = self._w[inputs]
if self.dropout: outputs = self.dropout(outputs)
return outputs
class Conv1D(Model):
"""1D convolution layer with linear activations.
The input shape is assumed to be (batch_size, length, dims).
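    Examples
    --------
    Sketch (sizes made up)::

        conv = Conv1D('conv', 64, 64, filter_dims=3)
        # inputs: (batch_size, length, 64), inputs_mask: (batch_size, length)
        outputs = conv(inputs, inputs_mask)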
"""
def __init__(self, name, input_dims, output_dims,
filter_dims=3, stride=1,
f=None, f_init=None, f_regularizer=None,
b=None, b_init=None, b_regularizer=None):
super().__init__(name)
if f_init is None:
f_init = init.Gaussian(fan_in=filter_dims*input_dims)
if b_init is None:
b_init = init.Constant(0.0)
self.stride = stride
self.input_dims = input_dims
self.f_shape = (output_dims, input_dims, filter_dims, 1)
self.param('f', self.f_shape, init_f=f_init)
self.param('b', (output_dims,), init_f=b_init)
def __call__(self, inputs, inputs_mask):
x = T.nnet.conv2d(
(inputs * inputs_mask.dimshuffle(0,1,'x')
).dimshuffle(0,2,1,'x'),
self._f,
input_shape=(None, self.input_dims, None, 1),
filter_shape=self.f_shape,
border_mode='half',
subsample=(self.stride, 1),
filter_flip=True)
batch_size = inputs.shape[0]
length = inputs.shape[1]
dims = inputs.shape[2]
x = x.reshape((batch_size, dims, length)).dimshuffle(0,2,1)
return x + self._b.dimshuffle('x','x',0)
class LSTM(Model):
"""Long Short-Term Memory.
    Parameters
    ----------
    name : str
Name of layer.
input_dims : int
Length of each vector in the input sequence.
state_dims : int
        Size of internal states. An LSTM contains two states, each of which
        will be of size state_dims.
attention_dims : int
If specified, use attention and let this be the size of the hidden
attention state.
    attended_dims : int
Dimensionality of the sequence to have attention on.
layernorm : str
One of `'ba1'` (eq 20--22 of Ba et al.), `'ba2'` (eq 29--31) or
`False` (no layer normalization).
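    Examples
    --------
    Sketch of a single step without attention (sizes made up)::

        gate = LSTM('gate', 64, 128)
        h_t, c_t = gate(x_t, h_tm1, c_tm1)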
"""
def __init__(self, name, input_dims, state_dims,
w=None, w_init=None, w_regularizer=None,
u=None, u_init=None, u_regularizer=None,
b=None, b_init=None, b_regularizer=None,
attention_dims=None, attended_dims=None,
layernorm=False, contextgate=False):
super().__init__(name)
assert layernorm in (False, 'ba1', 'ba2')
assert (attention_dims is None) == (attended_dims is None)
assert not (contextgate and (attention_dims is None))
self.n_states = 2
if attended_dims is not None:
if not contextgate:
input_dims += attended_dims
self.input_dims = input_dims
self.state_dims = state_dims
self.layernorm = layernorm
self.attention_dims = attention_dims
self.attended_dims = attended_dims
self.use_attention = attention_dims is not None
self.use_contextgate = contextgate
if w_init is None: w_init = init.Gaussian(fan_in=input_dims)
if u_init is None: u_init = init.Concatenated(
[init.Orthogonal()]*4, axis=1)
if b_init is None: b_init = init.Concatenated(
[init.Constant(x) for x in [0.0, 1.0, 0.0, 0.0]])
if self.use_contextgate:
self.param('wzg', (input_dims, state_dims*2),
init_f=init.Gaussian(fan_in=input_dims))
self.param('uzg', (state_dims, state_dims*2),
init_f=init.Concatenated([init.Orthogonal()]*2, axis=1))
self.param('bzg', (state_dims*2,), init_f=init.Constant(0.0))
self.param('czs', (attended_dims, state_dims*2),
init_f=init.Gaussian(fan_in=attended_dims))
self.param('bs', (state_dims,), init_f=init.Constant(0.0))
self.param('w', (state_dims, state_dims*4), init_f=w_init, value=w)
self.param('u', (state_dims, state_dims*4), init_f=u_init, value=u)
self.param('b', (state_dims*4,), init_f=b_init, value=b)
else:
self.param('w', (input_dims, state_dims*4), init_f=w_init, value=w)
self.param('u', (state_dims, state_dims*4), init_f=u_init, value=u)
self.param('b', (state_dims*4,), init_f=b_init, value=b)
if self.use_attention:
self.add(Linear('attention_u', attended_dims, attention_dims))
self.param('attention_w', (state_dims, attention_dims),
init_f=init.Gaussian(fan_in=state_dims))
self.param('attention_v', (attention_dims,),
init_f=init.Gaussian(fan_in=attention_dims))
self.regularize(self._attention_w, w_regularizer)
if layernorm == 'ba1':
self.add(LayerNormalization('ln_a', (None, attention_dims)))
self.regularize(self._w, w_regularizer)
self.regularize(self._u, u_regularizer)
self.regularize(self._b, b_regularizer)
if layernorm == 'ba1':
self.add(LayerNormalization('ln_1', (None, state_dims*4)))
self.add(LayerNormalization('ln_2', (None, state_dims*4)))
if layernorm:
self.add(LayerNormalization('ln_h', (None, state_dims)))
def __call__(self, inputs, h_tm1, c_tm1,
attended=None, attended_dot_u=None, attention_mask=None):
if self.use_attention:
# Non-precomputed part of the attention vector for this time step
# _ x batch_size x attention_dims
h_dot_w = T.dot(h_tm1, self._attention_w)
if self.layernorm == 'ba1': h_dot_w = self.ln_a(h_dot_w)
h_dot_w = h_dot_w.dimshuffle('x',0,1)
# Attention vector, with distributions over the positions in
# attended. Elements that fall outside the sentence in each batch
# are set to zero.
# sequence_length x batch_size
# Note that attention.T is returned
attention = softmax_masked(
T.dot(
T.tanh(attended_dot_u + h_dot_w),
self._attention_v).T,
attention_mask.T).T
# Compressed attended vector, weighted by the attention vector
# batch_size x attended_dims
compressed = (attended * attention.dimshuffle(0,1,'x')).sum(axis=0)
# Append the compressed vector to the inputs and continue as usual
if not self.use_contextgate:
inputs = T.concatenate([inputs, compressed], axis=1)
else:
zg = (T.dot(inputs, self._wzg) + T.dot(h_tm1, self._uzg) +
self._bzg.dimshuffle('x', 0))
zs = T.dot(compressed, self._czs)
def part(m,i):
return m[:, i*self.state_dims:(i+1)*self.state_dims]
z = T.nnet.sigmoid(part(zg,0) + part(zs,0))
g = part(zg,1)
s = part(zs,1) + self._bs.dimshuffle('x', 0)
inputs = z*s + (1-z)*g
if self.layernorm == 'ba1':
x = (self.ln_1(T.dot(inputs, self._w)) +
self.ln_2(T.dot(h_tm1, self._u)))
else:
x = T.dot(inputs, self._w) + T.dot(h_tm1, self._u)
x = x + self._b.dimshuffle('x', 0)
def x_part(i): return x[:, i*self.state_dims:(i+1)*self.state_dims]
i = T.nnet.sigmoid(x_part(0))
f = T.nnet.sigmoid(x_part(1))
o = T.nnet.sigmoid(x_part(2))
c = T.tanh( x_part(3))
c_t = f*c_tm1 + i*c
h_t = o*T.tanh(self.ln_h(c_t) if self.layernorm else c_t)
if self.use_attention:
return h_t, c_t, attention.T
else:
return h_t, c_t
class LSTMSequence(Model):
def __init__(self, name, backwards, *args,
dropout=0, trainable_initial=False, offset=0, **kwargs):
super().__init__(name)
self.backwards = backwards
self.trainable_initial = trainable_initial
self.offset = offset
self._step_fun = None
self._attention_u_fun = None
self.add(Dropout('dropout', dropout))
self.add(LSTM('gate', *args, **kwargs))
if self.trainable_initial:
self.param('h_0', (self.gate.state_dims,),
init_f=init.Gaussian(fan_in=self.gate.state_dims))
self.param('c_0', (self.gate.state_dims,),
init_f=init.Gaussian(fan_in=self.gate.state_dims))
def step(self, inputs, inputs_mask, h_tm1, c_tm1, h_mask, *non_sequences):
if self.gate.use_attention:
# attended is the
# src_sequence_length x batch_size x attention_dims
# matrix which we have attention on.
#
# attended_dot_u is the h_t-independent part of the final
# attention vectors, which is precomputed for efficiency.
#
# attention_mask is a binary mask over the valid elements of
# attended, which in practice is the same as the mask passed to
# the encoder that created attended. Size
# src_sequence_length x batch_size
h_t, c_t, attention = self.gate(
inputs, h_tm1 * h_mask.astype(theano.config.floatX), c_tm1,
attended=non_sequences[0],
attended_dot_u=non_sequences[1],
attention_mask=non_sequences[2])
return (T.switch(inputs_mask.dimshuffle(0, 'x'), h_t, h_tm1),
T.switch(inputs_mask.dimshuffle(0, 'x'), c_t, c_tm1),
attention)
else:
h_t, c_t = self.gate(
inputs, h_tm1 * h_mask.astype(theano.config.floatX), c_tm1)
return (T.switch(inputs_mask.dimshuffle(0, 'x'), h_t, h_tm1),
T.switch(inputs_mask.dimshuffle(0, 'x'), c_t, c_tm1))
def step_fun(self):
if self._step_fun is None:
inputs = T.matrix('inputs')
h_tm1 = T.matrix('h_tm1')
c_tm1 = T.matrix('c_tm1')
if self.gate.use_attention:
attended=T.tensor3('attended')
attended_dot_u=T.tensor3('attended_dot_u')
attention_mask=T.matrix('attention_mask')
self._step_fun = function(
[inputs, h_tm1, c_tm1,
attended, attended_dot_u, attention_mask],
self.step(inputs, T.ones(inputs.shape[:-1]),
h_tm1, c_tm1, T.ones_like(h_tm1),
attended, attended_dot_u, attention_mask),
name='%s_step_fun'%self.name)
else:
self._step_fun = function(
[inputs, h_tm1, c_tm1],
self.step(inputs, T.ones(inputs.shape[:-1]),
h_tm1, c_tm1, T.ones_like(h_tm1)),
name='%s_step_fun'%self.name)
return self._step_fun
def attention_u_fun(self):
assert self.gate.use_attention
if self._attention_u_fun is None:
attended = T.tensor3('attended')
self._attention_u_fun = function(
[attended], self.gate.attention_u(attended),
name='%s_attention_u_fun'%self.name)
return self._attention_u_fun
def search(self, predict_fun, embeddings,
start_symbol, stop_symbol, max_length,
h_0=None, c_0=None, attended=None, attention_mask=None,
beam_size=4):
if self.gate.use_attention:
attended_dot_u = self.attention_u_fun()(attended)
if self.trainable_initial:
if h_0 is None:
h_0 = self._h_0.get_value()[None,:]
if c_0 is None:
c_0 = self._c_0.get_value()[None,:]
def step(i, states, outputs, outputs_mask):
if self.gate.use_attention:
result = self.step_fun()(
embeddings[outputs[-1]], states[0], states[1],
attended, attended_dot_u, attention_mask)
else:
result = self.step_fun()(
embeddings[outputs[-1]], states[0], states[1])
h_t, c_t = result[:2]
return [h_t, c_t], predict_fun(h_t)
return search.beam(
step, [h_0, c_0], h_0.shape[0], start_symbol, stop_symbol,
max_length, beam_size=beam_size)
def __call__(self, inputs, inputs_mask, h_0=None, c_0=None,
attended=None, attention_mask=None):
if self.trainable_initial:
batch_size = inputs.shape[1]
if h_0 is None:
h_0 = expand_to_batch(self._h_0, batch_size)
if c_0 is None:
c_0 = expand_to_batch(self._c_0, batch_size)
attention_info = []
if self.gate.use_attention:
attention_info = [attended, self.gate.attention_u(attended),
attention_mask]
dropout_masks = [self.dropout.mask(h_0.shape)]
seqs, _ = theano.scan(
fn=self.step,
go_backwards=self.backwards,
sequences=[{'input': inputs, 'taps': [self.offset]},
{'input': inputs_mask, 'taps': [self.offset]}],
outputs_info=[h_0, c_0] + \
[None]*(1 if self.gate.use_attention else 0),
non_sequences=dropout_masks + attention_info + \
self.gate.parameters_list())
if self.backwards:
return tuple(seq[::-1] for seq in seqs)
else:
return seqs
class Sequence(Model):
def __init__(self, name, gate_type, backwards, *args,
dropout=0, trainable_initial=False, offset=0, **kwargs):
super().__init__(name)
self.backwards = backwards
self.trainable_initial = trainable_initial
self.offset = offset
self._step_fun = None
self._attention_u_fun = None
self.add(Dropout('dropout', dropout))
self.add(gate_type('gate', *args, **kwargs))
if self.trainable_initial:
for state in range(self.gate.n_states):
self.param('state_%d_0' % state, (self.gate.state_dims,),
init_f=init.Gaussian(fan_in=self.gate.state_dims))
def step(self, inputs, inputs_mask, *args):
states_tm1 = args[:self.gate.n_states]
h_mask = args[self.gate.n_states]
non_sequences = args[self.gate.n_states+1:]
# TODO: currently assume that dropout is applied only to states[0]
# through h_mask (which is passed through non_sequences and
# constant at each time step)
if self.gate.use_attention:
# attended is the
# src_sequence_length x batch_size x attention_dims
# matrix which we have attention on.
#
# attended_dot_u is the h_t-independent part of the final
# attention vectors, which is precomputed for efficiency.
#
# attention_mask is a binary mask over the valid elements of
# attended, which in practice is the same as the mask passed to
# the encoder that created attended. Size
# src_sequence_length x batch_size
states_attention = self.gate(
inputs,
*((states_tm1[0] * h_mask.astype(theano.config.floatX),) +
states_tm1[1:]),
attended=non_sequences[0],
attended_dot_u=non_sequences[1],
attention_mask=non_sequences[2])
states_t = states_attention[:-1]
attention = states_attention[-1]
return tuple(T.switch(inputs_mask.dimshuffle(0, 'x'), s_t, s_tm1)
for s_t, s_tm1 in zip(states_t, states_tm1)
) + (attention,)
else:
states_t = self.gate(
inputs,
*((states_tm1[0] * h_mask.astype(theano.config.floatX),) +
states_tm1[1:]))
return tuple(T.switch(inputs_mask.dimshuffle(0, 'x'), s_t, s_tm1)
for s_t, s_tm1 in zip(states_t, states_tm1))
def step_fun(self):
if self._step_fun is None:
inputs = T.matrix('inputs')
states_tm1 = [T.matrix('state_%d_tm1' % state)
for state in range(self.gate.n_states)]
if self.gate.use_attention:
attended=T.tensor3('attended')
attended_dot_u=T.tensor3('attended_dot_u')
attention_mask=T.matrix('attention_mask')
self._step_fun = function(
[inputs] + states_tm1 + [
attended, attended_dot_u, attention_mask],
self.step(*([inputs, T.ones(inputs.shape[:-1])] +
states_tm1 + [T.ones_like(states_tm1[0]),
attended, attended_dot_u,
attention_mask])),
name='%s_step_fun'%self.name)
else:
self._step_fun = function(
[inputs] + states_tm1,
self.step(*([inputs, T.ones(inputs.shape[:-1])] +
states_tm1 + [T.ones_like(states_tm1[0])])),
name='%s_step_fun'%self.name)
return self._step_fun
def attention_u_fun(self):
assert self.gate.use_attention
if self._attention_u_fun is None:
attended = T.tensor3('attended')
self._attention_u_fun = function(
[attended], self.gate.attention_u(attended),
name='%s_attention_u_fun'%self.name)
return self._attention_u_fun
def search(self, predict_fun, embeddings,
start_symbol, stop_symbol, max_length,
states_0=None, attended=None, attention_mask=None,
fixed=None,
beam_size=4):
if self.gate.use_attention:
attended_dot_u = self.attention_u_fun()(attended)
if self.trainable_initial:
if states_0 is None:
states_0 = [
getattr(self, '_state_%d_0' % state).get_value()[None,:]
for state in range(self.gate.n_states)]
def step(i, states, outputs, outputs_mask):
inputs = embeddings[outputs[-1]]
# TODO: is this the best way to add extra arguments?
if fixed is not None:
                # Tile the fixed extra-input vector over the batch and append
                # it to the embedded inputs.
                inputs = np.concatenate(
                    [inputs, fixed[None,:].repeat(inputs.shape[0], axis=0)],
                    axis=-1)
if self.gate.use_attention:
result = self.step_fun()(
*([inputs] + states + [
attended, attended_dot_u, attention_mask]))
else:
result = self.step_fun()(
*([inputs] + states))
states = result[:self.gate.n_states]
# NOTE: state[0] hard-coded
return states, predict_fun(states[0])
return search.beam(
step, states_0, states_0[0].shape[0],
start_symbol, stop_symbol,
max_length, beam_size=beam_size)
def __call__(self, inputs, inputs_mask, states_0=None,
attended=None, attention_mask=None):
if self.trainable_initial:
batch_size = inputs.shape[1]
if states_0 is None:
states_0 = [
expand_to_batch(getattr(self, '_state_%d_0' % state),
batch_size)
for state in range(self.gate.n_states)]
attention_info = []
if self.gate.use_attention:
attention_info = [attended, self.gate.attention_u(attended),
attention_mask]
dropout_masks = [self.dropout.mask(states_0[0].shape)]
seqs, _ = theano.scan(
fn=self.step,
go_backwards=self.backwards,
sequences=[{'input': inputs, 'taps': [self.offset]},
{'input': inputs_mask, 'taps': [self.offset]}],
outputs_info=list(states_0) + \
[None]*(1 if self.gate.use_attention else 0),
non_sequences=dropout_masks + attention_info + \
self.gate.parameters_list())
if self.backwards:
return tuple(seq[::-1] for seq in seqs)
else:
return seqs
# TODO: need to re-think how to handle attention in stacked models
class StackedSequence(Model):
def __init__(self, name, gate_type, backwards, n_layers,
input_dims, state_dims, *args,
dropout=0, trainable_initial=False, offset=0,
use_attention=False,
layer_fixed_size=None, **kwargs):
super().__init__(name)
self.backwards = backwards
self.trainable_initial = trainable_initial
self.offset = offset
self.n_layers = n_layers
self.layer_fixed_size = layer_fixed_size
self._step_fun = None
self._attention_u_fun = None
self.add(Dropout('dropout', dropout))
self.gates = []
for layer in range(n_layers):
total_input_dims = state_dims
if layer == 0:
total_input_dims += input_dims
if layer_fixed_size is not None:
total_input_dims += layer_fixed_size[layer]
gate = gate_type(
'gate%d' % layer,
total_input_dims,
state_dims,
*args,
**kwargs)
self.add(gate)
self.gates.append(gate)
if self.trainable_initial:
for state in range(self.gate0.n_states):
self.param('state_%d_%d_0' % (layer, state),
(self.gate0.state_dims,),
init_f=init.Gaussian(
fan_in=self.gate0.state_dims))
def step(self, inputs, inputs_mask, *args):
total_states = self.gate0.n_states*self.n_layers
layer_states_tm1 = [
args[layer*self.gate0.n_states:(layer+1)*self.gate0.n_states]
for layer in range(self.n_layers)]
n = total_states
h_mask = args[n]
n += 1
layer_fixed = None
if self.layer_fixed_size is not None:
layer_fixed = args[n:n+self.n_layers+1]
n += self.n_layers+1
non_sequences = args[n:]
layer_states_t = []
#states_tm1 = args[:self.gate.n_states]
#h_mask = args[self.gate.n_states]
#non_sequences = args[self.gate.n_states+1:]
# TODO: currently assume that dropout is applied only to states[0]
# through h_mask (which is passed through non_sequences and
# constant at each time step)
if self.gates[-1].use_attention:
raise NotImplementedError('Stacked RNN with attention')
# attended is the
# src_sequence_length x batch_size x attention_dims
# matrix which we have attention on.
#
# attended_dot_u is the h_t-independent part of the final
# attention vectors, which is precomputed for efficiency.
#
# attention_mask is a binary mask over the valid elements of
# attended, which in practice is the same as the mask passed to
# the encoder that created attended. Size
# src_sequence_length x batch_size
states_attention = self.gate(
inputs,
*((states_tm1[0] * h_mask.astype(theano.config.floatX),) +
states_tm1[1:]),
attended=non_sequences[0],
attended_dot_u=non_sequences[1],
attention_mask=non_sequences[2])
states_t = states_attention[:-1]
attention = states_attention[-1]
return tuple(T.switch(inputs_mask.dimshuffle(0, 'x'), s_t, s_tm1)
for s_t, s_tm1 in zip(states_t, states_tm1)
) + (attention,)
else:
for layer in range(self.n_layers):
states_tm1 = layer_states_tm1[layer]
total_inputs = inputs if layer == 0 else layer_states_t[-1][0]
if layer_fixed is not None:
total_inputs = T.concatenate(
[total_inputs, layer_fixed[layer].repeat(
inputs.shape[0], axis=0)],
axis=-1)
states_t = getattr(self, 'gate%d' % layer)(
total_inputs,
*((states_tm1[0] * h_mask.astype(theano.config.floatX),) +
states_tm1[1:]))
layer_states_t.append(states_t)
return tuple(
T.switch(inputs_mask.dimshuffle(0, 'x'), s_t, s_tm1)
for states_t, states_tm1 in zip(
layer_states_t,
layer_states_tm1)
for s_t, s_tm1 in zip(states_t, states_tm1))
#states_t = self.gate(
# inputs,
# *((states_tm1[0] * h_mask.astype(theano.config.floatX),) +
# states_tm1[1:]))
#return tuple(T.switch(inputs_mask.dimshuffle(0, 'x'), s_t, s_tm1)
# for s_t, s_tm1 in zip(states_t, states_tm1))
def step_fun(self):
if self._step_fun is None:
inputs = T.matrix('inputs')
states_tm1 = [T.matrix('state_%d_%d_tm1' % (layer, state))
for layer in range(self.n_layers)
for state in range(self.gate0.n_states)]
if self.gates[-1].use_attention:
raise NotImplementedError('Stacked RNN with attention')
attended=T.tensor3('attended')
attended_dot_u=T.tensor3('attended_dot_u')
attention_mask=T.matrix('attention_mask')
self._step_fun = function(
[inputs] + states_tm1 + [
attended, attended_dot_u, attention_mask],
self.step(*([inputs, T.ones(inputs.shape[:-1])] +
states_tm1 + [T.ones_like(states_tm1[0]),
attended, attended_dot_u,
attention_mask])),
name='%s_step_fun'%self.name)
else:
self._step_fun = function(
[inputs] + states_tm1,
self.step(*([inputs, T.ones(inputs.shape[:-1])] +
states_tm1 + [T.ones_like(states_tm1[0])])),
name='%s_step_fun'%self.name)
return self._step_fun
def attention_u_fun(self):
assert self.gates[-1].use_attention
if self._attention_u_fun is None:
attended = T.tensor3('attended')
self._attention_u_fun = function(
[attended], self.gates[-1].attention_u(attended),
name='%s_attention_u_fun'%self.name)
return self._attention_u_fun
def search(self, predict_fun, embeddings,
start_symbol, stop_symbol, max_length,
layer_states_0=None, attended=None, attention_mask=None,
layer_fixed=None,
beam_size=4):
if self.gates[-1].use_attention:
attended_dot_u = self.attention_u_fun()(attended)
if self.trainable_initial:
if layer_states_0 is None:
layer_states_0 = [
                    getattr(self, '_state_%d_%d_0' % (layer, state)).get_value()[None,:]
for layer in range(self.n_layers)
for state in range(self.gate0.n_states)]
def step(i, states, outputs, outputs_mask):
inputs = embeddings[outputs[-1]]
# TODO: need to give sizes of fixed arguments ...
# TODO: is this the best way to add extra arguments?
if layer_fixed is not None and layer_fixed[0] is not None:
# TODO: wasn't this buggy anyway? Why repeat(0, ...) ?
inputs = np.concatenate(
[inputs, layer_fixed[0][None,:]],
axis=-1)
if self.gates[-1].use_attention:
raise NotImplementedError('Stacked RNN with attention')
result = self.step_fun()(
*([inputs] + states + [
attended, attended_dot_u, attention_mask]))
else:
result = self.step_fun()(
*([inputs] + states))
states = result[:self.n_layers*self.gate0.n_states]
# NOTE: state[0] of the last layer hard-coded
return states, predict_fun(
states[(self.n_layers-1)*self.gate0.n_states])
return search.beam(
step, layer_states_0, layer_states_0[0][0].shape[0],
start_symbol, stop_symbol,
max_length, beam_size=beam_size)
def __call__(self, inputs, inputs_mask, layer_states_0=None,
attended=None, attention_mask=None):
if self.trainable_initial:
batch_size = inputs.shape[1]
if layer_states_0 is None:
layer_states_0 = [
expand_to_batch(getattr(self, '_state_%d_%d_0' % (
layer, state)),
batch_size)
for layer in range(self.n_layers)
for state in range(self.gate0.n_states)]
attention_info = []
if self.gates[-1].use_attention:
attention_info = [attended, self.gates[-1].attention_u(attended),
attention_mask]
dropout_masks = [self.dropout.mask(layer_states_0[0].shape)]
seqs, _ = theano.scan(
fn=self.step,
go_backwards=self.backwards,
sequences=[{'input': inputs, 'taps': [self.offset]},
{'input': inputs_mask, 'taps': [self.offset]}],
outputs_info=list(layer_states_0) + \
[None]*(1 if self.gate0.use_attention else 0),
non_sequences=dropout_masks + attention_info + \
sum([gate.parameters_list()
for gate in self.gates], []))
if self.backwards:
return tuple(seq[::-1] for seq in seqs)
else:
return seqs
class Dropout(Model):
"""Dropout layer.
name : str
Name of layer.
dropout : float
Dropout factor (equivalent to 1 - retention probability)
sequence : bool
If True, dropout is not performed on the last dimension. This is
useful for e.g. embedded symbol sequences, where either a symbol is
kept intact or it is completely zeroed out.
"""
def __init__(self, name, dropout, sequence=False):
super().__init__(name)
self.p = 1.0 - dropout
self.rng = RandomStreams()
self.sequence = sequence
def mask(self, shape):
"""Return a scaled mask for a (symbolic) shape.
This can be used for dropout in recurrent layers, where a fixed mask
is passed through the non_sequences argument to theano.scan().
"""
if self.p == 1: return T.ones(shape)
if self.sequence:
m = T.shape_padright(self.rng.binomial(shape[:-1], p=self.p)
).astype(theano.config.floatX)
else:
m = self.rng.binomial(shape, p=self.p).astype(theano.config.floatX)
return m / self.p
def __call__(self, inputs):
if self.p == 1: return inputs
m = self.mask(inputs.shape)
return ifelse(train_mode, inputs * m, inputs)
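# Illustrative sketch (added for clarity, not part of the original code): the
# fixed mask returned by Dropout.mask() is meant to be shared across time
# steps of a recurrent layer by passing it through theano.scan()'s
# non_sequences, as the recurrent sequence models above do.  Assuming symbolic
# variables `inputs`, `h_0` and a per-step function `step_fn`:
#
#     drop = Dropout('drop', dropout=0.5)
#     h_mask = drop.mask(h_0.shape)
#     h_seq, _ = theano.scan(
#         fn=lambda x_t, h_tm1, mask: step_fn(x_t, h_tm1 * mask),
#         sequences=[inputs],
#         outputs_info=[h_0],
#         non_sequences=[h_mask])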
class LayerNormalization(Model):
"""Layer Normalization (Ba, Kiros and Hinton 2016)."""
def __init__(self, name, inputs_shape, g_init=None, axis=-1, epsilon=1e-6):
super().__init__(name)
self.inputs_shape = inputs_shape
self.axis = axis
self.epsilon = epsilon
if g_init is None: g_init = init.Constant(1.0)
self.param('g', (inputs_shape[self.axis],), init_f=g_init)
def __call__(self, inputs):
broadcast = ['x']*len(self.inputs_shape)
broadcast[self.axis] = 0
mean = inputs.mean(axis=self.axis, keepdims=True).astype(
theano.config.floatX)
std = inputs.std(axis=self.axis, keepdims=True).astype(
theano.config.floatX)
normed = (inputs - mean) / (std + self.epsilon)
return normed * self._g.dimshuffle(*broadcast)
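# Illustrative note (added for clarity, not part of the original code):
# LayerNormalization above computes, along the chosen axis,
#     y = g * (x - mean(x)) / (std(x) + epsilon)
# i.e. each activation vector is standardised and rescaled by the learned
# gain `g`.  A hypothetical use on a (batch, hidden) activation matrix `h`:
#
#     ln = LayerNormalization('ln', (None, hidden_dims))
#     h_normed = ln(h)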
class LinearSelection(Model):
def __init__(self, name, input_dims, output_dims, selector_dims,
parallel_dims,
w=None, w_init=None, w_regularizer=None,
b=None, b_init=None, b_regularizer=None,
sw=None, sw_init=None,
sb=None, sb_init=None,
input_select=False,
use_bias=True, dropout=0, layernorm=False):
super().__init__(name)
self.input_dims = input_dims
self.output_dims = output_dims
self.selector_dims = selector_dims
self.parallel_dims = parallel_dims
self.use_bias = use_bias
self.dropout = dropout
self.layernorm = layernorm
self.input_select = input_select
s_dims = selector_dims + (input_dims if input_select else 0)
if w_init is None: w_init = init.Gaussian(fan_in=input_dims)
if b_init is None: b_init = init.Constant(0.0)
if sw_init is None: sw_init = init.Gaussian(fan_in=s_dims)
if sb_init is None: sb_init = init.Constant(0.0)
self.param('w', (input_dims, output_dims*parallel_dims),
init_f=w_init, value=w)
self.regularize(self._w, w_regularizer)
if use_bias:
self.param('b', (output_dims*parallel_dims,),
init_f=b_init, value=b)
self.regularize(self._b, b_regularizer)
self.param('sw', (s_dims, output_dims*parallel_dims),
init_f=sw_init)
self.param('sb', (output_dims*parallel_dims,),
init_f=sb_init)
if dropout:
self.add(Dropout('dropout', dropout))
if layernorm:
self.add(LayerNormalization('ln', (None, output_dims)))
def __call__(self, inputs, selector, sequence=False):
par = T.dot(inputs, self._w)
if self.use_bias: par = par + self._b
if sequence:
par = par.reshape((par.shape[0], par.shape[1],
self.output_dims, self.parallel_dims))
else:
par = par.reshape((par.shape[0],
self.output_dims, self.parallel_dims))
# Note that par might be a 3D or 4D tensor, while sel is always 3D
if self.input_select and sequence:
# ...except if we condition on the input
selector = T.concatenate([
inputs,
T.repeat(selector.dimshuffle('x',0,1), inputs.shape[0],
axis=0)],
axis=-1)
sel = T.dot(selector, self._sw) + self._sb
sel = sel.reshape(
(sel.shape[0], sel.shape[1],
self.output_dims, self.parallel_dims))
sel = softmax_4d(sel)
outputs = (par * sel).sum(axis=-1)
else:
if self.input_select:
selector = T.concatenate([inputs, selector], axis=-1)
sel = T.dot(selector, self._sw) + self._sb
sel = sel.reshape(
(sel.shape[0], self.output_dims, self.parallel_dims))
sel = softmax_3d(sel)
if sequence:
outputs = (par * sel.dimshuffle('x',0,1,2)).sum(axis=-1)
else:
outputs = (par * sel).sum(axis=-1)
if self.layernorm: outputs = self.ln(outputs)
if self.dropout: outputs = self.dropout(outputs)
return outputs
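# Illustrative note (added for clarity, not part of the original code):
# LinearSelection computes `parallel_dims` parallel linear projections of the
# input plus a softmax over them conditioned on `selector` (and optionally on
# the input itself), and returns their weighted sum.  A hypothetical call:
#
#     ls = LinearSelection('ls', input_dims, output_dims,
#                          selector_dims, parallel_dims)
#     y = ls(x, selector)                         # single time step
#     y_seq = ls(x_seq, selector, sequence=True)  # (time, batch, dims) input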
| gpl-3.0 | -8,664,384,897,296,081,000 | 40.040388 | 85 | 0.531078 | false |
Exirel/python-xmlunittest | test.py | 1 | 55350 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import io
import os
import unittest
from lxml import etree
from xmlunittest import XmlTestCase
DEFAULT_NS = 'https://www.w3.org/XML'
TEST_NS = 'https://docs.python.org/3.4/library/unittest.html'
class TestXmlTestCase(unittest.TestCase):
"""Test the XmlTestCase.
    Testing a TestCase subclass can be tough, as only the error cases can be
    tested directly. So it is important to know what you really want to test.
    XmlTestCase is developed using TDD: tests are written before functional
    code. For each successful case, a related error case is tested too.
"""
def test_assertXmlDocument(self):
"""Asserts assertXmlDocument raises when data is invalid.
        At this time, assertXmlDocument only tests that the XML data is a
        valid XML document, even if it is only a fragment of XML. It tests
        neither the XML declaration nor any doctype declaration.
"""
test_case = XmlTestCase(methodName='assertXmlDocument')
data = b"""<root/>"""
root = test_case.assertXmlDocument(data)
self.assertIsInstance(root, etree._Element)
with self.assertRaises(test_case.failureException):
test_case.assertXmlDocument('not an XML document')
def test_assertXmlDocument_with_encoding(self):
"""Asserts assertXmlDocument works with utf-8 and other encoding."""
test_case = XmlTestCase(methodName='assertXmlDocument')
# utf-8
data = """<?xml version="1.0" encoding="UTF-8" ?>
<root>àéèçßù</root>"""
root = test_case.assertXmlDocument(data.encode('utf-8'))
self.assertIsInstance(root, etree._Element)
# Check we can raise AssertionError with this document to check
# formatting of error messages
with self.assertRaises(test_case.failureException):
test_case.assertXpathsExist(root, ['//something'])
# iso-8859-15
data = """<?xml version="1.0" encoding="iso-8859-15" ?>
<root>àéèçßù</root>"""
root = test_case.assertXmlDocument(data.encode('iso-8859-15'))
self.assertIsInstance(root, etree._Element)
# Check we can raise AssertionError with this document to check
# formatting of error messages
with self.assertRaises(test_case.failureException):
test_case.assertXpathsExist(root, ['//something'])
# -------------------------------------------------------------------------
def test_assertXmlPartial(self):
"""Asserts assertXmlPartial raises when data is invalid.
        Method assertXmlPartial must be able to take a partially formatted XML
        string and return a valid XML document, or raise an error.
"""
test_case = XmlTestCase(methodName='assertXmlPartial')
data = b"""<partial>1</partial>
<partial>2</partial>"""
root = test_case.assertXmlPartial(data)
self.assertIsInstance(root, etree._Element)
self.assertEqual(root.tag, test_case.default_partial_tag)
self.assertEqual(len(root), 2)
with self.assertRaises(test_case.failureException):
test_case.assertXmlPartial(b'<invalidChar>&</invalidChar>')
with self.assertRaises(test_case.failureException):
test_case.assertXmlPartial(b'not even a partial XML document')
with self.assertRaises(test_case.failureException):
test_case.assertXmlPartial(b'<missingEndTag>')
def test_assertXmlPartial_name(self):
"""Asserts assertXmlPartial raises when data is invalid.
        Method assertXmlPartial accepts a `root_tag` parameter that tells the
        method the root element's tag name.
"""
test_case = XmlTestCase(methodName='assertXmlPartial')
data = b"""<partial>1</partial>
<partial>2</partial>"""
root = test_case.assertXmlPartial(data, root_tag='customTag')
self.assertIsInstance(root, etree._Element)
self.assertEqual(root.tag, 'customTag')
self.assertEqual(len(root), 2)
with self.assertRaises(test_case.failureException):
test_case.assertXmlPartial(b'<invalidChar>&</invalidChar>',
root_tag='customTag')
with self.assertRaises(test_case.failureException):
test_case.assertXmlPartial(b'not even a partial XML document',
root_tag='customTag')
with self.assertRaises(test_case.failureException):
test_case.assertXmlPartial(b'<missingEndTag>',
root_tag='customTag')
# -------------------------------------------------------------------------
def test_assertXmlNamespace(self):
"""Asserts assertXmlNamespace raises namespace is invalid.
        When an element declares an XML namespace, this element and each of
        its children reference this namespace, and thus it can be tested.
"""
test_case = XmlTestCase(methodName='assertXmlNamespace')
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root xmlns:ns="uri"/>"""
root = test_case.assertXmlDocument(data)
test_case.assertXmlNamespace(root, 'ns', 'uri')
with self.assertRaises(test_case.failureException):
test_case.assertXmlNamespace(root, 'wrong_ns', 'uri')
with self.assertRaises(test_case.failureException):
test_case.assertXmlNamespace(root, 'ns', 'wrong_uri')
# -------------------------------------------------------------------------
def test_assertXmlHasAttribute(self):
"""Asserts assertXmlHasAttribute raises when attribute does not exist.
        Method assertXmlHasAttribute can test whether an attribute exists or
        not, and more - see the other tests for that.
"""
test_case = XmlTestCase(methodName='assertXmlHasAttribute')
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root att="value" />"""
root = test_case.assertXmlDocument(data)
test_case.assertXmlHasAttribute(root, 'att')
with self.assertRaises(test_case.failureException):
test_case.assertXmlHasAttribute(root, 'no_att')
def test_assertXmlHasAttribute_value(self):
"""Asserts assertXmlHasAttribute raises when value is invalid.
        With the optional argument `expected_value`, assertXmlHasAttribute can
        assert that the attribute's value is the given expected value.
"""
test_case = XmlTestCase(methodName='assertXmlHasAttribute')
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root att="value" />"""
root = test_case.assertXmlDocument(data)
test_case.assertXmlHasAttribute(root, 'att', expected_value='value')
with self.assertRaises(test_case.failureException):
test_case.assertXmlHasAttribute(root, 'att',
expected_value='invalid')
def test_assertXmlHasAttribute_values(self):
"""Asserts assertXmlHasAttribute raises when value is invalid.
        With the optional argument `expected_values`, assertXmlHasAttribute
        can assert that the attribute's value is one of the given expected
        values.
"""
test_case = XmlTestCase(methodName='assertXmlHasAttribute')
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root>
<child att="1"/>
<child att="3"/>
</root>"""
root = test_case.assertXmlDocument(data)
for node in root.xpath('//child'):
test_case.assertXmlHasAttribute(node, 'att',
expected_values=['1', '3'])
with self.assertRaises(test_case.failureException):
test_case.assertXmlHasAttribute(node, 'att',
expected_values=['2', '4'])
# -------------------------------------------------------------------------
def test_assertXmlNode(self):
"""Asserts assertXmlNode raises when node is invalid.
        Method assertXmlNode raises if the node does not exist (is None) or is
        not an XML Element.
"""
test_case = XmlTestCase(methodName='assertXmlNode')
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root>text_value</root>"""
root = test_case.assertXmlDocument(data)
test_case.assertXmlNode(root)
with self.assertRaises(test_case.failureException):
test_case.assertXmlNode(None)
# Text data is not handled
with self.assertRaises(test_case.failureException):
test_case.assertXmlNode('<root>text_value</root>')
def test_assertXmlNode_tag(self):
"""Asserts assertXmlNode raises when node is invalid.
        Method assertXmlNode raises if the node does not have the expected
        tag name.
"""
test_case = XmlTestCase(methodName='assertXmlNode')
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root>text_value</root>"""
root = test_case.assertXmlDocument(data)
test_case.assertXmlNode(root, tag='root')
with self.assertRaises(test_case.failureException):
test_case.assertXmlNode(root, tag='noRoot')
def test_assertXmlNode_text(self):
"""Asserts assertXmlNode raises when node is invalid.
        Method assertXmlNode raises if the node does not have the expected
        text value.
"""
test_case = XmlTestCase(methodName='assertXmlNode')
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root>text_value</root>"""
root = test_case.assertXmlDocument(data)
test_case.assertXmlNode(root, text='text_value')
with self.assertRaises(test_case.failureException):
test_case.assertXmlNode(root, text='invalid')
def test_assertXmlNode_tag_text(self):
"""Asserts assertXmlNode raises when node is invalid.
        Method assertXmlNode raises if the node does not have the expected tag
        name or the expected text value.
"""
test_case = XmlTestCase(methodName='assertXmlNode')
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root>text_value</root>"""
root = test_case.assertXmlDocument(data)
test_case.assertXmlNode(root, tag='root', text='text_value')
with self.assertRaises(test_case.failureException):
test_case.assertXmlNode(root, tag='root', text='invalid')
with self.assertRaises(test_case.failureException):
test_case.assertXmlNode(root, tag='noRoot', text='text_value')
with self.assertRaises(test_case.failureException):
test_case.assertXmlNode(root, tag='noRoot', text='invalid')
def test_assertXmlNode_text_in(self):
"""Asserts assertXmlNode raises when node is invalid.
        Method assertXmlNode raises if the node's text value is not in the
        list of valid values.
"""
test_case = XmlTestCase(methodName='assertXmlNode')
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root>valid</root>"""
root = test_case.assertXmlDocument(data)
test_case.assertXmlNode(root, text_in=['valid', 'ok'])
with self.assertRaises(test_case.failureException):
test_case.assertXmlNode(root, text_in=['invalid', 'ok'])
# -------------------------------------------------------------------------
def test_assertXpathsExist(self):
"""Asserts assertXpathsExist raises when validation failed.
        Method assertXpathsExist raises when any XPath expression does not
        select at least one result.
"""
test_case = XmlTestCase(methodName='assertXpathsExist')
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root att="exists">
<sub subAtt="input"/>
<sub/>
</root>"""
root = test_case.assertXmlDocument(data)
xpaths = ['@att', './sub', './sub[@subAtt="input"]']
test_case.assertXpathsExist(root, xpaths)
with self.assertRaises(test_case.failureException):
test_case.assertXpathsExist(root, ['@invalidAtt'])
with self.assertRaises(test_case.failureException):
test_case.assertXpathsExist(root, ['./invalidChild'])
with self.assertRaises(test_case.failureException):
test_case.assertXpathsExist(root, ['./sub[@subAtt="invalid"]'])
def test_assertXpathsExist_namespaces_default_prefix(self):
"""Asserts assertXpathsExist works with default namespaces."""
test_case = XmlTestCase(methodName='assertXpathsExist')
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root att="exists" xmlns="https://www.w3.org/XML">
<sub subAtt="input"/>
<sub/>
</root>"""
root = test_case.assertXmlDocument(data)
xpaths = ['@att', './ns:sub', './ns:sub[@subAtt="input"]']
test_case.assertXpathsExist(root, xpaths)
with self.assertRaises(test_case.failureException):
test_case.assertXpathsExist(root, ['@invalidAtt'])
with self.assertRaises(test_case.failureException):
# Without the namespace prefix, it does not work
test_case.assertXpathsExist(root, ['./sub'])
with self.assertRaises(test_case.failureException):
test_case.assertXpathsExist(root, ['./ns:invalidChild'])
with self.assertRaises(test_case.failureException):
test_case.assertXpathsExist(root, ['./ns:sub[@subAtt="invalid"]'])
def test_assertXpathsExist_namespaces_custom_prefix(self):
"""Asserts assertXpathsExist works with custom default namespaces."""
test_case = XmlTestCase(methodName='assertXpathsExist')
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root att="exists" xmlns="https://www.w3.org/XML">
<sub subAtt="input"/>
<sub/>
</root>"""
root = test_case.assertXmlDocument(data)
# With a custom default prefix
xpaths = ['@att', './custom:sub', './custom:sub[@subAtt="input"]']
test_case.assertXpathsExist(root, xpaths, default_ns_prefix='custom')
with self.assertRaises(test_case.failureException):
test_case.assertXpathsExist(root,
['@invalidAtt'],
default_ns_prefix='custom')
with self.assertRaises(test_case.failureException):
# Without the namespace prefix, it does not work
test_case.assertXpathsExist(root,
['./sub'],
default_ns_prefix='custom')
with self.assertRaises(test_case.failureException):
# With the wrong namespace it does not work either
test_case.assertXpathsExist(root,
['./ns:sub'],
default_ns_prefix='custom')
with self.assertRaises(test_case.failureException):
test_case.assertXpathsExist(root,
['./custom:invalidChild'],
default_ns_prefix='custom')
with self.assertRaises(test_case.failureException):
test_case.assertXpathsExist(root,
['./custom:sub[@subAtt="invalid"]'],
default_ns_prefix='custom')
def test_assertXpathsExist_namespaces(self):
"""Asserts assertXpathsExist works with namespaces."""
test_case = XmlTestCase(methodName='assertXpathsExist')
data = """<?xml version="1.0" encoding="UTF-8" ?>
<root att="exists" xmlns="%s" xmlns:test="%s">
<sub subAtt="DEFAULT_ATT" test:subAtt="NODE_NS-ATT"/>
<sub/>
<test:sub subAtt="NS-NODE_ATT" />
<test:sub test:subAtt="NS-NODE_NS-ATT" />
</root>""" % (DEFAULT_NS, TEST_NS)
root = test_case.assertXmlDocument(data.encode('utf-8'))
xpaths = [
# attribute without namespace
'@att',
# node with default namespace with a namespaced attribute
'./ns:sub[@test:subAtt="NODE_NS-ATT"]',
# namespaced node
'./test:sub',
# namespaced node with non-namespaced attribute
'./test:sub[@subAtt="NS-NODE_ATT"]',
# namespaced node with namespaced attribute
'./test:sub[@test:subAtt="NS-NODE_NS-ATT"]']
test_case.assertXpathsExist(root, xpaths)
with self.assertRaises(test_case.failureException):
# This attribute does not exist with such namespace
test_case.assertXpathsExist(root, ['@test:att'])
with self.assertRaises(test_case.failureException):
# This node with this attribute does not have this value,
# only the namespaced attribute of this node has this value.
test_case.assertXpathsExist(root, ['./ns:sub[@subAtt="NODE_NS-ATT"]'])
with self.assertRaises(test_case.failureException):
# We just make sure we use XPath properly and we don't hack the
# XML document with "ignore all namespaces". We are respectful of
# namespaces.
test_case.assertXpathsExist(root, ['./ns:sub[@test:subAtt="DEFAULT_ATT"]'])
with self.assertRaises(test_case.failureException):
# Really, we don't mess with namespaces.
test_case.assertXpathsExist(root, ['./ns:sub[@ns:subAtt="DEFAULT_ATT"]'])
# -------------------------------------------------------------------------
def test_assertXpathsOnlyOne(self):
"""Asserts assertXpathsOnlyOne raises when validation failed.
        Method assertXpathsOnlyOne raises when any of the XPath expressions
        does not select one and only one result.
"""
test_case = XmlTestCase(methodName='assertXpathsOnlyOne')
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root>
<sub subAtt="unique" id="1" />
<sub subAtt="notUnique" id="2"/>
<sub subAtt="notUnique" id="3"/>
<uniqueSub/>
</root>"""
root = test_case.assertXmlDocument(data)
unique_for_each = ['./uniqueSub',
'./sub[@subAtt="unique"]']
test_case.assertXpathsOnlyOne(root, unique_for_each)
with self.assertRaises(test_case.failureException):
test_case.assertXpathsOnlyOne(root, ['./invalidChild'])
with self.assertRaises(test_case.failureException):
test_case.assertXpathsOnlyOne(root, ['./sub[@subAtt="notUnique"]'])
def test_assertXpathsOnlyOne_namespaces_default_prefix(self):
"""Asserts assertXpathsOnlyOne works with default namespace prefix"""
test_case = XmlTestCase(methodName='assertXpathsOnlyOne')
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root xmlns="https://www.w3.org/XML">
<sub subAtt="unique" id="1" />
<sub subAtt="notUnique" id="2"/>
<sub subAtt="notUnique" id="3"/>
<uniqueSub/>
</root>"""
root = test_case.assertXmlDocument(data)
unique_for_each = ['./ns:uniqueSub',
'./ns:sub[@subAtt="unique"]']
test_case.assertXpathsOnlyOne(root, unique_for_each)
with self.assertRaises(test_case.failureException):
test_case.assertXpathsOnlyOne(root, ['./ns:invalidChild'])
with self.assertRaises(test_case.failureException):
test_case.assertXpathsOnlyOne(root, ['./ns:sub[@subAtt="notUnique"]'])
def test_assertXpathsOnlyOne_namespaces_custom_prefix(self):
"""Asserts assertXpathsOnlyOne works with custom namespace prefix"""
test_case = XmlTestCase(methodName='assertXpathsOnlyOne')
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root xmlns="https://www.w3.org/XML">
<sub subAtt="unique" id="1" />
<sub subAtt="notUnique" id="2"/>
<sub subAtt="notUnique" id="3"/>
<uniqueSub/>
</root>"""
root = test_case.assertXmlDocument(data)
unique_for_each = ['./custom:uniqueSub',
'./custom:sub[@subAtt="unique"]']
test_case.assertXpathsOnlyOne(root,
unique_for_each,
default_ns_prefix='custom')
with self.assertRaises(test_case.failureException):
test_case.assertXpathsOnlyOne(root,
['./custom:invalidChild'],
default_ns_prefix='custom')
with self.assertRaises(test_case.failureException):
# Wrong namespace: the node exists but not with this namespace.
# That's why namespaces exist after all.
test_case.assertXpathsOnlyOne(root,
['./ns:uniqueSub'],
default_ns_prefix='custom')
with self.assertRaises(test_case.failureException):
test_case.assertXpathsOnlyOne(root, ['./custom:sub[@subAtt="notUnique"]'])
def test_assertXpathsOnlyOne_namespaces(self):
"""Asserts assertXpathsOnlyOne works with namespace"""
test_case = XmlTestCase(methodName='assertXpathsOnlyOne')
data = """<?xml version="1.0" encoding="UTF-8" ?>
<root xmlns="%s" xmlns:test="%s">
<sub subAtt="unique" id="1" />
<sub subAtt="notUnique" id="2"/>
<sub subAtt="notUnique" id="3"/>
<test:sub subAtt="notUnique" id="2"/>
<test:sub subAtt="notUnique" id="3"/>
<sub test:subAtt="unique" id="1" />
<uniqueSub/>
<test:uniqueSub/>
</root>""" % (DEFAULT_NS, TEST_NS)
root = test_case.assertXmlDocument(data.encode('utf-8'))
unique_for_each = ['./ns:sub[@subAtt="unique"]',
'./ns:sub[@test:subAtt="unique"]',
'./ns:uniqueSub',
'./test:uniqueSub']
test_case.assertXpathsOnlyOne(root, unique_for_each)
with self.assertRaises(test_case.failureException):
# ns:sub appears multiple time with subAtt == notUnique
test_case.assertXpathsOnlyOne(root, [
'./ns:sub[@subAtt="notUnique"]'
])
with self.assertRaises(test_case.failureException):
# test:sub appears multiple time with subAtt == notUnique
test_case.assertXpathsOnlyOne(root, [
'./test:sub[@subAtt="notUnique"]'
])
# -------------------------------------------------------------------------
def test_assertXpathsUniqueValue(self):
"""Asserts assertXpathsUniqueValue raises when validation failed.
        Method assertXpathsUniqueValue raises when any of the XPath
        expressions does not return only unique values.
"""
test_case = XmlTestCase(methodName='assertXpathsUniqueValue')
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root>
<sub subAtt="unique" id="1">unique 1</sub>
<sub subAtt="notUnique" id="2">unique 2</sub>
<sub subAtt="notUnique" id="3">unique 3</sub>
<multiple>twice</multiple>
<multiple>twice</multiple>
</root>"""
root = test_case.assertXmlDocument(data)
test_case.assertXpathsUniqueValue(root, ['./sub/@id',
'./sub/text()'])
with self.assertRaises(test_case.failureException):
test_case.assertXpathsUniqueValue(root, ['./sub/@subAtt'])
with self.assertRaises(test_case.failureException):
test_case.assertXpathsUniqueValue(root, ['./multiple/text()'])
def test_assertXpathsUniqueValue_namespaces_default_prefix(self):
"""Asserts assertXpathsUniqueValue works with default namespace prefix."""
test_case = XmlTestCase(methodName='assertXpathsUniqueValue')
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root xmlns="http://www.w3c.org/XML">
<sub subAtt="unique" id="1">unique 1</sub>
<sub subAtt="notUnique" id="2">unique 2</sub>
<sub subAtt="notUnique" id="3">unique 3</sub>
<multiple>twice</multiple>
<multiple>twice</multiple>
</root>"""
root = test_case.assertXmlDocument(data)
test_case.assertXpathsUniqueValue(root,
['./ns:sub/@id', './ns:sub/text()'])
with self.assertRaises(test_case.failureException):
test_case.assertXpathsUniqueValue(root, ['./ns:sub/@subAtt'])
with self.assertRaises(test_case.failureException):
test_case.assertXpathsUniqueValue(root, ['./ns:multiple/text()'])
def test_assertXpathsUniqueValue_namespaces_custom_prefix(self):
"""Asserts assertXpathsUniqueValue works with custom namespace prefix.
"""
test_case = XmlTestCase(methodName='assertXpathsUniqueValue')
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root xmlns="http://www.w3c.org/XML">
<sub subAtt="unique" id="1">unique 1</sub>
<sub subAtt="notUnique" id="2">unique 2</sub>
<sub subAtt="notUnique" id="3">unique 3</sub>
<multiple>twice</multiple>
<multiple>twice</multiple>
</root>"""
root = test_case.assertXmlDocument(data)
test_case.assertXpathsUniqueValue(root,
['./custom:sub/@id',
'./custom:sub/text()'],
default_ns_prefix='custom')
with self.assertRaises(test_case.failureException):
test_case.assertXpathsUniqueValue(root, ['./custom:sub/@subAtt'])
with self.assertRaises(test_case.failureException):
test_case.assertXpathsUniqueValue(root,
['./custom:multiple/text()'])
def test_assertXpathsUniqueValue_namespaces(self):
"""Asserts assertXpathsUniqueValue works with namespace."""
test_case = XmlTestCase(methodName='assertXpathsUniqueValue')
data = """<?xml version="1.0" encoding="UTF-8" ?>
<root xmlns="%s" xmlns:test="%s">
<sub subAtt="unique" id="1">unique 1</sub>
<sub subAtt="notUnique" id="2">unique 2</sub>
<sub subAtt="notUnique" id="3">unique 3</sub>
<test:sub subAtt="unique" id="1">unique 1</test:sub>
<test:sub subAtt="notUnique" id="2">unique 2</test:sub>
<test:sub subAtt="notUnique" id="3">unique 3</test:sub>
<multiple>twice</multiple>
<multiple>twice</multiple>
<test:multiple>twice</test:multiple>
<test:multiple>twice</test:multiple>
</root>""" % (DEFAULT_NS, TEST_NS)
root = test_case.assertXmlDocument(data.encode('utf-8'))
# Note: the default namespace and test namespace create different nodes
# so their values and attributes are *not* in the same group.
# This is how XML namespaces work.
test_case.assertXpathsUniqueValue(root, [
# Node with default namespace: attribute ID
'./ns:sub/@id',
# Node with default namespace: text
'./ns:sub/text()',
# Node with "test" namespace: attribute ID
'./test:sub/@id',
# Node with "test" namespace: text
'./test:sub/text()',
])
with self.assertRaises(test_case.failureException):
# Not unique attribute subAtt on ns:sub
test_case.assertXpathsUniqueValue(root, ['./ns:sub/@subAtt'])
with self.assertRaises(test_case.failureException):
# Not unique text value of ns:multiple
test_case.assertXpathsUniqueValue(root,
['./ns:multiple/text()'])
with self.assertRaises(test_case.failureException):
# Not unique attribute subAtt on test:sub
test_case.assertXpathsUniqueValue(root, ['./test:sub/@subAtt'])
with self.assertRaises(test_case.failureException):
# Not unique text value of test:multiple
test_case.assertXpathsUniqueValue(root,
['./test:multiple/text()'])
# -------------------------------------------------------------------------
def test_assertXpathValues(self):
"""Asserts assertXpathValues raises when validation failed.
        Method assertXpathValues raises when any result of the XPath
        expression is not among the expected values.
"""
test_case = XmlTestCase(methodName='assertXpathValues')
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root>
<sub id="1">a</sub>
<sub id="2">a</sub>
<sub id="3">b</sub>
<sub id="4">c</sub>
</root>"""
root = test_case.assertXmlDocument(data)
test_case.assertXpathValues(root, './sub/@id', ['1', '2', '3', '4'])
test_case.assertXpathValues(root, './sub/text()', ['a', 'b', 'c'])
        # This passes because the XPath expression returns 0 elements,
        # so "all" of the existing values are among the expected values.
        # One should use assertXpathsExist instead.
test_case.assertXpathValues(root, './absentSub/@id', ['1', '2'])
with self.assertRaises(test_case.failureException):
test_case.assertXpathValues(root, './sub/@id', ['1', '2'])
with self.assertRaises(test_case.failureException):
test_case.assertXpathValues(root, './sub/text()', ['a', 'b'])
def test_assertXpathValues_namespaces_default_prefix(self):
"""Asserts assertXpathValues works with default namespaces."""
test_case = XmlTestCase(methodName='assertXpathValues')
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root xmlns="http://www.w3c.org/XML">
<sub id="1">a</sub>
<sub id="2">a</sub>
<sub id="3">b</sub>
<sub id="4">c</sub>
</root>"""
root = test_case.assertXmlDocument(data)
test_case.assertXpathValues(root, './ns:sub/@id', ['1', '2', '3', '4'])
test_case.assertXpathValues(root, './ns:sub/text()', ['a', 'b', 'c'])
with self.assertRaises(test_case.failureException):
# @id in ['3', '4'] is missing
test_case.assertXpathValues(root, './ns:sub/@id', ['1', '2'])
with self.assertRaises(test_case.failureException):
# text() == c is missing
test_case.assertXpathValues(root, './ns:sub/text()', ['a', 'b'])
with self.assertRaises(test_case.failureException):
# Unknown namespace
test_case.assertXpathValues(root, './custom:sub/@id', ['1', '2', '3', '4'])
def test_assertXpathValues_namespaces_custom_prefix(self):
"""Asserts assertXpathValues works with custom namespaces."""
test_case = XmlTestCase(methodName='assertXpathValues')
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root xmlns="http://www.w3c.org/XML">
<sub id="1">a</sub>
<sub id="2">a</sub>
<sub id="3">b</sub>
<sub id="4">c</sub>
</root>"""
root = test_case.assertXmlDocument(data)
# Attribute value
test_case.assertXpathValues(root,
'./custom:sub/@id',
['1', '2', '3', '4'],
default_ns_prefix='custom')
# Node text value
test_case.assertXpathValues(root,
'./custom:sub/text()',
['a', 'b', 'c'],
default_ns_prefix='custom')
with self.assertRaises(test_case.failureException):
test_case.assertXpathValues(root,
'./custom:sub/@id',
['1', '2'],
default_ns_prefix='custom')
with self.assertRaises(test_case.failureException):
test_case.assertXpathValues(root,
'./custom:sub/text()',
['a', 'b'],
default_ns_prefix='custom')
def test_assertXpathValues_namespaces(self):
"""Assert assertXpathValues works with namespaces."""
test_case = XmlTestCase(methodName='assertXpathValues')
data = """<?xml version="1.0" encoding="UTF-8" ?>
<root xmlns="%s" xmlns:test="%s">
<sub test:id="1">a</sub>
<sub id="2">a</sub>
<sub id="3">b</sub>
<sub id="4">c</sub>
<test:sub>ns-a</test:sub>
</root>""" % (DEFAULT_NS, TEST_NS)
root = test_case.assertXmlDocument(data.encode('utf-8'))
# Attribute value without namespace
test_case.assertXpathValues(root,
'./ns:sub/@id',
['2', '3', '4'])
test_case.assertXpathValues(root,
'./test:sub/text()',
['ns-a'])
with self.assertRaises(test_case.failureException):
# Only the test:id attribute has value 1
test_case.assertXpathValues(root,
'./ns:sub/@id',
['1'])
with self.assertRaises(test_case.failureException):
# There is only one test:id attribute, and its value is not here
test_case.assertXpathValues(root,
'./ns:sub/@test:id',
['2', '3', '4'])
# -------------------------------------------------------------------------
def test_assertXmlValidDTD(self):
"""Asserts assertXmlValidDTD raises when DTD does not valid XML."""
test_case = XmlTestCase(methodName='assertXmlValidDTD')
dtd = """<!ELEMENT root (child)>
<!ELEMENT child EMPTY>
<!ATTLIST child id ID #REQUIRED>
"""
data = b"""<?xml version="1.0" encoding="utf-8"?>
<root>
<child id="child1"/>
</root>
"""
root = test_case.assertXmlDocument(data)
# Document is valid according to DTD
test_case.assertXmlValidDTD(root, dtd)
data_invalid = b"""<?xml version="1.0" encoding="utf-8"?>
<root>
<child id="child1"/>
<child id="child1"/>
</root>
"""
root = test_case.assertXmlDocument(data_invalid)
# Document is invalid according to DTD (multiple child element)
with self.assertRaises(test_case.failureException):
test_case.assertXmlValidDTD(root, dtd)
def test_assertXmlValidDTD_filename(self):
"""Asserts assertXmlValidDTD accepts a filename as DTD."""
test_case = XmlTestCase(methodName='assertXmlValidDTD')
filename = 'test_assertXmlValidDTD_filename.dtd'
dtd = """<!ELEMENT root (child)>
<!ELEMENT child EMPTY>
<!ATTLIST child id ID #REQUIRED>
"""
with open(filename, 'w') as dtd_file:
dtd_file.write(dtd)
data = b"""<?xml version="1.0" encoding="utf-8"?>
<root>
<child id="child1"/>
</root>
"""
root = test_case.assertXmlDocument(data)
# Document is valid according to DTD
try:
test_case.assertXmlValidDTD(root, filename=filename)
except:
os.unlink(filename)
raise
data_invalid = b"""<?xml version="1.0" encoding="utf-8"?>
<root>
<child id="child1"/>
<child id="child1"/>
</root>
"""
root = test_case.assertXmlDocument(data_invalid)
try:
# Document is invalid according to DTD (multiple child element)
with self.assertRaises(test_case.failureException):
test_case.assertXmlValidDTD(root, filename=filename)
finally:
os.unlink(filename)
def test_assertXmlValidDTD_DTD(self):
"""Asserts assertXmlValidDTD accepts an LXML DTD object."""
test_case = XmlTestCase(methodName='assertXmlValidDTD')
dtd = """<!ELEMENT root (child)>
<!ELEMENT child EMPTY>
<!ATTLIST child id ID #REQUIRED>
"""
schema = etree.DTD(io.StringIO(dtd))
data = b"""<?xml version="1.0" encoding="utf-8"?>
<root>
<child id="child1"/>
</root>
"""
root = test_case.assertXmlDocument(data)
# Document is valid according to DTD
test_case.assertXmlValidDTD(root, schema)
data_invalid = b"""<?xml version="1.0" encoding="utf-8"?>
<root>
<child id="child1"/>
<child id="child1"/>
</root>
"""
root = test_case.assertXmlDocument(data_invalid)
# Document is invalid according to DTD (multiple child element)
with self.assertRaises(test_case.failureException):
test_case.assertXmlValidDTD(root, schema)
def test_assertXmlValidDTD_no_dtd(self):
"""Asserts assertXmlValidDTD raises ValueError without any DTD."""
test_case = XmlTestCase(methodName='assertXmlValidDTD')
data = b"""<?xml version="1.0" encoding="utf-8"?>
<root>
<child id="child1"/>
</root>
"""
root = test_case.assertXmlDocument(data)
# No DTD: ValueError
with self.assertRaises(ValueError):
test_case.assertXmlValidDTD(root)
# -------------------------------------------------------------------------
def test_assertXmlValidXSchema(self):
"""Asserts assertXmlValidXSchema raises when schema does not valid XML.
"""
test_case = XmlTestCase(methodName='assertXmlValidXSchema')
xschema = b"""<?xml version="1.0" encoding="utf-8"?>
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<xsd:element name="root">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="child" minOccurs="1" maxOccurs="1">
<xsd:complexType>
<xsd:simpleContent>
<xsd:extension base="xsd:string">
<xsd:attribute name="id" type="xsd:string" use="required" />
</xsd:extension>
</xsd:simpleContent>
</xsd:complexType>
</xsd:element>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
</xsd:schema>
"""
data = b"""<?xml version="1.0" encoding="utf-8"?>
<root>
<child id="valid"/>
</root>
"""
root = test_case.assertXmlDocument(data)
test_case.assertXmlValidXSchema(root, xschema)
data_invalid = b"""<?xml version="1.0" encoding="utf-8"?>
<root>
<child id="valid"/>
<child id="tooManyChild"/>
</root>
"""
root = test_case.assertXmlDocument(data_invalid)
with self.assertRaises(test_case.failureException):
test_case.assertXmlValidXSchema(root, xschema)
def test_assertXmlValidXSchema_filename(self):
"""Asserts assertXmlValidXSchema raises when schema does not valid XML.
"""
test_case = XmlTestCase(methodName='assertXmlValidXSchema')
xschema = """<?xml version="1.0" encoding="utf-8"?>
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<xsd:element name="root">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="child" minOccurs="1" maxOccurs="1">
<xsd:complexType>
<xsd:simpleContent>
<xsd:extension base="xsd:string">
<xsd:attribute name="id" type="xsd:string" use="required" />
</xsd:extension>
</xsd:simpleContent>
</xsd:complexType>
</xsd:element>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
</xsd:schema>
"""
filename = 'test_assertXmlValidXSchema_filename.xml'
with open(filename, 'w') as xchema_file:
xchema_file.write(xschema)
data = b"""<?xml version="1.0" encoding="utf-8"?>
<root>
<child id="valid"/>
</root>
"""
root = test_case.assertXmlDocument(data)
try:
test_case.assertXmlValidXSchema(root, filename=filename)
except:
os.unlink(filename)
raise
data_invalid = b"""<?xml version="1.0" encoding="utf-8"?>
<root>
<child id="valid"/>
<child id="tooManyChild"/>
</root>
"""
root = test_case.assertXmlDocument(data_invalid)
try:
with self.assertRaises(test_case.failureException):
test_case.assertXmlValidXSchema(root, filename=filename)
finally:
os.unlink(filename)
def test_assertXmlValidXSchema_xschema(self):
"""Asserts assertXmlValidXSchema raises when schema does not valid XML.
"""
test_case = XmlTestCase(methodName='assertXmlValidXSchema')
xschema = b"""<?xml version="1.0" encoding="utf-8"?>
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<xsd:element name="root">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="child" minOccurs="1" maxOccurs="1">
<xsd:complexType>
<xsd:simpleContent>
<xsd:extension base="xsd:string">
<xsd:attribute name="id" type="xsd:string" use="required" />
</xsd:extension>
</xsd:simpleContent>
</xsd:complexType>
</xsd:element>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
</xsd:schema>
"""
xml_schema = etree.XMLSchema(etree.XML(xschema))
data = b"""<?xml version="1.0" encoding="utf-8"?>
<root>
<child id="valid"/>
</root>
"""
root = test_case.assertXmlDocument(data)
test_case.assertXmlValidXSchema(root, xml_schema)
data_invalid = b"""<?xml version="1.0" encoding="utf-8"?>
<root>
<child id="valid"/>
<child id="tooManyChild"/>
</root>
"""
root = test_case.assertXmlDocument(data_invalid)
with self.assertRaises(test_case.failureException):
test_case.assertXmlValidXSchema(root, xml_schema)
def test_assertXmlValidXSchema_no_xchema(self):
"""Asserts assertXmlValidXSchema raises ValueError without any schema.
"""
test_case = XmlTestCase(methodName='assertXmlValidXSchema')
data = b"""<?xml version="1.0" encoding="utf-8"?>
<root>
<child id="child1"/>
</root>
"""
root = test_case.assertXmlDocument(data)
        # No XMLSchema: ValueError
with self.assertRaises(ValueError):
test_case.assertXmlValidXSchema(root)
# -------------------------------------------------------------------------
def test_assertXmlValidRelaxNG(self):
"""Asserts assertXmlValidRelaxNG raises when schema does not valid XML.
"""
test_case = XmlTestCase(methodName='assertXmlValidRelaxNG')
relaxng = b"""<?xml version="1.0" encoding="utf-8"?>
<rng:element name="root" xmlns:rng="http://relaxng.org/ns/structure/1.0">
<rng:element name="child">
<rng:attribute name="id">
<rng:text />
</rng:attribute>
</rng:element>
</rng:element>
"""
data = b"""<?xml version="1.0" encoding="utf-8"?>
<root>
<child id="valid"/>
</root>
"""
root = test_case.assertXmlDocument(data)
test_case.assertXmlValidRelaxNG(root, relaxng)
data_invalid = b"""<?xml version="1.0" encoding="utf-8"?>
<root>
<child id="valid"/>
<child id="tooManyChild"/>
</root>
"""
root = test_case.assertXmlDocument(data_invalid)
with self.assertRaises(test_case.failureException):
test_case.assertXmlValidRelaxNG(root, relaxng)
def test_assertXmlValidRelaxNG_filename(self):
"""Asserts assertXmlValidRelaxNG raises when schema does not valid XML.
"""
test_case = XmlTestCase(methodName='assertXmlValidRelaxNG')
relaxng = """<?xml version="1.0" encoding="utf-8"?>
<rng:element name="root" xmlns:rng="http://relaxng.org/ns/structure/1.0">
<rng:element name="child">
<rng:attribute name="id">
<rng:text />
</rng:attribute>
</rng:element>
</rng:element>
"""
filename = 'test_assertXmlValidRelaxNG_filename.xml'
with open(filename, 'w') as relaxng_file:
relaxng_file.write(relaxng)
data = b"""<?xml version="1.0" encoding="utf-8"?>
<root>
<child id="valid"/>
</root>
"""
root = test_case.assertXmlDocument(data)
try:
test_case.assertXmlValidRelaxNG(root, filename=filename)
except:
os.unlink(filename)
raise
data_invalid = b"""<?xml version="1.0" encoding="utf-8"?>
<root>
<child id="valid"/>
<child id="tooManyChild"/>
</root>
"""
root = test_case.assertXmlDocument(data_invalid)
try:
with self.assertRaises(test_case.failureException):
test_case.assertXmlValidRelaxNG(root, filename=filename)
finally:
os.unlink(filename)
def test_assertXmlValidRelaxNG_relaxng(self):
"""Asserts assertXmlValidRelaxNG raises when schema does not valid XML.
"""
test_case = XmlTestCase(methodName='assertXmlValidRelaxNG')
relaxng = b"""<?xml version="1.0" encoding="utf-8"?>
<rng:element name="root" xmlns:rng="http://relaxng.org/ns/structure/1.0">
<rng:element name="child">
<rng:attribute name="id">
<rng:text />
</rng:attribute>
</rng:element>
</rng:element>
"""
xml_relaxng = etree.RelaxNG(etree.XML(relaxng))
data = b"""<?xml version="1.0" encoding="utf-8"?>
<root>
<child id="valid"/>
</root>
"""
root = test_case.assertXmlDocument(data)
test_case.assertXmlValidRelaxNG(root, xml_relaxng)
data_invalid = b"""<?xml version="1.0" encoding="utf-8"?>
<root>
<child id="valid"/>
<child id="tooManyChild"/>
</root>
"""
root = test_case.assertXmlDocument(data_invalid)
with self.assertRaises(test_case.failureException):
test_case.assertXmlValidRelaxNG(root, xml_relaxng)
def test_assertXmlValidRelaxNG_no_relaxng(self):
"""Asserts assertXmlValidRelaxNG raises ValueError without any RelaxNG.
"""
test_case = XmlTestCase(methodName='assertXmlValidRelaxNG')
data = b"""<?xml version="1.0" encoding="utf-8"?>
<root>
<child id="child1"/>
</root>
"""
root = test_case.assertXmlDocument(data)
        # No RelaxNG: ValueError
with self.assertRaises(ValueError):
test_case.assertXmlValidRelaxNG(root)
# -------------------------------------------------------------------------
def test_assertXmlEquivalentOutputs(self):
"""Asserts assertXmlEquivalentOutputs raises when comparison failed.
Basic assertion: same document, with different order of attributes,
text with useless spaces, etc.
"""
test_case = XmlTestCase(methodName='assertXmlEquivalentOutputs')
        # Same XML (with different spacing, placement and attribute order)
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root>
<tag foo="bar" bar="foo">foo</tag>
</root>"""
expected = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root><tag bar="foo" foo="bar"> foo </tag></root>"""
test_case.assertXmlEquivalentOutputs(data, expected)
# Not the right element given
wrong_element = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root>
<notTag foo="bar" bar="foo">foo</notTag>
</root>"""
with self.assertRaises(test_case.failureException):
test_case.assertXmlEquivalentOutputs(wrong_element, expected)
# Too many tag elements
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root>
<tag foo="bar" bar="foo">foo</tag>
<tag foo="bar" bar="foo">foo</tag>
</root>"""
with self.assertRaises(test_case.failureException):
            test_case.assertXmlEquivalentOutputs(data, expected)
def test_assertXmlEquivalentOutputs_namespaces(self):
"""Asserts assertXmlEquivalentOutputs raises when comparison failed.
Assertion with different namespaces: the namespace URI is the same,
but the prefix is different. In this case, the two XML are equivalents.
"""
test_case = XmlTestCase(methodName='assertXmlEquivalentOutputs')
# Same XML, but with different namespace prefixes
data = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root xmlns:foo="mynamespace">
<foo:tag>foo</foo:tag>
</root>"""
expected = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root xmlns:bar="mynamespace">
<bar:tag>foo</bar:tag>
</root>"""
test_case.assertXmlEquivalentOutputs(data, expected)
wrong_namespace = b"""<?xml version="1.0" encoding="UTF-8" ?>
<root xmlns:foo="not_the_same_namespace">
<foo:tag>foo</foo:tag>
</root>
"""
with self.assertRaises(test_case.failureException):
test_case.assertXmlEquivalentOutputs(wrong_namespace, expected)
class TestIntegrationXmlTestCase(unittest.TestCase):
def test_full_document(self):
data = """<?xml version="1.0" encoding="UTF-8" ?>
<root xmlns="%s" xmlns:test="%s" rootAtt="attValue" test:rootAtt="nsValue">
<emptyElement />
<attrElement id="1" attr="simple" test:attr="namespaced" />
<textElement>assemblée</textElement>
<multipleElement />
<multipleElement />
<parent>
<emptyElement />
<attrElement id="2" attr="simple" test:attr="namespaced" uniqueAttr="" />
<textElement>text</textElement>
<multipleElement />
<multipleElement />
</parent>
<test:parent>
<emptyElement />
<attrElement id="3" attr="simple" test:attr="namespaced" />
<textElement>text</textElement>
<multipleElement />
<multipleElement />
</test:parent>
</root>""" % (DEFAULT_NS, TEST_NS)
test_case = XmlTestCase(methodName='assertXmlDocument')
# It is a valid document.
root = test_case.assertXmlDocument(data.encode('utf-8'))
# The root node has these namespaces
test_case.assertXmlNamespace(root, None, DEFAULT_NS)
test_case.assertXmlNamespace(root, 'test', TEST_NS)
# The root node has this attribute with this value
test_case.assertXmlHasAttribute(
root, 'rootAtt', expected_value='attValue')
# The root node has this test:attribute with this value
# Note that we can not use the test:rootAtt syntax and must rely on
# the {uri}rootAtt syntax instead.
# That's why XPath is better for us
test_case.assertXmlHasAttribute(
root, '{%s}rootAtt' % TEST_NS, expected_value='nsValue')
# Same tests on attributes with xpath
test_case.assertXpathsExist(root, [
'@rootAtt', # No default namespace on attribute
'@test:rootAtt', # rootAtt with test namespace
# There are many element with the ID attribute
'//@id',
# attrElement's attr
'./ns:attrElement/@attr',
'//ns:attrElement[@attr]',
'//ns:parent/ns:attrElement/@attr',
'//test:parent/ns:attrElement/@attr',
# Specific values
'@rootAtt="attValue"',
'@test:rootAtt="nsValue"',
])
# Let's play with XPath and attribute values
test_case.assertXpathsUniqueValue(root, [
# All ID are unique
'//@id',
# This takes only the direct children of <root>
'./ns:attrElement/@attr',
# This takes only the children of <ns:parent>
'//ns:parent/ns:attrElement/@attr',
# This takes only the children of <test:parent>
'//test:parent/ns:attrElement/@attr',
])
with self.assertRaises(test_case.failureException):
test_case.assertXpathsUniqueValue(root, [
# This take all attrElement in the tree
'//ns:attrElement/@attr',
])
# Some node exists once and only once - it depends on the expression
test_case.assertXpathsOnlyOne(root, [
# Direct child
'./ns:attrElement',
# All children, but with specific attribute's value
'//ns:attrElement[@id=1]',
'//ns:attrElement[@id=2]',
'//ns:attrElement[@id=3]',
# It is the only element with this attribute
'//ns:attrElement[@uniqueAttr]',
# This attribute is the only on under test:parent's node
'//test:parent/ns:attrElement',
'//test:parent/ns:attrElement[@id=3]',
])
# Let's check @id's values
test_case.assertXpathValues(root, '//@id', ['1', '2', '3'])
if __name__ == "__main__":
unittest.main()
| mit | -2,761,722,860,377,451,500 | 38.135078 | 100 | 0.555379 | false |
cyliustack/sofa | bin/sofa_analyze.py | 1 | 50661 | import argparse
import matplotlib
matplotlib.use('agg')
import csv
import json
import multiprocessing as mp
import os
import random
import re
import sys
from functools import partial
from operator import attrgetter, itemgetter
import networkx as nx
import numpy as np
import pandas as pd
import time
from sofa_aisi import *
from sofa_common import *
from sofa_config import *
from sofa_print import *
from matplotlib import pyplot as plt
import grpc
import potato_pb2
import potato_pb2_grpc
import socket
import random
import subprocess
from sofa_ml import hsg_v2
def random_generate_color():
rand = lambda: random.randint(0, 255)
return '#%02X%02X%02X' % (64, rand(), rand())
def get_top_k_events(cfg, df, topk):
topk_events=[]
gby = df.groupby(['name'])
df_agg = gby.aggregate(np.sum)
df_agg_sorted = df_agg.sort_values(by=['duration'],ascending=False)
#memcpy = ['copyKind_1_','copyKind_2_','copyKind_8_']
if cfg.verbose:
print("Top %d Events: "%topk)
print(df_agg_sorted[['duration']][0:topk])
eventName = df_agg_sorted[df_agg_sorted.columns[0:0]].head(topk).index.values.tolist()
return eventName
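# Illustrative usage (assumption, not part of the original SOFA code):
# get_top_k_events() expects a trace DataFrame with at least 'name' and
# 'duration' columns and returns the names of the k events with the largest
# total duration, e.g.:
#
#     top5_names = get_top_k_events(cfg, df_gpu, 5)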
# input: pfv(performance feature vector), Pandas.DataFrame
# output: hint, docker_image
def get_hint(potato_server, features):
if len(features) > 0:
pfv = potato_pb2.PerformanceFeatureVector()
for i in range(len(features)):
name = features.iloc[i]['name']
value = features.iloc[i]['value']
#print('%s%s%s' % (str(i).ljust(10), name.ljust(30), ('%.3lf'%value).ljust(20)))
pfv.name.append(name)
pfv.value.append(value)
#print('Wait for response from POTATO server...')
myhostname = socket.gethostname()
channel = grpc.insecure_channel(potato_server)
stub = potato_pb2_grpc.HintStub(channel)
request = potato_pb2.HintRequest( hostname = myhostname,
pfv = pfv)
response = stub.Hint(request)
hint = response.hint
docker_image = response.docker_image
else:
hint = 'There is no pfv to get hints.'
docker_image = 'NA'
return hint, docker_image
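# Illustrative usage (assumption, not part of the original SOFA code):
# `features` is a pandas DataFrame with 'name' and 'value' columns; get_hint()
# sends it to the POTATO gRPC server as a PerformanceFeatureVector and returns
# the server's hint text and suggested docker image. The address below is a
# made-up example:
#
#     hint, docker_image = get_hint('localhost:50051', features)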
def concurrency_breakdown(logdir, cfg, df_mpstat, df_cpu, df_gpu, df_nvsmi, df_bandwidth, features):
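    # Summary (added comment): this function walks the monitored time span in
    # windows of 1/cfg.sys_mon_rate seconds, classifies each window as
    # usr/sys/gpu/iow/idl time from the mpstat and nvidia-smi samples that
    # fall inside it, and appends the resulting elapsed-time ratios to the
    # `features` DataFrame that it returns.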
if cfg.verbose:
print_title('Concurrency Breakdown Analysis')
total_elapsed_time = {'usr':0, 'sys':0, 'gpu':0, 'iow':0, 'idl':0}
elapsed_time_ratio = {'usr':0, 'sys':0, 'gpu':0, 'iow':0, 'idl':0}
total_interval_vector = []
total_performace_vector = []
if len(df_mpstat) == 0:
print_warning(cfg, 'no mpstat and perf traces!')
return features
t_begin = df_mpstat.iloc[0]['timestamp']
t_end = df_mpstat.iloc[-1]['timestamp']
t = t_begin
sample_time = (1 / float(cfg.sys_mon_rate))
while t < t_end:
t = t + sample_time
if cfg.roi_end > 0 and (t < cfg.roi_begin or t > cfg.roi_end):
continue
window_begin = t - sample_time
window_end = t
if len(df_cpu) > 0:
if df_cpu.iloc[0].timestamp > window_end:
continue
cond1 = (df_cpu['timestamp'] > window_begin)
cond2 = (df_cpu['timestamp'] <= window_end)
df_cpu_interval = df_cpu[ cond1 & cond2 ]
num_gpus = len(list(set(df_nvsmi['deviceId'])))
cond1 = (df_nvsmi['timestamp'] > window_begin)
cond2 = (df_nvsmi['timestamp'] <= window_end)
sm = df_nvsmi['event'] == int(0)
df_nvsmi_interval = df_nvsmi[ cond1 & cond2 & sm ]
cond1 = (df_mpstat['timestamp'] > window_begin)
cond2 = (df_mpstat['timestamp'] <= window_end)
df_mpstat_interval = df_mpstat[ cond1 & cond2 ]
cond1 = (df_bandwidth['timestamp'] > window_begin)
cond2 = (df_bandwidth['timestamp'] <= window_end)
tx = df_bandwidth['event'] == float(0)
rx = df_bandwidth['event'] == float(1)
df_tx_interval = df_bandwidth[ cond1 & cond2 & tx ]
df_rx_interval = df_bandwidth[ cond1 & cond2 & rx ]
mp_usr = []
mp_sys = []
mp_idl = []
mp_iow = []
usr = []
sys = []
irq = []
cpu_max = 0
cpu_min = 100
for i in range(len(df_mpstat_interval)):
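            # Each mpstat record stores per-CPU ratios in its 'name' field;
            # the part after ':' is split on '|' and, judging from the code
            # below, fields 1..5 hold usr, sys, idl, iow and irq percentages
            # (assumed layout).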
ratios = df_mpstat_interval.iloc[i]['name'].split(':')[1].split('|')
#print(ratios)
mp_usr.append(sample_time*int(ratios[1])/100.0)
mp_sys.append(sample_time*int(ratios[2])/100.0)
mp_idl.append(sample_time*int(ratios[3])/100.0)
mp_iow.append(sample_time*int(ratios[4])/100.0)
usr.append(int(ratios[1]))
sys.append(int(ratios[2]))
irq.append(int(ratios[5]))
cpu_tmp = int(ratios[1]) + int(ratios[2]) + int(ratios[5])
if cpu_tmp > cpu_max:
cpu_max = cpu_tmp
if cpu_tmp < cpu_min:
cpu_min = cpu_tmp
mp_usr = np.asarray(mp_usr)
mp_sys = np.asarray(mp_sys)
mp_idl = np.asarray(mp_idl)
mp_iow = np.asarray(mp_iow)
usr = np.asarray(usr)
sys = np.asarray(sys)
irq = np.asarray(irq)
elapsed_time = {'usr':0, 'sys':0, 'gpu':0, 'iow':0, 'idl':0}
if len(df_mpstat_interval) > 0:
elapsed_time['usr'] = mp_usr.max()
elapsed_time['sys'] = mp_sys.max()
elapsed_time['gpu'] = df_nvsmi_interval['duration'].max() * 0.01 * sample_time
elapsed_time['iow'] = mp_iow.max()
#print('gput,usrt = ', elapsed_time['gpu'], elapsed_time['usr'])
dominator = max(elapsed_time, key=elapsed_time.get)
#if elapsed_time['gpu'] > 0.1 :
# dominator = 'gpu'
if elapsed_time[dominator] > sample_time * int(cfg.is_idle_threshold)/100:
total_elapsed_time[dominator] = total_elapsed_time[dominator] + sample_time
else:
total_elapsed_time['idl'] += sample_time
if num_gpus > 0:
time_gpu_avg = df_nvsmi_interval['duration'].sum() * 0.01 * sample_time / num_gpus
else:
time_gpu_avg = 0
interval_vector = [mp_usr.max(),
mp_sys.max(),
mp_iow.max(),
mp_idl.max(),
time_gpu_avg,
df_tx_interval['bandwidth'].sum(),
df_rx_interval['bandwidth'].sum()]
total_interval_vector.append(tuple(interval_vector))
if num_gpus > 0:
sm_avg = df_nvsmi_interval['duration'].sum() / int(len(list(set(df_nvsmi_interval['deviceId']))))
else:
sm_avg = 0
performace_vector = [window_end,
df_nvsmi_interval['duration'].max(),
sm_avg,
df_nvsmi_interval['duration'].min(),
round((usr.mean() + sys.mean() + irq.mean()), 0),
cpu_max,
cpu_min]
total_performace_vector.append(tuple(performace_vector))
total_all_elapsed_time = sum(total_elapsed_time.values())
if total_all_elapsed_time > 0 :
elapsed_time_ratio['usr'] = 100 * total_elapsed_time['usr'] / total_all_elapsed_time
elapsed_time_ratio['sys'] = 100 * total_elapsed_time['sys'] / total_all_elapsed_time
elapsed_time_ratio['gpu'] = 100 * total_elapsed_time['gpu'] / total_all_elapsed_time
elapsed_time_ratio['idl'] = 100 * total_elapsed_time['idl'] / total_all_elapsed_time
elapsed_time_ratio['iow'] = 100 * total_elapsed_time['iow'] / total_all_elapsed_time
if cfg.verbose:
print('Elapsed Time = %.1lf ' % total_all_elapsed_time)
print('USR = %.1lf %%' % elapsed_time_ratio['usr'])
print('SYS = %.1lf %%' % elapsed_time_ratio['sys'])
if num_gpus > 0:
print('GPU = %.1lf %%' % elapsed_time_ratio['gpu'])
print('IDL = %.1lf %%' % elapsed_time_ratio['idl'])
print('IOW = %.1lf %%' % elapsed_time_ratio['iow'])
if cfg.spotlight_gpu:
elapsed_hotspot_time = cfg.roi_end - cfg.roi_begin
else:
elapsed_hotspot_time = 0
df = pd.DataFrame({ 'name':['elapsed_usr_time_ratio', 'elapsed_sys_time_ratio', 'elapsed_gpu_time_ratio',
'elapsed_iow_time_ratio', 'elapsed_hotspot_time'],
'value':[elapsed_time_ratio['usr'], elapsed_time_ratio['sys'], elapsed_time_ratio['gpu'],
elapsed_time_ratio['iow'], elapsed_hotspot_time ] },
columns=['name','value'])
features = pd.concat([features, df])
if len(total_performace_vector) > 0:
performance_table = pd.DataFrame(total_performace_vector, columns = ['time', 'max_gpu_util', 'avg_gpu_util', 'min_gpu_util', 'cpu_util', 'cpu_max', 'cpu_min'])
performance_table.to_csv('%s/performance.csv' % logdir)
vector_table = pd.DataFrame(total_interval_vector, columns = ['usr' , 'sys', 'iow', 'idl','gpu', 'net_tx', 'net_rx'])
pearson = vector_table.corr(method ='pearson').round(2)
if cfg.verbose:
print('Correlation Table :')
print(pearson)
df = pd.DataFrame({ 'name':['corr_gpu_usr', 'corr_gpu_sys', 'corr_gpu_iow', 'corr_gpu_ntx', 'corr_gpu_nrx'], 'value':[pearson['gpu'].usr, pearson['gpu'].sys, pearson['gpu'].iow, pearson['gpu'].net_tx, pearson['gpu'].net_rx]}, columns=['name','value'])
features = pd.concat([features, df])
return features
def payload_sum(df):
print((len(df)))
class Event:
def __init__(self, name, ttype, timestamp, duration):
self.name = name
self.ttype = ttype # 0 for begin, 1 for end
self.timestamp = timestamp
self.duration = duration
def __repr__(self):
return repr((self.name, self.ttype, self.timestamp, self.duration))
def nvsmi_profile(logdir, cfg, df_nvsmi, features):
if not cfg.cluster_ip and cfg.verbose:
print_title('SM & MEM & ENCODE/DECODE Profiling')
if cfg.spotlight_gpu:
if cfg.roi_end == 0 :
print_warning(cfg, 'spotlight_gpu has no effects.')
else:
cond1 = (df_nvsmi['timestamp'] > cfg.roi_begin)
cond2 = (df_nvsmi['timestamp'] <= cfg.roi_end)
df_nvsmi = df_nvsmi[ cond1 & cond2 ]
sm_start = df_nvsmi.iloc[0].timestamp
sm_end = df_nvsmi.iloc[-1].timestamp
SM_time = sm_end - sm_start
result = df_nvsmi.groupby(['deviceId','event'])['duration'].mean()
result = result.astype(int)
gpu_sm_util = df_nvsmi.groupby(['event'])['duration'].mean()[0]
gpu_mem_util = df_nvsmi.groupby(['event'])['duration'].mean()[1]
if cfg.nvsmi_data:
gpu_enc_util = df_nvsmi.groupby(['event'])['duration'].mean()[2]
gpu_dec_util = df_nvsmi.groupby(['event'])['duration'].mean()[3]
else:
gpu_enc_util = 0
gpu_dec_util = 0
sm = df_nvsmi['event'] == int(0)
mem = df_nvsmi['event'] == int(1)
enc = df_nvsmi['event'] == int(2)
dec = df_nvsmi['event'] == int(3)
gpunum = list(set(df_nvsmi['deviceId']))
res = pd.DataFrame([], columns=['sm', 'mem', 'enc', 'dec'])
sm_q = pd.DataFrame([], columns=['Q1', 'Q2', 'Q3', 'Avg'])
mem_q = pd.DataFrame([], columns=['Q1', 'Q2', 'Q3', 'Avg'])
for i in gpunum:
gpuid = df_nvsmi['deviceId'] == int(i)
gpudata = [round(df_nvsmi[sm & gpuid]['duration'].mean(), 2),
round(df_nvsmi[mem & gpuid]['duration'].mean(), 2),
round(df_nvsmi[enc & gpuid]['duration'].mean(), 2),
round(df_nvsmi[dec & gpuid]['duration'].mean(), 2)]
smdata = [round(df_nvsmi[sm & gpuid]['duration'].quantile(0.25), 2),
round(df_nvsmi[sm & gpuid]['duration'].quantile(0.5), 2),
round(df_nvsmi[sm & gpuid]['duration'].quantile(0.75), 2),
round(df_nvsmi[sm & gpuid]['duration'].mean(), 2)]
memdata = [round(df_nvsmi[mem & gpuid]['duration'].quantile(0.25), 2),
round(df_nvsmi[mem & gpuid]['duration'].quantile(0.5), 2),
round(df_nvsmi[mem & gpuid]['duration'].quantile(0.75), 2),
round(df_nvsmi[mem & gpuid]['duration'].mean(), 2)]
gpu_tmp = pd.DataFrame([gpudata], columns=['sm', 'mem', 'enc', 'dec'], index=[i])
sm_tmp = pd.DataFrame([smdata], columns=['Q1', 'Q2', 'Q3', 'Avg'], index=[i])
mem_tmp = pd.DataFrame([memdata], columns=['Q1', 'Q2', 'Q3', 'Avg'], index=[i])
res = pd.concat([res, gpu_tmp])
sm_q = pd.concat([sm_q, sm_tmp])
mem_q = pd.concat([mem_q, mem_tmp])
res.index.name = 'gpu_id'
sm_q.index.name = 'gpu_id'
mem_q.index.name = 'gpu_id'
if not cfg.cluster_ip and cfg.verbose:
print('GPU Utilization (%):')
print(res)
print('\nGPU SM Quartile (%):')
print(sm_q)
print('\nGPU MEM Quartile (%):')
print(mem_q)
print('Overall Average SM Utilization (%): ', int(gpu_sm_util))
print('Overall Average MEM Utilization (%): ', int(gpu_mem_util))
print('Overall Average ENC Utilization (%): ', int(gpu_enc_util))
print('Overall Average DEC Utilization (%): ', int(gpu_dec_util))
print('Overall Active GPU Time (s): %.3lf' % (SM_time * gpu_sm_util/100.0))
df = pd.DataFrame({'name':['gpu_sm_util_q2', 'gpu_sm_util_q3', 'gpu_sm_util', 'gpu_mem_util_q2', 'gpu_mem_util_q3', 'gpu_mem_util'],
'value':[df_nvsmi[sm & gpuid]['duration'].quantile(0.5),
df_nvsmi[sm & gpuid]['duration'].quantile(0.75),
int(gpu_sm_util),
df_nvsmi[mem & gpuid]['duration'].quantile(0.5),
df_nvsmi[mem & gpuid]['duration'].quantile(0.75),
int(gpu_mem_util),
]},
columns=['name','value'])
features = pd.concat([features, df])
return features
def gpu_profile(logdir, cfg, df_gpu, features):
if cfg.verbose:
print_title('GPU Profiling')
print('Per-GPU time (s):')
groups = df_gpu.groupby("deviceId")["duration"]
gpu_time = 0
for key, item in groups:
gpuid = int(float(key))
per_gpu_time = groups.get_group(key).sum()
if cfg.verbose:
print("[%d]: %lf" % (gpuid, per_gpu_time))
gpu_time = gpu_time + per_gpu_time
num_gpus = len(groups)
kernel_time = 0
grouped_df = df_gpu.groupby("copyKind")["duration"]
for key, item in grouped_df:
if key == 0:
kernel_time = grouped_df.get_group(key).sum()
nccl_time = 0
grouped_df = df_gpu.groupby("name")["duration"]
for key, item in grouped_df:
#print("[%s]: %lf" % (key, grouped_df.get_group(key).sum()))
if key.find("nccl") != -1:
nccl_time = nccl_time + grouped_df.get_group(key).sum()
features = comm_profile(logdir, cfg, df_gpu, features)
get_top_k_events(cfg, df_gpu, 10)
df = pd.DataFrame({'name':['gpu_time', 'num_gpus', 'kernel_time', 'nccl_time'],
'value':[gpu_time, num_gpus, kernel_time, nccl_time] },
columns=['name','value'])
features = pd.concat([features, df])
return features
def strace_profile(logdir, cfg, df, features):
print_title('STRACE Profiling:')
return features
def net_profile(logdir, cfg, df, features):
if not cfg.cluster_ip:
print_title("Network Profiling:")
grouped_df = df.groupby("name")["duration"]
net_time = 0
n_packets = 0
for key, item in grouped_df:
#print("[%s]: %lf" % (key, grouped_df.get_group(key).sum()))
if key.find("network:tcp:") != -1:
net_time = net_time + grouped_df.get_group(key).sum()
n_packets = n_packets + 1
#print(("total network time (s) = %.3lf" % net_time))
#print(("total amount of network packets = %d" % n_packets))
# total network packet
packet_num_matrix = df.groupby(['pkt_src','pkt_dst','payload']).size().unstack(level=1, fill_value=0)
# total network traffic
packet_sum_matrix = df.groupby(['pkt_src','pkt_dst'])["payload"].sum().unstack(level=1, fill_value=0)
# ================ change pandas table columns and index name ====
rename_index = packet_sum_matrix.index.tolist()
rename_index2 = packet_num_matrix.index.tolist()
rename_columns = packet_sum_matrix.columns.tolist()
rename_columns2 = packet_num_matrix.columns.tolist()
def zero(s):
if s[0:2] == '00':
s = s[2]
elif (s[0] == '0') and (s[1] != '0'):
s = s[1:3]
return(s)
def check_str(rename_list):
rename_list_new = []
for j in rename_list:
j = str(int(j))
a = j[-9:-6]
b = j[-6:-3]
c = j[-3:]
j = j[:-9] + '.' + zero(a) + '.' + zero(b) + '.' + zero(c)
rename_list_new.append(j)
return(rename_list_new)
def check_str2(rename_list):
rename_columns_2 = []
for i in rename_list:
i = str(int(i[0]))
a = i[-9:-6]
b = i[-6:-3]
c = i[-3:]
i = i[:-9] + '.' + zero(a) + '.' + zero(b) + '.' + zero(c)
rename_columns_2.append(i)
return(rename_columns_2)
rename_index_new = check_str(rename_index)
rename_index_new = dict(zip(rename_index, rename_index_new))
rename_index2_new = check_str2(rename_index2)
rename_index2_final = list(set(rename_index2_new))
rename_index2_final.sort(key=rename_index2_new.index)
rename_columns_new = check_str(rename_columns)
rename_columns_new = dict(zip(rename_columns, rename_columns_new))
rename_columns2_new = check_str(rename_columns2)
rename_columns2_new = dict(zip(rename_columns2, rename_columns2_new))
# rename here
packet_sum_matrix = packet_sum_matrix.rename(columns=rename_columns_new)
packet_num_matrix = packet_num_matrix.rename(columns=rename_columns2_new)
packet_sum_matrix = packet_sum_matrix.rename(index=rename_index_new)
packet_num_matrix.index.set_levels(rename_index2_final , level = 0, inplace = True)
if cfg.verbose:
print("total amount of network traffic : ", convertbyte(df['payload'].sum()), '\n', packet_sum_matrix.to_string(), "\n")
print("total amount of network packets = %d\n" % packet_num_matrix.sum().sum() ,packet_num_matrix.to_string(), "\n")
network_value = []
src = []
dst = []
final = []
for index in packet_sum_matrix.index:
for column in packet_sum_matrix.columns:
src.append(index)
dst.append(column)
network_value.append(packet_sum_matrix[column][index])
record = list(zip(src, dst, network_value))
record.sort(key=lambda tup:tup[2], reverse=True)
for src, dst, value in record:
if value == 0:
pass
else:
item = [src, dst, convertbyte(value), round(value / df['payload'].sum(), 2)]
final.append(item)
summary = pd.DataFrame(final, columns=['Source', 'Destination', 'Amount', 'Percentage of a Node'])
summary.to_csv(logdir + 'netrank.csv',
mode='w',
header=True,
index=False)
df = pd.DataFrame({'name':['net_time'],
'value':[net_time] },
columns=['name','value'])
features = pd.concat([features, df])
return features
def convertbyte(B):
B = int(B)
KB = float(1024)
MB = float(KB ** 2) # 1,048,576
GB = float(KB ** 3) # 1,073,741,824
TB = float(KB ** 4) # 1,099,511,627,776
if B < KB:
return '{} Bytes'.format(B)
elif KB <= B < MB:
return '{0:.2f} KB'.format(B/KB)
elif MB <= B < GB:
return '{0:.2f} MB'.format(B/MB)
elif GB <= B < TB:
return '{0:.2f} GB'.format(B/GB)
elif TB <= B:
return '{0:.2f} TB'.format(B/TB)
def convertbytes(B):
B = float(B)
KB = float(1024)
MB = float(KB ** 2) # 1,048,576
GB = float(KB ** 3) # 1,073,741,824
TB = float(KB ** 4) # 1,099,511,627,776
if B < KB:
return '{0:.2f} B/s'.format(B)
elif KB <= B < MB:
return '{0:.2f} KB/s'.format(B/KB)
elif MB <= B < GB:
return '{0:.2f} MB/s'.format(B/MB)
elif GB <= B < TB:
return '{0:.2f} GB/s'.format(B/GB)
elif TB <= B:
return '{0:.2f} TB/s'.format(B/TB)
def netbandwidth_profile(logdir, cfg, df, features):
if not cfg.cluster_ip and cfg.verbose:
print_title('Network Bandwidth Profiling:')
tx = df['event'] == float(0)
rx = df['event'] == float(1)
bw_tx_q1 = df[tx]['bandwidth'].quantile(0.25)
bw_tx_q2 = df[tx]['bandwidth'].quantile(0.5)
bw_tx_q3 = df[tx]['bandwidth'].quantile(0.75)
bw_tx_mean = int(df[tx]['bandwidth'].mean())
bw_rx_q1 = df[rx]['bandwidth'].quantile(0.25)
bw_rx_q2 = df[rx]['bandwidth'].quantile(0.5)
bw_rx_q3 = df[rx]['bandwidth'].quantile(0.75)
bw_rx_mean = int(df[rx]['bandwidth'].mean())
with open('%s/netstat.txt' % logdir) as f:
lines = f.readlines()
first_line = lines[0]
last_line = lines[-1]
tx_begin = first_line.split(',')[1]
rx_begin = first_line.split(',')[2]
tx_end = last_line.split(',')[1]
rx_end = last_line.split(',')[2]
tx_amount = int(last_line.split(',')[1]) - int(first_line.split(',')[1])
rx_amount = int(last_line.split(',')[2]) - int(first_line.split(',')[2])
if not cfg.cluster_ip:
bw_tx_q1 = df[tx]['bandwidth'].quantile(0.25)
bw_tx_q2 = df[tx]['bandwidth'].quantile(0.5)
bw_tx_q3 = df[tx]['bandwidth'].quantile(0.75)
bw_tx_mean = int(df[tx]['bandwidth'].mean())
bw_rx_q1 = df[rx]['bandwidth'].quantile(0.25)
bw_rx_q2 = df[rx]['bandwidth'].quantile(0.5)
bw_rx_q3 = df[rx]['bandwidth'].quantile(0.75)
bw_rx_mean = int(df[rx]['bandwidth'].mean())
if cfg.verbose:
print('Amount of Network Traffic : %s' % (convertbyte(tx_amount + rx_amount)))
print('Amount of tx : %s' % convertbyte(tx_amount))
print('Amount of rx : %s' % convertbyte(rx_amount))
print('Bandwidth Quartile :')
print('Q1 tx : %s, rx : %s' % ( convertbytes(bw_tx_q1), convertbytes(bw_rx_q1)))
print('Q2 tx : %s, rx : %s' % ( convertbytes(bw_tx_q2), convertbytes(bw_rx_q2)))
print('Q3 tx : %s, rx : %s' % ( convertbytes(bw_tx_q3), convertbytes(bw_rx_q3)))
print('Avg tx : %s, rx : %s'% ( convertbytes(bw_tx_mean), convertbytes(bw_rx_mean)))
#network chart part
all_time = df[tx]['timestamp'].tolist()
all_tx = df[tx]['bandwidth'].tolist()
all_rx = df[rx]['bandwidth'].tolist()
fig = plt.figure(dpi=128, figsize=(16, 14))
plt.plot(all_time, all_tx, c='red', alpha=0.5, label='tx')
plt.plot(all_time, all_rx, c='blue', alpha=0.5, label='rx')
plt.legend(loc='upper right')
plt.title("Network Report", fontsize=18)
plt.xlabel('Timestamp (s)', fontsize=16)
plt.ylabel("Bandwidth (bytes)", fontsize=16)
fig.savefig("%s/network_report.pdf" % logdir, bbox_inches='tight')
if not cfg.cluster_ip and cfg.verbose:
print('Network Bandwidth Chart is saved at %s/network_report.pdf' %logdir)
df_feature = pd.DataFrame({ 'name':['bw_tx_q2', 'bw_tx_q3', 'bw_rx_q2', 'bw_rx_q3'],
'value':[bw_tx_q2, bw_tx_q3, bw_rx_q2, bw_rx_q3] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def blktrace_latency_profile(logdir, cfg, df, features):
with open('%s/btt.txt' % logdir) as f:
lines = f.readlines()
for i, line in enumerate(lines):
if '==================== All Devices ====================' in line:
start = i
if '==================== Device Merge Information ====================' in line:
end = i
break
bttoutput_result = lines[start:end]
df_offset = pd.read_table('%s/offset_all.txt' % logdir, delim_whitespace=True, names=('time', 'start', 'end'))
time = df_offset['time'].tolist()
start_b = df_offset['start'].tolist()
end_b = df_offset['end'].tolist()
fig = plt.figure(dpi=128, figsize=(16, 14))
plt.plot(time, start_b, c='red', marker='o', alpha=0.3, label='Start block')
plt.legend(loc='upper right')
plt.title("Block Offset Report", fontsize=18)
plt.xlabel('Timestamp (s)', fontsize=16)
plt.ylabel("Block Number", fontsize=16)
fig.savefig("%s/offset_of_device_report.pdf" % logdir, bbox_inches='tight')
print('Offset of Device Report is saved at %s/offset_of_device_report.pdf' %logdir)
if cfg.verbose:
print_title('Storage Profiling:')
print('Blktracae Latency (s):')
for btt in bttoutput_result:
print(btt[:-1])
blktrace_latency = df['event'] == 'C'
blktrace_latency_q1 = df[blktrace_latency]['duration'].quantile(0.25)
blktrace_latency_q2 = df[blktrace_latency]['duration'].quantile(0.5)
blktrace_latency_q3 = df[blktrace_latency]['duration'].quantile(0.75)
blktrace_latency_mean = df[blktrace_latency]['duration'].mean()
df_feature = pd.DataFrame({ 'name':['blktrace_latency_q1','blktrace_latency_q2','blktrace_latency_q3'],
'value': [blktrace_latency_q1, blktrace_latency_q2, blktrace_latency_q3] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def diskstat_profile(logdir, cfg, df, features):
#diskstat_dev = list(set(df['dev']))
diskstat_r_q1 = df.groupby('dev')['d_read'].quantile(0.25)
diskstat_w_q1 = df.groupby('dev')['d_write'].quantile(0.25)
diskstat_q1 = df.groupby('dev')['d_disk_total'].quantile(0.25)
diskstat_r_q2 = df.groupby('dev')['d_read'].quantile(0.5)
diskstat_w_q2 = df.groupby('dev')['d_write'].quantile(0.5)
diskstat_q2 = df.groupby('dev')['d_disk_total'].quantile(0.5)
diskstat_r_q3 = df.groupby('dev')['d_read'].quantile(0.75)
diskstat_w_q3 = df.groupby('dev')['d_write'].quantile(0.75)
diskstat_q3 = df.groupby('dev')['d_disk_total'].quantile(0.75)
diskstat_r_avg = df.groupby('dev')['d_read'].mean()
diskstat_w_avg = df.groupby('dev')['d_write'].mean()
diskstat_avg = df.groupby('dev')['d_disk_total'].mean()
diskstat_r_iops = df.groupby('dev')['r_iops'].mean()
diskstat_w_iops = df.groupby('dev')['w_iops'].mean()
diskstat_iops = df.groupby('dev')['iops'].mean()
diskstat_wait = df.groupby('dev')['await_time'].mean()
diskstat_table = pd.concat([diskstat_r_q1, diskstat_r_q2, diskstat_r_q3, diskstat_r_avg,
diskstat_w_q1, diskstat_w_q2, diskstat_w_q3, diskstat_w_avg,
diskstat_q1, diskstat_q2, diskstat_q3, diskstat_avg,
diskstat_r_iops, diskstat_w_iops, diskstat_iops,
diskstat_wait], axis=1, sort=False)
diskstat_columns = ['Q1 throughput(Read)', 'Q2 throughput(Read)', 'Q3 throughput(Read)', 'Avg throughput(Read)',
'Q1 throughput(Write)', 'Q2 throughput(Write)', 'Q3 throughput(Write)', 'Avg throughput(Write)',
'Q1 throughput(R+W)', 'Q2 throughput(R+W)', 'Q3 throughput(R+W)', 'Avg throughput(R+W)',
'Avg IOPS(Read)', 'Avg IOPS(Write)', 'Avg IOPS(R+W)', 'Avg Await time(ms)']
diskstat_table.columns = diskstat_columns
diskstat_dev = diskstat_table.index.format()
final_table = pd.DataFrame(columns=diskstat_columns)
for j, dev in enumerate(diskstat_dev):
tmp_list = []
for i in diskstat_columns[:-4]:
tmp_list.append(convertbytes(diskstat_table.iloc[j][i]))
for i in diskstat_columns[-4:-1]:
tmp_list.append('%d' % int(diskstat_table.iloc[j][i]))
tmp_list.append('%.3lf ms' % diskstat_table.iloc[j][-1])
tmp_table = pd.DataFrame([tuple(tmp_list)],
columns=diskstat_columns,
index=[dev])
final_table = pd.concat([final_table, tmp_table])
if cfg.verbose:
print_title('DISKSTAT Profiling:')
print('Disk Throughput Quartile :')
print(final_table.T)
df_feature = pd.DataFrame({ 'name':['diskstat_q1','diskstat_q2','diskstat_q3'],
'value': [diskstat_q1.mean(), diskstat_q2.mean(), diskstat_q3.mean()] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def cpu_profile(logdir, cfg, df):
if cfg.verbose:
print_title('CPU Profiling:')
print('elapsed_time (s) = %.6lf' % cfg.elapsed_time)
grouped_df = df.groupby("deviceId")["duration"]
total_exec_time = 0
for key, item in grouped_df:
print(("[%d]: %lf" % (key, grouped_df.get_group(key).sum())))
total_exec_time = total_exec_time + grouped_df.get_group(key).sum()
print("total execution time (s) = %.3lf" % total_exec_time)
cpu_detail_profile_df = df[['timestamp','duration','name']]
cpu_detail_profile_df = cpu_detail_profile_df.sort_values(by=['duration'], ascending=False)
cpu_detail_profile_df['ratio(%)'] = cpu_detail_profile_df['duration']/total_exec_time * 100
cpu_detail_profile_df = cpu_detail_profile_df[['timestamp','ratio(%)','duration','name']]
print(cpu_detail_profile_df[:20].to_string(index=False))
def vmstat_profile(logdir, cfg, df, features):
_,_,_,_,_,_,df['si'],df['so'],df['bi'],df['bo'],df['in'],df['cs'],_,_,_,_,_=df['name'].str.split('|').str
for col_name in ('si','so','bi','bo','in','cs'):
df[col_name] = df[col_name].str[3:]
vmstat_traces = df[['si','so','bi','bo','in','cs']].astype(float)
vm_bi = vmstat_traces['bi'].mean()
vm_bo = vmstat_traces['bo'].mean()
vm_cs = vmstat_traces['cs'].mean()
vm_in = vmstat_traces['in'].mean()
if cfg.verbose:
print_title('VMSTAT Profiling:')
print('average bi/s: %d' % int(vm_cs))
print('average bo/s: %d' % int(vm_in))
print('average cs/s: %d' % int(vm_bi))
print('average in/s: %d' % int(vm_bo))
df_feature = pd.DataFrame({ 'name':['vm_bi', 'vm_bo', 'vm_cs', 'vm_in' ],
'value':[vm_bi, vm_bo, vm_cs, vm_in] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def mpstat_profile(logdir, cfg, df, features):
if not cfg.cluster_ip and cfg.verbose:
print_title('MPSTAT Profiling:')
num_cores = int(df['deviceId'].max() + 1)
df_summary = pd.DataFrame( np.zeros((num_cores,5)), columns=['USR','SYS','IDL','IOW','IRQ'])
_,_,_,_,_,df['USR'],df['SYS'],df['IDL'],df['IOW'],df['IRQ'],_ = df["name"].str.split('|').str
df[['USR','SYS','IDL','IOW','IRQ']] = df[['USR','SYS','IDL','IOW','IRQ']].astype(float)
df["dt_all"] = np.where(df["IDL"]==100, 0.1, df["duration"]/((100-df["IDL"])/100.0))
df["t_USR"] = df['dt_all'] * df['USR']/100.0
df["t_SYS"] = df['dt_all'] * df['SYS']/100.0
df["t_IDL"] = df['dt_all'] * df['IDL']/100.0
df["t_IOW"] = df['dt_all'] * df['IOW']/100.0
df["t_IRQ"] = df['dt_all'] * df['IRQ']/100.0
dfs=[]
for i in range(num_cores):
dfs.append(df.loc[df['deviceId'] == float(i)])
for index,dff in enumerate(dfs):
df_summary.iloc[index]['USR'] = dff['t_USR'].sum()
df_summary.iloc[index]['SYS'] = dff['t_SYS'].sum()
df_summary.iloc[index]['IDL'] = dff['t_IDL'].sum()
df_summary.iloc[index]['IRQ'] = dff['t_IRQ'].sum()
df_summary.iloc[index]['IOW'] = dff['t_IOW'].sum()
if not cfg.cluster_ip and cfg.verbose:
print('CPU Utilization (%):')
print('core\tUSR\tSYS\tIDL\tIOW\tIRQ')
for i in range(len(df_summary)):
t_sum = df_summary.iloc[i].sum()
if not cfg.cluster_ip and cfg.verbose:
print('%3d\t%3d\t%3d\t%3d\t%3d\t%3d'%(i,int(100.0*df_summary.iloc[i]['USR']/t_sum),
int(100.0*df_summary.iloc[i]['SYS']/t_sum),
int(100.0*df_summary.iloc[i]['IDL']/t_sum),
int(100.0*df_summary.iloc[i]['IOW']/t_sum),
int(100.0*df_summary.iloc[i]['IRQ']/t_sum) ))
if not cfg.cluster_ip and cfg.verbose:
print('CPU Time (s):')
print('core\tUSR\tSYS\tIDL\tIOW\tIRQ')
for i in range(len(df_summary)):
t_sum = df_summary.iloc[i].sum()
if not cfg.cluster_ip and cfg.verbose:
print('%3d\t%.2lf\t%.2lf\t%.2lf\t%.2lf\t%.2lf'%(i,
df_summary.iloc[i]['USR'],
df_summary.iloc[i]['SYS'],
df_summary.iloc[i]['IDL'],
df_summary.iloc[i]['IOW'],
df_summary.iloc[i]['IRQ'] ))
total_cpu_time = df_summary[['USR','SYS','IRQ']].sum().sum()
cpu_util = int(100*total_cpu_time / (num_cores*cfg.elapsed_time))
if not cfg.cluster_ip and cfg.verbose:
print('Active CPU Time (s): %.3lf' % total_cpu_time)
print('Active CPU ratio (%%): %3d' % cpu_util)
df_feature = pd.DataFrame({ 'name':['num_cores', 'cpu_util'],
'value':[num_cores, cpu_util] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def sofa_analyze(cfg):
print_main_progress('SOFA analyzing...')
filein = []
df_cpu = pd.DataFrame([], columns=cfg.columns)
df_gpu = pd.DataFrame([], columns=cfg.columns)
df_net = pd.DataFrame([], columns=cfg.columns)
df_mpstat = pd.DataFrame([], columns=cfg.columns)
df_vmstat = pd.DataFrame([], columns=cfg.columns)
df_bandwidth = pd.DataFrame([], columns=cfg.columns)
df_blktrace = pd.DataFrame([], columns=cfg.columns)
df_diskstat = pd.DataFrame([], columns=cfg.columns)
df_nvsmi = pd.DataFrame([], columns=cfg.columns)
iter_summary = None
logdir = cfg.logdir
with open(logdir+'/misc.txt') as f:
lines = f.readlines()
elapsed_time = float(lines[0].split()[1])
vcores = int(lines[2].split()[1])
cfg.elapsed_time = float(lines[0].split()[1])
filein_gpu = logdir + "gputrace.csv"
filein_cpu = logdir + "cputrace.csv"
filein_net = logdir + "nettrace.csv"
filein_vmstat = logdir + "vmstat.csv"
filein_mpstat = logdir + "mpstat.csv"
filein_strace = logdir + "strace.csv"
filein_nvsmi = logdir + "nvsmi_trace.csv"
filein_bandwidth = logdir + "netstat.csv"
filein_blktrace = logdir + "blktrace.csv"
filein_diskstat = logdir + "diskstat_vector.csv"
if os.path.isfile('%s/nvlink_topo.txt' % logdir):
with open(logdir + 'nvlink_topo.txt') as f:
lines = f.readlines()
if len(lines) > 0:
title = lines[0]
num_gpus = 1
for word in title.split():
if re.match(r'GPU', word) != None :
num_gpus = num_gpus + 1
print_info(cfg,'# of GPUs: ' + str(num_gpus) )
edges = []
if len(lines) >= num_gpus+1:
for i in range(num_gpus):
connections = lines[1+i].split()
for j in range(len(connections)):
if connections[j] == 'NV1' or connections[j] == 'NV2':
edges.append((i,j-1))
#print('%d connects to %d' % (i, j-1))
ring_found = False
G = nx.DiGraph(edges)
# Try to find ring with its length of num_gpus
for cycle in nx.simple_cycles(G):
if len(cycle) == num_gpus:
if cfg.verbose:
print('One of the recommended ring having length of %d' % len(cycle))
ring_found = True
os.system("mkdir -p sofalog/sofa_hints/")
xring_order = ','.join(map(str, cycle))
with open("sofalog/sofa_hints/xring_order.txt", "w") as f:
f.write('export CUDA_VISIBLE_DEVICES=' + xring_order)
break
# Try to find ring with its length of num_gpus/2
if not ring_found:
for cycle in nx.simple_cycles(G):
if len(cycle) == num_gpus/2:
print(("One of the recommended ring having length of %d" % len(cycle) ))
ring_found = True
os.system("mkdir -p sofalog/sofa_hints/")
xring_order = ','.join(map(str, cycle))
with open("sofalog/sofa_hints/xring_order.txt", "w") as f:
f.write('export CUDA_VISIBLE_DEVICES=' + xring_order)
break
# Construct Performance Features
features = pd.DataFrame({'name':['elapsed_time'], 'value':[cfg.elapsed_time]}, columns=['name','value'])
try:
df_nvsmi = pd.read_csv(filein_nvsmi)
if not df_nvsmi.empty and cfg.spotlight_gpu:
state = 0
sm_high = 0
trigger = 10
for i in range(len(df_nvsmi)):
if df_nvsmi.iloc[i].event == 0 and df_nvsmi.iloc[i].deviceId == 0 :
if df_nvsmi.iloc[i].duration >= 50:
sm_high = min(trigger, sm_high + 1)
if df_nvsmi.iloc[i].duration < 10:
sm_high = max(0, sm_high - 1)
if state == 0 and sm_high == trigger:
state = 1
cfg.roi_begin = df_nvsmi.iloc[i].timestamp
elif state == 1 and sm_high == 0:
state = 0
cfg.roi_end = df_nvsmi.iloc[i].timestamp
#print('sm_high=%d state=%d' % (sm_high, state))
if cfg.roi_end - cfg.roi_begin < 0:
cfg.roi_end = 0
cfg.roi_begin = 0
except IOError:
print_warning(cfg, "nvsmi_trace.csv is not found")
try:
df_cpu = pd.read_csv(filein_cpu)
if not df_cpu.empty:
if cfg.verbose:
cpu_profile(logdir, cfg, df_cpu)
if cfg.enable_swarms and len(df_cpu) > cfg.num_swarms:
df_cpu, swarms = hsg_v2(cfg, df_cpu)
except IOError as e:
df_cpu = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_cpu)
try:
df_strace = pd.read_csv(filein_strace)
if not df_strace.empty:
features = strace_profile(logdir, cfg, df_strace, features)
except IOError as e:
df_strace = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_strace)
try:
df_net = pd.read_csv(filein_net)
if not df_net.empty:
features = net_profile(logdir, cfg, df_net, features)
except IOError as e:
df_net = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_net)
try:
df_bandwidth = pd.read_csv(filein_bandwidth)
if not df_bandwidth.empty:
features = netbandwidth_profile(logdir, cfg, df_bandwidth, features)
except IOError as e:
df_bandwidth = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_bandwidth)
try:
df_blktrace = pd.read_csv(filein_blktrace)
if not df_blktrace.empty:
features = blktrace_latency_profile(logdir, cfg, df_blktrace, features)
except IOError as e:
df_blktrace = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_blktrace)
try:
df_diskstat = pd.read_csv(filein_diskstat)
if not df_diskstat.empty:
features = diskstat_profile(logdir, cfg, df_diskstat, features)
except IOError as e:
df_diskstat = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_diskstat)
try:
df_vmstat = pd.read_csv(filein_vmstat)
if not df_vmstat.empty:
features = vmstat_profile(logdir, cfg, df_vmstat, features)
except IOError as e:
df_vmstat = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_vmstat)
try:
df_mpstat = pd.read_csv(filein_mpstat)
if not df_mpstat.empty:
features = mpstat_profile(logdir, cfg, df_mpstat, features)
except IOError as e:
df_mpstat = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_mpstat)
try:
df_nvsmi = pd.read_csv(filein_nvsmi)
features = nvsmi_profile(logdir, cfg, df_nvsmi, features)
except IOError:
print_warning(cfg, "nvsmi_trace.csv is not found")
try:
df_gpu = pd.read_csv(filein_gpu)
if not df_gpu.empty:
features = gpu_profile(logdir, cfg, df_gpu, features)
except IOError:
df_gpu = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found. If there is no need to profile GPU, just ignore it." % filein_gpu)
try:
if len(df_mpstat)>0:
df_nvsmi.append(df_mpstat.iloc[0])
features = concurrency_breakdown(logdir, cfg, df_mpstat, df_cpu, df_gpu, df_nvsmi, df_bandwidth, features)
except IOError as e:
print_warning(cfg, "Some files are not found, which are needed for concurrency_breakdown analysis")
if cfg.enable_aisi:
selected_pattern, iter_summary, features = sofa_aisi(logdir, cfg, df_cpu, df_gpu, df_strace, df_mpstat, features)
if 'IS_SOFA_ON_HAIHUB' not in os.environ or os.environ['IS_SOFA_ON_HAIHUB'] == 'no':
print_title('Final Performance Features')
print('%s%s%s%s' % ('ID'.ljust(10),'Feature'.ljust(30),'Value'.ljust(20),'Unit'.ljust(20)) )
for i in range(len(features)):
name = features.iloc[i]['name']
value = features.iloc[i]['value']
print('%s%s%s' % (str(i).ljust(10), name.ljust(30), ('%.3lf'%value).ljust(20)))
if cfg.spotlight_gpu:
try:
print('Elapsed hotspot time: %.3lf' % features[features.name=='elapsed_hotspot_time'].value)
except:
print_warning(cfg, 'elpased_hostspot_time is not defined.')
if cfg.potato_server:
if cfg.potato_server.find(':') == -1:
cfg.potato_server = cfg.potato_server + ':50051'
hint, docker_image = get_hint(cfg.potato_server, features)
df_report = pd.read_json(hint, orient='table')
file_potato_report = cfg.logdir + 'potato_report.html'
# Export report to HTML file.
df_report.to_html(file_potato_report )
with open(file_potato_report, 'a') as f:
f.write('<head><link rel=stylesheet type="text/css" href="potato_report.css"></head>')
print_title('POTATO Feedback')
print('%s%s%s%s' % ('ID'.ljust(5), 'Metric'.ljust(20), 'Value'.ljust(10), 'Reference-Value'.ljust(30) ) )
for i in range(len(df_report)):
metric = df_report.iloc[i]['Metric']
if metric != 'hybrid_suggestion':
value = df_report.iloc[i]['Value']
ref_value = df_report.iloc[i]['ReferenceValue']
print('%s%s%s%s' % (str(i).ljust(5), metric.ljust(20), ('%.3lf'%value).ljust(20), str(ref_value).ljust(30)))
print('\n')
print_hint('General Suggestions:')
for i in range(len(df_report)):
metric = df_report.iloc[i]['Metric']
if metric != 'hybrid_suggestion':
suggestion = df_report.iloc[i]['Suggestion']
print('%d. %s' % (i, suggestion))
print('\n')
print_hint('Framework-specific Optimization Suggestions:')
for i in range(len(df_report)):
metric = df_report.iloc[i]['Metric']
if metric == 'hybrid_suggestion':
suggestion = df_report.iloc[i]['Suggestion']
print('%d. %s' % (i, suggestion))
#print(df_report[['Metric', 'Value', 'Reference Value']])
#print(df_report[['Suggestion']])
#print('Tag of optimal image recommended from POTATO: ' + highlight(docker_image))
print('\n')
print_hint('Please re-launch KubeFlow Jupyter-notebook to have suggested images or resources if necessary.')
sofa_home = os.path.dirname(os.path.realpath(__file__))
subprocess.Popen(
['bash', '-c', 'cp %s/../sofaboard/* %s;' % (sofa_home, cfg.logdir)])
subprocess.Popen(['sleep', '2'])
print('\n\n')
print('Complete!!')
def cluster_analyze(cfg):
if cfg.verbose:
print_title('Cluster Network Profiling :')
cluster = cfg.cluster_ip.split(',')
summary_net = pd.DataFrame([], columns=['Source', 'Destination', 'Amount', 'Percentage of a Node'])
summary_compute = pd.DataFrame([], columns=['gpu_sm_util','gpu_mem_util','cpu_util'])
summary_band = pd.DataFrame([], columns=['Q1', 'Q2', 'Q3', 'Avg'])
all = []
for i, ip in enumerate(cluster):
features = pd.DataFrame({'name':['elapsed_time'],
'value':[cfg.elapsed_time]},
columns=['name','value'])
node = 'node ' + str(i)
if cfg.verbose:
print('node ' + str(i) + ' is ' + ip)
logdir = tmp_dir[0:-1] + '-' + ip + '/'
filein_net = logdir + "nettrace.csv"
filein_mpstat = logdir + "mpstat.csv"
filein_nvsmi = logdir + "nvsmi_trace.csv"
filein_bandwidth = logdir + "netstat.csv"
with open(logdir+'/misc.txt') as f:
lines = f.readlines()
elapsed_time = float(lines[0].split()[1])
vcores = int(lines[2].split()[1])
cfg.elapsed_time = float(lines[0].split()[1])
try:
df_net = pd.read_csv(filein_net)
features = net_profile(logdir, cfg, df_net, features)
except IOError as e:
df_net = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_net)
try:
df_mpstat = pd.read_csv(filein_mpstat)
features = mpstat_profile(logdir, cfg, df_mpstat, features)
except IOError as e:
df_mpstat = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_mpstat)
try:
df_nvsmi = pd.read_csv(filein_nvsmi)
features = nvsmi_profile(logdir, cfg, df_nvsmi, features)
except IOError:
print_warning(cfg, "nvsmi_trace.csv is not found")
try:
df_bandwidth = pd.read_csv(filein_bandwidth)
features = netbandwidth_profile(logdir, cfg, df_bandwidth, features)
except IOError as e:
df_bandwidth = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_bandwidth)
sm = int(features[features['name'] == 'gpu_sm_util']['value'])
mem = int(features[features['name'] == 'gpu_mem_util']['value'])
cpu = int(features[features['name'] == 'cpu_util']['value'])
sm_mem_cpu = [sm, mem, cpu]
compute_tmp = pd.DataFrame([sm_mem_cpu], columns = ['gpu_sm_util', 'gpu_mem_util', 'cpu_util'])
summary_compute = pd.concat([summary_compute, pd.concat([compute_tmp], keys=[node])])
net_tmp = pd.read_csv(logdir + "netrank.csv")
summary_net = pd.concat([summary_net, pd.concat([net_tmp], keys=[node])])
# for bandwidth report
tx = df_bandwidth['event'] == float(0)
rx = df_bandwidth['event'] == float(1)
tx_tmp = [convertbytes(df_bandwidth[tx]['bandwidth'].quantile(0.25)),
convertbytes(df_bandwidth[tx]['bandwidth'].quantile(0.5)),
convertbytes(df_bandwidth[tx]['bandwidth'].quantile(0.75)),
convertbytes(df_bandwidth[tx]['bandwidth'].mean())]
rx_tmp = [convertbytes(df_bandwidth[rx]['bandwidth'].quantile(0.25)),
convertbytes(df_bandwidth[rx]['bandwidth'].quantile(0.5)),
convertbytes(df_bandwidth[rx]['bandwidth'].quantile(0.75)),
convertbytes(df_bandwidth[rx]['bandwidth'].mean())]
band_tmp = pd.DataFrame([tx_tmp], columns = ['Q1', 'Q2', 'Q3', 'Avg'], index = ['tx'])
rx_pd = pd.DataFrame([rx_tmp], columns = ['Q1', 'Q2', 'Q3', 'Avg'], index = ['rx'])
band_tmp = pd.concat([band_tmp, rx_pd])
summary_band = pd.concat([summary_band, pd.concat([band_tmp], keys=[node])])
if cfg.verbose:
with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also
print('Ranked Network Traffic : \n', summary_net, '\n')
print('Cluster Bandwidth Quartile: \n', summary_band)
print_title('Cluster Computation Profiling:')
print(summary_compute)
| apache-2.0 | 536,236,055,129,862,100 | 43.556728 | 259 | 0.541718 | false |
kenmcc/mypywws | src/pywws/Process.py | 1 | 29489 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pywws - Python software for USB Wireless Weather Stations
# http://github.com/jim-easterbrook/pywws
# Copyright (C) 2008-14 Jim Easterbrook [email protected]
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Generate hourly, daily & monthly summaries of raw weather station
data
::
%s
This module takes raw weather station data (typically sampled every
five or ten minutes) and generates hourly, daily and monthly summary
data, which is useful when creating tables and graphs.
Before computing the data summaries, raw data is "calibrated" using a
user-programmable function. See :doc:`pywws.calib` for details.
The hourly data is derived from all the records in one hour, e.g. from
18:00:00 to 18:59:59, and is given the index of the last complete
record in that hour.
The daily data summarises the weather over a 24 hour period typically
ending at 2100 or 0900 hours, local (non DST) time, though midnight is
another popular convention. It is also indexed by the last complete
record in the period. Daytime and nighttime, as used when computing
maximum and minimum temperatures, are assumed to start at 0900 and
2100 local time, or 1000 and 2200 when DST is in effect, regardless of
the meteorological day.
To adjust the meteorological day to your preference, or that used by
your local official weather station, edit the "day end hour" line in
your ``weather.ini`` file, then run Reprocess.py to regenerate the
summaries.
Monthly summary data is computed from the daily summary data. If the
meteorological day does not end at midnight, then each month may begin
and end up to 12 hours before or after midnight.
Wind speed data is averaged over the hour (or day) and the maximum
gust speed during the hour (or day) is recorded. The predominant wind
direction is calculated using vector arithmetic.
Rainfall is converted from the raw "total since last reset" figure to
a more useful total in the last hour, day or month.
"""
from __future__ import absolute_import
__docformat__ = "restructuredtext en"
__usage__ = """
usage: python -m pywws.Process [options] data_dir
options are:
-h or --help display this help
-v or --verbose increase number of informative messages
data_dir is the root directory of the weather data
"""
__doc__ %= __usage__
__usage__ = __doc__.split('\n')[0] + __usage__
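
# A minimal sketch of driving the processing from Python rather than the
# command line (the data directory path is illustrative); this mirrors the
# calls made by main() at the bottom of this module:
#
#   from pywws import DataStore, Process
#   data_dir = '/home/pi/weather/data'
#   Process.Process(DataStore.params(data_dir),
#                   DataStore.data_store(data_dir),
#                   DataStore.calib_store(data_dir),
#                   DataStore.hourly_store(data_dir),
#                   DataStore.daily_store(data_dir),
#                   DataStore.monthly_store(data_dir))
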
from collections import deque
from datetime import date, datetime, timedelta
import getopt
import logging
import math
import os
import sys
from .calib import Calib
from . import DataStore
from .Logger import ApplicationLogger
from .TimeZone import STDOFFSET, HOUR
SECOND = timedelta(seconds=1)
TIME_ERR = timedelta(seconds=45)
MINUTEx5 = timedelta(minutes=5)
HOURx3 = timedelta(hours=3)
DAY = timedelta(hours=24)
WEEK = timedelta(days=7)
class Average(object):
"""Compute average of multiple data values."""
def __init__(self):
self.acc = 0.0
self.count = 0
def add(self, value):
if value is None:
return
self.acc += value
self.count += 1
def result(self):
if self.count == 0:
return None
return self.acc / float(self.count)
class Minimum(object):
"""Compute minimum value and timestamp of multiple data values."""
def __init__(self):
self.value = None
self.time = None
def add(self, value, time):
if not self.time or value <= self.value:
self.value = value
self.time = time
def result(self):
if self.time:
return self.value, self.time
return None, None
class Maximum(object):
"""Compute maximum value and timestamp of multiple data values."""
def __init__(self):
self.value = None
self.time = None
def add(self, value, time):
if not self.time or value > self.value:
self.value = value
self.time = time
def result(self):
if self.time:
return self.value, self.time
return None, None
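
# look-up tables of sine and cosine for the 16 compass points (22.5 degree
# steps), used by WindFilter to turn speed + direction samples into vectors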
sin_LUT = map(
lambda x: math.sin(math.radians(float(x * 360) / 16.0)), range(16))
cos_LUT = map(
lambda x: math.cos(math.radians(float(x * 360) / 16.0)), range(16))
class WindFilter(object):
"""Compute average wind speed and direction.
    The wind speed and direction of each data item are converted to a
vector before averaging, so the result reflects the dominant wind
direction during the time period covered by the data.
Setting the ``decay`` parameter converts the filter from a simple
averager to one where the most recent sample carries the highest
weight, and earlier samples have a lower weight according to how
long ago they were.
This process is an approximation of "exponential smoothing". See
`Wikipedia <http://en.wikipedia.org/wiki/Exponential_smoothing>`_
for a detailed discussion.
The parameter ``decay`` corresponds to the value ``(1 - alpha)``
in the Wikipedia description. Because the weather data being
smoothed may not be at regular intervals this parameter is the
decay over 5 minutes. Weather data at other intervals will have
its weight scaled accordingly.
The return value is a (speed, direction) tuple.
:param decay: filter coefficient decay rate.
:type decay: float
:rtype: (float, float)
"""
def __init__(self, decay=1.0):
self.decay = decay
self.Ve = 0.0
self.Vn = 0.0
self.total = 0.0
self.weight = 1.0
self.total_weight = 0.0
self.last_idx = None
def add(self, data):
direction = data['wind_dir']
speed = data['wind_ave']
if direction is None or speed is None:
return
if self.last_idx and self.decay != 1.0:
interval = data['idx'] - self.last_idx
assert interval.days == 0
decay = self.decay
if interval != MINUTEx5:
decay = decay ** (float(interval.seconds) /
float(MINUTEx5.seconds))
self.weight = self.weight / decay
self.last_idx = data['idx']
speed = speed * self.weight
if isinstance(direction, int):
self.Ve -= speed * sin_LUT[direction]
self.Vn -= speed * cos_LUT[direction]
else:
direction = math.radians(float(direction) * 22.5)
self.Ve -= speed * math.sin(direction)
self.Vn -= speed * math.cos(direction)
self.total += speed
self.total_weight += self.weight
def result(self):
if self.total_weight == 0.0:
return (None, None)
return (self.total / self.total_weight,
(math.degrees(math.atan2(self.Ve, self.Vn)) + 180.0) / 22.5)
class HourAcc(object):
"""'Accumulate' raw weather data to produce hourly summary.
Compute average wind speed and maximum wind gust, find dominant
wind direction and compute total rainfall.
"""
def __init__(self, last_rain):
self.logger = logging.getLogger('pywws.Process.HourAcc')
self.last_rain = last_rain
self.copy_keys = ['idx', 'hum_in', 'temp_in', 'hum_out', 'temp_out',
                          'abs_pressure', 'rel_pressure', 'temp_bedroom',
                          'temp_kitchen', 'temp_bed2']
self.reset()
def reset(self):
self.wind_fil = WindFilter()
self.wind_gust = (-2.0, None)
self.rain = 0.0
self.retval = {'idx' : None, 'temp_out' : None}
def add_raw(self, data):
idx = data['idx']
self.wind_fil.add(data)
wind_gust = data['wind_gust']
if wind_gust is not None and wind_gust > self.wind_gust[0]:
self.wind_gust = (wind_gust, idx)
rain = data['rain']
if rain is not None:
if self.last_rain is not None:
diff = rain - self.last_rain
if diff < -0.001:
self.logger.warning(
'%s rain reset %.1f -> %.1f', str(idx), self.last_rain, rain)
elif diff > float(data['delay'] * 5):
# rain exceeds 5mm / minute, assume corrupt data and ignore it
self.logger.warning(
'%s rain jump %.1f -> %.1f', str(idx), self.last_rain, rain)
else:
self.rain += max(0.0, diff)
self.last_rain = rain
# copy some current readings
if 'illuminance' in data and not 'illuminance' in self.copy_keys:
self.copy_keys.append('illuminance')
self.copy_keys.append('uv')
# if near the end of the hour, ignore 'lost contact' readings
if (data['idx'].minute < 45 or data['temp_out'] is not None or
self.retval['temp_out'] is None):
for key in self.copy_keys:
if key in data:
self.retval[key] = data[key]
def result(self):
if not self.retval['idx']:
return None
self.retval['wind_ave'], self.retval['wind_dir'] = self.wind_fil.result()
if self.wind_gust[1]:
self.retval['wind_gust'] = self.wind_gust[0]
else:
self.retval['wind_gust'] = None
self.retval['rain'] = self.rain
return self.retval
class DayAcc(object):
"""'Accumulate' weather data to produce daily summary.
Compute average wind speed, maximum wind gust and daytime max &
nighttime min temperatures, find dominant wind direction and
compute total rainfall.
Daytime is assumed to be 0900-2100 and nighttime to be 2100-0900,
local time (1000-2200 and 2200-1000 during DST), regardless of the
"day end hour" setting.
"""
def __init__(self):
self.logger = logging.getLogger('pywws.Process.DayAcc')
self.has_illuminance = False
self.ave = {}
self.max = {}
self.min = {}
self.reset()
def reset(self):
self.wind_fil = WindFilter()
self.wind_gust = (-1.0, None)
self.rain = 0.0
for i in ('temp_in', 'temp_out', 'hum_in', 'hum_out',
'abs_pressure', 'rel_pressure', 'temp_bedroom', 'temp_kitchen', "temp_bed2"):
self.ave[i] = Average()
self.max[i] = Maximum()
self.min[i] = Minimum()
for i in ('illuminance', 'uv'):
self.ave[i] = Average()
self.max[i] = Maximum()
self.retval = dict()
def add_raw(self, data):
idx = data['idx']
local_hour = (idx + STDOFFSET).hour
wind_gust = data['wind_gust']
if wind_gust is not None and wind_gust > self.wind_gust[0]:
self.wind_gust = (wind_gust, idx)
for i in ('temp_in', 'temp_out', 'temp_bedroom', 'temp_kitchen', "temp_bed2"):
            try:
                temp = data[i]
            except KeyError:
                # records without this sensor column are treated as zero
                temp = 0
if temp is not None:
self.ave[i].add(temp)
if local_hour >= 9 and local_hour < 21:
# daytime max temperature
self.max[i].add(temp, idx)
else:
# nighttime min temperature
self.min[i].add(temp, idx)
for i in ('hum_in', 'hum_out', 'abs_pressure', 'rel_pressure'):
if i in data:
value = data[i]
if value is not None:
self.ave[i].add(value)
self.max[i].add(value, idx)
self.min[i].add(value, idx)
if 'illuminance' in data:
self.has_illuminance = True
for i in ('illuminance', 'uv'):
if i in data:
value = data[i]
if value is not None:
self.ave[i].add(value)
self.max[i].add(value, idx)
def add_hourly(self, data):
self.wind_fil.add(data)
rain = data['rain']
if rain is not None:
self.rain += rain
self.retval['idx'] = data['idx']
def result(self):
if not self.retval:
return None
self.retval['wind_ave'], self.retval['wind_dir'] = self.wind_fil.result()
if self.wind_gust[1]:
self.retval['wind_gust'] = self.wind_gust[0]
else:
self.retval['wind_gust'] = None
self.retval['wind_gust_t'] = self.wind_gust[1]
self.retval['rain'] = self.rain
for i in ('temp_in', 'temp_out', 'hum_in', 'hum_out',
'abs_pressure', 'rel_pressure', 'temp_bedroom', 'temp_kitchen', "temp_bed2"):
self.retval['%s_ave' % i] = self.ave[i].result()
(self.retval['%s_max' % i],
self.retval['%s_max_t' % i]) = self.max[i].result()
(self.retval['%s_min' % i],
self.retval['%s_min_t' % i]) = self.min[i].result()
if self.has_illuminance:
for i in ('illuminance', 'uv'):
self.retval['%s_ave' % i] = self.ave[i].result()
(self.retval['%s_max' % i],
self.retval['%s_max_t' % i]) = self.max[i].result()
return self.retval
class MonthAcc(object):
"""'Accumulate' daily weather data to produce monthly summary.
Compute daytime max & nighttime min temperatures.
"""
def __init__(self, rain_day_threshold):
self.rain_day_threshold = rain_day_threshold
self.has_illuminance = False
self.ave = {}
self.min = {}
self.max = {}
self.min_lo = {}
self.min_hi = {}
self.min_ave = {}
self.max_lo = {}
self.max_hi = {}
self.max_ave = {}
self.reset()
def reset(self):
for i in ('temp_in', 'temp_out', 'temp_bedroom', 'temp_kitchen', "temp_bed2"):
self.ave[i] = Average()
self.min_lo[i] = Minimum()
self.min_hi[i] = Maximum()
self.min_ave[i] = Average()
self.max_lo[i] = Minimum()
self.max_hi[i] = Maximum()
self.max_ave[i] = Average()
for i in ('hum_in', 'hum_out', 'abs_pressure', 'rel_pressure'):
self.ave[i] = Average()
self.max[i] = Maximum()
self.min[i] = Minimum()
for i in ('illuminance', 'uv'):
self.ave[i] = Average()
self.max_lo[i] = Minimum()
self.max_hi[i] = Maximum()
self.max_ave[i] = Average()
self.wind_fil = WindFilter()
self.wind_gust = (-1.0, None)
self.rain = 0.0
self.rain_days = 0
self.valid = False
def add_daily(self, data):
self.idx = data['idx']
for i in ('temp_in', 'temp_out', 'temp_bedroom', 'temp_kitchen', "temp_bed2"):
try:
temp = data['%s_ave' % i]
            except KeyError:
temp = 0
if temp is not None:
self.ave[i].add(temp)
try:
temp = data['%s_min' % i]
            except KeyError:
temp = 0
if temp is not None:
try:
self.min_lo[i].add(temp, data['%s_min_t' % i])
                except KeyError:
self.min_lo[i].add(temp, 0)
try:
self.min_hi[i].add(temp, data['%s_min_t' % i])
                except KeyError:
self.min_hi[i].add(temp, 0)
self.min_ave[i].add(temp)
try:
temp = data['%s_max' % i]
            except KeyError:
temp = 0
if temp is not None:
try:
self.max_lo[i].add(temp, data['%s_max_t' % i])
                except KeyError:
self.max_lo[i].add(temp, 0)
try:
self.max_hi[i].add(temp, data['%s_max_t' % i])
                except KeyError:
self.max_hi[i].add(temp, 0)
self.max_ave[i].add(temp)
for i in ('hum_in', 'hum_out', 'abs_pressure', 'rel_pressure'):
value = data['%s_ave' % i]
if value is not None:
self.ave[i].add(value)
value = data['%s_min' % i]
if value is not None:
self.min[i].add(value, data['%s_min_t' % i])
value = data['%s_max' % i]
if value is not None:
self.max[i].add(value, data['%s_max_t' % i])
self.wind_fil.add(data)
wind_gust = data['wind_gust']
if wind_gust is not None and wind_gust > self.wind_gust[0]:
self.wind_gust = (wind_gust, data['wind_gust_t'])
if 'illuminance_ave' in data:
self.has_illuminance = True
for i in ('illuminance', 'uv'):
value = data['%s_ave' % i]
if value is not None:
self.ave[i].add(value)
value = data['%s_max' % i]
if value is not None:
self.max_lo[i].add(value, data['%s_max_t' % i])
self.max_hi[i].add(value, data['%s_max_t' % i])
self.max_ave[i].add(value)
self.rain += data['rain']
if data['rain'] >= self.rain_day_threshold:
self.rain_days += 1
self.valid = True
def result(self):
if not self.valid:
return None
result = {}
result['idx'] = self.idx
result['rain'] = self.rain
result['rain_days'] = self.rain_days
for i in ('temp_in', 'temp_out', 'temp_bedroom', 'temp_kitchen', "temp_bed2"):
result['%s_ave' % i] = self.ave[i].result()
result['%s_min_ave' % i] = self.min_ave[i].result()
(result['%s_min_lo' % i],
result['%s_min_lo_t' % i]) = self.min_lo[i].result()
(result['%s_min_hi' % i],
result['%s_min_hi_t' % i]) = self.min_hi[i].result()
result['%s_max_ave' % i] = self.max_ave[i].result()
(result['%s_max_lo' % i],
result['%s_max_lo_t' % i]) = self.max_lo[i].result()
(result['%s_max_hi' % i],
result['%s_max_hi_t' % i]) = self.max_hi[i].result()
for i in ('hum_in', 'hum_out', 'abs_pressure', 'rel_pressure'):
result['%s_ave' % i] = self.ave[i].result()
(result['%s_max' % i],
result['%s_max_t' % i]) = self.max[i].result()
(result['%s_min' % i],
result['%s_min_t' % i]) = self.min[i].result()
result['wind_ave'], result['wind_dir'] = self.wind_fil.result()
if self.wind_gust[1]:
result['wind_gust'] = self.wind_gust[0]
else:
result['wind_gust'] = None
result['wind_gust_t'] = self.wind_gust[1]
if self.has_illuminance:
for i in ('illuminance', 'uv'):
result['%s_ave' % i] = self.ave[i].result()
result['%s_max_ave' % i] = self.max_ave[i].result()
(result['%s_max_lo' % i],
result['%s_max_lo_t' % i]) = self.max_lo[i].result()
(result['%s_max_hi' % i],
result['%s_max_hi_t' % i]) = self.max_hi[i].result()
return result
def calibrate_data(logger, params, raw_data, calib_data):
"""'Calibrate' raw data, using a user-supplied function."""
start = calib_data.before(datetime.max)
if start is None:
start = datetime.min
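    # re-start calibration from the last raw record at or before the most
    # recent calibrated entry, so that record is recalculated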
before = raw_data.before(start)
    start = raw_data.after(start)  # (was: raw_data.after(start + SECOND))
if start is None and before is None:
return start
else:
start = before
del calib_data[start:]
calibrator = Calib(params, raw_data)
count = 0
for data in raw_data[start:]:
idx = data['idx']
count += 1
if count % 10000 == 0:
logger.info("calib: %s", idx.isoformat(' '))
elif count % 500 == 0:
logger.debug("calib: %s", idx.isoformat(' '))
calib_data[idx] = calibrator.calib(data)
return start
def generate_hourly(logger, calib_data, hourly_data, process_from):
"""Generate hourly summaries from calibrated data."""
start = hourly_data.before(datetime.max)
if start is None:
start = datetime.min
start = calib_data.after(start + SECOND)
if process_from:
if start:
start = min(start, process_from)
else:
start = process_from
if start is None:
return start
# set start of hour in local time (not all time offsets are integer hours)
start += STDOFFSET + timedelta(minutes=5)
start = start.replace(minute=0, second=0)
start -= STDOFFSET
#del hourly_data[start:]
# preload pressure history, and find last valid rain
prev = None
pressure_history = deque()
last_rain = None
for data in calib_data[start - HOURx3:start]:
if data['rel_pressure']:
pressure_history.append((data['idx'], data['rel_pressure']))
if data['rain'] is not None:
last_rain = data['rain']
prev = data
# iterate over data in one hour chunks
stop = calib_data.before(datetime.max)
hour_start = start
acc = HourAcc(last_rain)
count = 0
while hour_start <= stop:
count += 1
if count % 1008 == 0:
logger.info("hourly: %s", hour_start.isoformat(' '))
elif count % 24 == 0:
logger.debug("hourly: %s", hour_start.isoformat(' '))
hour_end = hour_start + HOUR
acc.reset()
for data in calib_data[hour_start:hour_end]:
if data['rel_pressure']:
pressure_history.append((data['idx'], data['rel_pressure']))
if prev:
err = data['idx'] - prev['idx']
#if abs(err - timedelta(minutes=data['delay'])) > TIME_ERR:
# logger.info('unexpected data interval %s %s',
# data['idx'].isoformat(' '), str(err))
acc.add_raw(data)
prev = data
new_data = acc.result()
if new_data and new_data['idx'].minute >= 1: # was 9
# compute pressure trend
new_data['pressure_trend'] = None
if new_data['rel_pressure']:
target = new_data['idx'] - HOURx3
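                    # discard history entries until the head is the reading
                    # closest to three hours before the new record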
while (len(pressure_history) >= 2 and
abs(pressure_history[0][0] - target) >
abs(pressure_history[1][0] - target)):
pressure_history.popleft()
if (pressure_history and
abs(pressure_history[0][0] - target) < HOUR):
new_data['pressure_trend'] = (
new_data['rel_pressure'] - pressure_history[0][1])
# store new hourly data
            t = new_data['idx']  # + timedelta(minutes=5)
            # round up to the next hour
            t = t + timedelta(minutes=60)
            t = t.replace(minute=0, second=0)
            logger.debug("hourly: new record indexed at %s", t.isoformat(' '))
new_data['idx'] = t
hourly_data[t] = new_data
hour_start = hour_end
return start
def generate_daily(logger, day_end_hour,
calib_data, hourly_data, daily_data, process_from):
"""Generate daily summaries from calibrated and hourly data."""
start = daily_data.before(datetime.max)
if start is None:
start = datetime.min
start = calib_data.after(start + SECOND)
if process_from:
if start:
start = min(start, process_from)
else:
start = process_from
if start is None:
return start
# round to start of this day, in local time
start += STDOFFSET
if start.hour < day_end_hour:
start = start - DAY
start = start.replace(hour=day_end_hour, minute=0, second=0)
start -= STDOFFSET
del daily_data[start:]
stop = calib_data.before(datetime.max)
day_start = start
acc = DayAcc()
count = 0
while day_start <= stop:
count += 1
if count % 30 == 0:
logger.info("daily: %s", day_start.isoformat(' '))
else:
logger.debug("daily: %s", day_start.isoformat(' '))
day_end = day_start + DAY
acc.reset()
for data in calib_data[day_start:day_end]:
acc.add_raw(data)
for data in hourly_data[day_start:day_end]:
acc.add_hourly(data)
new_data = acc.result()
if new_data:
new_data['start'] = day_start
daily_data[new_data['idx']] = new_data
day_start = day_end
return start
def generate_monthly(logger, rain_day_threshold, day_end_hour,
daily_data, monthly_data, process_from):
"""Generate monthly summaries from daily data."""
start = monthly_data.before(datetime.max)
if start is None:
start = datetime.min
start = daily_data.after(start + SECOND)
if process_from:
if start:
start = min(start, process_from)
else:
start = process_from
if start is None:
return start
# set start to start of first day of month (local time)
start += STDOFFSET
start = start.replace(day=1, hour=day_end_hour, minute=0, second=0)
if day_end_hour >= 12:
# month actually starts on the last day of previous month
start -= DAY
start -= STDOFFSET
del monthly_data[start:]
stop = daily_data.before(datetime.max)
month_start = start
acc = MonthAcc(rain_day_threshold)
count = 0
while month_start <= stop:
count += 1
if count % 12 == 0:
logger.info("monthly: %s", month_start.isoformat(' '))
else:
logger.debug("monthly: %s", month_start.isoformat(' '))
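        # Advance one calendar month: adding a week before bumping the month number
        # (and subtracting it again afterwards) keeps replace() off invalid dates and
        # maps a start on the last day of one month to the last day of the next.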
month_end = month_start + WEEK
if month_end.month < 12:
month_end = month_end.replace(month=month_end.month+1)
else:
month_end = month_end.replace(month=1, year=month_end.year+1)
month_end = month_end - WEEK
acc.reset()
for data in daily_data[month_start:month_end]:
acc.add_daily(data)
new_data = acc.result()
if new_data:
new_data['start'] = month_start
monthly_data[new_data['idx']] = new_data
month_start = month_end
return start
def Process(params,
raw_data, calib_data, hourly_data, daily_data, monthly_data):
"""Generate summaries from raw weather station data.
The meteorological day end (typically 2100 or 0900 local time) is
set in the preferences file ``weather.ini``. The default value is
2100 (2200 during DST), following the historical convention for
weather station readings.
"""
logger = logging.getLogger('pywws.Process')
logger.info('Generating summary data')
# get time of last record
last_raw = raw_data.before(datetime.max)
print "LAST RAW is ", last_raw
if last_raw is None:
raise IOError('No data found. Check data directory parameter.')
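    # Illustrative ``weather.ini`` entries for the params.get() calls below
    # (the values shown are just the defaults used here, not requirements):
    #
    #   [config]
    #   day end hour = 21
    #   rain day threshold = 0.2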
# get daytime end hour (in local time)
day_end_hour = eval(params.get('config', 'day end hour', '21')) % 24
# get other config
rain_day_threshold = eval(params.get('config', 'rain day threshold', '0.2'))
# calibrate raw data
start = calibrate_data(logger, params, raw_data, calib_data)
# generate hourly data
print "Generating hourly data from ", start
start = generate_hourly(logger, calib_data, hourly_data, start)
# generate daily data
start = generate_daily(logger, day_end_hour,
calib_data, hourly_data, daily_data, start)
# generate monthly data
generate_monthly(logger, rain_day_threshold, day_end_hour,
daily_data, monthly_data, start)
return 0
def main(argv=None):
if argv is None:
argv = sys.argv
try:
opts, args = getopt.getopt(argv[1:], "hv", ['help', 'verbose'])
except getopt.error, msg:
print >>sys.stderr, 'Error: %s\n' % msg
print >>sys.stderr, __usage__.strip()
return 1
# process options
verbose = 0
for o, a in opts:
if o in ('-h', '--help'):
print __usage__.strip()
return 0
elif o in ('-v', '--verbose'):
verbose += 1
# check arguments
if len(args) != 1:
print >>sys.stderr, 'Error: 1 argument required\n'
print >>sys.stderr, __usage__.strip()
return 2
logger = ApplicationLogger(verbose)
data_dir = args[0]
return Process(DataStore.params(data_dir),
DataStore.data_store(data_dir),
DataStore.calib_store(data_dir),
DataStore.hourly_store(data_dir),
DataStore.daily_store(data_dir),
DataStore.monthly_store(data_dir))
if __name__ == "__main__":
sys.exit(main())
| gpl-2.0 | -5,708,486,361,000,318,000 | 35.953634 | 101 | 0.555733 | false |
xolox/python-linux-utils | linux_utils/tabfile.py | 1 | 2307 | # linux-utils: Linux system administration tools for Python.
#
# Author: Peter Odding <[email protected]>
# Last Change: February 9, 2020
# URL: https://linux-utils.readthedocs.io
"""Generic parsing of Linux configuration files like ``/etc/fstab`` and ``/etc/crypttab``."""
# Standard library modules.
import re
# External dependencies.
from property_manager import PropertyManager, mutable_property
# Modules included in our package.
from linux_utils import coerce_context
# Public identifiers that require documentation.
__all__ = (
'TabFileEntry',
'parse_tab_file',
)
def parse_tab_file(filename, context=None, encoding='UTF-8'):
"""
Parse a Linux configuration file like ``/etc/fstab`` or ``/etc/crypttab``.
:param filename: The absolute pathname of the file to parse (a string).
:param context: See :func:`.coerce_context()` for details.
:param encoding: The name of the text encoding of the file (a string).
:returns: A generator of :class:`TabFileEntry` objects.
This function strips comments (the character ``#`` until the end of
the line) and splits each line into tokens separated by whitespace.
"""
context = coerce_context(context)
contents = context.read_file(filename).decode(encoding)
for line_number, line in enumerate(contents.splitlines(), start=1):
# Strip comments.
line = re.sub('#.*', '', line)
# Tokenize input.
tokens = line.split()
if tokens:
yield TabFileEntry(
context=context,
configuration_file=filename,
line_number=line_number,
tokens=tokens,
)
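# A minimal usage sketch (assumes a readable /etc/fstab and the default local
# execution context; the attributes used are defined on TabFileEntry below):
#
#   for entry in parse_tab_file('/etc/fstab'):
#       print(entry.line_number, entry.tokens)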
class TabFileEntry(PropertyManager):
"""Container for the results of :func:`parse_tab_file()`."""
@mutable_property
def context(self):
"""The execution context from which the configuration file was retrieved."""
@mutable_property
def configuration_file(self):
"""The name of the configuration file from which this entry was parsed (a string)."""
@mutable_property
def line_number(self):
"""The line number from which this entry was parsed (an integer)."""
@mutable_property
def tokens(self):
"""The tokens split on whitespace (a nonempty list of strings)."""
| mit | 3,480,510,046,944,304,600 | 31.492958 | 93 | 0.662765 | false |
sassoftware/rmake3 | rmake/worker/resolvesource.py | 1 | 30654 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
from conary.deps import deps
from conary.local import deptable
from conary.conaryclient import resolve
from conary.repository import trovesource
from rmake.lib import flavorutil
class TroveSourceMesh(trovesource.SearchableTroveSource):
def __init__(self, extraSource, mainSource, repos):
trovesource.SearchableTroveSource.__init__(self)
self.extraSource = extraSource
self.mainSource = mainSource
self.repos = repos
trovesource.SearchableTroveSource.__init__(self)
self.searchAsRepository()
for source in self.mainSource, self.repos, self.extraSource:
if not source:
continue
self._allowNoLabel = source._allowNoLabel
self._bestFlavor = source._bestFlavor
self._getLeavesOnly = source._getLeavesOnly
self._flavorCheck = source._flavorCheck
break
self.sources = [ self.extraSource]
if self.mainSource:
self.sources.append(self.mainSource)
if self.repos:
self.sources.append(self.repos)
def __getattr__(self, key):
if self.repos:
return getattr(self.repos, key)
return getattr(self.mainSource, key)
def getFileVersions(self, *args, **kw):
if self.repos:
return self.repos.getFileVersions(*args, **kw)
return self.mainSource.getFileVersions(*args, **kw)
def close(self):
pass
def hasTroves(self, troveList):
if self.repos:
results = self.repos.hasTroves(troveList)
if isinstance(results, dict):
results = [ results[x] for x in troveList ]
else:
results = [ False for x in troveList ]
if self.extraSource:
hasTroves = self.extraSource.hasTroves(troveList)
results = [ x[0] or x[1] for x in itertools.izip(results,
hasTroves) ]
if self.mainSource:
hasTroves = self.mainSource.hasTroves(troveList)
results = [ x[0] or x[1] for x in itertools.izip(results,
hasTroves) ]
return dict(itertools.izip(troveList, results))
def trovesByName(self, name):
if self.mainSource:
return list(set(self.mainSource.trovesByName(name))
| set(self.extraSource.trovesByName(name)))
else:
return self.extraSource.trovesByName(name)
def getTroves(self, troveList, *args, **kw):
if self.repos:
return self.repos.getTroves(troveList, *args, **kw)
else:
return self.mainSource.getTroves(troveList, *args, **kw)
def _mergeTroveQuery(self, resultD, response):
if isinstance(resultD, dict):
for troveName, troveVersions in response.iteritems():
if not resultD.has_key(troveName):
resultD[troveName] = {}
versionDict = resultD[troveName]
for version, flavors in troveVersions.iteritems():
if version not in versionDict:
versionDict[version] = []
resultD[troveName][version].extend(flavors)
else:
if not resultD:
for resultList in response:
resultD.append(list(resultList))
else:
for idx, resultList in enumerate(response):
resultD[idx].extend(resultList)
return resultD
def _mergeListTroveQuery(self, resultList, result2, altFlavors, altFlavors2,
map, query):
newMap = []
newQuery = []
for idx, items in enumerate(result2):
if not items:
newMap.append(map[idx])
newQuery.append(query[idx])
if altFlavors2:
altFlavors[map[idx]].extend(altFlavors2[idx])
else:
resultList[map[idx]].extend(items)
altFlavors[map[idx]] = []
        return newQuery, newMap
def _call(self, fn, query, *args, **kw):
if not isinstance(query, dict):
query = list(query)
result, altFlavors = getattr(self.extraSource, fn)(query,
*args, **kw)
map = []
newQuery = []
for idx, item in enumerate(result):
if not item:
map.append(idx)
newQuery.append(query[idx])
if self.mainSource:
result2, altFlavors2 = getattr(self.mainSource, fn)(newQuery,
*args, **kw)
newQuery, map = self._mergeListTroveQuery(result, result2,
altFlavors,
altFlavors2,
map, newQuery)
if self.repos:
result3, altFlavors3 = getattr(self.repos, fn)(newQuery,
*args, **kw)
newQuery, map = self._mergeListTroveQuery(result, result3,
altFlavors,
altFlavors3,
map, newQuery)
result = result, altFlavors
else:
query = dict(query)
d1 = getattr(self.extraSource, fn)(query, *args, **kw)
result = {}
self._mergeTroveQuery(result, d1)
for name in result:
query.pop(name)
if self.mainSource:
d2 = getattr(self.mainSource, fn)(query, *args, **kw)
self._mergeTroveQuery(result, d2)
if self.repos:
d3 = getattr(self.repos, fn)(query, *args, **kw)
self._mergeTroveQuery(result, d3)
return result
def _addLabelsToQuery(self, query):
if isinstance(query, dict):
newQuery = query.copy()
names = query
for name in query:
labels = set(x[1].trailingLabel() for x in
self.extraSource.trovesByName(name))
#asserts there is only one flavorList
flavorList, = set(x and tuple(x) for x in query[name].values())
for label in labels:
if label not in query[name]:
newQuery[name][label] = flavorList
map = None
else:
map = {}
newQuery = list(query)
names = [(x[0], x[1][0], x[1][2]) for x in enumerate(query)]
for idx, name, flavor in names:
labels = set(x[1].trailingLabel() for x in
self.extraSource.trovesByName(name))
for label in labels:
map[len(newQuery)] = idx
newQuery.append((name, label, flavor))
return newQuery, map
def _compressResults(self, results, map):
if map is None:
return results
results, altFlavors = results
finalResults = []
for idx, result in enumerate(results):
if idx in map:
if result:
finalResults[map[idx]].extend(result)
altFlavors[map[idx]] = []
else:
altFlavors[map[idx]].extend(altFlavors)
else:
finalResults.append(result)
return finalResults, altFlavors
def getTroveLatestByLabel(self, query, *args, **kw):
map = None
if self.expandLabelQueries:
query, map = self._addLabelsToQuery(query)
results = self._call('getTroveLatestByLabel', query, *args, **kw)
return self._compressResults(results, map)
def getTroveLeavesByLabel(self, query, *args, **kw):
map = None
if self.expandLabelQueries:
query, map = self._addLabelsToQuery(query)
results = self._call('getTroveLeavesByLabel', query, *args, **kw)
return self._compressResults(results, map)
def getTroveVersionsByLabel(self, query, *args, **kw):
map = None
if self.expandLabelQueries:
query, map = self._addLabelsToQuery(query)
results = self._call('getTroveVersionsByLabel', query, *args, **kw)
return self._compressResults(results, map)
def getTroveLeavesByBranch(self, query, *args, **kw):
return self._call('getTroveLeavesByBranch', query, *args, **kw)
def getTroveVersionsByBranch(self, query, *args, **kw):
return self._call('getTroveVersionsByBranch', query, *args, **kw)
def getTroveVersionFlavors(self, query, *args, **kw):
return self._call('getTroveVersionFlavors', query, *args, **kw)
def findTroves(self, labelPath, troveSpecs, defaultFlavor=None,
acrossLabels=False, acrossFlavors=False,
affinityDatabase=None, allowMissing=False,
bestFlavor=None, getLeaves=None,
troveTypes=trovesource.TROVE_QUERY_PRESENT,
exactFlavors=False,
**kw):
if self.mainSource is None:
return trovesource.SearchableTroveSource.findTroves(self,
labelPath, troveSpecs,
defaultFlavor=defaultFlavor,
acrossLabels=acrossLabels,
acrossFlavors=acrossFlavors,
affinityDatabase=affinityDatabase,
troveTypes=troveTypes,
exactFlavors=exactFlavors,
allowMissing=True,
**kw)
results = {}
if bestFlavor is not None:
kw.update(bestFlavor=bestFlavor)
if getLeaves is not None:
kw.update(getLeaves=getLeaves)
for source in self.sources:
if source == self.repos:
# we need the labelPath for repos, otherwise
# we allow other algorithms to determine which
# version of a particular trove to use - the same ones
# used during dep resolution. Sometimes this will not
# be a package on the ILP.
searchLabelPath = labelPath
else:
searchLabelPath = None
foundTroves = source.findTroves(searchLabelPath, troveSpecs,
defaultFlavor=defaultFlavor,
acrossLabels=acrossLabels,
acrossFlavors=acrossFlavors,
affinityDatabase=affinityDatabase,
troveTypes=troveTypes,
exactFlavors=exactFlavors,
allowMissing=True,
**kw)
for troveSpec, troveTups in foundTroves.iteritems():
results.setdefault(troveSpec, []).extend(troveTups)
if not allowMissing:
for troveSpec in troveSpecs:
assert(troveSpec in results)
return results
def resolveDependencies(self, label, depList, *args, **kw):
sugg = self.extraSource.resolveDependencies(label, depList, *args, **kw)
sugg2 = self.repos.resolveDependencies(label, depList, *args, **kw)
for depSet, trovesByDep in sugg.iteritems():
for idx, troveList in enumerate(trovesByDep):
if not troveList:
troveList.extend(sugg2[depSet][idx])
return sugg
def resolveDependenciesByGroups(self, troveList, depList):
sugg = self.extraSource.resolveDependencies(None, depList)
sugg2 = self.repos.resolveDependenciesByGroups(troveList, depList)
for depSet, trovesByDep in sugg.iteritems():
for idx, troveList in enumerate(trovesByDep):
if not troveList:
troveList.extend(sugg2[depSet][idx])
return sugg
class DepHandlerSource(TroveSourceMesh):
def __init__(self, builtTroveSource, troveListList, repos=None,
useInstallLabelPath=True, expandLabelQueries=False):
if repos:
flavorPrefs = repos._flavorPreferences
else:
flavorPrefs = []
stack = trovesource.TroveSourceStack()
stack.searchWithFlavor()
stack.setFlavorPreferenceList(flavorPrefs)
self.setFlavorPreferenceList(flavorPrefs)
self.expandLabelQueries = expandLabelQueries
self.resolveTroveSource = None
if isinstance(troveListList, trovesource.SimpleTroveSource):
troveListList.setFlavorPreferenceList(flavorPrefs)
            stack.addSource(troveListList)
self.resolveTroveSource = troveListList
else:
if troveListList:
for troveList in troveListList:
allTroves = [ x.getNameVersionFlavor() for x in troveList ]
childTroves = itertools.chain(*
(x.iterTroveList(weakRefs=True,
strongRefs=True)
for x in troveList))
allTroves.extend(childTroves)
source = trovesource.SimpleTroveSource(allTroves)
source.searchWithFlavor()
source.setFlavorPreferenceList(flavorPrefs)
stack.addSource(source)
self.resolveTroveSource = stack
if not useInstallLabelPath:
repos = None
if not stack.sources:
stack = None
TroveSourceMesh.__init__(self, builtTroveSource, stack, repos)
def __repr__(self):
return 'DepHandlerSource(%r,%r,%r)' % (self.extraSource, self.mainSource, self.repos)
def copy(self):
        inst = self.__class__(self.extraSource, None, self.repos)
inst.repos = self.repos
return inst
class BuiltTroveSource(trovesource.SimpleTroveSource):
"""
Trove source that is used for dep resolution and buildreq satisfaction
only - it does not contain references to the changesets that are added
"""
def __init__(self, troves, repos):
self.depDb = deptable.DependencyDatabase()
trovesource.SimpleTroveSource.__init__(self)
self.setFlavorPreferenceList(repos._flavorPreferences)
self.idMap = []
self.idx = 0
for trove in troves:
self.addTrove(trove.getNameVersionFlavor(), trove.getProvides(),
trove.getRequires())
self.searchWithFlavor()
def close(self):
self.depDb.db.close()
def __del__(self):
self.depDb.db.close()
def addTrove(self, troveTuple, provides, requires):
self._trovesByName.setdefault(troveTuple[0],set()).add(troveTuple)
self.idMap.append(troveTuple)
self.depDb.add(self.idx, provides, requires)
self.idx += 1
def addChangeSet(self, cs):
for idx, trvCs in enumerate(cs.iterNewTroveList()):
self.addTrove(trvCs.getNewNameVersionFlavor(), trvCs.getProvides(),
trvCs.getRequires())
def resolveDependencies(self, label, depList, leavesOnly=False):
suggMap = self.depDb.resolve(label, depList)
for depSet, solListList in suggMap.iteritems():
newSolListList = []
for solList in solListList:
if not self._allowNoLabel and label:
                    newSolListList.append([ self.idMap[x] for x in solList if self.idMap[x][1].trailingLabel() == label])
else:
newSolListList.append([ self.idMap[x] for x in solList ])
suggMap[depSet] = newSolListList
return suggMap
class ResolutionMesh(resolve.BasicResolutionMethod):
def __init__(self, cfg, extraMethod, mainMethod):
resolve.BasicResolutionMethod.__init__(self, cfg, None)
self.extraMethod = extraMethod
self.mainMethod = mainMethod
def prepareForResolution(self, depList):
self.depList = [ x[1] for x in depList]
self.extraMethod.prepareForResolution(depList)
return self.mainMethod.prepareForResolution(depList)
def resolveDependencies(self):
suggMap = self.extraMethod.resolveDependencies()
suggMap2 = self.mainMethod.resolveDependencies()
for depSet in self.depList:
if depSet not in suggMap:
suggMap[depSet] = [[] for x in depSet.iterDeps() ]
if depSet not in suggMap2:
suggMap2[depSet] = [[] for x in depSet.iterDeps() ]
for depSet, results in suggMap.iteritems():
mainResults = suggMap2[depSet]
for troveList1, troveList2 in itertools.izip(results, mainResults):
troveList2.extend(troveList1)
return suggMap2
def searchLeavesOnly(self):
self.extraMethod.searchLeavesOnly()
self.mainMethod.searchLeavesOnly()
def searchLeavesFirst(self):
self.extraMethod.searchLeavesFirst()
self.mainMethod.searchLeavesFirst()
def searchAllVersions(self):
self.extraMethod.searchAllVersions()
self.mainMethod.searchAllVersions()
def selectResolutionTrove(self, requiredBy, dep, depClass,
troveTups, installFlavor, affFlavorDict):
"""
determine which of the given set of troveTups is the
best choice for installing on this system. Because the
repository didn't try to determine which flavors are best for
our system, we have to filter the troves locally.
"""
        #NOTE: this method should match exactly the one in
# conary.repository.resolvemethod for conary 1.2 and later.
# when we drop support for earlier conary's we can drop this method.
# we filter the troves in the following ways:
# 1. prefer troves that match affinity flavor + are on the affinity
# label. (And don't drop an arch)
# 2. fall back to troves that match the install flavor.
# If we don't match an affinity flavor + label, then use flavor
# preferences and flavor scoring to select the best flavor.
# We'll have to check
# Within these two categories:
# 1. filter via flavor preferences for each trove (this may result
# in an older version for some troves)
# 2. only leave the latest version for each trove
# 3. pick the best flavor out of the remaining
affinityMatches = []
affinityFlavors = []
otherMatches = []
otherFlavors = []
if installFlavor is not None and not installFlavor.isEmpty():
flavoredList = []
for troveTup in troveTups:
label = troveTup[1].trailingLabel()
affTroves = affFlavorDict[troveTup[0]]
found = False
if affTroves:
for affName, affVersion, affFlavor in affTroves:
if affVersion.trailingLabel() != label:
continue
newFlavor = deps.overrideFlavor(installFlavor,
affFlavor,
mergeType=deps.DEP_MERGE_TYPE_PREFS)
# implement never drop an arch for dep resolution
currentArch = deps.getInstructionSetFlavor(affFlavor)
if not troveTup[2].stronglySatisfies(currentArch):
continue
if newFlavor.satisfies(troveTup[2]):
affinityMatches.append((newFlavor, troveTup))
affinityFlavors.append(troveTup[2])
found = True
if not found and not affinityMatches:
if installFlavor.satisfies(troveTup[2]):
otherMatches.append((installFlavor, troveTup))
otherFlavors.append(troveTup[2])
else:
otherMatches = [ (None, x) for x in troveTups ]
otherFlavors = [x[2] for x in troveTups]
if affinityMatches:
allFlavors = affinityFlavors
flavoredList = affinityMatches
else:
allFlavors = otherFlavors
flavoredList = otherMatches
# Now filter by flavor preferences.
newFlavors = []
if self.flavorPreferences:
for flavor in self.flavorPreferences:
for trvFlavor in allFlavors:
if trvFlavor.stronglySatisfies(flavor):
newFlavors.append(trvFlavor)
if newFlavors:
break
if newFlavors:
flavoredList = [ x for x in flavoredList if x[1][2] in newFlavors ]
return self._selectMatchingResolutionTrove(requiredBy, dep,
depClass, flavoredList)
def _selectMatchingResolutionTrove(self, requiredBy, dep, depClass,
flavoredList):
# this function should be an exact match of
# resolvemethod._selectMatchingResolutionTrove from conary 1.2 and
# later.
# finally, filter by latest then score.
trovesByNL = {}
for installFlavor, (n,v,f) in flavoredList:
l = v.trailingLabel()
myTimeStamp = v.timeStamps()[-1]
if installFlavor is None:
myScore = 0
else:
# FIXME: we should cache this scoring from before.
myScore = installFlavor.score(f)
if (n,l) in trovesByNL:
curScore, curTimeStamp, curTup = trovesByNL[n,l]
if curTimeStamp > myTimeStamp:
continue
if curTimeStamp == myTimeStamp:
if myScore < curScore:
continue
trovesByNL[n,l] = (myScore, myTimeStamp, (n,v,f))
scoredList = sorted(trovesByNL.itervalues())
if not scoredList:
return None
else:
# highest score, then latest timestamp, then name.
return scoredList[-1][-1]
if hasattr(resolve.BasicResolutionMethod,
'_selectMatchingResolutionTrove'):
selectResolutionTrove = resolve.BasicResolutionMethod.selectResolutionTrove
_selectMatchingResolutionTrove = resolve.BasicResolutionMethod._selectMatchingResolutionTrove
class rMakeResolveSource(ResolutionMesh):
"""
        Resolve by trove list first and then fall back to the label
        path. Also respects intra-trove deps. If foo:runtime
requires foo:lib, it requires exactly the same version of foo:lib.
"""
def __init__(self, cfg, builtTroveSource, resolveTroveSource,
troveLists, repos):
self.removeFileDependencies = False
self.builtTroveSource = builtTroveSource
self.troveLists = troveLists
self.resolveTroveSource = resolveTroveSource
        self.repos = repos
        self.cfg = cfg
        self.flavor = cfg.flavor
        builtResolveSource = resolve.BasicResolutionMethod(cfg, None)
        builtResolveSource.setTroveSource(builtTroveSource)
        sources = []
if troveLists:
troveListSources = [resolve.DepResolutionByTroveList(cfg, None, x)
for x in troveLists]
[ x.setTroveSource(self.repos) for x in troveListSources ]
sources.extend(troveListSources)
mainMethod = resolve.ResolutionStack(*sources)
flavorPreferences = self.repos._flavorPreferences
for source in sources:
source.setFlavorPreferences(flavorPreferences)
ResolutionMesh.__init__(self, cfg, builtResolveSource, mainMethod)
self.setFlavorPreferences(flavorPreferences)
def close(self):
self.builtTroveSource.close()
def setLabelPath(self, labelPath):
if labelPath:
source = resolve.DepResolutionByLabelPath(self.cfg, None, labelPath)
source.setTroveSource(self.repos)
self.mainMethod.addSource(source)
def prepareForResolution(self, depList):
# need to get intratrove deps while we still have the full dependency
# request information - including what trove the dep arises from.
intraDeps = self._getIntraTroveDeps(depList)
self.intraDeps = intraDeps
return ResolutionMesh.prepareForResolution(self, depList)
def _resolveIntraTroveDeps(self, intraDeps):
trovesToGet = []
for depSet, deps in intraDeps.iteritems():
for dep, troveTups in deps.iteritems():
trovesToGet.extend(troveTups)
hasTroves = self.troveSource.hasTroves(trovesToGet)
if isinstance(hasTroves, list):
hasTroves = dict(itertools.izip(trovesToGet, hasTroves))
results = {}
for depSet, deps in intraDeps.iteritems():
d = {}
results[depSet] = d
for dep, troveTups in deps.iteritems():
d[dep] = [ x for x in troveTups if hasTroves[x] ]
return results
def resolveDependencies(self):
sugg = ResolutionMesh.resolveDependencies(self)
intraDepSuggs = self._resolveIntraTroveDeps(self.intraDeps)
for depSet, intraDeps in self.intraDeps.iteritems():
for idx, (depClass, dep) in enumerate(depSet.iterDeps(sort=True)):
if depClass.tag == deps.DEP_CLASS_TROVES:
if (dep in intraDepSuggs[depSet]
and intraDepSuggs[depSet][dep]):
sugg[depSet][idx] = intraDepSuggs[depSet][dep]
return sugg
def _getIntraTroveDeps(self, depList):
suggsByDep = {}
intraDeps = {}
for troveTup, depSet in depList:
pkgName = troveTup[0].split(':', 1)[0]
for dep in depSet.iterDepsByClass(deps.TroveDependencies):
if (dep.name.startswith(pkgName)
and dep.name.split(':', 1)[0] == pkgName):
troveToGet = (dep.name, troveTup[1], troveTup[2])
l = suggsByDep.setdefault(dep, [])
l.append(troveToGet)
intraDeps.setdefault(depSet, {}).setdefault(dep, l)
return intraDeps
def filterDependencies(self, depList):
if self.removeFileDependencies:
depList = [(x[0], flavorutil.removeFileDeps(x[1]))
for x in depList ]
return [ x for x in depList if not x[1].isEmpty() ]
return depList
def _selectMatchingResolutionTrove(self, requiredBy, dep, depClass,
flavoredList):
# if all packages are the same and only their flavor score or timestamp
# is keeping one from being picked over the other, prefer the
# newly built package.
builtTroves = []
resolveTroves = []
newList = flavoredList
if self.resolveTroveSource:
minResolveIdx = len(self.resolveTroveSource.sources)
ilp = self.cfg.installLabelPath
for installFlavor, troveTup in flavoredList:
if self.extraMethod.troveSource.hasTrove(*troveTup):
branch = troveTup[1].branch()
if branch.hasParentBranch():
label = branch.parentBranch().label()
else:
label = branch.label()
list = builtTroves
elif (self.resolveTroveSource
and self.resolveTroveSource.hasTrove(*troveTup)):
# if a package is both in the resolveTroves list
# and found via ILP, it might be in this list even
# though it was not found via resolveTroves. So we
# limit results to ones found as early as possible
# in the resolveTroves list
for resolveIdx, source in enumerate(self.resolveTroveSource.sources):
if source.hasTrove(*troveTup):
if resolveIdx < minResolveIdx:
resolveTroves = []
minResolveIdx = resolveIdx
break
if resolveIdx > minResolveIdx:
continue
list = resolveTroves
label = troveTup[1].trailingLabel()
else:
continue
if label in ilp:
index = ilp.index(label)
else:
index = len(ilp)
list.append((index, (installFlavor, troveTup)))
if builtTroves:
minIndex = sorted(builtTroves, key=lambda x: x[0])[0][0]
newList = [ x[1] for x in builtTroves if x[0] == minIndex ]
elif resolveTroves:
minIndex = sorted(resolveTroves, key=lambda x: x[0])[0][0]
newList = [ x[1] for x in resolveTroves if x[0] == minIndex ]
return ResolutionMesh._selectMatchingResolutionTrove(self, requiredBy,
dep,
depClass, newList)
| apache-2.0 | -3,809,415,997,018,213,000 | 41.872727 | 119 | 0.560057 | false |
songyi199111/sentry | src/sentry/event_manager.py | 2 | 19299 | """
sentry.event_manager
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
import math
import six
from datetime import datetime, timedelta
from django.conf import settings
from django.db import IntegrityError, transaction
from django.utils import timezone
from hashlib import md5
from raven.utils.encoding import to_string
from uuid import uuid4
from sentry.app import buffer, tsdb
from sentry.constants import (
CLIENT_RESERVED_ATTRS, LOG_LEVELS, DEFAULT_LOGGER_NAME, MAX_CULPRIT_LENGTH,
MAX_TAG_VALUE_LENGTH
)
from sentry.interfaces.base import get_interface
from sentry.models import (
Activity, Event, EventMapping, Group, GroupHash, GroupStatus, Project,
Release, UserReport
)
from sentry.plugins import plugins
from sentry.signals import regression_signal
from sentry.utils.logging import suppress_exceptions
from sentry.tasks.index import index_event
from sentry.tasks.merge import merge_group
from sentry.tasks.post_process import post_process_group
from sentry.utils.db import get_db_engine
from sentry.utils.safe import safe_execute, trim, trim_dict
def count_limit(count):
# TODO: could we do something like num_to_store = max(math.sqrt(100*count)+59, 200) ?
# ~ 150 * ((log(n) - 1.5) ^ 2 - 0.25)
for amount, sample_rate in settings.SENTRY_SAMPLE_RATES:
if count <= amount:
return sample_rate
return settings.SENTRY_MAX_SAMPLE_RATE
def time_limit(silence): # ~ 3600 per hour
for amount, sample_rate in settings.SENTRY_SAMPLE_TIMES:
if silence >= amount:
return sample_rate
return settings.SENTRY_MAX_SAMPLE_TIME
def md5_from_hash(hash_bits):
result = md5()
for bit in hash_bits:
result.update(to_string(bit))
return result.hexdigest()
def get_hashes_for_event(event):
interfaces = event.get_interfaces()
for interface in interfaces.itervalues():
result = interface.compute_hashes(event.platform)
if not result:
continue
return result
return [[event.message]]
def get_hashes_from_fingerprint(event, fingerprint):
default_values = set(['{{ default }}', '{{default}}'])
if any(d in fingerprint for d in default_values):
default_hashes = get_hashes_for_event(event)
hash_count = len(default_hashes)
else:
hash_count = 1
hashes = []
for idx in xrange(hash_count):
result = []
for bit in fingerprint:
if bit in default_values:
result.extend(default_hashes[idx])
else:
result.append(bit)
hashes.append(result)
return hashes
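# For example (illustrative values only): a fingerprint of ['{{ default }}', 'my-route']
# expands to one hash input per default hash, each with 'my-route' appended, while a
# fingerprint without the '{{ default }}' marker yields a single hash input of its
# literal bits.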
if not settings.SENTRY_SAMPLE_DATA:
def should_sample(current_datetime, last_seen, times_seen):
return False
else:
def should_sample(current_datetime, last_seen, times_seen):
silence_timedelta = current_datetime - last_seen
silence = silence_timedelta.days * 86400 + silence_timedelta.seconds
if times_seen % count_limit(times_seen) == 0:
return False
if times_seen % time_limit(silence) == 0:
return False
return True
def plugin_is_regression(group, event):
project = event.project
for plugin in plugins.for_project(project):
result = safe_execute(plugin.is_regression, group, event,
version=1, _with_transaction=False)
if result is not None:
return result
return True
class ScoreClause(object):
def __init__(self, group):
self.group = group
def __int__(self):
# Calculate the score manually when coercing to an int.
# This is used within create_or_update and friends
return self.group.get_score()
def prepare_database_save(self, unused):
return self
def prepare(self, evaluator, query, allow_joins):
return
def evaluate(self, node, qn, connection):
engine = get_db_engine(getattr(connection, 'alias', 'default'))
if engine.startswith('postgresql'):
sql = 'log(times_seen) * 600 + last_seen::abstime::int'
elif engine.startswith('mysql'):
sql = 'log(times_seen) * 600 + unix_timestamp(last_seen)'
else:
            # XXX: if we can't do it atomically let's do it the best we can
sql = int(self)
return (sql, [])
@classmethod
def calculate(self, times_seen, last_seen):
return math.log(times_seen) * 600 + float(last_seen.strftime('%s'))
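        # Rough illustration (values approximate): times_seen=100 and a last_seen of
        # 2015-01-01 00:00 UTC give log(100) * 600 ~= 2763 plus ~1420070400 epoch
        # seconds, so the epoch term orders groups by recency while each e-fold
        # increase in times_seen is worth about 600 seconds (10 minutes) of it.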
class EventManager(object):
logger = logging.getLogger('sentry.events')
def __init__(self, data, version='5'):
self.data = data
self.version = version
def normalize(self):
# TODO(dcramer): store http.env.REMOTE_ADDR as user.ip
# First we pull out our top-level (non-data attr) kwargs
data = self.data
if not isinstance(data.get('level'), (six.string_types, int)):
data['level'] = logging.ERROR
elif data['level'] not in LOG_LEVELS:
data['level'] = logging.ERROR
if not data.get('logger'):
data['logger'] = DEFAULT_LOGGER_NAME
else:
data['logger'] = trim(data['logger'], 64)
if data.get('platform'):
data['platform'] = trim(data['platform'], 64)
timestamp = data.get('timestamp')
if not timestamp:
timestamp = timezone.now()
if isinstance(timestamp, datetime):
# We must convert date to local time so Django doesn't mess it up
# based on TIME_ZONE
if settings.TIME_ZONE:
if not timezone.is_aware(timestamp):
timestamp = timestamp.replace(tzinfo=timezone.utc)
elif timezone.is_aware(timestamp):
timestamp = timestamp.replace(tzinfo=None)
timestamp = float(timestamp.strftime('%s'))
data['timestamp'] = timestamp
if not data.get('event_id'):
data['event_id'] = uuid4().hex
data.setdefault('message', None)
data.setdefault('culprit', None)
data.setdefault('time_spent', None)
data.setdefault('server_name', None)
data.setdefault('site', None)
data.setdefault('checksum', None)
data.setdefault('fingerprint', None)
data.setdefault('platform', None)
data.setdefault('extra', {})
data.setdefault('errors', [])
tags = data.get('tags')
if not tags:
tags = []
# full support for dict syntax
elif isinstance(tags, dict):
tags = tags.items()
# prevent [tag, tag, tag] (invalid) syntax
elif not all(len(t) == 2 for t in tags):
tags = []
else:
tags = list(tags)
data['tags'] = []
for key, value in tags:
key = six.text_type(key).strip()
value = six.text_type(value).strip()
if not (key and value):
continue
if len(value) > MAX_TAG_VALUE_LENGTH:
continue
data['tags'].append((key, value))
if not isinstance(data['extra'], dict):
# throw it away
data['extra'] = {}
trim_dict(
data['extra'], max_size=settings.SENTRY_MAX_EXTRA_VARIABLE_SIZE)
# TODO(dcramer): more of validate data needs stuffed into the manager
for key in data.keys():
if key in CLIENT_RESERVED_ATTRS:
continue
value = data.pop(key)
try:
interface = get_interface(key)()
except ValueError:
continue
try:
inst = interface.to_python(value)
data[inst.get_path()] = inst.to_json()
except Exception:
pass
data['version'] = self.version
# TODO(dcramer): find a better place for this logic
exception = data.get('sentry.interfaces.Exception')
stacktrace = data.get('sentry.interfaces.Stacktrace')
if exception and len(exception['values']) == 1 and stacktrace:
exception['values'][0]['stacktrace'] = stacktrace
del data['sentry.interfaces.Stacktrace']
if 'sentry.interfaces.Http' in data:
# default the culprit to the url
if not data['culprit']:
data['culprit'] = data['sentry.interfaces.Http']['url']
if data['time_spent']:
data['time_spent'] = int(data['time_spent'])
if data['culprit']:
data['culprit'] = trim(data['culprit'], MAX_CULPRIT_LENGTH)
if data['message']:
data['message'] = trim(
data['message'], settings.SENTRY_MAX_MESSAGE_LENGTH)
return data
@suppress_exceptions
def save(self, project, raw=False):
# TODO: culprit should default to "most recent" frame in stacktraces when
# it's not provided.
project = Project.objects.get_from_cache(id=project)
data = self.data.copy()
# First we pull out our top-level (non-data attr) kwargs
event_id = data.pop('event_id')
message = data.pop('message')
level = data.pop('level')
culprit = data.pop('culprit', None) or ''
time_spent = data.pop('time_spent', None)
logger_name = data.pop('logger', None)
server_name = data.pop('server_name', None)
site = data.pop('site', None)
checksum = data.pop('checksum', None)
fingerprint = data.pop('fingerprint', None)
platform = data.pop('platform', None)
release = data.pop('release', None)
date = datetime.fromtimestamp(data.pop('timestamp'))
date = date.replace(tzinfo=timezone.utc)
kwargs = {
'message': message,
'platform': platform,
}
event = Event(
project=project,
event_id=event_id,
data=data,
time_spent=time_spent,
datetime=date,
**kwargs
)
tags = data.get('tags') or []
tags.append(('level', LOG_LEVELS[level]))
if logger_name:
tags.append(('logger', logger_name))
if server_name:
tags.append(('server_name', server_name))
if site:
tags.append(('site', site))
if release:
# TODO(dcramer): we should ensure we create Release objects
tags.append(('sentry:release', release))
for plugin in plugins.for_project(project, version=None):
added_tags = safe_execute(plugin.get_tags, event,
_with_transaction=False)
if added_tags:
tags.extend(added_tags)
# XXX(dcramer): we're relying on mutation of the data object to ensure
# this propagates into Event
data['tags'] = tags
        # prioritize fingerprint over checksum as it's likely the client defaulted
# a checksum whereas the fingerprint was explicit
if fingerprint:
hashes = map(md5_from_hash, get_hashes_from_fingerprint(event, fingerprint))
elif checksum:
hashes = [checksum]
else:
hashes = map(md5_from_hash, get_hashes_for_event(event))
group_kwargs = kwargs.copy()
group_kwargs.update({
'culprit': culprit,
'logger': logger_name,
'level': level,
'last_seen': date,
'first_seen': date,
'time_spent_total': time_spent or 0,
'time_spent_count': time_spent and 1 or 0,
})
if release:
release = Release.get_or_create(
project=project,
version=release,
date_added=date,
)
group_kwargs['first_release'] = release
Activity.objects.create(
type=Activity.RELEASE,
project=project,
ident=release,
data={'version': release},
datetime=date,
)
group, is_new, is_regression, is_sample = safe_execute(
self._save_aggregate,
event=event,
hashes=hashes,
**group_kwargs
)
using = group._state.db
event.group = group
event.group_id = group.id
# store a reference to the group id to guarantee validation of isolation
event.data.bind_ref(event)
try:
with transaction.atomic():
EventMapping.objects.create(
project=project, group=group, event_id=event_id)
except IntegrityError:
self.logger.info('Duplicate EventMapping found for event_id=%s', event_id)
return event
UserReport.objects.filter(
project=project, event_id=event_id,
).update(group=group)
        # save the event unless it's been sampled
if not is_sample:
try:
with transaction.atomic():
event.save()
except IntegrityError:
self.logger.info('Duplicate Event found for event_id=%s', event_id)
return event
if is_new and release:
buffer.incr(Release, {'new_groups': 1}, {
'id': release.id,
})
safe_execute(Group.objects.add_tags, group, tags,
_with_transaction=False)
if not raw:
post_process_group.delay(
group=group,
event=event,
is_new=is_new,
is_sample=is_sample,
is_regression=is_regression,
)
else:
self.logger.info('Raw event passed; skipping post process for event_id=%s', event_id)
index_event.delay(event)
# TODO: move this to the queue
if is_regression and not raw:
regression_signal.send_robust(sender=Group, instance=group)
return event
def _find_hashes(self, project, hash_list):
matches = []
for hash in hash_list:
ghash, _ = GroupHash.objects.get_or_create(
project=project,
hash=hash,
)
matches.append((ghash.group_id, ghash.hash))
return matches
def _ensure_hashes_merged(self, group, hash_list):
# TODO(dcramer): there is a race condition with selecting/updating
# in that another group could take ownership of the hash
bad_hashes = GroupHash.objects.filter(
project=group.project,
hash__in=hash_list,
).exclude(
group=group,
)
if not bad_hashes:
return
for hash in bad_hashes:
merge_group.delay(
from_group_id=hash.group_id,
to_group_id=group.id,
)
return GroupHash.objects.filter(
project=group.project,
hash__in=bad_hashes,
).update(
group=group,
)
def _save_aggregate(self, event, hashes, **kwargs):
time_spent = event.time_spent
project = event.project
# attempt to find a matching hash
all_hashes = self._find_hashes(project, hashes)
try:
existing_group_id = (h[0] for h in all_hashes if h[0]).next()
except StopIteration:
existing_group_id = None
# XXX(dcramer): this has the opportunity to create duplicate groups
# it should be resolved by the hash merging function later but this
# should be better tested/reviewed
if existing_group_id is None:
kwargs['score'] = ScoreClause.calculate(1, kwargs['last_seen'])
group, group_is_new = Group.objects.create(
project=project,
**kwargs
), True
else:
group = Group.objects.get(id=existing_group_id)
group_is_new = False
# If all hashes are brand new we treat this event as new
is_new = False
new_hashes = [h[1] for h in all_hashes if h[0] is None]
if new_hashes:
affected = GroupHash.objects.filter(
project=project,
hash__in=new_hashes,
group__isnull=True,
).update(
group=group,
)
if affected != len(new_hashes):
self._ensure_hashes_merged(group, new_hashes)
elif group_is_new and len(new_hashes) == len(all_hashes):
is_new = True
# XXX(dcramer): it's important this gets called **before** the aggregate
# is processed as otherwise values like last_seen will get mutated
can_sample = should_sample(event.datetime, group.last_seen, group.times_seen)
if not is_new:
is_regression = self._process_existing_aggregate(group, event, kwargs)
else:
is_regression = False
# Determine if we've sampled enough data to store this event
if is_new or is_regression:
is_sample = False
else:
is_sample = can_sample
tsdb.incr_multi([
(tsdb.models.group, group.id),
(tsdb.models.project, project.id),
])
return group, is_new, is_regression, is_sample
def _process_existing_aggregate(self, group, event, data):
date = max(event.datetime, group.last_seen)
extra = {
'last_seen': date,
'score': ScoreClause(group),
}
if event.message and event.message != group.message:
extra['message'] = event.message
if group.level != data['level']:
extra['level'] = data['level']
if group.culprit != data['culprit']:
extra['culprit'] = data['culprit']
is_regression = False
if group.is_resolved() and plugin_is_regression(group, event):
is_regression = bool(Group.objects.filter(
id=group.id,
                # ensure we can't update things if the status has been set to
# muted
status__in=[GroupStatus.RESOLVED, GroupStatus.UNRESOLVED],
).exclude(
# add to the regression window to account for races here
active_at__gte=date - timedelta(seconds=5),
).update(
active_at=date,
# explicitly set last_seen here as ``is_resolved()`` looks
# at the value
last_seen=date,
status=GroupStatus.UNRESOLVED
))
group.active_at = date
group.status = GroupStatus.UNRESOLVED
group.last_seen = extra['last_seen']
update_kwargs = {
'times_seen': 1,
}
if event.time_spent:
update_kwargs.update({
'time_spent_total': event.time_spent,
'time_spent_count': 1,
})
buffer.incr(Group, update_kwargs, {
'id': group.id,
}, extra)
return is_regression
| bsd-3-clause | -1,338,582,620,076,449,300 | 31.654822 | 97 | 0.56604 | false |
theodoregoetz/wernher | sandbox/KRPC Testing.py | 1 | 3144 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
%run -i 'KRPC.ipynb'
# <codecell>
conn = krpc.connect(name='laptop', address='192.168.1.9')
ksc = conn.space_center
vessel = ksc.active_vessel
obt = vessel.orbit
ap = vessel.auto_pilot
con = vessel.control
vrf = vessel.reference_frame
srfrf = vessel.surface_reference_frame
vobtrf = vessel.orbital_reference_frame
obtrf = obt.body.reference_frame
obtorf = obt.body.orbital_reference_frame
obtnrrf = obt.body.non_rotating_reference_frame
flight = lambda rf: vessel.flight(rf)
# <codecell>
t = ksc.ut
o = KeplerOrbit(obt)
f = flight(obtorf)
print(obt.time_to_apoapsis, obt.time_to_periapsis)
print(f.longitude)
print(o.Ω * 180/π)
print(o.ν * 180/π)
# <codecell>
speed = conn.add_stream(getattr, flight(srfrf), 'speed')
altitude = conn.add_stream(getattr, flight(obtrf), 'mean_altitude')
apoapsis = conn.add_stream(getattr, obt, 'apoapsis_altitude')
# <codecell>
con.throttle = 0.6
ap.set_rotation(90, 90, roll=90)
time.sleep(1)
con.activate_next_stage()
while flight(obtrf).speed < 100.:
time.sleep(0.1)
ap.set_rotation(80, 90, roll=90)
while flight(obtrf).mean_altitude < 5000.:
time.sleep(0.1)
ap.disengage()
ap.sas = True
ap.sas_mode = ksc.SASMode.prograde
while obt.apoapsis_altitude < 80000:
time.sleep(0.1)
ap.sas_mode = ksc.SASMode.stability_assist
ap.sas = False
while abs(obt.eccentricity) > 0.1:
    time.sleep(0.1)
ap.set_direction((0, 1, 0), roll=90)  # assumed placeholder direction vector
ap.disengage()
con.throttle = 0.
# <codecell>
ksc.SASMode.prograde
# <codecell>
speed.remove()
altitude.remove()
apoapsis.remove()
# <codecell>
def prelaunch(conn):
ksc = conn.space_center
vessel = ksc.active_vessel
obtbody_rf = vessel.orbit.body.reference_frame
flight = vessel.flight
ap = vessel.auto_pilot
cont = vessel.control
vessel
ut = conn.add_stream(getattr, ksc, 'ut')
mean_altitude = conn.add_stream(getattr, flight(), 'mean_altitude')
#position = conn.add_stream(vessel.position, obtbody_rf)
timestamp = []
altitude = []
t0 = ut()
alt = mean_altitude()
while alt < 80000:
t1 = ut()
alt = mean_altitude()
if abs(t1 - t0) > 0.001:
timestamp.append(t1)
altitude.append(alt)
t0 = t1
time.sleep(1./25.)
# <codecell>
print(ut())
# <codecell>
pyplot.plot(timestamp,altitude)
# <codecell>
print(vessel.name)
print(vessel.met)
print(vessel.mass)
print(vessel.position(vessel.orbit.body.reference_frame))
# <codecell>
def latlon(vessel):
x,y,z = vessel.position(vessel.orbit.body.reference_frame)
r = np.sqrt(x*x + y*y + z*z)
lat = 90. - np.arccos(y / r) * 180. / np.pi
lon = np.arctan2(z, x) * 180. / np.pi
return lat,lon
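# Quick sanity check (illustrative): a body-frame position of (r, 0, 0) gives
# lat = 90 - degrees(arccos(0)) = 0 and lon = degrees(arctan2(0, r)) = 0.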
# <codecell>
data = []
# <codecell>
image = pyplot.imread('/home/goetz/kerbin.jpg')
fig, ax = pyplot.subplots(figsize=(15,7))
im = ax.imshow(image)
ax.set_autoscale_on(False)
xmin,xmax = ax.get_xlim()
ymin,ymax = ax.get_ylim()
lat,lon = latlon(vessel)
xmap = ((lon + 180.) / 360.) * (xmax - xmin) + xmin
ymap = ((lat + 90.) / 180.) * (ymax - ymin) + ymin
pt = ax.plot(xmap,ymap, marker='o', color='cyan')
| gpl-3.0 | 4,061,962,225,214,920,700 | 17.690476 | 67 | 0.660828 | false |
saltzm/yadi | yadi/datalog2sql/parse2tokens/parser_tests.py | 1 | 6246 | from .Parser import Parser
p = Parser()
#Tests to check syntax
print(p.parsesentence("q.")) # Atom, zero arity
print(p.parsesentence("q(x).")) # Atom, one var
print(p.parsesentence("q('3').")) # Atom, string
print(p.parsesentence("q(x,y).")) # Atom, two-arity
print(p.parsesentence("q(_,x).")) # Atom, anonymous variable
print(p.parsesentence("_ab(a).")) # Predicate symbol with underscore
print(p.parsesentence("q2(x,z,b,'a').")) # Predicate symbol with number
print(p.parsesentence("__ab_55(a,b,c).")) # Predicate symbol with number and underscore
print(p.parsesentence("q(x,y) :- k(x,y).")) # Rule with one literal
print(p.parsesentence("q(x,y) :- a(foo_foo).")) # Rule with one literal using constant
print(p.parsesentence("q(x,y) :- k(_ab).")) # Rule with one literal with constant starting with underscore
print(p.parsesentence("q(x,y) :- k(X).")) # Rule with one literal with one variable
print(p.parsesentence("q(x,y) :- k(x,h), _v3(n,k).")) # Rule with two literals
print(p.parsesentence("q(x,y) :- a;b.")) # Rule with disjunction of two zero-arity atoms
print(p.parsesentence("q(x,y) :- a(x);b(x).")) # Rule with disjunction of two 1-arity atoms
print(p.parsesentence("q(x,y) :- a division b.")) # Rule with division of two zero-arity atoms
print(p.parsesentence("q(x,y) :- a(x,y) division b(x,y).")) # Rule with division of two two-arity atoms
print(p.parsesentence("q(x,y,z) :- a(x),a;b.")) # Rule with one-arity atom, disjunction of two zero-arity atoms
print(p.parsesentence("q(x,y) :- a(x), t>5.")) # Rule with one-arity atom, boolean comparison
print(p.parsesentence("q(x,y) :- a(x), t<5.")) # Rule with one-arity atom, boolean comparison
print(p.parsesentence("q(x,y) :- a(x), t>=5.")) # Rule with one-arity atom, boolean comparison
print(p.parsesentence("q(x,y) :- a(x), t<=5.")) # Rule with one-arity atom, boolean comparison
print(p.parsesentence("q(x,y) :- a(x), gd=5.")) # Rule with one-arity atom, boolean comparison
print(p.parsesentence("q(x,y,z) :- a(x), t=4.0.")) # Rule with one-arity atom, comparison using float
print(p.parsesentence("q(x,y,z) :- a(x), t=4.0E6.")) # Rule with one-arity atom, comparison using float+E
print(p.parsesentence("q(x,y,z) :- a(x), t=4.0E+6.")) # Rule with one-arity atom, comparison using float+E+'+'
print(p.parsesentence("q(x,y,z) :- a(x), t=4.0E-6.")) # Rule with one-arity atom, comparison using float+E+'-'
print(p.parsesentence("q(x,y,z) :- a(x), t=4.0, k(x).")) # Rule with one-arity atom, comparison, atom
print(p.parsesentence("q(x) :- x(g), not(a(x,y)).")) # Rule with one-arity atom, negation
print(p.parsesentence("q(x,y). k(x).")) # Two facts in a line.
print(p.parsesentence("q(x,y). q(x,y) :- a(b,c).")) # A fact and a rule in a line.
print(p.parsesentence("q(x,y). q(x,y) :- a(b,c). a(b).")) # A fact, a rule and a fact in a line.
print(p.parsesentence("q(x,y) :- a(b), X=3; Y>5.")) # Rule with one-arity atom, disjunctive comparison.
print(p.parsesentence("q(x,y) :- a(b), X=3, Y>5.")) # Rule with one-arity atom, conjunctive comparison.
print(p.parsesentence("q(x,y) :- a(b), X=3, Y>5, X=3; Y>5.")) # Rule with one-arity atom, two two-term comparisons.
print(p.parsesentence("r(X) :- not(t(Y)), X = Y, s(Y).")) # Rule with a negation in front.
print(p.parsesentence("r(x) :- r(a,X); not(q(X,b)), lj(a,b,x).")) # Rule with a left join
print(p.parsesentence("q(X,Z) :- s(X,Y), not(t(X)), Y=Z."))
print(p.parsesentence("q(X,Z) :- t>5, s(X,Y), not(t(X)), Y=Z."))
print(p.parsesentence("q(X,Y):- s(X).\nq(X,Y):- s(Y).")) # Two statements broken down in two lines.
print(p.parsesentence("q(x,y) :- a(b), X=3, 3>Y, X=3; 5>X.")) # Rule with one-arity atom, two two-term comparisons.
print(p.parsesentence("q(X,Y), s(x).")) # Temporary view
print(p.parsesentence("q(X,Y), not(x(t,y)).")) # Temporary view
print(p.parsesentence("q(X,Y):- s(X).\nq(X,Y):- s(X).\nq(X,Y):- s(X)."))
print(p.parsesentence("q(X,3) :- s(X)."))
#Incorporation of all elements
print(p.parsesentence("a45(x,Y,_343,a) :- x43A(k,5,x), A>=4; t=5, a(q,x);r(x,Y), a division y. q(x,y)."))
#Rules (that actually make sense)
print(p.parsesentence("q(X,Y):- s(X)."))
print(p.parsesentence("q(X):- s(X)."))
print(p.parsesentence("q(X):- s(X), not(t(U))."))
print(p.parsesentence("q(X):- s(X,U), not(t(U))."))
print(p.parsesentence("q(X):- s(X), not(t(U)), U = 2."))
print(p.parsesentence("q(X):- s(X), not(t(U)), U < 2."))
print(p.parsesentence("q(X):- s(X), not(t(U)), U = X."))
print(p.parsesentence("q(X):- s(X), Y < 3."))
print(p.parsesentence("q(X):- s(X,Y), Y < 3."))
print(p.parsesentence("q(X):- s(X), not(t(Y)), X = Y."))
print(p.parsesentence("q(X,Z):- s(X,Y), not(t(A,Z)), Z = Y."))
print(p.parsesentence("q(X):- s(X), X = 2."))
print(p.parsesentence("q(X):- s(X, Y), Y = 2."))
print(p.parsesentence("q(X):- s(X, Y, Z), Y = 2, Z = Y."))
print(p.parsesentence("q(X) :- not(s(Y)), X = 2, X = Y."))
print(p.parsesentence("q(X) :- not(s(Y)), X = Y, X = 2."))
print(p.parsesentence("q(X) :- s(X), X = Y."))
print(p.parsesentence("q(X) :- s(X), P = Y."))
print(p.parsesentence("r(X) :- s(X), 3=X, X>2."))
print(p.parsesentence("r(Y) :- s(X), Y=X, X=2, Y =4."))
print(p.parsesentence("r(X,Y,Z,_,2) :- s(X), Y=X, X=2."))
print(p.parsesentence("q(X,Y) :- s(_,Y), t(X,_), u(_), v(_,_)."))
print(p.parsesentence("q(x,y)."))
print(p.parsesentence("q(X,Y) :- s(_,Y), t(X,_), u(_), v(_,_)."))
#Errors
#print(p.parsesentence("q(x,y,,)."))
#print(p.parsesentence("r(Title1,Title2,Release_date):-movie(Title1,,,Release_date),movie(Title2,,,Release_date)."))
#print(p.parsesentence("r(x):-q(x),s(x,,,,,)."))
#print(p.parsesentence("q(x,)."))
| bsd-3-clause | -3,213,480,592,657,898,000 | 72.482353 | 127 | 0.553154 | false |
blaisb/cfdemUtilities | cylinderPorosity/pythons/getParticlePositionsFOAM.py | 1 | 2828 | # This program converts OpenFOAM raw data to a text file containing information on the particles
# in the format that can be read by the porosity code
#
# position (x y z) and radius
# THIS PROGRAM REQUIRES A DIRECTORY particles in the main folder
# In the current form of the software the radius must be fixed by the user
# Author : Bruno Blais
# Last modified : 15-01-2014
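#
# Each generated particlesInfo_<i> file starts with a header (number of timesteps,
# height, inner radius, outer radius, particle count, current time), then a separator
# line, then one "x y z radius" row per particle (see the write calls in MAIN below).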
#Python imports
#----------------
import os
import sys
import numpy
#----------------
#********************************
# OPTIONS AND USER PARAMETERS
#********************************
#Initial time of simulation, final time and time increment must be specified by user
t0=5
tf=115.0
dT=5
radius = 0.0007485
height=0.05
ri = 0.0064
ro = 0.0238
#====================
# READER
#====================
#This function reads an OpenFOAM raw file and extract a table of the data
def readf(fname):
infile = open(fname,'r')
if (infile!=0):
#Clear garbage lines
for i in range(0,17):
infile.readline()
#Read number of cell centers
n=int(infile.readline())
#Pre-allocate memory
x=numpy.zeros([n])
y=numpy.zeros([n])
z=numpy.zeros([n])
#Clear garbage line "("
infile.readline()
#read current property "xu"
for i in range(0,n,1):
number_str=infile.readline()
number2_str=number_str.split("(")
number3_str=number2_str[1].split(")")
number4_str=number3_str[0].split()
x[i]=float(number4_str[0])
y[i]=float(number4_str[1])
z[i]=float(number4_str[2])
else:
print "File %s could not be opened" %fname
infile.close();
return n,x,y,z
#======================
# MAIN
#======================
#Name of the files to be considered
inname= ['lagrangian/particleCloud/positions']
os.chdir("./") # go to directory
nt=int((tf-t0)/dT)
t=t0
for i in range(0,nt):
#Current case
print "Post-processing time ", t
#Go to the directory corresponding to the timestep
if (t>0.99999 and t<1.0000001) : os.chdir("1")
elif (t==0) : os.chdir("0")
elif ((numpy.abs(numpy.mod(t,1)))<0.01): os.chdir(str(int(t)))
else :os.chdir(str(t))
#Create output file back in main folder
outname="../particlesInfo/particlesInfo_%s" %str(i)
outfile=open(outname,'w')
#Read each variables to be able to dimensionalise final array
[n,x,y,z] = readf(inname[0])
#Write header
outfile.write("%i\n" %nt)
outfile.write("%5.5e\n" %height)
outfile.write("%5.5e\n" %ri)
outfile.write("%5.5e\n" %ro)
outfile.write("%i\n" %n)
outfile.write("%5.5e\n" %t)
outfile.write("**************************************************\n")
for j in range(0,n):
outfile.write("%5.5e %5.5e %5.5e %5.5e \n" %(x[j],y[j],z[j],radius))
outfile.close()
t += dT
#Go back to CFD directory
os.chdir("..") #
print "Post-processing over"
| lgpl-3.0 | -5,991,427,169,702,565,000 | 22.966102 | 96 | 0.594413 | false |
travistang/late_fyt | model.py | 1 | 10888 | from keras.models import *
from keras.layers import *
from keras.layers.advanced_activations import *
from keras.callbacks import *
from keras.optimizers import Adam
from keras.initializers import *
import tensorflow as tf
from utils import huber_loss
def guide_v1(weight_files=None):
S = Input(shape = (64,64,12))
x = Convolution2D(32,8,8,subsample = (4,4),activation = 'relu')(S)
x = BatchNormalization()(x)
x = Convolution2D(32,4,4,subsample = (2,2),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Convolution2D(64,4,4,subsample = (2,2),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Flatten()(x)
# z = Dense(128,init = 'uniform',activation = 'relu',name = 'ls_1',trainable = False)(x)
# ls = Dense(29,init = 'uniform',activation = 'relu',name = 'ls_2',trainable = False)(z)
y = Dense(300,activation = 'relu',name = 'act_1')(x)
Steering = Dense(1,activation = 'linear',name = 'act_2')(y)
#Steering = Dense(1,weights = [np.random.uniform(-1e-8,1e-8,(512,1)),np.zeros((1,))], name='Steering')(lrn4)
model = Model(S,Steering)
adam = Adam(lr=0.00000001,decay = 1e-6)
K.get_session().run([adam.beta_1.initializer,adam.beta_2.initializer])
model.compile(loss='mse', optimizer=adam)
if weight_files:
model.load_weights(weight_files)
return model, model.trainable_weights, S
def guide_v2():
S = Input(shape = (64,64,4))
x = Convolution2D(32,8,8,subsample = (4,4),activation = 'relu')(S)
x = BatchNormalization()(x)
x = Convolution2D(32,4,4,subsample = (2,2),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Convolution2D(32,4,4,subsample = (2,2),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Flatten()(x)
x = Dense(300,activation = 'relu')(x)
x = Dense(8,activation = 'linear',name = 'act_2')(x)
model = Model(S,x)
adam = Adam(lr = 0.0001,decay = 1e-6)
    model.compile(loss = 'categorical_crossentropy',optimizer = adam)
return model
def low_guide_v1(lr = 0.0001,num_output = 9):
S = Input(shape = (116,))
x = Dense(300,activation = ELU())(S)
x = Dense(600,activation = ELU())(x)
x = Dense(num_output,activation = 'linear',init=lambda shape: normal(shape, scale=1e-4))(x)
model = Model(S,x)
adam = Adam(lr = lr,decay = 1e-6,clipnorm=0.5)
model.compile(loss = huber_loss(0.5),optimizer = adam)
return model
def low_guide_v2(num_action = 1,num_ob = 1):
# the actor
S = Input(shape = (1,num_ob))
x = Flatten()(S)
x = Dense(300,activation = 'relu')(x)
x = Dense(600,activation = 'relu')(x)
x = Dense(num_action,activation = 'linear')(x)
model = Model(S,x)
# the critic
A = Input(shape = (num_action,))
S = Input(shape = (1,num_ob))
s = Flatten()(S)
x = merge([A,s],mode = 'concat')
x = Dense(300,activation = 'relu')(x)
x = Dense(600,activation = 'relu')(x)
x = Dense(1,activation = 'linear')(x)
critic = Model([A,S],x)
return model,critic
def img_guide_v1(num_action = 1):
S = Input(shape = (1,64,64,3))
x = Reshape((64,64,3))(S)
x = Conv2D(16,(8,8),strides = (4,4),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = Flatten()(x)
x = Dense(600,activation = 'relu')(x)
x = Dense(300,activation = 'relu')(x)
x = Dense(num_action,activation = 'linear')(x)
actor = Model(S,x)
S = Input(shape = (1,64,64,3))
A = Input(shape = (num_action,))
x = Reshape((64,64,3))(S)
x = Conv2D(16,(8,8),strides = (4,4),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = Flatten()(x)
x = merge([A,x],mode = 'concat')
x = Dense(600,activation = 'relu')(x)
x = Dense(300,activation = 'relu')(x)
x = Dense(1,activation = 'linear')(x)
critic = Model([A,S],x)
return actor,critic
def img_guide_v2(num_action = 1,hist_len = 4):
S = Input(shape = (1,64,64,3 * hist_len))
x = Reshape((64,64,3 * hist_len))(S)
x = Conv2D(32,(8,8),strides = (4,4),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = Flatten()(x)
x = Dense(800,activation = 'relu')(x)
x = Dense(300,activation = 'relu')(x)
x = Dense(num_action,activation = 'linear')(x)
actor = Model(S,x)
S = Input(shape = (1,64,64,3 * hist_len))
A = Input(shape = (num_action,))
x = Reshape((64,64,3 * hist_len))(S)
x = Conv2D(32,(8,8),strides = (4,4),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = Flatten()(x)
x = merge([A,x],mode = 'concat')
x = Dense(800,activation = 'relu')(x)
x = Dense(300,activation = 'relu')(x)
x = Dense(1,activation = 'linear')(x)
critic = Model([A,S],x)
return actor,critic
def img_guide_v3(num_action = 1,hist_len = 4):
S = Input(shape = (1,hist_len,64,64,3))
x = Reshape((hist_len,64,64,3))(S)
x = TimeDistributed(Conv2D(32,(8,8),strides = (4,4),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = Flatten()(x)
x = Dense(800,activation = 'relu')(x)
x = Dense(400,activation = 'relu')(x)
x = Dense(num_action,activation = 'linear')(x)
actor = Model(S,x)
S = Input(shape = (1,hist_len,64,64,3))
A = Input(shape = (num_action,))
x = Reshape((hist_len,64,64,3))(S)
x = TimeDistributed(Conv2D(32,(8,8),strides = (4,4),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = Flatten()(x)
x = merge([A,x],mode = 'concat')
x = Dense(800,activation = 'relu')(x)
x = Dense(400,activation = 'relu')(x)
x = Dense(1,activation = 'linear')(x)
critic = Model([A,S],x)
return actor,critic
def stack_model(num_action = 1,hist_len = 4, num_filters = 16):
S = Input(shape = (1,64,64,3 * hist_len))
x = Reshape((64,64,3 * hist_len))(S)
x = Conv2D(num_filters,(8,8),strides = (4,4),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = Flatten()(x)
x = Dense(600 if num_filters == 16 else 800,activation = 'relu')(x)
x = Dense(300,activation = 'relu')(x)
x = Dense(num_action,activation = 'linear')(x)
actor = Model(S,x)
S = Input(shape = (1,64,64,3 * hist_len))
A = Input(shape = (num_action,))
x = Reshape((64,64,3 * hist_len))(S)
x = Conv2D(num_filters,(8,8),strides = (4,4),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = Flatten()(x)
x = merge([A,x],mode = 'concat')
x = Dense(600 if num_filters == 16 else 800,activation = 'relu')(x)
x = Dense(300,activation = 'relu')(x)
x = Dense(1,activation = 'linear')(x)
critic = Model([A,S],x)
return actor,critic
def fork_model(num_action = 1,hist_len = 4, num_filters = 16):
S = Input(shape = (1,hist_len,64,64,3))
x = Reshape((hist_len,64,64,3))(S)
x = TimeDistributed(Conv2D(num_filters,(8,8),strides = (4,4),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = Flatten()(x)
x = Dense(600 if num_filters == 16 else 800,activation = 'relu')(x)
x = Dense(400,activation = 'relu')(x)
x = Dense(num_action,activation = 'linear')(x)
actor = Model(S,x)
S = Input(shape = (1,hist_len,64,64,3))
A = Input(shape = (num_action,))
x = Reshape((hist_len,64,64,3))(S)
x = TimeDistributed(Conv2D(num_filters,(8,8),strides = (4,4),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = Flatten()(x)
x = merge([A,x],mode = 'concat')
x = Dense(800 if num_filters == 16 else 1200,activation = 'relu')(x)
x = Dense(400,activation = 'relu')(x)
x = Dense(1,activation = 'linear')(x)
critic = Model([A,S],x)
return actor,critic
def LSTM_model(num_action = 1,hist_len = 4, num_filters = 16):
S = Input(shape = (1,hist_len,64,64,3))
x = Reshape((hist_len,64,64,3))(S)
x = TimeDistributed(Conv2D(num_filters,(8,8),strides = (4,4),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = TimeDistributed(Flatten())(x)
x = LSTM(100 if num_filters == 16 else 200,activation = 'relu')(x)
x = Dense(1,activation = 'linear')(x)
actor = Model(S,x)
S = Input(shape = (1,hist_len,64,64,3))
A = Input(shape = (num_action,))
x = Reshape((hist_len,64,64,3))(S)
x = TimeDistributed(Conv2D(num_filters,(8,8),strides = (4,4),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = TimeDistributed(Flatten())(x)
x = LSTM(100 if num_filters == 16 else 200,activation = 'relu')(x)
x = merge([A,x],mode = 'concat')
x = Dense(50,activation = 'relu')(x)
x = Dense(1,activation = 'linear')(x)
critic = Model([A,S],x)
return actor,critic | mit | 4,400,644,651,622,553,000 | 40.090566 | 116 | 0.591293 | false |
wummel/linkchecker-gui | linkcheck_gui/syntax.py | 1 | 3578 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2011-2016 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from PyQt4 import QtCore, QtGui
def format (color, style=''):
"""Return a QTextCharFormat with the given attributes."""
format = QtGui.QTextCharFormat()
format.setForeground(getattr(QtCore.Qt, color))
if 'bold' in style:
format.setFontWeight(QtGui.QFont.Bold)
if 'italic' in style:
format.setFontItalic(True)
return format
class Highlighter (QtGui.QSyntaxHighlighter):
"""Base class for all highlighters."""
def __init__ (self, document):
"""Initialize rules and styles."""
super(Highlighter, self).__init__(document)
self.rules = []
self.styles = {}
def highlightBlock(self, text):
"""Highlight a text block."""
for expression, format in self.rules:
# get first match
index = expression.indexIn(text)
while index >= 0:
length = expression.matchedLength()
self.setFormat(index, length, format)
# jump to next match
index = expression.indexIn(text, index + length)
self.setCurrentBlockState(0)
def addRule (self, pattern, style):
"""Add a rule pattern with given style."""
self.rules.append((QtCore.QRegExp(pattern), self.styles[style]))
class XmlHighlighter (Highlighter):
"""XML syntax highlighter."""
def __init__(self, document):
"""Set XML syntax rules."""
super(XmlHighlighter, self).__init__(document)
self.styles.update({
'keyword': format('darkBlue'),
'attribute': format('darkGreen'),
'comment': format('darkYellow'),
'string': format('darkMagenta'),
})
# keywords
for reg in ('/>', '>', '<!?[a-zA-Z0-9_]+'):
self.addRule(reg, 'keyword')
# attributes
self.addRule(r"\b[A-Za-z0-9_]+(?=\s*\=)", 'attribute')
# double-quoted string, possibly containing escape sequences
self.addRule(r'"[^"\\]*(\\.[^"\\]*)*"', 'string')
# single-quoted string, possibly containing escape sequences
self.addRule(r"'[^'\\]*(\\.[^'\\]*)*'", 'string')
# comments
self.addRule(r"<!--[^>]*-->", 'comment')
# Treat HTML as XML
HtmlHighlighter = XmlHighlighter
class IniHighlighter (Highlighter):
"""INI syntax highlighter."""
def __init__(self, document):
"""Set INI syntax rules."""
super(IniHighlighter, self).__init__(document)
self.styles.update({
'section': format('darkBlue'),
'property': format('darkGreen'),
'comment': format('darkYellow'),
})
        self.addRule(r'\[[a-zA-Z0-9_]+\]', 'section')
        self.addRule(r'\b[a-zA-Z0-9_]+(?=\s*\=)', 'property')
self.addRule(r'#[^\n]*', 'comment')
| gpl-3.0 | 4,945,731,341,848,713,000 | 35.510204 | 73 | 0.604807 | false |
boisde/Greed_Island | business_logic/order_collector/transwarp/validate.py | 1 | 2044 | #!/usr/bin/env python
# coding:utf-8
import logging
RECORD_NORMAL = 0
RECORD_DELETED = 1
RECORD_CHOICE = (
    (RECORD_NORMAL, u'Normal'),
    (RECORD_DELETED, u'Deleted'),
)
def is_valid_kw(obj, is_update=False, **kw):
mappings = obj.__mappings__
if is_update and kw.get('deleted', None) == RECORD_DELETED:
raise ValueError("Illegal operation: Try to mark %s as deleted with update api." % obj.__name__)
elif is_update:
pass
    # Check that all required arguments are present
else:
args = set(kw.keys())
required = {key_name for key_name, orm_val in mappings.iteritems() if orm_val.nullable is False and orm_val.primary_key is False}
required -= {'deleted', 'create_time', 'update_time'}
if not required.issubset(args):
raise ValueError("Not providing required args: %s." % list(required-args))
    # Check argument types
for key_name, kv in kw.iteritems():
if key_name in mappings:
orm_val = mappings[key_name]
if orm_val.ddl.find('int') != -1:
try:
int(kv)
except ValueError:
raise ValueError("[%s]:[%s][%s] should be type of [%s]." % (key_name, unicode(kv), type(kv), orm_val.ddl))
elif orm_val.ddl.find('char') != -1:
char_len = int(orm_val.ddl[orm_val.ddl.find('(') + 1:orm_val.ddl.find(')')])
                if (not kv) and orm_val.nullable is True:  # the field is nullable and the supplied value is empty
                    continue
                elif not isinstance(kv, unicode) and not isinstance(kv, str):
                    raise ValueError("[%s]:[%s][%s] should be of type str." % (key_name, unicode(kv), type(kv)))
                elif kv and len(kv) > char_len:
                    raise ValueError("[%s]:[%s] should be a str of length [%s]." % (key_name, unicode(kv), char_len))
else:
logging.warning("[%s]:[%s] won't be passed since [%s] is not valid." % (key_name, unicode(kv), key_name)) | mit | 302,318,775,476,008,200 | 43.568182 | 137 | 0.555612 | false |
Rbeuque74/brie-aurore | Brie/brie/websetup.py | 1 | 1479 | # -*- coding: utf-8 -*-
"""Setup the Brie application"""
import logging
import transaction
from tg import config
from brie.config.environment import load_environment
__all__ = ['setup_app']
log = logging.getLogger(__name__)
def setup_app(command, conf, vars):
"""Place any commands to setup brie here"""
load_environment(conf.global_conf, conf.local_conf)
# Load the models
from brie import model
print "Creating tables"
model.metadata.create_all(bind=config['pylons.app_globals'].sa_engine)
manager = model.User()
manager.user_name = u'manager'
manager.display_name = u'Example manager'
manager.email_address = u'[email protected]'
manager.password = u'managepass'
model.DBSession.add(manager)
group = model.Group()
group.group_name = u'managers'
group.display_name = u'Managers Group'
group.users.append(manager)
model.DBSession.add(group)
permission = model.Permission()
permission.permission_name = u'manage'
permission.description = u'This permission give an administrative right to the bearer'
permission.groups.append(group)
model.DBSession.add(permission)
editor = model.User()
editor.user_name = u'editor'
editor.display_name = u'Example editor'
editor.email_address = u'[email protected]'
editor.password = u'editpass'
model.DBSession.add(editor)
model.DBSession.flush()
transaction.commit()
print "Successfully setup"
| bsd-2-clause | -5,205,969,752,594,157,000 | 24.947368 | 90 | 0.698445 | false |
nexdatas/configtool | test/__main__.py | 1 | 4436 | #!/usr/bin/env python
# This file is part of nexdatas - Tango Server for NeXus data writer
#
# Copyright (C) 2012-2017 DESY, Jan Kotanski <[email protected]>
#
# nexdatas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nexdatas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nexdatas. If not, see <http://www.gnu.org/licenses/>.
# \package test nexdatas
# \file runtest.py
# the unittest runner
#
import sys
import unittest
from PyQt5.QtWidgets import QApplication
import AttributeDlg_test
import ConnectDlg_test
import DimensionsDlg_test
import DefinitionDlg_test
import GroupDlg_test
import FieldDlg_test
import NodeDlg_test
import ComponentItem_test
import ComponentModel_test
import DomTools_test
import RichAttributeDlg_test
import LinkDlg_test
import StrategyDlg_test
import LabeledObject_test
import CommonDataSourceDlg_test
import DataSourceDlg_test
import CommonDataSource_test
import DataSource_test
import DataSourceMethods_test
try:
__import__("PyTango")
# if module PyTango avalable
PYTANGO_AVAILABLE = True
except ImportError as e:
PYTANGO_AVAILABLE = False
print("PyTango is not available: %s" % e)
# list of available databases
DB_AVAILABLE = []
# main function
def main():
# test server
# ts = None
# test suit
suite = unittest.TestSuite()
app = QApplication([])
NodeDlg_test.app = app
DefinitionDlg_test.app = app
AttributeDlg_test.app = app
ConnectDlg_test.app = app
DimensionsDlg_test.app = app
GroupDlg_test.app = app
ComponentItem_test.app = app
DomTools_test.app = app
RichAttributeDlg_test.app = app
LinkDlg_test.app = app
StrategyDlg_test.app = app
LabeledObject_test.app = app
CommonDataSourceDlg_test.app = app
DataSourceDlg_test.app = app
CommonDataSource_test.app = app
DataSource_test.app = app
DataSourceMethods_test.app = app
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(AttributeDlg_test))
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(LinkDlg_test))
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(StrategyDlg_test))
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(ConnectDlg_test))
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(DimensionsDlg_test))
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(DefinitionDlg_test))
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(GroupDlg_test))
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(FieldDlg_test))
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(RichAttributeDlg_test))
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(NodeDlg_test))
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(ComponentItem_test))
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(ComponentModel_test))
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(DomTools_test))
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(LabeledObject_test))
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(
CommonDataSourceDlg_test))
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(DataSourceDlg_test))
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(CommonDataSource_test))
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(DataSource_test))
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(DataSourceMethods_test))
# test runner
runner = unittest.TextTestRunner()
# test result
result = runner.run(suite).wasSuccessful()
sys.exit(not result)
# if ts:
# ts.tearDown()
if __name__ == "__main__":
main()
| gpl-3.0 | -8,829,642,283,114,767,000 | 26.725 | 79 | 0.732867 | false |
chrisxue815/leetcode_python | problems/test_0093_iterative_post_validate.py | 1 | 1973 | import unittest
class Solution:
def restoreIpAddresses(self, s):
"""
:type s: str
:rtype: List[str]
"""
result = []
slen = len(s)
# [0,p1) [p1,p2) [p2,p3) [p3,slen)
for p1 in range(1, 4):
for p2 in range(p1 + 1, p1 + 4):
for p3 in range(p2 + 1, p2 + 4):
l4 = slen - p3
if l4 < 1 or l4 > 3:
continue
if p1 > 1 and s[0] == '0':
continue
l2 = p2 - p1
if l2 > 1 and s[p1] == '0':
continue
l3 = p3 - p2
if l3 > 1 and s[p2] == '0':
continue
if l4 > 1 and s[p3] == '0':
continue
s1 = s[0:p1]
b1 = int(s1)
if b1 > 255:
continue
s2 = s[p1:p2]
b2 = int(s2)
if b2 > 255:
continue
s3 = s[p2:p3]
b3 = int(s3)
if b3 > 255:
continue
s4 = s[p3:slen]
b4 = int(s4)
if b4 > 255:
continue
result.append(s1 + '.' + s2 + '.' + s3 + '.' + s4)
return result
class Test(unittest.TestCase):
def test(self):
self._test('25525511135', [
'255.255.11.135',
'255.255.111.35',
])
self._test('10999', [
'10.9.9.9',
'1.0.99.9',
'1.0.9.99',
# Should not contain 1.9.9.9
])
def _test(self, s, expected):
actual = Solution().restoreIpAddresses(s)
self.assertCountEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
| unlicense | 2,302,830,249,286,819,600 | 27.594203 | 70 | 0.331475 | false |
papallas/baxter_cashier | scripts/baxter_cashier_manipulation/src/environment_factory.py | 1 | 6798 | #!/usr/bin/env python
"""
Factory for Environments.
This file contains some static classes that represent environments in real
life. If Baxter is placed in a real environment, say the "Robotics Lab", then
we want to define the obstacles around Baxter in that specific environment.
This module achieves exactly that: for each environment Baxter can be in, we
define the obstacles around him, and by using the Factory and Template design
patterns we keep the code easily extensible.
If you need to define a new environment here are the steps:
1. Define a class similar to the one listed below (`RoboticsLabEnvironment`),
   but make sure the obstacles implemented in it match your own obstacles in
   your environment, and make sure you give the class a sensible name.
2. In the `EnvironmentFactory` class, define a top-level attribute with the
   name of your new class (see the one already there:
   `_robotics_lab_environment`).
3. Implement your getter, such as `def get_robotics_lab_environment():`, and
   use similar logic to return your new class.
4. In `moveit_controller.py` find the line
`EnvironmentFactory.get_robotics_lab_environment()` and change it to match
   your new getter method.
An illustrative, hypothetical example class following these steps is sketched
at the end of this module.
Copyright (C) 2016/2017 The University of Leeds and Rafael Papallas
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import copy
from geometry_msgs.msg import PoseStamped
class EnvironmentFactory:
"""
Environment Factory implementing the design pattern.
    The getters for the different environments are defined here; this is the
    class used by other scripts to obtain the environment instances they
    require.
"""
_robotics_lab_environment = None
@staticmethod
def initialize():
"""Initialise each environment."""
EnvironmentFactory._robotics_lab_environment = RoboticsLabEnvironment()
@staticmethod
def get_robotics_lab_environment():
"""Will return the robotics lab environment."""
return EnvironmentFactory._robotics_lab_environment.clone()
class Obstacle:
"""This represent an obstacle in real world."""
    def __init__(self, obstacle_name, x, y, z, shape_size):
"""
Will configure the obstacle details and set it's attributes.
- obstalce_name: is the name of the obstacle.
- x, y and z: is the position or pose of the obstacle in the world.
- shape_size: is a triple tuple with height, width and depth of the
object or obstacle.
"""
self.name = obstalce_name
# The pose of where the obstacle is
self.pose = PoseStamped()
self.pose.pose.position.x = x
self.pose.pose.position.y = y
self.pose.pose.position.z = z
# Pose Header Frame ID is None because it needs to be set for the
# specific scene, which is not available at the time the obstacle
# is created.
self.pose.header.frame_id = None
# This is a triple tuple (h, w, z) representing the size of the
# obstacle
self.size = shape_size
def set_frame_id(self, id):
"""
Will set the pose's header frame ID.
It is important, for the obstacle to appear in the MoveIt Rviz to set
this to `robot.get_planning_frame()`, since we don't have this info
in here, we need to set this later. Make sure you have set this
otherwise you will not be able to visualise the obstacle in Rviz.
"""
self.pose.header.frame_id = id
class Environment:
"""This is the template class of the Template design pattern."""
    # _obstacles holds the list of obstacles for the environment
_obstacles = None
def clone(self):
"""
Clone itself.
Required method to clone itself when Factory is used to get the
instance.
"""
pass
def get_obstacles(self):
"""Will return the list with obstacles."""
return self._obstacles
class RoboticsLabEnvironment(Environment):
"""
    This class represents the University of Leeds Robotics Laboratory.
    The obstacles defined here are specific to that environment. This is
    a subclass of the environment template of the Template design pattern.
"""
def __init__(self):
"""
Default constructor.
        Will initialise the obstacles attribute to an empty list and call the
        method that creates the obstacles.
"""
self._obstacles = []
        self._create_obstacles()
    def _create_obstacles(self):
"""
Generate and append the obstacles to the class.
In here are the obstacles relevant to this specific environment.
"""
        side_wall = Obstacle(obstacle_name="side_wall",
x=0.6,
y=1,
z=0,
shape_size=(4, 0.2, 3))
self._obstacles.append(side_wall)
        back_wall = Obstacle(obstacle_name="back_wall",
x=-1,
y=0,
z=0,
shape_size=(0.2, 4, 3))
self._obstacles.append(back_wall)
        table = Obstacle(obstacle_name="table",
x=0.7,
y=-0.1,
z=-0.53,
shape_size=(0.8, 1.2, 0.7))
self._obstacles.append(table)
        camera_tripod = Obstacle(obstacle_name="camera_tripod",
x=0.6,
y=-1.2,
z=-0.54,
shape_size=(1, 0.3, 1.8))
self._obstacles.append(camera_tripod)
# width, length, height
def clone(self):
"""Required method for the Template design pattern."""
return copy.copy(self)
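# What follows is a purely illustrative, hypothetical sketch of steps 1-3 from
# the module docstring: a made-up environment class with made-up obstacle
# names and dimensions (they are assumptions, not measurements of any real
# room). Adapt the values to your own setup and remember to register the
# class and a getter inside EnvironmentFactory as described in steps 2 and 3.
class ExampleOfficeEnvironment(Environment):
    """Hypothetical example environment, included only to illustrate the pattern."""
    def __init__(self):
        """Initialise the obstacle list and populate it."""
        self._obstacles = []
        self._create_obstacles()
    def _create_obstacles(self):
        """Define the (made-up) obstacles of this example environment."""
        desk = Obstacle(obstacle_name="desk",
                        x=0.8,
                        y=0.0,
                        z=-0.5,
                        shape_size=(0.8, 1.6, 0.7))
        self._obstacles.append(desk)
    def clone(self):
        """Required method for the Template design pattern."""
        return copy.copy(self)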
| gpl-3.0 | 8,566,268,127,867,704,000 | 34.968254 | 79 | 0.604884 | false |
fynjah/django-pimp-my-filter | filter_manager/views.py | 1 | 7132 | import datetime
try:
import simplejson as json
except ImportError:
from django.utils import simplejson as json
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound, HttpResponseForbidden
from django.shortcuts import render_to_response, RequestContext
from django.core.context_processors import csrf
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.generic import GenericRelation
from filter_manager.models import Filter, Condition, LOGICAL_OPERATORS
@login_required
def save_filter(request):
if request.method == "POST" and request.is_ajax():
if 'filter' in request.POST:
new_filter = json.loads(request.POST['filter'])
app_model = '%s.%s' % (new_filter['app'],new_filter['model'])
if settings.PIMP_MY_FILTER['ALLOWED_MODELS']:
if not app_model in settings.PIMP_MY_FILTER['ALLOWED_MODELS']:
return HttpResponseForbidden('[{"error":"Forbidden."}]',
mimetype='application/json; charset=utf8')
else:
return HttpResponseForbidden(
'[{"error":"Forbidden. Check PIMP_MY_FILTER Settings."}]',
mimetype='application/json; charset=utf8',
)
ct = ContentType.objects.get_by_natural_key(new_filter['app'],
new_filter['model'])
if new_filter['quick'] == 'true':
quick = True
else:
quick = False
f = Filter(name=new_filter['name'],
user_id=request.user.id,
quick=quick,
content_type = ct,)
f.save()
for k,c in new_filter['conditions'].iteritems():
data = c['value_data']
if (data['type'] == 'ForeignKey'
or data['type'] == 'ManyToManyField'
or data['type'] == 'OneToOneField'):
value = data['fk_id']
elif (data['type'] == 'BooleanField'
or data['type'] == 'NullBooleanField'
or data['type'] == 'FieldFile'
or data['type'] == 'FileField'
or data['type'] == 'ImageField'):
if c['value'] == 'on':
value = True
else:
value = False
else:
value = c['value']
con = Condition(filter=f,
operator = c['operator'],
field_type = data['type'],
value=value,
field=c['field'],)
con.save()
r = {'filter_id':f.id}
return HttpResponse(json.dumps(r, indent = 4 * ' '),
mimetype='application/json; charset=utf8')
else:
return HttpResponseForbidden('[{"error":"Forbidden. Wrong headers."}]',
mimetype='application/json; charset=utf8')
@login_required
def get_structure(request):
if request.method == "POST" and request.is_ajax():
if 'app' in request.POST and 'model' in request.POST:
fields = {}
ct = ContentType.objects.get_by_natural_key(request.POST['app'],
request.POST['model'])
model = ContentType.model_class(ct)
for i,x in enumerate(model._meta.get_all_field_names()):
obj, m, direct, m2m = model._meta.get_field_by_name(x)
if obj.name == 'id' or not direct or isinstance(obj, GenericRelation):
continue
f = {}
f.update({"type":obj.get_internal_type()})
f.update({"name":obj.name})
fields.update( {i: f} )
r = {}
r.update({'fields':fields})
r.update({'operators':LOGICAL_OPERATORS})
return HttpResponse(json.dumps(r, indent = 4 * ' '),
mimetype='application/json; charset=utf8')
return HttpResponseForbidden('[{"error":"Forbidden"}]',
mimetype='application/json; charset=utf8')
def use_filter_internal(filter_id):
if filter_id:
try:
flt = Filter.objects.only('content_type').get(pk = filter_id)
except Filter.DoesNotExist:
return None
model = ContentType.model_class(flt.content_type)
kwargs = {}
for c in flt.conditions.all():
field = None
lookup = c.operator
field = "%s%s" % (c.field, lookup)
kwargs.update({field:c.value})
return model.objects.filter(**kwargs)
else:
return None
@login_required
def use_filter(request):
if request.is_ajax():
if 'filter_id' in request.GET:
try:
flt = Filter.objects.only('content_type').get(pk = request.GET['filter_id'])
except Filter.DoesNotExist:
return HttpResponseForbidden('[{"error":"Filter Not found."}]',
mimetype='application/json; charset=utf8')
model = ContentType.model_class(flt.content_type)
kwargs = {}
for c in flt.conditions.all():
field = None
lookup = c.operator
field = "%s%s" % (c.field, lookup)
kwargs.update({field:c.value})
qs = model.objects.filter(**kwargs)
response = {}
for i,q in enumerate(qs):
field_list = {}
for f in q._meta.get_all_field_names():
obj, model, direct, m2m = q._meta.get_field_by_name(f)
if not direct or isinstance(obj, GenericRelation):
continue
if m2m:
l = {}
val = obj.value_from_object(q)
for m in obj.value_from_object(q):
l.update({m.pk:m.__unicode__()})
field_list.update({f:l})
elif obj.rel:
val = q.__getattribute__(obj.name)
if val:
l = {val.pk:val.__unicode__()}
field_list.update({obj.name:l})
else:
field_list.update({f:None})
else:
field_list.update({f:obj.value_to_string(q)})
response.update({i:field_list})
r = json.dumps(response, indent = 4 * ' ')
return HttpResponse(r,
mimetype='application/json; charset=utf8')
return HttpResponseForbidden('[{"error":"Forbidden. Wrong headers."}]',
mimetype='application/json; charset=utf8')
@login_required
def get_typeahead(request):
if request.is_ajax() and request.method == "POST":
if ('field' in request.POST and
'app' in request.POST and
'model' in request.POST):
ct = ContentType.objects.get_by_natural_key(request.POST['app'],
request.POST['model'])
instance = ContentType.model_class(ct)
f = dict([(x,x) for x in instance._meta.get_all_field_names() ])
try:
o = f[request.POST['field']]
o = instance._meta.get_field_by_name(o)[0]
except KeyError:
return HttpResponseForbidden('[{"error":"Forbidden"}]',
mimetype='application/json; charset=utf8')
o = o.related.parent_model
obj_list = o.objects.all()
lst = {}
for i,obj in enumerate(obj_list):
l = {}
l.update({"id":obj.id})
l.update({"unicode":obj.__unicode__()})
#not sure about __unicode__, actually
lst.update({i:l})
return HttpResponse(json.dumps(lst, indent = 4 * ' '),
mimetype='application/json; charset=utf8')
else:
return HttpResponseForbidden('[{"error":"Forbidden. Wrong headers."}]',
mimetype='application/json; charset=utf8')
def get_filters_by_user(request):
if request.is_ajax():
user_filters = Filter.objects.filter(Q(user = request.user.id)|Q(for_all = True))
f_list = {}
for i,f in enumerate(user_filters):
f_list.update({i:{'id':f.pk, 'name':f.name, 'quick':f.quick}})
return HttpResponse(json.dumps(f_list, indent = 4 * ' '),
mimetype='application/json; charset=utf8')
return HttpResponseForbidden('[{"error":"Forbidden. Wrong headers."}]',
mimetype='application/json; charset=utf8')
| bsd-3-clause | -1,585,255,892,386,726,400 | 32.483568 | 103 | 0.651711 | false |
LaurentClaessens/phystricks | src/MathStructures.py | 1 | 3272 | # -*- coding: utf8 -*-
###########################################################################
# This is part of the module phystricks
#
# phystricks is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# phystricks is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with phystricks.py. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
# copyright (c) Laurent Claessens, 2010,2011,2013-2017
# email: [email protected]
from sage.rings.rational import Rational
from sage.all import latex
from Utilities import *
from SmallComputations import MultipleBetween
from AngleMeasure import AngleMeasure
class PolarCoordinates(object):
def __init__(self,r,value_degree=None,value_radian=None):
self.r = r
self.measure=AngleMeasure(value_degree=value_degree,value_radian=value_radian)
self.degree=self.measure.degree
self.radian=self.measure.radian
def __str__(self):
return "PolarCoordinates, r=%s,degree=%s,radian=%s"%(str(self.r),str(self.degree),str(self.radian))
def DegreeAngleMeasure(x):
return AngleMeasure(value_degree=x)
def RadianAngleMeasure(x):
return AngleMeasure(value_radian=x)
class AxesUnit(object):
def __init__(self,numerical_value,latex_symbol=""):
try :
numerical_value=Rational(numerical_value)
except TypeError :
pass
self.numerical_value=numerical_value
self.latex_symbol=latex_symbol
def symbol(self,x):
return latex(x)+self.latex_symbol
def place_list(self,mx,Mx,frac=1,mark_origin=True):
"""
        Return a list of pairs (value, label) where
        1. the values are all the integer multiples of
        <frac>*self.numerical_value
        between mx and Mx, and
        2. each label is the corresponding multiple of the basis unit.
        Give <frac> as a literal real. Recall that Python evaluates 1/2 to 0. If you pass 0.5, it will be converted back to 1/2 for a nice display.
"""
try :
frac=Rational(frac) # If the user enters "0.5", it is converted to 1/2
except TypeError :
pass
if frac==0:
raise ValueError,"frac is zero in AxesUnit.place_list(). Maybe you ignore that python evaluates 1/2 to 0 ? (writes literal 0.5 instead) \n Or are you trying to push me in an infinite loop ?"
l=[]
k=var("TheTag")
for x in MultipleBetween(frac*self.numerical_value,mx,Mx,mark_origin):
if self.latex_symbol == "":
l.append((x,"$"+latex(x)+"$"))
else :
pos=(x/self.numerical_value)*k
text="$"+latex(pos).replace("TheTag",self.latex_symbol)+"$" # This risks to be Sage-version dependent.
l.append((x,text))
return l
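# Hypothetical usage sketch (not part of the original module): assuming Sage's
# pi is available in the calling scope,
#   AxesUnit(pi, "\\pi").place_list(-7, 7, frac=0.5)
# should return (value, label) pairs for the half-multiples of pi between -7
# and 7, each label being the LaTeX of the multiple with "\\pi" substituted
# for the internal placeholder symbol, ready to be used as axis graduations.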
| gpl-3.0 | 3,012,877,965,020,561,000 | 40.417722 | 202 | 0.623472 | false |
ENCODE-DCC/encoded | src/encoded/tests/test_upgrade_file.py | 1 | 8535 | import pytest
def test_file_upgrade(upgrader, file_1):
value = upgrader.upgrade('file', file_1, target_version='2')
assert value['schema_version'] == '2'
assert value['status'] == 'current'
def test_file_upgrade2(root, upgrader, file_2, file, threadlocals, dummy_request):
context = root.get_by_uuid(file['uuid'])
dummy_request.context = context
value = upgrader.upgrade('file', file_2, target_version='3', context=context)
assert value['schema_version'] == '3'
assert value['status'] == 'in progress'
def test_file_upgrade3(root, upgrader, file_3, file, threadlocals, dummy_request):
context = root.get_by_uuid(file['uuid'])
dummy_request.context = context
value = upgrader.upgrade('file', file_3, target_version='4', context=context)
assert value['schema_version'] == '4'
assert value['lab'] != ''
assert value['award'] != ''
assert 'download_path' not in value
def test_file_upgrade4(root, upgrader, file_4, file, threadlocals, dummy_request):
context = root.get_by_uuid(file['uuid'])
dummy_request.context = context
content_md5sum = '0123456789abcdef0123456789abcdef'
fake_registry = {
'backfill_2683': {file['md5sum']: content_md5sum},
}
value = upgrader.upgrade(
'file', file_4, target_version='5', context=context, registry=fake_registry)
assert value['schema_version'] == '5'
assert value['file_format'] == 'bed'
assert value['file_format_type'] == 'bedMethyl'
assert value['output_type'] == 'base overlap signal'
assert value['content_md5sum'] == content_md5sum
def test_file_upgrade5(root, upgrader, registry, file_5, file, threadlocals, dummy_request):
#context = root.get_by_uuid(file['uuid'])
#dummy_request.context = context
value = upgrader.upgrade(
'file', file_5, current_version='5', target_version='6', registry=registry)
assert value['schema_version'] == '6'
assert value['output_type'] == 'signal of all reads'
def test_file_upgrade7(upgrader, file_7):
value = upgrader.upgrade('file', file_7, current_version='7', target_version='8')
assert value['schema_version'] == '8'
def test_file_upgrade8(upgrader, file_8a, file_8b):
value_a = upgrader.upgrade('file', file_8a, current_version='8', target_version='9')
assert value_a['schema_version'] == '9'
assert 'assembly' not in value_a
value_b = upgrader.upgrade('file', file_8b, current_version='8', target_version='9')
assert value_b['schema_version'] == '9'
assert 'supersedes' in value_b
assert 'supercedes' not in value_b
def test_file_upgrade_9_to_10(upgrader, file_9):
value = upgrader.upgrade('file', file_9, current_version='9', target_version='10')
assert value['date_created'] == '2017-04-28T00:00:00.000000+00:00'
def test_file_upgrade_10_to_11(upgrader, file_10):
value = upgrader.upgrade('file', file_10, current_version='10', target_version='11')
assert value['schema_version'] == '11'
assert value['no_file_available'] is False
def test_file_upgrade_12_to_13(upgrader, file_12):
value = upgrader.upgrade('file', file_12, current_version='12', target_version='13')
assert value['schema_version'] == '13'
assert 'run_type' not in value
def test_file_upgrade_13_to_14(upgrader, file_13):
value = upgrader.upgrade('file', file_13, current_version='13', target_version='14')
assert value['schema_version'] == '14'
assert value['output_type'] == 'candidate Cis-Regulatory Elements'
def test_file_upgrade_14_to_15(upgrader,
file_14_optimal,
file_14_conservative,
file_14_pseudoreplicated):
value = upgrader.upgrade(
'file', file_14_optimal, current_version='14', target_version='15'
)
assert value['schema_version'] == '15'
assert value['output_type'] == 'optimal IDR thresholded peaks'
value = upgrader.upgrade(
'file', file_14_conservative, current_version='14', target_version='15'
)
assert value['schema_version'] == '15'
assert value['output_type'] == 'conservative IDR thresholded peaks'
value = upgrader.upgrade(
'file',
file_14_pseudoreplicated,
current_version='14',
target_version='15'
)
assert value['schema_version'] == '15'
assert value['output_type'] == 'pseudoreplicated IDR thresholded peaks'
def test_file_upgrade_15_to_16(upgrader, file_15):
value = upgrader.upgrade('file', file_15, current_version='15', target_version='16')
assert value['schema_version'] == '16'
assert 'run_type' not in value
assert 'read_length' not in value
def test_file_upgrade_16_to_17(upgrader, file_16):
value = upgrader.upgrade('file', file_16, current_version='16', target_version='17')
assert value['schema_version'] == '17'
assert 'run_type' not in value
assert 'read_length' not in value
def test_file_upgrade_17_to_18(upgrader, file_17):
value = upgrader.upgrade('file', file_17, current_version='17', target_version='18')
assert value['schema_version'] == '18'
assert 'assembly' not in value
def test_file_upgrade_18_to_19(upgrader, file_18):
value = upgrader.upgrade('file', file_18, current_version='18', target_version='19')
assert value['schema_version'] == '19'
assert value['output_type'] == 'representative DNase hypersensitivity sites (rDHSs)'
def test_file_upgrade_19_to_20(upgrader, file_19):
value = upgrader.upgrade('file', file_19, current_version='19', target_version='20')
assert value['schema_version'] == '20'
assert value['run_type'] == 'single-ended'
assert value['notes'] == 'The run_type of this file was automatically upgraded by ENCD-5258.'
def test_file_upgrade_20_to_21(root, testapp, upgrader, registry, file_dnase_enrichment, file_chip_enrichment):
value = upgrader.upgrade('file', file_dnase_enrichment, registry=registry, current_version='20', target_version='21')
assert value['schema_version'] == '21'
assert value['output_type'] == 'FDR cut rate'
value = upgrader.upgrade('file', file_chip_enrichment, registry=registry, current_version='20', target_version='21')
assert value['schema_version'] == '21'
assert value['output_type'] == 'enrichment'
def test_file_upgrade_21_to_22(root, testapp, upgrader, registry, file_21_22):
# https://encodedcc.atlassian.net/browse/ENCD-5286
value = upgrader.upgrade('file', file_21_22, current_version='21', target_version='22')
assert value['schema_version'] == '22'
assert value['replicate'] == '70d6e704-bba5-4475-97b8-03bf717eecf3'
assert value['notes'] == 'Prior entry. This file lacks its correct replicate specified.'
def test_file_upgrade_22_to_23(upgrader, file_22):
value = upgrader.upgrade('file', file_22, current_version='22', target_version='23')
assert value['schema_version'] == '23'
assert value['output_type'] == 'spike-ins'
def test_file_upgrade_23_to_24(root, testapp, upgrader, registry, file_23):
value = upgrader.upgrade(
'file', file_23, current_version='23', target_version='24'
)
assert value['schema_version'] == '24'
assert value['output_type'] == 'pseudo-replicated peaks'
def test_file_upgrade_24_to_25(root, testapp, upgrader, registry, file_24):
value = upgrader.upgrade(
'file', file_24, current_version='24', target_version='25'
)
assert value['schema_version'] == '25'
assert value['output_type'] == 'smoothed methylation state at CpG'
def test_file_upgrade_25_to_26(upgrader, file_25):
value = upgrader.upgrade('file', file_25, current_version='25', target_version='26')
assert value['schema_version'] == '26'
assert value['output_type'] == 'representative DNase hypersensitivity sites'
def test_file_upgrade_26_to_27(upgrader, file_26):
value = upgrader.upgrade('file', file_26, current_version='26', target_version='27')
assert value['schema_version'] == '27'
assert value['output_type'] == 'pseudoreplicated peaks'
def test_file_upgrade_27_to_28(upgrader, file_27):
value = upgrader.upgrade('file', file_27, current_version='27', target_version='28')
assert value['schema_version'] == '28'
assert value['output_type'] == 'exclusion list regions'
def test_file_upgrade_28_to_29(upgrader, file_28):
value = upgrader.upgrade('file', file_28, current_version='28', target_version='29')
assert value['schema_version'] == '29'
assert 'read_length' not in value
| mit | 7,095,645,240,905,272,000 | 40.033654 | 121 | 0.667018 | false |
stefanwebb/tensorflow-models | tensorflow_models/models/vae_normal_obs.py | 1 | 5404 | # MIT License
#
# Copyright (c) 2017, Stefan Webb. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow_models as tf_models
def create_placeholders(settings):
x = tf.placeholder(tf.float32, shape=tf_models.batchshape(settings), name='samples')
z = tf.placeholder(tf.float32, shape=tf_models.latentshape(settings), name='codes')
return x, z
def create_prior(settings):
dist_prior = tf_models.standard_normal(tf_models.latentshape(settings))
return tf.identity(dist_prior.sample(), name='p_z/sample')
def create_encoder(settings, reuse=True):
encoder_network = settings['architecture']['encoder']['fn']
x_placeholder = tf_models.samples_placeholder()
assert(not x_placeholder is None)
with tf.variable_scope('encoder', reuse=reuse):
mean_z, diag_stdev_z = encoder_network(settings, x_placeholder, is_training=False)
dist_z_given_x = tf.contrib.distributions.MultivariateNormalDiag(mean_z, diag_stdev_z)
encoder = tf.identity(dist_z_given_x.sample(name='sample'), name='q_z_given_x/sample')
return encoder
def create_decoder(settings, reuse=True):
if 'transformations' in settings and 'rescale' in settings['transformations']:
min_val = settings['transformations']['rescale'][0]
max_val = settings['transformations']['rescale'][1]
else:
min_val = 0.
max_val = 1.
decoder_network = settings['architecture']['decoder']['fn']
z_placeholder = tf_models.codes_placeholder()
assert(not z_placeholder is None)
with tf.variable_scope('decoder', reuse=reuse):
mean_x, diag_stdev_x = decoder_network(settings, z_placeholder, is_training=False)
dist_x_given_z = tf.contrib.distributions.MultivariateNormalDiag(mean_x, diag_stdev_x)
decoder = tf.identity(tf.clip_by_value(dist_x_given_z.sample(), min_val, max_val), name='p_x_given_z/sample')
return decoder
def create_probs(settings, inputs, is_training, reuse=False):
encoder_network = settings['architecture']['encoder']['fn']
decoder_network = settings['architecture']['decoder']['fn']
dist_prior = tf_models.standard_normal(tf_models.latentshape(settings))
# Use recognition network to determine mean and (log) variance of Gaussian distribution in latent space
with tf.variable_scope('encoder', reuse=reuse):
mean_z, diag_stdev_z = encoder_network(settings, inputs, is_training=is_training)
dist_z_given_x = tf.contrib.distributions.MultivariateNormalDiag(mean_z, diag_stdev_z)
# Draw one sample z from Gaussian distribution
eps = tf.random_normal(tf_models.latentshape(settings), 0, 1, dtype=tf.float32)
z_sample = tf.add(mean_z, tf.multiply(diag_stdev_z, eps))
# Use generator to determine mean of Bernoulli distribution of reconstructed input
with tf.variable_scope('decoder', reuse=reuse):
mean_x, diag_stdev_x = decoder_network(settings, z_sample, is_training=is_training)
dist_x_given_z = tf.contrib.distributions.MultivariateNormalDiag(tf_models.flatten(mean_x), tf_models.flatten(diag_stdev_x))
#print('*** Debugging ***')
#print('mean_x.shape', mean_x.shape)
#print('diag_stdev_x.shape', diag_stdev_x.shape)
#print('dist_x_given_z.sample().shape', dist_x_given_z.sample().shape)
#print('dist_x_given_z.log_prob(tf_models.flatten(inputs)).shape', dist_x_given_z.log_prob(tf_models.flatten(inputs)).shape)
lg_p_x_given_z = tf.identity(dist_x_given_z.log_prob(tf_models.flatten(inputs)), name='p_x_given_z/log_prob')
lg_p_z = tf.identity(dist_prior.log_prob(z_sample), name='p_z/log_prob')
lg_q_z_given_x = tf.identity(dist_z_given_x.log_prob(z_sample), name='q_z_given_x/log_prob')
return lg_p_x_given_z, lg_p_z, lg_q_z_given_x
# TODO: Fix this to be normal distribution!
def lg_likelihood(x, z, settings, reuse=True, is_training=False):
decoder_network = settings['architecture']['decoder']['fn']
with tf.variable_scope('model'):
with tf.variable_scope('decoder', reuse=reuse):
logits_x = decoder_network(settings, z, is_training=is_training)
dist_x_given_z = tf.contrib.distributions.Bernoulli(logits=tf_models.flatten(logits_x), dtype=tf.float32)
return tf.reduce_sum(dist_x_given_z.log_prob(tf_models.flatten(x)), 1)
def lg_prior(z, settings, reuse=True, is_training=False):
dist_prior = tf_models.standard_normal(z.shape)
return dist_prior.log_prob(z)
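# Illustrative note (an assumption about how these log-probabilities are
# typically combined, not code taken from this repository): a Monte-Carlo
# estimate of the ELBO for this model would be
#   elbo = tf.reduce_mean(lg_p_x_given_z + lg_p_z - lg_q_z_given_x)
# with -elbo used as the training loss.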
| mit | 4,858,192,034,011,259,000 | 45.586207 | 125 | 0.744078 | false |
lamestation/packthing | packthing/util.py | 1 | 7019 | import errno
import os
import platform
import shutil
import string
import subprocess
import sys
import tarfile
import zipfile
from contextlib import contextmanager
def get_platform():
_platform = dict()
_platform["system"] = platform.system().lower()
machine = platform.machine().lower()
if machine == "x86_64":
machine = "amd64"
_platform["machine"] = machine
return _platform
def warning(*args):
print("WARNING:" + " ".join(args))
def error(*objs):
blocks = []
for b in " ".join(objs).split("\n"):
if len(blocks) > 0:
blocks.append(" " + b)
else:
blocks.append(b)
print("\nERROR:" + "\n".join(blocks))
print()
sys.exit(1)
def subtitle(text):
line = (80 - (len(text) + 2)) // 2
print("-" * line, text, "-" * (line + (len(text) % 2)))
def title(text):
line = (80 - (len(text) + 2)) // 2
print("=" * line, text.upper(), "=" * (line + (len(text) % 2)))
def headline(func):
def wrapper(*args, **kwargs):
title(func.__name__)
res = func(*args, **kwargs)
return res
return wrapper
@contextmanager
def pushd(newDir):
previousDir = os.getcwd()
os.chdir(newDir)
yield
os.chdir(previousDir)
def copy(src, dest, verbose=True, permissions=0o644):
destfile = os.path.join(dest, os.path.basename(src))
if verbose:
print("Copy", src, "to dir", dest)
mkdir(dest)
shutil.copy(src, destfile)
os.chmod(destfile, permissions)
def command(args, verbose=True, strict=True, stdinput=None, abort=None):
if abort is None:
abort = True
if verbose:
print("-", " ".join(args))
if not args:
error("Attempting to run empty command.")
try:
process = subprocess.Popen(
args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE
)
except OSError as e:
if abort:
error("Command '" + args[0] + "' not found; exiting.")
return
if stdinput is not None:
stdinput = stdinput.encode()
out, err = process.communicate(input=stdinput)
out = out.decode()
err = err.decode()
if strict:
if process.returncode:
print(err)
raise subprocess.CalledProcessError(process.returncode, args, err)
return out, err
def command_in_dir(args, newdir, verbose=True, strict=True, stdinput=None):
if verbose:
print("DIR:", newdir)
with pushd(newdir):
out, err = command(args, verbose=verbose, strict=strict)
return out, err
def table(path, version, url):
return "%30s %10s %s" % (path, version, url)
def make(path, args):
with pushd(path):
args.insert(0, "make")
for m in ["make", "mingw32-make"]:
args[0] = m
failed = 0
try:
subprocess.check_call(args)
except OSError:
failed = 1
except subprocess.CalledProcessError as e:
error("Failed to build project '" + path + "'")
if not failed:
return
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def mkdir(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def tar_archive(name, files):
shortname = os.path.basename(name)
name += ".tgz"
archive = tarfile.open(name=name, mode="w:gz")
for f in files:
archive.add(name=f, arcname=os.path.join(shortname, f), recursive=False)
archive.close()
def zip_archive(name, files):
shortname = os.path.basename(name)
name += ".zip"
archive = zipfile.ZipFile(name, "w")
for f in files:
archive.write(
filename=f,
arcname=os.path.join(shortname, f),
compress_type=zipfile.ZIP_DEFLATED,
)
archive.close()
def from_scriptroot(filename):
currentpath = os.path.dirname(os.path.abspath(__file__))
return os.path.join(currentpath, filename)
def get_template_text(template):
template = os.path.join("template", template)
template = from_scriptroot(template)
return open(template, "r").read()
def get_template(template):
return string.Template(get_template_text(template))
# python-chroot-builder
# Copyright (C) 2012 Ji-hoon Kim
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------
def ldd(filenames):
libs = []
for x in filenames:
p = subprocess.Popen(["ldd", x], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result = p.stdout.readlines()
for x in result:
s = x.split()
s.pop(1)
s.pop()
if len(s) == 2:
libs.append(s)
return libs
# -----------------------------------------
def extract_libs(files, libs):
resultlibs = []
for f in files:
for l in ldd([which(f)]):
for lib in libs:
if l[0].find(lib) == -1:
pass
else:
resultlibs.append(l)
return sorted(list(set(tuple(lib) for lib in resultlibs)))
def write(text, filename):
f = open(filename, "w")
f.seek(0)
f.write(text)
f.close()
def create(text, filename, executable=False):
print("Create", filename)
mkdir(os.path.dirname(filename))
f = open(filename, "w")
f.seek(0)
f.write(text)
f.close()
if executable:
os.chmod(filename, 0o755)
else:
os.chmod(filename, 0o644)
def root():
if os.geteuid() != 0:
error("This configuration requires root privileges!")
def cksum(files):
print("cksum:")
for f in files:
try:
out, err = command(["cksum", f], verbose=False)
except subprocess.CalledProcessError as e:
error("Failed to checksum file:", f)
print("| " + out.replace("\n", ""))
| gpl-3.0 | 2,858,408,366,746,371,000 | 23.371528 | 88 | 0.579855 | false |
tklengyel/patchwork | apps/patchwork/views/xmlrpc.py | 1 | 13846 | # Patchwork - automated patch tracking system
# Copyright (C) 2008 Jeremy Kerr <[email protected]>
#
# This file is part of the Patchwork package.
#
# Patchwork is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Patchwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Patchwork; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Patchwork XMLRPC interface
#
from SimpleXMLRPCServer import SimpleXMLRPCDispatcher
from django.http import HttpResponse, HttpResponseRedirect, \
HttpResponseServerError
from django.core import urlresolvers
from django.contrib.auth import authenticate
from patchwork.models import Patch, Project, Person, State
from patchwork.views import patch_to_mbox
from django.views.decorators.csrf import csrf_exempt
import sys
import base64
import xmlrpclib
class PatchworkXMLRPCDispatcher(SimpleXMLRPCDispatcher):
def __init__(self):
if sys.version_info[:3] >= (2,5,):
SimpleXMLRPCDispatcher.__init__(self, allow_none=False,
encoding=None)
def _dumps(obj, *args, **kwargs):
kwargs['allow_none'] = self.allow_none
kwargs['encoding'] = self.encoding
return xmlrpclib.dumps(obj, *args, **kwargs)
else:
def _dumps(obj, *args, **kwargs):
return xmlrpclib.dumps(obj, *args, **kwargs)
SimpleXMLRPCDispatcher.__init__(self)
self.dumps = _dumps
# map of name => (auth, func)
self.func_map = {}
def register_function(self, fn, auth_required):
self.func_map[fn.__name__] = (auth_required, fn)
def _user_for_request(self, request):
auth_header = None
if 'HTTP_AUTHORIZATION' in request.META:
auth_header = request.META.get('HTTP_AUTHORIZATION')
elif 'Authorization' in request.META:
auth_header = request.META.get('Authorization')
if auth_header is None or auth_header == '':
raise Exception("No authentication credentials given")
str = auth_header.strip()
if not str.startswith('Basic '):
raise Exception("Authentication scheme not supported")
str = str[len('Basic '):].strip()
try:
decoded = base64.decodestring(str)
username, password = decoded.split(':', 1)
except:
raise Exception("Invalid authentication credentials")
return authenticate(username = username, password = password)
def _dispatch(self, request, method, params):
if method not in self.func_map.keys():
raise Exception('method "%s" is not supported' % method)
auth_required, fn = self.func_map[method]
if auth_required:
user = self._user_for_request(request)
if not user:
raise Exception("Invalid username/password")
params = (user,) + params
return fn(*params)
def _marshaled_dispatch(self, request):
try:
params, method = xmlrpclib.loads(request.body)
response = self._dispatch(request, method, params)
# wrap response in a singleton tuple
response = (response,)
response = self.dumps(response, methodresponse=1)
except xmlrpclib.Fault, fault:
response = self.dumps(fault)
except:
# report exception back to server
response = self.dumps(
xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value)),
)
return response
dispatcher = PatchworkXMLRPCDispatcher()
# XMLRPC view function
@csrf_exempt
def xmlrpc(request):
if request.method != 'POST':
return HttpResponseRedirect(
urlresolvers.reverse('patchwork.views.help',
kwargs = {'path': 'pwclient/'}))
response = HttpResponse()
try:
ret = dispatcher._marshaled_dispatch(request)
response.write(ret)
except Exception:
return HttpResponseServerError()
return response
# decorator for XMLRPC methods. Setting login_required to true will call
# the decorated function with a non-optional user as the first argument.
def xmlrpc_method(login_required = False):
def wrap(f):
dispatcher.register_function(f, login_required)
return f
return wrap
# We allow most of the Django field lookup types for remote queries
LOOKUP_TYPES = ["iexact", "contains", "icontains", "gt", "gte", "lt",
"in", "startswith", "istartswith", "endswith",
"iendswith", "range", "year", "month", "day", "isnull" ]
#######################################################################
# Helper functions
#######################################################################
def project_to_dict(obj):
"""Return a trimmed down dictionary representation of a Project
object which is OK to send to the client."""
return \
{
'id' : obj.id,
'linkname' : obj.linkname,
'name' : obj.name,
}
def person_to_dict(obj):
"""Return a trimmed down dictionary representation of a Person
object which is OK to send to the client."""
# Make sure we don't return None even if the user submitted a patch
# with no real name. XMLRPC can't marshall None.
if obj.name is not None:
name = obj.name
else:
name = obj.email
return \
{
'id' : obj.id,
'email' : obj.email,
'name' : name,
'user' : unicode(obj.user).encode("utf-8"),
}
def patch_to_dict(obj):
"""Return a trimmed down dictionary representation of a Patch
object which is OK to send to the client."""
return \
{
'id' : obj.id,
'date' : unicode(obj.date).encode("utf-8"),
'filename' : obj.filename(),
'msgid' : obj.msgid,
'name' : obj.name,
'project' : unicode(obj.project).encode("utf-8"),
'project_id' : obj.project_id,
'state' : unicode(obj.state).encode("utf-8"),
'state_id' : obj.state_id,
'submitter' : unicode(obj.submitter).encode("utf-8"),
'submitter_id' : obj.submitter_id,
'delegate' : unicode(obj.delegate).encode("utf-8"),
'delegate_id' : max(obj.delegate_id, 0),
'commit_ref' : max(obj.commit_ref, ''),
}
def bundle_to_dict(obj):
"""Return a trimmed down dictionary representation of a Bundle
object which is OK to send to the client."""
return \
{
'id' : obj.id,
'name' : obj.name,
'n_patches' : obj.n_patches(),
'public_url' : obj.public_url(),
}
def state_to_dict(obj):
"""Return a trimmed down dictionary representation of a State
object which is OK to send to the client."""
return \
{
'id' : obj.id,
'name' : obj.name,
}
#######################################################################
# Public XML-RPC methods
#######################################################################
@xmlrpc_method(False)
def pw_rpc_version():
"""Return Patchwork XML-RPC interface version."""
return 1
@xmlrpc_method(False)
def project_list(search_str="", max_count=0):
"""Get a list of projects matching the given filters."""
try:
if len(search_str) > 0:
projects = Project.objects.filter(linkname__icontains = search_str)
else:
projects = Project.objects.all()
if max_count > 0:
return map(project_to_dict, projects)[:max_count]
else:
return map(project_to_dict, projects)
except:
return []
@xmlrpc_method(False)
def project_get(project_id):
"""Return structure for the given project ID."""
try:
project = Project.objects.filter(id = project_id)[0]
return project_to_dict(project)
except:
return {}
@xmlrpc_method(False)
def person_list(search_str="", max_count=0):
"""Get a list of Person objects matching the given filters."""
try:
if len(search_str) > 0:
people = (Person.objects.filter(name__icontains = search_str) |
Person.objects.filter(email__icontains = search_str))
else:
people = Person.objects.all()
if max_count > 0:
return map(person_to_dict, people)[:max_count]
else:
return map(person_to_dict, people)
except:
return []
@xmlrpc_method(False)
def person_get(person_id):
"""Return structure for the given person ID."""
try:
person = Person.objects.filter(id = person_id)[0]
return person_to_dict(person)
except:
return {}
@xmlrpc_method(False)
def patch_list(filter={}):
"""Get a list of patches matching the given filters."""
try:
# We allow access to many of the fields. But, some fields are
# filtered by raw object so we must lookup by ID instead over
# XML-RPC.
ok_fields = [
"id",
"name",
"project_id",
"submitter_id",
"delegate_id",
"state_id",
"date",
"commit_ref",
"hash",
"msgid",
"max_count",
]
dfilter = {}
max_count = 0
for key in filter:
parts = key.split("__")
if parts[0] not in ok_fields:
# Invalid field given
return []
if len(parts) > 1:
if LOOKUP_TYPES.count(parts[1]) == 0:
# Invalid lookup type given
return []
if parts[0] == 'project_id':
dfilter['project'] = Project.objects.filter(id =
filter[key])[0]
elif parts[0] == 'submitter_id':
dfilter['submitter'] = Person.objects.filter(id =
filter[key])[0]
elif parts[0] == 'state_id':
dfilter['state'] = State.objects.filter(id =
filter[key])[0]
elif parts[0] == 'max_count':
max_count = filter[key]
else:
dfilter[key] = filter[key]
patches = Patch.objects.filter(**dfilter)
if max_count > 0:
return map(patch_to_dict, patches[:max_count])
else:
return map(patch_to_dict, patches)
except:
return []
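# Illustrative filter for patch_list (values assumed, not from project docs):
#   {'project_id': 1, 'name__icontains': 'fix', 'max_count': 10}
# 'project_id' is resolved to a Project object, 'name__icontains' is passed
# through as a Django lookup, and 'max_count' truncates the result list.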
@xmlrpc_method(False)
def patch_get(patch_id):
"""Return structure for the given patch ID."""
try:
patch = Patch.objects.filter(id = patch_id)[0]
return patch_to_dict(patch)
except:
return {}
@xmlrpc_method(False)
def patch_get_by_hash(hash):
"""Return structure for the given patch hash."""
try:
patch = Patch.objects.filter(hash = hash)[0]
return patch_to_dict(patch)
except:
return {}
@xmlrpc_method(False)
def patch_get_by_project_hash(project, hash):
"""Return structure for the given patch hash."""
try:
patch = Patch.objects.filter(project__linkname = project,
hash = hash)[0]
return patch_to_dict(patch)
except:
return {}
@xmlrpc_method(False)
def patch_get_mbox(patch_id):
"""Return mbox string for the given patch ID."""
try:
patch = Patch.objects.filter(id = patch_id)[0]
return patch_to_mbox(patch).as_string()
except:
return ""
@xmlrpc_method(False)
def patch_get_diff(patch_id):
"""Return diff for the given patch ID."""
try:
patch = Patch.objects.filter(id = patch_id)[0]
return patch.content
except:
return ""
@xmlrpc_method(True)
def patch_set(user, patch_id, params):
"""Update a patch with the key,value pairs in params. Only some parameters
can be set"""
try:
ok_params = ['state', 'commit_ref', 'archived']
patch = Patch.objects.get(id = patch_id)
if not patch.is_editable(user):
raise Exception('No permissions to edit this patch')
for (k, v) in params.iteritems():
if k not in ok_params:
continue
if k == 'state':
patch.state = State.objects.get(id = v)
else:
setattr(patch, k, v)
patch.save()
return True
except:
raise
@xmlrpc_method(False)
def state_list(search_str="", max_count=0):
"""Get a list of state structures matching the given search string."""
try:
if len(search_str) > 0:
states = State.objects.filter(name__icontains = search_str)
else:
states = State.objects.all()
if max_count > 0:
return map(state_to_dict, states)[:max_count]
else:
return map(state_to_dict, states)
except:
return []
@xmlrpc_method(False)
def state_get(state_id):
"""Return structure for the given state ID."""
try:
state = State.objects.filter(id = state_id)[0]
return state_to_dict(state)
except:
return {}
| gpl-2.0 | -7,321,025,644,502,314,000 | 30.114607 | 79 | 0.560162 | false |
myfavouritekk/TPN | src/tpn/recurrent_extract_features.py | 1 | 5385 | #!/usr/bin/env python
import os
import os.path as osp
import numpy as np
import tensorflow as tf
from model import TPNModel
import argparse
import glog as log
import glob
from data_io import tpn_test_iterator
import cPickle
def bbox_transform_inv(boxes, deltas):
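    # Decode bounding-box regression deltas (dx, dy, dw, dh) relative to the
    # given reference boxes into absolute (x1, y1, x2, y2) corners -- the
    # standard R-CNN-style inverse box transform.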
if boxes.shape[0] == 0:
return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)
boxes = boxes.astype(deltas.dtype, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
dx = deltas[:, 0::4]
dy = deltas[:, 1::4]
dw = deltas[:, 2::4]
dh = deltas[:, 3::4]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w
# y2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h
return pred_boxes
logging = tf.logging
def test_vid(session, m, vid_file, verbose=True):
assert m.batch_size == 1
tracks = tpn_test_iterator(vid_file)
# import pdb
# pdb.set_trace()
cum_acc_static = 0.
cum_acc_lstm = 0.
log.info(vid_file)
vid_res = []
for ind, track in enumerate(tracks, start=1):
# process track data
track_length = track['feature'].shape[0]
expend_feat = np.zeros((m.num_steps,) + track['feature'].shape[1:])
expend_feat[:track_length] = track['feature']
# extract features
state = session.run([m.initial_state])
cls_scores, bbox_deltas, end_probs, state = session.run(
[m.cls_scores, m.bbox_pred, m.end_probs, m.final_state],
{m.input_data: expend_feat[np.newaxis,:,:],
m.initial_state: state[0]})
# process outputs
cls_labels = track['class_label']
gt_len = cls_labels.shape[0]
bbox_pred = bbox_transform_inv(track['roi'], bbox_deltas[:gt_len,:])
cls_pred_lstm = np.argmax(cls_scores, axis=1)[:gt_len]
end_probs = end_probs[:gt_len]
# calculate accuracy comparison
cls_pred_static = np.argmax(track['scores'], axis=1)[:gt_len]
cum_acc_lstm += np.mean((cls_labels == cls_pred_lstm))
cum_acc_static += np.mean((cls_labels == cls_pred_static))
# save outputs
track_res = {}
for key in ['roi', 'frame', 'bbox', 'scores', 'anchor']:
track_res[key] = track[key]
track_res['scores_lstm'] = cls_scores[:gt_len,:]
track_res['end_lstm'] = end_probs
track_res['bbox_lstm'] = bbox_pred.reshape((gt_len, -1, 4))
vid_res.append(track_res)
cum_acc_lstm /= len(tracks)
cum_acc_static /= len(tracks)
log.info("Accuracy (Static): {:.03f} Accuracy (LSTM): {:.03f}".format(cum_acc_static, cum_acc_lstm))
return vid_res
class TestConfig(object):
"""Default config."""
init_scale = 0.01
learning_rate = 0.001
momentum = 0.9
max_grad_norm = 1.5
num_steps = 20
input_size = 1024
hidden_size = 1024
max_epoch = 5
iter_epoch = 2000
keep_prob = 1.0
lr_decay = 0.5
batch_size = 1
num_classes = 31
cls_weight = 1.0
bbox_weight = 0.0
ending_weight = 1.0
vid_per_batch = 4
cls_init = ''
bbox_init = ''
def main(args):
if not args.data_path:
raise ValueError("Must set --data_path to TPN data directory")
log.info("Processing data...")
# raw_data = tpn_raw_data(args.data_path)
# train_data, valid_data = raw_data
config = TestConfig()
config.num_layers = args.num_layers
config.type = args.type
config.hidden_size = config.input_size = args.input_size
#tf.set_random_seed(1017)
vids = glob.glob(osp.join(args.data_path, '*'))
with tf.Graph().as_default(), tf.Session() as session:
initializer = tf.random_uniform_initializer(-config.init_scale,
config.init_scale, seed=1017)
with tf.variable_scope("model", reuse=None, initializer=None):
m = TPNModel(is_training=False, config=config)
# restoring variables
saver = tf.train.Saver()
log.info("Retoring from {}".format(args.model_path))
saver.restore(session, args.model_path)
for vid_file in vids:
vid_name = osp.split(vid_file)[-1]
save_dir = osp.join(args.save_dir, vid_name)
if not osp.isdir(save_dir):
os.makedirs(save_dir)
outputs = test_vid(session, m, vid_file, verbose=True)
for track_id, track in enumerate(outputs):
with open(osp.join(save_dir, '{:06d}.pkl'.format(track_id)), 'wb') as f:
cPickle.dump(track, f, cPickle.HIGHEST_PROTOCOL)
if __name__ == '__main__':
parser = argparse.ArgumentParser('Extracting recurrent features.')
parser.add_argument('data_path',
help='Data path')
parser.add_argument('save_dir',
help='Result directory')
parser.add_argument('model_path', help='model_stored path')
parser.add_argument('num_layers', type=int,
help='Number of layers')
parser.add_argument('input_size', type=int,
help='Input size.')
parser.add_argument('--type', type=str, choices=['residual', 'basic'], default='residual',
help='Type of LSTM cells. [residual]')
args = parser.parse_args()
main(args)
| mit | 8,704,772,570,818,406,000 | 31.053571 | 102 | 0.622284 | false |
razisayyed/django-ads | ads/conf.py | 1 | 1926 | from django.conf import settings
from appconf import AppConf
from django.utils.translation import ugettext_lazy as _
gettext = lambda s: s
class AdsConf(AppConf):
class Meta:
prefix = 'ads'
GOOGLE_ADSENSE_CLIENT = None # 'ca-pub-xxxxxxxxxxxxxxxx'
ZONES = {
'header': {
'name': gettext('Header'),
'ad_size': {
'xs': '720x150',
'sm': '800x90',
'md': '800x90',
'lg': '800x90',
'xl': '800x90'
},
'google_adsense_slot': None, # 'xxxxxxxxx',
'google_adsense_format': None, # 'auto'
},
'content': {
'name': gettext('Content'),
'ad_size': {
'xs': '720x150',
'sm': '800x90',
'md': '800x90',
'lg': '800x90',
'xl': '800x90'
},
'google_adsense_slot': None, # 'xxxxxxxxx',
'google_adsense_format': None, # 'auto'
},
'sidebar': {
'name': gettext('Sidebar'),
'ad_size': {
'xs': '720x150',
'sm': '800x90',
'md': '800x90',
'lg': '800x90',
'xl': '800x90'
}
}
}
DEFAULT_AD_SIZE = '720x150'
DEVICES = (
('xs', _('Extra small devices')),
('sm', _('Small devices')),
('md', _('Medium devices (Tablets)')),
('lg', _('Large devices (Desktops)')),
('xl', _('Extra large devices (Large Desktops)')),
)
VIEWPORTS = {
'xs': 'd-block img-fluid d-sm-none',
'sm': 'd-none img-fluid d-sm-block d-md-none',
'md': 'd-none img-fluid d-md-block d-lg-none',
'lg': 'd-none img-fluid d-lg-block d-xl-none',
'xl': 'd-none img-fluid d-xl-block',
}
| apache-2.0 | 5,576,397,388,752,822,000 | 26.913043 | 61 | 0.419522 | false |
jwinzer/OpenSlides | server/openslides/agenda/models.py | 1 | 20000 | from collections import defaultdict
from typing import Dict, List, Set
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models, transaction
from django.utils import timezone
from openslides.core.config import config
from openslides.core.models import Countdown, Tag
from openslides.utils.autoupdate import inform_changed_data
from openslides.utils.exceptions import OpenSlidesError
from openslides.utils.manager import BaseManager
from openslides.utils.models import (
CASCADE_AND_AUTOUPDATE,
SET_NULL_AND_AUTOUPDATE,
RESTModelMixin,
)
from openslides.utils.postgres import restart_id_sequence
from openslides.utils.utils import to_roman
from .access_permissions import ItemAccessPermissions, ListOfSpeakersAccessPermissions
class ItemManager(BaseManager):
"""
Customized model manager with special methods for agenda tree and
numbering.
"""
def get_prefetched_queryset(self, *args, **kwargs):
"""
Returns the normal queryset with all items. In the background all
related items (topics, motions, assignments) are prefetched from the database.
"""
# TODO: Fix the django bug: we cannot include "content_object__agenda_items" here,
# because this is some kind of cyclic lookup. The _prefetched_objects_cache of every
# content object will hold wrong values for the agenda item.
# See issue #4738
return (
super()
.get_prefetched_queryset(*args, **kwargs)
.prefetch_related("content_object", "parent", "tags")
)
def get_only_non_public_items(self):
"""
        Generator which yields only internal and hidden items, that means only
        items whose type is INTERNAL_ITEM or HIDDEN_ITEM or which are children
        of hidden items.
"""
# Do not execute non-hidden items because this would create a lot of db queries
root_items, item_children = self.get_root_and_children(only_item_type=None)
def yield_items(items, parent_is_not_public=False):
"""
Generator that yields a list of items and their children.
"""
for item in items:
if parent_is_not_public or item.type in (
item.INTERNAL_ITEM,
item.HIDDEN_ITEM,
):
item_is_not_public = True
yield item
else:
item_is_not_public = False
yield from yield_items(
item_children[item.pk], parent_is_not_public=item_is_not_public
)
yield from yield_items(root_items)
def get_root_and_children(self, only_item_type=None):
"""
        Returns a list with all root items and a dictionary where the key is an
item pk and the value is a list with all children of the item.
If only_item_type is given, the tree hides items with other types and
all of their children.
"""
queryset = self.order_by("weight")
item_children: Dict[int, List[Item]] = defaultdict(list)
root_items = []
for item in queryset:
if only_item_type is not None and item.type != only_item_type:
continue
if item.parent_id is not None:
item_children[item.parent_id].append(item)
else:
root_items.append(item)
return root_items, item_children
def get_tree(self, only_item_type=None, include_content=False):
"""
        Generator that yields dictionaries. Each dictionary has two keys, id
        and children, where id is the id of one agenda item and children is a
        generator that yields dictionaries like the one described.
If only_item_type is given, the tree hides items with other types and
all of their children.
        If include_content is True, the yielded dictionaries have no key 'id'
but a key 'item' with the entire object.
"""
root_items, item_children = self.get_root_and_children(
only_item_type=only_item_type
)
def get_children(items):
"""
            Generator that yields the described dictionaries.
"""
for item in items:
if include_content:
yield dict(item=item, children=get_children(item_children[item.pk]))
else:
yield dict(
id=item.pk, children=get_children(item_children[item.pk])
)
yield from get_children(root_items)
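    # Illustration (ids assumed): for an agenda where item 2 has the child 3,
    # get_tree() yields a structure equivalent to
    #   [{'id': 1, 'children': <gen>}, {'id': 2, 'children': <gen -> {'id': 3}>}]
    # i.e. the same nesting that set_tree() below accepts as input.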
@transaction.atomic
def set_tree(self, tree):
"""
Sets the agenda tree.
The tree has to be a nested object. For example:
[{"id": 1}, {"id": 2, "children": [{"id": 3}]}]
"""
def walk_items(tree, parent=None):
"""
            Generator that returns each item in the tree as a tuple.
            These tuples have three values: the item id, the item parent and
            the weight of the item.
"""
for weight, element in enumerate(tree):
yield (element["id"], parent, weight)
yield from walk_items(element.get("children", []), element["id"])
touched_items: Set[int] = set()
db_items = dict((item.pk, item) for item in Item.objects.all())
for item_id, parent_id, weight in walk_items(tree):
# Check that the item is only once in the tree to prevent invalid trees
if item_id in touched_items:
raise ValueError(f"Item {item_id} is more then once in the tree.")
touched_items.add(item_id)
try:
db_item = db_items[item_id]
except KeyError:
raise ValueError(f"Item {item_id} is not in the database.")
# Check if the item has changed and update it
# Note: Do not use Item.objects.update, so that the items are sent
# to the clients via autoupdate
if db_item.parent_id != parent_id or db_item.weight != weight:
db_item.parent_id = parent_id
db_item.weight = weight
db_item.save()
@transaction.atomic
def number_all(self, numeral_system="arabic"):
"""
Auto numbering of the agenda according to the numeral_system. Manually
added item numbers will be overwritten.
"""
def walk_tree(tree, number=None):
for index, tree_element in enumerate(tree):
                # Calculate number of visible agenda items.
if numeral_system == "roman" and number is None:
item_number = to_roman(index + 1)
else:
item_number = str(index + 1)
if number is not None:
item_number = ".".join((number, item_number))
# Add prefix.
if config["agenda_number_prefix"]:
item_number_tmp = f"{config['agenda_number_prefix']} {item_number}"
else:
item_number_tmp = item_number
# Save the new value and go down the tree.
tree_element["item"].item_number = item_number_tmp
tree_element["item"].save()
walk_tree(tree_element["children"], item_number)
        # Start numbering visible agenda items.
walk_tree(self.get_tree(only_item_type=Item.AGENDA_ITEM, include_content=True))
# Reset number of hidden items.
for item in self.get_only_non_public_items():
item.item_number = ""
item.save()
class Item(RESTModelMixin, models.Model):
"""
An Agenda Item
"""
access_permissions = ItemAccessPermissions()
objects = ItemManager()
can_see_permission = "agenda.can_see"
AGENDA_ITEM = 1
INTERNAL_ITEM = 2
HIDDEN_ITEM = 3
ITEM_TYPE = (
(AGENDA_ITEM, "Agenda item"),
(INTERNAL_ITEM, "Internal item"),
(HIDDEN_ITEM, "Hidden item"),
)
item_number = models.CharField(blank=True, max_length=255)
"""
Number of agenda item.
"""
comment = models.TextField(null=True, blank=True)
"""
Optional comment to the agenda item. Will not be shown to normal users.
"""
closed = models.BooleanField(default=False)
"""
    Flag indicating whether the item is finished.
"""
type = models.IntegerField(choices=ITEM_TYPE, default=HIDDEN_ITEM)
"""
Type of the agenda item.
See Item.ITEM_TYPE for more information.
"""
duration = models.IntegerField(null=True, blank=True)
"""
The intended duration for the topic.
"""
parent = models.ForeignKey(
"self",
on_delete=SET_NULL_AND_AUTOUPDATE,
null=True,
blank=True,
related_name="children",
)
"""
The parent item in the agenda tree.
"""
weight = models.IntegerField(default=10000)
"""
Weight to sort the item in the agenda.
"""
content_type = models.ForeignKey(
ContentType, on_delete=models.SET_NULL, null=True, blank=True
)
"""
Field for generic relation to a related object. Type of the object.
"""
object_id = models.PositiveIntegerField(null=True, blank=True)
"""
Field for generic relation to a related object. Id of the object.
"""
content_object = GenericForeignKey()
"""
Field for generic relation to a related object. General field to the related object.
"""
tags = models.ManyToManyField(Tag, blank=True)
"""
Tags for the agenda item.
"""
class Meta:
default_permissions = ()
permissions = (
("can_see", "Can see agenda"),
("can_manage", "Can manage agenda"),
(
"can_see_internal_items",
"Can see internal items and time scheduling of agenda",
),
)
unique_together = ("content_type", "object_id")
ordering = ["weight"]
@property
def title_information(self):
"""
Return get_agenda_title_information() from the content_object.
"""
try:
return self.content_object.get_agenda_title_information()
except AttributeError:
raise NotImplementedError(
"You have to provide a get_agenda_title_information "
"method on your related model."
)
def is_internal(self):
"""
        Returns True if the type of this object itself is an internal item or any
of its ancestors has such a type.
Attention! This executes one query for each ancestor of the item.
"""
return self.type == self.INTERNAL_ITEM or (
self.parent is not None and self.parent.is_internal()
)
def is_hidden(self):
"""
Returns True if the type of this object itself is a hidden item or any
of its ancestors has such a type.
Attention! This executes one query for each ancestor of the item.
"""
return self.type == self.HIDDEN_ITEM or (
self.parent is not None and self.parent.is_hidden()
)
@property
def level(self):
"""
Returns the level in agenda (=tree of all items). Level 0 means this
item is a root item in the agenda. Level 1 indicates that the parent is
a root item, level 2 that the parent's parent is a root item and so on.
Attention! This executes one query for each ancestor of the item.
"""
if self.parent is None:
return 0
else:
return self.parent.level + 1
class ListOfSpeakersManager(BaseManager):
def get_prefetched_queryset(self, *args, **kwargs):
"""
Returns the normal queryset with all items. In the background all
speakers and related items (topics, motions, assignments) are
prefetched from the database.
"""
return (
super()
.get_prefetched_queryset(*args, **kwargs)
.prefetch_related("speakers", "content_object")
)
class ListOfSpeakers(RESTModelMixin, models.Model):
access_permissions = ListOfSpeakersAccessPermissions()
objects = ListOfSpeakersManager()
can_see_permission = "agenda.can_see_list_of_speakers"
content_type = models.ForeignKey(
ContentType, on_delete=models.SET_NULL, null=True, blank=True
)
"""
Field for generic relation to a related object. Type of the object.
"""
object_id = models.PositiveIntegerField(null=True, blank=True)
"""
Field for generic relation to a related object. Id of the object.
"""
content_object = GenericForeignKey()
"""
Field for generic relation to a related object. General field to the related object.
"""
closed = models.BooleanField(default=False)
"""
    True if the list of speakers is closed.
"""
class Meta:
default_permissions = ()
permissions = (
("can_see_list_of_speakers", "Can see list of speakers"),
("can_manage_list_of_speakers", "Can manage list of speakers"),
)
unique_together = ("content_type", "object_id")
@property
def title_information(self):
"""
Return get_list_of_speakers_title_information() from the content_object.
"""
try:
return self.content_object.get_list_of_speakers_title_information()
except AttributeError:
raise NotImplementedError(
"You have to provide a get_list_of_speakers_title_information "
"method on your related model."
)
def get_next_speaker(self):
"""
Returns the speaker object of the speaker who is next.
"""
try:
return self.speakers.filter(begin_time=None).order_by("weight")[0]
except IndexError:
# The list of speakers is empty.
return None
class SpeakerManager(models.Manager):
"""
Manager for Speaker model. Provides a customized add method.
"""
def add(self, user, list_of_speakers, skip_autoupdate=False, point_of_order=False):
"""
        Customized manager method that prevents anonymous users from being put
        on the list of speakers and prevents someone from appearing twice on
        one list (of coming speakers). Also takes care of the initial sorting
        of the coming speakers.
"""
if isinstance(user, AnonymousUser):
raise OpenSlidesError("An anonymous user can not be on lists of speakers.")
if point_of_order and not config["agenda_enable_point_of_order_speakers"]:
raise OpenSlidesError("Point of order speakers are not enabled.")
if self.filter(
user=user,
list_of_speakers=list_of_speakers,
begin_time=None,
point_of_order=point_of_order,
).exists():
raise OpenSlidesError(f"{user} is already on the list of speakers.")
if config["agenda_present_speakers_only"] and not user.is_present:
raise OpenSlidesError("Only present users can be on the lists of speakers.")
if point_of_order:
weight = (
self.filter(list_of_speakers=list_of_speakers).aggregate(
models.Min("weight")
)["weight__min"]
or 0
) - 1
else:
weight = (
self.filter(list_of_speakers=list_of_speakers).aggregate(
models.Max("weight")
)["weight__max"]
or 0
) + 1
speaker = self.model(
list_of_speakers=list_of_speakers,
user=user,
weight=weight,
point_of_order=point_of_order,
)
speaker.save(
force_insert=True,
skip_autoupdate=skip_autoupdate,
no_delete_on_restriction=True,
)
return speaker
class Speaker(RESTModelMixin, models.Model):
"""
Model for the Speaker list.
"""
objects = SpeakerManager()
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=CASCADE_AND_AUTOUPDATE)
"""
    ForeignKey to the user who speaks.
"""
list_of_speakers = models.ForeignKey(
ListOfSpeakers, on_delete=models.CASCADE, related_name="speakers"
)
"""
    ForeignKey to the list of speakers on which the user wants to speak.
"""
begin_time = models.DateTimeField(null=True)
"""
    Saves the time when the speaker begins to speak. None if he has not spoken yet.
"""
end_time = models.DateTimeField(null=True)
"""
    Saves the time when the speaker ends his speech. None if he is not finished yet.
"""
weight = models.IntegerField(null=True)
"""
    The sort order of the list of speakers. None if the speaker has already spoken.
"""
marked = models.BooleanField(default=False)
"""
Marks a speaker.
"""
point_of_order = models.BooleanField(default=False)
"""
Identifies the speaker as someone with a point of order
"""
class Meta:
default_permissions = ()
permissions = (("can_be_speaker", "Can put oneself on the list of speakers"),)
def __str__(self):
return str(self.user)
def begin_speech(self):
"""
Let the user speak.
Set the weight to None and the time to now. If anyone is still
speaking, end his speech.
"""
try:
current_speaker = (
Speaker.objects.filter(
list_of_speakers=self.list_of_speakers, end_time=None
)
.exclude(begin_time=None)
.get()
)
except Speaker.DoesNotExist:
pass
else:
# Do not send an autoupdate for the countdown and the list_of_speakers. This is done
# by saving the list_of_speakers and countdown later.
current_speaker.end_speech(skip_autoupdate=True)
self.weight = None
self.begin_time = timezone.now()
self.save() # Here, the list_of_speakers is saved and causes an autoupdate.
if config["agenda_couple_countdown_and_speakers"]:
countdown, created = Countdown.objects.get_or_create(
pk=1,
defaults={
"default_time": config["projector_default_countdown"],
"title": "Default countdown",
"countdown_time": config["projector_default_countdown"],
},
)
if created:
restart_id_sequence("core_countdown")
else:
countdown.control(action="reset", skip_autoupdate=True)
countdown.control(action="start", skip_autoupdate=True)
inform_changed_data(
countdown
) # Here, the autoupdate for the countdown is triggered.
def end_speech(self, skip_autoupdate=False):
"""
The speech is finished. Set the time to now.
"""
self.end_time = timezone.now()
self.save(skip_autoupdate=skip_autoupdate)
if config["agenda_couple_countdown_and_speakers"]:
try:
countdown = Countdown.objects.get(pk=1)
except Countdown.DoesNotExist:
pass # Do not create a new countdown on stop action
else:
countdown.control(action="reset", skip_autoupdate=skip_autoupdate)
def get_root_rest_element(self):
"""
Returns the list_of_speakers to this instance which is the root REST element.
"""
return self.list_of_speakers
| mit | 8,516,494,536,356,955,000 | 32.898305 | 96 | 0.58865 | false |
lukasmonk/lucaschess | Code/GestorTurnOnLights.py | 1 | 12214 | import time
from Code import ControlPosicion
from Code import Gestor
from Code import Jugada
from Code import TurnOnLights
from Code.QT import QTUtil
from Code.QT import QTUtil2
from Code.Constantes import *
class GestorTurnOnLights(Gestor.Gestor):
def inicio(self, num_theme, num_block, tol):
if hasattr(self, "reiniciando"):
if self.reiniciando:
return
self.reiniciando = True
self.num_theme = num_theme
self.num_block = num_block
self.tol = tol
self.block = self.tol.get_block(self.num_theme, self.num_block)
self.block.shuffle()
self.calculation_mode = self.tol.is_calculation_mode()
self.penaltyError = self.block.penaltyError(self.calculation_mode)
self.penaltyHelp = self.block.penaltyHelp(self.calculation_mode)
        # self.factorDistancia = self.block.factorDistancia()  # Not used, it is less than 1.0
self.av_seconds = self.block.av_seconds()
if self.av_seconds:
cat, ico = self.block.cqualification(self.calculation_mode)
self.lb_previous = "%s - %0.2f\"" % (cat, self.av_seconds)
else:
self.lb_previous = None
self.num_line = 0
self.num_lines = len(self.block)
self.num_moves = 0
self.total_time_used = 0.0
self.ayudas = 0
self.errores = 0
        self.dicFENayudas = {}  # the arrow is shown starting from two hints on the same position
self.tipoJuego = kJugEntLight
self.siJuegaHumano = False
self.siTutorActivado = False
self.pantalla.ponActivarTutor(False)
self.ayudasPGN = 0
self.pantalla.activaJuego(True, False, siAyudas=False)
self.pantalla.quitaAyudas(True, True)
self.ponMensajero(self.mueveHumano)
self.mostrarIndicador(True)
self.reiniciando = False
self.next_line_run()
def pon_rotulos(self, next):
r1 = _("Calculation mode") if self.calculation_mode else _("Memory mode")
r1 += "<br>%s" % self.line.label
if self.lb_previous:
r1 += "<br><b>%s</b>" % self.lb_previous
if self.num_line:
av_secs, txt = self.block.calc_current(self.num_line - 1, self.total_time_used, self.errores, self.ayudas, self.calculation_mode)
r1 += "<br><b>%s: %s - %0.2f\"" % (_("Current"), txt, av_secs)
self.ponRotulo1(r1)
if next is not None:
r2 = "<b>%d/%d</b>" % (self.num_line + next, self.num_lines)
else:
r2 = None
self.ponRotulo2(r2)
def next_line(self):
if self.num_line < self.num_lines:
self.line = self.block.line(self.num_line)
self.num_move = -1
self.ini_time = None
cp = ControlPosicion.ControlPosicion()
cp.leeFen(self.line.fen)
self.partida.reset(cp)
siBlancas = cp.siBlancas
self.siJugamosConBlancas = siBlancas
self.siRivalConBlancas = not siBlancas
self.ponPosicion(self.partida.ultPosicion)
self.ponPiezasAbajo(siBlancas)
self.pgnRefresh(True)
self.partida.pendienteApertura = False
self.pon_rotulos(1)
def next_line_run(self):
liOpciones = [k_mainmenu, k_ayuda, k_reiniciar]
self.pantalla.ponToolBar(liOpciones)
self.next_line()
QTUtil.xrefreshGUI()
self.ponPosicionDGT()
self.estado = kJugando
self.siguienteJugada()
def procesarAccion(self, clave):
if clave == k_mainmenu:
self.finPartida()
elif clave == k_ayuda:
self.ayuda()
elif clave == k_reiniciar:
self.reiniciar()
elif clave == k_configurar:
self.configurar(siSonidos=True, siCambioTutor=False)
elif clave == k_utilidades:
self.utilidades()
elif clave == k_siguiente:
self.next_line_run()
def reiniciar(self):
if self.estado == kJugando:
if self.ini_time:
self.total_time_used += time.time() - self.ini_time
if self.total_time_used:
self.block.new_reinit(self.total_time_used, self.errores, self.ayudas)
self.total_time_used = 0.0
TurnOnLights.write_tol(self.tol)
self.inicio(self.num_theme, self.num_block, self.tol)
def siguienteJugada(self):
if self.estado == kFinJuego:
return
self.estado = kJugando
self.siJuegaHumano = False
self.ponVista()
siBlancas = self.partida.ultPosicion.siBlancas
self.ponIndicador(siBlancas)
self.refresh()
siRival = siBlancas == self.siRivalConBlancas
self.num_move += 1
if self.num_move >= self.line.total_moves():
self.finLinea()
return
if siRival:
pv = self.line.get_move(self.num_move)
desde, hasta, coronacion = pv[:2], pv[2:4], pv[4:]
self.mueveRival(desde, hasta, coronacion)
self.siguienteJugada()
else:
self.siJuegaHumano = True
self.base_time = time.time()
            if not (self.calculation_mode and self.ini_time is None):  # It starts unless it is the beginning of the line
self.ini_time = self.base_time
self.activaColor(siBlancas)
if self.calculation_mode:
self.tablero.setDispatchMove(self.dispatchMove)
def dispatchMove(self):
if self.ini_time is None:
self.ini_time = time.time()
def finLinea(self):
self.num_line += 1
islast_line = self.num_line == self.num_lines
if islast_line:
#Previous
ant_tm = self.block.av_seconds()
ant_done = self.tol.done_level()
ant_cat_level, nada = self.tol.cat_num_level()
ant_cat_global = self.tol.cat_global()
num_moves = self.block.num_moves()
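            # Assigned time = time actually used plus a fixed time penalty per
            # error and per hint; the average per move determines the
            # qualification category.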
ta = self.total_time_used + self.errores*self.penaltyError + self.ayudas*self.penaltyHelp
tm = ta/num_moves
self.block.new_result(tm, self.total_time_used, self.errores, self.ayudas)
TurnOnLights.write_tol(self.tol)
cat_block, ico = TurnOnLights.qualification(tm, self.calculation_mode)
cat_level, ico = self.tol.cat_num_level()
cat_global = self.tol.cat_global()
txt_more_time = ""
txt_more_cat = ""
txt_more_line = ""
txt_more_global = ""
if ant_tm is None or tm < ant_tm:
txt_more_time = '<span style="color:red">%s</span>' % _("New record")
done = self.tol.done_level()
if done and (not ant_done):
if not self.tol.islast_level():
txt_more_line = "%s<hr>" % _("Open the next level")
if cat_level != ant_cat_level:
txt_more_cat = '<span style="color:red">%s</span>' % _("New")
if cat_global != ant_cat_global:
txt_more_global = '<span style="color:red">%s</span>' % _("New")
cErrores = '<tr><td align=right> %s </td><td> %d (x%d"=%d")</td></tr>' % (_('Errors'), self.errores, self.penaltyError, self.errores*self.penaltyError) if self.errores else ""
cAyudas = '<tr><td align=right> %s </td><td> %d (x%d"=%d")</td></tr>' % (_('Hints'), self.ayudas, self.penaltyHelp, self.ayudas*self.penaltyHelp) if self.ayudas else ""
mens = ('<hr><center><big>'+_('You have finished this block of positions') +
'<hr><table>' +
'<tr><td align=right> %s </td><td> %0.2f"</td></tr>' % (_('Time used'), self.total_time_used) +
cErrores +
cAyudas +
'<tr><td align=right> %s: </td><td> %0.2f" %s</td></tr>' % (_('Time assigned'), ta, txt_more_time) +
'<tr><td align=right> %s: </td><td> %d</td></tr>' % (_('Total moves'), num_moves) +
'<tr><td align=right> %s: </td><td> %0.2f"</td></tr>' % (_('Average time'), tm) +
'<tr><td align=right> %s: </td><td> %s</td></tr>' % (_('Block qualification'), cat_block) +
'<tr><td align=right> %s: </td><td> %s %s</td></tr>' % (_('Level qualification'), cat_level, txt_more_cat) +
'<tr><td align=right> %s: </td><td> %s %s</td></tr>' % (_('Global qualification'), cat_global, txt_more_global) +
'</table></center></big><hr>' +
txt_more_line
)
self.pon_rotulos(None)
QTUtil2.mensaje(self.pantalla, mens, _("Result of training"))
self.total_time_used = 0
else:
if self.tol.go_fast == True or (self.tol.go_fast is None and self.tol.work_level > 0):
self.next_line_run()
return
QTUtil2.mensajeTemporal(self.pantalla, _("This line training is completed."), 1.3)
self.pon_rotulos(0)
self.estado = kFinJuego
self.desactivaTodas()
liOpciones = [k_mainmenu, k_reiniciar, k_configurar, k_utilidades]
if not islast_line:
liOpciones.append(k_siguiente)
self.pantalla.ponToolBar(liOpciones)
def mueveHumano(self, desde, hasta, coronacion=None):
if self.ini_time is None:
self.ini_time = self.base_time
end_time = time.time()
jg = self.checkMueveHumano(desde, hasta, coronacion)
if not jg:
return False
movimiento = jg.movimiento().lower()
if movimiento == self.line.get_move(self.num_move).lower():
self.movimientosPiezas(jg.liMovs)
self.partida.ultPosicion = jg.posicion
self.masJugada(jg, True)
self.error = ""
self.total_time_used += (end_time - self.ini_time)
self.siguienteJugada()
return True
self.errores += 1
self.sigueHumano()
return False
def masJugada(self, jg, siNuestra):
if self.siTerminada():
jg.siJaqueMate = jg.siJaque
jg.siAhogado = not jg.siJaque
self.partida.append_jg(jg)
resp = self.partida.si3repetidas()
if resp:
jg.siTablasRepeticion = True
rotulo = ""
for j in resp:
rotulo += "%d," % (j / 2 + 1,)
rotulo = rotulo.strip(",")
self.rotuloTablasRepeticion = rotulo
if self.partida.ultPosicion.movPeonCap >= 100:
jg.siTablas50 = True
if self.partida.ultPosicion.siFaltaMaterial():
jg.siTablasFaltaMaterial = True
self.ponFlechaSC(jg.desde, jg.hasta)
self.beepExtendido(siNuestra)
self.pgnRefresh(self.partida.ultPosicion.siBlancas)
self.refresh()
self.ponPosicionDGT()
def mueveRival(self, desde, hasta, coronacion):
siBien, mens, jg = Jugada.dameJugada(self.partida.ultPosicion, desde, hasta, coronacion)
self.partida.ultPosicion = jg.posicion
self.masJugada(jg, False)
self.movimientosPiezas(jg.liMovs, True)
self.error = ""
def ayuda(self):
self.ayudas += 1
mov = self.line.get_move(self.num_move).lower()
self.tablero.markPosition(mov[:2])
fen = self.partida.ultPosicion.fen()
if fen not in self.dicFENayudas:
self.dicFENayudas[fen] = 1
else:
self.dicFENayudas[fen] += 1
if self.dicFENayudas[fen] > 2:
self.ponFlechaSC(mov[:2], mov[2:4])
def finPartida(self):
self.procesador.inicio()
self.procesador.showTurnOnLigths(self.tol.name)
def finalX(self):
self.procesador.inicio()
return False
def actualPGN(self):
resp = '[Event "%s"]\n' % _("Turn on the lights")
resp += '[Site "%s"]\n' % self.line.label.replace("<br>", " ").strip()
resp += '[FEN "%s"\n' % self.partida.iniPosicion.fen()
resp += "\n" + self.partida.pgnBase()
return resp
| gpl-2.0 | 7,487,070,209,496,803,000 | 34.923529 | 187 | 0.55682 | false |
mattrobenolt/python-sourcemap | tests/test_objects.py | 1 | 1747 | try:
import unittest2 as unittest
except ImportError:
import unittest
from sourcemap.objects import Token, SourceMapIndex
class TokenTestCase(unittest.TestCase):
def test_eq(self):
assert Token(1, 1, 'lol.js', 1, 1, 'lol') == Token(1, 1, 'lol.js', 1, 1, 'lol')
assert Token(99, 1, 'lol.js', 1, 1, 'lol') != Token(1, 1, 'lol.js', 1, 1, 'lol')
class SourceMapIndexTestCase(unittest.TestCase):
def get_index(self):
tokens = [
Token(dst_line=0, dst_col=0),
Token(dst_line=0, dst_col=5),
Token(dst_line=1, dst_col=0),
Token(dst_line=1, dst_col=12),
]
rows = [
[0, 5],
[0, 12],
]
index = {
(0, 0): tokens[0],
(0, 5): tokens[1],
(1, 0): tokens[2],
(1, 12): tokens[3],
}
raw = {}
return SourceMapIndex(raw, tokens, rows, index), tokens
def test_lookup(self):
index, tokens = self.get_index()
for i in range(5):
assert index.lookup(0, i) is tokens[0]
for i in range(5, 10):
assert index.lookup(0, i) is tokens[1]
for i in range(12):
assert index.lookup(1, i) is tokens[2]
for i in range(12, 20):
assert index.lookup(1, i) is tokens[3]
def test_getitem(self):
index, tokens = self.get_index()
for i in range(4):
assert index[i] is tokens[i]
def test_iter(self):
index, tokens = self.get_index()
for idx, token in enumerate(index):
assert token is tokens[idx]
def test_len(self):
index, tokens = self.get_index()
assert len(index) == len(tokens)
| bsd-2-clause | 6,267,878,097,871,203,000 | 25.074627 | 88 | 0.516314 | false |
skosukhin/spack | var/spack/repos/builtin/packages/r-mzid/package.py | 1 | 2260 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RMzid(RPackage):
"""A parser for mzIdentML files implemented using the XML package. The
parser tries to be general and able to handle all types of mzIdentML
files with the drawback of having less 'pretty' output than a vendor
specific parser. Please contact the maintainer with any problems and
supply an mzIdentML file so the problems can be fixed quickly."""
homepage = "https://www.bioconductor.org/packages/mzID/"
url = "https://git.bioconductor.org/packages/mzID"
version('1.14.0', git='https://git.bioconductor.org/packages/mzID', commit='1c53aa6523ae61d3ebb13381381fc119d6cc6115')
depends_on('r-xml', type=('build', 'run'))
depends_on('r-plyr', type=('build', 'run'))
depends_on('r-doparallel', type=('build', 'run'))
depends_on('r-foreach', type=('build', 'run'))
depends_on('r-iterators', type=('build', 'run'))
depends_on('r-protgenerics', type=('build', 'run'))
depends_on('[email protected]:3.4.9', when='@1.14.0')
| lgpl-2.1 | -8,712,464,723,545,602,000 | 48.130435 | 122 | 0.670796 | false |
frewsxcv/python-geojson | tests/test_strict_json.py | 1 | 1529 | """
GeoJSON produces and consumes only strict JSON. NAN and Infinity are not
permissible values according to the JSON specification.
"""
import unittest
import geojson
class StrictJsonTest(unittest.TestCase):
def test_encode_nan(self):
"""
Ensure Error is raised when encoding nan
"""
self._raises_on_dump({
"type": "Point",
"coordinates": (float("nan"), 1.0),
})
def test_encode_inf(self):
"""
Ensure Error is raised when encoding inf or -inf
"""
self._raises_on_dump({
"type": "Point",
"coordinates": (float("inf"), 1.0),
})
self._raises_on_dump({
"type": "Point",
"coordinates": (float("-inf"), 1.0),
})
def _raises_on_dump(self, unstrict):
with self.assertRaises(ValueError):
geojson.dumps(unstrict)
def test_decode_nan(self):
"""
Ensure Error is raised when decoding NaN
"""
self._raises_on_load('{"type": "Point", "coordinates": [1.0, NaN]}')
def test_decode_inf(self):
"""
Ensure Error is raised when decoding Infinity or -Infinity
"""
self._raises_on_load(
'{"type": "Point", "coordinates": [1.0, Infinity]}')
self._raises_on_load(
'{"type": "Point", "coordinates": [1.0, -Infinity]}')
def _raises_on_load(self, unstrict):
with self.assertRaises(ValueError):
geojson.loads(unstrict)
| bsd-3-clause | 4,059,820,129,994,343,400 | 26.303571 | 76 | 0.543492 | false |
sg-/project_generator | project_generator/builders/gccarm.py | 1 | 1789 | # Copyright 2014 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import logging
from .builder import Builder
from os.path import dirname
class MakefileGccArmBuilder(Builder):
# http://www.gnu.org/software/make/manual/html_node/Running.html
ERRORLEVEL = {
0: 'success (0 warnings, 0 errors)',
1: 'targets not already up to date',
2: 'errors'
}
SUCCESSVALUE = 0
def build_project(self, project_name, project_files, env_settings):
# cwd: relpath(join(project_path, ("gcc_arm" + project)))
# > make all
path = dirname(project_files[0])
logging.debug("Building GCC ARM project: %s" % path)
args = ['make', 'all']
try:
ret_code = None
ret_code = subprocess.call(args, cwd=path)
except:
logging.error("Error whilst calling make. Is it in your PATH?")
else:
if ret_code != self.SUCCESSVALUE:
# Seems like something went wrong.
logging.error("Build failed with the status: %s" %
self.ERRORLEVEL[ret_code])
else:
logging.info("Build succeeded with the status: %s" %
self.ERRORLEVEL[ret_code])
| apache-2.0 | 6,599,894,252,396,413,000 | 34.078431 | 75 | 0.628284 | false |
pu239ppy/authentic2 | authentic2/migrations/0011_auto__add_authenticationevent.py | 1 | 4418 | # -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
from authentic2.compat import user_model_label
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'AuthenticationEvent'
db.create_table(u'authentic2_authenticationevent', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('when', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('who', self.gf('django.db.models.fields.CharField')(max_length=80)),
('how', self.gf('django.db.models.fields.CharField')(max_length=10)),
('nonce', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal(u'authentic2', ['AuthenticationEvent'])
def backwards(self, orm):
# Deleting model 'AuthenticationEvent'
db.delete_table(u'authentic2_authenticationevent')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': user_model_label.split('.')[-1]},
},
u'authentic2.authenticationevent': {
'Meta': {'object_name': 'AuthenticationEvent'},
'how': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nonce': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
u'authentic2.deleteduser': {
'Meta': {'object_name': 'DeletedUser'},
'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s']" % user_model_label})
},
u'authentic2.userexternalid': {
'Meta': {'object_name': 'UserExternalId'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.URLField', [], {'max_length': '256'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s']" % user_model_label})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['authentic2']
| agpl-3.0 | -636,708,911,938,553,300 | 57.906667 | 187 | 0.559982 | false |
bgris/ODL_bgris | odl/trafos/util/ft_utils.py | 1 | 23184 | # Copyright 2014, 2015 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Utility functions for Fourier transforms on regularly sampled data."""
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import range
import numpy as np
from odl.discr import (
uniform_grid, DiscreteLp, uniform_partition_fromgrid,
uniform_discr_frompartition)
from odl.set import RealNumbers
from odl.util import (
fast_1d_tensor_mult,
is_real_dtype, is_scalar_dtype, is_real_floating_dtype,
is_complex_floating_dtype, complex_dtype, dtype_repr,
conj_exponent,
normalized_scalar_param_list, normalized_axes_tuple)
__all__ = ('reciprocal_grid', 'realspace_grid',
'reciprocal_space',
'dft_preprocess_data', 'dft_postprocess_data')
def reciprocal_grid(grid, shift=True, axes=None, halfcomplex=False):
"""Return the reciprocal of the given regular grid.
This function calculates the reciprocal (Fourier/frequency space)
grid for a given regular grid defined by the nodes::
x[k] = x[0] + k * s,
where ``k = (k[0], ..., k[d-1])`` is a ``d``-dimensional index in
the range ``0 <= k < N`` (component-wise). The multi-index
``N`` is the shape of the input grid.
This grid's reciprocal is then given by the nodes::
xi[j] = xi[0] + j * sigma,
with the reciprocal grid stride ``sigma = 2*pi / (s * N)``.
The minimum frequency ``xi[0]`` can in principle be chosen
    freely, but usually it is chosen in such a way that the reciprocal
grid is centered around zero. For this, there are two possibilities:
1. Make the grid point-symmetric around 0.
2. Make the grid "almost" point-symmetric around zero by shifting
it to the left by half a reciprocal stride.
In the first case, the minimum frequency (per axis) is given as::
        xi_1[0] = -pi/s + pi/(s*N) = -pi/s + sigma/2.
For the second case, it is::
xi_1[0] = -pi / s.
Note that the zero frequency is contained in case 1 for an odd
number of points, while for an even size, the second option
guarantees that 0 is contained.
If a real-to-complex (half-complex) transform is to be computed,
the reciprocal grid has the shape ``M[i] = floor(N[i]/2) + 1``
in the last transform axis ``i``.
Parameters
----------
grid : uniform `RectGrid`
        Original sampling grid.
shift : bool or sequence of bools, optional
If ``True``, the grid is shifted by half a stride in the negative
direction. With a sequence, this option is applied separately on
each axis.
axes : int or sequence of ints, optional
Dimensions in which to calculate the reciprocal. The sequence
must have the same length as ``shift`` if the latter is given
as a sequence. ``None`` means all axes in ``grid``.
halfcomplex : bool, optional
If ``True``, return the half of the grid with last coordinate
less than zero. This is related to the fact that for real-valued
functions, the other half is the mirrored complex conjugate of
the given half and therefore needs not be stored.
Returns
-------
reciprocal_grid : uniform `RectGrid`
The reciprocal grid.
"""
if axes is None:
axes = list(range(grid.ndim))
else:
try:
axes = [int(axes)]
except TypeError:
axes = list(axes)
# List indicating shift or not per "active" axis, same length as axes
shift_list = normalized_scalar_param_list(shift, length=len(axes),
param_conv=bool)
# Full-length vectors
stride = grid.stride
shape = np.array(grid.shape)
rmin = grid.min_pt.copy()
rmax = grid.max_pt.copy()
rshape = list(shape)
# Shifted axes (full length to avoid ugly double indexing)
shifted = np.zeros(grid.ndim, dtype=bool)
shifted[axes] = shift_list
rmin[shifted] = -np.pi / stride[shifted]
# Length min->max increases by double the shift, so we
# have to compensate by a full stride
rmax[shifted] = (-rmin[shifted] -
2 * np.pi / (stride[shifted] * shape[shifted]))
# Non-shifted axes
not_shifted = np.zeros(grid.ndim, dtype=bool)
not_shifted[axes] = np.logical_not(shift_list)
rmin[not_shifted] = ((-1.0 + 1.0 / shape[not_shifted]) *
np.pi / stride[not_shifted])
rmax[not_shifted] = -rmin[not_shifted]
# Change last axis shape and max if halfcomplex
if halfcomplex:
rshape[axes[-1]] = shape[axes[-1]] // 2 + 1
# - Odd and shifted: - stride / 2
# - Even and not shifted: + stride / 2
# - Otherwise: 0
last_odd = shape[axes[-1]] % 2 == 1
last_shifted = shift_list[-1]
half_rstride = np.pi / (shape[axes[-1]] * stride[axes[-1]])
if last_odd and last_shifted:
rmax[axes[-1]] = -half_rstride
elif not last_odd and not last_shifted:
rmax[axes[-1]] = half_rstride
else:
rmax[axes[-1]] = 0
return uniform_grid(rmin, rmax, rshape)
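def _reciprocal_grid_example():
    # Illustrative sketch, not part of the original module: it checks the
    # documented stride relation ``sigma = 2*pi / (s * N)`` on a small 1d
    # grid. The concrete numbers are assumptions chosen for this example.
    grid = uniform_grid(0.0, 1.5, 4)           # stride s = 0.5, N = 4 samples
    rgrid = reciprocal_grid(grid, shift=True)
    # sigma = 2*pi / (0.5 * 4) = pi, and the shifted grid starts at
    # xi[0] = -pi / s = -2*pi, so the nodes are [-2*pi, -pi, 0, pi].
    assert np.allclose(rgrid.stride, np.pi)
    assert np.allclose(rgrid.min_pt, -2 * np.pi)
    return rgrid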
def realspace_grid(recip_grid, x0, axes=None, halfcomplex=False,
halfcx_parity='even'):
"""Return the real space grid from the given reciprocal grid.
Given a reciprocal grid::
xi[j] = xi[0] + j * sigma,
with a multi-index ``j = (j[0], ..., j[d-1])`` in the range
``0 <= j < M``, this function calculates the original grid::
x[k] = x[0] + k * s
by using a provided ``x[0]`` and calculating the stride ``s``.
If the reciprocal grid is interpreted as coming from a usual
complex-to-complex FFT, it is ``N == M``, and the stride is::
s = 2*pi / (sigma * N)
For a reciprocal grid from a real-to-complex (half-complex) FFT,
it is ``M[i] = floor(N[i]/2) + 1`` in the last transform axis ``i``.
    To resolve the ambiguity regarding the parity of ``N[i]``, it must be
    specified whether the output shape should be even or odd,
resulting in::
odd : N[i] = 2 * M[i] - 1
even: N[i] = 2 * M[i] - 2
The output stride is calculated with this ``N`` as above in this
case.
Parameters
----------
recip_grid : uniform `RectGrid`
Sampling grid in reciprocal space.
x0 : `array-like`
Desired minimum point of the real space grid.
axes : int or sequence of ints, optional
        Dimensions in which to calculate the real space grid. ``None``
        means "all axes".
halfcomplex : bool, optional
If ``True``, interpret the given grid as the reciprocal as used
in a half-complex FFT (see above). Otherwise, the grid is
regarded as being used in a complex-to-complex transform.
halfcx_parity : {'even', 'odd'}
Use this parity for the shape of the returned grid in the
last axis of ``axes`` in the case ``halfcomplex=True``
Returns
-------
irecip : uniform `RectGrid`
The inverse reciprocal grid.
"""
if axes is None:
axes = list(range(recip_grid.ndim))
else:
try:
axes = [int(axes)]
except TypeError:
axes = list(axes)
rstride = recip_grid.stride
rshape = recip_grid.shape
# Calculate shape of the output grid by adjusting in axes[-1]
irshape = list(rshape)
if halfcomplex:
if str(halfcx_parity).lower() == 'even':
irshape[axes[-1]] = 2 * rshape[axes[-1]] - 2
elif str(halfcx_parity).lower() == 'odd':
irshape[axes[-1]] = 2 * rshape[axes[-1]] - 1
else:
raise ValueError("`halfcomplex` parity '{}' not understood"
"".format(halfcx_parity))
irmin = np.asarray(x0)
irshape = np.asarray(irshape)
irstride = np.copy(rstride)
irstride[axes] = 2 * np.pi / (irshape[axes] * rstride[axes])
irmax = irmin + (irshape - 1) * irstride
return uniform_grid(irmin, irmax, irshape)
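# A minimal sketch of the round trip described in the docstring above, again
# assuming the ``uniform_grid`` helper used by this module:
#
#     grid = uniform_grid(0, 1, 5)
#     rgrid = reciprocal_grid(grid, shift=True)
#     orig = realspace_grid(rgrid, x0=grid.min_pt)
#     # orig has the same shape, stride and minimum point as ``grid``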
def dft_preprocess_data(arr, shift=True, axes=None, sign='-', out=None):
"""Pre-process the real-space data before DFT.
This function multiplies the given data with the separable
function::
p(x) = exp(+- 1j * dot(x - x[0], xi[0]))
    where ``x[0]`` and ``xi[0]`` are the minimum coordinates of
the real-space and reciprocal grids, respectively. The sign of
the exponent depends on the choice of ``sign``. In discretized
form, this function becomes an array::
p[k] = exp(+- 1j * k * s * xi[0])
If the reciprocal grid is not shifted, i.e. symmetric around 0,
it is ``xi[0] = pi/s * (-1 + 1/N)``, hence::
p[k] = exp(-+ 1j * pi * k * (1 - 1/N))
    For a shifted grid, we have ``xi[0] = -pi/s``, thus the
array is given by::
p[k] = (-1)**k
Parameters
----------
arr : `array-like`
Array to be pre-processed. If its data type is a real
non-floating type, it is converted to 'float64'.
    shift : bool or sequence of bools, optional
If ``True``, the grid is shifted by half a stride in the negative
direction. With a sequence, this option is applied separately on
each axis.
axes : int or sequence of ints, optional
Dimensions in which to calculate the reciprocal. The sequence
must have the same length as ``shift`` if the latter is given
as a sequence.
Default: all axes.
sign : {'-', '+'}, optional
Sign of the complex exponent.
out : `numpy.ndarray`, optional
Array in which the result is stored. If ``out is arr``,
an in-place modification is performed. For real data type,
this is only possible for ``shift=True`` since the factors are
complex otherwise.
Returns
-------
out : `numpy.ndarray`
Result of the pre-processing. If ``out`` was given, the returned
object is a reference to it.
Notes
-----
If ``out`` is not specified, the data type of the returned array
is the same as that of ``arr`` except when ``arr`` has real data
type and ``shift`` is not ``True``. In this case, the return type
is the complex counterpart of ``arr.dtype``.
"""
arr = np.asarray(arr)
if not is_scalar_dtype(arr.dtype):
raise ValueError('array has non-scalar data type {}'
''.format(dtype_repr(arr.dtype)))
elif is_real_dtype(arr.dtype) and not is_real_floating_dtype(arr.dtype):
arr = arr.astype('float64')
if axes is None:
axes = list(range(arr.ndim))
else:
try:
axes = [int(axes)]
except TypeError:
axes = list(axes)
shape = arr.shape
shift_list = normalized_scalar_param_list(shift, length=len(axes),
param_conv=bool)
# Make a copy of arr with correct data type if necessary, or copy values.
if out is None:
if is_real_dtype(arr.dtype) and not all(shift_list):
out = np.array(arr, dtype=complex_dtype(arr.dtype), copy=True)
else:
out = arr.copy()
else:
out[:] = arr
if is_real_dtype(out.dtype) and not shift:
raise ValueError('cannot pre-process real input in-place without '
'shift')
if sign == '-':
imag = -1j
elif sign == '+':
imag = 1j
else:
raise ValueError("`sign` '{}' not understood".format(sign))
def _onedim_arr(length, shift):
if shift:
# (-1)^indices
factor = np.ones(length, dtype=out.dtype)
factor[1::2] = -1
else:
factor = np.arange(length, dtype=out.dtype)
factor *= -imag * np.pi * (1 - 1.0 / length)
np.exp(factor, out=factor)
return factor.astype(out.dtype, copy=False)
onedim_arrs = []
for axis, shift in zip(axes, shift_list):
length = shape[axis]
onedim_arrs.append(_onedim_arr(length, shift))
fast_1d_tensor_mult(out, onedim_arrs, axes=axes, out=out)
return out
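# A minimal sketch of the pre-processing factor for a fully shifted grid,
# following directly from the formulas in the docstring:
#
#     arr = np.ones(4)
#     dft_preprocess_data(arr, shift=True)
#     # returns a copy of arr multiplied entrywise by (-1)**k,
#     # i.e. [1., -1., 1., -1.]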
def _interp_kernel_ft(norm_freqs, interp):
"""Scaled FT of a one-dimensional interpolation kernel.
For normalized frequencies ``-1/2 <= xi <= 1/2``, this
function returns::
sinc(pi * xi)**k / sqrt(2 * pi)
where ``k=1`` for 'nearest' and ``k=2`` for 'linear' interpolation.
Parameters
----------
norm_freqs : `numpy.ndarray`
Normalized frequencies between -1/2 and 1/2
interp : {'nearest', 'linear'}
Type of interpolation kernel
Returns
-------
ker_ft : `numpy.ndarray`
Values of the kernel FT at the given frequencies
"""
# Numpy's sinc(x) is equal to the 'math' sinc(pi * x)
ker_ft = np.sinc(norm_freqs)
interp_ = str(interp).lower()
if interp_ == 'nearest':
pass
elif interp_ == 'linear':
ker_ft **= 2
else:
raise ValueError("`interp` '{}' not understood".format(interp))
ker_ft /= np.sqrt(2 * np.pi)
return ker_ft
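# A small sanity check that follows directly from the formula above
# (``sinc(0) == 1``):
#
#     _interp_kernel_ft(np.array([0.0]), 'nearest')
#     # -> roughly array([0.3989]), i.e. 1 / sqrt(2 * pi)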
def dft_postprocess_data(arr, real_grid, recip_grid, shift, axes,
interp, sign='-', op='multiply', out=None):
"""Post-process the Fourier-space data after DFT.
This function multiplies the given data with the separable
function::
q(xi) = exp(+- 1j * dot(x[0], xi)) * s * phi_hat(xi_bar)
where ``x[0]`` and ``s`` are the minimum point and the stride of
the real-space grid, respectively, and ``phi_hat(xi_bar)`` is the FT
of the interpolation kernel. The sign of the exponent depends on the
choice of ``sign``. Note that for ``op='divide'`` the
multiplication with ``s * phi_hat(xi_bar)`` is replaced by a
division with the same array.
In discretized form on the reciprocal grid, the exponential part
of this function becomes an array::
q[k] = exp(+- 1j * dot(x[0], xi[k]))
and the arguments ``xi_bar`` to the interpolation kernel
are the normalized frequencies::
for 'shift=True' : xi_bar[k] = -pi + pi * (2*k) / N
for 'shift=False' : xi_bar[k] = -pi + pi * (2*k+1) / N
See [Pre+2007]_, Section 13.9 "Computing Fourier Integrals Using
the FFT" for a similar approach.
Parameters
----------
arr : `array-like`
Array to be pre-processed. An array with real data type is
converted to its complex counterpart.
real_grid : uniform `RectGrid`
Real space grid in the transform.
recip_grid : uniform `RectGrid`
Reciprocal grid in the transform
shift : bool or sequence of bools
If ``True``, the grid is shifted by half a stride in the negative
direction in the corresponding axes. The sequence must have the
same length as ``axes``.
axes : int or sequence of ints
Dimensions along which to take the transform. The sequence must
have the same length as ``shifts``.
interp : string or sequence of strings
Interpolation scheme used in the real-space.
sign : {'-', '+'}, optional
Sign of the complex exponent.
op : {'multiply', 'divide'}, optional
Operation to perform with the stride times the interpolation
kernel FT
out : `numpy.ndarray`, optional
Array in which the result is stored. If ``out is arr``, an
in-place modification is performed.
Returns
-------
out : `numpy.ndarray`
Result of the post-processing. If ``out`` was given, the returned
object is a reference to it.
"""
arr = np.asarray(arr)
if is_real_floating_dtype(arr.dtype):
arr = arr.astype(complex_dtype(arr.dtype))
elif not is_complex_floating_dtype(arr.dtype):
raise ValueError('array data type {} is not a complex floating point '
'data type'.format(dtype_repr(arr.dtype)))
if out is None:
out = arr.copy()
elif out is not arr:
out[:] = arr
if axes is None:
axes = list(range(arr.ndim))
else:
try:
axes = [int(axes)]
except TypeError:
axes = list(axes)
shift_list = normalized_scalar_param_list(shift, length=len(axes),
param_conv=bool)
if sign == '-':
imag = -1j
elif sign == '+':
imag = 1j
else:
raise ValueError("`sign` '{}' not understood".format(sign))
op, op_in = str(op).lower(), op
if op not in ('multiply', 'divide'):
raise ValueError("kernel `op` '{}' not understood".format(op_in))
# Make a list from interp if that's not the case already
try:
# Duck-typed string check
interp + ''
except TypeError:
pass
else:
interp = [str(interp).lower()] * arr.ndim
onedim_arrs = []
for ax, shift, intp in zip(axes, shift_list, interp):
x = real_grid.min_pt[ax]
xi = recip_grid.coord_vectors[ax]
# First part: exponential array
onedim_arr = np.exp(imag * x * xi)
# Second part: interpolation kernel
len_dft = recip_grid.shape[ax]
len_orig = real_grid.shape[ax]
halfcomplex = (len_dft < len_orig)
odd = len_orig % 2
fmin = -0.5 if shift else -0.5 + 1.0 / (2 * len_orig)
if halfcomplex:
# maximum lies around 0, possibly half a cell left or right of it
if shift and odd:
fmax = - 1.0 / (2 * len_orig)
elif not shift and not odd:
fmax = 1.0 / (2 * len_orig)
else:
fmax = 0.0
else: # not halfcomplex
# maximum lies close to 0.5, half or full cell left of it
if shift:
# -0.5 + (N-1)/N = 0.5 - 1/N
fmax = 0.5 - 1.0 / len_orig
else:
# -0.5 + 1/(2*N) + (N-1)/N = 0.5 - 1/(2*N)
fmax = 0.5 - 1.0 / (2 * len_orig)
freqs = np.linspace(fmin, fmax, num=len_dft)
stride = real_grid.stride[ax]
if op == 'multiply':
onedim_arr *= stride * _interp_kernel_ft(freqs, intp)
else:
onedim_arr /= stride * _interp_kernel_ft(freqs, intp)
onedim_arrs.append(onedim_arr.astype(out.dtype, copy=False))
fast_1d_tensor_mult(out, onedim_arrs, axes=axes, out=out)
return out
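# A minimal sketch of how this post-processing is wired together with the grid
# helpers above (the argument values are illustrative assumptions only):
#
#     grid = uniform_grid(0, 1, 8)
#     rgrid = reciprocal_grid(grid, shift=True)
#     ft_arr = np.ones(rgrid.shape, dtype=complex)
#     dft_postprocess_data(ft_arr, real_grid=grid, recip_grid=rgrid,
#                          shift=[True], axes=[0], interp='nearest')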
def reciprocal_space(space, axes=None, halfcomplex=False, shift=True,
**kwargs):
"""Return the range of the Fourier transform on ``space``.
Parameters
----------
space : `DiscreteLp`
Real space whose reciprocal is calculated. It must be
uniformly discretized.
axes : sequence of ints, optional
Dimensions along which the Fourier transform is taken.
Default: all axes
halfcomplex : bool, optional
        If ``True``, take only the negative frequency part along the last
        axis. For ``False``, use the full frequency space.
This option can only be used if ``space`` is a space of
real-valued functions.
shift : bool or sequence of bools, optional
If ``True``, the reciprocal grid is shifted by half a stride in
the negative direction. With a boolean sequence, this option
is applied separately to each axis.
If a sequence is provided, it must have the same length as
``axes`` if supplied. Note that this must be set to ``True``
in the halved axis in half-complex transforms.
Default: ``True``
impl : string, optional
Implementation back-end for the created space.
Default: ``'numpy'``
exponent : float, optional
Create a space with this exponent. By default, the conjugate
exponent ``q = p / (p - 1)`` of the exponent of ``space`` is
used, where ``q = inf`` for ``p = 1`` and vice versa.
dtype : optional
Complex data type of the created space. By default, the
complex counterpart of ``space.dtype`` is used.
Returns
-------
rspace : `DiscreteLp`
Reciprocal of the input ``space``. If ``halfcomplex=True``, the
upper end of the domain (where the half space ends) is chosen to
coincide with the grid node.
"""
if not isinstance(space, DiscreteLp):
raise TypeError('`space` {!r} is not a `DiscreteLp` instance'
''.format(space))
if not space.is_uniform:
raise ValueError('`space` is not uniformly discretized')
if axes is None:
axes = tuple(range(space.ndim))
axes = normalized_axes_tuple(axes, space.ndim)
if halfcomplex and space.field != RealNumbers():
raise ValueError('`halfcomplex` option can only be used with real '
'spaces')
exponent = kwargs.pop('exponent', None)
if exponent is None:
exponent = conj_exponent(space.exponent)
dtype = kwargs.pop('dtype', None)
if dtype is None:
dtype = complex_dtype(space.dtype)
else:
if not is_complex_floating_dtype(dtype):
raise ValueError('{} is not a complex data type'
''.format(dtype_repr(dtype)))
impl = kwargs.pop('impl', 'numpy')
# Calculate range
recip_grid = reciprocal_grid(space.grid, shift=shift,
halfcomplex=halfcomplex, axes=axes)
# Make a partition with nodes on the boundary in the last transform axis
# if `halfcomplex == True`, otherwise a standard partition.
if halfcomplex:
max_pt = {axes[-1]: recip_grid.max_pt[axes[-1]]}
part = uniform_partition_fromgrid(recip_grid, max_pt=max_pt)
else:
part = uniform_partition_fromgrid(recip_grid)
    # Use convention of adding a hat to represent the Fourier transform of a variable
axis_labels = list(space.axis_labels)
for i in axes:
# Avoid double math
label = axis_labels[i].replace('$', '')
axis_labels[i] = '$\^{{{}}}$'.format(label)
recip_spc = uniform_discr_frompartition(part, exponent=exponent,
dtype=dtype, impl=impl,
axis_labels=axis_labels)
return recip_spc
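# A minimal sketch, assuming the usual ``odl.uniform_discr`` factory for the
# real space (not among this module's imports):
#
#     import odl
#     space = odl.uniform_discr(0, 1, 16)
#     rspace = reciprocal_space(space, halfcomplex=True)
#     # rspace is complex and has 16 // 2 + 1 == 9 points in the last axis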
if __name__ == '__main__':
from doctest import testmod, NORMALIZE_WHITESPACE
testmod(optionflags=NORMALIZE_WHITESPACE)
| gpl-3.0 | -1,737,695,355,543,527,700 | 34.180577 | 79 | 0.598516 | false |
allenai/document-qa | docqa/text_preprocessor.py | 1 | 7061 | from collections import Counter
from typing import List, Optional, Tuple
import numpy as np
from tqdm import tqdm
from docqa.utils import flatten_iterable
from docqa.data_processing.document_splitter import ExtractedParagraphWithAnswers, MergeParagraphs, ExtractedParagraph
from docqa.data_processing.multi_paragraph_qa import ParagraphWithAnswers
from docqa.configurable import Configurable
from docqa.squad.squad_data import SquadCorpus
from docqa.triviaqa.build_span_corpus import TriviaQaWebDataset
class TextPreprocessor(Configurable):
""" Preprocess text input, must be deterministic. Only used thus far adding special indicator tokens """
def encode_extracted_paragraph(self, question: List[str], paragraph: ExtractedParagraphWithAnswers):
text, answers, _ = self.encode_paragraph(question, paragraph.text,
paragraph.start == 0, paragraph.answer_spans)
return ParagraphWithAnswers(text, answers)
def encode_text(self, question: List[str], paragraph: ExtractedParagraph):
text, _, _ = self.encode_paragraph(question, paragraph.text, paragraph.start == 0,
np.zeros((0, 2), dtype=np.int32))
return text
def encode_paragraph(self, question: List[str], paragraphs: List[List[str]],
is_first, answer_spans: np.ndarray,
token_spans=None) -> Tuple[List[str], np.ndarray, Optional[np.ndarray]]:
"""
Returns updated (and flattened) text, answer_spans, and token_spans
"""
raise NotImplementedError()
def special_tokens(self) -> List[str]:
return []
class WithIndicators(TextPreprocessor):
"""
    Adds a document or group start token before the text, and a paragraph
    token between paragraphs.
"""
PARAGRAPH_TOKEN = "%%PARAGRAPH%%"
DOCUMENT_START_TOKEN = "%%DOCUMENT%%"
PARAGRAPH_GROUP = "%%PARAGRAPH_GROUP%%"
def __init__(self, remove_cross_answer: bool=True, para_tokens: bool=True, doc_start_token: bool=True):
self.remove_cross_answer = remove_cross_answer
self.doc_start_token = doc_start_token
self.para_tokens = para_tokens
def special_tokens(self) -> List[str]:
tokens = [self.PARAGRAPH_GROUP]
if self.doc_start_token:
tokens.append(self.DOCUMENT_START_TOKEN)
if self.para_tokens:
tokens.append(self.PARAGRAPH_TOKEN)
return tokens
def encode_paragraph(self, question: List[str], paragraphs: List[List[str]], is_first, answer_spans: np.ndarray, inver=None):
out = []
offset = 0
if self.doc_start_token and is_first:
out.append(self.DOCUMENT_START_TOKEN)
else:
out.append(self.PARAGRAPH_GROUP)
if inver is not None:
inv_out = [np.zeros((1, 2), dtype=np.int32)]
else:
inv_out = None
offset += 1
spans = answer_spans + offset
out += paragraphs[0]
offset += len(paragraphs[0])
on_ix = len(paragraphs[0])
if inv_out is not None:
inv_out.append(inver[:len(paragraphs[0])])
for sent in paragraphs[1:]:
if self.remove_cross_answer:
remove = np.logical_and(spans[:, 0] < offset, spans[:, 1] >= offset)
spans = spans[np.logical_not(remove)]
if self.para_tokens:
spans[spans[:, 0] >= offset, 0] += 1
spans[spans[:, 1] >= offset, 1] += 1
out.append(self.PARAGRAPH_TOKEN)
if inv_out is not None:
if len(inv_out) == 0 or len(inv_out[-1]) == 0:
inv_out.append(np.zeros((1, 2), dtype=np.int32))
else:
inv_out.append(np.full((1, 2), inv_out[-1][-1][1], dtype=np.int32))
offset += 1
out += sent
offset += len(sent)
if inv_out is not None:
inv_out.append(inver[on_ix:on_ix+len(sent)])
on_ix += len(sent)
return out, spans, None if inv_out is None else np.concatenate(inv_out)
def __setstate__(self, state):
if "state" in state:
state["state"]["doc_start_token"] = True
state["state"]["para_tokens"] = True
else:
if "doc_start_token" not in state:
state["doc_start_token"] = True
if "para_tokens" not in state:
state["para_tokens"] = True
super().__setstate__(state)
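# A minimal sketch of the indicator tokens this class inserts (token strings
# taken from the class constants above; the answer spans are empty for brevity):
#
#     pre = WithIndicators()
#     text, spans, _ = pre.encode_paragraph(
#         ["who", "?"], [["First", "para"], ["Second", "para"]],
#         is_first=True, answer_spans=np.zeros((0, 2), dtype=np.int32))
#     # text == ['%%DOCUMENT%%', 'First', 'para',
#     #          '%%PARAGRAPH%%', 'Second', 'para']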
def check_preprocess():
data = TriviaQaWebDataset()
merge = MergeParagraphs(400)
questions = data.get_dev()
pre = WithIndicators(False)
remove_cross = WithIndicators(True)
rng = np.random.RandomState(0)
rng.shuffle(questions)
for q in tqdm(questions[:1000]):
doc = rng.choice(q.all_docs, 1)[0]
text = data.evidence.get_document(doc.doc_id, n_tokens=800)
paras = merge.split_annotated(text, doc.answer_spans)
para = paras[np.random.randint(0, len(paras))]
built = pre.encode_extracted_paragraph(q.question, para)
expected_text = flatten_iterable(para.text)
if expected_text != [x for x in built.text if x not in pre.special_tokens()]:
raise ValueError()
expected = [expected_text[s:e+1] for s, e in para.answer_spans]
expected = Counter([tuple(x) for x in expected])
actual = [tuple(built.text[s:e+1]) for s,e in built.answer_spans]
actual_cleaned = Counter(tuple(z for z in x if z not in pre.special_tokens()) for x in actual)
if actual_cleaned != expected:
raise ValueError()
r_built = remove_cross.encode_extracted_paragraph(q.question, para)
rc = Counter(tuple(r_built.text[s:e + 1]) for s, e in r_built.answer_spans)
removed = Counter()
for w in actual:
if all(x not in pre.special_tokens() for x in w):
removed[w] += 1
if rc != removed:
raise ValueError()
def check_preprocess_squad():
data = SquadCorpus().get_train()
remove_cross = WithIndicators(True)
for doc in tqdm(data):
for para in doc.paragraphs:
q = para.questions[np.random.randint(0, len(para.questions))]
text, ans, inv = remove_cross.encode_paragraph(q.words, para.text, para.paragraph_num == 0,
q.answer.answer_spans, para.spans)
if len(inv) != len(text):
raise ValueError()
for i in range(len(inv)-1):
if inv[i, 0] > inv[i+1, 0]:
raise ValueError()
for (s1, e1), (s2, e2) in zip(ans, q.answer.answer_spans):
if tuple(inv[s1]) != tuple(para.spans[s2]):
raise ValueError()
if tuple(inv[e1]) != tuple(para.spans[e2]):
raise ValueError()
if __name__ == "__main__":
check_preprocess_squad() | apache-2.0 | 8,086,274,971,685,163,000 | 37.172973 | 129 | 0.584195 | false |
hylje/tekis | tekis/flatpages/migrations/0003_auto_20160221_0250.py | 1 | 1533 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-21 00:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('flatpages', '0002_auto_20160221_0006'),
]
operations = [
migrations.CreateModel(
name='Sponsor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('url', models.URLField()),
('logo', models.ImageField(upload_to='sponsors/')),
('titletext', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
],
options={
'ordering': ('name',),
},
),
migrations.AlterField(
model_name='flatpage',
name='menu_index',
field=models.IntegerField(default=0, help_text='Menus are sorted ascending by this value. The first menu item in a category is the category link itself. <strong>Note:</strong> The first menu item in the top level category should be the front page.'),
),
migrations.AlterField(
model_name='flatpage',
name='published',
field=models.BooleanField(default=False, help_text='Published pages show up on the menu. Unpublished pages can be reached over direct link.'),
),
]
| bsd-3-clause | -2,971,475,573,143,553,500 | 38.307692 | 262 | 0.580561 | false |
kowey/attelo | attelo/harness/parse.py | 1 | 5021 | '''
Control over attelo parsers as might be needed for a test harness
'''
from __future__ import print_function
from os import path as fp
import os
import sys
from joblib import (delayed)
from ..io import (write_predictions_output)
from attelo.decoding.util import (prediction_to_triples)
from attelo.fold import (select_training,
select_testing)
from attelo.harness.util import (makedirs)
def _eval_banner(econf, hconf, fold):
"""
Which combo of eval parameters are we running now?
"""
msg = ("Reassembling "
"fold {fnum} [{dset}]\t"
"parser: {parser}")
return msg.format(fnum=fold,
dset=hconf.dataset,
parser=econf.parser.key)
def _tmp_output_filename(path, suffix):
"""
Temporary filename for output file segment
"""
return fp.join(fp.dirname(path),
'_' + fp.basename(path) + '.' + suffix)
def concatenate_outputs(mpack, output_path):
"""
(For use after :py:func:`delayed_main_for_harness`)
Concatenate temporary per-group outputs into a single
combined output
"""
tmpfiles = [_tmp_output_filename(output_path, d)
for d in sorted(mpack.keys())]
with open(output_path, 'wb') as file_out:
for tfile in tmpfiles:
with open(tfile, 'rb') as file_in:
file_out.write(file_in.read())
for tmpfile in tmpfiles:
os.remove(tmpfile)
def _parse_group(dpack, parser, output_path):
'''
parse a single group and write its output
score the predictions if we have
    :rtype: Count or None
'''
dpack = parser.transform(dpack)
# we trust the parser to select what it thinks is its best prediction
prediction = prediction_to_triples(dpack)
write_predictions_output(dpack, prediction, output_path)
def jobs(mpack, parser, output_path):
"""
Return a list of delayed decoding jobs for the various
documents in this group
"""
res = []
tmpfiles = [_tmp_output_filename(output_path, d)
for d in mpack.keys()]
for tmpfile in tmpfiles:
if fp.exists(tmpfile):
os.remove(tmpfile)
for onedoc, dpack in mpack.items():
tmp_output_path = _tmp_output_filename(output_path, onedoc)
res.append(delayed(_parse_group)(dpack, parser, tmp_output_path))
return res
def learn(hconf, econf, dconf, fold):
"""
Run the learners for the given configuration
"""
if fold is None:
subpacks = dconf.pack
parent_dir = hconf.combined_dir_path()
else:
subpacks = select_training(dconf.pack, dconf.folds, fold)
parent_dir = hconf.fold_dir_path(fold)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
cache = hconf.model_paths(econf.learner, fold)
print('learning ', econf.key, '...', file=sys.stderr)
dpacks = subpacks.values()
targets = [d.target for d in dpacks]
econf.parser.payload.fit(dpacks, targets, cache=cache)
def delayed_decode(hconf, dconf, econf, fold):
"""
Return possible futures for decoding groups within
this model/decoder combo for the given fold
"""
if fold is None and hconf.test_evaluation is None:
return []
if _say_if_decoded(hconf, econf, fold, stage='decoding'):
return []
output_path = hconf.decode_output_path(econf, fold)
makedirs(fp.dirname(output_path))
if fold is None:
subpack = dconf.pack
else:
subpack = select_testing(dconf.pack, dconf.folds, fold)
parser = econf.parser.payload
return jobs(subpack, parser, output_path)
def decode_on_the_fly(hconf, dconf, fold):
"""
Learn each parser, returning decoder jobs as each is learned.
Return a decoder job generator that should hopefully allow us
to effectively learn and decode in parallel.
"""
for econf in hconf.evaluations:
learn(hconf, econf, dconf, fold)
for job in delayed_decode(hconf, dconf, econf, fold):
yield job
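# A minimal sketch of consuming the generator above; this module only imports
# ``delayed``, so pulling in joblib's ``Parallel`` here is an assumption about
# how the surrounding harness executes the jobs:
#
#     from joblib import Parallel
#     Parallel(n_jobs=4)(decode_on_the_fly(hconf, dconf, fold))
#     for econf in hconf.evaluations:
#         post_decode(hconf, dconf, econf, fold)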
def _say_if_decoded(hconf, econf, fold, stage='decoding'):
"""
If we have already done the decoding for a given config
and fold, say so and return True
"""
if fp.exists(hconf.decode_output_path(econf, fold)):
print(("skipping {stage} {parser} "
"(already done)").format(stage=stage,
parser=econf.parser.key),
file=sys.stderr)
return True
else:
return False
def post_decode(hconf, dconf, econf, fold):
"""
Join together output files from this model/decoder combo
"""
if _say_if_decoded(hconf, econf, fold, stage='reassembly'):
return
print(_eval_banner(econf, hconf, fold), file=sys.stderr)
if fold is None:
subpack = dconf.pack
else:
subpack = select_testing(dconf.pack, dconf.folds, fold)
concatenate_outputs(subpack,
hconf.decode_output_path(econf, fold))
| gpl-3.0 | 2,330,291,924,736,310,300 | 28.710059 | 73 | 0.626967 | false |
DailyActie/Surrogate-Model | surrogate/selection/selRoulette.py | 1 | 2509 | # MIT License
#
# Copyright (c) 2016 Daily Actie
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Quan Pan <[email protected]>
# License: MIT License
# Create: 2016-12-02
import random
from operator import attrgetter
def selRoulette(individuals, k=1):
"""Select *k* individuals from the input *individuals* using *k*
spins of a roulette. The selection is made by looking only at the first
objective of each individual. The list returned contains references to
the input *individuals*.
:param individuals: A list of individuals to select from.
:param k: The number of individuals to select.
:returns: A list of selected individuals.
This function uses the :func:`~random.random` function from the python base
:mod:`random` module.
.. warning::
The roulette selection by definition cannot be used for minimization
or when the fitness can be smaller or equal to 0.
"""
s_inds = sorted(individuals, key=attrgetter("fitness"), reverse=True)
# TODO 20161204 individual property fitness.values[]
# sum_fits = sum(ind.fitness.values[0] for ind in individuals)
sum_fits = sum(ind.fitness for ind in individuals)
chosen = []
for i in xrange(k):
u = random.random() * sum_fits
sum_ = 0
for ind in s_inds:
# sum_ += ind.fitness.values[0]
sum_ += ind.fitness
if sum_ > u:
chosen.append(ind)
break
return chosen
| mit | 1,467,734,263,457,390,600 | 38.203125 | 80 | 0.707852 | false |
accraze/bitcoin | qa/pull-tester/rpc-tests.py | 1 | 8302 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
from tests_config import *
#If imported values are not defined then set to zero (or disabled)
if not vars().has_key('ENABLE_WALLET'):
ENABLE_WALLET=0
if not vars().has_key('ENABLE_BITCOIND'):
ENABLE_BITCOIND=0
if not vars().has_key('ENABLE_UTILS'):
ENABLE_UTILS=0
if not vars().has_key('ENABLE_ZMQ'):
ENABLE_ZMQ=0
# python-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError:
print("WARNING: \"import zmq\" failed. Setting ENABLE_ZMQ=0. " \
"To run zmq tests, see dependency info in /qa/README.md.")
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passOn string
opts = set()
passOn = ""
p = re.compile("^--")
bold = ("","")
if (os.name == 'posix'):
bold = ('\033[0m', '\033[1m')
for arg in sys.argv[1:]:
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif (p.match(arg) or arg == "-h"):
passOn += " " + arg
else:
opts.add(arg)
#Set env vars
buildDir = BUILDDIR
if "BITCOIND" not in os.environ:
os.environ["BITCOIND"] = buildDir + '/src/bitcoind' + EXEEXT
if "BITCOINCLI" not in os.environ:
os.environ["BITCOINCLI"] = buildDir + '/src/bitcoin-cli' + EXEEXT
#Disable Windows tests by default
if EXEEXT == ".exe" and "-win" not in opts:
print "Win tests currently disabled. Use -win option to enable"
sys.exit(0)
#Tests
testScripts = [
'wallet.py',
'listtransactions.py',
'receivedby.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rawtransactions.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_limit.py',
'httpbasics.py',
'multi_rpc.py',
'zapwallettxes.py',
'proxy_test.py',
'merkle_blocks.py',
'fundrawtransaction.py',
'signrawtransactions.py',
'walletbackup.py',
'nodehandling.py',
'reindex.py',
'decodescript.py',
'p2p-fullblocktest.py',
'blockchain.py',
'disablewallet.py',
'sendheaders.py',
'keypool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
'invalidtxrequest.py',
'abandonconflict.py',
'p2p-versionbits-warning.py',
]
testScriptsExt = [
'bip65-cltv.py',
'bip65-cltv-p2p.py',
'bip68-sequence.py',
'bipdersig-p2p.py',
'bipdersig.py',
'getblocktemplate_longpoll.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
# 'rpcbind_test.py', #temporary, bug in libevent, see #6655
'smartfees.py',
'maxblocksinflight.py',
'p2p-acceptblock.py',
'mempool_packages.py',
'maxuploadtarget.py',
'replace-by-fee.py',
'p2p-feefilter.py',
'pruning.py', # leave pruning last as it takes a REALLY long time
]
#Enable ZMQ tests
if ENABLE_ZMQ == 1:
testScripts.append('zmq_test.py')
def runtests():
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
if(ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
rpcTestDir = buildDir + '/qa/rpc-tests/'
run_extended = '-extended' in opts
cov_flag = coverage.flag if coverage else ''
flags = " --srcdir %s/src %s %s" % (buildDir, cov_flag, passOn)
#Run Tests
for i in range(len(testScripts)):
if (len(opts) == 0
or (len(opts) == 1 and "-win" in opts )
or run_extended
or testScripts[i] in opts
or re.sub(".py$", "", testScripts[i]) in opts ):
print("Running testscript %s%s%s ..." % (bold[1], testScripts[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScripts[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
# exit if help is called so we print just one set of
# instructions
p = re.compile(" -h| --help")
if p.match(passOn):
sys.exit(0)
# Run Extended Tests
for i in range(len(testScriptsExt)):
if (run_extended or testScriptsExt[i] in opts
or re.sub(".py$", "", testScriptsExt[i]) in opts):
print(
"Running 2nd level testscript "
+ "%s%s%s ..." % (bold[1], testScriptsExt[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScriptsExt[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
else:
print "No rpc tests to run. Wallet, utils, and bitcoind must all be enabled"
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir %s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `qa/rpc-tests/test-framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
| mit | 6,671,498,825,894,520,000 | 29.522059 | 91 | 0.607564 | false |
isyippee/nova | nova/tests/unit/objects/test_compute_node.py | 1 | 25221 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import netaddr
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_versionedobjects import exception as ovo_exc
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import compute_node
from nova.objects import hv_spec
from nova.objects import service
from nova.tests.unit import fake_pci_device_pools
from nova.tests.unit.objects import test_objects
NOW = timeutils.utcnow().replace(microsecond=0)
fake_stats = {'num_foo': '10'}
fake_stats_db_format = jsonutils.dumps(fake_stats)
# host_ip is coerced from a string to an IPAddress
# but needs to be converted to a string for the database format
fake_host_ip = '127.0.0.1'
fake_numa_topology = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([1, 2]), memory=512,
cpu_usage=0, memory_usage=0,
mempages=[], pinned_cpus=set([]),
siblings=[]),
objects.NUMACell(id=1, cpuset=set([3, 4]), memory=512,
cpu_usage=0, memory_usage=0,
mempages=[], pinned_cpus=set([]),
siblings=[])])
fake_numa_topology_db_format = fake_numa_topology._to_json()
fake_supported_instances = [('x86_64', 'kvm', 'hvm')]
fake_hv_spec = hv_spec.HVSpec(arch=fake_supported_instances[0][0],
hv_type=fake_supported_instances[0][1],
vm_mode=fake_supported_instances[0][2])
fake_supported_hv_specs = [fake_hv_spec]
# for backward compatibility, each supported instance object
# is stored as a list in the database
fake_supported_hv_specs_db_format = jsonutils.dumps([fake_hv_spec.to_list()])
fake_pci = jsonutils.dumps(fake_pci_device_pools.fake_pool_list_primitive)
fake_compute_node = {
'created_at': NOW,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'id': 123,
'service_id': None,
'host': 'fake',
'vcpus': 4,
'memory_mb': 4096,
'local_gb': 1024,
'vcpus_used': 2,
'memory_mb_used': 2048,
'local_gb_used': 512,
'hypervisor_type': 'Hyper-Dan-VM-ware',
'hypervisor_version': 1001,
'hypervisor_hostname': 'vm.danplanet.com',
'free_ram_mb': 1024,
'free_disk_gb': 256,
'current_workload': 100,
'running_vms': 2013,
'cpu_info': 'Schmintel i786',
'disk_available_least': 256,
'metrics': '',
'stats': fake_stats_db_format,
'host_ip': fake_host_ip,
'numa_topology': fake_numa_topology_db_format,
'supported_instances': fake_supported_hv_specs_db_format,
'pci_stats': fake_pci,
'cpu_allocation_ratio': 16.0,
'ram_allocation_ratio': 1.5,
}
# FIXME(sbauza) : For compatibility checking, to be removed once we are sure
# that all computes are running latest DB version with host field in it.
fake_old_compute_node = fake_compute_node.copy()
del fake_old_compute_node['host']
# resources are passed from the virt drivers and copied into the compute_node
fake_resources = {
'vcpus': 2,
'memory_mb': 1024,
'local_gb': 10,
'cpu_info': 'fake-info',
'vcpus_used': 1,
'memory_mb_used': 512,
'local_gb_used': 4,
'numa_topology': fake_numa_topology_db_format,
'hypervisor_type': 'fake-type',
'hypervisor_version': 1,
'hypervisor_hostname': 'fake-host',
'disk_available_least': 256,
'host_ip': fake_host_ip,
'supported_instances': fake_supported_instances
}
fake_compute_with_resources = objects.ComputeNode(
vcpus=fake_resources['vcpus'],
memory_mb=fake_resources['memory_mb'],
local_gb=fake_resources['local_gb'],
cpu_info=fake_resources['cpu_info'],
vcpus_used=fake_resources['vcpus_used'],
memory_mb_used=fake_resources['memory_mb_used'],
    local_gb_used=fake_resources['local_gb_used'],
numa_topology=fake_resources['numa_topology'],
hypervisor_type=fake_resources['hypervisor_type'],
hypervisor_version=fake_resources['hypervisor_version'],
hypervisor_hostname=fake_resources['hypervisor_hostname'],
disk_available_least=fake_resources['disk_available_least'],
host_ip=netaddr.IPAddress(fake_resources['host_ip']),
supported_hv_specs=fake_supported_hv_specs,
)
class _TestComputeNodeObject(object):
def supported_hv_specs_comparator(self, expected, obj_val):
obj_val = [inst.to_list() for inst in obj_val]
self.assertJsonEqual(expected, obj_val)
def pci_device_pools_comparator(self, expected, obj_val):
obj_val = obj_val.obj_to_primitive()
self.assertJsonEqual(expected, obj_val)
def comparators(self):
return {'stats': self.assertJsonEqual,
'host_ip': self.str_comparator,
'supported_hv_specs': self.supported_hv_specs_comparator,
'pci_device_pools': self.pci_device_pools_comparator,
}
def subs(self):
return {'supported_hv_specs': 'supported_instances',
'pci_device_pools': 'pci_stats'}
def test_get_by_id(self):
self.mox.StubOutWithMock(db, 'compute_node_get')
db.compute_node_get(self.context, 123).AndReturn(fake_compute_node)
self.mox.ReplayAll()
compute = compute_node.ComputeNode.get_by_id(self.context, 123)
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch.object(objects.Service, 'get_by_id')
@mock.patch.object(db, 'compute_node_get')
def test_get_by_id_with_host_field_not_in_db(self, mock_cn_get,
mock_obj_svc_get):
fake_compute_node_with_svc_id = fake_compute_node.copy()
fake_compute_node_with_svc_id['service_id'] = 123
fake_compute_node_with_no_host = fake_compute_node_with_svc_id.copy()
host = fake_compute_node_with_no_host.pop('host')
fake_service = service.Service(id=123)
fake_service.host = host
mock_cn_get.return_value = fake_compute_node_with_no_host
mock_obj_svc_get.return_value = fake_service
compute = compute_node.ComputeNode.get_by_id(self.context, 123)
self.compare_obj(compute, fake_compute_node_with_svc_id,
subs=self.subs(),
comparators=self.comparators())
def test_get_by_service_id(self):
self.mox.StubOutWithMock(db, 'compute_nodes_get_by_service_id')
db.compute_nodes_get_by_service_id(self.context, 456).AndReturn(
[fake_compute_node])
self.mox.ReplayAll()
compute = compute_node.ComputeNode.get_by_service_id(self.context, 456)
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch.object(db, 'compute_node_get_by_host_and_nodename')
def test_get_by_host_and_nodename(self, cn_get_by_h_and_n):
cn_get_by_h_and_n.return_value = fake_compute_node
compute = compute_node.ComputeNode.get_by_host_and_nodename(
self.context, 'fake', 'vm.danplanet.com')
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch('nova.objects.Service.get_by_id')
@mock.patch('nova.db.compute_nodes_get_by_service_id')
@mock.patch('nova.objects.Service.get_by_compute_host')
@mock.patch.object(db, 'compute_node_get_by_host_and_nodename')
def test_get_by_host_and_nodename_with_old_compute(self, cn_get_by_h_and_n,
svc_get_by_ch,
cn_get_by_svc_id,
svc_get_by_id):
cn_get_by_h_and_n.side_effect = exception.ComputeHostNotFound(
host='fake')
fake_service = service.Service(id=123)
fake_service.host = 'fake'
svc_get_by_ch.return_value = fake_service
cn_get_by_svc_id.return_value = [fake_old_compute_node]
svc_get_by_id.return_value = fake_service
compute = compute_node.ComputeNode.get_by_host_and_nodename(
self.context, 'fake', 'vm.danplanet.com')
# NOTE(sbauza): Result is still converted to new style Compute
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch('nova.objects.Service.get_by_id')
@mock.patch('nova.db.compute_nodes_get_by_service_id')
@mock.patch('nova.objects.Service.get_by_compute_host')
@mock.patch.object(db, 'compute_node_get_by_host_and_nodename')
def test_get_by_host_and_nodename_not_found(self, cn_get_by_h_and_n,
svc_get_by_ch,
cn_get_by_svc_id,
svc_get_by_id):
cn_get_by_h_and_n.side_effect = exception.ComputeHostNotFound(
host='fake')
fake_service = service.Service(id=123)
fake_service.host = 'fake'
another_node = fake_old_compute_node.copy()
another_node['hypervisor_hostname'] = 'elsewhere'
svc_get_by_ch.return_value = fake_service
cn_get_by_svc_id.return_value = [another_node]
svc_get_by_id.return_value = fake_service
self.assertRaises(exception.ComputeHostNotFound,
compute_node.ComputeNode.get_by_host_and_nodename,
self.context, 'fake', 'vm.danplanet.com')
@mock.patch('nova.objects.Service.get_by_id')
@mock.patch('nova.db.compute_nodes_get_by_service_id')
@mock.patch('nova.objects.Service.get_by_compute_host')
@mock.patch.object(db, 'compute_node_get_by_host_and_nodename')
def test_get_by_host_and_nodename_good_and_bad(self, cn_get_by_h_and_n,
svc_get_by_ch,
cn_get_by_svc_id,
svc_get_by_id):
cn_get_by_h_and_n.side_effect = exception.ComputeHostNotFound(
host='fake')
fake_service = service.Service(id=123)
fake_service.host = 'fake'
bad_node = fake_old_compute_node.copy()
bad_node['hypervisor_hostname'] = 'elsewhere'
good_node = fake_old_compute_node.copy()
svc_get_by_ch.return_value = fake_service
cn_get_by_svc_id.return_value = [bad_node, good_node]
svc_get_by_id.return_value = fake_service
compute = compute_node.ComputeNode.get_by_host_and_nodename(
self.context, 'fake', 'vm.danplanet.com')
# NOTE(sbauza): Result is still converted to new style Compute
self.compare_obj(compute, good_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch('nova.db.compute_node_get_all_by_host')
def test_get_first_node_by_host_for_old_compat(
self, cn_get_all_by_host):
another_node = fake_compute_node.copy()
another_node['hypervisor_hostname'] = 'neverland'
cn_get_all_by_host.return_value = [fake_compute_node, another_node]
compute = (
compute_node.ComputeNode.get_first_node_by_host_for_old_compat(
self.context, 'fake')
)
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
def test_get_first_node_by_host_for_old_compat_not_found(
self, cn_get_all_by_host):
cn_get_all_by_host.side_effect = exception.ComputeHostNotFound(
host='fake')
self.assertRaises(
exception.ComputeHostNotFound,
compute_node.ComputeNode.get_first_node_by_host_for_old_compat,
self.context, 'fake')
def test_create(self):
self.mox.StubOutWithMock(db, 'compute_node_create')
db.compute_node_create(
self.context,
{
'service_id': 456,
'stats': fake_stats_db_format,
'host_ip': fake_host_ip,
'supported_instances': fake_supported_hv_specs_db_format,
}).AndReturn(fake_compute_node)
self.mox.ReplayAll()
compute = compute_node.ComputeNode(context=self.context)
compute.service_id = 456
compute.stats = fake_stats
# NOTE (pmurray): host_ip is coerced to an IPAddress
compute.host_ip = fake_host_ip
compute.supported_hv_specs = fake_supported_hv_specs
compute.create()
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
def test_recreate_fails(self):
self.mox.StubOutWithMock(db, 'compute_node_create')
db.compute_node_create(self.context, {'service_id': 456}).AndReturn(
fake_compute_node)
self.mox.ReplayAll()
compute = compute_node.ComputeNode(context=self.context)
compute.service_id = 456
compute.create()
self.assertRaises(exception.ObjectActionError, compute.create)
def test_save(self):
self.mox.StubOutWithMock(db, 'compute_node_update')
db.compute_node_update(
self.context, 123,
{
'vcpus_used': 3,
'stats': fake_stats_db_format,
'host_ip': fake_host_ip,
'supported_instances': fake_supported_hv_specs_db_format,
}).AndReturn(fake_compute_node)
self.mox.ReplayAll()
compute = compute_node.ComputeNode(context=self.context)
compute.id = 123
compute.vcpus_used = 3
compute.stats = fake_stats
# NOTE (pmurray): host_ip is coerced to an IPAddress
compute.host_ip = fake_host_ip
compute.supported_hv_specs = fake_supported_hv_specs
compute.save()
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch.object(db, 'compute_node_create',
return_value=fake_compute_node)
def test_set_id_failure(self, db_mock):
compute = compute_node.ComputeNode(context=self.context)
compute.create()
self.assertRaises(ovo_exc.ReadOnlyFieldError, setattr,
compute, 'id', 124)
def test_destroy(self):
self.mox.StubOutWithMock(db, 'compute_node_delete')
db.compute_node_delete(self.context, 123)
self.mox.ReplayAll()
compute = compute_node.ComputeNode(context=self.context)
compute.id = 123
compute.destroy()
def test_service(self):
self.mox.StubOutWithMock(service.Service, 'get_by_id')
service.Service.get_by_id(self.context, 456).AndReturn('my-service')
self.mox.ReplayAll()
compute = compute_node.ComputeNode()
compute._context = self.context
compute.id = 123
compute.service_id = 456
self.assertEqual('my-service', compute.service)
# Make sure it doesn't call Service.get_by_id() again
self.assertEqual('my-service', compute.service)
def test_get_all(self):
self.mox.StubOutWithMock(db, 'compute_node_get_all')
db.compute_node_get_all(self.context).AndReturn([fake_compute_node])
self.mox.ReplayAll()
computes = compute_node.ComputeNodeList.get_all(self.context)
self.assertEqual(1, len(computes))
self.compare_obj(computes[0], fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
def test_get_by_hypervisor(self):
self.mox.StubOutWithMock(db, 'compute_node_search_by_hypervisor')
db.compute_node_search_by_hypervisor(self.context, 'hyper').AndReturn(
[fake_compute_node])
self.mox.ReplayAll()
computes = compute_node.ComputeNodeList.get_by_hypervisor(self.context,
'hyper')
self.assertEqual(1, len(computes))
self.compare_obj(computes[0], fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch('nova.db.compute_nodes_get_by_service_id')
def test_get_by_service(self, cn_get_by_svc_id):
cn_get_by_svc_id.return_value = [fake_compute_node]
fake_service = service.Service(id=123)
computes = compute_node.ComputeNodeList.get_by_service(self.context,
fake_service)
self.assertEqual(1, len(computes))
self.compare_obj(computes[0], fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch('nova.db.compute_node_get_all_by_host')
def test_get_all_by_host(self, cn_get_all_by_host):
cn_get_all_by_host.return_value = [fake_compute_node]
computes = compute_node.ComputeNodeList.get_all_by_host(self.context,
'fake')
self.assertEqual(1, len(computes))
self.compare_obj(computes[0], fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch('nova.objects.Service.get_by_id')
@mock.patch('nova.db.compute_nodes_get_by_service_id')
@mock.patch('nova.objects.Service.get_by_compute_host')
@mock.patch('nova.db.compute_node_get_all_by_host')
def test_get_all_by_host_with_old_compute(self, cn_get_all_by_host,
svc_get_by_ch,
cn_get_by_svc_id,
svc_get_by_id):
cn_get_all_by_host.side_effect = exception.ComputeHostNotFound(
host='fake')
fake_service = service.Service(id=123)
fake_service.host = 'fake'
svc_get_by_ch.return_value = fake_service
cn_get_by_svc_id.return_value = [fake_old_compute_node]
svc_get_by_id.return_value = fake_service
computes = compute_node.ComputeNodeList.get_all_by_host(self.context,
'fake')
self.assertEqual(1, len(computes))
# NOTE(sbauza): Result is still converted to new style Compute
self.compare_obj(computes[0], fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
def test_compat_numa_topology(self):
compute = compute_node.ComputeNode()
primitive = compute.obj_to_primitive(target_version='1.4')
self.assertNotIn('numa_topology', primitive)
def test_compat_supported_hv_specs(self):
compute = compute_node.ComputeNode()
compute.supported_hv_specs = fake_supported_hv_specs
primitive = compute.obj_to_primitive(target_version='1.5')
self.assertNotIn('supported_hv_specs', primitive)
def test_compat_host(self):
compute = compute_node.ComputeNode()
primitive = compute.obj_to_primitive(target_version='1.6')
self.assertNotIn('host', primitive)
def test_compat_pci_device_pools(self):
compute = compute_node.ComputeNode()
compute.pci_device_pools = fake_pci_device_pools.fake_pool_list
primitive = compute.obj_to_primitive(target_version='1.8')
self.assertNotIn('pci_device_pools', primitive)
@mock.patch('nova.objects.Service.get_by_compute_host')
def test_compat_service_id(self, mock_get):
mock_get.return_value = objects.Service(id=1)
compute = objects.ComputeNode(host='fake-host', service_id=None)
primitive = compute.obj_to_primitive(target_version='1.12')
self.assertEqual(1, primitive['nova_object.data']['service_id'])
@mock.patch('nova.objects.Service.get_by_compute_host')
def test_compat_service_id_compute_host_not_found(self, mock_get):
mock_get.side_effect = exception.ComputeHostNotFound(host='fake-host')
compute = objects.ComputeNode(host='fake-host', service_id=None)
primitive = compute.obj_to_primitive(target_version='1.12')
self.assertEqual(-1, primitive['nova_object.data']['service_id'])
def test_update_from_virt_driver(self):
# copy in case the update has a side effect
resources = copy.deepcopy(fake_resources)
compute = compute_node.ComputeNode()
compute.update_from_virt_driver(resources)
expected = fake_compute_with_resources
self.assertTrue(base.obj_equal_prims(expected, compute))
def test_update_from_virt_driver_missing_field(self):
# NOTE(pmurray): update_from_virt_driver does not require
# all fields to be present in resources. Validation of the
# resources data structure would be done in a different method.
resources = copy.deepcopy(fake_resources)
del resources['vcpus']
compute = compute_node.ComputeNode()
compute.update_from_virt_driver(resources)
expected = fake_compute_with_resources.obj_clone()
del expected.vcpus
self.assertTrue(base.obj_equal_prims(expected, compute))
def test_update_from_virt_driver_extra_field(self):
# copy in case the update has a side effect
resources = copy.deepcopy(fake_resources)
resources['extra_field'] = 'nonsense'
compute = compute_node.ComputeNode()
compute.update_from_virt_driver(resources)
expected = fake_compute_with_resources
self.assertTrue(base.obj_equal_prims(expected, compute))
def test_update_from_virt_driver_bad_value(self):
# copy in case the update has a side effect
resources = copy.deepcopy(fake_resources)
resources['vcpus'] = 'nonsense'
compute = compute_node.ComputeNode()
self.assertRaises(ValueError,
compute.update_from_virt_driver, resources)
def test_compat_allocation_ratios(self):
compute = compute_node.ComputeNode()
primitive = compute.obj_to_primitive(target_version='1.13')
self.assertNotIn('cpu_allocation_ratio', primitive)
self.assertNotIn('ram_allocation_ratio', primitive)
def test_compat_allocation_ratios_old_compute(self):
self.flags(cpu_allocation_ratio=2.0, ram_allocation_ratio=3.0)
compute_dict = fake_compute_node.copy()
# old computes don't provide allocation ratios to the table
compute_dict['cpu_allocation_ratio'] = None
compute_dict['ram_allocation_ratio'] = None
cls = objects.ComputeNode
compute = cls._from_db_object(self.context, cls(), compute_dict)
self.assertEqual(2.0, compute.cpu_allocation_ratio)
self.assertEqual(3.0, compute.ram_allocation_ratio)
def test_compat_allocation_ratios_default_values(self):
compute_dict = fake_compute_node.copy()
# new computes provide allocation ratios defaulted to 0.0
compute_dict['cpu_allocation_ratio'] = 0.0
compute_dict['ram_allocation_ratio'] = 0.0
cls = objects.ComputeNode
compute = cls._from_db_object(self.context, cls(), compute_dict)
self.assertEqual(16.0, compute.cpu_allocation_ratio)
self.assertEqual(1.5, compute.ram_allocation_ratio)
def test_compat_allocation_ratios_old_compute_default_values(self):
compute_dict = fake_compute_node.copy()
# old computes don't provide allocation ratios to the table
compute_dict['cpu_allocation_ratio'] = None
compute_dict['ram_allocation_ratio'] = None
cls = objects.ComputeNode
compute = cls._from_db_object(self.context, cls(), compute_dict)
self.assertEqual(16.0, compute.cpu_allocation_ratio)
self.assertEqual(1.5, compute.ram_allocation_ratio)
class TestComputeNodeObject(test_objects._LocalTest,
_TestComputeNodeObject):
pass
class TestRemoteComputeNodeObject(test_objects._RemoteTest,
_TestComputeNodeObject):
pass
| apache-2.0 | -4,816,332,282,331,989,000 | 43.718085 | 79 | 0.612545 | false |
aroth-arsoft/arsoft-web-crashupload | app/crashdump/utils.py | 1 | 24246 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python;
crashdump_use_jinja2 = False
def _(msg):
return msg
def tag_a(name, title=None, href=None, alt=None):
from xml.etree.ElementTree import Element, tostring
a = Element('a')
a.text = name
if href:
a.set('href', href)
if title:
a.set('title', title)
if alt:
a.set('alt', alt)
return tostring(a, encoding="utf8", method='html').decode()
def _hex_format(number, prefix='0x', width=None, bits=None):
if isinstance(number, str):
try:
number = int(number)
except ValueError:
number = None
if number is None:
return '(none)'
if bits is not None:
if bits == 32:
number = number & 0xffffffff
if width is None:
width = 8
elif bits == 64:
number = number & 0xffffffffffffffff
if width is None:
width = 16
if width is None:
if number > 2**48:
width = 16
elif number > 2**40:
width = 12
elif number > 2**32:
width = 10
elif number > 2**24:
width = 8
elif number > 2**16:
width = 6
elif number > 2**8:
width = 4
else:
width = 2
fmt = '%%0%ix' % width
return prefix + fmt % number
def hex_format(number, prefix='0x', width=None, bits=None):
if isinstance(number, list):
nums = []
for n in number:
nums.append(_hex_format(n, prefix, width, bits))
return ','.join(nums)
else:
return _hex_format(number, prefix, width, bits)
def hex_format_bits(number, bits):
return hex_format(number, bits=bits)
def addr_format(number, prefix='0x', bits=64):
if number == 0:
return 'NULL'
elif number < 256:
return hex_format(number, 'NULL+' + prefix, bits=bits)
else:
return hex_format(number, prefix, bits=bits)
def addr_format_64(number, prefix='0x'):
if number == 0:
return 'NULL'
elif number < 256:
return hex_format(number, 'NULL+' + prefix, bits=64)
else:
return hex_format(number, prefix, bits=64)
def addr_format_32(number, prefix='0x'):
if number == 0:
return 'NULL'
elif number < 256:
return hex_format(number, 'NULL+' + prefix, bits=32)
else:
return hex_format(number, prefix, bits=32)
def addr_format_bits(number, bits=64):
return addr_format(number, bits=bits)
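# A few illustrative values, following directly from the helpers above:
#
#     hex_format(255, bits=32)    # -> '0x000000ff'
#     addr_format(0)              # -> 'NULL'
#     addr_format(16, bits=32)    # -> 'NULL+0x00000010'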
def exception_code(platform_type, code, name):
if platform_type is None:
return 'Platform unknown'
elif platform_type == 'Linux':
return tag_a(str(name) + '(' + hex_format(code) + ')', href='https://en.wikipedia.org/wiki/Unix_signal')
elif platform_type == 'Windows NT':
return tag_a(str(name) + '(' + hex_format(code) + ')', href='https://en.wikipedia.org/wiki/Windows_NT')
elif platform_type == 'Windows':
return tag_a(str(name) + '(' + hex_format(code) + ')', href='https://en.wikipedia.org/wiki/Microsoft_Windows')
else:
return tag_a(str(name) + '(' + hex_format(code) + ')', href='https://en.wikipedia.org/wiki/Special:Search/' + str(platform_type))
def format_bool_yesno(val):
    if isinstance(val, str):
try:
val = bool(val)
except ValueError:
val = None
if val is None:
return '(none)'
elif val == True:
return _('yes')
elif val == False:
return _('no')
else:
return _('neither')
def format_source_line(source, line, line_offset=None, source_url=None):
if source is None:
return _('unknown')
else:
title = str(source) + ':' + str(line)
if line_offset is not None:
title += '+' + hex_format(line_offset)
if source_url is not None:
href = source_url
else:
href='file:///' + str(source)
return tag_a(title, href=href)
def format_function_plus_offset(function, funcoff=None):
if function is None:
return _('unknown')
else:
if funcoff:
return str(function) + '+' + hex_format(funcoff)
else:
return str(function)
def str_or_unknown(str):
if str is None:
return _('unknown')
else:
return str
def format_cpu_type(cputype):
cputype = cputype.lower()
if cputype == 'amd64':
href='http://en.wikipedia.org/wiki/X86-64'
title = 'x86-64 (also known as x64, x86_64 and AMD64)'
elif cputype == 'x86':
href='http://en.wikipedia.org/wiki/X86'
title = 'x86 (also known as i386)'
elif cputype == 'mips':
href='http://en.wikipedia.org/wiki/MIPS_instruction_set'
title = 'MIPS instruction set'
elif cputype == 'alpha':
href='http://en.wikipedia.org/wiki/DEC_Alpha'
title = 'Alpha, originally known as Alpha AXP'
elif cputype == 'alpha64':
href='http://en.wikipedia.org/wiki/DEC_Alpha'
title = 'Alpha64, originally known as Alpha AXP'
elif cputype == 'powerpc':
href='http://en.wikipedia.org/wiki/PowerPC'
title = 'PowerPC'
elif cputype == 'powerpc64':
href='http://en.wikipedia.org/wiki/Ppc64'
title = 'PowerPC64 or ppc64'
elif cputype == 'arm':
href='http://en.wikipedia.org/wiki/ARM_architecture'
title = 'ARM'
elif cputype == 'arm64':
href='http://en.wikipedia.org/wiki/ARM_architecture#64-bit'
title = 'ARM 64-bit'
elif cputype == 'sparc':
href='http://en.wikipedia.org/wiki/SPARC'
title = 'SPARC ("scalable processor architecture")'
elif cputype == 'ia64':
href='http://en.wikipedia.org/wiki/Itanium'
title = 'Intel Itanium architecture (IA-64)'
elif cputype == 'msil':
href='http://en.wikipedia.org/wiki/Common_Intermediate_Language'
title = 'Microsoft Intermediate Language (MSIL)'
elif cputype == 'x64 wow':
href='http://en.wikipedia.org/wiki/WoW64'
title = 'Microsoft WoW64'
else:
href = 'http://en.wikipedia.org/wiki/Central_processing_unit'
title = 'Unknown:%s' % cputype
return tag_a(title, title=cputype, href=href)
def format_cpu_vendor(vendor):
if vendor == 'AuthenticAMD':
title = 'AMD'
href = 'http://en.wikipedia.org/wiki/Advanced_Micro_Devices'
elif vendor == 'GenuineIntel':
title = 'Intel'
href = 'http://en.wikipedia.org/wiki/Intel'
elif vendor == 'Microsoft Hv':
title = 'Microsoft Hyper-V'
href = 'http://en.wikipedia.org/wiki/Hyper-V'
elif vendor == 'VMwareVMware':
title = 'VMware'
href = 'http://en.wikipedia.org/wiki/VMware'
elif vendor == 'KVMKVMKVMKVM':
title = 'KVM'
href = 'http://en.wikipedia.org/wiki/Kernel-based_Virtual_Machine'
elif vendor == 'XenVMMXenVMM':
title = 'Xen'
href = 'http://en.wikipedia.org/wiki/Xen'
else:
title = vendor
href = 'http://en.wikipedia.org/wiki/List_of_x86_manufacturers'
return tag_a(title, title=vendor, href=href)
def format_cpu_name(vendor, name):
# http://en.wikipedia.org/wiki/CPUID
# http://www.sandpile.org/x86/cpuid.htm
if vendor == 'AuthenticAMD':
if name is None:
title = 'Unknown AMD CPU'
href = 'http://en.wikipedia.org/wiki/Advanced_Micro_Devices'
elif name.startswith('AMD Ryzen'):
href = 'https://en.wikipedia.org/wiki/Ryzen'
title = 'AMD Ryzen'
elif name.startswith('AMD FX'):
href = 'http://en.wikipedia.org/wiki/List_of_AMD_FX_microprocessors'
title = 'AMD FX-series'
elif name.startswith('AMD Phenom'):
href = 'https://en.wikipedia.org/wiki/List_of_AMD_Phenom_microprocessors'
title = 'AMD Phenom family'
elif name.startswith('AMD Opteron'):
href = 'https://en.wikipedia.org/wiki/List_of_AMD_Opteron_microprocessors'
title = 'AMD Opteron family'
elif name.startswith('AMD Sempron'):
href = 'https://en.wikipedia.org/wiki/List_of_AMD_Sempron_microprocessors'
title = 'AMD Sempron family'
elif name.startswith('AMD Turion'):
href = 'https://en.wikipedia.org/wiki/List_of_AMD_Turion_microprocessors'
title = 'AMD Turion family'
elif name.startswith('AMD A'):
href = 'https://en.wikipedia.org/wiki/List_of_AMD_accelerated_processing_unit_microprocessors'
title = 'AMD APU series'
else:
title = 'Unknown AMD CPU'
href = 'http://en.wikipedia.org/wiki/Advanced_Micro_Devices'
title = title + ' (%s)' % name
elif vendor == 'GenuineIntel':
if name is None:
title = 'Unknown Intel CPU'
href = 'https://en.wikipedia.org/wiki/List_of_Intel_microprocessors'
elif name.startswith('Intel(R) Core(TM) i3'):
title = 'Intel Core i3 series'
href = 'http://en.wikipedia.org/wiki/Intel_Core'
elif name.startswith('Intel(R) Core(TM) i5'):
title = 'Intel Core i5 series'
href = 'http://en.wikipedia.org/wiki/Intel_Core'
elif name.startswith('Intel(R) Core(TM) i7'):
title = 'Intel Core i7 series'
href = 'http://en.wikipedia.org/wiki/Intel_Core'
elif name.startswith('Intel(R) Core(TM) i9'):
title = 'Intel Core i9 series'
href = 'http://en.wikipedia.org/wiki/Intel_Core'
elif name.startswith('Intel(R) Core(TM)'):
title = 'Unknown Intel Core series'
href = 'http://en.wikipedia.org/wiki/Intel_Core'
elif name.startswith('Intel(R) Xeon(R)') or name.startswith('Intel(R) Xeon(TM)'):
title = 'Intel Xeon series'
href = 'http://en.wikipedia.org/wiki/Xeon'
else:
title = 'Unknown Intel CPU'
href = 'https://en.wikipedia.org/wiki/List_of_Intel_microprocessors'
title = title + ' (%s)' % name
else:
title = name
href = 'http://en.wikipedia.org/wiki/List_of_x86_manufacturers'
return tag_a(name, title=title, href=href)
def format_distribution_id(distro_id):
if distro_id == 'Debian':
name = 'Debian'
href = 'http://www.debian.org'
elif distro_id == 'Ubuntu':
name = 'Ubuntu'
href = 'http://www.ubuntu.com'
else:
name = distro_id
href = 'http://distrowatch.com/' + distro_id
return tag_a(name, title=distro_id, href=href)
def format_distribution_codename(distro_id, distro_codename):
if distro_id == 'Debian':
name = '%s %s' % (distro_id.capitalize(), distro_codename.capitalize())
href = 'http://www.debian.org/%s%s' % (distro_id.capitalize(), distro_codename.capitalize())
elif distro_id == 'Ubuntu':
name = '%s %s' % (distro_id.capitalize(), distro_codename.capitalize())
href = 'http://ubuntuguide.org/wiki/%s_%s' % (distro_id.capitalize(), distro_codename.capitalize())
else:
name = distro_id
href = 'http://distrowatch.com/' + distro_id
return tag_a(name, title=distro_id, href=href)
def format_seconds(s):
if s is None:
return 'None'
elif s >= 3600:
hr = int(float(s) / 3600.0)
from math import fmod
m = fmod(float(s), 3600.0) / 60.0
return '%ihr %0.1fmin' % (hr, m)
elif s >= 60:
m = float(s) / 60.0
return '%0.1fmin' % m
elif s >= 1:
return '%0.1fs' % s
else:
return '%0.1fms' % ( s * 1000.0 )
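# Illustrative examples (inputs are hypothetical):
#   format_seconds(90)   -> '1.5min'
#   format_seconds(0.25) -> '250.0ms'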
def format_milliseconds(ms):
if ms is None:
return 'None'
elif ms > 1000:
s = float(ms) / 1000.0
return format_seconds(s)
else:
return '%ims' % ms
def format_trust_level(tl):
if tl == 0 or tl is None:
return 'Unknown'
elif tl == 1:
return 'Stack scan'
elif tl == 2:
return 'CFI scan'
elif tl == 3:
return 'FP'
elif tl == 4:
return 'CFI'
elif tl == 5:
return 'External'
elif tl == 6:
return 'IP'
else:
return 'unknown(%i)' % tl
_suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
def format_size(nbytes):
if isinstance(nbytes, str):
try:
nbytes = int(nbytes)
except ValueError:
nbytes = None
if nbytes == 0: return '0 B'
elif nbytes is None: return 'None'
i = 0
while nbytes >= 1024 and i < len(_suffixes)-1:
nbytes /= 1024.
i += 1
f = ('%.2f' % nbytes).rstrip('0').rstrip('.')
return '%s %s' % (f, _suffixes[i])
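# Illustrative examples (inputs are hypothetical):
#   format_size(0)    -> '0 B'
#   format_size(2048) -> '2 KB'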
def format_memory_usagetype(usage):
if usage == 0 or usage is None:
return 'Unknown'
elif usage == 1:
return 'Stack'
elif usage == 2:
return 'TEB'
elif usage == 3:
return 'PEB'
elif usage == 4:
return 'Process Parameters'
elif usage == 5:
return 'Environment'
elif usage == 6:
return 'IP'
elif usage == 7:
return 'Process Heap Handles'
elif usage == 8:
return 'Process Heap'
elif usage == 9:
return 'TLS'
elif usage == 10:
return 'Thread info block'
else:
return 'unknown(%i)' % usage
def format_gl_extension_name(ext):
khronos_extension_base_url = 'https://www.khronos.org/registry/OpenGL/extensions'
unknown_extension_url = 'https://www.khronos.org/opengl/wiki/OpenGL_Extension'
title = ext
name = ext
href = unknown_extension_url
vendor = None
ext_name = None
if ext.startswith('GL_'):
vendor_end = ext.index('_', 3)
if vendor_end > 0:
vendor = ext[3:vendor_end]
ext_name = ext[3:]
elif ext.startswith('GLX_') or ext.startswith('WGL_'):
vendor_end = ext.index('_', 4)
if vendor_end > 0:
vendor = ext[4:vendor_end]
ext_name = ext
if vendor and ext_name:
href = khronos_extension_base_url + '/%s/%s.txt' % (vendor, ext_name)
return tag_a(name, title=title, href=href)
def format_version_number(num):
    if isinstance(num, str):
try:
num = int(num)
except ValueError:
num = None
if num is None: return 'None'
m, n, o, p = (num >> 48) & 0xffff, (num >> 32) & 0xffff, (num >> 16) & 0xffff, (num >> 0) & 0xffff
return '%i.%i.%i.%i' % (m, n, o, p)
def format_platform_type(platform_type):
if platform_type is None:
return _('Platform unknown')
elif platform_type == 'Linux':
return tag_a('Linux', href='https://en.wikipedia.org/wiki/Linux')
elif platform_type == 'Windows NT':
return tag_a('Windows NT',href='https://en.wikipedia.org/wiki/Windows_NT')
elif platform_type == 'Windows':
return tag_a('Windows', href='https://en.wikipedia.org/wiki/Microsoft_Windows')
else:
return tag_a(platform_type, href='https://en.wikipedia.org/wiki/Special:Search/' + str(platform_type))
def _get_version_from_string(number_str):
elems = number_str.split('.')
major = 0
minor = 0
patch = 0
build = 0
if len(elems) >= 1:
major = int(elems[0])
if len(elems) >= 2:
minor = int(elems[1])
if len(elems) >= 3:
patch = int(elems[2])
if len(elems) >= 4:
build = int(elems[3])
return major, minor, patch, build
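# Illustrative example (input is hypothetical):
#   _get_version_from_string('10.0.18363') -> (10, 0, 18363, 0)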
def _get_version_from_numbers(os_version_number, os_build_number):
print('_get_version_from_numbers %s, %s' % (os_version_number, os_build_number))
if isinstance(os_version_number, int):
major = os_version_number >> 48 & 0xffff
minor = os_version_number >> 32 & 0xffff
patch = os_version_number >> 16 & 0xffff
build = os_version_number & 0xffff
if build == 0 and os_build_number:
build = int(os_build_number) if os_build_number is not None else 0
else:
major, minor, patch, build = _get_version_from_string(os_version_number)
#print('%x, %s -> %i.%i.%i.%i' % (os_version_number, os_build_number, major, minor, patch, build))
return major, minor, patch, build
def get_os_version_number(platform_type, os_version_number, os_build_number):
if platform_type is None or os_version_number is None:
return 0
if platform_type == 'Linux':
major, minor, patch, build = _get_version_from_string(os_version_number)
elif platform_type == 'Windows NT':
major, minor, patch, build = _get_version_from_string(os_version_number)
if major >= 10:
build = patch
patch = 0
else:
major = 0
minor = 0
patch = 0
build = 0
ret = (major << 48) | (minor << 32) | (patch << 16) | build
print('ver in %s -> %x' % (os_version_number, ret))
return ret
def get_os_build_number(platform_type, os_version_number, os_build_number):
if platform_type is None or os_version_number is None:
return 0
if platform_type == 'Linux':
build = 0
elif platform_type == 'Windows NT':
major, minor, patch, build = _get_version_from_string(os_version_number)
if major >= 10:
build = patch
else:
build = 0
print('build in %s -> %x' % (os_version_number, build))
return build
def os_version_info(platform_type, os_version_number, os_build_number):
ret = {'text': 'unknown' }
if platform_type is None or os_version_number is None:
return ret
major, minor, patch, build = _get_version_from_numbers(os_version_number, os_build_number)
if platform_type == 'Linux':
ret['text'] = 'Linux %i.%i.%i.%i' % (major, minor, patch, build)
ret['href'] = 'https://en.wikipedia.org/wiki/Linux'
elif platform_type == 'Windows NT':
productName = 'Windows %i.%i' % (major, minor)
marketingName = None
if (major < 6):
productName = "Windows XP"
ret['short'] = 'WinXP'
ret['href'] = 'https://en.wikipedia.org/wiki/Windows_XP'
elif (major == 6 and minor == 0):
productName = "Windows Vista"
ret['short'] = 'WinVista'
ret['href'] = 'https://en.wikipedia.org/wiki/Windows_Vista'
elif (major == 6 and minor == 1):
productName = "Windows 7"
ret['short'] = 'Win7'
ret['href'] = 'https://en.wikipedia.org/wiki/Windows_7'
elif (major == 6 and minor == 2):
productName = "Windows 8"
ret['short'] = 'Win8'
ret['href'] = 'https://en.wikipedia.org/wiki/Windows_8'
elif (major == 6 and minor == 3):
productName = "Windows 8.1"
ret['short'] = 'Win8.1'
ret['href'] = 'https://en.wikipedia.org/wiki/Windows_8'
elif (major == 10):
ret['href'] = 'https://en.wikipedia.org/wiki/Windows_10'
# See https://en.wikipedia.org/wiki/Windows_10_version_history
if build <= 10240:
ret['short'] = 'Win10'
productName = "Windows 10"
marketingName = ''
elif(build <= 10586):
ret['short'] = 'Win10/1511'
productName = "Windows 10 Version 1511"
marketingName = "November Update"
elif (build <= 14393):
ret['short'] = 'Win10/1607'
productName = "Windows 10 Version 1607"
marketingName = "Anniversary Update"
elif (build <= 15063):
ret['short'] = 'Win10/1703'
productName = "Windows 10 Version 1703"
marketingName = "Creators Update"
elif (build <= 16299):
ret['short'] = 'Win10/1709'
productName = "Windows 10 Version 1709"
marketingName = "Fall Creators Update"
elif (build <= 17134):
ret['short'] = 'Win10/1803'
productName = "Windows 10 Version 1803"
marketingName = "April 2018 Update"
elif (build <= 18204):
ret['short'] = 'Win10/1809'
productName = "Windows 10 Version 1809"
marketingName = "October 2018 Update"
elif (build <= 18362):
ret['short'] = 'Win10/1903'
productName = "Windows 10 Version 1903"
marketingName = "May 2019 Update"
elif (build <= 18363):
ret['short'] = 'Win10/1909'
productName = "Windows 10 Version 1909"
marketingName = "November 2019 Update"
elif (build <= 19041):
ret['short'] = 'Win10/2004'
productName = "Windows 10 Version 2004"
marketingName = "May 2020 Update"
elif (build <= 19042):
ret['short'] = 'Win10/1903'
productName = "Windows 10 Version 20H2"
marketingName = '' # TBA
else:
ret['short'] = 'Win10/TBA'
productName = 'Windows 10 Build %i' % build
if marketingName:
ret['text'] = '%s (%s)' % (productName, marketingName)
else:
ret['text'] = productName
ret['full'] = ret['text'] + ' %i.%i.%i.%i' % (major, minor, patch, build)
elif platform_type == 'Windows':
ret['text'] = 'Windows %i.%i' % (major, minor)
ret['href'] = 'https://en.wikipedia.org/wiki/Microsoft_Windows'
return ret
def format_os_version(platform_type, os_version_number, os_build_number):
info = os_version_info(platform_type, os_version_number, os_build_number)
if 'href' in info:
return tag_a(info.get('text'), href=info.get('href'))
else:
return info.get('text')
def format_os_version_short(platform_type, os_version_number, os_build_number):
info = os_version_info(platform_type, os_version_number, os_build_number)
if 'short' in info:
return info.get('short')
else:
return info.get('text')
def language_from_qlocale_language_enum(num):
_codes = {
0: 'Any language',
31: 'English',
42: 'German',
}
if num in _codes:
return _codes[num]
else:
return str(num)
# See https://doc.qt.io/qt-5/qlocale.html#Country-enum
def country_from_qlocale_country_enum(num):
_codes = {
0: 'Any country',
82: 'Germany',
224: 'United Kingdom',
225: 'United States',
}
if num in _codes:
return _codes[num]
else:
return str(num)
# https://doc.qt.io/qt-5/qlocale.html#Script-enum
def script_from_qlocale_script_enum(num):
_codes = {
0: 'Any script',
1: 'Arabic',
2: 'Cyrillic',
16: 'Greek',
7: 'Latin',
}
if num in _codes:
return _codes[num]
else:
return str(num)
def thread_extra_info(thread):
if thread is None:
return _('N/A')
elif thread.main_thread:
return '*@' if thread.exception else '@'
elif thread.rpc_thread:
return '*[RPC]' if thread.exception else '[RPC]'
elif thread.exception:
return '*'
else:
return ''
def format_thread(thread):
if thread is None:
return _('N/A')
else:
if thread.main_thread:
ret = _('Main thread')
elif thread.rpc_thread:
ret = _('RPC thread')
else:
ret = _('Thread')
ret = ret + ' ' + hex_format(thread.id)
if thread.name:
ret = ret + ' ' + thread.name
if thread.exception:
ret = ret + ' ' + _('with exception')
return ret
def format_stack_frame(frame):
if frame is None:
return _('N/A')
else:
if frame.function is None:
offset = frame.addr - frame.module_base
if frame.module:
return frame.module + '+' + hex_format(offset)
else:
return frame.addr
else:
return format_function_plus_offset(frame.function, frame.funcoff)
| gpl-3.0 | -138,188,937,514,413,520 | 34.39562 | 137 | 0.558443 | false |
libicocco/poser-hand-generator | createGraspICRA09.py | 1 | 3429 | # creatGraspICRA09.py - script for creating a hand poses database
#
# Copyright (c) 2009 Javier Romero
#
# Author: Javier Romero <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import poser
import linecache
import os
import setCamAZEL
import setTexture
from os.path import join
scene = poser.Scene()
basedir = os.path.dirname(os.path.abspath(__file__))
dir = join(basedir, 'out')
lightdir = join(basedir, 'lights')
taxonomyDir = join(basedir, 'taxonomy')
texture = join(basedir, 'Hand Texture2.TIF')
listpath = join(basedir, 'poses', 'handjointssavinglist.txt')
#lights=["light1.lt2","light2.lt2","light3.lt2","light4.lt2"]
lights = ["light1.lt2"]
nAz = 24
nEl = 12
nRo = 9
nFrames = 6
grasps = ["largeDiameter", "smallDiameter", "mediumWrap", "adductedThumb",
"lightTool", "prismatic4Finger", "prismatic3Finger",
"prismatic2Finger", "palmarPinch", "powerDisk", "powerSphere",
"precisionDisk", "precisionSphere", "tripod", "fixedHook", "lateral",
"indexFingerExtension", "extensionType", "distalType",
"writingTripod", "tripodVariation", "parallelExtension",
"adductionGrip", "tipPinch", "lateralTripod", "sphere4Finger",
"quadpod", "sphere3Finger", "stick", "palmarGrasp",
"ringGrasp", "ventralGrasp", "inferiorPincerGrasp"]
#poser.SetNumRenderThreads(4)
#poser.SetRenderInSeparateProcess(1)
for graspIndex in range(len(grasps)):
outdir = join(dir, '%02d' % (graspIndex+1))
if not os.path.isdir(outdir):
os.mkdir(outdir)
for lightindex in range(len(lights)):
jointFileName0 = join(taxonomyDir, "rest.txt")
jointFileName1 = join(taxonomyDir, grasps[graspIndex] + ".txt")
graspCode = (graspIndex)*(len(lights)) + lightindex + 1
# close and discard changes
poser.CloseDocument(1)
poser.OpenDocument(join(taxonomyDir, grasps[graspIndex] + ".pz3"))
scene.LoadLibraryLight(lightdir+lights[lightindex])
setTexture.setTexture(texture)
linecache.checkcache(jointFileName0)
linecache.checkcache(jointFileName1)
setCamAZEL.setRenderOptions(scale=0)
gnd = scene.Actor("GROUND")
gnd.SetVisible(0)
gnd.SetVisibleInRender(0)
gnd.SetVisibleInReflections(0)
ffly = scene.CurrentFireFlyOptions()
ffly.SetManual(1)
setCamAZEL.multiViewSeqRender(basedir, nAz, nEl, nRo, outdir,
jointFileName0, jointFileName1,
nFrames, graspCode, listpath=listpath,
fullSphere=True, f=70,
camName="RHand Camera")
| gpl-2.0 | 4,005,655,278,173,334,000 | 39.313253 | 79 | 0.656168 | false |
EasyPost/easypost-python | tests/test_pickup.py | 1 | 4491 | # Unit tests related to 'Pickups' (https://www.easypost.com/docs/api#pickups).
import time
import datetime
import easypost
import pytest
import pytz
ONE_DAY = datetime.timedelta(days=1)
@pytest.fixture
def noon_on_next_monday():
today = datetime.date.today()
next_monday = today + datetime.timedelta(days=(7 - today.weekday()))
noon_est = datetime.time(12, 0, tzinfo=pytz.timezone('America/New_York'))
return datetime.datetime.combine(next_monday, noon_est)
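# Note on the noon_on_next_monday fixture above: if today is already a Monday
# it returns the *following* Monday; e.g. on Wednesday 2020-01-01 it would
# return 2020-01-06 12:00 in the America/New_York timezone.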
@pytest.mark.vcr()
def test_pickup_batch(noon_on_next_monday, vcr):
    # Create a Batch containing multiple Shipments, then buy a Pickup for it and assert that it was bought.
pickup_address = easypost.Address.create(
verify=['delivery'],
name='TAKASHI KOVACS',
company='EasyPost',
street1='2889 W ASHTON BLVD',
street2='SUITE 325',
city='Lehi',
state='UT',
zip='84042',
country='US',
phone='415-456-7890'
)
shipments = [
{
'to_address': {
'name': 'Customer',
'street1': '8308 Fenway Rd',
'city': 'Bethesda',
'state': 'MD',
'zip': '20817',
'country': 'US'
},
'from_address': pickup_address,
'parcel': {
'weight': 10.2
},
'carrier': 'USPS',
'service': 'Priority'
}, {
'to_address': {
'name': 'Customer',
'street1': '8308 Fenway Rd',
'city': 'Bethesda',
'state': 'MD',
'zip': '20817',
'country': 'US'
},
'from_address': {
'name': 'Sawyer Bateman',
'company': 'EasyPost',
'street1': '164 Townsend St',
'city': 'San Francisco',
'state': 'CA',
'zip': '94107',
'phone': '415-456-7890'
},
'parcel': {
'weight': 10.2
},
'carrier': 'USPS',
'service': 'Priority'
}
]
batch = easypost.Batch.create_and_buy(shipments=shipments)
while batch.state in ('creating', 'queued_for_purchase', 'purchasing'):
if vcr.record_mode != 'none':
time.sleep(0.1)
batch.refresh()
# Insure the shipments after purchase
if batch.state == 'purchased':
for shipment in batch.shipments:
shipment.insure(amount=100)
pickup = easypost.Pickup.create(
address=pickup_address,
batch=batch,
reference='internal_id_1234',
min_datetime=noon_on_next_monday.isoformat(),
max_datetime=(noon_on_next_monday + ONE_DAY).isoformat(),
is_account_address=True,
instructions='Special pickup instructions'
)
assert pickup.pickup_rates != [], pickup.messages
pickup.buy(
carrier=pickup.pickup_rates[0].carrier,
service=pickup.pickup_rates[0].service
)
@pytest.mark.vcr()
def test_single_pickup(noon_on_next_monday):
"""Create a Shipment, buy it, and then buy a pickup for it"""
pickup_address = easypost.Address.create(
verify=['delivery'],
name='TAKASHI KOVACS',
company='EasyPost',
street1='2889 W ASHTON BLVD',
street2='SUITE 325',
city='Lehi',
state='UT',
zip='84042',
country='US',
phone='415-456-7890'
)
shipment = easypost.Shipment.create(
to_address={
'name': 'Customer',
'street1': '8308 Fenway Rd',
'city': 'Bethesda',
'state': 'MD',
'zip': '20817',
'country': 'US'
},
from_address=pickup_address,
parcel={
'weight': 21.2
},
)
shipment.buy(rate=shipment.lowest_rate('USPS', 'Priority'), insurance=100.00)
pickup = easypost.Pickup.create(
address=pickup_address,
shipment=shipment,
reference='internal_id_1234',
min_datetime=noon_on_next_monday.isoformat(),
max_datetime=(noon_on_next_monday + ONE_DAY).isoformat(),
is_account_address=True,
instructions='Special pickup instructions'
)
assert pickup.pickup_rates != [], pickup.messages
pickup.buy(
carrier=pickup.pickup_rates[0].carrier,
service=pickup.pickup_rates[0].service
)
| mit | -7,046,206,442,330,147,000 | 27.605096 | 108 | 0.529281 | false |
Data-Mechanics/dml | dml/dml.py | 1 | 7488 | ###############################################################################
##
## dml.py
##
## Python library providing common functionalities for building Data
## Mechanics platform components.
##
## Web: datamechanics.org
## Version: 0.0.16.0
##
##
import sys # To parse command line arguments.
import os.path # To check if a file exists.
import types
import json
import pymongo
###############################################################################
##
"""
An interface error occurs if a user of the library tries defining an algorithm
class without providing definitions for the required methods.
"""
class InterfaceError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
"""
A metaclass for creating contributor-authored (static) classes for platform
algorithms, and a corresponding base class that uses that metaclass.
"""
class MetaAlgorithm(type):
def __new__(cls, clsname, bases, dct):
methods = {name:val for name, val in dct.items()}
if clsname != 'Algorithm':
if 'contributor' not in methods or type(methods['contributor']) != str:
raise InterfaceError("The class definition for " + clsname + " does not identify a contributor.")
if 'reads' not in methods or type(methods['reads']) != list:
raise InterfaceError("The class definition for " + clsname + " does not list the data sets it reads.")
reads_types = list({type(x) for x in methods['reads']})
if len(reads_types) > 0 and (len(reads_types) != 1 or reads_types[0] != str):
raise InterfaceError("The class definition for " + clsname + " has a non-name in its list of data sets it reads.")
if 'writes' not in methods or type(methods['writes']) != list:
raise InterfaceError("The class definition for " + clsname + " does not list the data sets it writes.")
writes_types = list({type(x) for x in methods['writes']})
if len(writes_types) > 0 and (len(writes_types) != 1 or writes_types[0] != str):
raise InterfaceError("The class definition for " + clsname + " has a non-name in its list of data sets it writes.")
if 'execute' not in methods or isinstance(methods['execute'], types.FunctionType):
raise InterfaceError("The class definition for " + clsname + " does not define a static 'execute' method.")
            if 'provenance' not in methods or isinstance(methods['provenance'], types.FunctionType):
raise InterfaceError("The class definition for " + clsname + " does not define a static 'provenance' method.")
return super(MetaAlgorithm, cls).__new__(cls, clsname, bases, dict(dct.items()))
class Algorithm(metaclass=MetaAlgorithm):
__dml__ = True
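# Illustrative sketch of a contributor-defined algorithm class that satisfies
# the MetaAlgorithm interface checks above; the contributor and data set names
# are hypothetical, so the example is left commented out rather than defined.
#
# class exampleAlgorithm(Algorithm):
#     contributor = 'alice'
#     reads = ['alice.sourceData']
#     writes = ['alice.derivedData']
#
#     @staticmethod
#     def execute(trial=False):
#         pass  # derive 'alice.derivedData' from 'alice.sourceData'
#
#     @staticmethod
#     def provenance():
#         pass  # record a provenance document for the derived data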
"""
An environment error occurs if a user of the library tries running a script
that loads the library in an environment that does not provide the appropriate
configuration and credentials files for a Data Mechanics platform instance.
"""
class EnvironmentError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
"""
Process the command line parameters supplied to the script loading
this module.
"""
class Parameters():
def __init__(self, arguments):
self.trial = ('--trial' in arguments) or ('--t' in arguments)
parameters = Parameters(sys.argv[1:])
options = parameters # Public synonym.
"""
We check that the environment provides an appropriate configuration file and
an appropriate authentication credentials file for third-party services.
"""
pathToConfig = "../config.json"
if not os.path.isfile(pathToConfig):
pathToConfig = "config.json"
if not os.path.isfile(pathToConfig):
raise EnvironmentError(\
"No valid configuration file found at '"\
+ "../config.json" + " or " + "config.json"\
+ "'. All scripts must be located within an immediate "\
+ "subdirectory of the platform instance root directory."\
)
pathToAuth = "../auth.json"
if not os.path.isfile(pathToAuth):
pathToAuth = "auth.json"
if not os.path.isfile(pathToAuth):
raise EnvironmentError(\
"No valid credentials file found at '"\
+ "../auth.json" + " or " + "auth.json"\
+ "'. All scripts must be located within an immediate "\
+ "subdirectory of the platform instance root directory."\
)
auth = json.loads(open(pathToAuth).read())
"""
Extend the PyMongo database.Database class with customized
methods for creating and dropping collections within repositories.
"""
def customElevatedCommand(db, f, arg, op = None):
"""
Wrapper to create custom commands for managing the repository that
require temporary elevation to an authenticated account with higher
privileges.
"""
config = json.loads(open(pathToConfig).read())
user = db.command({"connectionStatus":1})['authInfo']['authenticatedUsers'][0]['user']
if op != 'record' and arg.split(".")[0] != user:
arg = user + '.' + arg
db.logout()
db.authenticate(config['admin']['name'], config['admin']['pwd'])
result = f(arg, user, user)
db.logout()
db.authenticate(user, user)
return result
def createTemporary(self, name):
"""
Wrapper for creating a temporary repository collection
that can be removed after a particular computation is complete.
"""
return customElevatedCommand(self, self.system_js.createCollection, name)
def createPermanent(self, name):
"""
Wrapper for creating a repository collection that should remain
after it is derived.
"""
return customElevatedCommand(self, self.system_js.createCollection, name)
def dropTemporary(self, name):
"""
Wrapper for removing a temporary repository collection.
"""
return customElevatedCommand(self, self.system_js.dropCollection, name)
def dropPermanent(self, name):
"""
Wrapper for removing a permanent repository collection.
"""
return customElevatedCommand(self, self.system_js.dropCollection, name)
def record(self, raw):
"""
Wrapper for recording a provenance document. Since MongoDB
does not support fields with the reserved "$" character, we
replace this character with "@".
"""
raw = raw.replace('"$"', '"@"')
return customElevatedCommand(self, self.system_js.record, raw, 'record')
def metadata(self, obj = None):
if obj is None:
return self.database[self.name + '.metadata'].find_one()
else:
return self.database[self.name + '.metadata'].insert_one(obj)
"""
We extend the pymongo Database class with the additional methods
defined above.
"""
pymongo.database.Database.createTemporary = createTemporary
pymongo.database.Database.createTemp = createTemporary
pymongo.database.Database.createPermanent = createPermanent
pymongo.database.Database.createPerm = createPermanent
pymongo.database.Database.dropTemporary = dropTemporary
pymongo.database.Database.dropTemp = dropTemporary
pymongo.database.Database.dropPermanent = dropPermanent
pymongo.database.Database.dropPerm = dropPermanent
pymongo.database.Database.record = record
pymongo.database.Database.createCollection = createPermanent
pymongo.database.Database.dropCollection = dropPermanent
pymongo.collection.Collection.metadata = metadata
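# Illustrative usage sketch (hypothetical database handle and collection name):
#   db.createTemp('scratch')   # create a temporary repository collection
#   db.dropTemp('scratch')     # drop it once the computation is finished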
##eof | mit | -5,271,523,373,676,222,000 | 37.803109 | 131 | 0.667468 | false |
DirectlineDev/django-robokassa-merchant | robokassa_merchant/conf.py | 1 | 1761 | # -*- coding: utf-8 -*-
from django.conf import settings
class Conf:
""" Класс конфигурации для робокассы, берёт настройки из settings.ROBOKASSA_CONF
"""
# todo: в большинстве случаев 1 магазин на 1 сайт - сделать необязательным параметр token
# обязательные параметры - реквизиты магазина
LOGIN = ''
PASSWORD1 = ''
PASSWORD2 = ''
    # URL to which the payment forms are submitted
FORM_TARGET = 'https://merchant.roboxchange.com/Index.aspx'
    # whether to use the POST method when receiving results
USE_POST = True
    # require a preliminary notification at ResultURL
STRICT_CHECK = True
    # test mode
TEST_MODE = False
    # list of custom user parameters (no need to prefix them with "shp")
EXTRA_PARAMS = []
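    # Illustrative (hypothetical) settings.ROBOKASSA_CONF entry expected by
    # __init__ below; ROBOKASSA_LOGIN and ROBOKASSA_PASSWORD1 are required,
    # the remaining keys fall back to the class defaults above:
    #
    # ROBOKASSA_CONF = {
    #     'my_shop': {
    #         'ROBOKASSA_LOGIN': 'my_shop_login',
    #         'ROBOKASSA_PASSWORD1': 'password1',
    #         'ROBOKASSA_PASSWORD2': 'password2',
    #         'ROBOKASSA_TEST_MODE': True,
    #     },
    # }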
def __init__(self, token):
if token not in settings.ROBOKASSA_CONF:
raise ValueError('Can not find "{}" in settings.ROBOKASSA_CONF'.format(token))
config = settings.ROBOKASSA_CONF[token]
self.LOGIN = config['ROBOKASSA_LOGIN']
self.PASSWORD1 = config['ROBOKASSA_PASSWORD1']
self.PASSWORD2 = config.get('ROBOKASSA_PASSWORD2', None)
self.USE_POST = config.get('ROBOKASSA_USE_POST', True)
self.STRICT_CHECK = config.get('ROBOKASSA_STRICT_CHECK', True)
self.TEST_MODE = config.get('ROBOKASSA_TEST_MODE', False)
self.EXTRA_PARAMS = sorted(config.get('ROBOKASSA_EXTRA_PARAMS', []))
| mit | 460,906,213,722,533,950 | 35.05 | 93 | 0.670596 | false |
jeffmacinnes/pyneal | pyneal_scanner/utils/Siemens_utils.py | 1 | 30222 | """ Set of classes and methods specific to Siemens scanning environments
"""
from __future__ import print_function
from __future__ import division
import os
from os.path import join
import sys
import time
import re
import json
import glob
import logging
from threading import Thread
from queue import Queue
import numpy as np
import pydicom
import nibabel as nib
from nibabel.nicom import dicomreaders
import zmq
# regEx for Siemens style file naming
Siemens_filePattern = re.compile(r'\d{3}_\d{6}_\d{6}\.dcm')
# regEx for pulling the volume field out of the mosaic file name
Siemens_mosaicVolumeNumberField = re.compile(r'(?<=\d{6}_)\d{6}')
Siemens_mosaicSeriesNumberField = re.compile(r'(?<=\d{3}_)\d{6}(?=_\d{6}\.dcm)')
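# Illustrative example (hypothetical filename): '001_000013_000042.dcm' matches
# Siemens_filePattern; Siemens_mosaicSeriesNumberField extracts the series
# field '000013' and Siemens_mosaicVolumeNumberField the volume field '000042'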
class Siemens_DirStructure():
""" Finding the names and paths of series directories in a Siemens scanning
environment.
In Siemens environments, using the ideacmdtool, the scanner is set up to
export data in real-time to a shared directory that is accessible from a
remote workstation (running Pyneal Scanner). For functional data, Siemens
    scanners store reconstructed slice images by taking all of the slices for
a single volume, and placing them side-by-side in a larger "mosaic" dicom
image. A scan will produce one mosaic image per volume.
For anatomical data, dicom images for each 2D slice will be written as
separate files, numbered sequentially, and saved in the `sessionDir`.
All dicom images for all scans across a single session will be stored in
the same directory. We'll call this directory the `sessionDir`.
A single `sessionDir` will hold all of the mosaic files for all of the
series for the current session. The series number is contained in the
filename, which follows the pattern:
[session#]_[series#]_[vol#].dcm
These files will appear in real-time as the scan progresses.
This class contains methods to retrieve the current `sessionDir`, show the
current series that are present, and monitor the `sessionDir` for the
appearance of new series files.
"""
def __init__(self, scannerSettings):
""" Initialize the class
Parameters
----------
scannerSettings : object
class attributes represent all of the settings unique to the
current scanning environment (many of them read from
`scannerConfig.yaml`)
See Also
--------
general_utils.ScannerSettings
"""
# initialize class attributes
if 'scannerSessionDir' in scannerSettings.allSettings:
self.sessionDir = scannerSettings.allSettings['scannerSessionDir']
else:
print('No scannerSessionDir found in scannerConfig file')
sys.exit()
def print_currentSeries(self):
""" Find all of the series present in given sessionDir, and print them
all, along with time since last modification, and directory size
"""
# find the sessionDir, if not already found
if self.sessionDir is None:
self.findSessionDir()
print('Session Dir: ')
print('{}'.format(self.sessionDir))
# find all mosaic files in the sessionDir
self.uniqueSeries = self.getUniqueSeries()
if len(self.uniqueSeries) == 0:
print('No mosaic files found in {}'.format(self.sessionDir))
else:
# print out info on each unique series in sessionDir
currentTime = int(time.time())
print('Unique Series: ')
for series in sorted(self.uniqueSeries):
# get list of all dicoms that match this series number
thisSeriesDicoms = glob.glob(join(self.sessionDir, ('*_' + series + '_*.dcm')))
# get time since last modification for last dicom in list
lastModifiedTime = os.stat(thisSeriesDicoms[-1]).st_mtime
timeElapsed = currentTime - lastModifiedTime
m, s = divmod(timeElapsed, 60)
time_string = '{} min, {} s ago'.format(int(m), int(s))
print(' {}\t{} files \t{}'.format(series, len(thisSeriesDicoms), time_string))
def getUniqueSeries(self):
""" Return a list of unique series numbers from the filenames of the
files found in the sessionDir
"""
uniqueSeries = []
self.allMosaics = [f for f in os.listdir(self.sessionDir) if Siemens_filePattern.match(f)]
if len(self.allMosaics) > 0:
# find unique series numbers among all mosaics
seriesNums = []
for f in self.allMosaics:
seriesNums.append(Siemens_mosaicSeriesNumberField.search(f).group())
uniqueSeries = set(seriesNums)
return uniqueSeries
def waitForNewSeries(self, interval=.1):
""" Listen for the appearance of new series files
Once a scan starts, new series mosaic files will be created in the
`sessionDir`. By the time this function is called, this class should
already have the `sessionDir` defined
Parameters
----------
interval : float, optional
time, in seconds, to wait between polling for a new directory
Returns
-------
newSeries : string
seriesNum of the new series
"""
keepWaiting = True
existingSeries = self.getUniqueSeries()
while keepWaiting:
# get all of the unique series again
currentSeries = self.getUniqueSeries()
# compare against existing series
diff = currentSeries - existingSeries
if len(diff) > 0:
newSeries = diff.pop()
keepWaiting = False
# pause before searching directories again
time.sleep(interval)
# return the found series name
return newSeries
class Siemens_BuildNifti():
""" Tools to build a 3D or 4D Nifti image from all of the dicom mosaic
images in a directory.
Input is a path to a series directory containing dicom images (either
mosaic images for functional data, or 2D slice image for anatomical data).
Image parameters, like voxel spacing and dimensions, are obtained
automatically from the info in the dicom tags
End result is a Nifti1 formatted 3D (anat) or 4D (func) file in RAS+
orientation
"""
def __init__(self, seriesDir, seriesNum):
""" Initialize class, and set/obtain basic class attributes like file
paths and scan parameters
Parameters
----------
seriesDir : string
full path to the directory containing the raw dicom mosaic files
for each volume in the series
seriesNum : string
series number of the series that you'd like to build the nifti
image from
"""
# initialize attributes
self.seriesDir = seriesDir
self.seriesNum = seriesNum
self.niftiImage = None
# make a list of the specified raw dicom mosaic files in this dir
rawDicoms = glob.glob(join(self.seriesDir, ('*_' + str(self.seriesNum).zfill(6) + '_*.dcm')))
# figure out what type of image this is, 4d or 3d
self.scanType = self._determineScanType(rawDicoms[0])
# build the nifti image
if self.scanType == 'anat':
self.niftiImage = self.buildAnat(rawDicoms)
elif self.scanType == 'func':
self.niftiImage = self.buildFunc(rawDicoms)
def buildAnat(self, dicomFiles):
""" Build a 3D structural/anatomical image from list of dicom files
Given a list of `dicomFiles`, build a 3D anatomical image from them.
Figure out the image dimensions and affine transformation to map
from voxels to mm from the dicom tags
Parameters
----------
dicomFiles : list
list containing the file names (file names ONLY, no path) of all
dicom slice images to be used in constructing the final nifti image
Returns
-------
anatImage_RAS : Nifti1Image
nifti-1 formated image of the 3D anatomical data, oriented in
RAS+
See Also
--------
nibabel.nifti1.Nifti1Image()
"""
# read the first dicom in the list to get overall image dimensions
dcm = pydicom.dcmread(join(self.seriesDir, dicomFiles[0]), stop_before_pixels=1)
sliceDims = (getattr(dcm, 'Columns'), getattr(dcm, 'Rows'))
self.nSlicesPerVol = len(dicomFiles)
sliceThickness = getattr(dcm, 'SliceThickness')
### Build 3D array of voxel data
# create an empty array to store the slice data
imageMatrix = np.zeros(shape=(
sliceDims[0],
sliceDims[1],
self.nSlicesPerVol), dtype='int16')
# Use the InstanceNumber tag to order the slices. This works for anat
# 3D images only, since the instance numbers do not repeat as they would
# with functional data with multiple volumes
sliceDict = {}
for s in dicomFiles:
dcm = pydicom.dcmread(join(self.seriesDir, s))
sliceDict[dcm.InstanceNumber] = join(self.seriesDir, s)
# sort by InStackPositionNumber and assemble the image
for sliceIdx, ISPN in enumerate(sorted(sliceDict.keys())):
dcm = pydicom.dcmread(sliceDict[ISPN])
# extract the pixel data as a numpy array. Transpose
# so that the axes order go [cols, rows]
pixel_array = dcm.pixel_array.T
# place in the image matrix
imageMatrix[:, :, sliceIdx] = pixel_array
### create the affine transformation to map from vox to mm space
# in order to do this, we need to get some values from the first and
# last slices in the volume.
firstSlice = sliceDict[sorted(sliceDict.keys())[0]]
lastSlice = sliceDict[sorted(sliceDict.keys())[-1]]
dcm_first = pydicom.dcmread(firstSlice)
dcm_last = pydicom.dcmread(lastSlice)
self.pixelSpacing = getattr(dcm_first, 'PixelSpacing')
self.firstSlice_IOP = np.array(getattr(dcm_first, 'ImageOrientationPatient'))
self.firstSlice_IPP = np.array(getattr(dcm_first, 'ImagePositionPatient'))
self.lastSlice_IPP = np.array(getattr(dcm_last, 'ImagePositionPatient'))
# now we can build the affine
affine = self.buildAffine()
### Build a Nifti object, reorder it to RAS+
anatImage = nib.Nifti1Image(imageMatrix, affine=affine)
anatImage_RAS = nib.as_closest_canonical(anatImage) # reoder to RAS+
print('Nifti image dims: {}'.format(anatImage_RAS.shape))
return anatImage_RAS
def buildFunc(self, dicomFiles):
""" Build a 4D functional image from list of dicom files
Given a list of dicomFile paths, build a 4d functional image. For
Siemens scanners, each dicom file is assumed to represent a mosaic
        image comprised of multiple slices. This tool will split apart the
        mosaic images and construct a 4D nifti object. The 4D nifti object
        contains a voxel array ordered in RAS+ as well as the affine
        transformation to map between vox and mm space
Parameters
----------
dicomFiles : list
list containing the file names (file names ONLY, no path) of all
dicom mosaic images to be used in constructing the final nifti
image
"""
imageMatrix = None
affine = None
TR = None
# make dicomFiles store the full path
dicomFiles = [join(self.seriesDir, f) for f in dicomFiles]
### Loop over all dicom mosaic files
nVols = len(dicomFiles)
for mosaic_dcm_fname in dicomFiles:
### Parse the mosaic image into a 3D volume
# we use the nibabel mosaic_to_nii() method which does a lot of the
# heavy-lifting of extracting slices, arranging in a 3D array, and
# grabbing the affine
dcm = pydicom.dcmread(mosaic_dcm_fname) # create dicom object
# for mosaic files, the instanceNumber tag will correspond to the
# volume number (using a 1-based indexing, so subtract by 1)
volIdx = dcm.InstanceNumber - 1
# convert the dicom object to nii
thisVol = dicomreaders.mosaic_to_nii(dcm)
# convert to RAS+
thisVol_RAS = nib.as_closest_canonical(thisVol)
if TR is None:
TR = dcm.RepetitionTime / 1000
# construct the imageMatrix if it hasn't been made yet
if imageMatrix is None:
imageMatrix = np.zeros(shape=(thisVol_RAS.shape[0],
thisVol_RAS.shape[1],
thisVol_RAS.shape[2],
nVols), dtype=np.uint16)
# construct the affine if it isn't made yet
if affine is None:
affine = thisVol_RAS.affine
# Add this data to the image matrix
imageMatrix[:, :, :, volIdx] = thisVol_RAS.get_fdata()
### Build a Nifti object
funcImage = nib.Nifti1Image(imageMatrix, affine=affine)
pixDims = np.array(funcImage.header.get_zooms())
pixDims[3] = TR
funcImage.header.set_zooms(pixDims)
return funcImage
def buildAffine(self):
""" Build the affine matrix that will transform the data to RAS+.
This function should only be called once the required data has been
extracted from the dicom tags from the relevant slices. The affine
matrix is constructed by using the information in the
ImageOrientationPatient and ImagePositionPatient tags from the first
and last slices in a volume.
However, note that those tags will tell you how to orient the image to
        DICOM reference coordinate space, which is LPS+. In order to get to
RAS+ we have to invert the first two axes.
Notes
-----
For more info on building this affine, please see the documentation at:
http://nipy.org/nibabel/dicom/dicom_orientation.html
http://nipy.org/nibabel/coordinate_systems.html
"""
### Get the ImageOrientation values from the first slice,
# split the row-axis values (0:3) and col-axis values (3:6)
# and then invert the first and second values of each
rowAxis_orient = self.firstSlice_IOP[0:3] * np.array([-1, -1, 1])
colAxis_orient = self.firstSlice_IOP[3:6] * np.array([-1, -1, 1])
### Get the voxel size along Row and Col axis
voxSize_row = float(self.pixelSpacing[0])
voxSize_col = float(self.pixelSpacing[1])
### Figure out the change along the 3rd axis by subtracting the
# ImagePosition of the last slice from the ImagePosition of the first,
        # then dividing by (1 - the total number of slices), then inverting the
        # first two axes to go from LPS+ to RAS+
slAxis_orient = (self.firstSlice_IPP - self.lastSlice_IPP) / (1 - self.nSlicesPerVol)
slAxis_orient = slAxis_orient * np.array([-1, -1, 1])
### Invert the first two values of the firstSlice ImagePositionPatient.
# This tag represents the translation needed to take the origin of our 3D voxel
# array to the origin of the LPS+ reference coordinate system. Since we want
# RAS+, need to invert those first two axes
voxTranslations = self.firstSlice_IPP * np.array([-1, -1, 1])
### Assemble the affine matrix
affine = np.matrix([
[rowAxis_orient[0] * voxSize_row, colAxis_orient[0] * voxSize_col, slAxis_orient[0], voxTranslations[0]],
[rowAxis_orient[1] * voxSize_row, colAxis_orient[1] * voxSize_col, slAxis_orient[1], voxTranslations[1]],
[rowAxis_orient[2] * voxSize_row, colAxis_orient[2] * voxSize_col, slAxis_orient[2], voxTranslations[2]],
[0, 0, 0, 1]
])
return affine
def _determineScanType(self, dicomFile):
""" Figure out what type of scan this is, anat or func
This tool will determine the scan type from a given dicom file.
Possible scan types are either single 3D volume (anat), or a 4D dataset
built up of 2D slices (func). The scan type is determined by reading
the `MRAcquisitionType` tag from the dicom file
Parameters
----------
dcmFile : string
file name of dicom file from the current series that you would like
to open to read the imaging parameters from
Returns
-------
scanType : string
either 'anat' or 'func' depending on scan type stored in dicom tag
"""
# read the dicom file
dcm = pydicom.dcmread(join(self.seriesDir, dicomFile), stop_before_pixels=1)
if getattr(dcm, 'MRAcquisitionType') == '3D':
scanType = 'anat'
elif getattr(dcm, 'MRAcquisitionType') == '2D':
scanType = 'func'
else:
print('Cannot determine a scan type from this image!')
sys.exit()
return scanType
def get_scanType(self):
""" Return the scan type """
return self.scanType
def get_niftiImage(self):
""" Return the constructed Nifti Image """
return self.niftiImage
def write_nifti(self, output_path):
""" Write the nifti file to disk
Parameters
----------
outputPath : string
full path, including filename, you want to use to save the nifti
image
"""
nib.save(self.niftiImage, output_path)
print('Image saved at: {}'.format(output_path))
class Siemens_monitorSessionDir(Thread):
""" Class to monitor for new mosaic images to appear in the sessionDir.
This class will run independently in a separate thread. Each new mosaic
file that appears and matches the current series number will be added to
the Queue for further processing
"""
def __init__(self, sessionDir, seriesNum, dicomQ, interval=.2):
""" Initialize the class, and set basic class attributes
Parameters
----------
sessionDir : string
full path to the session directory where new dicom mosaic files
will appear
seriesNum : string
series number assigned to the new series
dicomQ : object
instance of python queue class to hold new dicom files before they
have been processed. This class will add items to that queue.
interval : float, optional
time, in seconds, to wait before repolling the seriesDir to check
for any new files
"""
        # initialize the parent Thread class (start() is called externally)
Thread.__init__(self)
# set up logger
self.logger = logging.getLogger(__name__)
# initialize class parameters
self.interval = interval # interval for polling for new files
self.sessionDir = sessionDir # full path to series directory
self.seriesNum = seriesNum # series number of current series
self.dicomQ = dicomQ # queue to store dicom mosaic files
self.alive = True # thread status
self.numMosaicsAdded = 0 # counter to keep track of # mosaics
self.queued_mosaic_files = set() # empty set to store names of queued mosaic
def run(self):
# function that runs while the Thread is still alive
while self.alive:
# create a set of all mosaic files with the current series num
#currentMosaics = set(os.listdir(self.seriesDir))
currentMosaics = set(glob.glob(join(self.sessionDir, ('*_' + str(self.seriesNum).zfill(6) + '_*.dcm'))))
# grab only the ones that haven't already been added to the queue
newMosaics = [f for f in currentMosaics if f not in self.queued_mosaic_files]
# loop over each of the new mosaic files, add each to queue
for f in newMosaics:
mosaic_fname = join(self.sessionDir, f)
try:
self.dicomQ.put(mosaic_fname)
except:
self.logger.error('failed on: {}'.format(mosaic_fname))
print(sys.exc_info())
sys.exit()
if len(newMosaics) > 0:
self.logger.debug('Put {} new mosaic file on the queue'.format(len(newMosaics)))
self.numMosaicsAdded += len(newMosaics)
# now update the set of mosaics added to the queue
self.queued_mosaic_files.update(set(newMosaics))
# pause
time.sleep(self.interval)
def get_numMosaicsAdded(self):
""" Return the cumulative number of mosaic files added to the queue thus far """
return self.numMosaicsAdded
def stop(self):
""" Set the `alive` flag to False, stopping thread """
self.alive = False
class Siemens_processMosaic(Thread):
""" Class to process each mosaic file in the queue.
This class will run in a separate thread. While running, it will pull
'tasks' off of the queue and process each one. Processing each task
involves reading the mosaic file, converting it to a 3D Nifti object,
reordering it to RAS+, and then sending the volume out over the
pynealSocket
"""
def __init__(self, dicomQ, pynealSocket, interval=.2):
""" Initialize the class
Parameters
----------
dicomQ : object
instance of python queue class that will store the dicom slice file
names. This class will pull items from that queue.
pynealSocket : object
instance of ZMQ style socket that will be used to communicate with
Pyneal. This class will use this socket to send image data and
headers to Pyneal during the real-time scan.
See also: general_utils.create_pynealSocket()
interval : float, optional
time, in seconds, to wait before repolling the queue to see if
there are any new file names to process
"""
        # initialize the parent Thread class (start() is called externally)
Thread.__init__(self)
# set up logger
self.logger = logging.getLogger(__name__)
# initialize class parameters
self.dicomQ = dicomQ
self.interval = interval # interval between polling queue for new files
self.alive = True
self.pynealSocket = pynealSocket
self.totalProcessed = 0 # counter for total number of slices processed
def run(self):
self.logger.debug('Siemens_processMosaic started')
# function to run on loop
while self.alive:
# if there are any mosaic files in the queue, process them
if not self.dicomQ.empty():
numMosaicsInQueue = self.dicomQ.qsize()
# loop through all mosaics currently in queue & process
for m in range(numMosaicsInQueue):
# retrieve file name from queue
mosaic_dcm_fname = self.dicomQ.get(True, 2)
# ensure the file has copied completely
file_size = 0
while True:
file_info = os.stat(mosaic_dcm_fname)
if file_info.st_size == 0 or file_info.st_size > file_size:
file_size = file_info.st_size
else:
break
# process this mosaic
self.processMosaicFile(mosaic_dcm_fname)
# complete this task, thereby clearing it from the queue
self.dicomQ.task_done()
# log how many were processed
self.totalProcessed += numMosaicsInQueue
self.logger.debug('Processed {} tasks from the queue ({} total)'.format(numMosaicsInQueue, self.totalProcessed))
# pause for a bit
time.sleep(self.interval)
def processMosaicFile(self, mosaic_dcm_fname):
""" Process a given mosaic dicom file
This method will read the dicom mosaic file. Convert to a nifti object
that will provide the 3D voxel array for this mosaic. Reorder to RAS+,
and then send to the pynealSocket
Parameters
----------
mosaic_dcm_fname : string
full path to the dicom mosaic file that you want to process
"""
### Figure out the volume index for this mosaic by reading
# the field from the file name itself
mosaicFile_root, mosaicFile_name = os.path.split(mosaic_dcm_fname)
volIdx = int(Siemens_mosaicVolumeNumberField.search(mosaicFile_name).group(0)) - 1
self.logger.info('Volume {} processing'.format(volIdx))
### Parse the mosaic image into a 3D volume
# we use the nibabel mosaic_to_nii() method which does a lot of the
# heavy-lifting of extracting slices, arranging in a 3D array, and
# grabbing the affine
dcm = pydicom.dcmread(mosaic_dcm_fname) # create dicom object
thisVol = dicomreaders.mosaic_to_nii(dcm) # convert to nifti
# convert to RAS+
thisVol_RAS = nib.as_closest_canonical(thisVol)
# get the data as a contiguous array (required for ZMQ)
thisVol_RAS_data = np.ascontiguousarray(thisVol_RAS.get_fdata())
### Create a header with metadata info
volHeader = {
'volIdx': volIdx,
'dtype': str(thisVol_RAS_data.dtype),
'shape': thisVol_RAS_data.shape,
'affine': json.dumps(thisVol_RAS.affine.tolist()),
'TR': str(dcm.RepetitionTime / 1000)}
### Send the voxel array and header to the pynealSocket
self.sendVolToPynealSocket(volHeader, thisVol_RAS_data)
def sendVolToPynealSocket(self, volHeader, voxelArray):
""" Send the volume data to Pyneal
Send the image data and header information for the specified volume to
Pyneal via the `pynealSocket`.
Parameters
----------
volHeader : dict
key:value pairs for all of the relevant metadata for this volume
voxelArray : numpy array
3D numpy array of voxel data from the volume, reoriented to RAS+
"""
self.logger.debug('TO pynealSocket: vol {}'.format(volHeader['volIdx']))
### Send data out the socket, listen for response
self.pynealSocket.send_json(volHeader, zmq.SNDMORE) # header as json
self.pynealSocket.send(voxelArray, flags=0, copy=False, track=False)
pynealSocketResponse = self.pynealSocket.recv_string()
# log the success
self.logger.debug('FROM pynealSocket: {}'.format(pynealSocketResponse))
# check if that was the last volume, and if so, stop
if 'STOP' in pynealSocketResponse:
self.stop()
def stop(self):
""" set the `alive` flag to False, stopping the thread """
self.alive = False
def Siemens_launch_rtfMRI(scannerSettings, scannerDirs):
""" Launch a real-time session in a Siemens environment.
This method should be called from pynealScanner.py before starting the
scanner. Once called, this method will take care of:
- monitoring the sessionDir for new series files to appear (and
then returing the new series number)
- set up the socket connection to send volume data over
- creating a Queue to store newly arriving DICOM files
- start a separate thread to monitor the new series appearing
- start a separate thread to process DICOMs that are in the Queue
"""
# Create a reference to the logger. This assumes the logger has already
# been created and customized by pynealScanner.py
logger = logging.getLogger(__name__)
#### SET UP PYNEAL SOCKET (this is what we'll use to
#### send data (e.g. header, volume voxel data) to remote connections)
# figure out host and port number to use
host = scannerSettings.get_pynealSocketHost()
port = scannerSettings.get_pynealSocketPort()
logger.debug('Scanner Socket Host: {}'.format(host))
logger.debug('Scanner Socket Port: {}'.format(port))
# create a socket connection
from .general_utils import create_pynealSocket
pynealSocket = create_pynealSocket(host, port)
logger.debug('Created pynealSocket')
# wait for remote to connect on pynealSocket
logger.info('Connecting to pynealSocket...')
while True:
msg = 'hello from pyneal_scanner '
pynealSocket.send_string(msg)
msgResponse = pynealSocket.recv_string()
if msgResponse == msg:
break
logger.info('pynealSocket connected')
### Wait for a new series directory appear
logger.info('Waiting for new series files to appear...')
seriesNum = scannerDirs.waitForNewSeries()
logger.info('New Series Number: {}'.format(seriesNum))
### Start threads to A) watch for new mosaic files, and B) process
# them as they appear
# initialize the dicom queue to keep store newly arrived
# dicom mosaic images, and keep track of which have been processed
dicomQ = Queue()
# create instance of class that will monitor sessionDir for new mosaic
# images to appear. Pass in a copy of the dicom queue. Start the thread
scanWatcher = Siemens_monitorSessionDir(scannerDirs.sessionDir, seriesNum, dicomQ)
scanWatcher.start()
# create an instance of the class that will grab mosaic dicoms
# from the queue, reformat the data, and pass over the socket
# to pyneal. Start the thread going
mosaicProcessor = Siemens_processMosaic(dicomQ, pynealSocket)
mosaicProcessor.start()
| mit | 7,839,139,264,983,837,000 | 38.713535 | 128 | 0.623552 | false |
rnelson/adventofcode | advent2015/day08.py | 1 | 2659 | #!/usr/bin/env python
"""
http://adventofcode.com/day/8
Part 1
------
Space on the sleigh is limited this year, and so Santa will be
bringing his list as a digital copy. He needs to know how much
space it will take up when stored.
It is common in many programming languages to provide a way to
escape special characters in strings. For example, C, JavaScript,
Perl, Python, and even PHP handle special characters in very
similar ways.
However, it is important to realize the difference between the
number of characters in the code representation of the string
literal and the number of characters in the in-memory string
itself.
(examples removed because the interpreter was complaining
about the escaping - ha)
Disregarding the whitespace in the file, what is the number of
characters of code for string literals minus the number of characters
in memory for the values of the strings in total for the entire file?
For example, given the four strings above, the total number of
characters of string code (2 + 5 + 10 + 6 = 23) minus the total
number of characters in memory for string values (0 + 3 + 7 +
1 = 11) is 23 - 11 = 12.
Part 2
------
Now, let's go the other way. In addition to finding the number of
characters of code, you should now encode each code representation
as a new string and find the number of characters of the new encoded
representation, including the surrounding double quotes.
(examples removed because the interpreter was complaining
about the escaping - ha)
Your task is to find the total number of characters to represent
the newly encoded strings minus the number of characters of code in
each original string literal. For example, for the strings above,
the total encoded length (6 + 9 + 16 + 11 = 42) minus the characters
in the original code representation (23, just like in the first
part of this puzzle) is 42 - 23 = 19.
"""
from __future__ import print_function
import os
import re
import sys
INFILE = 'inputs/input08.txt'
def main():
total_length = 0
unescaped_length = 0
escaped_length = 0
with open(INFILE) as f:
# Part 1
for line in f:
input = line.strip()
total_length += len(input)
            unescaped = input[1:-1].decode('string_escape')  # Python 2 only: the 'string_escape' codec does not exist in Python 3
unescaped_length += len(unescaped)
escaped = '"{}"'.format(re.escape(input))
escaped_length += len(escaped)
msg = '[Python] Puzzle 8-1: {}'
print(msg.format(total_length - unescaped_length))
# Part 2
msg = '[Python] Puzzle 8-2: {}'
print(msg.format(escaped_length - total_length))
if __name__ == '__main__':
main()
| mit | 7,221,743,905,681,149,000 | 29.918605 | 69 | 0.703272 | false |
bfollinprm/Nquintessence | cosmoslik/cosmoslik_plugins/likelihoods/wmap/wmap.py | 1 | 2594 | from numpy import zeros
from cosmoslik import Likelihood, SubprocessExtension
import os
class wmap(Likelihood):
"""
===============
WMAP Likelihood
===============
- Written by WMAP team (see `<http://lambda.gsfc.nasa.gov/>`_)
- CosmoSlik module by Marius Millea
- Updated July 1, 2012
Description
===========
This module wraps the official WMAP likelihood code.
Some minor modifications were made to allow:
- Choosing the WMAP data directory at runtime
- Choosing the lmin/lmax at runtime
Install Notes
=============
    To build this module run::
./cosmoslik.py --build likelihoods.wmap
The Makefile for this module reads the following flags from ``Makefile.inc``:
- ``$(CFITSIO)``
- ``$(LAPACK)``
- ``$(F2PYFLAGS)``
Models
======
The WMAP module requires a `Model` which provides the following:
- ``cl_TT``
- ``cl_TE``
- ``cl_EE``
- ``cl_BB``
Extra-galactic foregrounds are ignored.
Parameters
==========
This module reads the following parameters from the ini file:
[wmap].data_dir
---------------
The path to the wmap/data directory.
[wmap].use
----------
A subset of ``['TT','TE','EE','BB']`` corresponding to
which likelihood terms to use.
[wmap].TT.lrange
----------------
The TT range in ell to use in the likelihood
[wmap].TE.lrange
----------------
The TE range in ell to use in the likelihood
"""
def __init__(self,
datadir,
use=['TT','TE','EE','BB'],
ttmin=2,ttmax=1200,
temin=2,temax=800):
self.use = use
if not os.path.exists(datadir): raise Exception("The WMAP data directory you specified does not exist: '%s'"%datadir)
self.pywmap = SubprocessExtension('pywmap',globals())
self.pywmap.wmapinit(ttmin,ttmax,temin,temax,os.path.normpath(datadir)+'/')
def __call__(self, cmb):
cltt, clte, clee, clbb = [zeros(1202) for _ in range(4)]
for cl,x in zip([cltt,clte,clee,clbb],['TT','TE','EE','BB']):
if x in self.use:
m = cmb['cl_%s'%x]
s = slice(0,min(len(m),len(cl)))
cl[s] = m[s]
liketerms = self.pywmap.wmaplnlike(cltt=cltt[2:],clte=clte[2:],clee=clee[2:],clbb=clbb[2:])
return sum(liketerms)
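# Minimal usage sketch (hypothetical data directory and cl arrays, added for
# illustration only; not part of the original module):
#
#   like = wmap(datadir='wmap_likelihood/data/', use=['TT', 'TE'])
#   lnl = like({'cl_TT': cl_tt, 'cl_TE': cl_te, 'cl_EE': cl_ee, 'cl_BB': cl_bb})
#
# where each cl_* array is indexed from l=0, as expected by __call__ above.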
| mit | -6,767,131,946,119,144,000 | 24.683168 | 125 | 0.515806 | false |
yaricom/brainhash | src/experiment_cA7_dt_th_al_ah_bl_bh_gl.py | 1 | 2069 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The experiment with 10 Hz/5Hz, wisp, attention, 70, cA 7, delta, theta, alpha low, alpha high, beta low, beta high, gamma low, batch size = 5 and
balanced data set
@author: yaric
"""
import experiment as ex
import config
from time import time
n_hidden = 7
batch_size = 5
experiment_name = 'cA_%d_dt-th-a_l-a_h-b_l-b_h-g_l' % (n_hidden) # will be used as parent dir for analyzer results
# The sample records identifiers
signal_ids = ['IO_10_2', 'IO_TXT', 'IO_SKY', 'KS_10_2', 'RO_10_2']
noise_ids = ['noise']
# Setup analyzer configuration
analyzer_config = ex.defaultAnalyzerConfig()
analyzer_config['batch_size'] = batch_size
analyzer_config['learning_rate'] = 0.1
analyzer_config['n_hidden'] = n_hidden
analyzer_config['training_epochs'] = 50000
analyzer_config['encoder'] = 'cA'
analyzer_config['bands'] = 'delta,theta,alpha_l,alpha_h,beta_l,beta_h,gamma_l'
start = time()
#
# Run analyzer
#
print("\nStart analysis with parameters:\n%s\n" % analyzer_config)
print("Start analysis for signal records: %s" % signal_ids)
ex.runEEGAnalyzerWithIDs(ids_list=signal_ids,
experiment_name=experiment_name,
a_config=analyzer_config)
print("Start analysis for noise records: %s" % noise_ids)
ex.runEEGAnalyzerWithIDs(ids_list=noise_ids,
experiment_name=experiment_name,
a_config=analyzer_config)
#
# Run classifiers
#
signal_dir = "%s/%s" % (config.analyzer_out_dir, experiment_name)
noise_dir = "%s/%s/%s" % (config.analyzer_out_dir, experiment_name, noise_ids[0])
out_suffix = experiment_name
print("Run classifiers over analyzed records. \nSignal dir: %s\nNoise dir: %s"
% (signal_dir, noise_dir))
ex.runClassifier(signal_dir=signal_dir,
signal_records=signal_ids,
noise_dir=noise_dir,
out_suffix=out_suffix)
print("\n\nExperiment %s took %.2f seconds.\n"
% (experiment_name, time() - start))
| gpl-3.0 | 5,035,609,485,294,253,000 | 31.328125 | 145 | 0.640889 | false |
pbanaszkiewicz/amy | amy/extrequests/filters.py | 1 | 7324 | import re
from django.db.models import Q
from django.forms import widgets
import django_filters
from extrequests.models import SelfOrganisedSubmission, WorkshopInquiryRequest
from workshops.fields import Select2Widget
from workshops.filters import (
AllCountriesFilter,
AMYFilterSet,
ContinentFilter,
ForeignKeyAllValuesFilter,
NamesOrderingFilter,
StateFilterSet,
)
from workshops.models import Curriculum, Person, TrainingRequest, WorkshopRequest
# ------------------------------------------------------------
# TrainingRequest related filter and filter methods
# ------------------------------------------------------------
class TrainingRequestFilter(AMYFilterSet):
search = django_filters.CharFilter(
label="Name or Email",
method="filter_by_person",
)
group_name = django_filters.CharFilter(
field_name="group_name", lookup_expr="icontains", label="Group"
)
state = django_filters.ChoiceFilter(
label="State",
choices=(("no_d", "Pending or accepted"),) + TrainingRequest.STATE_CHOICES,
method="filter_training_requests_by_state",
)
matched = django_filters.ChoiceFilter(
label="Is Matched?",
choices=(
("", "Unknown"),
("u", "Unmatched"),
("p", "Matched trainee, unmatched training"),
("t", "Matched trainee and training"),
),
method="filter_matched",
)
nonnull_manual_score = django_filters.BooleanFilter(
label="Manual score applied",
method="filter_non_null_manual_score",
widget=widgets.CheckboxInput,
)
affiliation = django_filters.CharFilter(
method="filter_affiliation",
)
location = django_filters.CharFilter(lookup_expr="icontains")
order_by = NamesOrderingFilter(
fields=(
"created_at",
"score_total",
),
)
class Meta:
model = TrainingRequest
fields = [
"search",
"group_name",
"state",
"matched",
"affiliation",
"location",
]
def filter_matched(self, queryset, name, choice):
if choice == "":
return queryset
elif choice == "u": # unmatched
return queryset.filter(person=None)
elif choice == "p": # matched trainee, unmatched training
return (
queryset.filter(person__isnull=False)
.exclude(
person__task__role__name="learner",
person__task__event__tags__name="TTT",
)
.distinct()
)
else: # choice == 't' <==> matched trainee and training
return queryset.filter(
person__task__role__name="learner",
person__task__event__tags__name="TTT",
).distinct()
def filter_by_person(self, queryset, name, value):
if value == "":
return queryset
else:
# 'Harry Potter' -> ['Harry', 'Potter']
tokens = re.split(r"\s+", value)
# Each token must match email address or github username or
# personal, or family name.
for token in tokens:
queryset = queryset.filter(
Q(personal__icontains=token)
| Q(middle__icontains=token)
| Q(family__icontains=token)
| Q(email__icontains=token)
| Q(person__personal__icontains=token)
| Q(person__middle__icontains=token)
| Q(person__family__icontains=token)
| Q(person__email__icontains=token)
)
return queryset
def filter_affiliation(self, queryset, name, affiliation):
if affiliation == "":
return queryset
else:
q = Q(affiliation__icontains=affiliation) | Q(
person__affiliation__icontains=affiliation
)
return queryset.filter(q).distinct()
def filter_training_requests_by_state(self, queryset, name, choice):
if choice == "no_d":
return queryset.exclude(state="d")
else:
return queryset.filter(state=choice)
def filter_non_null_manual_score(self, queryset, name, manual_score):
if manual_score:
return queryset.filter(score_manual__isnull=False)
return queryset
# ------------------------------------------------------------
# WorkshopRequest related filter and filter methods
# ------------------------------------------------------------
class WorkshopRequestFilter(AMYFilterSet, StateFilterSet):
assigned_to = ForeignKeyAllValuesFilter(Person, widget=Select2Widget)
country = AllCountriesFilter(widget=Select2Widget)
continent = ContinentFilter(widget=Select2Widget, label="Continent")
requested_workshop_types = django_filters.ModelMultipleChoiceFilter(
label="Requested workshop types",
queryset=Curriculum.objects.all(),
widget=widgets.CheckboxSelectMultiple(),
)
order_by = django_filters.OrderingFilter(
fields=("created_at",),
)
class Meta:
model = WorkshopRequest
fields = [
"state",
"assigned_to",
"requested_workshop_types",
"country",
]
# ------------------------------------------------------------
# WorkshopInquiryRequest related filter and filter methods
# ------------------------------------------------------------
class WorkshopInquiryFilter(AMYFilterSet, StateFilterSet):
assigned_to = ForeignKeyAllValuesFilter(Person, widget=Select2Widget)
country = AllCountriesFilter(widget=Select2Widget)
continent = ContinentFilter(widget=Select2Widget, label="Continent")
requested_workshop_types = django_filters.ModelMultipleChoiceFilter(
label="Requested workshop types",
queryset=Curriculum.objects.all(),
widget=widgets.CheckboxSelectMultiple(),
)
order_by = django_filters.OrderingFilter(
fields=("created_at",),
)
class Meta:
model = WorkshopInquiryRequest
fields = [
"state",
"assigned_to",
"requested_workshop_types",
"country",
]
# ------------------------------------------------------------
# SelfOrganisedSubmission related filter and filter methods
# ------------------------------------------------------------
class SelfOrganisedSubmissionFilter(AMYFilterSet, StateFilterSet):
assigned_to = ForeignKeyAllValuesFilter(Person, widget=Select2Widget)
country = AllCountriesFilter(widget=Select2Widget)
continent = ContinentFilter(widget=Select2Widget, label="Continent")
workshop_types = django_filters.ModelMultipleChoiceFilter(
label="Requested workshop types",
queryset=Curriculum.objects.all(),
widget=widgets.CheckboxSelectMultiple(),
)
order_by = django_filters.OrderingFilter(
fields=("created_at",),
)
class Meta:
model = SelfOrganisedSubmission
fields = [
"state",
"assigned_to",
"workshop_types",
"workshop_format",
]
| mit | -316,208,959,969,467,600 | 31.264317 | 83 | 0.560213 | false |
jmcanterafonseca/fiware-cygnus | test/acceptance/tools/ckan_utils.py | 1 | 14123 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of fiware-cygnus (FI-WARE project).
#
# fiware-cygnus is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
# later version.
# fiware-cygnus is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with fiware-cygnus. If not, see
# http://www.gnu.org/licenses/.
#
# For those usages not covered by the GNU Affero General Public License please contact:
# iot_support at tid.es
#
__author__ = 'Iván Arias León (ivan.ariasleon at telefonica dot com)'
# general constants
from tools import general_utils, http_utils
EMPTY = u''
WITHOUT = u'without'
# url, headers and payload constants
HEADER_AUTHORIZATION = u'authorization'
HEADER_CONTENT_TYPE = u'Content-Type'
HEADER_APPLICATION = u'application/json'
VERSION = u'ckan_version'
VERSION_VALUE_DEFAULT = u'2.0'
HOST = u'host'
HOST_VALUE_DEFAULT = u'127.0.0.1'
PORT = u'port'
PORT_VALUE_DEFAULT = u'80'
AUTHORIZATION = u'authorization'
VERIFY_VERSION = u'verify_version'
FALSE_VALUE = u'false'
ORION_URL = u'orion_url'
ORION_URL_DEFAULT = u'http://localhost:1026'
SSL = u'ssl'
RETRIES_DATASET_SEARCH = u'retries_dataset_search'
DELAY_TO_RETRY = u'delay_to_retry'
PATH_VERSION_CKAN = u'api/util/status'
PATH_API_CREATE = u'api/3/action'
PATH_PACKAGE_SHOW = u'package_show?id='
PATH_DSTORE_SEARCH_SQL = u'datastore_search_sql?sql='
ORGANIZATION_LIST = u'organization_list'
ORGANIZATION_CREATE = u'organization_create'
PACKAGE_CREATE = u'package_create'
RESOURCE_CREATE = u'resource_create'
DATASTORE_CREATE = u'datastore_create'
PACKAGE_SHOW = u'package_show'
DATASTORE_SEARCH_SQL = u'datastore_search_sql'
RESULT = u'result'
RECORDS = u'records'
NAME = u'name'
OWNER_ORG = u'owner_org'
ID = u'id'
TYPE = u'type'
RESOURCES = u'resources'
URL_EXAMPLE = u'http://foo.bar/newresource'
URL = u'url'
PACKAGE_ID = u'package_id'
RESOURCE_ID = u'resource_id'
FIELD = u'fields'
FORCE = u'force'
RECVTIME = u'recvTime'
TIMESTAMP = u'timestamp'
TRUE = u'true'
class Ckan:
def __init__(self, **kwargs):
"""
constructor
:param ckan_version: ckan version (OPTIONAL)
        :param verify_version: determine whether the version is verified or not (True or False). (OPTIONAL)
:param authorization: API KEY (authorization) used in ckan requests (OPTIONAL)
:param host: ckan host (MANDATORY)
:param port: ckan port (MANDATORY)
:param orion_url: Orion URL used to compose the resource URL with the convenience operation URL to query it (OPTIONAL)
:param ssl: enable SSL for secure Http transportation; 'true' or 'false' (OPTIONAL)
:param capacity: capacity of the channel (OPTIONAL)
        :param transaction_capacity: amount of bytes that can be sent per transaction (OPTIONAL)
        :param retries_dataset_search: number of retries when getting values (OPTIONAL)
:param delay_to_retry: time to delay each retry (OPTIONAL)
endpoint_url: endpoint url used in ckan requests
"""
self.version = kwargs.get(VERSION, VERSION_VALUE_DEFAULT)
self.ckan_verify_version = kwargs.get(VERIFY_VERSION, FALSE_VALUE)
self.authorization = kwargs.get(AUTHORIZATION, EMPTY)
self.host = kwargs.get(HOST, HOST_VALUE_DEFAULT)
self.port = kwargs.get(PORT, PORT_VALUE_DEFAULT)
self.orion_url = kwargs.get(ORION_URL, ORION_URL_DEFAULT)
self.ssl = kwargs.get(SSL, FALSE_VALUE)
self.capacity = kwargs.get("capacity", "1000")
self.transaction_capacity= kwargs.get("transaction_capacity", "100")
self.retries_number = kwargs.get(RETRIES_DATASET_SEARCH, 15)
self.retry_delay = kwargs.get(DELAY_TO_RETRY, 10)
if self.ssl.lower() == "true":
self.endpoint = "https://"
if self.ssl.lower() == "false":
self.endpoint = "http://"
self.endpoint = self.endpoint + self.host+":"+self.port
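    # Example usage sketch (hypothetical host and API key, added for
    # illustration only; not part of the original module):
    #
    #   ckan = Ckan(host='192.168.1.10', port='80', authorization='<api-key>')
    #   ckan.verify_version()
    #   ckan.create_organization('test_org')
    #   ckan.create_dataset('test_dataset')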
def __create_url(self, operation, element=EMPTY):
"""
create the url for different operations
:param operation: operation type (dataset, etc)
:return: request url
"""
if operation == VERSION:
value = "%s/%s" % (self.endpoint, PATH_VERSION_CKAN)
if operation == ORGANIZATION_CREATE or operation == PACKAGE_CREATE or operation == RESOURCE_CREATE or operation == DATASTORE_CREATE or operation == ORGANIZATION_LIST:
value = "%s/%s/%s" % (self.endpoint, PATH_API_CREATE, operation) # organization Name
if operation == PACKAGE_SHOW:
value = "%s/%s/%s%s" % (self.endpoint, PATH_API_CREATE, PATH_PACKAGE_SHOW, element) # datasetName
if operation == DATASTORE_SEARCH_SQL:
value = "%s/%s/%s%s" % (self.endpoint, PATH_API_CREATE, PATH_DSTORE_SEARCH_SQL, element) # sql
return value
def __create_headers(self):
"""
create headers for different requests
:return header dict
"""
return {HEADER_AUTHORIZATION: self.authorization, HEADER_CONTENT_TYPE: HEADER_APPLICATION}
def __create_datastore_in_resource (self, resource_id, fields):
"""
create a datastore in a resource
:param resource_id: resource id
:param fields: field in datastore
"""
payload = general_utils.convert_dict_to_str({RESOURCE_ID: resource_id,
FIELD:fields,
FORCE: TRUE}, general_utils.JSON)
resp = http_utils.request(http_utils.POST, url=self.__create_url(DATASTORE_CREATE), headers=self.__create_headers(), data=payload)
http_utils.assert_status_code(http_utils.status_codes[http_utils.OK], resp, "ERROR - Creating datastore in resource id: %s" % (resource_id))
# ------------------------------ public methods ----------------------------------------
def verify_version (self):
"""
        Verify that ckan is installed and that its version is the expected one (default version is 2.0)
"""
if self.ckan_verify_version.lower() == "true":
resp= http_utils.request(http_utils.GET, url=self.__create_url(VERSION), headers=self.__create_headers())
body_dict = general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
assert self.version == str(body_dict[VERSION]), \
"Wrong ckan version verified: %s. Expected: %s. \n\nBody content: %s" % (str(body_dict[VERSION]), str(self.version), str(resp.text))
return True
def verify_if_organization_exist(self, name):
"""
        Verify if the organization exists
        :param name: organization name
        :return: True if the organization exists, False if it does not exist
"""
resp = http_utils.request(http_utils.GET, url=self.__create_url(ORGANIZATION_LIST, name), headers=self.__create_headers())
http_utils.assert_status_code(http_utils.status_codes[http_utils.OK], resp, "ERROR - list of the names of the site's organizations...")
body_dict = general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
for i in range(len(body_dict[RESULT])):
if body_dict[RESULT][i] == name: return True
return False
def create_organization (self, name):
"""
Create a new organization if it does not exist
:param name: organization name
"""
self.organization = name
if not(self.verify_if_organization_exist(name)):
payload = general_utils.convert_dict_to_str({NAME: name}, general_utils.JSON)
resp= http_utils.request(http_utils.POST, url=self.__create_url(ORGANIZATION_CREATE), headers=self.__create_headers(), data=payload)
http_utils.assert_status_code(http_utils.status_codes[http_utils.OK], resp, "ERROR - creating organization: %s ..." % (name))
return True
return False
def get_organization (self):
"""
get organization name
:return: organization name
"""
return self.organization
def verify_if_dataset_exist(self, name):
"""
        Verify if the dataset exists
        :param name: dataset name
        :return: the dataset id if the dataset exists, False if it does not exist
"""
resp = http_utils.request(http_utils.GET, url=self.__create_url(PACKAGE_SHOW, name), headers=self.__create_headers())
if resp.status_code == http_utils.status_codes[http_utils.OK]:
bodyDict= general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
self.dataset_id = bodyDict[RESULT][ID]
return self.dataset_id
return False
def create_dataset (self, name):
"""
Create a new dataset if it does not exist
:param name: dataset name
"""
self.dataset = name
if not(self.verify_if_dataset_exist( name)):
payload = general_utils.convert_dict_to_str({NAME: self.dataset,
OWNER_ORG: self.organization}, general_utils.JSON)
resp= http_utils.request(http_utils.POST, url=self.__create_url(PACKAGE_CREATE), headers=self.__create_headers(), data=payload)
http_utils.assert_status_code(http_utils.status_codes[http_utils.OK], resp, "ERROR - creating dataset: %s ..." % (name))
bodyDict= general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
self.dataset_id = bodyDict[RESULT][ID]
return bodyDict[RESULT][ID]
return False
def get_dataset (self):
"""
get dataset name and dataset id
:return: dataset name and dataset id
"""
return self.dataset, self.dataset_id
def verify_if_resource_exist(self, name, dataset_name):
"""
        Verify if the resource exists in a dataset
        :param name: resource name
        :param dataset_name: dataset name
        :return: the resource id if the resource exists in the dataset, False if it does not exist
"""
resp = http_utils.request(http_utils.GET, url=self.__create_url(PACKAGE_SHOW, dataset_name), headers=self.__create_headers())
if resp.status_code == http_utils.status_codes[http_utils.OK]:
body_dict = general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
for i in range(len(body_dict[RESULT][RESOURCES])):
if body_dict[RESULT][RESOURCES][i][NAME] == name:
self.resource_id = body_dict[RESULT][RESOURCES][i][ID]
return self.resource_id
return False
def generate_field_datastore_to_resource (self, attributes_number, attributes_name, attribute_type, metadata_type):
"""
generate fields to datastore request
:return: fields list
"""
field = []
field.append({ID:RECVTIME, TYPE: TIMESTAMP})
for i in range(0, int(attributes_number)):
if attribute_type != WITHOUT: field.append({ID:attributes_name+"_"+str(i), TYPE: attribute_type})
if metadata_type != WITHOUT:field.append({ID:attributes_name+"_"+str(i)+"_md", TYPE: metadata_type})
return field
def create_resource(self, name, dataset_name, fields=[]):
self.resource = name
if not(self.verify_if_resource_exist(name, dataset_name)):
payload = general_utils.convert_dict_to_str({NAME: self.resource,
URL: URL_EXAMPLE,
PACKAGE_ID: self.dataset_id}, general_utils.JSON)
resp= http_utils.request(http_utils.POST, url=self.__create_url(RESOURCE_CREATE), headers=self.__create_headers(), data=payload)
http_utils.assert_status_code(http_utils.status_codes[http_utils.OK], resp, "ERROR - creating resource: %s ..." % (name))
bodyDict= general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
self.resource_id = bodyDict[RESULT][ID]
self.__create_datastore_in_resource (self.resource_id, fields)
return self.resource_id
return False
def get_resource (self):
"""
get resource name and resource id
:return: resource name and resource id
"""
return self.resource, self.resource_id
def datastore_search_last_sql (self, rows, resource_name, dataset_name):
"""
get last record in a resource
:param name: resource name
:param dataset_name: dataset name
:return: record dict
"""
resource_id = self.verify_if_resource_exist(resource_name, dataset_name)
if resource_id != False:
sql = 'SELECT * from "' + resource_id + '" ORDER BY 1 DESC LIMIT '+str (rows)
resp= http_utils.request(http_utils.POST, url=self.__create_url(DATASTORE_SEARCH_SQL, sql), headers=self.__create_headers(), data=EMPTY)
http_utils.assert_status_code(http_utils.status_codes[http_utils.OK], resp, "ERROR - creating resource: %s ..." % (resource_name))
return resp
return resource_id
| agpl-3.0 | -1,284,196,080,977,096,200 | 46.864407 | 174 | 0.610552 | false |
alphagov/notifications-delivery | tests/clients/test_aws_ses.py | 1 | 1062 | from moto import mock_ses
from notifications_delivery.clients.email.aws_ses import (AwsSesClient, AwsSesClientException)
@mock_ses
def test_send_email(ses_client):
aws_ses_client = AwsSesClient(region='eu-west-1')
source = "[email protected]"
to_address = "[email protected]"
subject = "Email subject"
body = "Email body"
# All source email addresses have to be verified before you
# can send on behalf of them.
ses_client.verify_email_identity(EmailAddress=source)
message_id = aws_ses_client.send_email(source, to_address, subject, body)
assert message_id
@mock_ses
def test_send_email_not_verified(ses_client):
aws_ses_client = AwsSesClient(region='eu-west-1')
source = "[email protected]"
to_address = "[email protected]"
subject = "Email subject"
body = "Email body"
    try:
        aws_ses_client.send_email(source, to_address, subject, body)
    except AwsSesClientException as e:
        assert 'Did not have authority to send from email [email protected]' in str(e)
    else:
        raise AssertionError('Expected AwsSesClientException for unverified source address')
| mit | 3,366,847,243,641,234,400 | 35.62069 | 94 | 0.704331 | false |
cocrawler/cocrawler | cocrawler/fetcher.py | 1 | 10249 | '''
async fetching of urls.
Assumes robots checks have already been done.
Success returns response object and response bytes (which were already
read in order to shake out all potential network-related exceptions.)
Failure returns enough details for the caller to do something smart:
503, other 5xx, DNS fail, connect timeout, error between connect and
full response, proxy failure. Plus an errorstring good enough for logging.
'''
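# Minimal call sketch (hypothetical caller code, added for illustration only;
# `url` is a cocrawler URL object and `handle` is caller-supplied):
#
#   async with aiohttp.ClientSession() as session:
#       f = await fetch(url, session, allow_redirects=True, max_redirects=5)
#       if f.last_exception is None:
#           handle(f.response, f.body_bytes)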
import time
import traceback
from collections import namedtuple
import ssl
import urllib
import asyncio
import logging
import aiohttp
from . import stats
from . import config
from . import content
from .urls import URL
LOGGER = logging.getLogger(__name__)
# these errors get printed deep in aiohttp but they also bubble up
aiohttp_errors = {
'SSL handshake failed',
'SSL error errno:1 reason: CERTIFICATE_VERIFY_FAILED',
'SSL handshake failed on verifying the certificate',
'Fatal error on transport TCPTransport',
'Fatal error on SSL transport',
'SSL error errno:1 reason: UNKNOWN_PROTOCOL',
'Future exception was never retrieved',
'Unclosed connection',
'SSL error errno:1 reason: TLSV1_UNRECOGNIZED_NAME',
'SSL error errno:1 reason: SSLV3_ALERT_HANDSHAKE_FAILURE',
'SSL error errno:1 reason: TLSV1_ALERT_INTERNAL_ERROR',
}
class AsyncioSSLFilter(logging.Filter):
def filter(self, record):
stats.stats_sum('filter examined a {} {} log line'.format(record.name, record.levelname), 1)
if record.name == 'asyncio' and record.levelname == 'ERROR':
msg = record.getMessage()
for ae in aiohttp_errors:
if msg.startswith(ae):
stats.stats_sum('filter suppressed a asyncio ERROR log line', 1)
return False
return True
def establish_filters():
f = AsyncioSSLFilter()
logging.getLogger('asyncio').addFilter(f)
# XXX should be a policy plugin
# XXX cookie handling -- can be per-get -- make per-domain jar
def apply_url_policies(url, crawler):
headers = {}
headers['User-Agent'] = crawler.ua
if crawler.prevent_compression:
headers['Accept-Encoding'] = 'identity'
else:
headers['Accept-Encoding'] = content.get_accept_encoding()
if crawler.upgrade_insecure_requests:
headers['Upgrade-Insecure-Requests'] = '1'
proxy, prefetch_dns = global_policies()
get_kwargs = {'headers': headers, 'proxy': proxy}
return prefetch_dns, get_kwargs
def global_policies():
proxy = config.read('Fetcher', 'ProxyAll')
prefetch_dns = not proxy or config.read('GeoIP', 'ProxyGeoIP')
return proxy, prefetch_dns
FetcherResponse = namedtuple('FetcherResponse', ['response', 'body_bytes', 'ip', 'req_headers',
't_first_byte', 't_last_byte', 'is_truncated',
'last_exception'])
async def fetch(url, session,
allow_redirects=None, max_redirects=None,
stats_prefix='', max_page_size=-1, get_kwargs={}):
last_exception = None
is_truncated = False
response = None
try:
t0 = time.time()
last_exception = None
body_bytes = b''
blocks = []
left = max_page_size
ip = None
with stats.coroutine_state(stats_prefix+'fetcher fetching'):
with stats.record_latency(stats_prefix+'fetcher fetching', url=url.url):
response = await session.get(url.url,
allow_redirects=allow_redirects,
max_redirects=max_redirects,
**get_kwargs)
t_first_byte = '{:.3f}'.format(time.time() - t0)
if 'proxy' not in get_kwargs and response.connection:
# this is racy, often the connection is already None unless the crawler is busy
addr = response.connection.transport.get_extra_info('peername')
if addr:
stats.stats_sum(stats_prefix+'fetch ip from connection', 1)
ip = [addr[0]] # ipv4 or ipv6
while left > 0:
# reading stream directly to dodge decompression and limit size.
# this means that aiohttp tracing on_response_chunk_receive doesn't work
block = await response.content.read(left)
if not block:
body_bytes = b''.join(blocks)
break
blocks.append(block)
left -= len(block)
else:
body_bytes = b''.join(blocks)
if not response.content.at_eof():
stats.stats_sum(stats_prefix+'fetch truncated length', 1)
response.close() # this does interrupt the network transfer
is_truncated = 'length'
t_last_byte = '{:.3f}'.format(time.time() - t0)
except asyncio.TimeoutError:
stats.stats_sum(stats_prefix+'fetch timeout', 1)
last_exception = 'TimeoutError'
body_bytes = b''.join(blocks)
if len(body_bytes):
# these body_bytes are currently dropped because last_exception is set
is_truncated = 'time'
stats.stats_sum(stats_prefix+'fetch timeout body bytes found', 1)
stats.stats_sum(stats_prefix+'fetch timeout body bytes found bytes', len(body_bytes))
except (aiohttp.ClientError) as e:
# ClientError is a catchall for a bunch of things
# e.g. DNS errors, '400' errors for http parser errors
# ClientConnectorCertificateError for an SSL cert that doesn't match hostname
# ClientConnectorSSLError see https://bugs.python.org/issue27970 for python not handling missing intermediate certs
# ClientConnectorError(None, None) caused by robots redir to DNS fail
# ServerDisconnectedError(None,) caused by servers that return 0 bytes for robots.txt fetches
# TooManyRedirects("0, message=''",) caused by too many robots.txt redirs
stats.stats_sum(stats_prefix+'fetch ClientError', 1)
detailed_name = str(type(e).__name__)
last_exception = 'ClientError: ' + detailed_name + ': ' + str(e)
body_bytes = b''.join(blocks)
if len(body_bytes):
# these body_bytes are currently dropped because last_exception is set
is_truncated = 'disconnect'
stats.stats_sum(stats_prefix+'fetch ClientError body bytes found', 1)
stats.stats_sum(stats_prefix+'fetch ClientError body bytes found bytes', len(body_bytes))
except ssl.CertificateError as e:
# many ssl errors raise and have tracebacks printed deep in python, fixed in 3.8
stats.stats_sum(stats_prefix+'fetch SSL CertificateError', 1)
last_exception = 'CertificateError: ' + str(e)
except ValueError as e:
# no A records found -- raised by our dns code
# aiohttp raises:
# ValueError Location: https:/// 'Host could not be detected' -- robots fetch
# ValueError Location: http:// /URL should be absolute/ -- robots fetch
# ValueError 'Can redirect only to http or https' -- robots fetch -- looked OK to curl!
stats.stats_sum(stats_prefix+'fetch other error - ValueError', 1)
        last_exception = 'ValueError: ' + str(e)
except AttributeError as e:
stats.stats_sum(stats_prefix+'fetch other error - AttributeError', 1)
last_exception = 'AttributeError: ' + str(e)
except RuntimeError as e:
stats.stats_sum(stats_prefix+'fetch other error - RuntimeError', 1)
last_exception = 'RuntimeError: ' + str(e)
except asyncio.CancelledError:
raise
except Exception as e:
last_exception = 'Exception: ' + str(e)
stats.stats_sum(stats_prefix+'fetch surprising error', 1)
LOGGER.info('Saw surprising exception in fetcher working on %s:\n%s', url.url, last_exception)
traceback.print_exc()
# if redirs are allowed the url must be set to the final url
if response and str(response.url) != url.url:
if allow_redirects:
url = URL(str(response.url))
else:
# TODO: this fires for quoting: {{%20data.src%20}} comes out %7B%7B%20data.src%20%7D%7D
LOGGER.error('Surprised that I fetched %s and got %s', url.url, str(response.url))
if last_exception is not None:
if body_bytes:
LOGGER.info('we failed working on %s, the last exception is %s, dropped %d body bytes', url.url, last_exception, len(body_bytes))
else:
LOGGER.info('we failed working on %s, the last exception is %s', url.url, last_exception)
return FetcherResponse(None, None, None, None, None, None, False, last_exception)
fr = FetcherResponse(response, body_bytes, ip, response.request_info.headers,
t_first_byte, t_last_byte, is_truncated, None)
if response.status >= 500:
LOGGER.debug('server returned http status %d', response.status)
stats.stats_sum(stats_prefix+'fetch bytes', len(body_bytes) + len(response.raw_headers))
stats.stats_sum(stats_prefix+'fetch URLs', 1)
stats.stats_sum(stats_prefix+'fetch http code=' + str(response.status), 1)
# checks after fetch:
# hsts header?
# if ssl, check strict-transport-security header, remember max-age=foo part., other stuff like includeSubDomains
# did we receive cookies? was the security bit set?
return fr
def upgrade_scheme(url):
'''
Upgrade crawled scheme to https, if reasonable. This helps to reduce MITM attacks against the crawler.
https://chromium.googlesource.com/chromium/src/net/+/master/http/transport_security_state_static.json
Alternately, the return headers from a site might have strict-transport-security set ... a bit more
dangerous as we'd have to respect the timeout to avoid permanently learning something that's broken
TODO: use HTTPSEverwhere? would have to have a fallback if https failed, which it occasionally will
'''
return url
| apache-2.0 | -2,570,987,281,301,282,000 | 40.662602 | 141 | 0.630891 | false |
peterlei/fboss | fboss/system_tests/system_tests.py | 1 | 6328 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import importlib
import logging
import os
import sys
import unittest
user_requested_tags = []
Defaults = {
"test_dirs": ['tests'],
"config": 'test_topologies/example_topology.py',
"log_level": logging.INFO,
"log_dir": "results",
"log_file": "{dir}/result-{test}.log",
"test_topology": None,
"min_hosts": 2,
"tags": user_requested_tags
}
def _test_has_user_requested_tag(test_tags):
for tag in test_tags:
if tag in user_requested_tags:
return True
return False
def test_tags(*args):
def fn(cls):
if _test_has_user_requested_tag(list(args)):
cls.valid_tags = True
return cls
return fn
def generate_default_test_argparse(**kwargs):
""" Put all command line args into a function, so that other
programs (e.g., internal automation) can start with these args and build
on them.
"""
global Defaults
parser = argparse.ArgumentParser(description='FBOSS System Tests', **kwargs)
parser.add_argument('--test_dirs', default=Defaults['test_dirs'],
nargs='*')
parser.add_argument('--config', default=Defaults['config'])
parser.add_argument('--log_dir', default=Defaults['log_dir'])
parser.add_argument('--log_file', default=Defaults['log_file'])
parser.add_argument('--min_hosts', default=Defaults['min_hosts'])
parser.add_argument('--log_level', default=Defaults['log_level'])
parser.add_argument('--tags',
help="Provide list of test tags, default is all tests "
"Example tags qsfp, port etc",
default=Defaults['tags'])
return parser
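# Example (hypothetical wrapper, added for illustration): an internal runner can
# extend these defaults before parsing, e.g.
#
#   parser = generate_default_test_argparse()
#   parser.add_argument('--site_specific_flag')
#   options = parser.parse_args()
#   run_tests(options)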
def generate_default_test_options(**kwargs):
""" Global system parameters for the test suite.
This is conveniently formed from an argparse structure.
"""
return generate_default_test_argparse(**kwargs).parse_args()
def dynamic_generate_test_topology(options):
""" Read test topology from file, import it, and return
the test_topology specified in generate_test_topology()
This particular magic requires Python 3.5+
"""
if hasattr(options, 'test_topology'):
return options.test_topology
spec = importlib.util.spec_from_file_location("config", options.config)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
options.test_topology = module.generate_test_topology()
return options.test_topology
def setup_logging(options):
""" Make sure that if a log_dir is set, it exists
"""
if options.log_dir is not None:
if not os.path.exists(options.log_dir):
os.makedirs(options.log_dir)
class FbossBaseSystemTest(unittest.TestCase):
""" This Class is the base class of all Fboss System Tests """
_format = "%(asctime)s.%(msecs)03d %(name)-10s: %(levelname)-8s: %(message)s"
_datefmt = "%H:%M:%S"
def setUp(self):
if self.options is None:
raise Exception("options not set - did you call run_tests()?")
if (not hasattr(self.options, 'test_topology') or
self.options.test_topology is None):
raise Exception("options.test_topology not set - " +
"did you call run_tests()?")
self.test_topology = self.options.test_topology # save typing
my_name = str(self.__class__.__name__)
self.log = logging.getLogger(my_name)
self.log.setLevel(self.options.log_level)
logfile_opts = {'test': my_name, 'dir': self.options.log_dir}
logfile = self.options.log_file.format(**logfile_opts)
# close old log files
for handler in self.log.handlers:
self.log.removeHandler(handler)
handler.close()
# open one unique to this class of tests
handler = logging.FileHandler(logfile, mode='w+')
handler.setFormatter(logging.Formatter(self._format, self._datefmt))
self.log.addHandler(handler)
class TestTopologyValidation(FbossBaseSystemTest):
def test_topology_sanity(self):
self.log.info("Testing connection to switch")
self.assertTrue(self.test_topology.verify_switch())
self.log.info("Testing connection to hosts")
self.assertTrue(self.test_topology.verify_hosts())
def frob_options_into_tests(suite, options):
""" Make sure 'options' is available as a class variable
to all of the tests.
This is a horrible hack, but saves a lot of typing.
"""
for test in suite._tests:
if isinstance(test, unittest.suite.TestSuite):
# recursively iterate through all of the TestSuites
frob_options_into_tests(test, options)
else:
test.options = options
def add_interested_tests_to_test_suite(tests, suite):
if not isinstance(tests, unittest.suite.TestSuite):
        # when the user provides tags, add only test cases that have
        # matching tags; add all test cases when the user does not
        # provide any tags
if hasattr(tests, "valid_tags") or not user_requested_tags:
suite.addTest(tests)
return
for test in tests:
add_interested_tests_to_test_suite(test, suite)
def run_tests(options):
""" Run all of the tests as described in options
:options : a dict of testing options, as described above
"""
setup_logging(options)
options.test_topology = dynamic_generate_test_topology(options)
suite = unittest.TestSuite()
# this test needs to run first
suite.addTest(TestTopologyValidation('test_topology_sanity'))
for directory in options.test_dirs:
testsdir = unittest.TestLoader().discover(start_dir=directory,
pattern='*test*.py')
add_interested_tests_to_test_suite(testsdir, suite)
frob_options_into_tests(suite, options)
return unittest.TextTestRunner(verbosity=2).run(suite)
def main(args):
options_parser = generate_default_test_argparse()
options = options_parser.parse_args()
run_tests(options)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause | 2,836,203,080,817,214,000 | 34.155556 | 82 | 0.642225 | false |
lukaszkoczwara/presence-analyzer-lkoczwara | src/presence_analyzer/helpers.py | 1 | 1777 | # -*- coding: utf-8 -*-
"""
Helper functions used in views.
"""
from json import dumps
from functools import wraps
from flask import Response
def jsonify(function):
"""
Creates a response with the JSON representation of wrapped function result.
"""
@wraps(function)
def inner(*args, **kwargs):
return Response(dumps(function(*args, **kwargs)),
mimetype='application/json')
return inner
def group_by_weekday(items):
"""
Groups presence entries by weekday.
"""
result = {i: [] for i in range(7)}
for date in items:
start = items[date]['start']
end = items[date]['end']
result[date.weekday()].append(interval(start, end))
return result
def seconds_since_midnight(time):
"""
Calculates amount of seconds since midnight.
"""
return time.hour * 3600 + time.minute * 60 + time.second
def interval(start, end):
"""
    Calculates interval in seconds between two datetime.time objects.
"""
return seconds_since_midnight(end) - seconds_since_midnight(start)
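# Quick sanity check (illustrative values only, added for clarity):
#   seconds_since_midnight(datetime.time(1, 30, 15)) == 5415
#   interval(datetime.time(9, 0, 0), datetime.time(17, 30, 0)) == 30600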
def mean(items):
"""
Calculates arithmetic mean. Returns zero for empty lists.
"""
return float(sum(items)) / len(items) if len(items) > 0 else 0
def group_start_end_times_by_weekday(items):
"""
Groups start and end times in sec. by weekday.
"""
result = {i: {'start': [], 'end': []} for i in range(7)}
for date, start_end in items.iteritems():
start = start_end['start']
end = start_end['end']
result[date.weekday()]['start'].append(seconds_since_midnight(start))
result[date.weekday()]['end'].append(seconds_since_midnight(end))
return result
| mit | -492,233,173,606,348,740 | 24.924242 | 79 | 0.601013 | false |
CompSci17/Survey-System | survey_system_files/results.py | 1 | 11810 | from .models import Answers, RadioResults, SelectResults, ImportanceOrderResults, CheckboxResults
from chartit import DataPool, Chart
class Results( ):
def render_results( self, questions, survey ):
"""
Sorts out logic behind how we present our answers.
@param questions QuerySet Questions we're working with
@param survey Object The survey we're rendering results for
            @return Returns a list of tuples to be utilised in the view.
                Text/Textarea entries are of the form: ( input_type, list_of_answers, question_pk )
                Chart-based inputs are of the form: ( input_type, chart_object, question_pk )
                Order-of-importance inputs are of the form: ( input_type, counter_dict, question_pk )
"""
# A list to hold our output tuples
output = []
for question in questions:
# For every question in the QuerySet, we're going to check and process
# it dependent on input type
if question.input_type == 'text':
# get question's results
results = self.get_results( question )
combined_results = []
for result in results:
# For every answer we have, put it inside a list
combined_results.append( str( result.text ) )
# Add our input type, list and primary key to our output list
output.append( ( "text", combined_results, question.pk ) )
elif question.input_type == 'textarea':
# get question's results
results = self.get_results( question )
combined_results = []
for result in results:
# For every answer we have, put it inside a list
combined_results.append( str( result.text ) )
# Add our input type, list and primary key to our output list
output.append( ( "textarea", combined_results, question.pk ) )
elif question.input_type == 'radio':
# Get all the options offered by the question
options = self.get_choices( question.choices )
# Dictionary for counting the occurrences of a selection
counter = {}
# Get our question's results
answers = self.get_results( question )
for option in options:
# For every option, add it to our dictionary; starting with 0
counter.update( { option.strip().replace( ",", "" ) : 0 } )
for answer in answers:
# For every answer, increment the answer in the dictionary
counter[ str( answer.text ).strip().replace( ",", "" ) ] += 1
for option in options:
# Check if the count for this question already exists
existence_check = RadioResults.objects.filter(
survey__exact = survey,
question__exact = question,
answer__exact = option.strip().replace( ",", "" )
)
if existence_check.exists( ):
# If it exists, pass in the primary key
result = RadioResults(
pk = existence_check[0].pk,
survey = survey,
question = question,
answer = option.strip().replace( ",", "" ),
answer_count = counter[ str( option ).strip().replace( ",", "" ) ]
)
else:
# If it doesn't exist, leave out the primary key
result = RadioResults(
survey = survey,
question = question,
answer = option.strip().replace( ",", "" ),
answer_count = counter[ str( option ).strip().replace( ",", "" ) ]
)
# Save our set of results
result.save()
# Get our chart object for the list
piechart = self.radio_pie_chart( question )
# Add our input type, chart object and primary key to our output list
output.append( ( "radio", piechart, question.pk ) )
elif question.input_type == 'select':
# Get all the options offered by the question
options = self.get_choices( question.choices )
# Dictionary for counting the occurrences of a selection
counter = {}
# Get our question's results
answers = self.get_results( question )
for option in options:
# For every option, add it to our dictionary; starting with 0
counter.update( { option.strip().replace( ",", "" ) : 0 } )
for answer in answers:
# For every answer, increment the answer in the dictionary
counter[ str( answer.text ).strip().replace( ",", "" ) ] += 1
for option in options:
# Check if the count for this question already exists
existence_check = SelectResults.objects.filter(
survey__exact = survey,
question__exact = question,
answer__exact = option.strip().replace( ",", "" )
)
if existence_check.exists( ):
# If it exists, pass in the primary key
result = SelectResults(
pk = existence_check[0].pk,
survey = survey,
question = question,
answer = option.strip().replace( ",", "" ),
answer_count = counter[ str( option ).strip().replace( ",", "" ) ]
)
else:
# If it doesn't exist, leave out the primary key
result = SelectResults(
survey = survey,
question = question,
answer = option.strip().replace( ",", "" ),
answer_count = counter[ str( option ).strip().replace( ",", "" ) ]
)
# Save our set of results
result.save()
# Get our chart object for the list
piechart = self.select_pie_chart( question )
# Add our input type, chart object and primary key to our output list
output.append( ( "select", piechart, question.pk ) )
elif question.input_type == 'checkbox':
# Get all the question's answers
answers = self.get_results( question )
# We'll use this to keep track of the answer count
counter = {}
# Get all the question's options/choices
options = self.get_choices( question.choices )
for option in options:
# initialise each option in the counter with 0
counter.update( { option.strip() : 0 } )
for answer in answers:
# Get a list of all the answers
delimited_answers = answer.text.split( "," )
for indiv_answer in delimited_answers:
# For every answer, increment it in the counter
counter[ indiv_answer.strip() ] += 1
for option in counter:
# Check if the question already has a count going in the database
existence_check = CheckboxResults.objects.filter(
survey__exact = survey,
question__exact = question,
answer__exact = option.strip()
)
if existence_check.exists():
# If it exists, just update it
result = CheckboxResults(
pk = existence_check[0].pk,
survey = survey,
question = question,
answer = option,
answer_count = counter[ option.strip() ]
)
else:
# If it doesn't exist, create it
result = CheckboxResults(
survey = survey,
question = question,
answer = option,
answer_count = counter[ option.strip() ]
)
# Save the result in the model
result.save()
# Create new bar chart
bar_chart = self.checkbox_bar_chart( question )
# Append the checkbox details to the returned output
output.append( ( "checkbox", bar_chart, question.pk ) )
elif question.input_type == 'order':
# Get all the question's options
options = self.get_choices( question.choices )
# Get the number of options
number_of_options = len( options )
# We'll use this to keep track of the answer count
counter = {}
for integer_counter in range( 1, number_of_options + 1 ):
# Initialise dict using integers with their own dictionaries
counter.update( { integer_counter: { } } )
for option in options:
# For every option, initialise the above integer's dicts with the option's counter at 0
counter[ integer_counter ].update( { str( option ).strip().replace( ",", "" ) : 0 } )
# Get the question's answers
answers = self.get_results( question )
for answer in answers:
# For every answer, split it at every comma
split_answers = answer.text.split( "," )
for i, result in enumerate( split_answers ):
# Increment the choice's counter by 1
counter[ i + 1 ][ result.strip().replace( ",", "" ) ] += 1
for position in counter:
for option in counter[ position ]:
existence_check = ImportanceOrderResults.objects.filter(
survey__exact = survey,
question__exact = question,
answer__exact = option.strip().replace( ",", "" ),
answer_position__exact = position
)
if existence_check.exists():
result = ImportanceOrderResults(
pk = existence_check[0].pk,
survey = survey,
question = question,
answer = option.strip().replace( ",", "" ),
answer_position = position,
answer_count = counter[ position ][ str( option ).strip().replace( ",", "" ) ]
)
else:
result = ImportanceOrderResults(
survey = survey,
question = question,
answer = option.strip().replace( ",", "" ),
answer_position = position,
answer_count = counter[ position ][ str( option ).strip().replace( ",", "" ) ]
)
result.save()
output.append( ( "order_of_importance", counter, str( question.pk ) ) )
return output
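    # Example consumption sketch (hypothetical view code, added for illustration):
    #
    #   output = Results().render_results(questions, survey)
    #   for input_type, payload, question_pk in output:
    #       ...  # payload is a list, a chart object or a counter dict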
def get_choices( self, choices ):
"""
            Get all the choices/options for a question, delimiting them
by comma.
@param choices String String of choices from the question model
@return A list of choices/options
"""
CHOICES=[]
# Delimit our choices
choices_delimited = choices.split( ',' )
for choice in choices_delimited:
# For every choice, append the value to a list
CHOICES.append( str( choice ) )
# Return a list of choices/options
return CHOICES
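    # Illustration (hypothetical input): get_choices("Red, Green, Blue") returns
    # ["Red", " Green", " Blue"]; callers strip the leftover whitespace with
    # .strip() before counting answers.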
def get_results( self, question ):
"""
Get all the answers for a question
@return QuerySet with all the answers for a question
"""
answers = Answers.objects.filter( question__exact = question )
return answers
def radio_pie_chart( request, question ):
"""
@return Piechart object for radio results
"""
ds = DataPool(
series=
[{'options': {
'source': RadioResults.objects.filter( question__exact = question )},
'terms': [
'answer',
'answer_count']}
])
chart = Chart(
datasource = ds,
series_options =
[{'options':{
'type': 'pie',
'stacking': False},
'terms':{
'answer': [
'answer_count']
}}],
chart_options =
{
'title': {
'text': question.text
}
}
)
return chart
def select_pie_chart( request, question ):
"""
@return Piechart object for select results
"""
ds = DataPool(
series=
[{'options': {
'source': SelectResults.objects.filter( question__exact = question )},
'terms': [
'answer',
'answer_count']}
])
chart = Chart(
datasource = ds,
series_options =
[{'options':{
'type': 'pie',
'stacking': False},
'terms':{
'answer': [
'answer_count']
}}],
chart_options =
{
'title': {
'text': question.text
}
}
)
return chart
def checkbox_bar_chart( request, question ):
"""
@return Barchart for checkbox results
"""
ds = DataPool(
series=
[{'options': {
'source': CheckboxResults.objects.filter( question__exact = question ) },
'terms': [
'answer',
'answer_count']}
])
chart = Chart(
datasource = ds,
series_options =
[{'options':{
'type': 'column',
'stacking': True},
'terms':{
'answer': [
'answer_count']
}}],
chart_options =
{'title': {
'text': question.text },
'xAxis': {
'title': {
'text': 'Answers'}}})
return chart | mit | -1,745,348,226,818,002,700 | 27.807317 | 97 | 0.590686 | false |
luckielordie/conan | conans/model/info.py | 1 | 13408 | import os
from conans.client.build.cppstd_flags import cppstd_default
from conans.errors import ConanException
from conans.model.env_info import EnvValues
from conans.model.options import OptionsValues
from conans.model.ref import PackageReference
from conans.model.values import Values
from conans.paths import CONANINFO
from conans.util.config_parser import ConfigParser
from conans.util.files import load
from conans.util.sha import sha1
class RequirementInfo(object):
def __init__(self, value_str, indirect=False):
""" parse the input into fields name, version...
"""
ref = PackageReference.loads(value_str)
self.package = ref
self.full_name = ref.conan.name
self.full_version = ref.conan.version
self.full_user = ref.conan.user
self.full_channel = ref.conan.channel
self.full_package_id = ref.package_id
# sha values
if indirect:
self.unrelated_mode()
else:
self.semver()
def dumps(self):
if not self.name:
return ""
result = ["%s/%s" % (self.name, self.version)]
if self.user or self.channel:
result.append("@%s/%s" % (self.user, self.channel))
if self.package_id:
result.append(":%s" % self.package_id)
return "".join(result)
@property
def sha(self):
return "/".join([str(n) for n in [self.name, self.version, self.user, self.channel,
self.package_id]])
def unrelated_mode(self):
self.name = self.version = self.user = self.channel = self.package_id = None
def semver_mode(self):
self.name = self.full_name
self.version = self.full_version.stable()
self.user = self.channel = self.package_id = None
semver = semver_mode
def full_version_mode(self):
self.name = self.full_name
self.version = self.full_version
self.user = self.channel = self.package_id = None
def patch_mode(self):
self.name = self.full_name
self.version = self.full_version.patch()
self.user = self.channel = self.package_id = None
def base_mode(self):
self.name = self.full_name
self.version = self.full_version.base
self.user = self.channel = self.package_id = None
def minor_mode(self):
self.name = self.full_name
self.version = self.full_version.minor()
self.user = self.channel = self.package_id = None
def major_mode(self):
self.name = self.full_name
self.version = self.full_version.major()
self.user = self.channel = self.package_id = None
def full_recipe_mode(self):
self.name = self.full_name
self.version = self.full_version
self.user = self.full_user
self.channel = self.full_channel
self.package_id = None
def full_package_mode(self):
self.name = self.full_name
self.version = self.full_version
self.user = self.full_user
self.channel = self.full_channel
self.package_id = self.full_package_id
class RequirementsInfo(object):
def __init__(self, requires):
# {PackageReference: RequirementInfo}
self._data = {r: RequirementInfo(str(r)) for r in requires}
def copy(self):
return RequirementsInfo(self._data.keys())
def clear(self):
self._data = {}
def remove(self, *args):
for name in args:
del self._data[self._get_key(name)]
def add(self, indirect_reqs):
""" necessary to propagate from upstream the real
package requirements
"""
for r in indirect_reqs:
self._data[r] = RequirementInfo(str(r), indirect=True)
def refs(self):
""" used for updating downstream requirements with this
"""
return list(self._data.keys())
def _get_key(self, item):
for reference in self._data:
if reference.conan.name == item:
return reference
raise ConanException("No requirement matching for %s" % (item))
def __getitem__(self, item):
"""get by package name
Necessary to access from conaninfo
self.requires["Boost"].version = "2.X"
"""
return self._data[self._get_key(item)]
@property
def pkg_names(self):
return [r.conan.name for r in self._data.keys()]
@property
def sha(self):
result = []
# Remove requirements without a name, i.e. indirect transitive requirements
data = {k: v for k, v in self._data.items() if v.name}
for key in sorted(data):
result.append(data[key].sha)
return sha1('\n'.join(result).encode())
def dumps(self):
result = []
for ref in sorted(self._data):
dumped = self._data[ref].dumps()
if dumped:
result.append(dumped)
return "\n".join(result)
def unrelated_mode(self):
self.clear()
def semver_mode(self):
for r in self._data.values():
r.semver_mode()
def patch_mode(self):
for r in self._data.values():
r.patch_mode()
def minor_mode(self):
for r in self._data.values():
r.minor_mode()
def major_mode(self):
for r in self._data.values():
r.major_mode()
def base_mode(self):
for r in self._data.values():
r.base_mode()
def full_version_mode(self):
for r in self._data.values():
r.full_version_mode()
def full_recipe_mode(self):
for r in self._data.values():
r.full_recipe_mode()
def full_package_mode(self):
for r in self._data.values():
r.full_package_mode()
class RequirementsList(list):
@staticmethod
def loads(text):
return RequirementsList.deserialize(text.splitlines())
def dumps(self):
return "\n".join(self.serialize())
def serialize(self):
return [str(r) for r in sorted(self)]
@staticmethod
def deserialize(data):
return RequirementsList([PackageReference.loads(line) for line in data])
class ConanInfo(object):
def copy(self):
""" Useful for build_id implementation
"""
result = ConanInfo()
result.settings = self.settings.copy()
result.options = self.options.copy()
result.requires = self.requires.copy()
return result
@staticmethod
def create(settings, options, requires, indirect_requires):
result = ConanInfo()
result.full_settings = settings
result.settings = settings.copy()
result.full_options = options
result.options = options.copy()
result.options.clear_indirect()
result.full_requires = RequirementsList(requires)
result.requires = RequirementsInfo(requires)
result.requires.add(indirect_requires)
result.full_requires.extend(indirect_requires)
result.recipe_hash = None
result.env_values = EnvValues()
result.vs_toolset_compatible()
result.discard_build_settings()
result.default_std_matching()
return result
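    # Descriptive note (added): create() keeps the untouched inputs in the full_* fields
    # and works on copies in settings/options/requires, which are what package_id() later
    # hashes; the last three calls apply the default binary-compatibility adjustments
    # implemented further down in this class.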
@staticmethod
def loads(text):
parser = ConfigParser(text, ["settings", "full_settings", "options", "full_options",
"requires", "full_requires", "scope", "recipe_hash",
"env"], raise_unexpected_field=False)
result = ConanInfo()
result.settings = Values.loads(parser.settings)
result.full_settings = Values.loads(parser.full_settings)
result.options = OptionsValues.loads(parser.options)
result.full_options = OptionsValues.loads(parser.full_options)
result.full_requires = RequirementsList.loads(parser.full_requires)
result.requires = RequirementsInfo(result.full_requires)
result.recipe_hash = parser.recipe_hash or None
        # TODO: Missing handling of parsing of requires, but not necessary now
result.env_values = EnvValues.loads(parser.env)
return result
def dumps(self):
def indent(text):
if not text:
return ""
return '\n'.join(" " + line for line in text.splitlines())
result = list()
result.append("[settings]")
result.append(indent(self.settings.dumps()))
result.append("\n[requires]")
result.append(indent(self.requires.dumps()))
result.append("\n[options]")
result.append(indent(self.options.dumps()))
result.append("\n[full_settings]")
result.append(indent(self.full_settings.dumps()))
result.append("\n[full_requires]")
result.append(indent(self.full_requires.dumps()))
result.append("\n[full_options]")
result.append(indent(self.full_options.dumps()))
result.append("\n[recipe_hash]\n%s" % indent(self.recipe_hash))
result.append("\n[env]")
result.append(indent(self.env_values.dumps()))
return '\n'.join(result) + "\n"
def __eq__(self, other):
""" currently just for testing purposes
"""
return self.dumps() == other.dumps()
def __ne__(self, other):
return not self.__eq__(other)
@staticmethod
def load_file(conan_info_path):
""" load from file
"""
try:
config_text = load(conan_info_path)
except IOError:
raise ConanException("Does not exist %s" % conan_info_path)
else:
return ConanInfo.loads(config_text)
@staticmethod
def load_from_package(package_folder):
info_path = os.path.join(package_folder, CONANINFO)
return ConanInfo.load_file(info_path)
def package_id(self):
""" The package_id of a conans is the sha1 of its specific requirements,
options and settings
"""
computed_id = getattr(self, "_package_id", None)
if computed_id:
return computed_id
result = []
result.append(self.settings.sha)
        # Only the non-dev requires that are still present in self.requires are valid for options
self.options.filter_used(self.requires.pkg_names)
result.append(self.options.sha)
result.append(self.requires.sha)
self._package_id = sha1('\n'.join(result).encode())
return self._package_id
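    # Illustrative sketch (not part of the original module): the binary id is the sha1 of
    # the three partial digests computed above, so two builds share a package_id exactly
    # when their reduced settings, used options and requirement info coincide.
    #
    #   info = ConanInfo.load_file(conaninfo_path)   # e.g. a package's conaninfo.txt
    #   binary_id = info.package_id()                # hex digest identifying the binary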
def serialize_min(self):
"""
This info will be shown in search results.
"""
conan_info_json = {"settings": dict(self.settings.serialize()),
"options": dict(self.options.serialize()["options"]),
"full_requires": self.full_requires.serialize(),
"recipe_hash": self.recipe_hash}
return conan_info_json
def header_only(self):
self.settings.clear()
self.options.clear()
self.requires.unrelated_mode()
def vs_toolset_compatible(self):
"""Default behaviour, same package for toolset v140 with compiler=Visual Studio 15 than
using Visual Studio 14"""
if self.full_settings.compiler != "Visual Studio":
return
toolsets_versions = {
"v141": "15",
"v140": "14",
"v120": "12",
"v110": "11",
"v100": "10",
"v90": "9",
"v80": "8"}
toolset = str(self.full_settings.compiler.toolset)
version = toolsets_versions.get(toolset)
if version is not None:
self.settings.compiler.version = version
del self.settings.compiler.toolset
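    # Example (comment only): compiler=Visual Studio 15 with compiler.toolset=v140 is
    # reduced above to compiler.version=14 with no toolset, so its package_id matches a
    # plain Visual Studio 14 build.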
def vs_toolset_incompatible(self):
"""Will generate different packages for v140 and visual 15 than the visual 14"""
if self.full_settings.compiler != "Visual Studio":
return
self.settings.compiler.version = self.full_settings.compiler.version
self.settings.compiler.toolset = self.full_settings.compiler.toolset
def discard_build_settings(self):
# When os is defined, os_build is irrelevant for the consumer.
        # Only when os_build stands alone (installers, etc.) does it have to be present in the package_id
if self.full_settings.os and self.full_settings.os_build:
del self.settings.os_build
if self.full_settings.arch and self.full_settings.arch_build:
del self.settings.arch_build
def include_build_settings(self):
self.settings.os_build = self.full_settings.os_build
self.settings.arch_build = self.full_settings.arch_build
def default_std_matching(self):
"""
        If we are building with gcc 7 and specify -s cppstd=gnu14, that is already the
        compiler's default, so it is equivalent to specifying None and the packages are the same
"""
if self.full_settings.cppstd and \
self.full_settings.compiler and \
self.full_settings.compiler.version:
default = cppstd_default(str(self.full_settings.compiler),
str(self.full_settings.compiler.version))
if default == str(self.full_settings.cppstd):
self.settings.cppstd = None
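    # Example (comment only): if cppstd_default("gcc", "7") returns "gnu14", building with
    # -s cppstd=gnu14 hashes the same as leaving cppstd unset, so both yield one package_id.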
def default_std_non_matching(self):
if self.full_settings.cppstd:
self.settings.cppstd = self.full_settings.cppstd
| mit | -8,335,431,246,790,530,000 | 32.7733 | 95 | 0.598747 | false |