repo_name stringlengths 5 100 | path stringlengths 4 299 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1.03M | license stringclasses 15 values | hash int64 -9,223,351,895,964,839,000 9,223,297,778B | line_mean float64 3.17 100 | line_max int64 7 1k | alpha_frac float64 0.25 0.98 | autogenerated bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|
Razi91/BiblioTeKa | user/migrations/0001_initial.py | 1 | 3850 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('books', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Client',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('birthday', models.DateField()),
('pesel', models.CharField(max_length=11)),
('credits', models.DecimalField(decimal_places=2, max_digits=5)),
('user', models.OneToOneField(related_name='client', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Loan',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('date', models.DateTimeField(default=datetime.datetime.now)),
('returned', models.DateTimeField(null=True)),
('book', models.ForeignKey(to='books.BookEntity')),
('client', models.ForeignKey(related_name='loans', to='user.Client')),
('pricing', models.ForeignKey(to='books.Pricing')),
],
),
migrations.CreateModel(
name='Reservation',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('date', models.DateTimeField(default=datetime.datetime.now)),
('book', models.ForeignKey(to='books.BookEdition', null=True)),
('client', models.ForeignKey(related_name='reservations', to='user.Client')),
('pricing', models.ForeignKey(to='books.Pricing')),
],
),
migrations.CreateModel(
name='Staff',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Subscription',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('name', models.CharField(max_length=32)),
('prize', models.DecimalField(decimal_places=2, max_digits=5)),
('credits', models.DecimalField(decimal_places=2, max_digits=5)),
],
),
migrations.CreateModel(
name='SubscriptionActive',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('credits', models.DecimalField(decimal_places=2, max_digits=5)),
('begin', models.DateField(default=datetime.datetime.now)),
('end', models.DateField()),
('client', models.ForeignKey(to='user.Client')),
('subscription', models.ForeignKey(to='user.Subscription')),
],
),
migrations.AddField(
model_name='reservation',
name='subscription',
field=models.ForeignKey(to='user.SubscriptionActive', null=True),
),
migrations.AddField(
model_name='reservation',
name='title',
field=models.ForeignKey(to='books.BookTitle'),
),
migrations.AddField(
model_name='loan',
name='subscription',
field=models.ForeignKey(to='user.SubscriptionActive', null=True),
),
]
| gpl-2.0 | -7,719,696,728,595,929,000 | 41.777778 | 114 | 0.554286 | false |
pshchelo/heat | heat/engine/resources/aws/autoscaling/scaling_policy.py | 1 | 3521 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import six
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.heat import scaling_policy as heat_sp
LOG = logging.getLogger(__name__)
class AWSScalingPolicy(heat_sp.AutoScalingPolicy):
PROPERTIES = (
AUTO_SCALING_GROUP_NAME, SCALING_ADJUSTMENT, ADJUSTMENT_TYPE,
COOLDOWN, MIN_ADJUSTMENT_STEP,
) = (
'AutoScalingGroupName', 'ScalingAdjustment', 'AdjustmentType',
'Cooldown', 'MinAdjustmentStep',
)
EXACT_CAPACITY, CHANGE_IN_CAPACITY, PERCENT_CHANGE_IN_CAPACITY = (
'ExactCapacity', 'ChangeInCapacity', 'PercentChangeInCapacity')
ATTRIBUTES = (
ALARM_URL,
) = (
'AlarmUrl',
)
properties_schema = {
AUTO_SCALING_GROUP_NAME: properties.Schema(
properties.Schema.STRING,
_('AutoScaling group name to apply policy to.'),
required=True
),
SCALING_ADJUSTMENT: properties.Schema(
properties.Schema.INTEGER,
_('Size of adjustment.'),
required=True,
update_allowed=True
),
ADJUSTMENT_TYPE: properties.Schema(
properties.Schema.STRING,
_('Type of adjustment (absolute or percentage).'),
required=True,
constraints=[
constraints.AllowedValues([CHANGE_IN_CAPACITY,
EXACT_CAPACITY,
PERCENT_CHANGE_IN_CAPACITY]),
],
update_allowed=True
),
COOLDOWN: properties.Schema(
properties.Schema.INTEGER,
_('Cooldown period, in seconds.'),
update_allowed=True
),
MIN_ADJUSTMENT_STEP: properties.Schema(
properties.Schema.INTEGER,
_('Minimum number of resources that are added or removed '
'when the AutoScaling group scales up or down. This can '
'be used only when specifying PercentChangeInCapacity '
'for the AdjustmentType property.'),
constraints=[
constraints.Range(
min=0,
),
],
update_allowed=True
),
}
attributes_schema = {
ALARM_URL: attributes.Schema(
_("A signed url to handle the alarm. (Heat extension)."),
type=attributes.Schema.STRING
),
}
def _get_adjustement_type(self):
return self.properties[self.ADJUSTMENT_TYPE]
def FnGetRefId(self):
if self.resource_id is not None:
return six.text_type(self._get_signed_url())
else:
return six.text_type(self.name)
def resource_mapping():
return {
'AWS::AutoScaling::ScalingPolicy': AWSScalingPolicy,
}
| apache-2.0 | 1,899,387,159,158,926,800 | 31.601852 | 78 | 0.599546 | false |
barct/odoo-coop | electric_utility/models/services.py | 1 | 1284 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields, api
class ServiceCategoryGroup(models.Model):
_name = "electric_utility.service_category_group"
code = fields.Char("Code", length=7)
name = fields.Char("Name")
segments = fields.Char("Segments")
def define_segment(self, value):
level = 1
if self.segments:
segs = self.segments.split(",")
s = None
for s in segs:
if value <= float(s):
break
level += 1
return self.code, level
def get_segments(self):
self.ensure_one()
class ServiceCategory(models.Model):
_name = "electric_utility.service_category"
code = fields.Char("Code", length=7)
ersep_code = fields.Char("ERSeP Code", length=7)
group_id = fields.Many2one("electric_utility.service_category_group")
name = fields.Char("Name")
_sql_constraints = [('service_category_unique_keys',
'unique(code)', 'Code must be unique!'), ]
| gpl-3.0 | 2,807,370,585,143,536,600 | 31.923077 | 78 | 0.522586 | false |
daineseh/kodi-plugin.video.ted-talks-chinese | youtube_dl/extractor/vevo.py | 14 | 17113 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_etree_fromstring,
compat_str,
compat_urlparse,
)
from ..utils import (
ExtractorError,
int_or_none,
sanitized_Request,
parse_iso8601,
)
class VevoBaseIE(InfoExtractor):
def _extract_json(self, webpage, video_id, item):
return self._parse_json(
self._search_regex(
r'window\.__INITIAL_STORE__\s*=\s*({.+?});\s*</script>',
webpage, 'initial store'),
video_id)['default'][item]
class VevoIE(VevoBaseIE):
'''
Accepts urls from vevo.com or in the format 'vevo:{id}'
(currently used by MTVIE and MySpaceIE)
'''
_VALID_URL = r'''(?x)
(?:https?://www\.vevo\.com/watch/(?!playlist|genre)(?:[^/]+/(?:[^/]+/)?)?|
https?://cache\.vevo\.com/m/html/embed\.html\?video=|
https?://videoplayer\.vevo\.com/embed/embedded\?videoId=|
vevo:)
(?P<id>[^&?#]+)'''
_TESTS = [{
'url': 'http://www.vevo.com/watch/hurts/somebody-to-die-for/GB1101300280',
'md5': '95ee28ee45e70130e3ab02b0f579ae23',
'info_dict': {
'id': 'GB1101300280',
'ext': 'mp4',
'title': 'Hurts - Somebody to Die For',
'timestamp': 1372057200,
'upload_date': '20130624',
'uploader': 'Hurts',
'track': 'Somebody to Die For',
'artist': 'Hurts',
'genre': 'Pop',
},
'expected_warnings': ['Unable to download SMIL file'],
}, {
'note': 'v3 SMIL format',
'url': 'http://www.vevo.com/watch/cassadee-pope/i-wish-i-could-break-your-heart/USUV71302923',
'md5': 'f6ab09b034f8c22969020b042e5ac7fc',
'info_dict': {
'id': 'USUV71302923',
'ext': 'mp4',
'title': 'Cassadee Pope - I Wish I Could Break Your Heart',
'timestamp': 1392796919,
'upload_date': '20140219',
'uploader': 'Cassadee Pope',
'track': 'I Wish I Could Break Your Heart',
'artist': 'Cassadee Pope',
'genre': 'Country',
},
'expected_warnings': ['Unable to download SMIL file'],
}, {
'note': 'Age-limited video',
'url': 'https://www.vevo.com/watch/justin-timberlake/tunnel-vision-explicit/USRV81300282',
'info_dict': {
'id': 'USRV81300282',
'ext': 'mp4',
'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
'age_limit': 18,
'timestamp': 1372888800,
'upload_date': '20130703',
'uploader': 'Justin Timberlake',
'track': 'Tunnel Vision (Explicit)',
'artist': 'Justin Timberlake',
'genre': 'Pop',
},
'expected_warnings': ['Unable to download SMIL file'],
}, {
'note': 'No video_info',
'url': 'http://www.vevo.com/watch/k-camp-1/Till-I-Die/USUV71503000',
'md5': '8b83cc492d72fc9cf74a02acee7dc1b0',
'info_dict': {
'id': 'USUV71503000',
'ext': 'mp4',
'title': 'K Camp - Till I Die',
'age_limit': 18,
'timestamp': 1449468000,
'upload_date': '20151207',
'uploader': 'K Camp',
'track': 'Till I Die',
'artist': 'K Camp',
'genre': 'Rap/Hip-Hop',
},
}, {
'note': 'Only available via webpage',
'url': 'http://www.vevo.com/watch/GBUV71600656',
'md5': '67e79210613865b66a47c33baa5e37fe',
'info_dict': {
'id': 'GBUV71600656',
'ext': 'mp4',
'title': 'ABC - Viva Love',
'age_limit': 0,
'timestamp': 1461830400,
'upload_date': '20160428',
'uploader': 'ABC',
'track': 'Viva Love',
'artist': 'ABC',
'genre': 'Pop',
},
'expected_warnings': ['Failed to download video versions info'],
}, {
# no genres available
'url': 'http://www.vevo.com/watch/INS171400764',
'only_matching': True,
}]
_SMIL_BASE_URL = 'http://smil.lvl3.vevo.com'
_SOURCE_TYPES = {
0: 'youtube',
1: 'brightcove',
2: 'http',
3: 'hls_ios',
4: 'hls',
5: 'smil', # http
7: 'f4m_cc',
8: 'f4m_ak',
9: 'f4m_l3',
10: 'ism',
13: 'smil', # rtmp
18: 'dash',
}
_VERSIONS = {
0: 'youtube', # only in AuthenticateVideo videoVersions
1: 'level3',
2: 'akamai',
3: 'level3',
4: 'amazon',
}
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
formats = []
els = smil.findall('.//{http://www.w3.org/2001/SMIL20/Language}video')
for el in els:
src = el.attrib['src']
m = re.match(r'''(?xi)
(?P<ext>[a-z0-9]+):
(?P<path>
[/a-z0-9]+ # The directory and main part of the URL
_(?P<tbr>[0-9]+)k
_(?P<width>[0-9]+)x(?P<height>[0-9]+)
_(?P<vcodec>[a-z0-9]+)
_(?P<vbr>[0-9]+)
_(?P<acodec>[a-z0-9]+)
_(?P<abr>[0-9]+)
\.[a-z0-9]+ # File extension
)''', src)
if not m:
continue
format_url = self._SMIL_BASE_URL + m.group('path')
formats.append({
'url': format_url,
'format_id': 'smil_' + m.group('tbr'),
'vcodec': m.group('vcodec'),
'acodec': m.group('acodec'),
'tbr': int(m.group('tbr')),
'vbr': int(m.group('vbr')),
'abr': int(m.group('abr')),
'ext': m.group('ext'),
'width': int(m.group('width')),
'height': int(m.group('height')),
})
return formats
def _initialize_api(self, video_id):
req = sanitized_Request(
'http://www.vevo.com/auth', data=b'')
webpage = self._download_webpage(
req, None,
note='Retrieving oauth token',
errnote='Unable to retrieve oauth token')
if 'THIS PAGE IS CURRENTLY UNAVAILABLE IN YOUR REGION' in webpage:
self.raise_geo_restricted(
'%s said: This page is currently unavailable in your region' % self.IE_NAME)
auth_info = self._parse_json(webpage, video_id)
self._api_url_template = self.http_scheme() + '//apiv2.vevo.com/%s?token=' + auth_info['access_token']
def _call_api(self, path, *args, **kwargs):
return self._download_json(self._api_url_template % path, *args, **kwargs)
def _real_extract(self, url):
video_id = self._match_id(url)
json_url = 'http://api.vevo.com/VideoService/AuthenticateVideo?isrc=%s' % video_id
response = self._download_json(
json_url, video_id, 'Downloading video info',
'Unable to download info', fatal=False) or {}
video_info = response.get('video') or {}
artist = None
featured_artist = None
uploader = None
view_count = None
formats = []
if not video_info:
try:
self._initialize_api(video_id)
except ExtractorError:
ytid = response.get('errorInfo', {}).get('ytid')
if ytid:
self.report_warning(
'Video is geoblocked, trying with the YouTube video %s' % ytid)
return self.url_result(ytid, 'Youtube', ytid)
raise
video_info = self._call_api(
'video/%s' % video_id, video_id, 'Downloading api video info',
'Failed to download video info')
video_versions = self._call_api(
'video/%s/streams' % video_id, video_id,
'Downloading video versions info',
'Failed to download video versions info',
fatal=False)
# Some videos are only available via webpage (e.g.
# https://github.com/rg3/youtube-dl/issues/9366)
if not video_versions:
webpage = self._download_webpage(url, video_id)
video_versions = self._extract_json(webpage, video_id, 'streams')[video_id][0]
timestamp = parse_iso8601(video_info.get('releaseDate'))
artists = video_info.get('artists')
if artists:
artist = uploader = artists[0]['name']
view_count = int_or_none(video_info.get('views', {}).get('total'))
for video_version in video_versions:
version = self._VERSIONS.get(video_version['version'])
version_url = video_version.get('url')
if not version_url:
continue
if '.ism' in version_url:
continue
elif '.mpd' in version_url:
formats.extend(self._extract_mpd_formats(
version_url, video_id, mpd_id='dash-%s' % version,
note='Downloading %s MPD information' % version,
errnote='Failed to download %s MPD information' % version,
fatal=False))
elif '.m3u8' in version_url:
formats.extend(self._extract_m3u8_formats(
version_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls-%s' % version,
note='Downloading %s m3u8 information' % version,
errnote='Failed to download %s m3u8 information' % version,
fatal=False))
else:
m = re.search(r'''(?xi)
_(?P<width>[0-9]+)x(?P<height>[0-9]+)
_(?P<vcodec>[a-z0-9]+)
_(?P<vbr>[0-9]+)
_(?P<acodec>[a-z0-9]+)
_(?P<abr>[0-9]+)
\.(?P<ext>[a-z0-9]+)''', version_url)
if not m:
continue
formats.append({
'url': version_url,
'format_id': 'http-%s-%s' % (version, video_version['quality']),
'vcodec': m.group('vcodec'),
'acodec': m.group('acodec'),
'vbr': int(m.group('vbr')),
'abr': int(m.group('abr')),
'ext': m.group('ext'),
'width': int(m.group('width')),
'height': int(m.group('height')),
})
else:
timestamp = int_or_none(self._search_regex(
r'/Date\((\d+)\)/',
video_info['releaseDate'], 'release date', fatal=False),
scale=1000)
artists = video_info.get('mainArtists')
if artists:
artist = uploader = artists[0]['artistName']
featured_artists = video_info.get('featuredArtists')
if featured_artists:
featured_artist = featured_artists[0]['artistName']
smil_parsed = False
for video_version in video_info['videoVersions']:
version = self._VERSIONS.get(video_version['version'])
if version == 'youtube':
continue
else:
source_type = self._SOURCE_TYPES.get(video_version['sourceType'])
renditions = compat_etree_fromstring(video_version['data'])
if source_type == 'http':
for rend in renditions.findall('rendition'):
attr = rend.attrib
formats.append({
'url': attr['url'],
'format_id': 'http-%s-%s' % (version, attr['name']),
'height': int_or_none(attr.get('frameheight')),
'width': int_or_none(attr.get('frameWidth')),
'tbr': int_or_none(attr.get('totalBitrate')),
'vbr': int_or_none(attr.get('videoBitrate')),
'abr': int_or_none(attr.get('audioBitrate')),
'vcodec': attr.get('videoCodec'),
'acodec': attr.get('audioCodec'),
})
elif source_type == 'hls':
formats.extend(self._extract_m3u8_formats(
renditions.find('rendition').attrib['url'], video_id,
'mp4', 'm3u8_native', m3u8_id='hls-%s' % version,
note='Downloading %s m3u8 information' % version,
errnote='Failed to download %s m3u8 information' % version,
fatal=False))
elif source_type == 'smil' and version == 'level3' and not smil_parsed:
formats.extend(self._extract_smil_formats(
renditions.find('rendition').attrib['url'], video_id, False))
smil_parsed = True
self._sort_formats(formats)
track = video_info['title']
if featured_artist:
artist = '%s ft. %s' % (artist, featured_artist)
title = '%s - %s' % (artist, track) if artist else track
genres = video_info.get('genres')
genre = (
genres[0] if genres and isinstance(genres, list) and
isinstance(genres[0], compat_str) else None)
is_explicit = video_info.get('isExplicit')
if is_explicit is True:
age_limit = 18
elif is_explicit is False:
age_limit = 0
else:
age_limit = None
duration = video_info.get('duration')
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': video_info.get('imageUrl') or video_info.get('thumbnailUrl'),
'timestamp': timestamp,
'uploader': uploader,
'duration': duration,
'view_count': view_count,
'age_limit': age_limit,
'track': track,
'artist': uploader,
'genre': genre,
}
class VevoPlaylistIE(VevoBaseIE):
_VALID_URL = r'https?://www\.vevo\.com/watch/(?P<kind>playlist|genre)/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'http://www.vevo.com/watch/playlist/dadbf4e7-b99f-4184-9670-6f0e547b6a29',
'info_dict': {
'id': 'dadbf4e7-b99f-4184-9670-6f0e547b6a29',
'title': 'Best-Of: Birdman',
},
'playlist_count': 10,
}, {
'url': 'http://www.vevo.com/watch/genre/rock',
'info_dict': {
'id': 'rock',
'title': 'Rock',
},
'playlist_count': 20,
}, {
'url': 'http://www.vevo.com/watch/playlist/dadbf4e7-b99f-4184-9670-6f0e547b6a29?index=0',
'md5': '32dcdfddddf9ec6917fc88ca26d36282',
'info_dict': {
'id': 'USCMV1100073',
'ext': 'mp4',
'title': 'Birdman - Y.U. MAD',
'timestamp': 1323417600,
'upload_date': '20111209',
'uploader': 'Birdman',
'track': 'Y.U. MAD',
'artist': 'Birdman',
'genre': 'Rap/Hip-Hop',
},
'expected_warnings': ['Unable to download SMIL file'],
}, {
'url': 'http://www.vevo.com/watch/genre/rock?index=0',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
playlist_kind = mobj.group('kind')
webpage = self._download_webpage(url, playlist_id)
qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
index = qs.get('index', [None])[0]
if index:
video_id = self._search_regex(
r'<meta[^>]+content=(["\'])vevo://video/(?P<id>.+?)\1[^>]*>',
webpage, 'video id', default=None, group='id')
if video_id:
return self.url_result('vevo:%s' % video_id, VevoIE.ie_key())
playlists = self._extract_json(webpage, playlist_id, '%ss' % playlist_kind)
playlist = (list(playlists.values())[0]
if playlist_kind == 'playlist' else playlists[playlist_id])
entries = [
self.url_result('vevo:%s' % src, VevoIE.ie_key())
for src in playlist['isrcs']]
return self.playlist_result(
entries, playlist.get('playlistId') or playlist_id,
playlist.get('name'), playlist.get('description'))
| gpl-2.0 | 1,389,593,283,926,902,800 | 37.804989 | 118 | 0.476772 | false |
MaximNevrov/neutron | neutron/extensions/l3_ext_ha_mode.py | 4 | 4105 | # Copyright (C) 2014 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions
HA_INFO = 'ha'
EXTENDED_ATTRIBUTES_2_0 = {
'routers': {
HA_INFO: {'allow_post': True, 'allow_put': True,
'default': attributes.ATTR_NOT_SPECIFIED, 'is_visible': True,
'enforce_policy': True,
'convert_to': attributes.convert_to_boolean_if_not_none}
}
}
class HAmodeUpdateOfDvrNotSupported(NotImplementedError):
message = _("Currently update of HA mode for a distributed router is "
"not supported.")
class DVRmodeUpdateOfHaNotSupported(NotImplementedError):
message = _("Currently update of distributed mode for an HA router is "
"not supported.")
class HAmodeUpdateOfDvrHaNotSupported(NotImplementedError):
message = _("Currently update of HA mode for a DVR/HA router is "
"not supported.")
class DVRmodeUpdateOfDvrHaNotSupported(NotImplementedError):
message = _("Currently update of distributed mode for a DVR/HA router "
"is not supported")
class UpdateToDvrHamodeNotSupported(NotImplementedError):
message = _("Currently updating a router to DVR/HA is not supported.")
class UpdateToNonDvrHamodeNotSupported(NotImplementedError):
message = _("Currently updating a router from DVR/HA to non-DVR "
" non-HA is not supported.")
class MaxVRIDAllocationTriesReached(exceptions.NeutronException):
message = _("Failed to allocate a VRID in the network %(network_id)s "
"for the router %(router_id)s after %(max_tries)s tries.")
class NoVRIDAvailable(exceptions.Conflict):
message = _("No more Virtual Router Identifier (VRID) available when "
"creating router %(router_id)s. The limit of number "
"of HA Routers per tenant is 254.")
class HANetworkCIDRNotValid(exceptions.NeutronException):
message = _("The HA Network CIDR specified in the configuration file "
"isn't valid; %(cidr)s.")
class HANotEnoughAvailableAgents(exceptions.NeutronException):
message = _("Not enough l3 agents available to ensure HA. Minimum "
"required %(min_agents)s, available %(num_agents)s.")
class HAMaximumAgentsNumberNotValid(exceptions.NeutronException):
message = _("max_l3_agents_per_router %(max_agents)s config parameter "
"is not valid. It has to be greater than or equal to "
"min_l3_agents_per_router %(min_agents)s.")
class HAMinimumAgentsNumberNotValid(exceptions.NeutronException):
message = (_("min_l3_agents_per_router config parameter is not valid. "
"It has to be equal to or more than %s for HA.") %
constants.MINIMUM_AGENTS_FOR_HA)
class L3_ext_ha_mode(extensions.ExtensionDescriptor):
"""Extension class supporting virtual router in HA mode."""
@classmethod
def get_name(cls):
return "HA Router extension"
@classmethod
def get_alias(cls):
return constants.L3_HA_MODE_EXT_ALIAS
@classmethod
def get_description(cls):
return "Add HA capability to routers."
@classmethod
def get_updated(cls):
return "2014-04-26T00:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
| apache-2.0 | 1,659,035,712,149,861,000 | 33.788136 | 79 | 0.68039 | false |
pkill-nine/qutebrowser | qutebrowser/keyinput/modeparsers.py | 2 | 11880 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""KeyChainParser for "hint" and "normal" modes.
Module attributes:
STARTCHARS: Possible chars for starting a commandline input.
"""
import traceback
from PyQt5.QtCore import pyqtSlot, Qt
from qutebrowser.commands import cmdexc
from qutebrowser.config import config
from qutebrowser.keyinput import keyparser
from qutebrowser.utils import usertypes, log, message, objreg, utils
STARTCHARS = ":/?"
LastPress = usertypes.enum('LastPress', ['none', 'filtertext', 'keystring'])
class NormalKeyParser(keyparser.CommandKeyParser):
"""KeyParser for normal mode with added STARTCHARS detection and more.
Attributes:
_partial_timer: Timer to clear partial keypresses.
"""
def __init__(self, win_id, parent=None):
super().__init__(win_id, parent, supports_count=True,
supports_chains=True)
self.read_config('normal')
self._partial_timer = usertypes.Timer(self, 'partial-match')
self._partial_timer.setSingleShot(True)
self._inhibited = False
self._inhibited_timer = usertypes.Timer(self, 'normal-inhibited')
self._inhibited_timer.setSingleShot(True)
def __repr__(self):
return utils.get_repr(self)
def _handle_single_key(self, e):
"""Override _handle_single_key to abort if the key is a startchar.
Args:
e: the KeyPressEvent from Qt.
Return:
A self.Match member.
"""
txt = e.text().strip()
if self._inhibited:
self._debug_log("Ignoring key '{}', because the normal mode is "
"currently inhibited.".format(txt))
return self.Match.none
match = super()._handle_single_key(e)
if match == self.Match.partial:
timeout = config.get('input', 'partial-timeout')
if timeout != 0:
self._partial_timer.setInterval(timeout)
self._partial_timer.timeout.connect(self._clear_partial_match)
self._partial_timer.start()
return match
def set_inhibited_timeout(self, timeout):
if timeout != 0:
self._debug_log("Inhibiting the normal mode for {}ms.".format(
timeout))
self._inhibited = True
self._inhibited_timer.setInterval(timeout)
self._inhibited_timer.timeout.connect(self._clear_inhibited)
self._inhibited_timer.start()
@pyqtSlot()
def _clear_partial_match(self):
"""Clear a partial keystring after a timeout."""
self._debug_log("Clearing partial keystring {}".format(
self._keystring))
self._keystring = ''
self.keystring_updated.emit(self._keystring)
@pyqtSlot()
def _clear_inhibited(self):
"""Reset inhibition state after a timeout."""
self._debug_log("Releasing inhibition state of normal mode.")
self._inhibited = False
@pyqtSlot()
def _stop_timers(self):
super()._stop_timers()
self._partial_timer.stop()
try:
self._partial_timer.timeout.disconnect(self._clear_partial_match)
except TypeError:
# no connections
pass
self._inhibited_timer.stop()
try:
self._inhibited_timer.timeout.disconnect(self._clear_inhibited)
except TypeError:
# no connections
pass
class PromptKeyParser(keyparser.CommandKeyParser):
"""KeyParser for yes/no prompts."""
def __init__(self, win_id, parent=None):
super().__init__(win_id, parent, supports_count=False,
supports_chains=True)
# We don't want an extra section for this in the config, so we just
# abuse the prompt section.
self.read_config('prompt')
def __repr__(self):
return utils.get_repr(self)
class HintKeyParser(keyparser.CommandKeyParser):
"""KeyChainParser for hints.
Attributes:
_filtertext: The text to filter with.
_last_press: The nature of the last keypress, a LastPress member.
"""
def __init__(self, win_id, parent=None):
super().__init__(win_id, parent, supports_count=False,
supports_chains=True)
self._filtertext = ''
self._last_press = LastPress.none
self.read_config('hint')
self.keystring_updated.connect(self.on_keystring_updated)
def _handle_special_key(self, e):
"""Override _handle_special_key to handle string filtering.
Return True if the keypress has been handled, and False if not.
Args:
e: the KeyPressEvent from Qt.
Return:
True if event has been handled, False otherwise.
"""
log.keyboard.debug("Got special key 0x{:x} text {}".format(
e.key(), e.text()))
hintmanager = objreg.get('hintmanager', scope='tab',
window=self._win_id, tab='current')
if e.key() == Qt.Key_Backspace:
log.keyboard.debug("Got backspace, mode {}, filtertext '{}', "
"keystring '{}'".format(self._last_press,
self._filtertext,
self._keystring))
if self._last_press == LastPress.filtertext and self._filtertext:
self._filtertext = self._filtertext[:-1]
hintmanager.filter_hints(self._filtertext)
return True
elif self._last_press == LastPress.keystring and self._keystring:
self._keystring = self._keystring[:-1]
self.keystring_updated.emit(self._keystring)
if not self._keystring and self._filtertext:
# Switch back to hint filtering mode (this can happen only
# in numeric mode after the number has been deleted).
hintmanager.filter_hints(self._filtertext)
self._last_press = LastPress.filtertext
return True
else:
return super()._handle_special_key(e)
elif hintmanager.current_mode() != 'number':
return super()._handle_special_key(e)
elif not e.text():
return super()._handle_special_key(e)
else:
self._filtertext += e.text()
hintmanager.filter_hints(self._filtertext)
self._last_press = LastPress.filtertext
return True
def handle(self, e):
"""Handle a new keypress and call the respective handlers.
Args:
e: the KeyPressEvent from Qt
Returns:
True if the match has been handled, False otherwise.
"""
match = self._handle_single_key(e)
if match == self.Match.partial:
self.keystring_updated.emit(self._keystring)
self._last_press = LastPress.keystring
return True
elif match == self.Match.definitive:
self._last_press = LastPress.none
return True
elif match == self.Match.other:
pass
elif match == self.Match.none:
# We couldn't find a keychain so we check if it's a special key.
return self._handle_special_key(e)
else:
raise ValueError("Got invalid match type {}!".format(match))
def execute(self, cmdstr, keytype, count=None):
"""Handle a completed keychain."""
if not isinstance(keytype, self.Type):
raise TypeError("Type {} is no Type member!".format(keytype))
if keytype == self.Type.chain:
hintmanager = objreg.get('hintmanager', scope='tab',
window=self._win_id, tab='current')
hintmanager.handle_partial_key(cmdstr)
else:
# execute as command
super().execute(cmdstr, keytype, count)
def update_bindings(self, strings, preserve_filter=False):
"""Update bindings when the hint strings changed.
Args:
strings: A list of hint strings.
preserve_filter: Whether to keep the current value of
`self._filtertext`.
"""
self.bindings = {s: s for s in strings}
if not preserve_filter:
self._filtertext = ''
@pyqtSlot(str)
def on_keystring_updated(self, keystr):
"""Update hintmanager when the keystring was updated."""
hintmanager = objreg.get('hintmanager', scope='tab',
window=self._win_id, tab='current')
hintmanager.handle_partial_key(keystr)
class CaretKeyParser(keyparser.CommandKeyParser):
"""KeyParser for caret mode."""
passthrough = True
def __init__(self, win_id, parent=None):
super().__init__(win_id, parent, supports_count=True,
supports_chains=True)
self.read_config('caret')
class RegisterKeyParser(keyparser.CommandKeyParser):
"""KeyParser for modes that record a register key.
Attributes:
_mode: One of KeyMode.set_mark, KeyMode.jump_mark, KeyMode.record_macro
and KeyMode.run_macro.
"""
def __init__(self, win_id, mode, parent=None):
super().__init__(win_id, parent, supports_count=False,
supports_chains=False)
self._mode = mode
self.read_config('register')
def handle(self, e):
"""Override handle to always match the next key and use the register.
Args:
e: the KeyPressEvent from Qt.
Return:
True if event has been handled, False otherwise.
"""
if super().handle(e):
return True
key = e.text()
if key == '' or utils.keyevent_to_string(e) is None:
# this is not a proper register key, let it pass and keep going
return False
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=self._win_id)
macro_recorder = objreg.get('macro-recorder')
try:
if self._mode == usertypes.KeyMode.set_mark:
tabbed_browser.set_mark(key)
elif self._mode == usertypes.KeyMode.jump_mark:
tabbed_browser.jump_mark(key)
elif self._mode == usertypes.KeyMode.record_macro:
macro_recorder.record_macro(key)
elif self._mode == usertypes.KeyMode.run_macro:
macro_recorder.run_macro(self._win_id, key)
else:
raise ValueError(
"{} is not a valid register mode".format(self._mode))
except (cmdexc.CommandMetaError, cmdexc.CommandError) as err:
message.error(str(err), stack=traceback.format_exc())
self.request_leave.emit(self._mode, "valid register key", True)
return True
@pyqtSlot(str)
def on_keyconfig_changed(self, mode):
"""RegisterKeyParser has no config section (no bindable keys)."""
pass
| gpl-3.0 | -2,268,448,873,351,734,500 | 35.109422 | 79 | 0.589731 | false |
botswana-harvard/bcpp-subject | bcpp_subject/admin/uncircumcised_admin.py | 1 | 1451 | from django.contrib import admin
from edc_base.modeladmin_mixins import audit_fieldset_tuple
from edc_base.fieldsets import Remove
from bcpp_visit_schedule.constants import T1, T2, T3
from ..admin_site import bcpp_subject_admin
from ..forms import UncircumcisedForm
from ..models import Uncircumcised
from .modeladmin_mixins import CrfModelAdminMixin
fields = ('circumcised', 'health_benefits_smc',
'future_circ', 'future_reasons_smc')
@admin.register(Uncircumcised, site=bcpp_subject_admin)
class UncircumcisedAdmin(CrfModelAdminMixin, admin.ModelAdmin):
form = UncircumcisedForm
conditional_fieldlists = {
T1: Remove(*fields),
T2: Remove(*fields),
T3: Remove(*fields),
}
fieldsets = (
(None, {
'fields': [
'subject_visit',
'circumcised',
'health_benefits_smc',
'reason_circ',
'reason_circ_other',
'future_circ',
'future_reasons_smc',
'service_facilities',
'aware_free',
]}), audit_fieldset_tuple)
radio_fields = {
'circumcised': admin.VERTICAL,
'reason_circ': admin.VERTICAL,
'future_circ': admin.VERTICAL,
'future_reasons_smc': admin.VERTICAL,
'service_facilities': admin.VERTICAL,
'aware_free': admin.VERTICAL}
filter_horizontal = ('health_benefits_smc',)
| gpl-3.0 | 4,346,488,993,507,111,400 | 29.229167 | 63 | 0.611992 | false |
postlund/home-assistant | homeassistant/components/fitbit/sensor.py | 3 | 19391 | """Support for the Fitbit API."""
import datetime
import logging
import os
import time
from fitbit import Fitbit
from fitbit.api import FitbitOauth2Client
from oauthlib.oauth2.rfc6749.errors import MismatchingStateError, MissingTokenError
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_UNIT_SYSTEM
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.icon import icon_for_battery_level
from homeassistant.util.json import load_json, save_json
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
ATTR_ACCESS_TOKEN = "access_token"
ATTR_REFRESH_TOKEN = "refresh_token"
ATTR_CLIENT_ID = "client_id"
ATTR_CLIENT_SECRET = "client_secret"
ATTR_LAST_SAVED_AT = "last_saved_at"
CONF_MONITORED_RESOURCES = "monitored_resources"
CONF_CLOCK_FORMAT = "clock_format"
ATTRIBUTION = "Data provided by Fitbit.com"
FITBIT_AUTH_CALLBACK_PATH = "/api/fitbit/callback"
FITBIT_AUTH_START = "/api/fitbit"
FITBIT_CONFIG_FILE = "fitbit.conf"
FITBIT_DEFAULT_RESOURCES = ["activities/steps"]
SCAN_INTERVAL = datetime.timedelta(minutes=30)
DEFAULT_CONFIG = {"client_id": "CLIENT_ID_HERE", "client_secret": "CLIENT_SECRET_HERE"}
FITBIT_RESOURCES_LIST = {
"activities/activityCalories": ["Activity Calories", "cal", "fire"],
"activities/calories": ["Calories", "cal", "fire"],
"activities/caloriesBMR": ["Calories BMR", "cal", "fire"],
"activities/distance": ["Distance", "", "map-marker"],
"activities/elevation": ["Elevation", "", "walk"],
"activities/floors": ["Floors", "floors", "walk"],
"activities/heart": ["Resting Heart Rate", "bpm", "heart-pulse"],
"activities/minutesFairlyActive": ["Minutes Fairly Active", "minutes", "walk"],
"activities/minutesLightlyActive": ["Minutes Lightly Active", "minutes", "walk"],
"activities/minutesSedentary": [
"Minutes Sedentary",
"minutes",
"seat-recline-normal",
],
"activities/minutesVeryActive": ["Minutes Very Active", "minutes", "run"],
"activities/steps": ["Steps", "steps", "walk"],
"activities/tracker/activityCalories": ["Tracker Activity Calories", "cal", "fire"],
"activities/tracker/calories": ["Tracker Calories", "cal", "fire"],
"activities/tracker/distance": ["Tracker Distance", "", "map-marker"],
"activities/tracker/elevation": ["Tracker Elevation", "", "walk"],
"activities/tracker/floors": ["Tracker Floors", "floors", "walk"],
"activities/tracker/minutesFairlyActive": [
"Tracker Minutes Fairly Active",
"minutes",
"walk",
],
"activities/tracker/minutesLightlyActive": [
"Tracker Minutes Lightly Active",
"minutes",
"walk",
],
"activities/tracker/minutesSedentary": [
"Tracker Minutes Sedentary",
"minutes",
"seat-recline-normal",
],
"activities/tracker/minutesVeryActive": [
"Tracker Minutes Very Active",
"minutes",
"run",
],
"activities/tracker/steps": ["Tracker Steps", "steps", "walk"],
"body/bmi": ["BMI", "BMI", "human"],
"body/fat": ["Body Fat", "%", "human"],
"body/weight": ["Weight", "", "human"],
"devices/battery": ["Battery", None, None],
"sleep/awakeningsCount": ["Awakenings Count", "times awaken", "sleep"],
"sleep/efficiency": ["Sleep Efficiency", "%", "sleep"],
"sleep/minutesAfterWakeup": ["Minutes After Wakeup", "minutes", "sleep"],
"sleep/minutesAsleep": ["Sleep Minutes Asleep", "minutes", "sleep"],
"sleep/minutesAwake": ["Sleep Minutes Awake", "minutes", "sleep"],
"sleep/minutesToFallAsleep": ["Sleep Minutes to Fall Asleep", "minutes", "sleep"],
"sleep/startTime": ["Sleep Start Time", None, "clock"],
"sleep/timeInBed": ["Sleep Time in Bed", "minutes", "hotel"],
}
FITBIT_MEASUREMENTS = {
"en_US": {
"duration": "ms",
"distance": "mi",
"elevation": "ft",
"height": "in",
"weight": "lbs",
"body": "in",
"liquids": "fl. oz.",
"blood glucose": "mg/dL",
"battery": "",
},
"en_GB": {
"duration": "milliseconds",
"distance": "kilometers",
"elevation": "meters",
"height": "centimeters",
"weight": "stone",
"body": "centimeters",
"liquids": "milliliters",
"blood glucose": "mmol/L",
"battery": "",
},
"metric": {
"duration": "milliseconds",
"distance": "kilometers",
"elevation": "meters",
"height": "centimeters",
"weight": "kilograms",
"body": "centimeters",
"liquids": "milliliters",
"blood glucose": "mmol/L",
"battery": "",
},
}
BATTERY_LEVELS = {"High": 100, "Medium": 50, "Low": 20, "Empty": 0}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(
CONF_MONITORED_RESOURCES, default=FITBIT_DEFAULT_RESOURCES
): vol.All(cv.ensure_list, [vol.In(FITBIT_RESOURCES_LIST)]),
vol.Optional(CONF_CLOCK_FORMAT, default="24H"): vol.In(["12H", "24H"]),
vol.Optional(CONF_UNIT_SYSTEM, default="default"): vol.In(
["en_GB", "en_US", "metric", "default"]
),
}
)
def request_app_setup(hass, config, add_entities, config_path, discovery_info=None):
"""Assist user with configuring the Fitbit dev application."""
configurator = hass.components.configurator
def fitbit_configuration_callback(callback_data):
"""Handle configuration updates."""
config_path = hass.config.path(FITBIT_CONFIG_FILE)
if os.path.isfile(config_path):
config_file = load_json(config_path)
if config_file == DEFAULT_CONFIG:
error_msg = (
"You didn't correctly modify fitbit.conf",
" please try again",
)
configurator.notify_errors(_CONFIGURING["fitbit"], error_msg)
else:
setup_platform(hass, config, add_entities, discovery_info)
else:
setup_platform(hass, config, add_entities, discovery_info)
start_url = f"{hass.config.api.base_url}{FITBIT_AUTH_CALLBACK_PATH}"
description = """Please create a Fitbit developer app at
https://dev.fitbit.com/apps/new.
For the OAuth 2.0 Application Type choose Personal.
Set the Callback URL to {}.
They will provide you a Client ID and secret.
These need to be saved into the file located at: {}.
Then come back here and hit the below button.
""".format(
start_url, config_path
)
submit = "I have saved my Client ID and Client Secret into fitbit.conf."
_CONFIGURING["fitbit"] = configurator.request_config(
"Fitbit",
fitbit_configuration_callback,
description=description,
submit_caption=submit,
description_image="/static/images/config_fitbit_app.png",
)
def request_oauth_completion(hass):
"""Request user complete Fitbit OAuth2 flow."""
configurator = hass.components.configurator
if "fitbit" in _CONFIGURING:
configurator.notify_errors(
_CONFIGURING["fitbit"], "Failed to register, please try again."
)
return
def fitbit_configuration_callback(callback_data):
"""Handle configuration updates."""
start_url = f"{hass.config.api.base_url}{FITBIT_AUTH_START}"
description = f"Please authorize Fitbit by visiting {start_url}"
_CONFIGURING["fitbit"] = configurator.request_config(
"Fitbit",
fitbit_configuration_callback,
description=description,
submit_caption="I have authorized Fitbit.",
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Fitbit sensor."""
config_path = hass.config.path(FITBIT_CONFIG_FILE)
if os.path.isfile(config_path):
config_file = load_json(config_path)
if config_file == DEFAULT_CONFIG:
request_app_setup(
hass, config, add_entities, config_path, discovery_info=None
)
return False
else:
save_json(config_path, DEFAULT_CONFIG)
request_app_setup(hass, config, add_entities, config_path, discovery_info=None)
return False
if "fitbit" in _CONFIGURING:
hass.components.configurator.request_done(_CONFIGURING.pop("fitbit"))
access_token = config_file.get(ATTR_ACCESS_TOKEN)
refresh_token = config_file.get(ATTR_REFRESH_TOKEN)
expires_at = config_file.get(ATTR_LAST_SAVED_AT)
if None not in (access_token, refresh_token):
authd_client = Fitbit(
config_file.get(ATTR_CLIENT_ID),
config_file.get(ATTR_CLIENT_SECRET),
access_token=access_token,
refresh_token=refresh_token,
expires_at=expires_at,
refresh_cb=lambda x: None,
)
if int(time.time()) - expires_at > 3600:
authd_client.client.refresh_token()
unit_system = config.get(CONF_UNIT_SYSTEM)
if unit_system == "default":
authd_client.system = authd_client.user_profile_get()["user"]["locale"]
if authd_client.system != "en_GB":
if hass.config.units.is_metric:
authd_client.system = "metric"
else:
authd_client.system = "en_US"
else:
authd_client.system = unit_system
dev = []
registered_devs = authd_client.get_devices()
clock_format = config.get(CONF_CLOCK_FORMAT)
for resource in config.get(CONF_MONITORED_RESOURCES):
# monitor battery for all linked FitBit devices
if resource == "devices/battery":
for dev_extra in registered_devs:
dev.append(
FitbitSensor(
authd_client,
config_path,
resource,
hass.config.units.is_metric,
clock_format,
dev_extra,
)
)
else:
dev.append(
FitbitSensor(
authd_client,
config_path,
resource,
hass.config.units.is_metric,
clock_format,
)
)
add_entities(dev, True)
else:
oauth = FitbitOauth2Client(
config_file.get(ATTR_CLIENT_ID), config_file.get(ATTR_CLIENT_SECRET)
)
redirect_uri = "{}{}".format(
hass.config.api.base_url, FITBIT_AUTH_CALLBACK_PATH
)
fitbit_auth_start_url, _ = oauth.authorize_token_url(
redirect_uri=redirect_uri,
scope=[
"activity",
"heartrate",
"nutrition",
"profile",
"settings",
"sleep",
"weight",
],
)
hass.http.register_redirect(FITBIT_AUTH_START, fitbit_auth_start_url)
hass.http.register_view(FitbitAuthCallbackView(config, add_entities, oauth))
request_oauth_completion(hass)
class FitbitAuthCallbackView(HomeAssistantView):
"""Handle OAuth finish callback requests."""
requires_auth = False
url = FITBIT_AUTH_CALLBACK_PATH
name = "api:fitbit:callback"
def __init__(self, config, add_entities, oauth):
"""Initialize the OAuth callback view."""
self.config = config
self.add_entities = add_entities
self.oauth = oauth
@callback
def get(self, request):
"""Finish OAuth callback request."""
hass = request.app["hass"]
data = request.query
response_message = """Fitbit has been successfully authorized!
You can close this window now!"""
result = None
if data.get("code") is not None:
redirect_uri = "{}{}".format(
hass.config.api.base_url, FITBIT_AUTH_CALLBACK_PATH
)
try:
result = self.oauth.fetch_access_token(data.get("code"), redirect_uri)
except MissingTokenError as error:
_LOGGER.error("Missing token: %s", error)
response_message = """Something went wrong when
attempting authenticating with Fitbit. The error
encountered was {}. Please try again!""".format(
error
)
except MismatchingStateError as error:
_LOGGER.error("Mismatched state, CSRF error: %s", error)
response_message = """Something went wrong when
attempting authenticating with Fitbit. The error
encountered was {}. Please try again!""".format(
error
)
else:
_LOGGER.error("Unknown error when authing")
response_message = """Something went wrong when
attempting authenticating with Fitbit.
An unknown error occurred. Please try again!
"""
if result is None:
_LOGGER.error("Unknown error when authing")
response_message = """Something went wrong when
attempting authenticating with Fitbit.
An unknown error occurred. Please try again!
"""
html_response = """<html><head><title>Fitbit Auth</title></head>
<body><h1>{}</h1></body></html>""".format(
response_message
)
if result:
config_contents = {
ATTR_ACCESS_TOKEN: result.get("access_token"),
ATTR_REFRESH_TOKEN: result.get("refresh_token"),
ATTR_CLIENT_ID: self.oauth.client_id,
ATTR_CLIENT_SECRET: self.oauth.client_secret,
ATTR_LAST_SAVED_AT: int(time.time()),
}
save_json(hass.config.path(FITBIT_CONFIG_FILE), config_contents)
hass.async_add_job(setup_platform, hass, self.config, self.add_entities)
return html_response
class FitbitSensor(Entity):
"""Implementation of a Fitbit sensor."""
def __init__(
self, client, config_path, resource_type, is_metric, clock_format, extra=None
):
"""Initialize the Fitbit sensor."""
self.client = client
self.config_path = config_path
self.resource_type = resource_type
self.is_metric = is_metric
self.clock_format = clock_format
self.extra = extra
self._name = FITBIT_RESOURCES_LIST[self.resource_type][0]
if self.extra:
self._name = "{0} Battery".format(self.extra.get("deviceVersion"))
unit_type = FITBIT_RESOURCES_LIST[self.resource_type][1]
if unit_type == "":
split_resource = self.resource_type.split("/")
try:
measurement_system = FITBIT_MEASUREMENTS[self.client.system]
except KeyError:
if self.is_metric:
measurement_system = FITBIT_MEASUREMENTS["metric"]
else:
measurement_system = FITBIT_MEASUREMENTS["en_US"]
unit_type = measurement_system[split_resource[-1]]
self._unit_of_measurement = unit_type
self._state = 0
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if self.resource_type == "devices/battery" and self.extra:
battery_level = BATTERY_LEVELS[self.extra.get("battery")]
return icon_for_battery_level(battery_level=battery_level, charging=None)
return "mdi:{}".format(FITBIT_RESOURCES_LIST[self.resource_type][2])
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {}
attrs[ATTR_ATTRIBUTION] = ATTRIBUTION
if self.extra:
attrs["model"] = self.extra.get("deviceVersion")
attrs["type"] = self.extra.get("type").lower()
return attrs
def update(self):
"""Get the latest data from the Fitbit API and update the states."""
if self.resource_type == "devices/battery" and self.extra:
self._state = self.extra.get("battery")
else:
container = self.resource_type.replace("/", "-")
response = self.client.time_series(self.resource_type, period="7d")
raw_state = response[container][-1].get("value")
if self.resource_type == "activities/distance":
self._state = format(float(raw_state), ".2f")
elif self.resource_type == "activities/tracker/distance":
self._state = format(float(raw_state), ".2f")
elif self.resource_type == "body/bmi":
self._state = format(float(raw_state), ".1f")
elif self.resource_type == "body/fat":
self._state = format(float(raw_state), ".1f")
elif self.resource_type == "body/weight":
self._state = format(float(raw_state), ".1f")
elif self.resource_type == "sleep/startTime":
if raw_state == "":
self._state = "-"
elif self.clock_format == "12H":
hours, minutes = raw_state.split(":")
hours, minutes = int(hours), int(minutes)
setting = "AM"
if hours > 12:
setting = "PM"
hours -= 12
elif hours == 0:
hours = 12
self._state = f"{hours}:{minutes:02d} {setting}"
else:
self._state = raw_state
else:
if self.is_metric:
self._state = raw_state
else:
try:
self._state = "{0:,}".format(int(raw_state))
except TypeError:
self._state = raw_state
if self.resource_type == "activities/heart":
self._state = response[container][-1].get("value").get("restingHeartRate")
token = self.client.client.session.token
config_contents = {
ATTR_ACCESS_TOKEN: token.get("access_token"),
ATTR_REFRESH_TOKEN: token.get("refresh_token"),
ATTR_CLIENT_ID: self.client.client.client_id,
ATTR_CLIENT_SECRET: self.client.client.client_secret,
ATTR_LAST_SAVED_AT: int(time.time()),
}
save_json(self.config_path, config_contents)
| apache-2.0 | -3,180,641,553,304,962,000 | 36.290385 | 88 | 0.569646 | false |
bigfatnoob/DISCAW | Models/usp05.py | 1 | 7537 | """
# The USP05 Data Set
Standard header:
"""
from __future__ import division,print_function
import sys
sys.dont_write_bytecode = True
from lib import *
"""
@attribute ObjType {FT,PJ,RQ}
@attribute IntComplx {5.0,2.0,1.0,4.0,3.0,3.5,2.5,4.5,NULL}
@attribute DataFile {18.0,9.0,7.0,12.0,2.0,5.0,4.0,3.0,1.0,11.0,0.0,75.0,13.0,6.0,8.0,NULL,32.0}
@attribute DataEn {94.0,240.0,15.0,90.0,314.0,1.0,4.0,3.0,2.0,6.0,0.0,20.0,60.0,30.0,5.0,17.0,10.0,7.0,45.0,48.0,12.0,83.0,150.0,36.0,186.0,9.0,11.0,52.0,25.0,14.0,8.0,NULL,50.0,13.0}
@attribute DataOut {NULL,0.0,1.0,2.0,4.0,20.0,5.0,50.0,12.0,76.0,6.0,69.0,200.0,34.0,108.0,9.0,3.0,8.0,7.0,10.0,18.0,16.0,17.0,13.0,14.0,11.0}
@attribute UFP {NULL,0.0,2.0,3.0,4.0,50.0,46.0,66.0,48.0,36.0,44.0,14.0,8.0,10.0,20.0,25.0,35.0,1.0,6.0,49.0,19.0,64.0,55.0,30.0,180.0,190.0,250.0,1085.0,510.0,210.0,1714.0,11.0,5.0,7.0,17.0,27.0,34.0,154.0,18.0,321.0,90.0,75.0,60.0,40.0,95.0,29.0,23.0,15.0,32.0,31.0,26.0,37.0,12.0,16.0,224.0,22.0,235.0,59.0,147.0,153.0,166.0,137.0,33.0,56.0,57.0,76.0,104.0,105.0}
@attribute AppExpr numeric
@attribute Effort numeric
Data:
"""
def usp05(weighFeature = False,
split="variance"):
vl=1;l=2;n=3;h=4;vh=5;xh=6;_=0;
FT=0;PJ=1;RQ=2;NULL=0;
return data(indep= [
# 0..6
'ObjType','IntComplx','DataFile','DataEn','DataOut','UFP','AppExpr'],
less = ['effort'],
_rows=[
[FT,5,18,94,NULL,NULL,4,2.5],
[FT,5,9,240,NULL,NULL,4,2.5],
[FT,2,9,15,0,0,4,2],
[FT,2,9,15,0,0,4,2],
[FT,2,9,15,0,0,5,3.5],
[FT,1,7,90,0,0,4,2],
[FT,2,9,90,0,0,5,2],
[FT,2,9,90,0,0,5,2],
[FT,5,12,314,0,0,5,16],
[FT,2,2,1,1,2,2,1],
[FT,1,2,4,1,0,1,2],
[FT,1,2,4,1,0,1,1],
[FT,4,2,3,1,0,3,5],
[FT,1,2,1,1,0,2,2],
[FT,1,2,1,1,0,2,2],
[FT,1,2,1,1,0,3,3],
[FT,2,5,2,2,0,2,7],
[FT,1,2,2,1,0,2,1],
[FT,1,2,2,1,0,2,1],
[FT,1,4,4,1,0,2,1],
[FT,1,2,2,1,0,1,1],
[FT,1,2,2,1,0,2,1],
[FT,1,2,2,1,0,2,1],
[FT,1,4,4,1,0,2,1],
[FT,1,2,2,1,0,2,1],
[FT,1,2,2,1,0,1,1],
[FT,1,2,4,1,0,1,1],
[FT,1,2,2,1,0,1,1],
[FT,1,2,2,1,0,1,1],
[FT,1,2,1,1,0,1,1],
[FT,1,2,2,1,0,1,1],
[FT,1,2,4,1,0,1,1],
[FT,1,2,2,1,0,1,1],
[FT,1,2,2,1,0,1,1],
[FT,1,2,1,1,0,1,1],
[FT,1,2,1,1,0,1,1],
[FT,1,2,3,1,3,1,1],
[FT,1,2,2,1,3,1,1],
[FT,5,3,1,1,0,1,1],
[FT,1,2,1,1,0,1,1],
[FT,1,2,3,1,3,1,1],
[FT,1,2,2,1,3,1,1],
[FT,5,4,1,1,0,5,1],
[FT,1,2,1,1,0,2,1],
[FT,1,4,1,1,0,2,1],
[FT,4,4,6,1,4,4,1],
[FT,1,4,4,1,0,3,1],
[FT,2,4,2,1,0,3,1],
[FT,3,3,2,1,50,2,40],
[FT,2,3,1,1,46,2,40],
[FT,3,1,2,4,66,2,20],
[FT,3,2,1,2,48,2,20],
[FT,2,2,1,1,36,2,10],
[FT,4,2,3,1,44,2,20],
[FT,2,7,3,2,14,2,8],
[FT,3,2,2,1,8,4,3],
[FT,2,2,3,1,10,1,3],
[FT,2,12,0,0,10,1,6],
[FT,4,1,20,20,20,1,10],
[FT,3,5,20,5,25,2,6],
[FT,4,11,60,50,35,1,12],
[FT,1,4,30,12,20,3,8],
[FT,1,0,0,0,1,5,0.5],
[FT,1,0,0,0,1,4,1],
[FT,2,3,2,1,6,1,24],
[FT,1,2,2,0,4,4,0.5],
[FT,1,2,2,0,4,4,0.5],
[FT,1,2,1,0,4,4,0.5],
[FT,1,2,0,2,6,4,0.5],
[FT,3,0,15,1,49,4,24],
[FT,2,0,5,1,19,4,8],
[FT,3,0,20,1,64,4,20],
[FT,2,0,17,1,55,4,4],
[FT,4,0,10,0,30,4,30],
[FT,3,0,7,1,25,4,8],
[FT,3,0,45,0,180,5,5],
[PJ,4,75,48,76,190,4,75],
[PJ,3,13,12,6,250,2,220],
[PJ,3,7,83,69,1085,3,400],
[PJ,3,12,150,200,510,2,100],
[PJ,2,5,36,34,210,4,70],
[PJ,3,12,186,108,1714,3,69],
[RQ,3,5,4,2,10,5,2.5],
[RQ,3,5,4,2,10,5,2.5],
[RQ,3,4,0,9,10,5,2],
[RQ,3,3,7,4,11,5,1.5],
[RQ,2,3,3,2,4,5,2],
[RQ,4,6,6,2,5,5,2.5],
[RQ,3,4,4,4,2,5,2.5],
[RQ,1,9,15,0,0,5,2],
[RQ,1,9,15,0,0,5,1],
[RQ,1,9,15,0,0,5,1],
[RQ,1,9,15,0,0,5,0.5],
[RQ,3,8,1,1,14,3,7],
[RQ,3,8,4,1,14,3,5],
[RQ,3,3,1,1,6,3,15],
[RQ,3,2,3,1,4,2,2],
[RQ,3,3,2,1,8,2,8],
[RQ,1,2,1,1,7,1,2],
[RQ,1,2,1,1,7,1,2],
[RQ,4,5,9,1,8,3,11],
[RQ,4,5,11,1,8,3,11],
[RQ,2,3,2,6,7,2,5],
[RQ,2,3,2,6,8,2,3],
[RQ,3,4,1,4,7,2,3],
[RQ,3,3,9,1,8,3,2],
[RQ,3,3,11,1,5,3,2],
[RQ,2,2,4,1,5,3,2],
[RQ,3,2,4,1,5,2,2],
[RQ,2,3,1,5,17,2,3],
[RQ,5,4,10,3,27,5,20],
[RQ,3,8,2,2,5,3,5],
[RQ,1,1,1,1,0,1,1],
[RQ,1,2,1,5,2,2,1],
[RQ,1,1,1,8,0,1,1],
[RQ,5,1,3,1,34,2,20],
[RQ,2,2,1,1,36,2,10],
[RQ,4,13,3,1,154,2,30],
[RQ,2,1,2,0,18,2,10],
[RQ,3.5,6,52,7,321,3.5,20],
[RQ,2.5,3,4,1,14,1,15],
[RQ,3.5,4,5,10,30,1,20],
[RQ,3.5,2,3,1,14,1,20],
[RQ,3.5,2,30,18,90,2,15],
[RQ,4,2,25,16,75,1,15],
[RQ,4.5,5,7,5,30,1,40],
[RQ,2,2,3,2,10,1,3],
[RQ,4,2,25,16,75,1,15],
[RQ,3,2,3,1,14,1,20],
[RQ,4,4,25,12,50,4,10],
[RQ,2,2,20,10,60,2,6],
[RQ,3,1,14,8,40,3,8],
[RQ,3,1,8,10,35,3,8],
[RQ,4,12,2,20,95,1,12],
[RQ,2,2,4,10,30,2,10],
[RQ,2,3,1,1,5,4,8],
[RQ,1,0,0,0,1,4,2],
[RQ,1,1,0,0,2,5,1],
[RQ,1,0,0,0,1,5,1.5],
[RQ,5,3,17,17,29,5,25],
[RQ,5,3,17,17,29,5,9],
[RQ,4,1,5,2,10,5,15],
[RQ,3,3,17,17,23,5,2],
[RQ,3,0,3,3,4,2,5],
[RQ,5,2,2,1,4,5,45],
[RQ,4,3,11,1,19,5,35],
[RQ,5,3,4,4,14,5,50],
[RQ,5,2,2,2,5,5,25],
[RQ,5,1,3,3,10,5,35],
[RQ,4,2,2,2,7,5,20],
[RQ,3,3,9,4,20,5,25],
[RQ,3,3,1,1,6,4,10],
[RQ,2,3,2,1,6,4,33],
[RQ,4,3,8,1,14,4,24],
[RQ,4,3,9,1,15,4,36],
[RQ,1,1,1,0,6,4,1],
[RQ,1,1,2,0,4,4,1],
[RQ,4,0,4,2,4,4,1],
[RQ,3,2,4,10,32,4,2],
[RQ,3,3,12,4,31,4,2],
[RQ,5,4,9,6,26,4,2],
[RQ,2,1,9,9,23,4,1],
[RQ,1,1,9,9,37,4,1],
[RQ,1,1,12,0,18,4,1],
[RQ,2,1,1,0,20,4,1],
[RQ,2,1,12,0,36,4,1],
[RQ,3,2,1,0,4,4,1],
[RQ,3,2,1,0,4,4,1],
[RQ,2,2,10,0,12,4,1],
[RQ,2,2,10,10,10,4,1],
[RQ,3,1,12,12,10,4,1],
[RQ,1,0,0,0,6,4,0.5],
[RQ,1,0,0,12,8,4,0.5],
[RQ,NULL,NULL,NULL,NULL,NULL,4,8],
[RQ,2,0,4,1,16,4,6],
[RQ,2,0,5,1,19,4,6],
[RQ,4,0,5,1,19,4,4],
[RQ,2,0,1,1,7,4,1],
[RQ,1,1,3,0,16,1,4],
[RQ,2,0,1,0,3,4,6],
[RQ,4,32,0,0,224,1,12],
[RQ,3,NULL,NULL,NULL,NULL,1,6],
[RQ,1,1,10,0,7,5,6],
[RQ,2,0,6,1,22,4,4],
[RQ,2,0,6,1,22,4,4],
[RQ,2,3,50,1,235,3,7],
[RQ,2,1,3,1,27,3,2],
[RQ,3,3,6,1,59,3,3],
[RQ,2,1,2,1,23,3,3],
[RQ,2,3,13,13,147,3,4],
[RQ,3,4,12,13,153,3,5],
[RQ,4,4,14,14,166,3,6],
[RQ,2,2,13,13,137,3,2],
[RQ,3,2,2,1,33,3,6],
[RQ,2,1,4,1,31,3,2],
[RQ,1,1,4,4,46,3,1],
[RQ,3,2,4,4,56,3,4],
[RQ,4,3,3,3,57,3,4],
[RQ,3,2,4,8,76,3,3],
[RQ,1,2,1,1,29,3,2],
[RQ,3,3,6,10,104,3,5],
[RQ,2,1,0,8,50,3,3],
[RQ,1,5,0,11,105,2,0.5]
],
_tunings =[[
# vlow low nom high vhigh xhigh
#scale factors:
'Prec', 6.20, 4.96, 3.72, 2.48, 1.24, _ ],[
'Flex', 5.07, 4.05, 3.04, 2.03, 1.01, _ ],[
'Resl', 7.07, 5.65, 4.24, 2.83, 1.41, _ ],[
'Pmat', 7.80, 6.24, 4.68, 3.12, 1.56, _ ],[
'Team', 5.48, 4.38, 3.29, 2.19, 1.01, _ ]],
weighFeature = weighFeature,
_split = split,
_isCocomo = False
)
"""
Demo code:
"""
def _usp05(): print(usp05())
#if __name__ == '__main__': eval(todo('_usp05()')) | mit | -4,062,622,610,972,723,700 | 28.794466 | 366 | 0.431737 | false
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.1/Lib/plat-unixware7/FCNTL.py | 1 | 1277 | # Generated by h2py from /usr/include/sys/fcntl.h
# Included from sys/types.h
def quad_low(x): return x.val[0]
ADT_EMASKSIZE = 8
SHRT_MIN = -32768
SHRT_MAX = 32767
INT_MIN = (-2147483647-1)
INT_MAX = 2147483647
LONG_MIN = (-2147483647-1)
LONG_MAX = 2147483647
OFF32_MAX = LONG_MAX
ISTAT_ASSERTED = 0
ISTAT_ASSUMED = 1
ISTAT_NONE = 2
OFF_MAX = OFF32_MAX
CLOCK_MAX = LONG_MAX
P_MYID = (-1)
P_MYHOSTID = (-1)
# Included from sys/select.h
FD_SETSIZE = 4096
NBBY = 8
NULL = 0
O_RDONLY = 0
O_WRONLY = 1
O_RDWR = 2
O_NDELAY = 0x04
O_APPEND = 0x08
O_SYNC = 0x10
O_NONBLOCK = 0x80
O_LARGEFILE = 0x80000
O_CREAT = 0x100
O_TRUNC = 0x200
O_EXCL = 0x400
O_NOCTTY = 0x800
F_DUPFD = 0
F_GETFD = 1
F_SETFD = 2
F_GETFL = 3
F_SETFL = 4
F_GETLK = 14
F_O_GETLK = 5
F_GETLK = 5
F_GETLK = 14
F_SETLK = 6
F_SETLKW = 7
F_CHKFL = 8
F_ALLOCSP = 10
F_FREESP = 11
F_RSETLK = 20
F_RGETLK = 21
F_RSETLKW = 22
F_GETOWN = 23
F_SETOWN = 24
F_DUP2 = 25
F_GETLK64 = 100
F_SETLKW64 = 101
F_SETLK64 = 102
F_RSETLK64 = 103
F_RGETLK64 = 104
F_RSETLKW64 = 105
F_FREESP64 = 106
F_RDCHK = 0x6001
F_GETLK = F_GETLK64
F_SETLKW = F_SETLKW64
F_SETLK = F_SETLK64
F_RSETLK = F_RSETLK64
F_RGETLK = F_RGETLK64
F_RSETLKW = F_RSETLKW64
F_FREESP = F_FREESP64
F_RDLCK = 01
F_WRLCK = 02
F_UNLCK = 03
O_ACCMODE = 3
FD_CLOEXEC = 1
| mit | 7,352,615,238,216,990,000 | 15.584416 | 49 | 0.675803 | false |
catapult-project/catapult-csm | telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr6/module.py | 23 | 18075 |
ds = {
'name': [
list(1)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
+ list(2)
]
}
import os
| bsd-3-clause | 3,110,521,482,160,537,600 | 16.913776 | 17 | 0.277953 | false |
alvin319/CarnotKE | jyhton/lib-python/2.7/test/string_tests.py | 7 | 63425 | """
Common tests shared by test_str, test_unicode, test_userstring and test_string.
"""
import unittest, string, sys, struct
from test import test_support
from UserList import UserList
import _testcapi
class Sequence:
def __init__(self, seq='wxyz'): self.seq = seq
def __len__(self): return len(self.seq)
def __getitem__(self, i): return self.seq[i]
class BadSeq1(Sequence):
def __init__(self): self.seq = [7, 'hello', 123L]
class BadSeq2(Sequence):
def __init__(self): self.seq = ['a', 'b', 'c']
def __len__(self): return 8
class CommonTest(unittest.TestCase):
    # This testcase contains tests that can be used in all
    # stringlike classes. Currently these are str, unicode,
    # UserString and the string module.
# The type to be tested
    # Change in subclasses to change the behaviour of fixtype()
type2test = None
    # All tests pass their arguments to the testing methods
    # as str objects. fixtype() can be used to propagate
    # these arguments to the appropriate type
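    # A brief illustration (ours, not part of the original suite): if a
    # subclass sets type2test = unicode, then fixtype('abc') yields u'abc'
    # and fixtype(['a', ('b',)]) yields [u'a', (u'b',)], so every str
    # literal used below is exercised against the type under test.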
def fixtype(self, obj):
if isinstance(obj, str):
return self.__class__.type2test(obj)
elif isinstance(obj, list):
return [self.fixtype(x) for x in obj]
elif isinstance(obj, tuple):
return tuple([self.fixtype(x) for x in obj])
elif isinstance(obj, dict):
return dict([
(self.fixtype(key), self.fixtype(value))
for (key, value) in obj.iteritems()
])
else:
return obj
# check that object.method(*args) returns result
def checkequal(self, result, object, methodname, *args):
result = self.fixtype(result)
object = self.fixtype(object)
args = self.fixtype(args)
realresult = getattr(object, methodname)(*args)
self.assertEqual(
result,
realresult
)
# if the original is returned make sure that
# this doesn't happen with subclasses
if object == realresult:
class subtype(self.__class__.type2test):
pass
object = subtype(object)
realresult = getattr(object, methodname)(*args)
self.assertTrue(object is not realresult)
# check that object.method(*args) raises exc
def checkraises(self, exc, object, methodname, *args):
object = self.fixtype(object)
args = self.fixtype(args)
self.assertRaises(
exc,
getattr(object, methodname),
*args
)
# call object.method(*args) without any checks
def checkcall(self, object, methodname, *args):
object = self.fixtype(object)
args = self.fixtype(args)
getattr(object, methodname)(*args)
def test_hash(self):
# SF bug 1054139: += optimization was not invalidating cached hash value
a = self.type2test('DNSSEC')
b = self.type2test('')
for c in a:
b += c
hash(b)
self.assertEqual(hash(a), hash(b))
def test_capitalize(self):
self.checkequal(' hello ', ' hello ', 'capitalize')
self.checkequal('Hello ', 'Hello ','capitalize')
self.checkequal('Hello ', 'hello ','capitalize')
self.checkequal('Aaaa', 'aaaa', 'capitalize')
self.checkequal('Aaaa', 'AaAa', 'capitalize')
self.checkraises(TypeError, 'hello', 'capitalize', 42)
def test_count(self):
self.checkequal(3, 'aaa', 'count', 'a')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(3, 'aaa', 'count', 'a')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(3, 'aaa', 'count', 'a')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(2, 'aaa', 'count', 'a', 1)
self.checkequal(0, 'aaa', 'count', 'a', 10)
self.checkequal(1, 'aaa', 'count', 'a', -1)
self.checkequal(3, 'aaa', 'count', 'a', -10)
self.checkequal(1, 'aaa', 'count', 'a', 0, 1)
self.checkequal(3, 'aaa', 'count', 'a', 0, 10)
self.checkequal(2, 'aaa', 'count', 'a', 0, -1)
self.checkequal(0, 'aaa', 'count', 'a', 0, -10)
self.checkequal(3, 'aaa', 'count', '', 1)
self.checkequal(1, 'aaa', 'count', '', 3)
self.checkequal(0, 'aaa', 'count', '', 10)
self.checkequal(2, 'aaa', 'count', '', -1)
self.checkequal(4, 'aaa', 'count', '', -10)
self.checkequal(1, '', 'count', '')
self.checkequal(0, '', 'count', '', 1, 1)
self.checkequal(0, '', 'count', '', sys.maxint, 0)
self.checkequal(0, '', 'count', 'xx')
self.checkequal(0, '', 'count', 'xx', 1, 1)
self.checkequal(0, '', 'count', 'xx', sys.maxint, 0)
self.checkraises(TypeError, 'hello', 'count')
self.checkraises(TypeError, 'hello', 'count', 42)
# For a variety of combinations,
# verify that str.count() matches an equivalent function
# replacing all occurrences and then differencing the string lengths
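        # A worked instance of that identity (illustrative, not from the
        # original file): 'aabaa'.count('aa') == 2, and
        # (len('aabaa') - len('aabaa'.replace('aa', ''))) // len('aa')
        # == (5 - 1) // 2 == 2, so both computations agree.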
charset = ['', 'a', 'b']
digits = 7
base = len(charset)
teststrings = set()
for i in xrange(base ** digits):
entry = []
for j in xrange(digits):
i, m = divmod(i, base)
entry.append(charset[m])
teststrings.add(''.join(entry))
teststrings = list(teststrings)
for i in teststrings:
i = self.fixtype(i)
n = len(i)
for j in teststrings:
r1 = i.count(j)
if j:
r2, rem = divmod(n - len(i.replace(j, '')), len(j))
else:
r2, rem = len(i)+1, 0
if rem or r1 != r2:
self.assertEqual(rem, 0, '%s != 0 for %s' % (rem, i))
self.assertEqual(r1, r2, '%s != %s for %s' % (r1, r2, i))
def test_find(self):
self.checkequal(0, 'abcdefghiabc', 'find', 'abc')
self.checkequal(9, 'abcdefghiabc', 'find', 'abc', 1)
self.checkequal(-1, 'abcdefghiabc', 'find', 'def', 4)
self.checkequal(0, 'abc', 'find', '', 0)
self.checkequal(3, 'abc', 'find', '', 3)
self.checkequal(-1, 'abc', 'find', '', 4)
# to check the ability to pass None as defaults
self.checkequal( 2, 'rrarrrrrrrrra', 'find', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'find', 'a', 4)
self.checkequal(-1, 'rrarrrrrrrrra', 'find', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'find', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'find', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'find')
self.checkraises(TypeError, 'hello', 'find', 42)
self.checkequal(0, '', 'find', '')
self.checkequal(-1, '', 'find', '', 1, 1)
self.checkequal(-1, '', 'find', '', sys.maxint, 0)
self.checkequal(-1, '', 'find', 'xx')
self.checkequal(-1, '', 'find', 'xx', 1, 1)
self.checkequal(-1, '', 'find', 'xx', sys.maxint, 0)
# issue 7458
self.checkequal(-1, 'ab', 'find', 'xxx', sys.maxsize + 1, 0)
# For a variety of combinations,
# verify that str.find() matches __contains__
# and that the found substring is really at that location
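        # For example (illustrative only): 'abcab'.find('ca') == 2, which
        # implies 'ca' in 'abcab' and 'abcab'[2:2 + len('ca')] == 'ca'.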
charset = ['', 'a', 'b', 'c']
digits = 5
base = len(charset)
teststrings = set()
for i in xrange(base ** digits):
entry = []
for j in xrange(digits):
i, m = divmod(i, base)
entry.append(charset[m])
teststrings.add(''.join(entry))
teststrings = list(teststrings)
for i in teststrings:
i = self.fixtype(i)
for j in teststrings:
loc = i.find(j)
r1 = (loc != -1)
r2 = j in i
self.assertEqual(r1, r2)
if loc != -1:
self.assertEqual(i[loc:loc+len(j)], j)
def test_rfind(self):
self.checkequal(9, 'abcdefghiabc', 'rfind', 'abc')
self.checkequal(12, 'abcdefghiabc', 'rfind', '')
self.checkequal(0, 'abcdefghiabc', 'rfind', 'abcd')
self.checkequal(-1, 'abcdefghiabc', 'rfind', 'abcz')
self.checkequal(3, 'abc', 'rfind', '', 0)
self.checkequal(3, 'abc', 'rfind', '', 3)
self.checkequal(-1, 'abc', 'rfind', '', 4)
# to check the ability to pass None as defaults
self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a', 4)
self.checkequal(-1, 'rrarrrrrrrrra', 'rfind', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'rfind', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'rfind')
self.checkraises(TypeError, 'hello', 'rfind', 42)
# For a variety of combinations,
# verify that str.rfind() matches __contains__
# and that the found substring is really at that location
charset = ['', 'a', 'b', 'c']
digits = 5
base = len(charset)
teststrings = set()
for i in xrange(base ** digits):
entry = []
for j in xrange(digits):
i, m = divmod(i, base)
entry.append(charset[m])
teststrings.add(''.join(entry))
teststrings = list(teststrings)
for i in teststrings:
i = self.fixtype(i)
for j in teststrings:
loc = i.rfind(j)
r1 = (loc != -1)
r2 = j in i
self.assertEqual(r1, r2)
if loc != -1:
self.assertEqual(i[loc:loc+len(j)], self.fixtype(j))
# issue 7458
self.checkequal(-1, 'ab', 'rfind', 'xxx', sys.maxsize + 1, 0)
def test_index(self):
self.checkequal(0, 'abcdefghiabc', 'index', '')
self.checkequal(3, 'abcdefghiabc', 'index', 'def')
self.checkequal(0, 'abcdefghiabc', 'index', 'abc')
self.checkequal(9, 'abcdefghiabc', 'index', 'abc', 1)
self.checkraises(ValueError, 'abcdefghiabc', 'index', 'hib')
self.checkraises(ValueError, 'abcdefghiab', 'index', 'abc', 1)
self.checkraises(ValueError, 'abcdefghi', 'index', 'ghi', 8)
self.checkraises(ValueError, 'abcdefghi', 'index', 'ghi', -1)
# to check the ability to pass None as defaults
self.checkequal( 2, 'rrarrrrrrrrra', 'index', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'index', 'a', 4)
self.checkraises(ValueError, 'rrarrrrrrrrra', 'index', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'index', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'index', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'index')
self.checkraises(TypeError, 'hello', 'index', 42)
def test_rindex(self):
self.checkequal(12, 'abcdefghiabc', 'rindex', '')
self.checkequal(3, 'abcdefghiabc', 'rindex', 'def')
self.checkequal(9, 'abcdefghiabc', 'rindex', 'abc')
self.checkequal(0, 'abcdefghiabc', 'rindex', 'abc', 0, -1)
self.checkraises(ValueError, 'abcdefghiabc', 'rindex', 'hib')
self.checkraises(ValueError, 'defghiabc', 'rindex', 'def', 1)
self.checkraises(ValueError, 'defghiabc', 'rindex', 'abc', 0, -1)
self.checkraises(ValueError, 'abcdefghi', 'rindex', 'ghi', 0, 8)
self.checkraises(ValueError, 'abcdefghi', 'rindex', 'ghi', 0, -1)
# to check the ability to pass None as defaults
self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a', 4)
self.checkraises(ValueError, 'rrarrrrrrrrra', 'rindex', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'rindex', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'rindex')
self.checkraises(TypeError, 'hello', 'rindex', 42)
def test_lower(self):
self.checkequal('hello', 'HeLLo', 'lower')
self.checkequal('hello', 'hello', 'lower')
self.checkraises(TypeError, 'hello', 'lower', 42)
def test_upper(self):
self.checkequal('HELLO', 'HeLLo', 'upper')
self.checkequal('HELLO', 'HELLO', 'upper')
self.checkraises(TypeError, 'hello', 'upper', 42)
def test_expandtabs(self):
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs')
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 8)
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 4)
self.checkequal('abc\r\nab def\ng hi', 'abc\r\nab\tdef\ng\thi', 'expandtabs', 4)
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs')
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 8)
self.checkequal('abc\r\nab\r\ndef\ng\r\nhi', 'abc\r\nab\r\ndef\ng\r\nhi', 'expandtabs', 4)
self.checkequal(' a\n b', ' \ta\n\tb', 'expandtabs', 1)
self.checkraises(TypeError, 'hello', 'expandtabs', 42, 42)
# This test is only valid when sizeof(int) == sizeof(void*) == 4.
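        # Note (our annotation): struct.calcsize('P') is the size of a C
        # pointer in bytes, so the guard below limits this check to 32-bit
        # builds where the overflow is actually reachable.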
if sys.maxint < (1 << 32) and struct.calcsize('P') == 4:
self.checkraises(OverflowError,
'\ta\n\tb', 'expandtabs', sys.maxint)
def test_split(self):
self.checkequal(['this', 'is', 'the', 'split', 'function'],
'this is the split function', 'split')
# by whitespace
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d ', 'split')
self.checkequal(['a', 'b c d'], 'a b c d', 'split', None, 1)
self.checkequal(['a', 'b', 'c d'], 'a b c d', 'split', None, 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None, 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None, 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None,
sys.maxint-1)
self.checkequal(['a b c d'], 'a b c d', 'split', None, 0)
self.checkequal(['a b c d'], ' a b c d', 'split', None, 0)
self.checkequal(['a', 'b', 'c d'], 'a b c d', 'split', None, 2)
self.checkequal([], ' ', 'split')
self.checkequal(['a'], ' a ', 'split')
self.checkequal(['a', 'b'], ' a b ', 'split')
self.checkequal(['a', 'b '], ' a b ', 'split', None, 1)
self.checkequal(['a', 'b c '], ' a b c ', 'split', None, 1)
self.checkequal(['a', 'b', 'c '], ' a b c ', 'split', None, 2)
self.checkequal(['a', 'b'], '\n\ta \t\r b \v ', 'split')
aaa = ' a '*20
self.checkequal(['a']*20, aaa, 'split')
self.checkequal(['a'] + [aaa[4:]], aaa, 'split', None, 1)
self.checkequal(['a']*19 + ['a '], aaa, 'split', None, 19)
# by a char
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|')
self.checkequal(['a|b|c|d'], 'a|b|c|d', 'split', '|', 0)
self.checkequal(['a', 'b|c|d'], 'a|b|c|d', 'split', '|', 1)
self.checkequal(['a', 'b', 'c|d'], 'a|b|c|d', 'split', '|', 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|', 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|', 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|',
sys.maxint-2)
self.checkequal(['a|b|c|d'], 'a|b|c|d', 'split', '|', 0)
self.checkequal(['a', '', 'b||c||d'], 'a||b||c||d', 'split', '|', 2)
self.checkequal(['endcase ', ''], 'endcase |', 'split', '|')
self.checkequal(['', ' startcase'], '| startcase', 'split', '|')
self.checkequal(['', 'bothcase', ''], '|bothcase|', 'split', '|')
self.checkequal(['a', '', 'b\x00c\x00d'], 'a\x00\x00b\x00c\x00d', 'split', '\x00', 2)
self.checkequal(['a']*20, ('a|'*20)[:-1], 'split', '|')
self.checkequal(['a']*15 +['a|a|a|a|a'],
('a|'*20)[:-1], 'split', '|', 15)
# by string
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//')
self.checkequal(['a', 'b//c//d'], 'a//b//c//d', 'split', '//', 1)
self.checkequal(['a', 'b', 'c//d'], 'a//b//c//d', 'split', '//', 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//', 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//', 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//',
sys.maxint-10)
self.checkequal(['a//b//c//d'], 'a//b//c//d', 'split', '//', 0)
self.checkequal(['a', '', 'b////c////d'], 'a////b////c////d', 'split', '//', 2)
self.checkequal(['endcase ', ''], 'endcase test', 'split', 'test')
self.checkequal(['', ' begincase'], 'test begincase', 'split', 'test')
self.checkequal(['', ' bothcase ', ''], 'test bothcase test',
'split', 'test')
self.checkequal(['a', 'bc'], 'abbbc', 'split', 'bb')
self.checkequal(['', ''], 'aaa', 'split', 'aaa')
self.checkequal(['aaa'], 'aaa', 'split', 'aaa', 0)
self.checkequal(['ab', 'ab'], 'abbaab', 'split', 'ba')
self.checkequal(['aaaa'], 'aaaa', 'split', 'aab')
self.checkequal([''], '', 'split', 'aaa')
self.checkequal(['aa'], 'aa', 'split', 'aaa')
self.checkequal(['A', 'bobb'], 'Abbobbbobb', 'split', 'bbobb')
self.checkequal(['A', 'B', ''], 'AbbobbBbbobb', 'split', 'bbobb')
self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'split', 'BLAH')
self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'split', 'BLAH', 19)
self.checkequal(['a']*18 + ['aBLAHa'], ('aBLAH'*20)[:-4],
'split', 'BLAH', 18)
# mixed use of str and unicode
self.checkequal([u'a', u'b', u'c d'], 'a b c d', 'split', u' ', 2)
# argument type
self.checkraises(TypeError, 'hello', 'split', 42, 42, 42)
# null case
self.checkraises(ValueError, 'hello', 'split', '')
self.checkraises(ValueError, 'hello', 'split', '', 0)
def test_rsplit(self):
self.checkequal(['this', 'is', 'the', 'rsplit', 'function'],
'this is the rsplit function', 'rsplit')
# by whitespace
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d ', 'rsplit')
self.checkequal(['a b c', 'd'], 'a b c d', 'rsplit', None, 1)
self.checkequal(['a b', 'c', 'd'], 'a b c d', 'rsplit', None, 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None, 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None, 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None,
sys.maxint-20)
self.checkequal(['a b c d'], 'a b c d', 'rsplit', None, 0)
self.checkequal(['a b c d'], 'a b c d ', 'rsplit', None, 0)
self.checkequal(['a b', 'c', 'd'], 'a b c d', 'rsplit', None, 2)
self.checkequal([], ' ', 'rsplit')
self.checkequal(['a'], ' a ', 'rsplit')
self.checkequal(['a', 'b'], ' a b ', 'rsplit')
self.checkequal([' a', 'b'], ' a b ', 'rsplit', None, 1)
self.checkequal([' a b','c'], ' a b c ', 'rsplit',
None, 1)
self.checkequal([' a', 'b', 'c'], ' a b c ', 'rsplit',
None, 2)
self.checkequal(['a', 'b'], '\n\ta \t\r b \v ', 'rsplit', None, 88)
aaa = ' a '*20
self.checkequal(['a']*20, aaa, 'rsplit')
self.checkequal([aaa[:-4]] + ['a'], aaa, 'rsplit', None, 1)
self.checkequal([' a a'] + ['a']*18, aaa, 'rsplit', None, 18)
# by a char
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|')
self.checkequal(['a|b|c', 'd'], 'a|b|c|d', 'rsplit', '|', 1)
self.checkequal(['a|b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|',
sys.maxint-100)
self.checkequal(['a|b|c|d'], 'a|b|c|d', 'rsplit', '|', 0)
self.checkequal(['a||b||c', '', 'd'], 'a||b||c||d', 'rsplit', '|', 2)
self.checkequal(['', ' begincase'], '| begincase', 'rsplit', '|')
self.checkequal(['endcase ', ''], 'endcase |', 'rsplit', '|')
self.checkequal(['', 'bothcase', ''], '|bothcase|', 'rsplit', '|')
self.checkequal(['a\x00\x00b', 'c', 'd'], 'a\x00\x00b\x00c\x00d', 'rsplit', '\x00', 2)
self.checkequal(['a']*20, ('a|'*20)[:-1], 'rsplit', '|')
self.checkequal(['a|a|a|a|a']+['a']*15,
('a|'*20)[:-1], 'rsplit', '|', 15)
# by string
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//')
self.checkequal(['a//b//c', 'd'], 'a//b//c//d', 'rsplit', '//', 1)
self.checkequal(['a//b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//',
sys.maxint-5)
self.checkequal(['a//b//c//d'], 'a//b//c//d', 'rsplit', '//', 0)
self.checkequal(['a////b////c', '', 'd'], 'a////b////c////d', 'rsplit', '//', 2)
self.checkequal(['', ' begincase'], 'test begincase', 'rsplit', 'test')
self.checkequal(['endcase ', ''], 'endcase test', 'rsplit', 'test')
self.checkequal(['', ' bothcase ', ''], 'test bothcase test',
'rsplit', 'test')
self.checkequal(['ab', 'c'], 'abbbc', 'rsplit', 'bb')
self.checkequal(['', ''], 'aaa', 'rsplit', 'aaa')
self.checkequal(['aaa'], 'aaa', 'rsplit', 'aaa', 0)
self.checkequal(['ab', 'ab'], 'abbaab', 'rsplit', 'ba')
self.checkequal(['aaaa'], 'aaaa', 'rsplit', 'aab')
self.checkequal([''], '', 'rsplit', 'aaa')
self.checkequal(['aa'], 'aa', 'rsplit', 'aaa')
self.checkequal(['bbob', 'A'], 'bbobbbobbA', 'rsplit', 'bbobb')
self.checkequal(['', 'B', 'A'], 'bbobbBbbobbA', 'rsplit', 'bbobb')
self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'rsplit', 'BLAH')
self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'rsplit', 'BLAH', 19)
self.checkequal(['aBLAHa'] + ['a']*18, ('aBLAH'*20)[:-4],
'rsplit', 'BLAH', 18)
# mixed use of str and unicode
self.checkequal([u'a b', u'c', u'd'], 'a b c d', 'rsplit', u' ', 2)
# argument type
self.checkraises(TypeError, 'hello', 'rsplit', 42, 42, 42)
# null case
self.checkraises(ValueError, 'hello', 'rsplit', '')
self.checkraises(ValueError, 'hello', 'rsplit', '', 0)
def test_strip(self):
self.checkequal('hello', ' hello ', 'strip')
self.checkequal('hello ', ' hello ', 'lstrip')
self.checkequal(' hello', ' hello ', 'rstrip')
self.checkequal('hello', 'hello', 'strip')
# strip/lstrip/rstrip with None arg
self.checkequal('hello', ' hello ', 'strip', None)
self.checkequal('hello ', ' hello ', 'lstrip', None)
self.checkequal(' hello', ' hello ', 'rstrip', None)
self.checkequal('hello', 'hello', 'strip', None)
# strip/lstrip/rstrip with str arg
self.checkequal('hello', 'xyzzyhelloxyzzy', 'strip', 'xyz')
self.checkequal('helloxyzzy', 'xyzzyhelloxyzzy', 'lstrip', 'xyz')
self.checkequal('xyzzyhello', 'xyzzyhelloxyzzy', 'rstrip', 'xyz')
self.checkequal('hello', 'hello', 'strip', 'xyz')
# strip/lstrip/rstrip with unicode arg
if test_support.have_unicode:
self.checkequal(unicode('hello', 'ascii'), 'xyzzyhelloxyzzy',
'strip', unicode('xyz', 'ascii'))
self.checkequal(unicode('helloxyzzy', 'ascii'), 'xyzzyhelloxyzzy',
'lstrip', unicode('xyz', 'ascii'))
self.checkequal(unicode('xyzzyhello', 'ascii'), 'xyzzyhelloxyzzy',
'rstrip', unicode('xyz', 'ascii'))
# XXX
#self.checkequal(unicode('hello', 'ascii'), 'hello',
# 'strip', unicode('xyz', 'ascii'))
self.checkraises(TypeError, 'hello', 'strip', 42, 42)
self.checkraises(TypeError, 'hello', 'lstrip', 42, 42)
self.checkraises(TypeError, 'hello', 'rstrip', 42, 42)
def test_ljust(self):
self.checkequal('abc ', 'abc', 'ljust', 10)
self.checkequal('abc ', 'abc', 'ljust', 6)
self.checkequal('abc', 'abc', 'ljust', 3)
self.checkequal('abc', 'abc', 'ljust', 2)
self.checkequal('abc*******', 'abc', 'ljust', 10, '*')
self.checkraises(TypeError, 'abc', 'ljust')
def test_rjust(self):
self.checkequal(' abc', 'abc', 'rjust', 10)
self.checkequal(' abc', 'abc', 'rjust', 6)
self.checkequal('abc', 'abc', 'rjust', 3)
self.checkequal('abc', 'abc', 'rjust', 2)
self.checkequal('*******abc', 'abc', 'rjust', 10, '*')
self.checkraises(TypeError, 'abc', 'rjust')
def test_center(self):
self.checkequal(' abc ', 'abc', 'center', 10)
self.checkequal(' abc ', 'abc', 'center', 6)
self.checkequal('abc', 'abc', 'center', 3)
self.checkequal('abc', 'abc', 'center', 2)
self.checkequal('***abc****', 'abc', 'center', 10, '*')
self.checkraises(TypeError, 'abc', 'center')
def test_swapcase(self):
self.checkequal('hEllO CoMPuTErS', 'HeLLo cOmpUteRs', 'swapcase')
self.checkraises(TypeError, 'hello', 'swapcase', 42)
def test_replace(self):
EQ = self.checkequal
# Operations on the empty string
EQ("", "", "replace", "", "")
EQ("A", "", "replace", "", "A")
EQ("", "", "replace", "A", "")
EQ("", "", "replace", "A", "A")
EQ("", "", "replace", "", "", 100)
EQ("", "", "replace", "", "", sys.maxint)
# interleave (from=="", 'to' gets inserted everywhere)
EQ("A", "A", "replace", "", "")
EQ("*A*", "A", "replace", "", "*")
EQ("*1A*1", "A", "replace", "", "*1")
EQ("*-#A*-#", "A", "replace", "", "*-#")
EQ("*-A*-A*-", "AA", "replace", "", "*-")
EQ("*-A*-A*-", "AA", "replace", "", "*-", -1)
EQ("*-A*-A*-", "AA", "replace", "", "*-", sys.maxint)
EQ("*-A*-A*-", "AA", "replace", "", "*-", 4)
EQ("*-A*-A*-", "AA", "replace", "", "*-", 3)
EQ("*-A*-A", "AA", "replace", "", "*-", 2)
EQ("*-AA", "AA", "replace", "", "*-", 1)
EQ("AA", "AA", "replace", "", "*-", 0)
# single character deletion (from=="A", to=="")
EQ("", "A", "replace", "A", "")
EQ("", "AAA", "replace", "A", "")
EQ("", "AAA", "replace", "A", "", -1)
EQ("", "AAA", "replace", "A", "", sys.maxint)
EQ("", "AAA", "replace", "A", "", 4)
EQ("", "AAA", "replace", "A", "", 3)
EQ("A", "AAA", "replace", "A", "", 2)
EQ("AA", "AAA", "replace", "A", "", 1)
EQ("AAA", "AAA", "replace", "A", "", 0)
EQ("", "AAAAAAAAAA", "replace", "A", "")
EQ("BCD", "ABACADA", "replace", "A", "")
EQ("BCD", "ABACADA", "replace", "A", "", -1)
EQ("BCD", "ABACADA", "replace", "A", "", sys.maxint)
EQ("BCD", "ABACADA", "replace", "A", "", 5)
EQ("BCD", "ABACADA", "replace", "A", "", 4)
EQ("BCDA", "ABACADA", "replace", "A", "", 3)
EQ("BCADA", "ABACADA", "replace", "A", "", 2)
EQ("BACADA", "ABACADA", "replace", "A", "", 1)
EQ("ABACADA", "ABACADA", "replace", "A", "", 0)
EQ("BCD", "ABCAD", "replace", "A", "")
EQ("BCD", "ABCADAA", "replace", "A", "")
EQ("BCD", "BCD", "replace", "A", "")
EQ("*************", "*************", "replace", "A", "")
EQ("^A^", "^"+"A"*1000+"^", "replace", "A", "", 999)
# substring deletion (from=="the", to=="")
EQ("", "the", "replace", "the", "")
EQ("ater", "theater", "replace", "the", "")
EQ("", "thethe", "replace", "the", "")
EQ("", "thethethethe", "replace", "the", "")
EQ("aaaa", "theatheatheathea", "replace", "the", "")
EQ("that", "that", "replace", "the", "")
EQ("thaet", "thaet", "replace", "the", "")
EQ("here and re", "here and there", "replace", "the", "")
EQ("here and re and re", "here and there and there",
"replace", "the", "", sys.maxint)
EQ("here and re and re", "here and there and there",
"replace", "the", "", -1)
EQ("here and re and re", "here and there and there",
"replace", "the", "", 3)
EQ("here and re and re", "here and there and there",
"replace", "the", "", 2)
EQ("here and re and there", "here and there and there",
"replace", "the", "", 1)
EQ("here and there and there", "here and there and there",
"replace", "the", "", 0)
EQ("here and re and re", "here and there and there", "replace", "the", "")
EQ("abc", "abc", "replace", "the", "")
EQ("abcdefg", "abcdefg", "replace", "the", "")
# substring deletion (from=="bob", to=="")
EQ("bob", "bbobob", "replace", "bob", "")
EQ("bobXbob", "bbobobXbbobob", "replace", "bob", "")
EQ("aaaaaaa", "aaaaaaabob", "replace", "bob", "")
EQ("aaaaaaa", "aaaaaaa", "replace", "bob", "")
# single character replace in place (len(from)==len(to)==1)
EQ("Who goes there?", "Who goes there?", "replace", "o", "o")
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O")
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", sys.maxint)
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", -1)
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", 3)
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", 2)
EQ("WhO goes there?", "Who goes there?", "replace", "o", "O", 1)
EQ("Who goes there?", "Who goes there?", "replace", "o", "O", 0)
EQ("Who goes there?", "Who goes there?", "replace", "a", "q")
EQ("who goes there?", "Who goes there?", "replace", "W", "w")
EQ("wwho goes there?ww", "WWho goes there?WW", "replace", "W", "w")
EQ("Who goes there!", "Who goes there?", "replace", "?", "!")
EQ("Who goes there!!", "Who goes there??", "replace", "?", "!")
EQ("Who goes there?", "Who goes there?", "replace", ".", "!")
# substring replace in place (len(from)==len(to) > 1)
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**")
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", sys.maxint)
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", -1)
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", 4)
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", 3)
EQ("Th** ** a tissue", "This is a tissue", "replace", "is", "**", 2)
EQ("Th** is a tissue", "This is a tissue", "replace", "is", "**", 1)
EQ("This is a tissue", "This is a tissue", "replace", "is", "**", 0)
EQ("cobob", "bobob", "replace", "bob", "cob")
EQ("cobobXcobocob", "bobobXbobobob", "replace", "bob", "cob")
EQ("bobob", "bobob", "replace", "bot", "bot")
# replace single character (len(from)==1, len(to)>1)
EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK")
EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK", -1)
EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK", sys.maxint)
EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK", 2)
EQ("ReyKKjavik", "Reykjavik", "replace", "k", "KK", 1)
EQ("Reykjavik", "Reykjavik", "replace", "k", "KK", 0)
EQ("A----B----C----", "A.B.C.", "replace", ".", "----")
EQ("Reykjavik", "Reykjavik", "replace", "q", "KK")
# replace substring (len(from)>1, len(to)!=len(from))
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham")
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham", sys.maxint)
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham", -1)
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham", 4)
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham", 3)
EQ("ham, ham, eggs and spam", "spam, spam, eggs and spam",
"replace", "spam", "ham", 2)
EQ("ham, spam, eggs and spam", "spam, spam, eggs and spam",
"replace", "spam", "ham", 1)
EQ("spam, spam, eggs and spam", "spam, spam, eggs and spam",
"replace", "spam", "ham", 0)
EQ("bobob", "bobobob", "replace", "bobob", "bob")
EQ("bobobXbobob", "bobobobXbobobob", "replace", "bobob", "bob")
EQ("BOBOBOB", "BOBOBOB", "replace", "bob", "bobby")
with test_support.check_py3k_warnings():
ba = buffer('a')
bb = buffer('b')
EQ("bbc", "abc", "replace", ba, bb)
EQ("aac", "abc", "replace", bb, ba)
#
self.checkequal('one@two!three!', 'one!two!three!', 'replace', '!', '@', 1)
self.checkequal('onetwothree', 'one!two!three!', 'replace', '!', '')
self.checkequal('one@two@three!', 'one!two!three!', 'replace', '!', '@', 2)
self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@', 3)
self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@', 4)
self.checkequal('one!two!three!', 'one!two!three!', 'replace', '!', '@', 0)
self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@')
self.checkequal('one!two!three!', 'one!two!three!', 'replace', 'x', '@')
self.checkequal('one!two!three!', 'one!two!three!', 'replace', 'x', '@', 2)
self.checkequal('-a-b-c-', 'abc', 'replace', '', '-')
self.checkequal('-a-b-c', 'abc', 'replace', '', '-', 3)
self.checkequal('abc', 'abc', 'replace', '', '-', 0)
self.checkequal('', '', 'replace', '', '')
self.checkequal('abc', 'abc', 'replace', 'ab', '--', 0)
self.checkequal('abc', 'abc', 'replace', 'xy', '--')
# Next three for SF bug 422088: [OSF1 alpha] string.replace(); died with
# MemoryError due to empty result (platform malloc issue when requesting
# 0 bytes).
self.checkequal('', '123', 'replace', '123', '')
self.checkequal('', '123123', 'replace', '123', '')
self.checkequal('x', '123x123', 'replace', '123', '')
self.checkraises(TypeError, 'hello', 'replace')
self.checkraises(TypeError, 'hello', 'replace', 42)
self.checkraises(TypeError, 'hello', 'replace', 42, 'h')
self.checkraises(TypeError, 'hello', 'replace', 'h', 42)
def test_replace_overflow(self):
# Check for overflow checking on 32 bit machines
if sys.maxint != 2147483647 or struct.calcsize("P") > 4:
return
A2_16 = "A" * (2**16)
self.checkraises(OverflowError, A2_16, "replace", "", A2_16)
self.checkraises(OverflowError, A2_16, "replace", "A", A2_16)
self.checkraises(OverflowError, A2_16, "replace", "AA", A2_16+A2_16)
def test_zfill(self):
self.checkequal('123', '123', 'zfill', 2)
self.checkequal('123', '123', 'zfill', 3)
self.checkequal('0123', '123', 'zfill', 4)
self.checkequal('+123', '+123', 'zfill', 3)
self.checkequal('+123', '+123', 'zfill', 4)
self.checkequal('+0123', '+123', 'zfill', 5)
self.checkequal('-123', '-123', 'zfill', 3)
self.checkequal('-123', '-123', 'zfill', 4)
self.checkequal('-0123', '-123', 'zfill', 5)
self.checkequal('000', '', 'zfill', 3)
self.checkequal('34', '34', 'zfill', 1)
self.checkequal('0034', '34', 'zfill', 4)
self.checkraises(TypeError, '123', 'zfill')
# XXX alias for py3k forward compatibility
BaseTest = CommonTest
class MixinStrUnicodeUserStringTest:
# additional tests that only work for
# stringlike objects, i.e. str, unicode, UserString
# (but not the string module)
def test_islower(self):
self.checkequal(False, '', 'islower')
self.checkequal(True, 'a', 'islower')
self.checkequal(False, 'A', 'islower')
self.checkequal(False, '\n', 'islower')
self.checkequal(True, 'abc', 'islower')
self.checkequal(False, 'aBc', 'islower')
self.checkequal(True, 'abc\n', 'islower')
self.checkraises(TypeError, 'abc', 'islower', 42)
def test_isupper(self):
self.checkequal(False, '', 'isupper')
self.checkequal(False, 'a', 'isupper')
self.checkequal(True, 'A', 'isupper')
self.checkequal(False, '\n', 'isupper')
self.checkequal(True, 'ABC', 'isupper')
self.checkequal(False, 'AbC', 'isupper')
self.checkequal(True, 'ABC\n', 'isupper')
self.checkraises(TypeError, 'abc', 'isupper', 42)
def test_istitle(self):
self.checkequal(False, '', 'istitle')
self.checkequal(False, 'a', 'istitle')
self.checkequal(True, 'A', 'istitle')
self.checkequal(False, '\n', 'istitle')
self.checkequal(True, 'A Titlecased Line', 'istitle')
self.checkequal(True, 'A\nTitlecased Line', 'istitle')
self.checkequal(True, 'A Titlecased, Line', 'istitle')
self.checkequal(False, 'Not a capitalized String', 'istitle')
self.checkequal(False, 'Not\ta Titlecase String', 'istitle')
self.checkequal(False, 'Not--a Titlecase String', 'istitle')
self.checkequal(False, 'NOT', 'istitle')
self.checkraises(TypeError, 'abc', 'istitle', 42)
def test_isspace(self):
self.checkequal(False, '', 'isspace')
self.checkequal(False, 'a', 'isspace')
self.checkequal(True, ' ', 'isspace')
self.checkequal(True, '\t', 'isspace')
self.checkequal(True, '\r', 'isspace')
self.checkequal(True, '\n', 'isspace')
self.checkequal(True, ' \t\r\n', 'isspace')
self.checkequal(False, ' \t\r\na', 'isspace')
self.checkraises(TypeError, 'abc', 'isspace', 42)
def test_isalpha(self):
self.checkequal(False, '', 'isalpha')
self.checkequal(True, 'a', 'isalpha')
self.checkequal(True, 'A', 'isalpha')
self.checkequal(False, '\n', 'isalpha')
self.checkequal(True, 'abc', 'isalpha')
self.checkequal(False, 'aBc123', 'isalpha')
self.checkequal(False, 'abc\n', 'isalpha')
self.checkraises(TypeError, 'abc', 'isalpha', 42)
def test_isalnum(self):
self.checkequal(False, '', 'isalnum')
self.checkequal(True, 'a', 'isalnum')
self.checkequal(True, 'A', 'isalnum')
self.checkequal(False, '\n', 'isalnum')
self.checkequal(True, '123abc456', 'isalnum')
self.checkequal(True, 'a1b3c', 'isalnum')
self.checkequal(False, 'aBc000 ', 'isalnum')
self.checkequal(False, 'abc\n', 'isalnum')
self.checkraises(TypeError, 'abc', 'isalnum', 42)
def test_isdigit(self):
self.checkequal(False, '', 'isdigit')
self.checkequal(False, 'a', 'isdigit')
self.checkequal(True, '0', 'isdigit')
self.checkequal(True, '0123456789', 'isdigit')
self.checkequal(False, '0123456789a', 'isdigit')
self.checkraises(TypeError, 'abc', 'isdigit', 42)
def test_title(self):
self.checkequal(' Hello ', ' hello ', 'title')
self.checkequal('Hello ', 'hello ', 'title')
self.checkequal('Hello ', 'Hello ', 'title')
self.checkequal('Format This As Title String', "fOrMaT thIs aS titLe String", 'title')
self.checkequal('Format,This-As*Title;String', "fOrMaT,thIs-aS*titLe;String", 'title', )
self.checkequal('Getint', "getInt", 'title')
self.checkraises(TypeError, 'hello', 'title', 42)
def test_splitlines(self):
self.checkequal(['abc', 'def', '', 'ghi'], "abc\ndef\n\rghi", 'splitlines')
self.checkequal(['abc', 'def', '', 'ghi'], "abc\ndef\n\r\nghi", 'splitlines')
self.checkequal(['abc', 'def', 'ghi'], "abc\ndef\r\nghi", 'splitlines')
self.checkequal(['abc', 'def', 'ghi'], "abc\ndef\r\nghi\n", 'splitlines')
self.checkequal(['abc', 'def', 'ghi', ''], "abc\ndef\r\nghi\n\r", 'splitlines')
self.checkequal(['', 'abc', 'def', 'ghi', ''], "\nabc\ndef\r\nghi\n\r", 'splitlines')
self.checkequal(['\n', 'abc\n', 'def\r\n', 'ghi\n', '\r'], "\nabc\ndef\r\nghi\n\r", 'splitlines', 1)
self.checkraises(TypeError, 'abc', 'splitlines', 42, 42)
def test_startswith(self):
self.checkequal(True, 'hello', 'startswith', 'he')
self.checkequal(True, 'hello', 'startswith', 'hello')
self.checkequal(False, 'hello', 'startswith', 'hello world')
self.checkequal(True, 'hello', 'startswith', '')
self.checkequal(False, 'hello', 'startswith', 'ello')
self.checkequal(True, 'hello', 'startswith', 'ello', 1)
self.checkequal(True, 'hello', 'startswith', 'o', 4)
self.checkequal(False, 'hello', 'startswith', 'o', 5)
self.checkequal(True, 'hello', 'startswith', '', 5)
self.checkequal(False, 'hello', 'startswith', 'lo', 6)
self.checkequal(True, 'helloworld', 'startswith', 'lowo', 3)
self.checkequal(True, 'helloworld', 'startswith', 'lowo', 3, 7)
self.checkequal(False, 'helloworld', 'startswith', 'lowo', 3, 6)
# test negative indices
self.checkequal(True, 'hello', 'startswith', 'he', 0, -1)
self.checkequal(True, 'hello', 'startswith', 'he', -53, -1)
self.checkequal(False, 'hello', 'startswith', 'hello', 0, -1)
self.checkequal(False, 'hello', 'startswith', 'hello world', -1, -10)
self.checkequal(False, 'hello', 'startswith', 'ello', -5)
self.checkequal(True, 'hello', 'startswith', 'ello', -4)
self.checkequal(False, 'hello', 'startswith', 'o', -2)
self.checkequal(True, 'hello', 'startswith', 'o', -1)
self.checkequal(True, 'hello', 'startswith', '', -3, -3)
self.checkequal(False, 'hello', 'startswith', 'lo', -9)
self.checkraises(TypeError, 'hello', 'startswith')
self.checkraises(TypeError, 'hello', 'startswith', 42)
# test tuple arguments
self.checkequal(True, 'hello', 'startswith', ('he', 'ha'))
self.checkequal(False, 'hello', 'startswith', ('lo', 'llo'))
self.checkequal(True, 'hello', 'startswith', ('hellox', 'hello'))
self.checkequal(False, 'hello', 'startswith', ())
self.checkequal(True, 'helloworld', 'startswith', ('hellowo',
'rld', 'lowo'), 3)
self.checkequal(False, 'helloworld', 'startswith', ('hellowo', 'ello',
'rld'), 3)
self.checkequal(True, 'hello', 'startswith', ('lo', 'he'), 0, -1)
self.checkequal(False, 'hello', 'startswith', ('he', 'hel'), 0, 1)
self.checkequal(True, 'hello', 'startswith', ('he', 'hel'), 0, 2)
self.checkraises(TypeError, 'hello', 'startswith', (42,))
def test_endswith(self):
self.checkequal(True, 'hello', 'endswith', 'lo')
self.checkequal(False, 'hello', 'endswith', 'he')
self.checkequal(True, 'hello', 'endswith', '')
self.checkequal(False, 'hello', 'endswith', 'hello world')
self.checkequal(False, 'helloworld', 'endswith', 'worl')
self.checkequal(True, 'helloworld', 'endswith', 'worl', 3, 9)
self.checkequal(True, 'helloworld', 'endswith', 'world', 3, 12)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', 1, 7)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', 2, 7)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', 3, 7)
self.checkequal(False, 'helloworld', 'endswith', 'lowo', 4, 7)
self.checkequal(False, 'helloworld', 'endswith', 'lowo', 3, 8)
self.checkequal(False, 'ab', 'endswith', 'ab', 0, 1)
self.checkequal(False, 'ab', 'endswith', 'ab', 0, 0)
# test negative indices
self.checkequal(True, 'hello', 'endswith', 'lo', -2)
self.checkequal(False, 'hello', 'endswith', 'he', -2)
self.checkequal(True, 'hello', 'endswith', '', -3, -3)
self.checkequal(False, 'hello', 'endswith', 'hello world', -10, -2)
self.checkequal(False, 'helloworld', 'endswith', 'worl', -6)
self.checkequal(True, 'helloworld', 'endswith', 'worl', -5, -1)
self.checkequal(True, 'helloworld', 'endswith', 'worl', -5, 9)
self.checkequal(True, 'helloworld', 'endswith', 'world', -7, 12)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', -99, -3)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', -8, -3)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', -7, -3)
self.checkequal(False, 'helloworld', 'endswith', 'lowo', 3, -4)
self.checkequal(False, 'helloworld', 'endswith', 'lowo', -8, -2)
self.checkraises(TypeError, 'hello', 'endswith')
self.checkraises(TypeError, 'hello', 'endswith', 42)
# test tuple arguments
self.checkequal(False, 'hello', 'endswith', ('he', 'ha'))
self.checkequal(True, 'hello', 'endswith', ('lo', 'llo'))
self.checkequal(True, 'hello', 'endswith', ('hellox', 'hello'))
self.checkequal(False, 'hello', 'endswith', ())
self.checkequal(True, 'helloworld', 'endswith', ('hellowo',
'rld', 'lowo'), 3)
self.checkequal(False, 'helloworld', 'endswith', ('hellowo', 'ello',
'rld'), 3, -1)
self.checkequal(True, 'hello', 'endswith', ('hell', 'ell'), 0, -1)
self.checkequal(False, 'hello', 'endswith', ('he', 'hel'), 0, 1)
self.checkequal(True, 'hello', 'endswith', ('he', 'hell'), 0, 4)
self.checkraises(TypeError, 'hello', 'endswith', (42,))
def test___contains__(self):
self.checkequal(True, '', '__contains__', '')
self.checkequal(True, 'abc', '__contains__', '')
self.checkequal(False, 'abc', '__contains__', '\0')
self.checkequal(True, '\0abc', '__contains__', '\0')
self.checkequal(True, 'abc\0', '__contains__', '\0')
self.checkequal(True, '\0abc', '__contains__', 'a')
self.checkequal(True, 'asdf', '__contains__', 'asdf')
self.checkequal(False, 'asd', '__contains__', 'asdf')
self.checkequal(False, '', '__contains__', 'asdf')
def test_subscript(self):
self.checkequal(u'a', 'abc', '__getitem__', 0)
self.checkequal(u'c', 'abc', '__getitem__', -1)
self.checkequal(u'a', 'abc', '__getitem__', 0L)
self.checkequal(u'abc', 'abc', '__getitem__', slice(0, 3))
self.checkequal(u'abc', 'abc', '__getitem__', slice(0, 1000))
self.checkequal(u'a', 'abc', '__getitem__', slice(0, 1))
self.checkequal(u'', 'abc', '__getitem__', slice(0, 0))
self.checkraises(TypeError, 'abc', '__getitem__', 'def')
def test_slice(self):
self.checkequal('abc', 'abc', '__getslice__', 0, 1000)
self.checkequal('abc', 'abc', '__getslice__', 0, 3)
self.checkequal('ab', 'abc', '__getslice__', 0, 2)
self.checkequal('bc', 'abc', '__getslice__', 1, 3)
self.checkequal('b', 'abc', '__getslice__', 1, 2)
self.checkequal('', 'abc', '__getslice__', 2, 2)
self.checkequal('', 'abc', '__getslice__', 1000, 1000)
self.checkequal('', 'abc', '__getslice__', 2000, 1000)
self.checkequal('', 'abc', '__getslice__', 2, 1)
self.checkraises(TypeError, 'abc', '__getslice__', 'def')
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
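        # The idea, as a small assumed example: 'abcdef'[::2] == 'ace' should
        # equal ''.join(list('abcdef')[::2]), and likewise for negative steps.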
s = string.ascii_letters + string.digits
indices = (0, None, 1, 3, 41, -1, -2, -37)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
L = list(s)[start:stop:step]
self.checkequal(u"".join(L), s, '__getitem__',
slice(start, stop, step))
def test_mul(self):
self.checkequal('', 'abc', '__mul__', -1)
self.checkequal('', 'abc', '__mul__', 0)
self.checkequal('abc', 'abc', '__mul__', 1)
self.checkequal('abcabcabc', 'abc', '__mul__', 3)
self.checkraises(TypeError, 'abc', '__mul__')
self.checkraises(TypeError, 'abc', '__mul__', '')
# XXX: on a 64-bit system, this doesn't raise an overflow error,
# but either raises a MemoryError, or succeeds (if you have 54TiB)
#self.checkraises(OverflowError, 10000*'abc', '__mul__', 2000000000)
def test_join(self):
# join now works with any sequence type
# moved here, because the argument order is
# different in string.join (see the test in
# test.test_string.StringTest.test_join)
self.checkequal('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
self.checkequal('abcd', '', 'join', ('a', 'b', 'c', 'd'))
self.checkequal('bd', '', 'join', ('', 'b', '', 'd'))
self.checkequal('ac', '', 'join', ('a', '', 'c', ''))
self.checkequal('w x y z', ' ', 'join', Sequence())
self.checkequal('abc', 'a', 'join', ('abc',))
self.checkequal('z', 'a', 'join', UserList(['z']))
if test_support.have_unicode:
self.checkequal(unicode('a.b.c'), unicode('.'), 'join', ['a', 'b', 'c'])
self.checkequal(unicode('a.b.c'), '.', 'join', [unicode('a'), 'b', 'c'])
self.checkequal(unicode('a.b.c'), '.', 'join', ['a', unicode('b'), 'c'])
self.checkequal(unicode('a.b.c'), '.', 'join', ['a', 'b', unicode('c')])
self.checkraises(TypeError, '.', 'join', ['a', unicode('b'), 3])
for i in [5, 25, 125]:
self.checkequal(((('a' * i) + '-') * i)[:-1], '-', 'join',
['a' * i] * i)
self.checkequal(((('a' * i) + '-') * i)[:-1], '-', 'join',
('a' * i,) * i)
self.checkraises(TypeError, ' ', 'join', BadSeq1())
self.checkequal('a b c', ' ', 'join', BadSeq2())
self.checkraises(TypeError, ' ', 'join')
self.checkraises(TypeError, ' ', 'join', 7)
self.checkraises(TypeError, ' ', 'join', Sequence([7, 'hello', 123L]))
try:
def f():
yield 4 + ""
self.fixtype(' ').join(f())
except TypeError, e:
if '+' not in str(e):
self.fail('join() ate exception message')
else:
self.fail('exception not raised')
def test_formatting(self):
self.checkequal('+hello+', '+%s+', '__mod__', 'hello')
self.checkequal('+10+', '+%d+', '__mod__', 10)
self.checkequal('a', "%c", '__mod__', "a")
self.checkequal('a', "%c", '__mod__', "a")
self.checkequal('"', "%c", '__mod__', 34)
self.checkequal('$', "%c", '__mod__', 36)
self.checkequal('10', "%d", '__mod__', 10)
self.checkequal('\x7f', "%c", '__mod__', 0x7f)
for ordinal in (-100, 0x200000):
# unicode raises ValueError, str raises OverflowError
self.checkraises((ValueError, OverflowError), '%c', '__mod__', ordinal)
longvalue = sys.maxint + 10L
slongvalue = str(longvalue)
if slongvalue[-1] in ("L","l"): slongvalue = slongvalue[:-1]
self.checkequal(' 42', '%3ld', '__mod__', 42)
self.checkequal('42', '%d', '__mod__', 42L)
self.checkequal('42', '%d', '__mod__', 42.0)
self.checkequal(slongvalue, '%d', '__mod__', longvalue)
self.checkcall('%d', '__mod__', float(longvalue))
self.checkequal('0042.00', '%07.2f', '__mod__', 42)
self.checkequal('0042.00', '%07.2F', '__mod__', 42)
self.checkraises(TypeError, 'abc', '__mod__')
self.checkraises(TypeError, '%(foo)s', '__mod__', 42)
self.checkraises(TypeError, '%s%s', '__mod__', (42,))
self.checkraises(TypeError, '%c', '__mod__', (None,))
self.checkraises(ValueError, '%(foo', '__mod__', {})
self.checkraises(TypeError, '%(foo)s %(bar)s', '__mod__', ('foo', 42))
self.checkraises(TypeError, '%d', '__mod__', "42") # not numeric
self.checkraises(TypeError, '%d', '__mod__', (42+0j)) # no int/long conversion provided
# argument names with properly nested brackets are supported
self.checkequal('bar', '%((foo))s', '__mod__', {'(foo)': 'bar'})
# 100 is a magic number in PyUnicode_Format, this forces a resize
self.checkequal(103*'a'+'x', '%sx', '__mod__', 103*'a')
self.checkraises(TypeError, '%*s', '__mod__', ('foo', 'bar'))
self.checkraises(TypeError, '%10.*f', '__mod__', ('foo', 42.))
self.checkraises(ValueError, '%10', '__mod__', (42,))
width = int(_testcapi.PY_SSIZE_T_MAX + 1)
if width <= sys.maxint:
self.checkraises(OverflowError, '%*s', '__mod__', (width, ''))
prec = int(_testcapi.INT_MAX + 1)
if prec <= sys.maxint:
self.checkraises(OverflowError, '%.*f', '__mod__', (prec, 1. / 7))
# Issue 15989
width = int(1 << (_testcapi.PY_SSIZE_T_MAX.bit_length() + 1))
if width <= sys.maxint:
self.checkraises(OverflowError, '%*s', '__mod__', (width, ''))
prec = int(_testcapi.UINT_MAX + 1)
if prec <= sys.maxint:
self.checkraises(OverflowError, '%.*f', '__mod__', (prec, 1. / 7))
class X(object): pass
self.checkraises(TypeError, 'abc', '__mod__', X())
def test_floatformatting(self):
# float formatting
for prec in xrange(100):
format = '%%.%if' % prec
value = 0.01
for x in xrange(60):
value = value * 3.14159265359 / 3.0 * 10.0
self.checkcall(format, "__mod__", value)
def test_inplace_rewrites(self):
# Check that strings don't copy and modify cached single-character strings
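        # Background (our reading of the check): CPython caches single-character
        # strings, so e.g. 'A'.lower() must produce 'a' without mutating the
        # shared 'A' object; each pair below re-inspects the one-character
        # source string after the operation.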
self.checkequal('a', 'A', 'lower')
self.checkequal(True, 'A', 'isupper')
self.checkequal('A', 'a', 'upper')
self.checkequal(True, 'a', 'islower')
self.checkequal('a', 'A', 'replace', 'A', 'a')
self.checkequal(True, 'A', 'isupper')
self.checkequal('A', 'a', 'capitalize')
self.checkequal(True, 'a', 'islower')
self.checkequal('A', 'a', 'swapcase')
self.checkequal(True, 'a', 'islower')
self.checkequal('A', 'a', 'title')
self.checkequal(True, 'a', 'islower')
def test_partition(self):
self.checkequal(('this is the par', 'ti', 'tion method'),
'this is the partition method', 'partition', 'ti')
# from raymond's original specification
S = 'http://www.python.org'
self.checkequal(('http', '://', 'www.python.org'), S, 'partition', '://')
self.checkequal(('http://www.python.org', '', ''), S, 'partition', '?')
self.checkequal(('', 'http://', 'www.python.org'), S, 'partition', 'http://')
self.checkequal(('http://www.python.', 'org', ''), S, 'partition', 'org')
self.checkraises(ValueError, S, 'partition', '')
self.checkraises(TypeError, S, 'partition', None)
# mixed use of str and unicode
self.assertEqual('a/b/c'.partition(u'/'), ('a', '/', 'b/c'))
def test_rpartition(self):
self.checkequal(('this is the rparti', 'ti', 'on method'),
'this is the rpartition method', 'rpartition', 'ti')
# from raymond's original specification
S = 'http://www.python.org'
self.checkequal(('http', '://', 'www.python.org'), S, 'rpartition', '://')
self.checkequal(('', '', 'http://www.python.org'), S, 'rpartition', '?')
self.checkequal(('', 'http://', 'www.python.org'), S, 'rpartition', 'http://')
self.checkequal(('http://www.python.', 'org', ''), S, 'rpartition', 'org')
self.checkraises(ValueError, S, 'rpartition', '')
self.checkraises(TypeError, S, 'rpartition', None)
# mixed use of str and unicode
self.assertEqual('a/b/c'.rpartition(u'/'), ('a/b', '/', 'c'))
def test_none_arguments(self):
# issue 11828
s = 'hello'
self.checkequal(2, s, 'find', 'l', None)
self.checkequal(3, s, 'find', 'l', -2, None)
self.checkequal(2, s, 'find', 'l', None, -2)
self.checkequal(0, s, 'find', 'h', None, None)
self.checkequal(3, s, 'rfind', 'l', None)
self.checkequal(3, s, 'rfind', 'l', -2, None)
self.checkequal(2, s, 'rfind', 'l', None, -2)
self.checkequal(0, s, 'rfind', 'h', None, None)
self.checkequal(2, s, 'index', 'l', None)
self.checkequal(3, s, 'index', 'l', -2, None)
self.checkequal(2, s, 'index', 'l', None, -2)
self.checkequal(0, s, 'index', 'h', None, None)
self.checkequal(3, s, 'rindex', 'l', None)
self.checkequal(3, s, 'rindex', 'l', -2, None)
self.checkequal(2, s, 'rindex', 'l', None, -2)
self.checkequal(0, s, 'rindex', 'h', None, None)
self.checkequal(2, s, 'count', 'l', None)
self.checkequal(1, s, 'count', 'l', -2, None)
self.checkequal(1, s, 'count', 'l', None, -2)
self.checkequal(0, s, 'count', 'x', None, None)
self.checkequal(True, s, 'endswith', 'o', None)
self.checkequal(True, s, 'endswith', 'lo', -2, None)
self.checkequal(True, s, 'endswith', 'l', None, -2)
self.checkequal(False, s, 'endswith', 'x', None, None)
self.checkequal(True, s, 'startswith', 'h', None)
self.checkequal(True, s, 'startswith', 'l', -2, None)
self.checkequal(True, s, 'startswith', 'h', None, -2)
self.checkequal(False, s, 'startswith', 'x', None, None)
def test_find_etc_raise_correct_error_messages(self):
# issue 11828
s = 'hello'
x = 'x'
self.assertRaisesRegexp(TypeError, r'\bfind\b', s.find,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\brfind\b', s.rfind,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bindex\b', s.index,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\brindex\b', s.rindex,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'^count\(', s.count,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'^startswith\(', s.startswith,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'^endswith\(', s.endswith,
x, None, None, None)
class MixinStrStringUserStringTest:
# Additional tests for 8bit strings, i.e. str, UserString and
# the string module
def test_maketrans(self):
self.assertEqual(
''.join(map(chr, xrange(256))).replace('abc', 'xyz'),
string.maketrans('abc', 'xyz')
)
self.assertRaises(ValueError, string.maketrans, 'abc', 'xyzw')
def test_translate(self):
table = string.maketrans('abc', 'xyz')
self.checkequal('xyzxyz', 'xyzabcdef', 'translate', table, 'def')
table = string.maketrans('a', 'A')
self.checkequal('Abc', 'abc', 'translate', table)
self.checkequal('xyz', 'xyz', 'translate', table)
self.checkequal('yz', 'xyz', 'translate', table, 'x')
self.checkequal('yx', 'zyzzx', 'translate', None, 'z')
self.checkequal('zyzzx', 'zyzzx', 'translate', None, '')
self.checkequal('zyzzx', 'zyzzx', 'translate', None)
self.checkraises(ValueError, 'xyz', 'translate', 'too short', 'strip')
self.checkraises(ValueError, 'xyz', 'translate', 'too short')
class MixinStrUserStringTest:
# Additional tests that only work with
# 8bit compatible object, i.e. str and UserString
if test_support.have_unicode:
def test_encoding_decoding(self):
codecs = [('rot13', 'uryyb jbeyq'),
('base64', 'aGVsbG8gd29ybGQ=\n'),
('hex', '68656c6c6f20776f726c64'),
('uu', 'begin 666 <data>\n+:&5L;&\\@=V]R;&0 \n \nend\n')]
for encoding, data in codecs:
self.checkequal(data, 'hello world', 'encode', encoding)
self.checkequal('hello world', data, 'decode', encoding)
# zlib is optional, so we make the test optional too...
try:
import zlib
except ImportError:
pass
else:
data = 'x\x9c\xcbH\xcd\xc9\xc9W(\xcf/\xcaI\x01\x00\x1a\x0b\x04]'
self.checkequal(data, 'hello world', 'encode', 'zlib')
self.checkequal('hello world', data, 'decode', 'zlib')
self.checkraises(TypeError, 'xyz', 'decode', 42)
self.checkraises(TypeError, 'xyz', 'encode', 42)
class MixinStrUnicodeTest:
# Additional tests that only work with str and unicode.
def test_bug1001011(self):
# Make sure join returns a NEW object for single item sequences
# involving a subclass.
# Make sure that it is of the appropriate type.
# Check the optimisation still occurs for standard objects.
t = self.type2test
class subclass(t):
pass
s1 = subclass("abcd")
s2 = t().join([s1])
self.assertTrue(s1 is not s2)
self.assertTrue(type(s2) is t)
s1 = t("abcd")
s2 = t().join([s1])
self.assertTrue(s1 is s2)
# Should also test mixed-type join.
if t is unicode:
s1 = subclass("abcd")
s2 = "".join([s1])
self.assertTrue(s1 is not s2)
self.assertTrue(type(s2) is t)
s1 = t("abcd")
s2 = "".join([s1])
self.assertTrue(s1 is s2)
elif t is str:
s1 = subclass("abcd")
s2 = u"".join([s1])
self.assertTrue(s1 is not s2)
self.assertTrue(type(s2) is unicode) # promotes!
s1 = t("abcd")
s2 = u"".join([s1])
self.assertTrue(s1 is not s2)
self.assertTrue(type(s2) is unicode) # promotes!
else:
self.fail("unexpected type for MixinStrUnicodeTest %r" % t)
| apache-2.0 | -1,142,582,501,104,987,000 | 45.946706 | 108 | 0.516106 | false |
sialm/par_king | client/ParKingClient.py | 1 | 11117 | # from i2clibraries import i2c_hmc58831
from socket import socket
from socket import AF_INET
from socket import SOCK_STREAM
from socket import error as socket_error
from time import sleep
from time import time
from struct import pack
from datetime import datetime
from threading import Thread
import config
import ParKingPacket
from i2clibraries import i2c_hmc5883l
import RPi.GPIO as GPIO # import RPi.GPIO module
class ParKingClient:
THRESHOLD = 125
LOWER_THRESHOLD = 10
TIME_FORMAT_STRING = '%Y-%m-%d %H:%M:%S'
#######################################################################################################################
# SETUP METHODS
#######################################################################################################################
def __init__(self, service_port, host_ip, spots_available, data_log_mode=False):
'''
This will create a ParKingClient
:param service_port:
:param host_ip:
:param data_log_mode:
:return:
'''
self.data_log_mode = data_log_mode
if self.data_log_mode:
self.log_file = self.create_logs()
self.data_file = self.create_data_file()
else:
self.log_file = None
self.data_file = None
self.index_for_csv = 1
self.host_ip = host_ip
self.service_port = service_port
self.running = False
self.sock = socket(AF_INET, SOCK_STREAM)
self.connect()
self.send_init_packet(spots_available)
alive_thread = Thread(target=self.keep_alive, args=())
alive_thread.daemon = True
alive_thread.start()
GPIO.setmode(GPIO.BCM)
self.write_to_log('creating sensor 1')
self.sensor_1 = i2c_hmc5883l.i2c_hmc5883l(1)
self.sensor_1.setContinuousMode()
self.sensor_1.setDeclination(0,6)
self.write_to_log('sensor one created')
if not config.ONE_SENSOR:
self.write_to_log('creating sensor 2')
self.sensor_2 = i2c_hmc5883l.i2c_hmc5883l(0)
self.sensor_2.setContinuousMode()
self.sensor_2.setDeclination(0,6)
self.write_to_log('sensor two created')
sleep(2)
(x, y, z) = self.read_from_sensor_1()
self.z_base_line_1 = z
self.last_z_signal_1 = 0
if not config.ONE_SENSOR:
(x, y, z) = self.read_from_sensor_2()
self.z_base_line_2 = z
self.last_z_signal_2 = 0
def create_logs(self):
"""
Creates a unique log file per session
:return: log file
"""
try:
file_name = 'log_file'
log_file = open(file_name, 'w')
return log_file
except Exception as e:
print('Log file error, shutting down.')
self.tear_down()
def create_data_file(self):
"""
Creates a unique log file per session
:return: log file
"""
try:
file_name = 'data.csv'
data_file = open(file_name, 'w')
return data_file
except Exception as e:
print('data file error, shutting down.')
self.tear_down()
def tear_down(self):
"""l
Called upon exit, this should tear down the existing resources that are not managed by daemons
:return:
"""
GPIO.cleanup()
self.write_to_log('teardown started')
if self.sock:
close_packet = ParKingPacket.pack_close_packet(config.UNIQUE_ID)
self.write_to_log('closing connection with server')
self.sock.sendall(close_packet)
self.write_to_log('closing listening socket')
self.sock.close()
if self.data_file:
self.write_to_log('closing data file')
self.data_file.close()
if self.log_file:
self.write_to_log('closing log file')
self.log_file.close()
def connect(self):
"""
This connects to the server. In the event that it fails to connect it will tear down the ParKingClient
:return:
"""
try:
self.write_to_log('opening socket')
self.sock.connect((self.host_ip, self.service_port))
except socket_error as e:
print('Could not create socket, tearing down.')
self.tear_down()
self.write_to_log('socket opened!')
def read_from_sensor_1(self):
"""
This will pull the value from the sensor. If the sensor is at it's max negative value, it will return None
to avoid this we move it to one plus the max negative value. We then shift it everything up so we don't have to
worry about artifacts while crossing zero.
:return:
"""
(x,y,z) = self.sensor_1.getAxes()
if (z is None):
z = -4095
z = z + 4096
return (x,y,z)
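    # Worked example of the shift above (numbers follow the code, not a hardware
    # datasheet): a None reading is first replaced by -4095, and every reading is
    # then offset by +4096, so
    #   z = None   ->  -4095 + 4096 = 1
    #   z = -4095  ->  1
    #   z = 0      ->  4096
    # which keeps every value used for thresholding strictly positive.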
def read_from_sensor_2(self):
"""
This will pull the value from the sensor. If the sensor is at it's max negative value, it will return None
to avoid this we move it to one plus the max negative value. We then shift it everything up so we don't have to
worry about artifacts while crossing zero.
:return:
"""
return (1,1,1)
(x,y,z) = self.sensor_2.getAxes()
if (z is None):
z = -4095
z = z + 4096
return (x,y,z)
#######################################################################################################################
# RUN METHODS
#######################################################################################################################
def run(self):
self.write_to_log('Running')
self.running = True
if config.ONE_SENSOR:
self.run_in_lane()
        elif config.SENSOR_CONFIG == config.TWO_LANE:
goes_in_thread = Thread(target=self.run_in_lane, args=())
goes_in_thread.daemon = True
goes_in_thread.start()
self.run_out_lane()
def run_in_lane(self):
"""
Monitor traffic on one lane.
:return:
"""
self.write_to_log('run_in_lane.')
tripped = False
for i in range(100):
# calibrate sensor
(x,y,z_1) = self.read_from_sensor_1()
self.z_base_line_1 = self.z_base_line_1*.95 + .05*z_1
sleep(0.05)
self.write_to_log('in_lane calibration complete.')
while self.running:
sleep(.5)
(x,y,z_1) = self.read_from_sensor_1()
z_val_1 = abs(z_1 - self.z_base_line_1)
z_max_1 = z_val_1
self.write_to_log('z : ' + str(z_val_1))
self.write_to_data_file(str(z_val_1))
if z_val_1 > self.THRESHOLD:
tripped = True
if z_val_1 < self.LOWER_THRESHOLD:
if tripped:
self.write_to_log('in lane : sending goes ins packet')
t = Thread(target=self.send_goes_in_packet, args=(z_max_1, ))
t.daemon = True
t.start()
tripped = False
else:
self.z_base_line_1 = self.z_base_line_1*.95 + .05*z_1
def run_out_lane(self):
self.write_to_log('run_out_lane.')
for i in range(100):
# calibrate sensor
(x,y,z_2) = self.read_from_sensor_2()
self.z_base_line_2 = self.z_base_line_2*.95 + .05*z_2
sleep(0.05)
self.write_to_log('out_lane calibration complete.')
while self.running:
sleep(0.5)
(x,y,z_2) = self.read_from_sensor_2()
z_val_2 = z_2 - self.z_base_line_2
z_max_2 = z_val_2
while z_val_2 > self.THRESHOLD:
sleep(0.05)
(x,y,z_2) = self.read_from_sensor_2()
z_val_2 = z_2 - self.z_base_line_2
z_max_2 = max(z_val_2, z_max_2)
if z_val_2 < self.THRESHOLD:
self.write_to_log('out lane: sending goes outs packet')
t = Thread(target=self.send_goes_out_packet, args=(z_max_2, ))
t.daemon = True
t.start()
self.z_base_line_2 = self.z_base_line_2*.95 + .05*z_2
def keep_alive(self):
"""
        This should run in a separate thread; it handles the keep-alive message cadence to the server.
:return:
"""
while True:
self.send_alive_packet()
sleep(config.ALIVE_SLEEP)
#######################################################################################################################
# NETWORK METHODS
#######################################################################################################################
def send_init_packet(self, spots_available):
"""
This should be called once upon creation and will identify the sensor to the server.
:param spots_available:
:return:
"""
self.write_to_log('sending init packet')
packet = ParKingPacket.pack_init_packet(config.UNIQUE_ID, config.CAPACITY, spots_available)
self.sock.sendall(packet)
self.write_to_log('init packet send')
def send_goes_out_packet(self, z_value):
"""
This will send a goes_outs packet to the server
:param z_value:
:return:
"""
packet = ParKingPacket.pack_out_packet(config.UNIQUE_ID, z_value)
self.sock.sendall(packet)
def send_goes_in_packet(self, z_value):
"""
This will send a goes_ins packet to the server
:param z_value:
:return:
"""
packet = ParKingPacket.pack_in_packet(config.UNIQUE_ID, z_value)
self.sock.sendall(packet)
def send_alive_packet(self):
"""
This will send a keep_alive packet to the server
:return:
"""
packet = ParKingPacket.pack_alive_packet(config.UNIQUE_ID)
self.sock.sendall(packet)
#######################################################################################################################
# LOGGING METHODS
#######################################################################################################################
def get_time_stamp(self):
return datetime.fromtimestamp(time()).strftime(self.TIME_FORMAT_STRING)
def write_to_log(self, message):
message = self.get_time_stamp() + ' ' + message + '\n'
if self.data_log_mode:
self.log_file.write(message)
self.log_file.flush()
def write_to_data_file(self, value):
if self.data_log_mode:
message = str(self.index_for_csv) + ',' + value + '\n'
self.data_file.write(message)
self.data_file.flush()
self.index_for_csv = self.index_for_csv + 1 | mit | -7,633,728,508,509,363,000 | 34.749196 | 119 | 0.497976 | false |
rbaghdadi/ISIR | utils/speedup_model/src/old/fai_train.py | 1 | 2143 | from data_loader import *
from model import *
from model_bn import *
from torch.utils.data.sampler import SubsetRandomSampler
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import fastai as fai
from fastai.basic_data import DataLoader
from fire import Fire
import matplotlib.pyplot as plt
import pickle
def train_dev_split(dataset, batch_size, num_workers, maxsize, split_factor=10, seed=42):
indices = np.random.RandomState(seed=seed).permutation(maxsize)
val_indices, train_indices = indices[:maxsize//split_factor], indices[maxsize//split_factor:]
train_dl = DataLoader(DatasetFromHdf5(dataset, maxsize=len(train_indices)),
batch_size=batch_size,
sampler=SubsetRandomSampler(train_indices),
num_workers=num_workers)
val_dl = DataLoader(DatasetFromHdf5(dataset, maxsize=len(val_indices)),
batch_size=batch_size,
sampler=SubsetRandomSampler(val_indices),
num_workers=num_workers)
return train_dl, val_dl
def main(batch_size=2048, num_epochs=400,
num_workers=8, algorithm='adam',
maxsize=50000, new=True, dataset='data/speedup_dataset.h5',
batch_norm=False, filename='data/results.pkl',lr=0.001):
train_dl, val_dl = train_dev_split(dataset, batch_size, num_workers, maxsize)
db = fai.basic_data.DataBunch(train_dl, val_dl)
input_size = train_dl.dataset.X.shape[1]
output_size = train_dl.dataset.Y.shape[1]
model_name = "model " + algorithm
model= None
if batch_norm:
model_name += " batch_norm"
model = Model_BN(input_size, output_size)
else:
model = Model(input_size, output_size)
criterion = nn.MSELoss()
l = fai.Learner(db, model, loss_func=criterion)
    if algorithm == 'SGD':
        l.opt_func = optim.SGD
        optimizer = optim.SGD(model.parameters(), lr=lr)
    else:
        optimizer = optim.Adam(model.parameters(), lr=lr)
    dl = {'train': train_dl, 'val': val_dl}
    # train_model is assumed to be provided by the star imports above.
    model, losses = train_model(model, criterion, optimizer, dl, num_epochs)
if __name__=='__main__':
Fire() | mit | -8,885,027,698,291,832,000 | 28.369863 | 97 | 0.640224 | false |
MontrealCorpusTools/PolyglotDB | tests/test_io_buckeye.py | 3 | 4891 | import pytest
import os
from polyglotdb.io import inspect_buckeye
from polyglotdb.io.parsers.buckeye import read_phones, read_words
from polyglotdb import CorpusContext
def test_load_phones(buckeye_test_dir):
expected_phones = [('{B_TRANS}', 0.0, 2.609000),
('IVER', 2.609000, 2.714347),
('eh', 2.714347, 2.753000),
('s', 2.753000, 2.892000),
('IVER', 2.892000, 3.206890),
('dh', 3.206890, 3.244160),
('ae', 3.244160, 3.327000),
('s', 3.327000, 3.377192),
('s', 3.377192, 3.438544),
('ae', 3.438544, 3.526272),
('tq', 3.526272, 3.614398),
('VOCNOISE', 3.614398, 3.673454),
('ah', 3.673454, 3.718614),
('w', 3.718614, 3.771112),
('ah', 3.771112, 3.851000),
('dx', 3.851000, 3.881000),
('eh', 3.881000, 3.941000),
('v', 3.941000, 4.001000),
('er', 4.001000, 4.036022),
('ey', 4.036022, 4.111000),
('k', 4.111000, 4.246000),
('ao', 4.246000, 4.326000),
('l', 4.326000, 4.369000),
('ah', 4.369000, 4.443707),
('t', 4.443707, 4.501000),
]
phones = read_phones(os.path.join(buckeye_test_dir, 'test.phones'))
for i, p in enumerate(expected_phones):
assert (p == phones[i])
def test_load_words(buckeye_test_dir):
words = read_words(os.path.join(buckeye_test_dir, 'test.words'))
expected_words = [
{'spelling': '{B_TRANS}', 'begin': 0, 'end': 2.609000, 'transcription': None, 'surface_transcription': None,
'category': None},
{'spelling': '<IVER>', 'begin': 2.609000, 'end': 2.714347, 'transcription': None, 'surface_transcription': None,
'category': None},
{'spelling': 'that\'s', 'begin': 2.714347, 'end': 2.892096, 'transcription': 'dh ae t s',
'surface_transcription': 'eh s', 'category': 'DT_VBZ'},
{'spelling': '<IVER>', 'begin': 2.892096, 'end': 3.206317, 'transcription': None, 'surface_transcription': None,
'category': None},
{'spelling': 'that\'s', 'begin': 3.206317, 'end': 3.377192, 'transcription': 'dh ae t s',
'surface_transcription': 'dh ae s', 'category': 'DT_VBZ'},
{'spelling': 'that', 'begin': 3.377192, 'end': 3.614398, 'transcription': 'dh ae t',
'surface_transcription': 's ae tq', 'category': 'IN'},
{'spelling': '<VOCNOISE>', 'begin': 3.614398, 'end': 3.673454, 'transcription': None,
'surface_transcription': None, 'category': None},
{'spelling': 'whatever', 'begin': 3.673454, 'end': 4.036022, 'transcription': 'w ah t eh v er',
'surface_transcription': 'ah w ah dx eh v er', 'category': 'WDT'},
{'spelling': 'they', 'begin': 4.036022, 'end': 4.111000, 'transcription': 'dh ey',
'surface_transcription': 'ey', 'category': 'PRP'},
{'spelling': 'call', 'begin': 4.111000, 'end': 4.369000, 'transcription': 'k aa l',
'surface_transcription': 'k ao l', 'category': 'VBP'},
{'spelling': 'it', 'begin': 4.369000, 'end': 4.501000, 'transcription': 'ih t',
'surface_transcription': 'ah t', 'category': 'PRP'}]
for i, w in enumerate(expected_words):
assert (w == words[i])
def test_load_discourse_buckeye(graph_db, buckeye_test_dir):
with CorpusContext('discourse_buckeye', **graph_db) as c:
c.reset()
word_path = os.path.join(buckeye_test_dir, 'test.words')
parser = inspect_buckeye(word_path)
c.load(parser, word_path)
assert (c.hierarchy.has_type_property('word', 'transcription'))
q = c.query_graph(c.phone).filter(c.phone.label == 's')
assert (q.count() == 3)
q = q.columns(c.phone.speaker.name.column_name('speaker'))
print(q.cypher())
results = q.all()
print(results)
assert (all(x['speaker'] == 'tes' for x in results))
def test_load_directory_buckeye(graph_db, buckeye_test_dir):
with CorpusContext('directory_buckeye', **graph_db) as c:
c.reset()
parser = inspect_buckeye(buckeye_test_dir)
c.load(parser, buckeye_test_dir)
q1 = c.query_graph(c.word).filter(c.word.label == 'that\'s')
assert (q1.count() == 2)
q = c.query_graph(c.phone).filter(c.phone.label == 's')
assert (q.count() == 3)
q = q.columns(c.phone.speaker.name.column_name('speaker'))
print(q.cypher())
results = q.all()
print(results)
assert (all(x['speaker'] == 'tes' for x in results))
| mit | 3,813,449,094,294,677,000 | 44.71028 | 120 | 0.521366 | false |
patacrep/patacrep | patacrep/songs/syntax.py | 1 | 1731 | """Generic parsing classes and methods"""
import functools
import logging
from patacrep.songs import errors
LOGGER = logging.getLogger()
class Parser:
"""Parser class"""
# pylint: disable=too-few-public-methods
def __init__(self):
self.filename = "" # Will be overloaded
self._errors = []
@staticmethod
def __find_column(token):
"""Return the column of ``token``."""
last_cr = token.lexer.lexdata.rfind('\n', 0, token.lexpos)
if last_cr < 0:
last_cr = 0
column = (token.lexpos - last_cr) + 1
return column
def error(self, *, line=None, column=None, message=""):
"""Record and display an error message"""
self._errors.append(functools.partial(
errors.SongSyntaxError,
line=line,
message=message,
))
coordinates = []
if line is not None:
coordinates.append("line {}".format(line))
if column is not None:
coordinates.append("column {}".format(column))
text = ", ".join(coordinates)
if message and text:
text += ": " + message
elif message:
text += message
else:
text += "."
if self.filename is None:
LOGGER.warning(text)
else:
LOGGER.warning("Song {}: {}".format(self.filename, text))
def p_error(self, token):
"""Manage parsing errors."""
if token is None:
self.error(message="Unexpected end of file.")
else:
self.error(
message="Syntax error",
line=token.lineno,
column=self.__find_column(token),
)
| gpl-2.0 | 4,681,234,503,793,055,000 | 27.377049 | 69 | 0.532062 | false |
shravan-shandilya/sendin.co.in | backend/bitcoin_server.py | 1 | 2950 | #!/usr/bin/python
from flask import Flask, url_for
from bitcoinlib.wallets import HDWallet
import time,random,string,requests,json,threading,icici
app = Flask(__name__)
watch_list = {}
completed_list = []
watch_daemon = None
wallet = HDWallet.create(name=time.ctime(),network='testnet')
json_data=open("credentials.json").read()
creds = json.loads(json_data)
icici.init(creds['id'],creds['pass'])
@app.route('/newaddress')
def new_address():
return '{"address":"%s"}'%(wallet.new_account().address)
@app.route('/watch/<address_amount>')
def api_article(address_amount):
address = address_amount.split("_")[0]
amount_btc = address_amount.split("_")[1]
amount_inr = address_amount.split("_")[2]
token = ''.join(random.choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for _ in range(10))
watch_list[token]= {}
watch_list[token]["address"] = address
watch_list[token]["amount_btc"] = amount_btc
watch_list[token]["amount_inr"] = amount_inr
return '{"token":"%s"}'%(token)
@app.route('/status/<token>')
def send_status(token):
if token == "test":
return '{"status":"true","amount":"500"}'
if token in completed_list:
return '{"status":"true","amount":"%s"}'%(watch_list[token]["amount_inr"])
try:
resp = requests.get("http://tbtc.blockr.io/api/v1/address/info/%s"%(watch_list[token]['address']))
data = json.loads(resp.text)['data']
if (int(data["nb_txs"]) > 0 ) & (float(data["balance"]) >= float(watch_list[token]["amount_btc"])):
completed_list.append(token)
return '{"status":"true","amount":"%s"}'%(watch_list[token]["amount_inr"])
else:
return '{"status":"false"}'
except KeyError:
return '{"status":"false"}'
@app.route('/upi/<token_upi_amount>')
def send_in_upi(token_upi_amount):
token_upi_amount = str(token_upi_amount)
token = token_upi_amount.split("_")[0]
upi = token_upi_amount.split("_")[1]
amount = token_upi_amount.split("_")[2]
# if token in completed_list:
#watch_list[token]["upi"] = upi
receipt = icici.pay(upi,amount)
print receipt
if receipt[0] :
return '{"status":"true","receipt":"%s"}'%(receipt[1])
else:
return '{"status":"false"}'
# else:
# return '{"status":"false"}'
@app.route('/upi_status/<receipt>')
def send_upi_status(receipt):
return '{"status":"true"}'
def watch_daemon():
while True:
print "Inside daemon"
        for token, watch in watch_list.items():
            resp = requests.get("http://tbtc.blockr.io/api/v1/address/info/%s" % (watch['address']))
            data = json.loads(resp.text)['data']
            print data
            if int(data["nb_txs"]) > 0 and float(data["balance"]) >= float(watch["amount_btc"]):
                completed_list.append(token)
        time.sleep(3)
if __name__ == '__main__':
app.run()
#watch_daemon = threading.Thread(target='watch_daemon')
#watch_daemon.start()
| gpl-3.0 | -5,382,036,952,665,057,000 | 34.119048 | 118 | 0.606441 | false |
rossburton/yocto-autobuilder | lib/python2.7/site-packages/SQLAlchemy-0.8.0b2-py2.7-linux-x86_64.egg/sqlalchemy/testing/profiling.py | 6 | 9771 | """Profiling support for unit and performance tests.
These are special purpose profiling methods which operate
in a more fine-grained way than nose's profiling plugin.
"""
import os
import sys
from .util import gc_collect, decorator
from . import config
from nose import SkipTest
import pstats
import time
import collections
from .. import util
try:
import cProfile
except ImportError:
cProfile = None
from ..util.compat import jython, pypy, win32
_current_test = None
def profiled(target=None, **target_opts):
"""Function profiling.
@profiled()
or
@profiled(report=True, sort=('calls',), limit=20)
Outputs profiling info for a decorated function.
"""
profile_config = {'targets': set(),
'report': True,
'print_callers': False,
'print_callees': False,
'graphic': False,
'sort': ('time', 'calls'),
'limit': None}
if target is None:
target = 'anonymous_target'
filename = "%s.prof" % target
@decorator
def decorate(fn, *args, **kw):
elapsed, load_stats, result = _profile(
filename, fn, *args, **kw)
graphic = target_opts.get('graphic', profile_config['graphic'])
if graphic:
os.system("runsnake %s" % filename)
else:
report = target_opts.get('report', profile_config['report'])
if report:
sort_ = target_opts.get('sort', profile_config['sort'])
limit = target_opts.get('limit', profile_config['limit'])
print ("Profile report for target '%s' (%s)" % (
target, filename)
)
stats = load_stats()
stats.sort_stats(*sort_)
if limit:
stats.print_stats(limit)
else:
stats.print_stats()
print_callers = target_opts.get(
'print_callers', profile_config['print_callers'])
if print_callers:
stats.print_callers()
print_callees = target_opts.get(
'print_callees', profile_config['print_callees'])
if print_callees:
stats.print_callees()
os.unlink(filename)
return result
return decorate
class ProfileStatsFile(object):
""""Store per-platform/fn profiling results in a file.
We're still targeting Py2.5, 2.4 on 0.7 with no dependencies,
so no json lib :( need to roll something silly
"""
def __init__(self, filename):
self.write = (
config.options is not None and
config.options.write_profiles
)
self.fname = os.path.abspath(filename)
self.short_fname = os.path.split(self.fname)[-1]
self.data = collections.defaultdict(
lambda: collections.defaultdict(dict))
self._read()
if self.write:
# rewrite for the case where features changed,
# etc.
self._write()
@util.memoized_property
def platform_key(self):
dbapi_key = config.db.name + "_" + config.db.driver
# keep it at 2.7, 3.1, 3.2, etc. for now.
py_version = '.'.join([str(v) for v in sys.version_info[0:2]])
platform_tokens = [py_version]
platform_tokens.append(dbapi_key)
if jython:
platform_tokens.append("jython")
if pypy:
platform_tokens.append("pypy")
if win32:
platform_tokens.append("win")
_has_cext = config.requirements._has_cextensions()
platform_tokens.append(_has_cext and "cextensions" or "nocextensions")
return "_".join(platform_tokens)
def has_stats(self):
test_key = _current_test
return (
test_key in self.data and
self.platform_key in self.data[test_key]
)
def result(self, callcount):
test_key = _current_test
per_fn = self.data[test_key]
per_platform = per_fn[self.platform_key]
if 'counts' not in per_platform:
per_platform['counts'] = counts = []
else:
counts = per_platform['counts']
if 'current_count' not in per_platform:
per_platform['current_count'] = current_count = 0
else:
current_count = per_platform['current_count']
has_count = len(counts) > current_count
if not has_count:
counts.append(callcount)
if self.write:
self._write()
result = None
else:
result = per_platform['lineno'], counts[current_count]
per_platform['current_count'] += 1
return result
def _header(self):
return \
"# %s\n"\
"# This file is written out on a per-environment basis.\n"\
"# For each test in aaa_profiling, the corresponding function and \n"\
"# environment is located within this file. If it doesn't exist,\n"\
"# the test is skipped.\n"\
"# If a callcount does exist, it is compared to what we received. \n"\
"# assertions are raised if the counts do not match.\n"\
"# \n"\
"# To add a new callcount test, apply the function_call_count \n"\
"# decorator and re-run the tests using the --write-profiles \n"\
"# option - this file will be rewritten including the new count.\n"\
"# \n"\
"" % (self.fname)
def _read(self):
try:
profile_f = open(self.fname)
except IOError:
return
for lineno, line in enumerate(profile_f):
line = line.strip()
if not line or line.startswith("#"):
continue
test_key, platform_key, counts = line.split()
per_fn = self.data[test_key]
per_platform = per_fn[platform_key]
c = [int(count) for count in counts.split(",")]
per_platform['counts'] = c
per_platform['lineno'] = lineno + 1
per_platform['current_count'] = 0
profile_f.close()
def _write(self):
print("Writing profile file %s" % self.fname)
profile_f = open(self.fname, "w")
profile_f.write(self._header())
for test_key in sorted(self.data):
per_fn = self.data[test_key]
profile_f.write("\n# TEST: %s\n\n" % test_key)
for platform_key in sorted(per_fn):
per_platform = per_fn[platform_key]
c = ",".join(str(count) for count in per_platform['counts'])
profile_f.write("%s %s %s\n" % (test_key, platform_key, c))
profile_f.close()
from sqlalchemy.util.compat import update_wrapper
def function_call_count(variance=0.05):
"""Assert a target for a test case's function call count.
The main purpose of this assertion is to detect changes in
callcounts for various functions - the actual number is not as important.
Callcounts are stored in a file keyed to Python version and OS platform
information. This file is generated automatically for new tests,
and versioned so that unexpected changes in callcounts will be detected.
"""
def decorate(fn):
def wrap(*args, **kw):
if cProfile is None:
raise SkipTest("cProfile is not installed")
if not _profile_stats.has_stats() and not _profile_stats.write:
# run the function anyway, to support dependent tests
# (not a great idea but we have these in test_zoomark)
fn(*args, **kw)
raise SkipTest("No profiling stats available on this "
"platform for this function. Run tests with "
"--write-profiles to add statistics to %s for "
"this platform." % _profile_stats.short_fname)
gc_collect()
timespent, load_stats, fn_result = _profile(
fn, *args, **kw
)
stats = load_stats()
callcount = stats.total_calls
expected = _profile_stats.result(callcount)
if expected is None:
expected_count = None
else:
line_no, expected_count = expected
print("Pstats calls: %d Expected %s" % (
callcount,
expected_count
)
)
stats.print_stats()
#stats.print_callers()
if expected_count:
deviance = int(callcount * variance)
if abs(callcount - expected_count) > deviance:
raise AssertionError(
"Adjusted function call count %s not within %s%% "
"of expected %s. (Delete line %d of file %s to "
"regenerate this callcount, when tests are run "
"with --write-profiles.)"
% (
callcount, (variance * 100),
expected_count, line_no,
_profile_stats.fname))
return fn_result
return update_wrapper(wrap, fn)
return decorate
def _profile(fn, *args, **kw):
filename = "%s.prof" % fn.__name__
def load_stats():
st = pstats.Stats(filename)
os.unlink(filename)
return st
began = time.time()
cProfile.runctx('result = fn(*args, **kw)', globals(), locals(),
filename=filename)
ended = time.time()
return ended - began, load_stats, locals()['result']
| gpl-2.0 | -1,129,422,760,990,505,600 | 32.234694 | 78 | 0.540886 | false |
idjaw/netman | tests/core/session_storage_test.py | 4 | 2237 | # Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from hamcrest import assert_that, is_, none
from netman.adapters.memory_session_storage import MemorySessionStorage
from netman.core.objects.exceptions import SessionAlreadyExists, UnknownSession
import mock
class SessionStorageTest(TestCase):
def setUp(self):
self.session_source = MemorySessionStorage()
self.switch_descriptor = mock.Mock()
def test_add_session(self):
self.session_source.add('some_session', self.switch_descriptor)
assert_that(self.session_source.get('some_session'),
is_(self.switch_descriptor))
def test_get_session(self):
self.session_source.add('some_session', self.switch_descriptor)
assert_that(self.session_source.get('some_session'), is_(self.switch_descriptor))
def test_get_nonexistent_session_is_none(self):
assert_that(self.session_source.get('nonexistent_session'), is_(none()))
def test_remove_session(self):
self.session_source.add('some_session', self.switch_descriptor)
self.session_source.remove('some_session')
assert_that(self.session_source.get('some_session'), is_(none()))
def test_add_session_that_already_exists_fails(self):
self.session_source.add('some_session', self.switch_descriptor)
with self.assertRaises(SessionAlreadyExists):
self.session_source.add('some_session', self.switch_descriptor)
def test_remove_nonexistent_session_fails(self):
self.session_source.add('other_session', self.switch_descriptor)
with self.assertRaises(UnknownSession):
self.session_source.remove('some_session')
| apache-2.0 | -4,559,705,732,787,534,000 | 42.019231 | 89 | 0.721055 | false |
dotbot-io/webapp | config.py | 1 | 1227 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
BOOTSTRAP_SERVE_LOCAL = True
CATKIN_FOLDER = os.environ.get('CATKIN_FOLDER') or '/Users/ludus/develop/dotbot_ws/ros/'
DOTBOT_PACKAGE_NAME = os.environ.get('DOTBOT_PACKAGE_NAME') or 'dotbot_app'
ROS_ENVS = os.environ.get('ROS_ENVS') or '/Users/ludus/develop/setup/source_dotbot_ros.sh'
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
| gpl-2.0 | 1,334,177,387,230,055,400 | 34.057143 | 94 | 0.677262 | false |
azureplus/hue | desktop/core/ext-py/Django-1.6.10/tests/fixtures/models.py | 63 | 3312 | """
37. Fixtures.
Fixtures are a way of loading data into the database in bulk. Fixture data
can be stored in any serializable format (including JSON and XML). Fixtures
are identified by name, and are stored in either a directory named 'fixtures'
in the application directory, or in one of the directories named in the
``FIXTURE_DIRS`` setting.
"""
from django.contrib.auth.models import Permission
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Category(models.Model):
title = models.CharField(max_length=100)
description = models.TextField()
def __str__(self):
return self.title
class Meta:
ordering = ('title',)
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=100, default='Default headline')
pub_date = models.DateTimeField()
def __str__(self):
return self.headline
class Meta:
ordering = ('-pub_date', 'headline')
@python_2_unicode_compatible
class Blog(models.Model):
name = models.CharField(max_length=100)
featured = models.ForeignKey(Article, related_name='fixtures_featured_set')
articles = models.ManyToManyField(Article, blank=True,
related_name='fixtures_articles_set')
def __str__(self):
return self.name
@python_2_unicode_compatible
class Tag(models.Model):
name = models.CharField(max_length=100)
tagged_type = models.ForeignKey(ContentType, related_name="fixtures_tag_set")
tagged_id = models.PositiveIntegerField(default=0)
tagged = generic.GenericForeignKey(ct_field='tagged_type',
fk_field='tagged_id')
def __str__(self):
return '<%s: %s> tagged "%s"' % (self.tagged.__class__.__name__,
self.tagged, self.name)
class PersonManager(models.Manager):
def get_by_natural_key(self, name):
return self.get(name=name)
@python_2_unicode_compatible
class Person(models.Model):
objects = PersonManager()
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
def natural_key(self):
return (self.name,)
class SpyManager(PersonManager):
def get_queryset(self):
return super(SpyManager, self).get_queryset().filter(cover_blown=False)
class Spy(Person):
objects = SpyManager()
cover_blown = models.BooleanField(default=False)
@python_2_unicode_compatible
class Visa(models.Model):
person = models.ForeignKey(Person)
permissions = models.ManyToManyField(Permission, blank=True)
def __str__(self):
return '%s %s' % (self.person.name,
', '.join(p.name for p in self.permissions.all()))
@python_2_unicode_compatible
class Book(models.Model):
name = models.CharField(max_length=100)
authors = models.ManyToManyField(Person)
def __str__(self):
authors = ' and '.join(a.name for a in self.authors.all())
return '%s by %s' % (self.name, authors) if authors else self.name
class Meta:
ordering = ('name',)
| apache-2.0 | 4,860,130,536,462,389,000 | 29.953271 | 81 | 0.664251 | false |
CalebSLane/openelisglobal-core | liquibase/OE5.1/testCatalogKenya/Scripts/dictionary.py | 9 | 1356 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def get_comma_split_names( name ):
split_name_list = [name]
if ',' in name:
split_name_list = name.split(",")
elif ';' in name:
split_name_list = name.split(";")
for i in range(0, len(split_name_list)):
split_name_list[i] = split_name_list[i].strip()
return split_name_list
def esc_name(name):
if "'" in name:
return "$$" + name.strip() + "$$"
else:
return "'" + name.strip() + "'"
old = []
old_file = open("currentDictNames.txt")
new_file = open("selectList.txt")
result = open("output/dictionaryResult.sql",'w')
for line in old_file:
old.append(line.strip())
old_file.close()
for line in new_file:
if len(line) > 1:
values = get_comma_split_names(line)
for value in values:
if value.strip() not in old:
old.append(value.strip())
result.write("INSERT INTO clinlims.dictionary ( id, is_active, dict_entry, lastupdated, dictionary_category_id ) \n\t")
result.write("VALUES ( nextval( 'dictionary_seq' ) , 'Y' , " + esc_name(value) + " , now(), ( select id from clinlims.dictionary_category where description = 'Haiti Lab' ));\n")
result.close()
print "Done check dictionaryResult.sql for values"
| mpl-2.0 | -9,184,392,477,490,277,000 | 26.25 | 193 | 0.570059 | false |
getlinky/linky | backend/fetcher/tests.py | 2 | 1838 | import unittest
import responses
try:
# 1. Required when running via manage.py
from fetcher.fetcher import fetch, HTTPError
except ImportError:
# 2. Required when running via python fetcher/tests.py
from fetcher import fetch, HTTPError
class TestFetcher(unittest.TestCase):
@responses.activate
def test_fetch_recieves_data(self):
'''
Fetch with a domain that has both a title and a description
'''
body = '''
<title>Example</title>
<meta content="An example description" name="description">
'''
responses.add(responses.GET,
'http://example.com',
body=body,
status=200,
content_type='text/html')
f = fetch('http://example.com')
self.assertEqual('Example', f.title)
self.assertEqual('An example description', f.description)
@responses.activate
def test_fetch_without_data(self):
'''
Fetch with a domain that doesn't have a title or description
'''
responses.add(responses.GET,
'http://example.com',
body='',
status=200,
content_type='text/html')
f = fetch('http://example.com')
self.assertEqual('', f.title)
self.assertEqual('', f.description)
@responses.activate
def test_fetch_bad_status_code(self):
'''
Fetch a URL that returns a bad status code
'''
responses.add(responses.GET,
'http://example.com',
status=404,
content_type='text/html')
with self.assertRaises(HTTPError):
fetch('http://example.com')
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | 8,535,452,428,260,397,000 | 28.645161 | 68 | 0.54407 | false |
josenavas/QiiTa | qiita_pet/handlers/stats.py | 1 | 2654 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from random import choice
from tornado.gen import coroutine, Task
from qiita_core.util import execute_as_transaction
from qiita_core.qiita_settings import qiita_config, r_client
from qiita_db.study import Study
from .base_handlers import BaseHandler
class StatsHandler(BaseHandler):
@execute_as_transaction
def _get_stats(self, callback):
stats = {}
# checking values from redis
portal = qiita_config.portal
vals = [
('number_studies', r_client.hgetall),
('number_of_samples', r_client.hgetall),
('num_users', r_client.get),
('lat_longs', r_client.get),
('num_studies_ebi', r_client.get),
('num_samples_ebi', r_client.get),
('number_samples_ebi_prep', r_client.get),
('img', r_client.get),
('time', r_client.get)]
for k, f in vals:
redis_key = '%s:stats:%s' % (portal, k)
stats[k] = f(redis_key)
callback(stats)
@coroutine
@execute_as_transaction
def get(self):
stats = yield Task(self._get_stats)
# Pull a random public study from the database
public_studies = Study.get_by_status('public')
study = choice(list(public_studies)) if public_studies else None
if study is None:
random_study_info = None
random_study_title = None
random_study_id = None
else:
random_study_info = study.info
random_study_title = study.title
random_study_id = study.id
self.render('stats.html',
number_studies=stats['number_studies'],
number_of_samples=stats['number_of_samples'],
num_users=stats['num_users'],
lat_longs=eval(stats['lat_longs']),
num_studies_ebi=stats['num_studies_ebi'],
num_samples_ebi=stats['num_samples_ebi'],
number_samples_ebi_prep=stats['number_samples_ebi_prep'],
img=stats['img'], time=stats['time'],
random_study_info=random_study_info,
random_study_title=random_study_title,
random_study_id=random_study_id)
| bsd-3-clause | 7,000,540,263,346,745,000 | 35.861111 | 79 | 0.541447 | false |
MartinHjelmare/home-assistant | homeassistant/components/amcrest/switch.py | 4 | 2486 | """Support for toggling Amcrest IP camera settings."""
import logging
from homeassistant.const import CONF_NAME, CONF_SWITCHES
from homeassistant.helpers.entity import ToggleEntity
from .const import DATA_AMCREST
_LOGGER = logging.getLogger(__name__)
# Switch types are defined like: Name, icon
SWITCHES = {
'motion_detection': ['Motion Detection', 'mdi:run-fast'],
'motion_recording': ['Motion Recording', 'mdi:record-rec']
}
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the IP Amcrest camera switch platform."""
if discovery_info is None:
return
name = discovery_info[CONF_NAME]
device = hass.data[DATA_AMCREST]['devices'][name]
async_add_entities(
[AmcrestSwitch(name, device, setting)
for setting in discovery_info[CONF_SWITCHES]],
True)
class AmcrestSwitch(ToggleEntity):
"""Representation of an Amcrest IP camera switch."""
def __init__(self, name, device, setting):
"""Initialize the Amcrest switch."""
self._name = '{} {}'.format(name, SWITCHES[setting][0])
self._api = device.api
self._setting = setting
self._state = False
self._icon = SWITCHES[setting][1]
@property
def name(self):
"""Return the name of the switch if any."""
return self._name
@property
def is_on(self):
"""Return true if switch is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn setting on."""
if self._setting == 'motion_detection':
self._api.motion_detection = 'true'
elif self._setting == 'motion_recording':
self._api.motion_recording = 'true'
def turn_off(self, **kwargs):
"""Turn setting off."""
if self._setting == 'motion_detection':
self._api.motion_detection = 'false'
elif self._setting == 'motion_recording':
self._api.motion_recording = 'false'
def update(self):
"""Update setting state."""
_LOGGER.debug("Polling state for setting: %s ", self._name)
if self._setting == 'motion_detection':
detection = self._api.is_motion_detector_on()
elif self._setting == 'motion_recording':
detection = self._api.is_record_on_motion_detection()
self._state = detection
@property
def icon(self):
"""Return the icon for the switch."""
return self._icon
| apache-2.0 | -4,344,516,609,477,531,600 | 29.691358 | 67 | 0.612631 | false |
jjdmol/LOFAR | CEP/Calibration/BBSControl/scripts/solverexport.py | 1 | 2573 | #!/usr/bin/env python
#
# Script that generates ASCII output from solver statistics table
#
#
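# Illustrative invocation (the MS/solver table name is made up):
#   python solverexport.py L12345_SB000_solver.MS mysolverstat.txt
# The second argument is optional and defaults to solverstat.txt.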
import sys
import pyrap.tables as pt
import lofar.bbs.solverquery as sq
# Variables
MSfilename=""
filename="solverstat.txt"
# TODO
# parse command arguments
if len(sys.argv)==1:
print "No MS filename given"
sys.exit(0)
elif len(sys.argv)==2:
MSfilename=sys.argv[1]
elif len(sys.argv)==3:
MSfilename=sys.argv[1]
filename=sys.argv[2]
# open MS through solverquery
# solverquery object
print "MSfilename = ", MSfilename
solverstat=sq.SolverQuery()
solverstat=solverstat.open(MSfilename) # open the solver statistics table
print "tableType = ", solverstat.getType()
# get unique timeslots
print "numTimeslots = ", solverstat.getNumTimeSlots()
timeslots=[]
timeslots=solverstat.getTimeSlots() #.getcol("STARTTIME")
# Open output file for writing
outfile=open(filename, "w")
# get STARTFREQ and ENDFREQ
startfreq=solverstat.getStartFreqs()[0]
endfreq=solverstat.getEndFreqs()[0]
print "startfreq = ", startfreq # DEBUG
print "endfreq = ", endfreq # DEBUG
#print "timeslots.nrows() = ", timeslots.nrows()
for i in range(0, timeslots.nrows()): # loop over time slots
#print "i = ", i
# get solution vector
solutions=solverstat.getSolution(timeslots[i]["STARTTIME"], timeslots[i]["ENDTIME"], startfreq, endfreq, iteration="all")
# get solver statistics parameter: ChiSqr
chiSqr, times=solverstat.readParameter("CHISQR", timeslots[i]["STARTTIME"], timeslots[i]["ENDTIME"], startfreq, endfreq, iteration="all")
# print "type(solutions) = ", type(solutions)
# print "length = ", length
length=len(solutions)
for iter in range(1, length): # Loop over iterations
line=str(i) # timeslot at first place of line to write out to file
line += "\t" + str(iter) # second column is iteration number
# get Real and imaginary part for all antennas from solution
# put values to together
for j in range(0, len(solutions[iter]), 2):
#print "len(solutions[iter]) = ", len(solutions[iter])
print "iter = ", iter # DEBUG
print "j = ", j # DEBUG
line += "\t" + str(solutions[iter][j]) + "\t" + str(solutions[iter][j+1])
#print "len(chiSqr) = ", len(chiSqr)
line = line + "\t" + str(chiSqr[iter]) + "\n"
#print "line = ", line # DEBUG
outfile.write(line) # write line to file
line=""
print "Closing ASCII file ", filename
outfile.close()
print "Closing MS solver statistics file ", MSfilename
#solverstat.close()
| gpl-3.0 | 7,031,979,709,839,667,000 | 27.910112 | 139 | 0.67159 | false |
Mad-ness/cobbler-extensions | systems/way2/genprofiles.py | 1 | 3211 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import os
import json
import yaml
import time
import uuid
import jinja2
import argparse
template_file = "template.yaml"
systems_file = "systems.yaml"
def loadyaml2dict(filename): return yaml.load(open(filename, 'r'))
def loadfile2plain(filename): return open(filename, 'r').read()
def render_template(buffer, vars={}): return jinja2.Template(buffer).render(vars)
def yaml2dict(buffer): return yaml.load(buffer)
def dict2humantext(buffer): return json.dumps(buffer, indent=4)
def render_systems2plain():
systems_plaindefs = loadfile2plain(systems_file)
systems_vars = load_vars(systems_plaindefs)
systems_vars['ctime'] = time.time()
systems_vars['mtime'] = systems_vars['ctime']
return render_template(systems_plaindefs, { 'vars': systems_vars })
def load_vars(buffer):
data = yaml2dict(buffer)
result = {}
if data is not None and 'vars' in data.keys(): result = data['vars']
return result
def render_systems2yaml(output_format):
# get rendered systems file just a text
systemsfile_plaintext = render_systems2plain()
# convert it to a dict structure, vars are here as well
# but vars are not needed
systems_defs_yaml = yaml2dict(render_template(systemsfile_plaintext, {}))
list_ready_systems = []
if 'systems' in systems_defs_yaml.keys():
# load a Cobbler system template
system_template = loadfile2plain(template_file)
sysnames = []
for sys in systems_defs_yaml['systems']:
sysnames.append(sys['name'])
sys['uid'] = uuid.uuid4()
if output_format == 'json':
data_json = json.dumps(yaml2dict(render_template(system_template, sys)))
with open(sys['name'] + '.output.yaml', 'w') as fd:
fd.write(data_json)
elif output_format == 'yaml':
print("\n[ BEGIN ] System name: %s\n" % sys['name'])
print(render_template(system_template, sys))
print("\n[ END ] System name: %s\n" % sys['name'])
else:
print("Unknown format given: %s. Do nothing." % output_format)
exit(1)
print("Processed systems:\n - " + "\n - ".join(sysnames))
def menu():
f1 = 'template.yaml'
f2 = 'systems.yaml'
parser = argparse.ArgumentParser(description = "Building Cobbler suitable systems files from short YAML definitions")
parser.add_argument("-s", "--systems-file", default=f2, help="Name of a file within systems defitions at YAML syntax (default: %s)" % f2)
parser.add_argument("-t", "--template-file", default=f1, help="Template file that to be used for making output config files (default: %s)" % f1)
parser.add_argument("-o", "--output-format", choices=["json", "yaml"], default="yaml", type=str, help="Output format: json, yaml. If json is typed when output is written in files otherwise it is printed out at a screen")
    args = parser.parse_args()
    # The render helpers read these as module-level globals, so rebind the
    # globals here; otherwise the -s/-t options would have no effect.
    global template_file, systems_file
    template_file = args.template_file
    systems_file = args.systems_file
    render_systems2yaml(output_format=args.output_format)
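# A minimal sketch of the expected input (keys other than 'vars', 'systems' and
# 'name' depend on the Jinja2 template and are assumptions):
#
#   systems.yaml:
#     vars:
#       domain: example.lan
#     systems:
#       - name: node01
#       - name: node02
#
# template.yaml is a Jinja2 template rendered once per entry in 'systems'.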
if __name__ == "__main__":
menu()
exit(0)
| gpl-3.0 | -4,083,872,477,220,621,000 | 34.285714 | 224 | 0.639676 | false |
sadanandb/pmt | src/pyasm/web/html_wdg_test.py | 6 | 1705 | #!/usr/bin/python
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
import unittest, string
from html_wdg import *
from web_state import *
class HtmlWdgTest(unittest.TestCase):
def test_element(my):
br = HtmlElement("br")
my.assertEquals("<br/>\n", br.get_display() )
def test_children(my):
href = HtmlElement.href("yahoo", "http://www.yahoo.com")
my.assertEquals("<a href=\"http://www.yahoo.com\">yahoo</a>\n", href.get_display() )
def test_style(my):
div = HtmlElement.div("Hello")
style = "background-color: #f0f0f0"
div.set_style(style)
my.assertEquals("<div style=\"%s\">Hello</div>\n" % style, div.get_display() )
def test_table(my):
table = Table()
table.add_row()
table.add_cell( "Name:")
table.add_cell( "Remko")
table.add_row()
table.add_cell( "Password:" )
table.add_cell( "pig")
html = Html()
html.writeln("<table cellpadding=\"0\" cellspacing=\"0\">")
html.writeln("<tr><td>Name:</td><td>Remko</td></tr>")
html.writeln("<tr><td>Password:</td><td>pig</td></tr>")
html.writeln("</table>")
a = html.getvalue()
a = string.replace( a ,"\n", "")
b = table.get_display()
b = string.replace( b ,"\n", "")
my.assertEquals( a, b )
if __name__ == '__main__':
unittest.main()
| epl-1.0 | 4,263,023,429,376,281,000 | 22.680556 | 92 | 0.547214 | false |
lbouma/Cyclopath | pyserver/gwis/command_/user_unsubscribe.py | 1 | 4635 | # Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
# For use with the spam.py script, uses a UUID to unsubscribe users from mails.
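# Illustrative request parameters (names from decode_request below; values are
# made up): email=rider@example.com, proof=123e4567-e89b-12d3-a456-426614174000.
# The proof must parse as a UUID and match user_.unsubscribe_proof for that email.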
import os
import sys
import uuid
import conf
import g
from gwis import command
from gwis.exception.gwis_error import GWIS_Error
#from util_ import misc
log = g.log.getLogger('cmd.unsbscrb')
class Op_Handler(command.Op_Handler):
__slots__ = (
'email_supplied',
'proof_supplied',
'unsubscribe_ok',
)
# *** Constructor
def __init__(self, req):
command.Op_Handler.__init__(self, req)
self.email_supplied = None
self.proof_supplied = None
self.unsubscribe_ok = None
# ***
#
def __str__(self):
selfie = (
'user_unsubscrb: email_sppld: %s / proof_sppld: %s / unsubscrb_ok: %s'
% (self.email_supplied,
self.proof_supplied,
self.unsubscribe_ok,))
return selfie
# ***
#
def pre_decode(self):
command.Op_Handler.pre_decode(self)
#
def decode_request(self):
command.Op_Handler.decode_request(self, expect_no_token=True)
g.assurt(not self.req.client.username)
self.req.client.username = conf.anonymous_username
self.email_supplied = self.decode_key('email')
self.proof_supplied = self.decode_key('proof')
# Verify that the UUID is a UUID.
try:
just_testing = uuid.UUID(self.proof_supplied)
except ValueError, e:
raise GWIS_Error('The indicated UUID is not formatted correctly.')
#
def fetch_n_save(self):
command.Op_Handler.fetch_n_save(self)
# Verify the email and UUID.
proven_sql = (
"SELECT * FROM user_ WHERE email = '%s' AND unsubscribe_proof = '%s'"
% (self.email_supplied, self.proof_supplied,))
rows = self.req.db.sql(proven_sql)
if rows:
g.assurt(len(rows) == 1)
success = self.req.db.transaction_retryable(self.attempt_save,
self.req)
self.unsubscribe_ok = True
# Not really a warning but [lb] wants a logcheck email.
log.error('fetch_n_save: unsubscribed email: %s'
% (self.email_supplied,))
else:
self.unsubscribe_ok = False
log.error('fetch_n_save: cannot unsubscribe: %s'
% (self.email_supplied,))
#
def prepare_response(self):
log.verbose('prepare_response')
html_header = (
"""<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>Cyclopath Geowiki: Unsubscribe</title>
</head>
<body>""")
html_footer = (
"""</body>
</html>""")
if self.unsubscribe_ok:
self.req.htmlfile_out = (
"""%s
<p><b>Unsubscribed</b><br/></p>
<p>Your email address, <i>%s</i>, has been removed
from the Cyclopath mailing list.<br/></p>
%s
""" % (html_header, self.email_supplied, html_footer,))
# BUG nnnn: How does a user resubscribe?
# We need user pref option in flashclient.
# <p>
# If you'd like to resubscribe, please visit Cyclopath and change your
# <a href="http://%s/#user_prefs">user preferences</a>.
# </p>
#
else:
self.req.htmlfile_out = (
"""%s
<p><b>Cannot Unsubscribe</b><br/></p>
<p>The specified email address, <i>%s</i>, could not be removed from the
Cyclopath mailing list: either the email is not attached to any user account,
or the unsubscribe link you're using is broken.<br/></p>
<p>Please email <a href="mailto:%s">%s</a> if you would like further
assistance.</p>
%s
""" % (html_header,
self.email_supplied,
conf.mail_from_addr,
conf.mail_from_addr,
html_footer,))
log.error('prepare_response: self.req.htmlfile_out: %s'
% (self.req.htmlfile_out,))
# ***
#
def attempt_save(self, db, *args, **kwargs):
g.assurt(id(db) == id(self.req.db))
db.transaction_begin_rw()
# In lieu of user_email.flag_set(db, username, option, value), which
# just updates one user by username, update all users with this email
# address.
update_sql = (
"""
UPDATE user_ SET enable_email = FALSE,
enable_email_research = FALSE WHERE email = '%s'
""" % (self.email_supplied,))
db.sql(update_sql)
log.debug('fetch_n_save: unsubscribe email: %s / count: %d'
% (self.email_supplied, db.curs.rowcount,))
db.transaction_commit()
# ***
# ***
| apache-2.0 | -5,695,904,645,830,247,000 | 27.78882 | 79 | 0.59849 | false |
RapidApplicationDevelopment/tensorflow | tensorflow/contrib/solvers/python/ops/lanczos.py | 8 | 9246 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lanczos algorithms."""
# TODO(rmlarsen): Add implementation of symmetric Lanczos algorithm.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
from tensorflow.contrib.solvers.python.ops import util
def lanczos_bidiag(operator,
k,
orthogonalize=True,
starting_vector=None,
name="lanczos_bidiag"):
"""Computes a Lanczos bidiagonalization for a linear operator.
Computes matrices `U` of shape `[m, k+1]`, `V` of shape `[n, k]` and lower
bidiagonal matrix `B` of shape `[k+1, k]`, that satisfy the equations
`A * V = U * B` and `A' * U[:, :-1] = V * B[:-1, :]'`.
The columns of `U` are orthonormal and form a basis for the Krylov subspace
`K(A*A', U[:,0])`.
The columns of `V` are orthonormal and form a basis for the Krylov subspace
`K(A'*A, A' U[:,0])`.
Args:
operator: An object representing a linear operator with attributes:
- shape: Either a list of integers or a 1-D `Tensor` of type `int32` of
length 2. `shape[0]` is the dimension on the domain of the operator,
`shape[1]` is the dimension of the co-domain of the operator. On other
words, if operator represents an M x N matrix A, `shape` must contain
`[M, N]`.
- dtype: The datatype of input to and output from `apply` and
`apply_adjoint`.
- apply: Callable object taking a vector `x` as input and returning a
vector with the result of applying the operator to `x`, i.e. if
`operator` represents matrix `A`, `apply` should return `A * x`.
- apply_adjoint: Callable object taking a vector `x` as input and
returning a vector with the result of applying the adjoint operator
to `x`, i.e. if `operator` represents matrix `A`, `apply_adjoint` should
return `conj(transpose(A)) * x`.
k: An integer or a scalar Tensor of type `int32`. Determines the maximum
number of steps to run. If an invariant subspace is found, the algorithm
may terminate before `k` steps have been run.
orthogonalize: If `True`, perform full orthogonalization. If `False` no
orthogonalization is performed.
starting_vector: If not null, must be a `Tensor` of shape `[n]`.
name: A name scope for the operation.
Returns:
output: A namedtuple representing a Lanczos bidiagonalization of
`operator` with attributes:
u: A rank-2 `Tensor` of type `operator.dtype` and shape
`[operator.shape[0], k_actual+1]`, where `k_actual` is the number of
steps run.
v: A rank-2 `Tensor` of type `operator.dtype` and shape
`[operator.shape[1], k_actual]`, where `k_actual` is the number of steps
run.
alpha: A rank-1 `Tensor` of type `operator.dtype` and shape `[k]`.
beta: A rank-1 `Tensor` of type `operator.dtype` and shape `[k]`.
"""
def tarray(size, dtype, name):
return tf.TensorArray(
dtype=dtype,
size=size,
tensor_array_name=name,
clear_after_read=False)
# Reads a row-vector at location i in tarray and returns it as a
# column-vector.
def read_colvec(tarray, i):
return tf.expand_dims(tarray.read(i), -1)
  # Writes a column-vector as a row-vector at location i in tarray.
def write_colvec(tarray, colvec, i):
return tarray.write(i, tf.squeeze(colvec))
# Ephemeral class holding Lanczos bidiagonalization state:
# u = left Lanczos vectors
# v = right Lanczos vectors
# alpha = diagonal of B_k.
# beta = subdiagonal of B_k.
# Notice that we store the left and right Lanczos vectors as the _rows_
# of u and v. This is done because tensors are stored row-major and
# TensorArray only supports packing along dimension 0.
lanzcos_bidiag_state = collections.namedtuple("LanczosBidiagState",
["u", "v", "alpha", "beta"])
def update_state(old, i, u, v, alpha, beta):
return lanzcos_bidiag_state(
write_colvec(old.u, u, i + 1),
write_colvec(old.v, v, i),
old.alpha.write(i, alpha),
old.beta.write(i, beta))
def gram_schmidt_step(j, basis, v):
"""Makes v orthogonal to the j'th vector in basis."""
v_shape = v.get_shape()
basis_vec = read_colvec(basis, j)
v -= tf.matmul(basis_vec, v, adjoint_a=True) * basis_vec
v.set_shape(v_shape)
return j + 1, basis, v
def orthogonalize_once(i, basis, v):
j = tf.constant(0, dtype=tf.int32)
_, _, v = tf.while_loop(lambda j, basis, v: j < i, gram_schmidt_step,
[j, basis, v])
return util.l2normalize(v)
# Iterated modified Gram-Schmidt orthogonalization adapted from PROPACK.
# TODO(rmlarsen): This is possibly the slowest implementation of
# iterated Gram-Schmidt orthogonalization since the abacus. Move to C++.
def orthogonalize_(i, basis, v):
v_norm = util.l2norm(v)
v_new, v_new_norm = orthogonalize_once(i, basis, v)
# If the norm decreases more than 1/sqrt(2), run a second
# round of MGS. See proof in:
# B. N. Parlett, ``The Symmetric Eigenvalue Problem'',
# Prentice-Hall, Englewood Cliffs, NJ, 1980. pp. 105-109
return tf.cond(v_new_norm < 0.7071 * v_norm,
lambda: orthogonalize_once(i, basis, v),
lambda: (v_new, v_new_norm))
def stopping_criterion(i, _):
# TODO(rmlarsen): Stop if an invariant subspace is detected.
return i < k
def lanczos_bidiag_step(i, ls):
"""Extends the Lanczos bidiagonalization ls by one step."""
u = read_colvec(ls.u, i)
r = operator.apply_adjoint(u)
# The shape inference doesn't work across cond, save and reapply the shape.
r_shape = r.get_shape()
r = tf.cond(
i > 0,
lambda: r - ls.beta.read(i - 1) * read_colvec(ls.v, i - 1),
lambda: r)
r.set_shape(r_shape)
if orthogonalize:
v, alpha = orthogonalize_(i - 1, ls.v, r)
else:
v, alpha = util.l2normalize(r)
p = operator.apply(v) - alpha * u
if orthogonalize:
u, beta = orthogonalize_(i, ls.u, p)
else:
u, beta = util.l2normalize(p)
return i + 1, update_state(ls, i, u, v, alpha, beta)
with tf.name_scope(name):
dtype = operator.dtype
if starting_vector is None:
starting_vector = tf.random_uniform(
operator.shape[:1], -1, 1, dtype=dtype)
u0, _ = util.l2normalize(starting_vector)
ls = lanzcos_bidiag_state(
u=write_colvec(tarray(k + 1, dtype, "u"), u0, 0),
v=tarray(k, dtype, "v"),
alpha=tarray(k, dtype, "alpha"),
beta=tarray(k, dtype, "beta"))
i = tf.constant(0, dtype=tf.int32)
_, ls = tf.while_loop(stopping_criterion, lanczos_bidiag_step, [i, ls])
return lanzcos_bidiag_state(
tf.matrix_transpose(ls.u.pack()),
tf.matrix_transpose(ls.v.pack()), ls.alpha.pack(), ls.beta.pack())
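# Hypothetical usage sketch (not part of this module); it relies only on the
# operator interface described in the docstring above, and the names `a` and
# `op` are illustrative:
#   DenseOperator = collections.namedtuple(
#       "DenseOperator", ["shape", "dtype", "apply", "apply_adjoint"])
#   op = DenseOperator(shape=a.get_shape().as_list(), dtype=a.dtype,
#                      apply=lambda v: tf.matmul(a, v),
#                      apply_adjoint=lambda v: tf.matmul(a, v, adjoint_a=True))
#   lbd = lanczos_bidiag(op, k=10)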
# TODO(rmlarsen): Implement C++ ops for handling bidiagonal matrices
# efficiently. Such a module should provide
# - multiplication,
# - linear system solution by back-substitution,
# - QR factorization,
# - SVD.
def bidiag_matmul(matrix, alpha, beta, adjoint_b=False, name="bidiag_matmul"):
"""Multiplies a matrix by a bidiagonal matrix.
alpha and beta are length k vectors representing the diagonal and first lower
subdiagonal of (K+1) x K matrix B.
If adjoint_b is False, computes A * B as follows:
A * B = A[:, :-1] * diag(alpha) + A[:, 1:] * diag(beta)
If adjoint_b is True, computes A * B[:-1, :]' as follows
A * B[:-1, :]' =
A * diag(alpha) + [zeros(m,1), A[:, :-1] * diag(beta[:-1])]
Args:
matrix: A rank-2 `Tensor` representing matrix A.
alpha: A rank-1 `Tensor` representing the diagonal of B.
beta: A rank-1 `Tensor` representing the lower subdiagonal diagonal of B.
adjoint_b: `bool` determining what to compute.
name: A name scope for the operation.
Returns:
    If `adjoint_b` is False, `A * B` is returned.
    If `adjoint_b` is True, `A * B[:-1, :]'` is returned.
"""
with tf.name_scope(name):
alpha = tf.expand_dims(alpha, 0)
if adjoint_b is False:
beta = tf.expand_dims(beta, 0)
return matrix[:, :-1] * alpha + matrix[:, 1:] * beta
else:
beta = tf.expand_dims(beta[:-1], 0)
shape = tf.shape(matrix)
zero_column = tf.expand_dims(tf.zeros(shape[:1], dtype=matrix.dtype), 1)
return matrix * alpha + tf.concat(1, [zero_column, matrix[:, :-1] * beta])
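# For example (sketch, reusing the hypothetical `a` and `lbd` names from the
# comment above lanczos_bidiag): the identity A * V = U * B can be checked
# numerically via
#   av = tf.matmul(a, lbd.v)
#   ub = bidiag_matmul(lbd.u, lbd.alpha, lbd.beta, adjoint_b=False)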
| apache-2.0 | 4,289,418,034,032,950,300 | 39.025974 | 80 | 0.632814 | false |
Metaswitch/calico-nova | nova/virt/hyperv/livemigrationops.py | 4 | 5120 | # Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for live migration VM operations.
"""
import functools
from oslo.config import cfg
from oslo.utils import excutils
from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmops
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('use_cow_images', 'nova.virt.driver')
def check_os_version_requirement(function):
@functools.wraps(function)
def wrapper(self, *args, **kwds):
if not self._livemigrutils:
raise NotImplementedError(_('Live migration is supported '
'starting with Hyper-V Server '
'2012'))
return function(self, *args, **kwds)
return wrapper
class LiveMigrationOps(object):
def __init__(self):
# Live migration is supported starting from Hyper-V Server 2012
if utilsfactory.get_hostutils().check_min_windows_version(6, 2):
self._livemigrutils = utilsfactory.get_livemigrationutils()
else:
self._livemigrutils = None
self._pathutils = utilsfactory.get_pathutils()
self._vmops = vmops.VMOps()
self._volumeops = volumeops.VolumeOps()
self._imagecache = imagecache.ImageCache()
@check_os_version_requirement
def live_migration(self, context, instance_ref, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
LOG.debug("live_migration called", instance=instance_ref)
instance_name = instance_ref["name"]
try:
self._vmops.copy_vm_console_logs(instance_name, dest)
self._livemigrutils.live_migrate_vm(instance_name,
dest)
except Exception:
with excutils.save_and_reraise_exception():
LOG.debug("Calling live migration recover_method "
"for instance: %s", instance_name)
recover_method(context, instance_ref, dest, block_migration)
LOG.debug("Calling live migration post_method for instance: %s",
instance_name)
post_method(context, instance_ref, dest, block_migration)
@check_os_version_requirement
def pre_live_migration(self, context, instance, block_device_info,
network_info):
LOG.debug("pre_live_migration called", instance=instance)
self._livemigrutils.check_live_migration_config()
if CONF.use_cow_images:
boot_from_volume = self._volumeops.ebs_root_in_block_devices(
block_device_info)
if not boot_from_volume and instance.image_ref:
self._imagecache.get_cached_image(context, instance)
self._volumeops.initialize_volumes_connection(block_device_info)
@check_os_version_requirement
def post_live_migration(self, context, instance, block_device_info):
self._volumeops.disconnect_volumes(block_device_info)
@check_os_version_requirement
def post_live_migration_at_destination(self, ctxt, instance_ref,
network_info, block_migration):
LOG.debug("post_live_migration_at_destination called",
instance=instance_ref)
self._vmops.log_vm_serial_output(instance_ref['name'],
instance_ref['uuid'])
@check_os_version_requirement
def check_can_live_migrate_destination(self, ctxt, instance_ref,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
LOG.debug("check_can_live_migrate_destination called", instance_ref)
return {}
@check_os_version_requirement
def check_can_live_migrate_destination_cleanup(self, ctxt,
dest_check_data):
LOG.debug("check_can_live_migrate_destination_cleanup called")
@check_os_version_requirement
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
LOG.debug("check_can_live_migrate_source called", instance_ref)
return dest_check_data
| apache-2.0 | -7,912,392,623,331,707,000 | 40.290323 | 78 | 0.62207 | false |
DikkeNeef/humtools | autoit/process.py | 2 | 4827 | # -*- coding: utf-8 -*-
__author__ = 'Jace Xu'
from .autoit import AUTO_IT
from .autoit import api, error
from .autoit import Properties
from .autoit import AutoItError
from ctypes.wintypes import *
@api.check(1, "run program failed")
def run(filename, work_dir="", show_flag=Properties.SW_SHOWNORMAL):
"""
:param filename:
:param work_dir:
:param show_flag:
:return:
"""
ret = AUTO_IT.AU3_Run(LPCWSTR(filename), LPCWSTR(work_dir),
INT(show_flag))
return ret
@api.check(1, "run program failed")
def run_wait(filename, work_dir="", show_flag=Properties.SW_SHOWNORMAL):
"""
:param filename:
:param work_dir:
:param show_flag:
:return:
"""
ret = AUTO_IT.AU3_RunWait(LPCWSTR(filename), LPCWSTR(work_dir),
INT(show_flag))
return ret
def process_close(process):
"""
Terminates a named process.
"""
ret = AUTO_IT.AU3_ProcessClose(LPCWSTR(process))
return ret
def process_exists(process):
"""
    Checks to see if a specified process exists.
    :param process: The name or PID of the process to check.
    :return:
"""
ret = AUTO_IT.AU3_ProcessExists(LPCWSTR(process))
return ret
def process_set_priority(process, priority):
"""
Changes the priority of a process
:param process: The name or PID of the process to check.
:param priority:A flag which determines what priority to set
0 - Idle/Low
1 - Below Normal (Not supported on Windows 95/98/ME)
2 - Normal
3 - Above Normal (Not supported on Windows 95/98/ME)
4 - High
5 - Realtime (Use with caution, may make the system unstable)
:return:
"""
ret = AUTO_IT.AU3_ProcessSetPriority(LPCWSTR(process), INT(priority))
if ret == 0:
if error() == 1:
raise AutoItError("set priority failed")
elif error() == 2:
raise AutoItError("unsupported priority class be used")
return ret
@api.check(2, "the process wait timed out")
def process_wait(process, timeout=0):
"""
Pauses script execution until a given process exists.
:param process:
:param timeout:
:return:
"""
ret = AUTO_IT.AU3_ProcessWait(LPCWSTR(process), INT(timeout))
return ret
@api.check(2, "the process wait close timed out")
def process_wait_close(process, timeout=0):
"""
Pauses script execution until a given process does not exist.
:param process:
:param timeout:
:return:
"""
ret = AUTO_IT.AU3_ProcessWaitClose(LPCWSTR(process), INT(timeout))
return ret
@api.check(1, "run an external program failed")
def run_as(user, domain, password, filename, logon_flag=1, work_dir="",
show_flag=Properties.SW_SHOWNORMAL):
"""
Runs an external program.
:param user: username The user name to use.
:param domain: The domain name to use.
:param password: The password to use.
:param logon_flag: 0 = do not load the user profile, 1 = (default) load
the user profile, 2 = use for net credentials only
:param filename: The name of the executable (EXE, BAT, COM, or PIF) to run.
:param work_dir: The working directory.
:param show_flag: The "show" flag of the executed program:
SW_HIDE = Hidden window
SW_MINIMIZE = Minimized window
SW_MAXIMIZE = Maximized window
:return:
"""
ret = AUTO_IT.AU3_RunAs(
LPCWSTR(user), LPCWSTR(domain), LPCWSTR(password), INT(logon_flag),
LPCWSTR(filename), LPCWSTR(work_dir), INT(show_flag)
)
return ret
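# Hypothetical usage sketch (the user name, domain, password and paths below
# are illustrative only):
#   run_as("backup", "CORP", "s3cret", r"C:\Windows\notepad.exe",
#          logon_flag=1, work_dir=r"C:\Temp")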
@api.check(1, "run an external program failed")
def run_as_wait(user, domain, password, filename, logon_flag=1, work_dir="",
show_flag=Properties.SW_SHOWNORMAL):
"""
Runs an external program.
:param user: username The user name to use.
:param domain: The domain name to use.
:param password: The password to use.
:param logon_flag: 0 = do not load the user profile, 1 = (default) load
the user profile, 2 = use for net credentials only
:param filename: The name of the executable (EXE, BAT, COM, or PIF) to run.
:param work_dir: The working directory.
:param show_flag: The "show" flag of the executed program:
SW_HIDE = Hidden window
SW_MINIMIZE = Minimized window
SW_MAXIMIZE = Maximized window
:return:
"""
ret = AUTO_IT.AU3_RunAsWait(
LPCWSTR(user), LPCWSTR(domain), LPCWSTR(password), INT(logon_flag),
LPCWSTR(filename), LPCWSTR(work_dir), INT(show_flag)
)
return ret
@api.check(2, "set shutdown failed")
def shutdown(code):
"""
:param code: The shutdown code is a combination of the following values:
0 = Logoff
1 = Shutdown
2 = Reboot
4 = Force
8 = Power down
:return:
"""
ret = AUTO_IT.AU3_Shutdown(INT(code))
return ret | apache-2.0 | -5,177,541,453,538,643,000 | 27.91018 | 79 | 0.630205 | false |
luckydonald/django-tarview | tarview/tests/test_views.py | 1 | 2492 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import platform
import os
import tarfile
from django.test import TestCase
from django.core.files import File
from django.http import HttpResponse
from django.test.client import RequestFactory
from django.core.files.base import ContentFile
from django import get_version as django_version
from tarview.views import BaseTarView
delete_zip = True # set to False if you want to inspect the generated tar manually
name = "/tmp/test_py-%s_dj-%s.tar" % (platform.python_version(),django_version())
class TarView(BaseTarView):
"""Test TarView basic implementation."""
_files = None
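    # get_files() builds this list lazily and caches it here, so repeated
    # calls reuse the same File objects.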
def get_files(self):
if self._files is None:
dirname = os.path.dirname(__file__)
self._files = [
File(open(os.path.join(dirname, 'test_file.txt')), name="test_file.txt"),
File(open(os.path.join(dirname, 'test_file.odt')), name="test_file.odt"),
ContentFile(b"Littlepip is best pony!", name="test_file_manual.txt")
]
return self._files
class TarViewTests(TestCase):
def setUp(self):
self.view = TarView()
self.request = RequestFactory()
def test_response_type(self):
response = self.view.get(self.request)
self.assertTrue(isinstance(response, HttpResponse))
def test_response_params(self):
response = self.view.get(self.request)
self.assertEqual(response['Content-Type'], 'application/x-tar')
self.assertEqual(response['Content-Disposition'], 'attachment; filename=download.tar')
def test_response_content_length(self):
response = self.view.get(self.request)
if platform.python_version() < "3":
self.assertEqual(response['Content-Length'], '30720') # measured manually with Finder.
else:
self.assertEqual(response['Content-Length'], '10240') # measured manually with Finder.
def test_valid_tarfile(self):
response = self.view.get(self.request)
with open(name, mode="wb") as file:
file.write(response.content)
response_file = ContentFile(response.content, name=name)
self.assertTrue(tarfile.is_tarfile(name))
tar_file = tarfile.TarFile(fileobj=response_file)
self.assertEqual(tar_file.getnames(), ['test_file.txt', 'test_file.odt', 'test_file_manual.txt'])
def tearDown(self):
if os.path.exists(name) and delete_zip:
os.unlink(name)
pass
| mit | 6,585,196,574,338,521,000 | 34.6 | 105 | 0.655297 | false |
scarface-4711/denonavr | denonavr/decorators.py | 1 | 5138 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This module implements the REST API to Denon AVR receivers.
:copyright: (c) 2021 by Oliver Goetz.
:license: MIT, see LICENSE for more details.
"""
import asyncio
import inspect
import logging
import time
import xml.etree.ElementTree as ET
from functools import wraps
from typing import Callable, Coroutine
import httpx
from defusedxml import DefusedXmlException
from defusedxml.ElementTree import ParseError
from .exceptions import (
AvrRequestError,
AvrForbiddenError,
AvrNetworkError,
AvrTimoutError,
AvrInvalidResponseError)
_LOGGER = logging.getLogger(__name__)
def async_handle_receiver_exceptions(func: Coroutine) -> Coroutine:
"""
    Handle exceptions raised when calling a Denon AVR endpoint asynchronously.
The decorated function must either have a string variable as second
argument or as "request" keyword argument.
"""
@wraps(func)
async def wrapper(*args, **kwargs):
try:
return await func(*args, **kwargs)
except httpx.HTTPStatusError as err:
_LOGGER.debug(
"HTTP status error on request %s", err.request, exc_info=True)
# Separate handling of 403 errors
if err.response.status_code == 403:
raise AvrForbiddenError(
"HTTPStatusError: {}".format(err), err.request) from err
raise AvrRequestError(
"HTTPStatusError: {}".format(err), err.request) from err
except httpx.TimeoutException as err:
_LOGGER.debug(
"HTTP timeout exception on request %s", err.request,
exc_info=True)
raise AvrTimoutError(
"TimeoutException: {}".format(err), err.request) from err
except httpx.NetworkError as err:
_LOGGER.debug(
"Network error exception on request %s", err.request,
exc_info=True)
raise AvrNetworkError(
"NetworkError: {}".format(err), err.request) from err
except httpx.RemoteProtocolError as err:
_LOGGER.debug(
"Remote protocol error exception on request %s", err.request,
exc_info=True)
raise AvrInvalidResponseError(
"RemoteProtocolError: {}".format(err), err.request) from err
except (
ET.ParseError, DefusedXmlException, ParseError,
UnicodeDecodeError) as err:
_LOGGER.debug(
"Defusedxml parse error on request %s", (args, kwargs),
exc_info=True)
raise AvrInvalidResponseError(
"XMLParseError: {}".format(err), (args, kwargs)) from err
return wrapper
def cache_clear_on_exception(func: Coroutine) -> Coroutine:
"""
Decorate a function to clear lru_cache if an exception occurs.
The decorator must be placed right before the @lru_cache decorator.
It prevents memory leaks in home-assistant when receiver instances are
created and deleted right away in case the device is offline on setup.
"""
@wraps(func)
async def wrapper(*args, **kwargs):
try:
return await func(*args, **kwargs)
except Exception as err:
_LOGGER.debug("Exception %s raised, clearing cache", err)
func.cache_clear()
raise
return wrapper
def set_cache_id(func: Callable) -> Callable:
"""
    Decorate a function to add a cache_id keyword argument if it is not present.
    The function must be called with a fixed cache_id keyword argument to be
    able to get cached data. This prevents accidental caching of a function
    result.
"""
@wraps(func)
def wrapper(*args, **kwargs):
if kwargs.get("cache_id") is None:
kwargs["cache_id"] = time.time()
return func(*args, **kwargs)
return wrapper
def run_async_synchronously(async_func: Coroutine) -> Callable:
"""
    Decorate a function to run the configured asynchronous function
    synchronously instead.
    If available, the corresponding function with async_ prefix is called in
    its own event loop. This is not efficient, but it ensures backwards
    compatibility of this library.
"""
def decorator(func: Callable):
# Check if function is a coroutine
if not inspect.iscoroutinefunction(async_func):
raise AttributeError(
"Function {} is not a coroutine function".format(async_func))
# Check if the signature of both functions is equal
if inspect.signature(func) != inspect.signature(async_func):
raise AttributeError(
"Functions {} and {} have different signatures".format(
func, async_func))
@wraps(func)
def wrapper(*args, **kwargs):
# Run async function in own event loop
loop = asyncio.new_event_loop()
try:
return loop.run_until_complete(async_func(*args, **kwargs))
finally:
loop.close()
return wrapper
return decorator
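# Hypothetical usage sketch (the method names, cache decorator and cache size
# are illustrative, not part of this module): stack the decorators so the
# cache id is injected first and the cache is cleared on errors, then expose
# a synchronous wrapper that delegates to the async implementation.
#   @set_cache_id
#   @cache_clear_on_exception
#   @lru_cache(maxsize=16)  # or whichever async-aware cache the caller uses
#   async def async_update(self, cache_id=None):
#       ...
#   @run_async_synchronously(async_update)
#   def update(self, cache_id=None):
#       ...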
| mit | 766,311,828,356,369,300 | 32.802632 | 79 | 0.626314 | false |
CasualBeer/Legofy | legofy/__init__.py | 1 | 9064 | from __future__ import unicode_literals
from PIL import Image, ImageSequence
import sys
import os
# Python 2 and 3 support
# TODO: Proper images2gif version that supports both Py 2 and Py 3 (mostly handling binary data)
if sys.version_info < (3,):
import legofy.images2gif_py2 as images2gif
else:
import legofy.images2gif_py3 as images2gif
# http://www.brickjournal.com/files/PDFs/2010LEGOcolorpalette.pdf
PALETTE_SOLID = {
"024": [0xfe, 0xc4, 0x01],
"106": [0xe7, 0x64, 0x19],
"021": [0xde, 0x01, 0x0e],
"221": [0xde, 0x38, 0x8b],
"023": [0x01, 0x58, 0xa8],
"028": [0x01, 0x7c, 0x29],
"119": [0x95, 0xb9, 0x0c],
"192": [0x5c, 0x1d, 0x0d],
"018": [0xd6, 0x73, 0x41],
"001": [0xf4, 0xf4, 0xf4],
"026": [0x02, 0x02, 0x02],
"226": [0xff, 0xff, 0x99],
"222": [0xee, 0x9d, 0xc3],
"212": [0x87, 0xc0, 0xea],
"037": [0x01, 0x96, 0x25],
"005": [0xd9, 0xbb, 0x7c],
"283": [0xf5, 0xc1, 0x89],
"208": [0xe4, 0xe4, 0xda],
"191": [0xf4, 0x9b, 0x01],
"124": [0x9c, 0x01, 0xc6],
"102": [0x48, 0x8c, 0xc6],
"135": [0x5f, 0x75, 0x8c],
"151": [0x60, 0x82, 0x66],
"138": [0x8d, 0x75, 0x53],
"038": [0xa8, 0x3e, 0x16],
"194": [0x9c, 0x92, 0x91],
"154": [0x80, 0x09, 0x1c],
"268": [0x2d, 0x16, 0x78],
"140": [0x01, 0x26, 0x42],
"141": [0x01, 0x35, 0x17],
"312": [0xaa, 0x7e, 0x56],
"199": [0x4d, 0x5e, 0x57],
"308": [0x31, 0x10, 0x07]
}
PALETTE_TRANSPARENT = {
"044": [0xf9, 0xef, 0x69],
"182": [0xec, 0x76, 0x0e],
"047": [0xe7, 0x66, 0x48],
"041": [0xe0, 0x2a, 0x29],
"113": [0xee, 0x9d, 0xc3],
"126": [0x9c, 0x95, 0xc7],
"042": [0xb6, 0xe0, 0xea],
"043": [0x50, 0xb1, 0xe8],
"143": [0xce, 0xe3, 0xf6],
"048": [0x63, 0xb2, 0x6e],
"311": [0x99, 0xff, 0x66],
"049": [0xf1, 0xed, 0x5b],
"111": [0xa6, 0x91, 0x82],
"040": [0xee, 0xee, 0xee]
}
PALETTE_EFFECTS = {
"131": [0x8d, 0x94, 0x96],
"297": [0xaa, 0x7f, 0x2e],
"148": [0x49, 0x3f, 0x3b],
"294": [0xfe, 0xfc, 0xd5]
}
PALETTE_MONO = {
"001": [0xf4, 0xf4, 0xf4],
"026": [0x02, 0x02, 0x02]
}
def apply_color_overlay(image, color):
'''Small function to apply an effect over an entire image'''
overlay_red, overlay_green, overlay_blue = color
channels = image.split()
r = channels[0].point(lambda color: overlay_effect(color, overlay_red))
g = channels[1].point(lambda color: overlay_effect(color, overlay_green))
b = channels[2].point(lambda color: overlay_effect(color, overlay_blue))
channels[0].paste(r)
channels[1].paste(g)
channels[2].paste(b)
return Image.merge(image.mode, channels)
def overlay_effect(color, overlay):
    '''Shift the overlay channel according to the source pixel: very dark
    pixels darken it by 100, very bright pixels lighten it by 100, and
    mid-tones offset it by (color - 133)'''
if color < 33:
return overlay - 100
elif color > 233:
return overlay + 100
else:
return overlay - 133 + color
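# For example, with an overlay channel value of 144: an input of 20 (dark)
# maps to 144 - 100 = 44, an input of 240 (bright) maps to 144 + 100 = 244,
# and a mid-tone input of 133 maps to 144 - 133 + 133 = 144, i.e. the
# overlay value itself.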
def make_lego_image(thumbnail_image, brick_image):
    '''Create a lego version of an image by pasting one colour-overlaid brick per pixel of the thumbnail'''
base_width, base_height = thumbnail_image.size
brick_width, brick_height = brick_image.size
rgb_image = thumbnail_image.convert('RGB')
lego_image = Image.new("RGB", (base_width * brick_width,
base_height * brick_height), "white")
for brick_x in range(base_width):
for brick_y in range(base_height):
color = rgb_image.getpixel((brick_x, brick_y))
lego_image.paste(apply_color_overlay(brick_image, color),
(brick_x * brick_width, brick_y * brick_height))
return lego_image
def get_new_filename(file_path, ext_override=None):
'''Returns the save destination file path'''
folder, basename = os.path.split(file_path)
base, extention = os.path.splitext(basename)
if ext_override:
extention = ext_override
new_filename = os.path.join(folder, "{0}_lego{1}".format(base, extention))
return new_filename
def get_new_size(base_image, brick_image, size=None):
    '''Returns the size the first image should be thumbnailed to so that its longest axis is at most `size` pixels (or the brick image's size if none is given)'''
new_size = base_image.size
if size:
scale_x, scale_y = size, size
else:
scale_x, scale_y = brick_image.size
if new_size[0] > scale_x or new_size[1] > scale_y:
if new_size[0] < new_size[1]:
scale = new_size[1] / scale_y
else:
scale = new_size[0] / scale_x
new_size = (int(round(new_size[0] / scale)) or 1,
int(round(new_size[1] / scale)) or 1)
return new_size
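# For example, a 1000x600 base image with size=30 scales down to (30, 18):
# the longer axis (the width) is divided by 30 to get the scale factor, and
# both dimensions are divided by that factor.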
def get_lego_palette(palette_mode):
'''Gets the palette for the specified lego palette mode'''
if palette_mode == 'solid':
palette = PALETTE_SOLID.values()
elif palette_mode == 'transparent':
palette = PALETTE_TRANSPARENT.values()
elif palette_mode == 'effects':
palette = PALETTE_EFFECTS.values()
elif palette_mode == 'mono':
palette = PALETTE_MONO.values()
elif palette_mode == 'all':
palette = list(PALETTE_SOLID.values()) + \
list(PALETTE_TRANSPARENT.values()) + \
list(PALETTE_EFFECTS.values())
else:
raise "Unkown palette mode : %s" % palette_mode
# Flatten array of color triples
palette = [item for sublist in palette for item in sublist]
assert len(palette) % 3 == 0
# Repeat the first color so that the palette has 256 colors
first_color = palette[0:3]
missing_colors = int(256 - len(palette)/3)
padding = first_color * missing_colors
palette += padding
assert len(palette) == 768
return palette
def apply_thumbnail_effects(image, palette, dither):
'''Apply effects on the reduced image before Legofying'''
palette_image = Image.new("P", (1, 1))
palette_image.putpalette(palette)
return image.im.convert("P",
Image.FLOYDSTEINBERG if dither else Image.NONE,
palette_image.im)
def legofy_gif(base_image, brick_image, output_path, size, palette_mode, dither):
'''Alternative function that legofies animated gifs, makes use of images2gif - uses numpy!'''
im = base_image
# Read original image duration
original_duration = im.info['duration']
# Split image into single frames
frames = [frame.copy() for frame in ImageSequence.Iterator(im)]
# Create container for converted images
frames_converted = []
print("Number of frames to convert: " + str(len(frames)))
# Iterate through single frames
for i, frame in enumerate(frames, 1):
print("Converting frame number " + str(i))
new_size = get_new_size(frame, brick_image, size)
frame.thumbnail(new_size, Image.ANTIALIAS)
if palette_mode:
palette = get_lego_palette(palette_mode)
frame = apply_thumbnail_effects(frame, palette, dither)
new_frame = make_lego_image(frame, brick_image)
frames_converted.append(new_frame)
# Make use of images to gif function
images2gif.writeGif(output_path, frames_converted, duration=original_duration/1000.0, dither=0, subRectangles=False)
def legofy_image(base_image, brick_image, output_path, size, palette_mode, dither):
'''Legofy an image'''
new_size = get_new_size(base_image, brick_image, size)
base_image.thumbnail(new_size, Image.ANTIALIAS)
if palette_mode:
palette = get_lego_palette(palette_mode)
base_image = apply_thumbnail_effects(base_image, palette, dither)
make_lego_image(base_image, brick_image).save(output_path)
def main(image_path, output_path=None, size=None,
palette_mode=None, dither=False):
'''Legofy image or gif with brick_path mask'''
image_path = os.path.realpath(image_path)
if not os.path.isfile(image_path):
print('Image file "{0}" was not found.'.format(image_path))
sys.exit(1)
brick_path = os.path.join(os.path.dirname(__file__), "assets",
"bricks", "1x1.png")
if not os.path.isfile(brick_path):
print('Brick asset "{0}" was not found.'.format(brick_path))
sys.exit(1)
base_image = Image.open(image_path)
brick_image = Image.open(brick_path)
if palette_mode:
print ("LEGO Palette {0} selected...".format(palette_mode.title()))
elif dither:
palette_mode = 'all'
if image_path.lower().endswith(".gif") and base_image.is_animated:
if output_path is None:
output_path = get_new_filename(image_path)
print("Animated gif detected, will now legofy to {0}".format(output_path))
legofy_gif(base_image, brick_image, output_path, size, palette_mode, dither)
else:
if output_path is None:
output_path = get_new_filename(image_path, '.png')
print("Static image detected, will now legofy to {0}".format(output_path))
legofy_image(base_image, brick_image, output_path, size, palette_mode, dither)
base_image.close()
brick_image.close()
print("Finished!")
| mit | -173,823,470,050,331,070 | 32.947566 | 120 | 0.615402 | false |
jesopo/bitbot | src/core_modules/channel_access.py | 1 | 3911 | #--depends-on check_mode
#--depends-on commands
#--depends-on permissions
from src import ModuleManager, utils
SPECIAL = ["low", "high", "admin"]
class Module(ModuleManager.BaseModule):
_name = "ChanAccess"
def _has_channel_access(self, target, user, names):
required_access = []
for name in names.split(","):
name = name.strip()
if name in SPECIAL:
required_access.extend(SPECIAL[:SPECIAL.index(name)+1])
else:
required_access.append(name)
user_access = target.get_user_setting(user.get_id(), "access", [])
identified = self.exports.get("is-identified")(user)
matched = list(set(required_access)&set(user_access))
return ("*" in user_access or matched) and identified
def _command_check(self, event, channel, require_access):
if channel:
if self._has_channel_access(channel, event["user"],
require_access):
return utils.consts.PERMISSION_FORCE_SUCCESS, None
else:
return (utils.consts.PERMISSION_ERROR,
"You do not have permission to do this")
else:
raise ValueError("_command_check requires a channel")
@utils.hook("preprocess.command")
def preprocess_command(self, event):
require_access = event["hook"].get_kwarg("require_access")
if require_access:
channel = event["kwargs"].get("channel",
event["target"] if event["is_channel"] else None)
return self._command_check(event, channel, require_access)
@utils.hook("check.command.channel-access")
def check_command(self, event):
target = event["target"]
access = event["request_args"][0]
if len(event["request_args"]) > 1:
target = event["request_args"][0]
access = event["request_args"][1]
return self._command_check(event, target, access)
@utils.hook("received.command.access")
@utils.kwarg("require_mode", "high")
@utils.spec("!<#channel>r~channel !'list !<nickname>ouser")
@utils.spec("!<#channel>r~channel !'add,remove,set !<nickname>ouser "
"!<permissions>string")
def access(self, event):
channel = event["spec"][0]
subcommand = event["spec"][1].lower()
target = event["spec"][2]
access = channel.get_user_setting(target.get_id(), "access", [])
if subcommand == "list":
event["stdout"].write("Access for %s: %s" % (target.nickname,
" ".join(access)))
elif subcommand == "set":
channel.set_user_setting(target.get_id(), "access",
event["spec"][3])
elif subcommand == "add":
for acc in event["spec"][3].split(" "):
if acc in access:
raise utils.EventError("%s already has '%s' permission" % (
target.nickname, acc))
access.append(acc)
channel.set_user_setting(target.get_id(), "access", access)
event["stdout"].write("Added permission to %s: %s" % (
target.nickname, event["spec"][3]))
elif subcommand == "remove":
for acc in event["spec"][3].split(" "):
if not acc in access:
raise utils.EventError("%s does not have '%s' permission" %
(target.nickname, acc))
access.remove(acc)
if access:
channel.set_user_setting(target.get_id(), "access",
access)
else:
channel.del_user_setting(target.get_id(), "access")
event["stdout"].write("Removed permission from %s: %s" % (
target.nickname, event["spec"][3]))
else:
event["stderr"].write("Unknown command '%s'" % subcommand)
| gpl-2.0 | -4,273,408,982,313,953,000 | 38.908163 | 79 | 0.551777 | false |
naterh/cloud-init-rax-pkg | cloudinit/config/cc_resolv_conf.py | 9 | 3866 | # vi: ts=4 expandtab
#
# Copyright (C) 2013 Craig Tracey
# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Craig Tracey <[email protected]>
# Author: Juerg Haefliger <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Note:
# This module is intended to manage resolv.conf in environments where
# early configuration of resolv.conf is necessary for further
# bootstrapping and/or where configuration management such as puppet or
# chef own dns configuration. As Debian/Ubuntu will, by default, utilize
# resovlconf, and similarly RedHat will use sysconfig, this module is
# likely to be of little use unless those are configured correctly.
#
# For RedHat with sysconfig, be sure to set PEERDNS=no for all DHCP
# enabled NICs. And, in Ubuntu/Debian it is recommended that DNS
# be configured via the standard /etc/network/interfaces configuration
# file.
#
#
# Usage Example:
#
# #cloud-config
# manage_resolv_conf: true
#
# resolv_conf:
# nameservers: ['8.8.4.4', '8.8.8.8']
# searchdomains:
# - foo.example.com
# - bar.example.com
# domain: example.com
# options:
# rotate: true
# timeout: 1
#
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
from cloudinit import templater
from cloudinit import util
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
distros = ['fedora', 'rhel', 'sles']
def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
flags = []
false_flags = []
if 'options' in params:
for key, val in params['options'].items():
if isinstance(val, bool):
if val:
flags.append(key)
else:
false_flags.append(key)
for flag in flags + false_flags:
del params['options'][flag]
if not params.get('options'):
params['options'] = {}
params['flags'] = flags
LOG.debug("Writing resolv.conf from template %s" % template_fn)
templater.render_to_file(template_fn, target_fname, params)
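# For example, a params dict such as
#   {'nameservers': ['8.8.8.8'], 'options': {'rotate': True, 'timeout': 1}}
# is rewritten before rendering so that boolean options become bare flags:
# params['flags'] == ['rotate'] and params['options'] == {'timeout': 1}.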
def handle(name, cfg, cloud, log, _args):
"""
Handler for resolv.conf
@param name: The module name "resolv-conf" from cloud.cfg
@param cfg: A nested dict containing the entire cloud config contents.
@param cloud: The L{CloudInit} object in use.
@param log: Pre-initialized Python logger object to use for logging.
@param args: Any module arguments from cloud.cfg
"""
if "manage_resolv_conf" not in cfg:
log.debug(("Skipping module named %s,"
" no 'manage_resolv_conf' key in configuration"), name)
return
if not util.get_cfg_option_bool(cfg, "manage_resolv_conf", False):
log.debug(("Skipping module named %s,"
" 'manage_resolv_conf' present but set to False"), name)
return
if "resolv_conf" not in cfg:
log.warn("manage_resolv_conf True but no parameters provided!")
template_fn = cloud.get_template_filename('resolv.conf')
if not template_fn:
log.warn("No template found, not rendering /etc/resolv.conf")
return
generate_resolv_conf(template_fn=template_fn, params=cfg["resolv_conf"])
return
| gpl-3.0 | -3,221,011,069,594,861,600 | 32.327586 | 79 | 0.661924 | false |
owlabs/incubator-airflow | tests/test_utils/mock_operators.py | 1 | 1365 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import namedtuple
from airflow.models.baseoperator import BaseOperator
from airflow.utils.decorators import apply_defaults
# Namedtuple for testing purposes
MockNamedTuple = namedtuple("MockNamedTuple", ["var1", "var2"])
class MockOperator(BaseOperator):
"""Operator for testing purposes."""
template_fields = ("arg1", "arg2")
@apply_defaults
def __init__(self, arg1="", arg2="", **kwargs):
super(MockOperator, self).__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
def execute(self, context):
pass
| apache-2.0 | -3,687,914,404,034,162,000 | 34 | 63 | 0.731868 | false |
heri/openaccess | browser-ext/login/test/make_lastpass_csv.py | 26 | 1667 | #!/usr/bin/python
# *****************************************************************************
# Copyright (c) 2012, 2013, 2014 Lectorius, Inc.
# Authors:
# Vijay Pandurangan ([email protected])
# Evan Jones ([email protected])
# Adam Hilss ([email protected])
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# You can contact the authors at [email protected].
# *****************************************************************************
'''Outputs a LastPass CSV file with randomly generated data.'''
import os
import base64
def random_string(chars):
b = os.urandom(chars)
out = base64.encodestring(b)
return out[:chars]
def main():
print 'url,username,password,extra,name,grouping,fav'
for i in xrange(500):
url = 'http://example.com/' + str(i)
username = random_string(8)
password = random_string(9)
note = 'note ' + random_string(10)
title = 'title ' + str(i) + ' ' + random_string(11)
print '%s,%s,%s,%s,%s,,' % (url, username, password, note, title)
if __name__ == '__main__':
main() | gpl-3.0 | 8,697,218,888,819,779 | 30.471698 | 79 | 0.610078 | false |
gizmoguy/faucet | faucet/gauge_pollers.py | 7 | 9928 | """Library for polling dataplanes for statistics."""
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
import time
from ryu.lib import hub
from ryu.ofproto import ofproto_v1_3 as ofp
from ryu.ofproto import ofproto_v1_3_parser as parser
from faucet.valve_of import devid_present
from faucet.valve_of_old import OLD_MATCH_FIELDS
class GaugePoller:
"""Abstraction for a poller for statistics."""
def __init__(self, conf, logname, prom_client):
self.dp = conf.dp # pylint: disable=invalid-name
self.conf = conf
self.prom_client = prom_client
self.reply_pending = False
self.ryudp = None
self.logger = logging.getLogger(
logname + '.{0}'.format(self.conf.type)
)
# _running indicates that the watcher is receiving data
self._running = False
self.req = None
def report_dp_status(self, dp_status):
"""Report DP status."""
self.prom_client.dp_status.labels(
**dict(dp_id=hex(self.dp.dp_id), dp_name=self.dp.name)).set(dp_status) # pylint: disable=no-member
def start(self, ryudp, active):
"""Start the poller."""
self.ryudp = ryudp
self._running = True
if active:
self.logger.info('starting')
def stop(self):
"""Stop the poller."""
self.logger.info('stopping')
self._running = False
def running(self):
"""Return True if the poller is running."""
return self._running
@staticmethod
def is_active():
"""Return True if the poller is controlling the request loop for its stat"""
return False
def send_req(self):
"""Send a stats request to a datapath."""
raise NotImplementedError # pragma: no cover
def no_response(self):
"""Called when a polling cycle passes without receiving a response."""
dpid_str = ''
if self.req and 'datapath' in self.req:
dpid_str = 'DPID %s (%s)' % (self.req.datapath.id, hex(self.req.datapath.id))
self.logger.info('%s no response to %s', dpid_str, self.req)
def update(self, rcv_time, msg):
"""Handle the responses to requests.
Called when a reply to a stats request sent by this object is received
by the controller.
It should acknowledge the receipt by setting self.reply_pending to
false.
Args:
rcv_time: the time the response was received
msg: the stats reply message
"""
# TODO: it may be worth while verifying this is the correct stats
# response before doing this
if not self.running():
self.logger.debug('update received when not running')
return
self.reply_pending = False
self._update(rcv_time, msg)
@staticmethod
def _format_stat_pairs(_delim, _stat):
return ()
@staticmethod
def _dp_stat_name(_stat, _stat_name):
return ''
@staticmethod
def _rcv_time(rcv_time):
return time.strftime('%b %d %H:%M:%S', time.localtime(rcv_time))
def _update(self, rcv_time, msg):
if not self.conf.file:
return
rcv_time_str = self._rcv_time(rcv_time)
log_lines = []
for stat in msg.body:
for stat_name, stat_val in self._format_stat_pairs('-', stat):
dp_stat_name = self._dp_stat_name(stat, stat_name)
log_lines.append(self._update_line(rcv_time_str, dp_stat_name, stat_val))
with open(self.conf.file, 'a') as logfile:
logfile.writelines(log_lines)
@staticmethod
def _format_stats(delim, stat_pairs):
formatted_stats = []
for stat_name_list, stat_val in stat_pairs:
stat_name = delim.join(stat_name_list)
# OVS reports unsupported statistics as all-1-bits (UINT64_MAX)
if stat_val == 2**64-1:
stat_val = 0
formatted_stats.append((stat_name, stat_val))
return formatted_stats
@staticmethod
def _update_line(rcv_time_str, stat_name, stat_val):
return '\t'.join((rcv_time_str, stat_name, str(stat_val))) + '\n'
class GaugeThreadPoller(GaugePoller):
"""A ryu thread object for sending and receiving OpenFlow stats requests.
The thread runs in a loop sending a request, sleeping then checking a
response was received before sending another request.
The methods send_req, update and no_response should be implemented by
subclasses.
"""
def __init__(self, conf, logname, prom_client):
super(GaugeThreadPoller, self).__init__(conf, logname, prom_client)
self.thread = None
self.interval = self.conf.interval
self.ryudp = None
def start(self, ryudp, active):
self.stop()
super(GaugeThreadPoller, self).start(ryudp, active)
if active:
self.thread = hub.spawn(self)
self.thread.name = 'GaugeThreadPoller'
def stop(self):
super(GaugeThreadPoller, self).stop()
if self.is_active():
hub.kill(self.thread)
hub.joinall([self.thread])
self.thread = None
def is_active(self):
return self.thread is not None
def __call__(self):
"""Send request loop.
Delays the initial request for a random interval to reduce load.
Then sends a request to the datapath, waits the specified interval and
checks that a response has been received in a loop."""
# TODO: this should use a deterministic method instead of random
hub.sleep(random.randint(1, self.conf.interval))
while True:
self.send_req()
self.reply_pending = True
hub.sleep(self.conf.interval)
if self.reply_pending:
self.no_response()
def send_req(self):
"""Send a stats request to a datapath."""
raise NotImplementedError # pragma: no cover
class GaugeMeterStatsPoller(GaugeThreadPoller):
"""Poll for all meter stats."""
def send_req(self):
if self.ryudp:
self.req = parser.OFPMeterStatsRequest(self.ryudp, 0, ofp.OFPM_ALL)
self.ryudp.send_msg(self.req)
class GaugePortStatsPoller(GaugeThreadPoller):
"""Periodically sends a port stats request to the datapath and parses
and outputs the response.
"""
def send_req(self):
if self.ryudp:
self.req = parser.OFPPortStatsRequest(self.ryudp, 0, ofp.OFPP_ANY)
self.ryudp.send_msg(self.req)
def _format_stat_pairs(self, delim, stat):
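        # With the '-' delimiter passed in by _update(), this yields stat
        # names such as 'packets-out', 'bytes-in' and 'errors-in'.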
stat_pairs = (
(('packets', 'out'), stat.tx_packets),
(('packets', 'in'), stat.rx_packets),
(('bytes', 'out'), stat.tx_bytes),
(('bytes', 'in'), stat.rx_bytes),
(('dropped', 'out'), stat.tx_dropped),
(('dropped', 'in'), stat.rx_dropped),
(('errors', 'out'), stat.tx_errors),
(('errors', 'in'), stat.rx_errors))
return self._format_stats(delim, stat_pairs)
class GaugeFlowTablePoller(GaugeThreadPoller):
"""Periodically dumps the current datapath flow table as a yaml object.
Includes a timestamp and a reference ($DATAPATHNAME-flowtables). The
flow table is dumped as an OFFlowStatsReply message (in yaml format) that
matches all flows.
"""
def send_req(self):
if self.ryudp:
match = parser.OFPMatch()
self.req = parser.OFPFlowStatsRequest(
self.ryudp, 0, ofp.OFPTT_ALL, ofp.OFPP_ANY, ofp.OFPG_ANY,
0, 0, match)
self.ryudp.send_msg(self.req)
def _parse_flow_stats(self, stats):
"""Parse flow stats reply message into tags/labels and byte/packet counts."""
packet_count = int(stats['packet_count'])
byte_count = int(stats['byte_count'])
instructions = stats['instructions']
tags = {
'dp_name': self.dp.name,
'dp_id': hex(self.dp.dp_id),
'table_id': int(stats['table_id']),
'priority': int(stats['priority']),
'inst_count': len(instructions),
'cookie': int(stats['cookie']),
}
oxm_matches = stats['match']['OFPMatch']['oxm_fields']
for oxm_match in oxm_matches:
oxm_tlv = oxm_match['OXMTlv']
mask = oxm_tlv['mask']
val = oxm_tlv['value']
orig_field = oxm_tlv['field']
if mask is not None:
val = '/'.join((str(val), str(mask)))
field = OLD_MATCH_FIELDS.get(orig_field, orig_field)
tags[field] = val
if field == 'vlan_vid' and mask is None:
tags['vlan'] = devid_present(int(val))
return (
('flow_packet_count', tags, packet_count),
('flow_byte_count', tags, byte_count))
class GaugePortStatePoller(GaugePoller):
"""Abstraction for port state poller."""
def send_req(self):
"""Send a stats request to a datapath."""
raise NotImplementedError # pragma: no cover
def no_response(self):
"""Called when a polling cycle passes without receiving a response."""
raise NotImplementedError # pragma: no cover
| apache-2.0 | 8,339,926,034,943,005,000 | 33.592334 | 110 | 0.605359 | false |
CopyChat/Plotting | Downscaling/climatechange.clt2.py | 1 | 24117 | #!/usr/bin/env python
########################################
#Globale Karte fuer tests
# from Rabea Amther
########################################
# http://gfesuite.noaa.gov/developer/netCDFPythonInterface.html
import math
import numpy as np
import pylab as pl
import Scientific.IO.NetCDF as IO
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.lines as lines
import datetime
from mpl_toolkits.basemap import Basemap , addcyclic
from matplotlib.colors import LinearSegmentedColormap
import textwrap
pl.close('all')
obsRef=0
########################## for CMIP5 charactors
VARIABLE='clt'
PRODUCT='Amon'
AbsTemp=273.15
RefTemp=5
MODISmean=52.721 #2001-2010
TargetModel=[\
#'CCSM4',\
#'CESM1-BGC',\
#'CESM1-CAM5',\
#'CESM1-FASTCHEM',\
#'CESM1-WACCM',\
#'CNRM-CM5',\
#'CSIRO-Mk3-6-0',\
#'CanESM2',\
#'EC-EARTH',\
#'GFDL-ESM2G',\
'GFDL-ESM2M',\
#'GISS-E2-H',\
#'GISS-E2-R-CC',\
#'HadGEM2-AO',\
#'HadGEM2-CC',\
'HadGEM2-ES',\
'IPSL-CM5A-LR',\
#'IPSL-CM5A-MR',\
#'MIROC-ESM-CHEM',\
#'MIROC-ESM',\
#'MIROC5',\
#'MPI-ESM-LR',\
#'MPI-ESM-MR',\
#'MPI-ESM-P',\
#'MRI-CGCM3',\
#'NorESM1-ME',\
#'bcc-csm1-1-m',\
#'bcc-csm1-1',\
#'inmcm4',\
]
COLORtar=['red','darkmagenta','navy',\
'deeppink','orange','orangered','yellow','gold','brown','chocolate',\
'green','yellowgreen','aqua','olive','teal','blue',\
'purple','darkmagenta','fuchsia','indigo',\
'dimgray','black','navy']
COLORall=['darkred','darkblue','darkgreen','deeppink',\
'red','blue','green','pink','gold',\
'lime','lightcyan','orchid','yellow','lightsalmon',\
'brown','khaki','aquamarine','yellowgreen','blueviolet',\
'snow','skyblue','slateblue','orangered','dimgray',\
'chocolate','teal','mediumvioletred','gray','cadetblue',\
'mediumorchid','bisque','tomato','hotpink','firebrick',\
'Chartreuse','purple','goldenrod',\
'black','orangered','cyan','magenta']
linestyles=['_', '_', '_', '-', '-',\
'-', '--','--','--', '--',\
'_', '_','_','_',\
'_', '_','_','_',\
'_', '-', '--', ':','_', '-', '--', ':','_', '-', '--', ':','_', '-', '--', ':']
RCMsHist=[\
'clt_AFR-44_CCCma-CanESM2_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_CNRM-CERFACS-CNRM-CM5_historical_r1i1p1_CLMcom-CCLM4-8-17_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_CNRM-CERFACS-CNRM-CM5_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_CSIRO-QCCCE-CSIRO-Mk3-6-0_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_ICHEC-EC-EARTH_historical_r12i1p1_CLMcom-CCLM4-8-17_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_ICHEC-EC-EARTH_historical_r12i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_ICHEC-EC-EARTH_historical_r1i1p1_KNMI-RACMO22T_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_ICHEC-EC-EARTH_historical_r3i1p1_DMI-HIRHAM5_v2_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_IPSL-IPSL-CM5A-MR_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_MIROC-MIROC5_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_MOHC-HadGEM2-ES_historical_r1i1p1_CLMcom-CCLM4-8-17_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_MOHC-HadGEM2-ES_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_MPI-M-MPI-ESM-LR_historical_r1i1p1_CLMcom-CCLM4-8-17_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_MPI-M-MPI-ESM-LR_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_NCC-NorESM1-M_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'clt_AFR-44_NOAA-GFDL-GFDL-ESM2M_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
]
RCMsRCP85=[\
'clt_AFR-44_CCCma-CanESM2_rcp85_r1i1p1_SMHI-RCA4_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_CNRM-CERFACS-CNRM-CM5_rcp85_r1i1p1_CLMcom-CCLM4-8-17_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_CNRM-CERFACS-CNRM-CM5_rcp85_r1i1p1_SMHI-RCA4_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_CSIRO-QCCCE-CSIRO-Mk3-6-0_rcp85_r1i1p1_SMHI-RCA4_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_ICHEC-EC-EARTH_rcp85_r12i1p1_CLMcom-CCLM4-8-17_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_ICHEC-EC-EARTH_rcp85_r12i1p1_SMHI-RCA4_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_ICHEC-EC-EARTH_rcp85_r1i1p1_KNMI-RACMO22T_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_ICHEC-EC-EARTH_rcp85_r3i1p1_DMI-HIRHAM5_v2_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_IPSL-IPSL-CM5A-MR_rcp85_r1i1p1_SMHI-RCA4_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_MIROC-MIROC5_rcp85_r1i1p1_SMHI-RCA4_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_MOHC-HadGEM2-ES_rcp85_r1i1p1_CLMcom-CCLM4-8-17_v1_200601-210012.ymean.fldmean.nc',\
#'clt_AFR-44_MOHC-HadGEM2-ES_rcp85_r1i1p1_KNMI-RACMO22T_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_MOHC-HadGEM2-ES_rcp85_r1i1p1_SMHI-RCA4_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_MPI-M-MPI-ESM-LR_rcp85_r1i1p1_CLMcom-CCLM4-8-17_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_MPI-M-MPI-ESM-LR_rcp85_r1i1p1_SMHI-RCA4_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_NCC-NorESM1-M_rcp85_r1i1p1_SMHI-RCA4_v1_200601-210012.ymean.fldmean.nc',\
'clt_AFR-44_NOAA-GFDL-GFDL-ESM2M_rcp85_r1i1p1_SMHI-RCA4_v1_200601-210012.ymean.fldmean.nc',\
]
GCMsRCP85=[\
'CCSM4',\
'CESM1-BGC',\
'CESM1-CAM5',\
'CNRM-CM5',\
'CanESM2',\
'GFDL-ESM2G',\
'GFDL-ESM2M',\
'GISS-E2-H',\
'GISS-E2-R-CC',\
'HadGEM2-AO',\
'HadGEM2-ES',\
'IPSL-CM5A-LR',\
'IPSL-CM5A-MR',\
'MPI-ESM-LR',\
'MPI-ESM-MR',\
'NorESM1-ME',\
'inmcm4',\
]
#================================================ CMIP5 models
# for historical
GCMsHist=[\
'CCSM4',\
'CESM1-BGC',\
'CESM1-CAM5',\
'CESM1-FASTCHEM',\
'CESM1-WACCM',\
'CNRM-CM5',\
'CSIRO-Mk3-6-0',\
'CanESM2',\
'EC-EARTH',\
'GFDL-ESM2G',\
'GFDL-ESM2M',\
'GISS-E2-H',\
'GISS-E2-R-CC',\
'HadGEM2-AO',\
'HadGEM2-CC',\
'HadGEM2-ES',\
'IPSL-CM5A-LR',\
'IPSL-CM5A-MR',\
'MIROC-ESM-CHEM',\
'MIROC-ESM',\
'MIROC5',\
'MPI-ESM-LR',\
'MPI-ESM-MR',\
'MPI-ESM-P',\
'MRI-CGCM3',\
'NorESM1-ME',\
'bcc-csm1-1-m',\
'bcc-csm1-1',\
'inmcm4',\
]
EnsembleHist=[\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r2i1p1',\
'r1i1p1',\
'r2i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r2i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
]
EnsembleRCP85=[\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
]
#=================================================== define the Plot:
fig1=plt.figure(figsize=(16,9))
ax = fig1.add_subplot(111)
plt.xlabel('Year',fontsize=16)
plt.ylabel('Cloud Cover Fraction Change (%)',fontsize=16)
plt.title("Cloud Cover Fraction Change (%) in AFRICA simulated by CMIP5 and CORDEX models",fontsize=18)
plt.ylim(-10,5)
plt.xlim(1960,2100)
plt.grid()
plt.xticks(np.arange(1960, 2100+10, 20))
plt.tick_params(axis='both', which='major', labelsize=14)
plt.tick_params(axis='both', which='minor', labelsize=14)
# vertical at 2005
plt.axvline(x=2005.5,linewidth=2, color='gray')
plt.axhline(y=0,linewidth=2, color='gray')
#plt.plot(x,y,color="blue",linewidth=4)
########################## for hist:
########################## for hist:
#============================ for CORDEX
#============================ for CORDEX
EXPERIMENT='CORDEX'
DirCordexHist='/Users/tang/climate/CORDEX/hist/AFRICA/'
YEAR=range(1960,2006)
SumTemp=np.zeros(len(YEAR))
K=0
print "========== for",EXPERIMENT," ==============="
for infile0 in RCMsHist:
infile1=DirCordexHist+infile0
K=K+1 # for average
print('the file is == ' +infile1)
#open input files
infile=IO.NetCDFFile(infile1,'r')
# read the variable tas
TAS=infile.variables[VARIABLE][:,:,:].copy()
#print 'the variable tas ===============: '
#print TAS
# calculate the annual mean temp:
#TEMP=range(0,Nmonth,12)
#for j in range(0,Nmonth,12):
#TEMP[j/12]=np.mean(TAS[j:j+11][:][:])-AbsTemp
print " temp ======================== absolut"
TEMP=range(0,len(YEAR))
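    # Each input file is an annual-mean, field-mean series (".ymean.fldmean"),
    # so TAS[j] is presumably a 1x1 field for year j and the np.mean below
    # simply collapses the remaining singleton spatial dimensions.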
for j in range(0,len(YEAR)):
TEMP[j]=np.mean(TAS[j][:][:])
#print TEMP
if obsRef==1:
RefTemp=MODISmean
else:
        # reference temp: mean of 1996-2005 (last 10 years of the historical period)
        RefTemp=np.mean(TEMP[len(YEAR)-10:len(YEAR)])
if K==1:
ArrRefTemp=[RefTemp]
else:
ArrRefTemp=ArrRefTemp+[RefTemp]
print 'ArrRefTemp ========== ',ArrRefTemp
TEMP=[t-RefTemp for t in TEMP]
#print " temp ======================== relative to mean of 1986-2005"
#print TEMP
# get array of temp K*TimeStep
if K==1:
ArrTemp=[TEMP]
else:
ArrTemp=ArrTemp+[TEMP]
SumTemp=SumTemp+TEMP
#print SumTemp
#=================================================== to plot
print "======== to plot =========="
print len(TEMP)
print 'NO. of year:',len(YEAR)
print 'NO. of timesteps on TEMP:',len(TEMP)
#plot only target models
#if Model in TargetModel:
#plt.plot(YEAR,TEMP,label=Model,\
##linestyles[TargetModel.index(Model)],\
#color=COLORtar[TargetModel.index(Model)],linewidth=2)
#print "color is",COLORtar[TargetModel.index(Model)]
#if Model=='CanESM2':
#plt.plot(YEAR,TEMP,color="red",linewidth=1)
#if Model=='MPI-ESM-LR':
#plt.plot(YEAR,TEMP,color="blue",linewidth=1)
#if Model=='MPI-ESM-MR':
#plt.plot(YEAR,TEMP,color="green",linewidth=1)
#=================================================== for ensemble mean
AveTemp=[e/K for e in SumTemp]
ArrTemp=list(np.array(ArrTemp))
print 'shape of ArrTemp:',np.shape(ArrTemp)
StdTemp=np.std(np.array(ArrTemp),axis=0)
print 'shape of StdTemp:',np.shape(StdTemp)
print "ArrTemp ========================:"
print ArrTemp
print "StdTemp ========================:"
print StdTemp
# 5-95% range ( +-1.64 STD)
StdTemp1=[AveTemp[i]+StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
StdTemp2=[AveTemp[i]-StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
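# Assuming the inter-model spread is roughly Gaussian, mean +/- 1.64 standard
# deviations brackets the central 90% of the ensemble, i.e. the 5th-95th
# percentile band that is shaded below.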
print "Model number for historical is :",K
print "models for historical:";print GCMsHist
plt.plot(YEAR,AveTemp,label=" CORDEX mean", color="black",linewidth=4)
plt.plot(YEAR,StdTemp1,color="black",linewidth=0.1)
plt.plot(YEAR,StdTemp2,color="black",linewidth=0.1)
plt.fill_between(YEAR,StdTemp1,StdTemp2,color='blue',alpha=0.3)
# draw NO. of model used:
plt.text(1980,-6,'CORDEX model: '+str(K),size=16,rotation=0.,
ha="center",va="center",
#bbox = dict(boxstyle="round",
#ec=(1., 0.5, 0.5),
#fc=(1., 0.8, 0.8),
)
#=================================================== for CORDEX RCP85
DirCordexRcp85='/Users/tang/climate/CORDEX/rcp85/AFRICA/'
YEAR=range(2006,2101)
SumTemp=np.zeros(len(YEAR))
K=0
print "========== for",EXPERIMENT," ==============="
for infile0 in RCMsRCP85:
infile1=DirCordexRcp85+infile0
K=K+1 # for average
print('the file is == ' +infile1)
#open input files
infile=IO.NetCDFFile(infile1,'r')
# read the variable tas
TAS=infile.variables[VARIABLE][:,:,:].copy()
#print 'the variable tas ===============: '
#print TAS
# calculate the annual mean temp:
#TEMP=range(0,Nmonth,12)
#for j in range(0,Nmonth,12):
#TEMP[j/12]=np.mean(TAS[j:j+11][:][:])-AbsTemp
print " temp ======================== absolut"
TEMP=range(0,len(YEAR))
for j in range(0,len(YEAR)):
TEMP[j]=np.mean(TAS[j][:][:])
#print TEMP
if obsRef==1:
RefTemp=MODISmean
else:
# reference temp: mean of 1996-2005
# get the reftemp if the model has historical data here
print 'ArrRefTemp in HIST ensembles:',np.shape(ArrRefTemp)
print ArrRefTemp
print 'model index in HIST: ',RCMsRCP85.index(infile0)
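        # Note: this lookup assumes RCMsHist and RCMsRCP85 list the same
        # GCM/RCM combinations in the same order, since ArrRefTemp was filled
        # in the order of the historical file list.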
RefTemp=ArrRefTemp[RCMsRCP85.index(infile0)]
print 'RefTemp from HIST: ',RefTemp
TEMP=[t-RefTemp for t in TEMP]
#print " temp ======================== relative to mean of 1986-2005"
#print TEMP
# get array of temp K*TimeStep
if K==1:
ArrTemp=[TEMP]
else:
ArrTemp=ArrTemp+[TEMP]
SumTemp=SumTemp+TEMP
#print SumTemp
#=================================================== to plot
print "======== to plot =========="
print len(TEMP)
print 'NO. of year:',len(YEAR)
print 'NO. of timesteps on TEMP:',len(TEMP)
#plot only target models
#if Model in TargetModel:
#plt.plot(YEAR,TEMP,label=Model,\
##linestyles[TargetModel.index(Model)],\
#color=COLORtar[TargetModel.index(Model)],linewidth=2)
#print "color is",COLORtar[TargetModel.index(Model)]
#if Model=='CanESM2':
#plt.plot(YEAR,TEMP,color="red",linewidth=1)
#if Model=='MPI-ESM-LR':
#plt.plot(YEAR,TEMP,color="blue",linewidth=1)
#if Model=='MPI-ESM-MR':
#plt.plot(YEAR,TEMP,color="green",linewidth=1)
#=================================================== for ensemble mean
AveTemp=[e/K for e in SumTemp]
ArrTemp=list(np.array(ArrTemp))
print 'shape of ArrTemp:',np.shape(ArrTemp)
StdTemp=np.std(np.array(ArrTemp),axis=0)
print 'shape of StdTemp:',np.shape(StdTemp)
print "ArrTemp ========================:"
print ArrTemp
print "StdTemp ========================:"
print StdTemp
# 5-95% range ( +-1.64 STD)
StdTemp1=[AveTemp[i]+StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
StdTemp2=[AveTemp[i]-StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
print "Model number for historical is :",K
print "models for historical:";print GCMsHist
plt.plot(YEAR,AveTemp,label=" CORDEX mean", color="black",linewidth=4)
plt.plot(YEAR,StdTemp1,color="black",linewidth=0.1)
plt.plot(YEAR,StdTemp2,color="black",linewidth=0.1)
plt.fill_between(YEAR,StdTemp1,StdTemp2,color='blue',alpha=0.3)
# draw NO. of model used:
plt.text(2020,-6,'CORDEX model: '+str(K),size=16,rotation=0.,
ha="center",va="center",
#bbox = dict(boxstyle="round",
#ec=(1., 0.5, 0.5),
#fc=(1., 0.8, 0.8),
)
#=================================================== for CMIP5 hist
DirCMIP5Hist='/Users/tang/climate/CMIP5/hist/AFRICA'
TAILhist='_196001-200512.ymean.fldmean.AFR.nc'
EXPERIMENT='historical'
YEAR=range(1960,2006)
SumTemp=np.zeros(len(YEAR))
K=0
print "========== for",EXPERIMENT," ==============="
for Model in GCMsHist:
K=K+1 # for average
infile1=DirCMIP5Hist+'/'\
+VARIABLE+'_'+PRODUCT+'_'+Model+'_'+EXPERIMENT+'_'+EnsembleHist[GCMsHist.index(Model)]+TAILhist
#clt_Amon_MPI-ESM-LR_historical_r1i1p1_196001-200512.fldmean.AFR.nc
print('the file is == ' +infile1)
#open input files
infile=IO.NetCDFFile(infile1,'r')
# read the variable tas
TAS=infile.variables[VARIABLE][:,:,:].copy()
#print 'the variable tas ===============: '
#print TAS
# calculate the annual mean temp:
#TEMP=range(0,Nmonth,12)
#for j in range(0,Nmonth,12):
#TEMP[j/12]=np.mean(TAS[j:j+11][:][:])-AbsTemp
print " temp ======================== absolut"
TEMP=range(0,len(YEAR))
for j in range(0,len(YEAR)):
TEMP[j]=np.mean(TAS[j][:][:])
#print TEMP
if obsRef==1:
RefTemp=MODISmean
else:
        # reference temp: mean of 1996-2005 (last 10 years of the historical period)
        RefTemp=np.mean(TEMP[len(YEAR)-10:len(YEAR)])
if K==1:
ArrRefTemp=[RefTemp]
else:
ArrRefTemp=ArrRefTemp+[RefTemp]
print 'ArrRefTemp ========== ',ArrRefTemp
TEMP=[t-RefTemp for t in TEMP]
#print " temp ======================== relative to mean of 1986-2005"
#print TEMP
# get array of temp K*TimeStep
if K==1:
ArrTemp=[TEMP]
else:
ArrTemp=ArrTemp+[TEMP]
SumTemp=SumTemp+TEMP
#print SumTemp
#=================================================== to plot
print "======== to plot =========="
print len(TEMP)
print 'NO. of year:',len(YEAR)
print 'NO. of timesteps on TEMP:',len(TEMP)
#plot only target models
if Model in TargetModel:
plt.plot(YEAR,TEMP,\
#linestyles[TargetModel.index(Model)],\
color=COLORtar[TargetModel.index(Model)],linewidth=2)
print "color is",COLORtar[TargetModel.index(Model)]
#if Model=='CanESM2':
#plt.plot(YEAR,TEMP,color="red",linewidth=1)
#if Model=='MPI-ESM-LR':
#plt.plot(YEAR,TEMP,color="blue",linewidth=1)
#if Model=='MPI-ESM-MR':
#plt.plot(YEAR,TEMP,color="green",linewidth=1)
#=================================================== for ensemble mean
AveTemp=[e/K for e in SumTemp]
ArrTemp=list(np.array(ArrTemp))
print 'shape of ArrTemp:',np.shape(ArrTemp)
StdTemp=np.std(np.array(ArrTemp),axis=0)
print 'shape of StdTemp:',np.shape(StdTemp)
print "ArrTemp ========================:"
print ArrTemp
print "StdTemp ========================:"
print StdTemp
# 5-95% range ( +-1.64 STD)
StdTemp1=[AveTemp[i]+StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
StdTemp2=[AveTemp[i]-StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
print "Model number for historical is :",K
print "models for historical:";print GCMsHist
plt.plot(YEAR,AveTemp,label=' CMIP5 mean',color="black",linewidth=4)
plt.plot(YEAR,StdTemp1,color="black",linewidth=0.1)
plt.plot(YEAR,StdTemp2,color="black",linewidth=0.1)
plt.fill_between(YEAR,StdTemp1,StdTemp2,color='black',alpha=0.3)
# draw NO. of model used:
plt.text(1980,-4,'CMIP5 model: '+str(K),size=16,rotation=0.,
ha="center",va="center",
#bbox = dict(boxstyle="round",
#ec=(1., 0.5, 0.5),
#fc=(1., 0.8, 0.8),
)
#=================================================== for CMIP5 rcp8.5:
DirCMIP5RCP85='/Users/tang/climate/CMIP5/rcp85/AFRICA/'
EXPERIMENT='rcp85'
TailRcp85='_200601-210012.ymean.fldmean.AFR.nc'
YEAR=range(2006,2101)
Nmonth=1140
SumTemp=np.zeros(len(YEAR))
K=0
print "========== for",EXPERIMENT," ==============="
for Model in GCMsRCP85:
K=K+1 # for average
infile1=DirCMIP5RCP85+'/'\
+VARIABLE+'_'+PRODUCT+'_'+Model+'_'+EXPERIMENT+'_'+EnsembleRCP85[GCMsRCP85.index(Model)]+TailRcp85
#clt_Amon_MPI-ESM-LR_historical_r1i1p1_196001-200512.fldmean.AFR.nc
print('the file is == ' +infile1)
#open input files
infile=IO.NetCDFFile(infile1,'r')
# read the variable tas
TAS=infile.variables[VARIABLE][:,:,:].copy()
#print 'the variable tas ===============: '
#print TAS
# calculate the annual mean temp:
#TEMP=range(0,Nmonth,12)
#for j in range(0,Nmonth,12):
#TEMP[j/12]=np.mean(TAS[j:j+11][:][:])-AbsTemp
print " temp ======================== absolut"
TEMP=range(0,len(YEAR))
for j in range(0,len(YEAR)):
TEMP[j]=np.mean(TAS[j][:][:])
#print TEMP
if obsRef==1:
RefTemp=MODISmean
else:
# reference temp: mean of 1996-2005
# get the reftemp if the model has historical data here
print 'ArrRefTemp in HIST ensembles:',np.shape(ArrRefTemp)
print ArrRefTemp
if Model in GCMsHist:
print 'model index in HIST: ',GCMsHist.index(Model)
print 'K=',K
RefTemp=ArrRefTemp[GCMsHist.index(Model)]
print 'RefTemp from HIST: ',RefTemp
else:
RefTemp=np.mean(TEMP[0:9])
print 'RefTemp from RCP8.5: ',RefTemp
TEMP=[t-RefTemp for t in TEMP]
#print " temp ======================== relative to mean of 1986-2005"
#print TEMP
# get array of temp K*TimeStep
if K==1:
ArrTemp=[TEMP]
else:
ArrTemp=ArrTemp+[TEMP]
SumTemp=SumTemp+TEMP
#print SumTemp
#=================================================== to plot
print "======== to plot =========="
print len(TEMP)
print 'NO. of year:',len(YEAR)
print 'NO. of timesteps on TEMP:',len(TEMP)
#plot only target models
if Model in TargetModel:
plt.plot(YEAR,TEMP,label=Model,\
#linestyles[TargetModel.index(Model)],\
color=COLORtar[TargetModel.index(Model)],linewidth=2)
print "color is",COLORtar[TargetModel.index(Model)]
#if Model=='CanESM2':
#plt.plot(YEAR,TEMP,color="red",linewidth=1)
#if Model=='MPI-ESM-LR':
#plt.plot(YEAR,TEMP,color="blue",linewidth=1)
#if Model=='MPI-ESM-MR':
#plt.plot(YEAR,TEMP,color="green",linewidth=1)
#=================================================== for ensemble mean
AveTemp=[e/K for e in SumTemp]
ArrTemp=list(np.array(ArrTemp))
print 'shape of ArrTemp:',np.shape(ArrTemp)
StdTemp=np.std(np.array(ArrTemp),axis=0)
print 'shape of StdTemp:',np.shape(StdTemp)
print "ArrTemp ========================:"
print ArrTemp
print "StdTemp ========================:"
print StdTemp
# 5-95% range ( +-1.64 STD)
StdTemp1=[AveTemp[i]+StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
StdTemp2=[AveTemp[i]-StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
print "Model number for historical is :",K
print "models for historical:";print GCMsHist
plt.plot(YEAR,AveTemp,label=' CMIP5 RCP85 mean',color="black",linewidth=4)
plt.plot(YEAR,StdTemp1,color="black",linewidth=0.1)
plt.plot(YEAR,StdTemp2,color="black",linewidth=0.1)
plt.fill_between(YEAR,StdTemp1,StdTemp2,color='black',alpha=0.3)
# draw NO. of model used:
plt.text(2020,-4,'CMIP5 model: '+str(K),size=16,rotation=0.,
ha="center",va="center",
bbox = dict(boxstyle="round",
ec=(1., 0.5, 0.5),
fc=(1., 0.8, 0.8),
))
plt.legend(loc=2)
plt.show()
quit()
| gpl-3.0 | 1,598,114,237,466,660,400 | 29.800766 | 114 | 0.550649 | false |
behzadnouri/numpy | numpy/ma/extras.py | 2 | 56816 | """
Masked arrays add-ons.
A collection of utilities for `numpy.ma`.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $
"""
from __future__ import division, absolute_import, print_function
__all__ = [
'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d',
'atleast_3d', 'average', 'clump_masked', 'clump_unmasked',
'column_stack', 'compress_cols', 'compress_nd', 'compress_rowcols',
'compress_rows', 'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot',
'dstack', 'ediff1d', 'flatnotmasked_contiguous', 'flatnotmasked_edges',
'hsplit', 'hstack', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols',
'mask_rows', 'masked_all', 'masked_all_like', 'median', 'mr_',
'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack',
'setdiff1d', 'setxor1d', 'unique', 'union1d', 'vander', 'vstack',
]
import itertools
import warnings
from . import core as ma
from .core import (
MaskedArray, MAError, add, array, asarray, concatenate, filled, count,
getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or,
nomask, ones, sort, zeros, getdata, get_masked_subclass, dot,
mask_rowcols
)
import numpy as np
from numpy import ndarray, array as nxarray
import numpy.core.umath as umath
from numpy.core.multiarray import normalize_axis_index
from numpy.core.numeric import normalize_axis_tuple
from numpy.lib.function_base import _ureduce
from numpy.lib.index_tricks import AxisConcatenator
def issequence(seq):
"""
Is seq a sequence (ndarray, list or tuple)?
"""
return isinstance(seq, (ndarray, tuple, list))
def count_masked(arr, axis=None):
"""
Count the number of masked elements along the given axis.
Parameters
----------
arr : array_like
An array with (possibly) masked elements.
axis : int, optional
Axis along which to count. If None (default), a flattened
version of the array is used.
Returns
-------
count : int, ndarray
The total number of masked elements (axis=None) or the number
of masked elements along each slice of the given axis.
See Also
--------
MaskedArray.count : Count non-masked elements.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(9).reshape((3,3))
>>> a = ma.array(a)
>>> a[1, 0] = ma.masked
>>> a[1, 2] = ma.masked
>>> a[2, 1] = ma.masked
>>> a
masked_array(data =
[[0 1 2]
[-- 4 --]
[6 -- 8]],
mask =
[[False False False]
[ True False True]
[False True False]],
fill_value=999999)
>>> ma.count_masked(a)
3
When the `axis` keyword is used an array is returned.
>>> ma.count_masked(a, axis=0)
array([1, 1, 1])
>>> ma.count_masked(a, axis=1)
array([0, 2, 1])
"""
m = getmaskarray(arr)
return m.sum(axis)
def masked_all(shape, dtype=float):
"""
Empty masked array with all elements masked.
Return an empty masked array of the given shape and dtype, where all the
data are masked.
Parameters
----------
shape : tuple
Shape of the required MaskedArray.
dtype : dtype, optional
Data type of the output.
Returns
-------
a : MaskedArray
A masked array with all data masked.
See Also
--------
masked_all_like : Empty masked array modelled on an existing array.
Examples
--------
>>> import numpy.ma as ma
>>> ma.masked_all((3, 3))
masked_array(data =
[[-- -- --]
[-- -- --]
[-- -- --]],
mask =
[[ True True True]
[ True True True]
[ True True True]],
fill_value=1e+20)
The `dtype` parameter defines the underlying data type.
>>> a = ma.masked_all((3, 3))
>>> a.dtype
dtype('float64')
>>> a = ma.masked_all((3, 3), dtype=np.int32)
>>> a.dtype
dtype('int32')
"""
a = masked_array(np.empty(shape, dtype),
mask=np.ones(shape, make_mask_descr(dtype)))
return a
def masked_all_like(arr):
"""
Empty masked array with the properties of an existing array.
Return an empty masked array of the same shape and dtype as
the array `arr`, where all the data are masked.
Parameters
----------
arr : ndarray
An array describing the shape and dtype of the required MaskedArray.
Returns
-------
a : MaskedArray
A masked array with all data masked.
Raises
------
AttributeError
If `arr` doesn't have a shape attribute (i.e. not an ndarray)
See Also
--------
masked_all : Empty masked array with all elements masked.
Examples
--------
>>> import numpy.ma as ma
>>> arr = np.zeros((2, 3), dtype=np.float32)
>>> arr
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> ma.masked_all_like(arr)
masked_array(data =
[[-- -- --]
[-- -- --]],
mask =
[[ True True True]
[ True True True]],
fill_value=1e+20)
The dtype of the masked array matches the dtype of `arr`.
>>> arr.dtype
dtype('float32')
>>> ma.masked_all_like(arr).dtype
dtype('float32')
"""
a = np.empty_like(arr).view(MaskedArray)
a._mask = np.ones(a.shape, dtype=make_mask_descr(a.dtype))
return a
#####--------------------------------------------------------------------------
#---- --- Standard functions ---
#####--------------------------------------------------------------------------
class _fromnxfunction:
"""
Defines a wrapper to adapt NumPy functions to masked arrays.
An instance of `_fromnxfunction` can be called with the same parameters
as the wrapped NumPy function. The docstring of `newfunc` is adapted from
the wrapped function as well, see `getdoc`.
This class should not be used directly. Instead, one of its extensions that
provides support for a specific type of input should be used.
Parameters
----------
funcname : str
The name of the function to be adapted. The function should be
in the NumPy namespace (i.e. ``np.funcname``).
"""
def __init__(self, funcname):
self.__name__ = funcname
self.__doc__ = self.getdoc()
def getdoc(self):
"""
Retrieve the docstring and signature from the function.
The ``__doc__`` attribute of the function is used as the docstring for
the new masked array version of the function. A note on application
of the function to the mask is appended.
.. warning::
If the function docstring already contained a Notes section, the
new docstring will have two Notes sections instead of appending a note
to the existing section.
Parameters
----------
None
"""
npfunc = getattr(np, self.__name__, None)
doc = getattr(npfunc, '__doc__', None)
if doc:
sig = self.__name__ + ma.get_object_signature(npfunc)
locdoc = "Notes\n-----\nThe function is applied to both the _data"\
" and the _mask, if any."
return '\n'.join((sig, doc, locdoc))
return
def __call__(self, *args, **params):
pass
class _fromnxfunction_single(_fromnxfunction):
"""
A version of `_fromnxfunction` that is called with a single array
argument followed by auxiliary args that are passed verbatim for
both the data and mask calls.
"""
def __call__(self, x, *args, **params):
func = getattr(np, self.__name__)
if isinstance(x, ndarray):
_d = func(x.__array__(), *args, **params)
_m = func(getmaskarray(x), *args, **params)
return masked_array(_d, mask=_m)
else:
_d = func(np.asarray(x), *args, **params)
_m = func(getmaskarray(x), *args, **params)
return masked_array(_d, mask=_m)
class _fromnxfunction_seq(_fromnxfunction):
"""
A version of `_fromnxfunction` that is called with a single sequence
of arrays followed by auxiliary args that are passed verbatim for
both the data and mask calls.
"""
def __call__(self, x, *args, **params):
func = getattr(np, self.__name__)
_d = func(tuple([np.asarray(a) for a in x]), *args, **params)
_m = func(tuple([getmaskarray(a) for a in x]), *args, **params)
return masked_array(_d, mask=_m)
class _fromnxfunction_args(_fromnxfunction):
"""
A version of `_fromnxfunction` that is called with multiple array
arguments. The first non-array-like input marks the beginning of the
arguments that are passed verbatim for both the data and mask calls.
Array arguments are processed independently and the results are
returned in a list. If only one array is found, the return value is
just the processed array instead of a list.
"""
def __call__(self, *args, **params):
func = getattr(np, self.__name__)
arrays = []
args = list(args)
while len(args) > 0 and issequence(args[0]):
arrays.append(args.pop(0))
res = []
for x in arrays:
_d = func(np.asarray(x), *args, **params)
_m = func(getmaskarray(x), *args, **params)
res.append(masked_array(_d, mask=_m))
if len(arrays) == 1:
return res[0]
return res
class _fromnxfunction_allargs(_fromnxfunction):
"""
A version of `_fromnxfunction` that is called with multiple array
arguments. Similar to `_fromnxfunction_args` except that all args
are converted to arrays even if they are not so already. This makes
it possible to process scalars as 1-D arrays. Only keyword arguments
    are passed through verbatim for the data and mask calls. Array
arguments are processed independently and the results are returned
in a list. If only one arg is present, the return value is just the
processed array instead of a list.
"""
def __call__(self, *args, **params):
func = getattr(np, self.__name__)
res = []
for x in args:
_d = func(np.asarray(x), **params)
_m = func(getmaskarray(x), **params)
res.append(masked_array(_d, mask=_m))
if len(args) == 1:
return res[0]
return res
atleast_1d = _fromnxfunction_allargs('atleast_1d')
atleast_2d = _fromnxfunction_allargs('atleast_2d')
atleast_3d = _fromnxfunction_allargs('atleast_3d')
vstack = row_stack = _fromnxfunction_seq('vstack')
hstack = _fromnxfunction_seq('hstack')
column_stack = _fromnxfunction_seq('column_stack')
dstack = _fromnxfunction_seq('dstack')
hsplit = _fromnxfunction_single('hsplit')
diagflat = _fromnxfunction_single('diagflat')
#####--------------------------------------------------------------------------
#----
#####--------------------------------------------------------------------------
def flatten_inplace(seq):
"""Flatten a sequence in place."""
k = 0
while (k != len(seq)):
while hasattr(seq[k], '__iter__'):
seq[k:(k + 1)] = seq[k]
k += 1
return seq
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
"""
(This docstring should be overwritten)
"""
arr = array(arr, copy=False, subok=True)
nd = arr.ndim
axis = normalize_axis_index(axis, nd)
ind = [0] * (nd - 1)
i = np.zeros(nd, 'O')
indlist = list(range(nd))
indlist.remove(axis)
i[axis] = slice(None, None)
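    # `i` holds one index per dimension, with a full slice at `axis`; e.g. for
    # a 3-D input and axis=1 it starts out as (0, slice(None), 0), selecting
    # the first 1-D slice that func1d is applied to.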
outshape = np.asarray(arr.shape).take(indlist)
i.put(indlist, ind)
j = i.copy()
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
# if res is a number, then we have a smaller output array
asscalar = np.isscalar(res)
if not asscalar:
try:
len(res)
except TypeError:
asscalar = True
# Note: we shouldn't set the dtype of the output from the first result
# so we force the type to object, and build a list of dtypes. We'll
# just take the largest, to avoid some downcasting
dtypes = []
if asscalar:
dtypes.append(np.asarray(res).dtype)
outarr = zeros(outshape, object)
outarr[tuple(ind)] = res
Ntot = np.product(outshape)
k = 1
while k < Ntot:
# increment the index
ind[-1] += 1
n = -1
while (ind[n] >= outshape[n]) and (n > (1 - nd)):
ind[n - 1] += 1
ind[n] = 0
n -= 1
i.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
outarr[tuple(ind)] = res
dtypes.append(asarray(res).dtype)
k += 1
else:
res = array(res, copy=False, subok=True)
j = i.copy()
j[axis] = ([slice(None, None)] * res.ndim)
j.put(indlist, ind)
Ntot = np.product(outshape)
holdshape = outshape
outshape = list(arr.shape)
outshape[axis] = res.shape
dtypes.append(asarray(res).dtype)
outshape = flatten_inplace(outshape)
outarr = zeros(outshape, object)
outarr[tuple(flatten_inplace(j.tolist()))] = res
k = 1
while k < Ntot:
# increment the index
ind[-1] += 1
n = -1
while (ind[n] >= holdshape[n]) and (n > (1 - nd)):
ind[n - 1] += 1
ind[n] = 0
n -= 1
i.put(indlist, ind)
j.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
outarr[tuple(flatten_inplace(j.tolist()))] = res
dtypes.append(asarray(res).dtype)
k += 1
max_dtypes = np.dtype(np.asarray(dtypes).max())
if not hasattr(arr, '_mask'):
result = np.asarray(outarr, dtype=max_dtypes)
else:
result = asarray(outarr, dtype=max_dtypes)
result.fill_value = ma.default_fill_value(result)
return result
apply_along_axis.__doc__ = np.apply_along_axis.__doc__
def apply_over_axes(func, a, axes):
"""
(This docstring will be overwritten)
"""
val = asarray(a)
N = a.ndim
if array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = ma.expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError("function is not returning "
"an array of the correct shape")
return val
if apply_over_axes.__doc__ is not None:
apply_over_axes.__doc__ = np.apply_over_axes.__doc__[
:np.apply_over_axes.__doc__.find('Notes')].rstrip() + \
"""
Examples
--------
>>> a = ma.arange(24).reshape(2,3,4)
>>> a[:,0,1] = ma.masked
>>> a[:,1,:] = ma.masked
>>> print(a)
[[[0 -- 2 3]
[-- -- -- --]
[8 9 10 11]]
[[12 -- 14 15]
[-- -- -- --]
[20 21 22 23]]]
>>> print(ma.apply_over_axes(ma.sum, a, [0,2]))
[[[46]
[--]
[124]]]
Tuple axis arguments to ufuncs are equivalent:
>>> print(ma.sum(a, axis=(0,2)).reshape((1,-1,1)))
[[[46]
[--]
[124]]]
"""
def average(a, axis=None, weights=None, returned=False):
"""
Return the weighted average of array over the given axis.
Parameters
----------
a : array_like
Data to be averaged.
Masked entries are not taken into account in the computation.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
The importance that each element has in the computation of the average.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If ``weights=None``, then all data in `a` are assumed to have a
weight equal to one. If `weights` is complex, the imaginary parts
are ignored.
returned : bool, optional
Flag indicating whether a tuple ``(result, sum of weights)``
should be returned as output (True), or just the result (False).
Default is False.
Returns
-------
average, [sum_of_weights] : (tuple of) scalar or MaskedArray
The average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `np.float64`
if `a` is of integer type and floats smaller than `float64`, or the
input data-type, otherwise. If returned, `sum_of_weights` is always
`float64`.
Examples
--------
>>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True])
>>> np.ma.average(a, weights=[3, 1, 0, 0])
1.25
>>> x = np.ma.arange(6.).reshape(3, 2)
>>> print(x)
[[ 0. 1.]
[ 2. 3.]
[ 4. 5.]]
>>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3],
... returned=True)
>>> print(avg)
[2.66666666667 3.66666666667]
"""
a = asarray(a)
m = getmask(a)
# inspired by 'average' in numpy/lib/function_base.py
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.count(axis))
else:
wgt = np.asanyarray(weights)
if issubclass(a.dtype.type, (np.integer, np.bool_)):
result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
else:
result_dtype = np.result_type(a.dtype, wgt.dtype)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
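            # e.g. for a 2-D `a` and axis=0, a 1-D `wgt` of length a.shape[0]
            # is first viewed below with shape (1, len(wgt)) and then swapped
            # to (len(wgt), 1), so it broadcasts along the remaining axis.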
wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape)
wgt = wgt.swapaxes(-1, axis)
if m is not nomask:
wgt = wgt*(~a.mask)
scl = wgt.sum(axis=axis, dtype=result_dtype)
avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl
if returned:
if scl.shape != avg.shape:
scl = np.broadcast_to(scl, avg.shape).copy()
return avg, scl
else:
return avg
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int, optional
Axis along which the medians are computed. The default (None) is
to compute the median along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. Note that, if `overwrite_input` is True, and the input
is not already an `ndarray`, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
.. versionadded:: 1.10.0
Returns
-------
median : ndarray
A new array holding the result is returned unless out is
specified, in which case a reference to out is returned.
Return data-type is `float64` for integers and floats smaller than
`float64`, or the input data-type, otherwise.
See Also
--------
mean
Notes
-----
Given a vector ``V`` with ``N`` non masked values, the median of ``V``
is the middle value of a sorted copy of ``V`` (``Vs``) - i.e.
``Vs[(N-1)/2]``, when ``N`` is odd, or ``{Vs[N/2 - 1] + Vs[N/2]}/2``
when ``N`` is even.
Examples
--------
>>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4)
>>> np.ma.median(x)
1.5
>>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4)
>>> np.ma.median(x)
2.5
>>> np.ma.median(x, axis=-1, overwrite_input=True)
masked_array(data = [ 2. 5.],
mask = False,
fill_value = 1e+20)
"""
if not hasattr(a, 'mask'):
m = np.median(getdata(a, subok=True), axis=axis,
out=out, overwrite_input=overwrite_input,
keepdims=keepdims)
if isinstance(m, np.ndarray) and 1 <= m.ndim:
return masked_array(m, copy=False)
else:
return m
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
def _median(a, axis=None, out=None, overwrite_input=False):
# when an unmasked NaN is present return it, so we need to sort the NaN
# values behind the mask
if np.issubdtype(a.dtype, np.inexact):
fill_value = np.inf
else:
fill_value = None
if overwrite_input:
if axis is None:
asorted = a.ravel()
asorted.sort(fill_value=fill_value)
else:
a.sort(axis=axis, fill_value=fill_value)
asorted = a
else:
asorted = sort(a, axis=axis, fill_value=fill_value)
if axis is None:
axis = 0
else:
axis = normalize_axis_index(axis, asorted.ndim)
if asorted.shape[axis] == 0:
# for empty axis integer indices fail so use slicing to get same result
# as median (which is mean of empty slice = nan)
indexer = [slice(None)] * asorted.ndim
indexer[axis] = slice(0, 0)
return np.ma.mean(asorted[indexer], axis=axis, out=out)
if asorted.ndim == 1:
counts = count(asorted)
idx, odd = divmod(count(asorted), 2)
mid = asorted[idx + odd - 1:idx + 1]
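        # e.g. with 5 valid values idx == 2 and odd == 1, so `mid` is the
        # single middle element asorted[2:3]; with 4 valid values idx == 2 and
        # odd == 0, so `mid` holds the two middle elements asorted[1:3].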
if np.issubdtype(asorted.dtype, np.inexact) and asorted.size > 0:
# avoid inf / x = masked
s = mid.sum(out=out)
if not odd:
s = np.true_divide(s, 2., casting='safe', out=out)
s = np.lib.utils._median_nancheck(asorted, s, axis, out)
else:
s = mid.mean(out=out)
# if result is masked either the input contained enough
# minimum_fill_value so that it would be the median or all values
# masked
if np.ma.is_masked(s) and not np.all(asorted.mask):
return np.ma.minimum_fill_value(asorted)
return s
counts = count(asorted, axis=axis)
h = counts // 2
# create indexing mesh grid for all but reduced axis
axes_grid = [np.arange(x) for i, x in enumerate(asorted.shape)
if i != axis]
ind = np.meshgrid(*axes_grid, sparse=True, indexing='ij')
# insert indices of low and high median
ind.insert(axis, h - 1)
low = asorted[tuple(ind)]
ind[axis] = np.minimum(h, asorted.shape[axis] - 1)
high = asorted[tuple(ind)]
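    # Illustrative example: with 4 valid values along the axis, h == 2, so
    # `low` is the sorted element at index 1 and `high` the one at index 2 and
    # their mean is the median; with 5 valid values h == 2 as well, and the
    # odd-count fix-up below copies `high` onto `low` so that only the true
    # middle element (index 2) is used.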
def replace_masked(s):
        # Replace masked entries with minimum_fill_value unless all values
        # are masked.  This is required because the sort order of values equal
        # to or larger than the fill value is undefined, so a valid value
        # could otherwise end up elsewhere, e.g. [4, --, inf].
if np.ma.is_masked(s):
rep = (~np.all(asorted.mask, axis=axis)) & s.mask
s.data[rep] = np.ma.minimum_fill_value(asorted)
s.mask[rep] = False
replace_masked(low)
replace_masked(high)
# duplicate high if odd number of elements so mean does nothing
odd = counts % 2 == 1
np.copyto(low, high, where=odd)
# not necessary for scalar True/False masks
try:
np.copyto(low.mask, high.mask, where=odd)
except:
pass
if np.issubdtype(asorted.dtype, np.inexact):
# avoid inf / x = masked
s = np.ma.sum([low, high], axis=0, out=out)
np.true_divide(s.data, 2., casting='unsafe', out=s.data)
s = np.lib.utils._median_nancheck(asorted, s, axis, out)
else:
s = np.ma.mean([low, high], axis=0, out=out)
return s
def compress_nd(x, axis=None):
"""Supress slices from multiple dimensions which contain masked values.
Parameters
----------
x : array_like, MaskedArray
        The array to operate on. If not a MaskedArray instance (or if no array
        elements are masked), `x` is interpreted as a MaskedArray with `mask`
        set to `nomask`.
axis : tuple of ints or int, optional
        Which dimensions to suppress slices from can be configured with this
        parameter.
        - If axis is a tuple of ints, those are the axes to suppress slices from.
        - If axis is an int, then that is the only axis to suppress slices from.
        - If axis is None, all axes are selected.
Returns
-------
compress_array : ndarray
The compressed array.
"""
x = asarray(x)
m = getmask(x)
# Set axis to tuple of ints
if axis is None:
axis = tuple(range(x.ndim))
else:
axis = normalize_axis_tuple(axis, x.ndim)
# Nothing is masked: return x
if m is nomask or not m.any():
return x._data
# All is masked: return empty
if m.all():
return nxarray([])
# Filter elements through boolean indexing
data = x._data
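    # For every selected axis, a slice is kept only if none of its elements
    # are masked: m.any(axis=axes) reduces the mask over all the *other*
    # dimensions, e.g. for a 2-D array and ax == 0 it flags (and drops) each
    # row containing at least one masked value.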
for ax in axis:
axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim)))
data = data[(slice(None),)*ax + (~m.any(axis=axes),)]
return data
def compress_rowcols(x, axis=None):
"""
Suppress the rows and/or columns of a 2-D array that contain
masked values.
The suppression behavior is selected with the `axis` parameter.
- If axis is None, both rows and columns are suppressed.
- If axis is 0, only rows are suppressed.
- If axis is 1 or -1, only columns are suppressed.
Parameters
----------
x : array_like, MaskedArray
The array to operate on. If not a MaskedArray instance (or if no array
elements are masked), `x` is interpreted as a MaskedArray with
`mask` set to `nomask`. Must be a 2D array.
axis : int, optional
Axis along which to perform the operation. Default is None.
Returns
-------
compressed_array : ndarray
The compressed array.
Examples
--------
>>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> x
masked_array(data =
[[-- 1 2]
[-- 4 5]
[6 7 8]],
mask =
[[ True False False]
[ True False False]
[False False False]],
fill_value = 999999)
>>> np.ma.compress_rowcols(x)
array([[7, 8]])
>>> np.ma.compress_rowcols(x, 0)
array([[6, 7, 8]])
>>> np.ma.compress_rowcols(x, 1)
array([[1, 2],
[4, 5],
[7, 8]])
"""
if asarray(x).ndim != 2:
raise NotImplementedError("compress_rowcols works for 2D arrays only.")
return compress_nd(x, axis=axis)
def compress_rows(a):
"""
Suppress whole rows of a 2-D array that contain masked values.
This is equivalent to ``np.ma.compress_rowcols(a, 0)``, see
`extras.compress_rowcols` for details.
See Also
--------
extras.compress_rowcols
"""
a = asarray(a)
if a.ndim != 2:
raise NotImplementedError("compress_rows works for 2D arrays only.")
return compress_rowcols(a, 0)
def compress_cols(a):
"""
Suppress whole columns of a 2-D array that contain masked values.
This is equivalent to ``np.ma.compress_rowcols(a, 1)``, see
`extras.compress_rowcols` for details.
See Also
--------
extras.compress_rowcols
"""
a = asarray(a)
if a.ndim != 2:
raise NotImplementedError("compress_cols works for 2D arrays only.")
return compress_rowcols(a, 1)
def mask_rows(a, axis=None):
"""
Mask rows of a 2D array that contain masked values.
This function is a shortcut to ``mask_rowcols`` with `axis` equal to 0.
See Also
--------
mask_rowcols : Mask rows and/or columns of a 2D array.
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.zeros((3, 3), dtype=np.int)
>>> a[1, 1] = 1
>>> a
array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
>>> a = ma.masked_equal(a, 1)
>>> a
masked_array(data =
[[0 0 0]
[0 -- 0]
[0 0 0]],
mask =
[[False False False]
[False True False]
[False False False]],
fill_value=999999)
>>> ma.mask_rows(a)
masked_array(data =
[[0 0 0]
[-- -- --]
[0 0 0]],
mask =
[[False False False]
[ True True True]
[False False False]],
fill_value=999999)
"""
return mask_rowcols(a, 0)
def mask_cols(a, axis=None):
"""
Mask columns of a 2D array that contain masked values.
This function is a shortcut to ``mask_rowcols`` with `axis` equal to 1.
See Also
--------
mask_rowcols : Mask rows and/or columns of a 2D array.
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.zeros((3, 3), dtype=np.int)
>>> a[1, 1] = 1
>>> a
array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
>>> a = ma.masked_equal(a, 1)
>>> a
masked_array(data =
[[0 0 0]
[0 -- 0]
[0 0 0]],
mask =
[[False False False]
[False True False]
[False False False]],
fill_value=999999)
>>> ma.mask_cols(a)
masked_array(data =
[[0 -- 0]
[0 -- 0]
[0 -- 0]],
mask =
[[False True False]
[False True False]
[False True False]],
fill_value=999999)
"""
return mask_rowcols(a, 1)
#####--------------------------------------------------------------------------
#---- --- arraysetops ---
#####--------------------------------------------------------------------------
def ediff1d(arr, to_end=None, to_begin=None):
"""
Compute the differences between consecutive elements of an array.
This function is the equivalent of `numpy.ediff1d` that takes masked
values into account, see `numpy.ediff1d` for details.
See Also
--------
numpy.ediff1d : Equivalent function for ndarrays.
"""
arr = ma.asanyarray(arr).flat
ed = arr[1:] - arr[:-1]
arrays = [ed]
#
if to_begin is not None:
arrays.insert(0, to_begin)
if to_end is not None:
arrays.append(to_end)
#
if len(arrays) != 1:
# We'll save ourselves a copy of a potentially large array in the common
        # case where neither to_begin nor to_end was given.
ed = hstack(arrays)
#
return ed
def unique(ar1, return_index=False, return_inverse=False):
"""
Finds the unique elements of an array.
Masked values are considered the same element (masked). The output array
is always a masked array. See `numpy.unique` for more details.
See Also
--------
numpy.unique : Equivalent function for ndarrays.
"""
output = np.unique(ar1,
return_index=return_index,
return_inverse=return_inverse)
if isinstance(output, tuple):
output = list(output)
output[0] = output[0].view(MaskedArray)
output = tuple(output)
else:
output = output.view(MaskedArray)
return output
def intersect1d(ar1, ar2, assume_unique=False):
"""
Returns the unique elements common to both arrays.
    Masked values are considered equal to one another.
The output is always a masked array.
See `numpy.intersect1d` for more details.
See Also
--------
numpy.intersect1d : Equivalent function for ndarrays.
Examples
--------
>>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
>>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
>>> intersect1d(x, y)
masked_array(data = [1 3 --],
mask = [False False True],
fill_value = 999999)
"""
if assume_unique:
aux = ma.concatenate((ar1, ar2))
else:
# Might be faster than unique( intersect1d( ar1, ar2 ) )?
aux = ma.concatenate((unique(ar1), unique(ar2)))
aux.sort()
return aux[:-1][aux[1:] == aux[:-1]]
def setxor1d(ar1, ar2, assume_unique=False):
"""
Set exclusive-or of 1-D arrays with unique elements.
The output is always a masked array. See `numpy.setxor1d` for more details.
See Also
--------
numpy.setxor1d : Equivalent function for ndarrays.
"""
if not assume_unique:
ar1 = unique(ar1)
ar2 = unique(ar2)
aux = ma.concatenate((ar1, ar2))
if aux.size == 0:
return aux
aux.sort()
auxf = aux.filled()
# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0
flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True]))
# flag2 = ediff1d( flag ) == 0
flag2 = (flag[1:] == flag[:-1])
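    # Illustrative example: for aux = [1, 2, 2, 3] (sorted concatenation of
    # both unique'd inputs), flag = [True, True, False, True, True] marks
    # where a value differs from its predecessor (with sentinels at both
    # ends), and flag2 = [True, False, False, True] is True where two
    # consecutive flag entries agree, i.e. for the values occurring exactly
    # once: [1, 3].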
return aux[flag2]
def in1d(ar1, ar2, assume_unique=False, invert=False):
"""
Test whether each element of an array is also present in a second
array.
The output is always a masked array. See `numpy.in1d` for more details.
See Also
--------
numpy.in1d : Equivalent function for ndarrays.
Notes
-----
.. versionadded:: 1.4.0
"""
if not assume_unique:
ar1, rev_idx = unique(ar1, return_inverse=True)
ar2 = unique(ar2)
ar = ma.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
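    # Because 'mergesort' is stable, an element of ar1 that also occurs in ar2
    # ends up immediately before the matching ar2 element in `sar`, so
    # comparing each entry with its successor flags the members of ar1 that
    # are present in ar2 (the comparison is flipped when `invert` is True).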
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = ma.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
def union1d(ar1, ar2):
"""
Union of two arrays.
The output is always a masked array. See `numpy.union1d` for more details.
See also
--------
numpy.union1d : Equivalent function for ndarrays.
"""
return unique(ma.concatenate((ar1, ar2)))
def setdiff1d(ar1, ar2, assume_unique=False):
"""
Set difference of 1D arrays with unique elements.
The output is always a masked array. See `numpy.setdiff1d` for more
details.
See Also
--------
numpy.setdiff1d : Equivalent function for ndarrays.
Examples
--------
>>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1])
>>> np.ma.setdiff1d(x, [1, 2])
masked_array(data = [3 --],
mask = [False True],
fill_value = 999999)
"""
if assume_unique:
ar1 = ma.asarray(ar1).ravel()
else:
ar1 = unique(ar1)
ar2 = unique(ar2)
return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
###############################################################################
# Covariance #
###############################################################################
def _covhelper(x, y=None, rowvar=True, allow_masked=True):
"""
Private function for the computation of covariance and correlation
coefficients.
"""
x = ma.array(x, ndmin=2, copy=True, dtype=float)
xmask = ma.getmaskarray(x)
# Quick exit if we can't process masked data
if not allow_masked and xmask.any():
raise ValueError("Cannot process masked data.")
#
if x.shape[0] == 1:
rowvar = True
# Make sure that rowvar is either 0 or 1
rowvar = int(bool(rowvar))
axis = 1 - rowvar
if rowvar:
tup = (slice(None), None)
else:
tup = (None, slice(None))
#
if y is None:
xnotmask = np.logical_not(xmask).astype(int)
else:
y = array(y, copy=False, ndmin=2, dtype=float)
ymask = ma.getmaskarray(y)
if not allow_masked and ymask.any():
raise ValueError("Cannot process masked data.")
if xmask.any() or ymask.any():
if y.shape == x.shape:
# Define some common mask
common_mask = np.logical_or(xmask, ymask)
if common_mask is not nomask:
xmask = x._mask = y._mask = ymask = common_mask
x._sharedmask = False
y._sharedmask = False
x = ma.concatenate((x, y), axis)
xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int)
x -= x.mean(axis=rowvar)[tup]
return (x, xnotmask, rowvar)
def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None):
"""
Estimate the covariance matrix.
Except for the handling of missing data this function does the same as
`numpy.cov`. For more details and examples, see `numpy.cov`.
By default, masked values are recognized as such. If `x` and `y` have the
same shape, a common mask is allocated: if ``x[i,j]`` is masked, then
``y[i,j]`` will also be masked.
Setting `allow_masked` to False will raise an exception if values are
missing in either of the input arrays.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
form as `x`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : bool, optional
Default normalization (False) is by ``(N-1)``, where ``N`` is the
number of observations given (unbiased estimate). If `bias` is True,
then normalization is by ``N``. This keyword can be overridden by
the keyword ``ddof`` in numpy versions >= 1.5.
allow_masked : bool, optional
If True, masked values are propagated pair-wise: if a value is masked
in `x`, the corresponding value is masked in `y`.
If False, raises a `ValueError` exception when some values are missing.
ddof : {None, int}, optional
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
.. versionadded:: 1.5
Raises
------
ValueError
Raised if some values are missing and `allow_masked` is False.
See Also
--------
numpy.cov
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError("ddof must be an integer")
# Set up ddof
if ddof is None:
if bias:
ddof = 0
else:
ddof = 1
(x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
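    # xnotmask is 1 where an observation is unmasked, so dotting it with
    # itself counts, for every pair of variables, the observations that are
    # valid in both; each covariance entry is then normalized by its own
    # effective sample size minus ddof.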
if not rowvar:
fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof
result = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
else:
fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof
result = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
return result
def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True,
ddof=np._NoValue):
"""
Return Pearson product-moment correlation coefficients.
Except for the handling of missing data this function does the same as
`numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
allow_masked : bool, optional
If True, masked values are propagated pair-wise: if a value is masked
in `x`, the corresponding value is masked in `y`.
If False, raises an exception. Because `bias` is deprecated, this
argument needs to be treated as keyword only to avoid a warning.
ddof : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
See Also
--------
numpy.corrcoef : Equivalent function in top-level NumPy module.
cov : Estimate the covariance matrix.
Notes
-----
This function accepts but discards arguments `bias` and `ddof`. This is
for backwards compatibility with previous versions of this function. These
arguments had no effect on the return values of the function and can be
safely ignored in this and previous versions of numpy.
"""
msg = 'bias and ddof have no effect and are deprecated'
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
warnings.warn(msg, DeprecationWarning, stacklevel=2)
# Get the data
(x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
# Compute the covariance matrix
if not rowvar:
fact = np.dot(xnotmask.T, xnotmask) * 1.
c = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
else:
fact = np.dot(xnotmask, xnotmask.T) * 1.
c = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
# Check whether we have a scalar
try:
diag = ma.diagonal(c)
except ValueError:
return 1
#
if xnotmask.all():
_denom = ma.sqrt(ma.multiply.outer(diag, diag))
else:
_denom = diagflat(diag)
_denom._sharedmask = False # We know return is always a copy
n = x.shape[1 - rowvar]
if rowvar:
for i in range(n - 1):
for j in range(i + 1, n):
_x = mask_cols(vstack((x[i], x[j]))).var(axis=1)
_denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
else:
for i in range(n - 1):
for j in range(i + 1, n):
_x = mask_cols(
vstack((x[:, i], x[:, j]))).var(axis=1)
_denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
return c / _denom
#####--------------------------------------------------------------------------
#---- --- Concatenation helpers ---
#####--------------------------------------------------------------------------
class MAxisConcatenator(AxisConcatenator):
"""
Translate slice objects to concatenation along an axis.
For documentation on usage, see `mr_class`.
See Also
--------
mr_class
"""
def __init__(self, axis=0):
AxisConcatenator.__init__(self, axis, matrix=False)
def __getitem__(self, key):
if isinstance(key, str):
raise MAError("Unavailable for masked array.")
if not isinstance(key, tuple):
key = (key,)
objs = []
scalars = []
final_dtypedescr = None
for k in range(len(key)):
scalar = False
if isinstance(key[k], slice):
step = key[k].step
start = key[k].start
stop = key[k].stop
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, complex):
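                    # A complex step is read as a number of points (as in
                    # np.r_ / np.mgrid), e.g. the slice 0:1:5j expands below
                    # to np.linspace(0, 1, num=5), endpoint included.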
size = int(abs(step))
newobj = np.linspace(start, stop, num=size)
else:
newobj = np.arange(start, stop, step)
elif isinstance(key[k], str):
if (key[k] in 'rc'):
self.matrix = True
self.col = (key[k] == 'c')
continue
try:
self.axis = int(key[k])
continue
except (ValueError, TypeError):
raise ValueError("Unknown special directive")
elif type(key[k]) in np.ScalarType:
newobj = asarray([key[k]])
scalars.append(k)
scalar = True
else:
newobj = key[k]
objs.append(newobj)
if isinstance(newobj, ndarray) and not scalar:
if final_dtypedescr is None:
final_dtypedescr = newobj.dtype
elif newobj.dtype > final_dtypedescr:
final_dtypedescr = newobj.dtype
if final_dtypedescr is not None:
for k in scalars:
objs[k] = objs[k].astype(final_dtypedescr)
res = concatenate(tuple(objs), axis=self.axis)
return self._retval(res)
class mr_class(MAxisConcatenator):
"""
Translate slice objects to concatenation along the first axis.
This is the masked array version of `lib.index_tricks.RClass`.
See Also
--------
lib.index_tricks.RClass
Examples
--------
>>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])]
array([1, 2, 3, 0, 0, 4, 5, 6])
"""
def __init__(self):
MAxisConcatenator.__init__(self, 0)
mr_ = mr_class()
#####--------------------------------------------------------------------------
#---- Find unmasked data ---
#####--------------------------------------------------------------------------
def flatnotmasked_edges(a):
"""
Find the indices of the first and last unmasked values.
Expects a 1-D `MaskedArray`, returns None if all values are masked.
Parameters
----------
a : array_like
Input 1-D `MaskedArray`
Returns
-------
edges : ndarray or None
The indices of first and last non-masked value in the array.
Returns None if all values are masked.
See Also
--------
flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges,
clump_masked, clump_unmasked
Notes
-----
Only accepts 1-D arrays.
Examples
--------
>>> a = np.ma.arange(10)
>>> flatnotmasked_edges(a)
    array([0, 9])
>>> mask = (a < 3) | (a > 8) | (a == 5)
>>> a[mask] = np.ma.masked
>>> np.array(a[~a.mask])
array([3, 4, 6, 7, 8])
>>> flatnotmasked_edges(a)
array([3, 8])
>>> a[:] = np.ma.masked
    >>> print(flatnotmasked_edges(a))
None
"""
m = getmask(a)
if m is nomask or not np.any(m):
return np.array([0, a.size - 1])
unmasked = np.flatnonzero(~m)
if len(unmasked) > 0:
return unmasked[[0, -1]]
else:
return None
def notmasked_edges(a, axis=None):
"""
Find the indices of the first and last unmasked values along an axis.
If all values are masked, return None. Otherwise, return a list
of two tuples, corresponding to the indices of the first and last
unmasked values respectively.
Parameters
----------
a : array_like
The input array.
axis : int, optional
Axis along which to perform the operation.
If None (default), applies to a flattened version of the array.
Returns
-------
edges : ndarray or list
An array of start and end indexes if there are any masked data in
the array. If there are no masked data in the array, `edges` is a
list of the first and last index.
See Also
--------
flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous,
clump_masked, clump_unmasked
Examples
--------
>>> a = np.arange(9).reshape((3, 3))
>>> m = np.zeros_like(a)
>>> m[1:, 1:] = 1
>>> am = np.ma.array(a, mask=m)
>>> np.array(am[~am.mask])
array([0, 1, 2, 3, 6])
    >>> np.ma.notmasked_edges(am)
array([0, 6])
"""
a = asarray(a)
if axis is None or a.ndim == 1:
return flatnotmasked_edges(a)
m = getmaskarray(a)
idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim))
return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]),
tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ]
def flatnotmasked_contiguous(a):
"""
Find contiguous unmasked data in a masked array along the given axis.
Parameters
----------
a : narray
The input array.
Returns
-------
slice_list : list
A sorted sequence of slices (start index, end index).
See Also
--------
flatnotmasked_edges, notmasked_contiguous, notmasked_edges,
clump_masked, clump_unmasked
Notes
-----
Only accepts 2-D arrays at most.
Examples
--------
>>> a = np.ma.arange(10)
>>> np.ma.flatnotmasked_contiguous(a)
slice(0, 10, None)
>>> mask = (a < 3) | (a > 8) | (a == 5)
>>> a[mask] = np.ma.masked
>>> np.array(a[~a.mask])
array([3, 4, 6, 7, 8])
>>> np.ma.flatnotmasked_contiguous(a)
[slice(3, 5, None), slice(6, 9, None)]
>>> a[:] = np.ma.masked
    >>> print(np.ma.flatnotmasked_contiguous(a))
None
"""
m = getmask(a)
if m is nomask:
return slice(0, a.size, None)
i = 0
result = []
for (k, g) in itertools.groupby(m.ravel()):
n = len(list(g))
if not k:
result.append(slice(i, i + n))
i += n
return result or None
def notmasked_contiguous(a, axis=None):
"""
Find contiguous unmasked data in a masked array along the given axis.
Parameters
----------
a : array_like
The input array.
axis : int, optional
Axis along which to perform the operation.
If None (default), applies to a flattened version of the array.
Returns
-------
endpoints : list
A list of slices (start and end indexes) of unmasked indexes
in the array.
See Also
--------
flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges,
clump_masked, clump_unmasked
Notes
-----
Only accepts 2-D arrays at most.
Examples
--------
>>> a = np.arange(9).reshape((3, 3))
>>> mask = np.zeros_like(a)
>>> mask[1:, 1:] = 1
>>> ma = np.ma.array(a, mask=mask)
>>> np.array(ma[~ma.mask])
array([0, 1, 2, 3, 6])
>>> np.ma.notmasked_contiguous(ma)
[slice(0, 4, None), slice(6, 7, None)]
"""
a = asarray(a)
nd = a.ndim
if nd > 2:
        raise NotImplementedError("Currently limited to at most 2D array.")
if axis is None or nd == 1:
return flatnotmasked_contiguous(a)
#
result = []
#
other = (axis + 1) % 2
idx = [0, 0]
idx[axis] = slice(None, None)
#
for i in range(a.shape[other]):
idx[other] = i
result.append(flatnotmasked_contiguous(a[idx]) or None)
return result
def _ezclump(mask):
"""
Finds the clumps (groups of data with the same values) for a 1D bool array.
Returns a series of slices.
"""
if mask.ndim > 1:
mask = mask.ravel()
idx = (mask[1:] ^ mask[:-1]).nonzero()
idx = idx[0] + 1
if mask[0]:
if len(idx) == 0:
return [slice(0, mask.size)]
r = [slice(0, idx[0])]
r.extend((slice(left, right)
for left, right in zip(idx[1:-1:2], idx[2::2])))
else:
if len(idx) == 0:
return []
r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])]
if mask[-1]:
r.append(slice(idx[-1], mask.size))
return r
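# Minimal usage sketch for _ezclump (the mask values below are illustrative
# only); defined as a helper that is never called on import.
def _ezclump_example():
    mask = np.array([True, True, False, False, True, False])
    # Runs of consecutive True values sit at indices 0-1 and index 4, so the
    # result is [slice(0, 2, None), slice(4, 5, None)].
    return _ezclump(mask)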
def clump_unmasked(a):
"""
Return list of slices corresponding to the unmasked clumps of a 1-D array.
(A "clump" is defined as a contiguous region of the array).
Parameters
----------
a : ndarray
A one-dimensional masked array.
Returns
-------
slices : list of slice
The list of slices, one for each continuous region of unmasked
elements in `a`.
Notes
-----
.. versionadded:: 1.4.0
See Also
--------
flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges,
notmasked_contiguous, clump_masked
Examples
--------
>>> a = np.ma.masked_array(np.arange(10))
>>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
>>> np.ma.clump_unmasked(a)
[slice(3, 6, None), slice(7, 8, None)]
"""
mask = getattr(a, '_mask', nomask)
if mask is nomask:
return [slice(0, a.size)]
return _ezclump(~mask)
def clump_masked(a):
"""
Returns a list of slices corresponding to the masked clumps of a 1-D array.
(A "clump" is defined as a contiguous region of the array).
Parameters
----------
a : ndarray
A one-dimensional masked array.
Returns
-------
slices : list of slice
The list of slices, one for each continuous region of masked elements
in `a`.
Notes
-----
.. versionadded:: 1.4.0
See Also
--------
flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges,
notmasked_contiguous, clump_unmasked
Examples
--------
>>> a = np.ma.masked_array(np.arange(10))
>>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
>>> np.ma.clump_masked(a)
[slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)]
"""
mask = ma.getmask(a)
if mask is nomask:
return []
return _ezclump(mask)
###############################################################################
# Polynomial fit #
###############################################################################
def vander(x, n=None):
"""
Masked values in the input array result in rows of zeros.
"""
_vander = np.vander(x, n)
m = getmask(x)
if m is not nomask:
_vander[m] = 0
return _vander
vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__)
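# Sketch of the masked vander behaviour (input values are illustrative): the
# row belonging to the masked element comes back as all zeros.
def _vander_example():
    x = array([1, 2, 3], mask=[False, True, False])
    # np.vander would give [[1, 1, 1], [4, 2, 1], [9, 3, 1]]; masking the
    # second element zeroes its row: [[1, 1, 1], [0, 0, 0], [9, 3, 1]].
    return vander(x, 3)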
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
    Any masked values in x are propagated to y, and vice-versa.
"""
x = asarray(x)
y = asarray(y)
m = getmask(x)
if y.ndim == 1:
m = mask_or(m, getmask(y))
elif y.ndim == 2:
my = getmask(mask_rows(y))
if my is not nomask:
m = mask_or(m, my[:, 0])
else:
raise TypeError("Expected a 1D or 2D array for y!")
if w is not None:
w = asarray(w)
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
m = mask_or(m, getmask(w))
if m is not nomask:
not_m = ~m
if w is not None:
w = w[not_m]
return np.polyfit(x[not_m], y[not_m], deg, rcond, full, w, cov)
else:
return np.polyfit(x, y, deg, rcond, full, w, cov)
polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__)
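# Small end-to-end sketch of the masked polyfit (numbers are illustrative):
# any pair whose x or y value is masked is dropped from the fit.
def _polyfit_example():
    x = array([1., 2., 3., 4.], mask=[0, 0, 1, 0])
    y = np.array([2., 4., 99., 8.])
    # The masked pair (3., 99.) is ignored, so the degree-1 fit comes out
    # close to slope 2 and intercept 0.
    return polyfit(x, y, 1)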
| bsd-3-clause | -3,088,952,023,299,618,000 | 28.966245 | 83 | 0.554949 | false |
slank/ansible | hacking/dump_playbook_attributes.py | 14 | 1675 | #!/usr/bin/env python
import optparse
from jinja2 import Environment, FileSystemLoader
from ansible.playbook import Play
from ansible.playbook.block import Block
from ansible.playbook.role import Role
from ansible.playbook.task import Task
template_file = 'playbooks_directives.rst.j2'
oblist = {}
clist = []
class_list = [ Play, Role, Block, Task ]
p = optparse.OptionParser(
version='%prog 1.0',
usage='usage: %prog [options]',
description='Generate module documentation from metadata',
)
p.add_option("-T", "--template-dir", action="store", dest="template_dir", default="hacking/templates", help="directory containing Jinja2 templates")
p.add_option("-o", "--output-dir", action="store", dest="output_dir", default='/tmp/', help="Output directory for rst files")
(options, args) = p.parse_args()
for aclass in class_list:
aobj = aclass()
name = type(aobj).__name__
# build ordered list to loop over and dict with attributes
clist.append(name)
oblist[name] = dict((x, aobj.__dict__['_attributes'][x]) for x in aobj.__dict__['_attributes'] if 'private' not in x or not x.private)
# loop is really with_ for users
if name == 'Task':
oblist[name]['with_<lookup_plugin>'] = True
# local_action is implicit with action
if 'action' in oblist[name]:
oblist[name]['local_action'] = True
env = Environment(loader=FileSystemLoader(options.template_dir), trim_blocks=True,)
template = env.get_template(template_file)
outputname = options.output_dir + template_file.replace('.j2','')
tempvars = { 'oblist': oblist, 'clist': clist }
with open( outputname, 'w') as f:
f.write(template.render(tempvars))
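# Example invocation sketch (run from an ansible checkout; the paths simply
# mirror the option defaults declared above and are otherwise assumptions):
#
#     ./hacking/dump_playbook_attributes.py -T hacking/templates -o /tmp/
#
# which renders playbooks_directives.rst into the chosen output directory.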
| gpl-3.0 | 5,719,085,790,880,470,000 | 33.895833 | 148 | 0.690746 | false |
vdenPython/python_training | fixture/contact.py | 1 | 6515 | # -*- coding: utf-8 -*-
__author__ = 'vden'
from model.contact import Contact
from model.group import Group
import re
class ContactHelper:
def __init__(self, app):
self.app = app
def create(self, contact):
wd = self.app.wd
self.app.navigation.open_add_new_page()
self.fill_contact_form(contact)
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
self.app.navigation.open_add_new_page()
self.contact_cache = None
def fill_contact_form(self, contact):
wd = self.app.wd
self.app.common.change_field_value(field_name="firstname", text=contact.firstname)
self.app.common.change_field_value(field_name="middlename", text=contact.middlename)
self.app.common.change_field_value(field_name="lastname", text=contact.lastname)
self.app.common.change_field_value(field_name="nickname", text=contact.nickname)
self.app.common.change_field_value(field_name="title", text=contact.title)
self.app.common.change_field_value(field_name="company", text=contact.company)
self.app.common.change_field_value(field_name="address", text=contact.address)
self.app.common.change_field_value(field_name="home", text=contact.homephone)
self.app.common.change_field_value(field_name="mobile", text=contact.mobilephone)
self.app.common.change_field_value(field_name="work", text=contact.workphone)
self.app.common.change_field_value(field_name="fax", text=contact.fax)
self.app.common.change_field_value(field_name="address2", text=contact.address2)
self.app.common.change_field_value(field_name="phone2", text=contact.seconderyphone)
self.app.common.change_field_value(field_name="notes", text=contact.notes)
def delete_first_contact(self):
self.delete_contact_by_index(0)
def delete_contact_by_index(self, index):
wd = self.app.wd
self.app.navigation.open_home_page()
self.select_contact_by_index(index)
wd.find_element_by_xpath("//div[@id='content']/form[2]/input[2]").click()
self.contact_cache = None
def delete_contact_by_id(self, id):
wd = self.app.wd
self.app.navigation.open_home_page()
self.select_contact_by_id(id)
wd.find_element_by_css_selector(".left>input[value='Delete").click()
wd.switch_to_alert().accept()
self.app.navigation.open_home_page()
self.contact_cache = None
def select_contact_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_css_selector('img[alt="Edit"]')[index].click()
def select_contact_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s'" % id).click()
    def modify_first_contact(self, new_contact_date):
        self.modify_contact_by_index(0, new_contact_date)
def modify_contact_by_index(self, index, new_contact_date):
wd = self.app.wd
self.app.navigation.open_home_page()
self.select_contact_by_index(index)
self.fill_contact_form(new_contact_date)
wd.find_element_by_name("update").click()
self.contact_cache = None
def count(self):
wd = self.app.wd
self.app.navigation.open_home_page()
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
def get_contact_list(self):
wd = self.app.wd
if self.contact_cache is None:
self.app.navigation.open_home_page()
self.contact_cache = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
firstname = cells[2].text
lastname = cells[1].text
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
address = cells[3].text
all_e_mails = cells[4].text
all_phones = cells[5].text
self.contact_cache.append(Contact(id=id, lastname=lastname, firstname=firstname,
address=address, all_e_mails=all_e_mails,
all_phones_from_home_page = all_phones))
return list(self.contact_cache)
def get_contact_info_edit_page(self, index):
wd = self.app.wd
self.app.navigation.open_home_page()
self.select_contact_by_index(index)
firstname = wd.find_element_by_name("lastname").get_attribute("value")
lastname = wd.find_element_by_name("firstname").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
address = wd.find_element_by_name("address").get_attribute("value")
homephone = wd.find_element_by_name("home").get_attribute("value")
workphone = wd.find_element_by_name("work").get_attribute("value")
mobilephone = wd.find_element_by_name("mobile").get_attribute("value")
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
seconderyphone = wd.find_element_by_name("phone2").get_attribute("value")
return Contact(firstname=firstname, lastname=lastname, id=id, address=address,
homephone=homephone, workphone=workphone,
email=email, email2=email2, email3=email3,
mobilephone=mobilephone, seconderyphone=seconderyphone )
def get_contact_view_page(self, index):
wd = self.app.wd
self.app.navigation.open_home_page()
wd.find_elements_by_css_selector('img[alt="Details"]')[index].click()
text = wd.find_element_by_id("content").text
homephone = re.search("H: (.*)", text).group(1)
workphone = re.search("W: (.*)", text).group(1)
mobilephone = re.search("M: (.*)", text).group(1)
seconderyphone = re.search("P: (.*)", text).group(1)
return Contact(homephone=homephone, workphone=workphone,
mobilephone=mobilephone, seconderyphone=seconderyphone )
def add_contact_in_group(self, index, contact):
wd = self.app.wd
self.select_contact_by_id(contact.id)
wd.find_element_by_css_selector(".right>select").click()
wd.find_element_by_xpath("//div[@class='right']//option[%s]" %index).click()
wd.find_element_by_name("add").click()
| apache-2.0 | 7,957,171,386,964,191,000 | 45.870504 | 96 | 0.624405 | false |
rendermotion/RMPY | rig/biped/rig_parts/hand.py | 1 | 1972 | from RMPY.rig import rigBase
from RMPY.rig.biped.rig_parts import finger
from RMPY.rig import rigSingleJoint
import pymel.core as pm
class HandModel(rigBase.BaseModel):
def __init__(self):
super(HandModel, self).__init__()
self.fingers = []
self.palm = rigSingleJoint.RigSingleJoint()
class Hand(rigBase.RigBase):
def __init__(self, *args, **kwargs):
super(Hand, self).__init__(*args, **kwargs)
self._model = HandModel()
self.fingers = []
self.joints = []
self.reset_joints = []
self.reset_controls = []
self.controls = []
@property
def fingers(self):
return self._model.fingers
@fingers.setter
def fingers(self, value):
self._model.fingers = value
@property
def palm(self):
return self._model.palm
def create_point_base(self, *args, **kwargs):
args = self.rm.dataValidators.as_pymel_nodes(args)
self.palm.create_point_base(args[0])
for each in args[0].getChildren(type='transform'):
new_finger = finger.Finger()
print self.rm.descendents_list(each)
new_finger.create_point_base(*self.rm.descendents_list(each))
self.create.constraint.node_base(self.palm.joints[-1], new_finger.reset_controls[0], mo=True)
self.fingers.append(new_finger)
self.joints = self.palm.joints
self.reset_joints = self.palm.reset_joints
self.reset_controls = self.palm.reset_controls
self.controls = self.palm.controls
def rename_as_skinned_joints(self, nub=True):
super(Hand, self).rename_as_skinned_joints(nub=nub)
for each_rig in self.fingers:
each_rig.rename_as_skinned_joints(nub=nub)
self.palm.rename_as_skinned_joints(nub=False)
if __name__ == '__main__':
palm_root = pm.ls('L_palm01_reference_pnt')[0]
print palm_root
hand = Hand()
hand.create_point_base(palm_root) | lgpl-3.0 | -8,619,685,792,531,022,000 | 30.822581 | 105 | 0.622718 | false |
scottmcmaster/catapult | tracing/third_party/tvcm/third_party/rcssmin/_setup/py2/ext.py | 43 | 7489 | # -*- coding: ascii -*-
#
# Copyright 2007, 2008, 2009, 2010, 2011
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
===================
C extension tools
===================
C extension tools.
"""
__author__ = u"Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
__test__ = False
from distutils import core as _core
from distutils import errors as _distutils_errors
import os as _os
import posixpath as _posixpath
import shutil as _shutil
import tempfile as _tempfile
from _setup import commands as _commands
from _setup.util import log
def _install_finalizer(installer):
if installer.without_c_extensions:
installer.distribution.ext_modules = []
def _build_finalizer(builder):
if builder.without_c_extensions:
builder.extensions = []
class Extension(_core.Extension):
"""
Extension with prerequisite check interface
If your check is cacheable (during the setup run), override
`cached_check_prerequisites`, `check_prerequisites` otherwise.
:IVariables:
`cached_check` : ``bool``
The cached check result
"""
cached_check = None
def __init__(self, *args, **kwargs):
""" Initialization """
if kwargs.has_key('depends'):
self.depends = kwargs['depends'] or []
else:
self.depends = []
_core.Extension.__init__(self, *args, **kwargs)
# add include path
included = _posixpath.join('_setup', 'include')
if included not in self.include_dirs:
self.include_dirs.append(included)
# add cext.h to the dependencies
cext_h = _posixpath.join(included, 'cext.h')
if cext_h not in self.depends:
self.depends.append(cext_h)
_commands.add_option('install_lib', 'without-c-extensions',
help_text='Don\'t install C extensions',
inherit='install',
)
_commands.add_finalizer('install_lib', 'c-extensions',
_install_finalizer
)
_commands.add_option('build_ext', 'without-c-extensions',
help_text='Don\'t build C extensions',
inherit=('build', 'install_lib'),
)
_commands.add_finalizer('build_ext', 'c-extensions', _build_finalizer)
def check_prerequisites(self, build):
"""
Check prerequisites
The check should cover all dependencies needed for the extension to
be built and run. The method can do the following:
- return a false value: the extension will be built
- return a true value: the extension will be skipped. This is useful
for optional extensions
- raise an exception. This is useful for mandatory extensions
If the check result is cacheable (during the setup run), override
`cached_check_prerequisites` instead.
:Parameters:
`build` : `BuildExt`
The extension builder
:Return: Skip the extension?
:Rtype: ``bool``
"""
if self.cached_check is None:
log.debug("PREREQ check for %s" % self.name)
self.cached_check = self.cached_check_prerequisites(build)
else:
log.debug("PREREQ check for %s (cached)" % self.name)
return self.cached_check
def cached_check_prerequisites(self, build):
"""
Check prerequisites
The check should cover all dependencies needed for the extension to
be built and run. The method can do the following:
- return a false value: the extension will be built
- return a true value: the extension will be skipped. This is useful
for optional extensions
- raise an exception. This is useful for mandatory extensions
If the check result is *not* cacheable (during the setup run),
override `check_prerequisites` instead.
:Parameters:
`build` : `BuildExt`
The extension builder
:Return: Skip the extension?
:Rtype: ``bool``
"""
# pylint: disable = W0613
log.debug("Nothing to check for %s!" % self.name)
return False
class ConfTest(object):
"""
Single conftest abstraction
:IVariables:
`_tempdir` : ``str``
The tempdir created for this test
`src` : ``str``
Name of the source file
`target` : ``str``
Target filename
`compiler` : ``CCompiler``
compiler instance
`obj` : ``list``
List of object filenames (``[str, ...]``)
"""
_tempdir = None
def __init__(self, build, source):
"""
Initialization
:Parameters:
`build` : ``distuils.command.build_ext.build_ext``
builder instance
`source` : ``str``
Source of the file to compile
"""
self._tempdir = tempdir = _tempfile.mkdtemp()
src = _os.path.join(tempdir, 'conftest.c')
fp = open(src, 'w')
try:
fp.write(source)
finally:
fp.close()
self.src = src
self.compiler = compiler = build.compiler
self.target = _os.path.join(tempdir, 'conftest')
self.obj = compiler.object_filenames([src], output_dir=tempdir)
def __del__(self):
""" Destruction """
self.destroy()
def destroy(self):
""" Destroy the conftest leftovers on disk """
tempdir, self._tempdir = self._tempdir, None
if tempdir is not None:
_shutil.rmtree(tempdir)
def compile(self, **kwargs):
"""
Compile the conftest
:Parameters:
`kwargs` : ``dict``
Optional keyword parameters for the compiler call
:Return: Was the compilation successful?
:Rtype: ``bool``
"""
kwargs['output_dir'] = self._tempdir
try:
self.compiler.compile([self.src], **kwargs)
except _distutils_errors.CompileError:
return False
return True
def link(self, **kwargs):
r"""
Link the conftest
Before you can link the conftest objects they need to be `compile`\d.
:Parameters:
`kwargs` : ``dict``
Optional keyword parameters for the linker call
:Return: Was the linking successful?
:Rtype: ``bool``
"""
try:
self.compiler.link_executable(self.obj, self.target, **kwargs)
except _distutils_errors.LinkError:
return False
return True
def pipe(self, mode="r"):
r"""
Execute the conftest binary and connect to it using a pipe
Before you can pipe to or from the conftest binary it needs to
be `link`\ed.
:Parameters:
`mode` : ``str``
Pipe mode - r/w
:Return: The open pipe
:Rtype: ``file``
"""
return _os.popen(self.compiler.executable_filename(self.target), mode)
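# Hypothetical sketch of how an Extension subclass could drive ConfTest from
# its prerequisite check; the probe source and the skip logic are assumptions,
# not part of this module.
class _ProbeExtension(Extension):
    def cached_check_prerequisites(self, build):
        probe = ConfTest(build, "int main(void) { return 0; }\n")
        try:
            # Return True (skip the extension) when the probe cannot be
            # compiled and linked with the current toolchain.
            return not (probe.compile() and probe.link())
        finally:
            probe.destroy()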
| bsd-3-clause | -4,656,518,016,770,663,000 | 28.484252 | 78 | 0.594605 | false |
escamilla/MultipartPostHandler | MultipartPostHandler.py | 1 | 3692 | #!/usr/bin/python
# Copyright 2013 Joshua Escamilla <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# The latest version of this program can always be found at:
# https://github.com/escamilla/MultipartPostHandler
#
# This program is a modified version of the MultipartPostHandler module by
# Will Holcomb <[email protected]>, which is available at:
# https://pypi.python.org/pypi/MultipartPostHandler/
__all__ = ["MultipartPostHandler"]
from email.generator import _make_boundary
from mimetypes import guess_type
from os.path import basename
import sys
PY3K = sys.version > "3"
if PY3K:
from io import IOBase as FILE_TYPE
from urllib.parse import urlencode
from urllib.request import BaseHandler
else:
FILE_TYPE = file
from urllib import urlencode
from urllib2 import BaseHandler
try:
bytes
except NameError:
bytes = str
def b(str_or_bytes):
if not isinstance(str_or_bytes, bytes):
return str_or_bytes.encode("ascii")
else:
return str_or_bytes
NEWLINE = "\r\n"
def _get_content_type(filename):
return guess_type(filename)[0] or "application/octet-stream"
class MultipartPostHandler(BaseHandler):
handler_order = BaseHandler.handler_order - 10
def _encode_form_data(self, fields, files):
boundary = _make_boundary()
parts = []
for name, value in fields:
parts.append(b("--%s" % boundary))
parts.append(b("Content-Disposition: form-data; name=\"%s\""
% name))
parts.append(b("Content-Type: text/plain"))
parts.append(b(""))
parts.append(b(value))
for name, fp in files:
filename = basename(fp.name)
mimetype = _get_content_type(filename)
fp.seek(0)
parts.append(b("--%s" % boundary))
parts.append(b("Content-Disposition: file; name=\"%s\"; " \
"filename=\"%s\"" % (name, filename)))
parts.append(b("Content-Type: %s" % mimetype))
parts.append(b(""))
parts.append(fp.read())
parts.append(b("--%s--" % boundary))
data = b(NEWLINE).join(parts)
return boundary, data
def http_request(self, req):
data = req.data
if data and isinstance(data, dict):
fields = []
files = []
for key, value in data.items():
if isinstance(value, FILE_TYPE):
files.append((key, value))
else:
fields.append((key, value))
if files:
boundary, data = self._encode_form_data(fields, files)
req.add_header("Content-Type", "multipart/form-data; " \
"boundary=\"%s\"" % boundary)
req.add_header("Content-Length", len(data))
else:
data = urlencode(fields, doseq=True)
req.data = data
return req
https_request = http_request
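# Usage sketch (the URL and file name are placeholders, not part of this
# module); wrapped in a helper so that nothing runs on import.
def _example_upload():
    if PY3K:
        from urllib.request import build_opener
    else:
        from urllib2 import build_opener
    opener = build_opener(MultipartPostHandler)
    params = {"description": "example upload",
              "attachment": open("example.txt", "rb")}
    # File objects in the dict trigger a multipart/form-data POST; plain
    # values are sent as ordinary form fields.
    return opener.open("http://localhost:8000/upload", params).read()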
| lgpl-3.0 | 2,126,666,777,738,652,000 | 30.555556 | 77 | 0.606176 | false |
frc1418/2015-vision | scratchpad/rio-mjpg/client.py | 2 | 3772 | #!/usr/bin/env python3
import cv2
import numpy as np
from optparse import OptionParser
import socket
import struct
import threading
import time
import logging
logger = logging.getLogger('cvclient')
class CaptureClient:
'''
This probably isn't terribly efficient..
'''
kPort = 1180
kMagicNumber = bytes([0x01, 0x00, 0x00, 0x00])
kSize640x480 = 0
kSize320x240 = 1
kSize160x120 = 2
intStruct = struct.Struct("!i")
def __init__(self, options):
self.host = options.host
self.port = options.port
self.running = True
self.sock = None
self.on_img = None
self.fps = 10
self.compression = 30
self.size = self.kSize160x120
self.thread = threading.Thread(target=self._capture_thread)
def start(self):
if self.on_img is None:
raise ValueError("No capture function set")
self.thread.start()
def stop(self):
self.running = False
if self.sock is not None:
self.sock.close()
self.thread.join()
def set_on_img(self, fn):
self.on_img = fn
def _capture_thread(self):
address = (self.host, self.port)
while self.running:
self.sock = None
try:
self.sock = socket.create_connection(address, timeout=1)
self.sock.settimeout(5)
s = self.sock.makefile('rwb')
self._do_capture(s)
except IOError:
logger.exception("Error reading data")
try:
if self.sock is not None:
self.sock.close()
except:
pass
if self.sock is None:
time.sleep(1)
def _read(self, s, size):
data = s.read(size)
if len(data) != size:
raise IOError("EOF")
return data
def _do_capture(self, s):
# TODO: Add metrics
s.write(self.intStruct.pack(self.fps))
s.write(self.intStruct.pack(self.compression))
s.write(self.intStruct.pack(self.size))
s.flush()
while True:
# read an int
print("read")
magic = self._read(s, 4)
sz = self.intStruct.unpack(self._read(s, 4))[0]
print("readsz", sz)
# read the image buffer
img_bytes = self._read(s, sz)
img_bytes = np.fromstring(img_bytes, np.uint8)
# decode it
img = cv2.imdecode(img_bytes, cv2.IMREAD_COLOR)
# write to a buffer
# - TODO: use two buffers to save memory allocations
self.on_img(img)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
parser = OptionParser()
parser.add_option('--host', default='localhost')
parser.add_option('--port', type='int', default=1180)
options, args = parser.parse_args()
capture = CaptureClient(options)
img_lock = threading.Lock()
imgs = [None]
def _on_img(img):
with img_lock:
imgs[0] = img
capture.set_on_img(_on_img)
capture.start()
while True:
time.sleep(0.1)
with img_lock:
            if imgs[0] is None:
continue
cv2.imshow("img", imgs[0])
if cv2.waitKey(1) & 0xFF == ord('q'):
break
| apache-2.0 | -1,006,342,026,339,089,400 | 22.72327 | 72 | 0.482503 | false |
mseaborn/switch | switch_mod/project/no_commit.py | 2 | 3824 | # Copyright 2015 The Switch Authors. All rights reserved.
# Licensed under the Apache License, Version 2, which is in the LICENSE file.
"""
Defines simple limitations on project dispatch without considering unit
commitment. This module is mutually exclusive with the project.commit
module which constrains dispatch to unit committment decisions.
SYNOPSIS
>>> from switch_mod.utilities import define_AbstractModel
>>> model = define_AbstractModel(
... 'timescales', 'financials', 'load_zones', 'fuels',
... 'gen_tech', 'project.build', 'project.dispatch', 'project.no_commit')
>>> instance = model.load_inputs(inputs_dir='test_dat')
"""
from pyomo.environ import *
def define_components(mod):
"""
Adds components to a Pyomo abstract model object to constrain
dispatch decisions subject to available capacity, renewable resource
availability, and baseload restrictions. Unless otherwise stated,
all power capacity is specified in units of MW and all sets and
parameters are mandatory. This module estimates project dispatch
limits and fuel consumption without consideration of unit
commitment. This can be a useful approximation if fuel startup
requirements are a small portion of overall fuel consumption, so
that the aggregate fuel consumption with respect to energy
production can be approximated as a line with a 0 intercept. This
estimation method has been known to result in excessive cycling of
Combined Cycle Gas Turbines in the SWITCH-WECC model.
DispatchUpperLimit[(proj, t) in PROJ_DISPATCH_POINTS] is an
expression that defines the upper bounds of dispatch subject to
installed capacity, average expected outage rates, and renewable
resource availability.
DispatchLowerLimit[(proj, t) in PROJ_DISPATCH_POINTS] in an
expression that defines the lower bounds of dispatch, which is 0
except for baseload plants where is it the upper limit.
Enforce_Dispatch_Lower_Limit[(proj, t) in PROJ_DISPATCH_POINTS] and
Enforce_Dispatch_Upper_Limit[(proj, t) in PROJ_DISPATCH_POINTS] are
constraints that limit DispatchProj to the upper and lower bounds
defined above.
DispatchLowerLimit <= DispatchProj <= DispatchUpperLimit
ProjFuelUseRate_Calculate[(proj, t) in PROJ_DISPATCH_POINTS]
calculates fuel consumption for the variable ProjFuelUseRate as
DispatchProj * proj_full_load_heat_rate. The units become:
MW * (MMBtu / MWh) = MMBTU / h
"""
def DispatchUpperLimit_expr(m, proj, t):
if proj in m.VARIABLE_PROJECTS:
return (m.ProjCapacityTP[proj, t] * m.proj_availability[proj] *
m.prj_max_capacity_factor[proj, t])
else:
return m.ProjCapacityTP[proj, t] * m.proj_availability[proj]
mod.DispatchUpperLimit = Expression(
mod.PROJ_DISPATCH_POINTS,
initialize=DispatchUpperLimit_expr)
def DispatchLowerLimit_expr(m, proj, t):
if proj in m.BASELOAD_PROJECTS:
return DispatchUpperLimit_expr(m, proj, t)
else:
return 0
mod.DispatchLowerLimit = Expression(
mod.PROJ_DISPATCH_POINTS,
initialize=DispatchLowerLimit_expr)
mod.Enforce_Dispatch_Lower_Limit = Constraint(
mod.PROJ_DISPATCH_POINTS,
rule=lambda m, proj, t: (
m.DispatchLowerLimit[proj, t] <= m.DispatchProj[proj, t]))
mod.Enforce_Dispatch_Upper_Limit = Constraint(
mod.PROJ_DISPATCH_POINTS,
rule=lambda m, proj, t: (
m.DispatchProj[proj, t] <= m.DispatchUpperLimit[proj, t]))
mod.ProjFuelUseRate_Calculate = Constraint(
mod.PROJ_FUEL_DISPATCH_POINTS,
rule=lambda m, proj, t: (
m.ProjFuelUseRate[proj, t] ==
m.DispatchProj[proj, t] * m.proj_full_load_heat_rate[proj]))
| apache-2.0 | 6,036,229,683,619,147,000 | 39.680851 | 77 | 0.705282 | false |
tjanez/obnam | obnamlib/sizeparse_tests.py | 4 | 3415 | # Copyright 2010-2014 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import obnamlib
class ByteSizeParserTests(unittest.TestCase):
def setUp(self):
self.p = obnamlib.ByteSizeParser()
def test_parses_zero(self):
self.assertEqual(self.p.parse('0'), 0)
def test_parses_unadorned_size_as_bytes(self):
self.assertEqual(self.p.parse('123'), 123)
def test_returns_an_int(self):
self.assert_(isinstance(self.p.parse('123'), int))
def test_parses_unadorned_size_using_default_unit(self):
self.p.set_default_unit('KiB')
self.assertEqual(self.p.parse('123'), 123 * 1024)
def test_parses_size_with_byte_unit(self):
self.assertEqual(self.p.parse('123 B'), 123)
def test_parses_size_with_kilo_unit(self):
self.assertEqual(self.p.parse('123 k'), 123 * 1000)
def test_parses_size_with_kilobyte_unit(self):
self.assertEqual(self.p.parse('123 kB'), 123 * 1000)
def test_parses_size_with_kibibyte_unit(self):
self.assertEqual(self.p.parse('123 KiB'), 123 * 1024)
def test_parses_size_with_mega_unit(self):
self.assertEqual(self.p.parse('123 m'), 123 * 1000**2)
def test_parses_size_with_megabyte_unit(self):
self.assertEqual(self.p.parse('123 MB'), 123 * 1000**2)
def test_parses_size_with_mebibyte_unit(self):
self.assertEqual(self.p.parse('123 MiB'), 123 * 1024**2)
def test_parses_size_with_giga_unit(self):
self.assertEqual(self.p.parse('123 g'), 123 * 1000**3)
def test_parses_size_with_gigabyte_unit(self):
self.assertEqual(self.p.parse('123 GB'), 123 * 1000**3)
def test_parses_size_with_gibibyte_unit(self):
self.assertEqual(self.p.parse('123 GiB'), 123 * 1024**3)
def test_raises_error_for_empty_string(self):
self.assertRaises(obnamlib.SizeSyntaxError, self.p.parse, '')
def test_raises_error_for_missing_size(self):
self.assertRaises(obnamlib.SizeSyntaxError, self.p.parse, 'KiB')
def test_raises_error_for_bad_unit(self):
self.assertRaises(obnamlib.SizeSyntaxError, self.p.parse, '1 km')
def test_raises_error_for_bad_unit_thats_similar_to_real_one(self):
self.assertRaises(obnamlib.UnitNameError, self.p.parse, '1 ib')
def test_raises_error_for_bad_default_unit(self):
self.assertRaises(obnamlib.UnitNameError,
self.p.set_default_unit, 'km')
def test_size_syntax_error_includes_input_string(self):
text = 'asdf asdf'
e = obnamlib.SizeSyntaxError(size=text)
self.assert_(text in str(e), str(e))
def test_unit_name_error_includes_input_string(self):
text = 'asdf asdf'
e = obnamlib.UnitNameError(unit=text)
self.assert_(text in str(e), str(e))
| gpl-3.0 | -1,943,482,080,466,903,600 | 34.947368 | 73 | 0.674963 | false |
EricMuller/mywebmarks-backend | requirements/twisted/Twisted-17.1.0/build/lib.linux-x86_64-3.5/twisted/web/test/test_wsgi.py | 13 | 74813 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.wsgi}.
"""
__metaclass__ = type
from sys import exc_info
import tempfile
import traceback
import warnings
from zope.interface.verify import verifyObject
from twisted.python.compat import intToBytes, urlquote, _PY3
from twisted.python.log import addObserver, removeObserver, err
from twisted.python.failure import Failure
from twisted.python.threadable import getThreadID
from twisted.python.threadpool import ThreadPool
from twisted.internet.defer import Deferred, gatherResults
from twisted.internet import reactor
from twisted.internet.error import ConnectionLost
from twisted.trial.unittest import TestCase, SkipTest
from twisted.web import http
from twisted.web.resource import IResource, Resource
from twisted.web.server import Request, Site, version
from twisted.web.wsgi import WSGIResource
from twisted.web.test.test_web import DummyChannel
class SynchronousThreadPool:
"""
A single-threaded implementation of part of the L{ThreadPool} interface.
This implementation calls functions synchronously rather than running
them in a thread pool. It is used to make the tests which are not
directly for thread-related behavior deterministic.
"""
def callInThread(self, f, *a, **kw):
"""
Call C{f(*a, **kw)} in this thread rather than scheduling it to be
called in a thread.
"""
try:
f(*a, **kw)
except:
# callInThread doesn't let exceptions propagate to the caller.
# None is always returned and any exception raised gets logged
# later on.
err(None, "Callable passed to SynchronousThreadPool.callInThread failed")
class SynchronousReactorThreads:
"""
A single-threaded implementation of part of the L{IReactorThreads}
interface. This implementation assumes that it will only be invoked
from the reactor thread, so it calls functions synchronously rather than
trying to schedule them to run in the reactor thread. It is used in
conjunction with L{SynchronousThreadPool} to make the tests which are
not directly for thread-related behavior deterministic.
"""
def callFromThread(self, f, *a, **kw):
"""
Call C{f(*a, **kw)} in this thread which should also be the reactor
thread.
"""
f(*a, **kw)
class WSGIResourceTests(TestCase):
def setUp(self):
"""
Create a L{WSGIResource} with synchronous threading objects and a no-op
application object. This is useful for testing certain things about
the resource implementation which are unrelated to WSGI.
"""
self.resource = WSGIResource(
SynchronousReactorThreads(), SynchronousThreadPool(),
lambda environ, startResponse: None)
def test_interfaces(self):
"""
L{WSGIResource} implements L{IResource} and stops resource traversal.
"""
verifyObject(IResource, self.resource)
self.assertTrue(self.resource.isLeaf)
def test_unsupported(self):
"""
A L{WSGIResource} cannot have L{IResource} children. Its
C{getChildWithDefault} and C{putChild} methods raise L{RuntimeError}.
"""
self.assertRaises(
RuntimeError,
self.resource.getChildWithDefault,
b"foo", Request(DummyChannel(), False))
self.assertRaises(
RuntimeError,
self.resource.putChild,
b"foo", Resource())
class WSGITestsMixin:
"""
@ivar channelFactory: A no-argument callable which will be invoked to
create a new HTTP channel to associate with request objects.
"""
channelFactory = DummyChannel
def setUp(self):
self.threadpool = SynchronousThreadPool()
self.reactor = SynchronousReactorThreads()
def lowLevelRender(
self, requestFactory, applicationFactory, channelFactory, method,
version, resourceSegments, requestSegments, query=None, headers=[],
body=None, safe=''):
"""
@param method: A C{str} giving the request method to use.
@param version: A C{str} like C{'1.1'} giving the request version.
@param resourceSegments: A C{list} of unencoded path segments which
specifies the location in the resource hierarchy at which the
L{WSGIResource} will be placed, eg C{['']} for I{/}, C{['foo',
'bar', '']} for I{/foo/bar/}, etc.
@param requestSegments: A C{list} of unencoded path segments giving the
request URI.
@param query: A C{list} of two-tuples of C{str} giving unencoded query
argument keys and values.
@param headers: A C{list} of two-tuples of C{str} giving request header
names and corresponding values.
@param safe: A C{str} giving the bytes which are to be considered
I{safe} for inclusion in the request URI and not quoted.
@return: A L{Deferred} which will be called back with a two-tuple of
the arguments passed which would be passed to the WSGI application
object for this configuration and request (ie, the environment and
start_response callable).
"""
def _toByteString(string):
# Twisted's HTTP implementation prefers byte strings. As a
# convenience for tests, string arguments are encoded to an
# ISO-8859-1 byte string (if not already) before being passed on.
if isinstance(string, bytes):
return string
else:
return string.encode('iso-8859-1')
root = WSGIResource(
self.reactor, self.threadpool, applicationFactory())
resourceSegments.reverse()
for seg in resourceSegments:
tmp = Resource()
tmp.putChild(_toByteString(seg), root)
root = tmp
channel = channelFactory()
channel.site = Site(root)
request = requestFactory(channel, False)
for k, v in headers:
request.requestHeaders.addRawHeader(
_toByteString(k), _toByteString(v))
request.gotLength(0)
if body:
request.content.write(body)
request.content.seek(0)
uri = '/' + '/'.join([urlquote(seg, safe) for seg in requestSegments])
if query is not None:
uri += '?' + '&'.join(['='.join([urlquote(k, safe), urlquote(v, safe)])
for (k, v) in query])
request.requestReceived(
_toByteString(method), _toByteString(uri),
b'HTTP/' + _toByteString(version))
return request
def render(self, *a, **kw):
result = Deferred()
def applicationFactory():
def application(*args):
environ, startResponse = args
result.callback(args)
startResponse('200 OK', [])
return iter(())
return application
self.lowLevelRender(
Request, applicationFactory, self.channelFactory, *a, **kw)
return result
def requestFactoryFactory(self, requestClass=Request):
d = Deferred()
def requestFactory(*a, **kw):
request = requestClass(*a, **kw)
# If notifyFinish is called after lowLevelRender returns, it won't
# do the right thing, because the request will have already
# finished. One might argue that this is a bug in
# Request.notifyFinish.
request.notifyFinish().chainDeferred(d)
return request
return d, requestFactory
def getContentFromResponse(self, response):
return response.split(b'\r\n\r\n', 1)[1]
def prepareRequest(self, application=None):
"""
Prepare a L{Request} which, when a request is received, captures the
C{environ} and C{start_response} callable passed to a WSGI app.
@param application: An optional WSGI application callable that accepts
the familiar C{environ} and C{start_response} args and returns an
iterable of body content. If not supplied, C{start_response} will
be called with a "200 OK" status and no headers, and no content
will be yielded.
@return: A two-tuple of (C{request}, C{deferred}). The former is a
Twisted L{Request}. The latter is a L{Deferred} which will be
called back with a two-tuple of the arguments passed to a WSGI
application (i.e. the C{environ} and C{start_response} callable),
or will errback with any error arising within the WSGI app.
"""
result = Deferred()
def outerApplication(environ, startResponse):
try:
if application is None:
startResponse('200 OK', [])
content = iter(()) # No content.
else:
content = application(environ, startResponse)
except:
result.errback()
startResponse('500 Error', [])
return iter(())
else:
result.callback((environ, startResponse))
return content
resource = WSGIResource(
self.reactor, self.threadpool, outerApplication)
root = Resource()
root.putChild(b"res", resource)
channel = self.channelFactory()
channel.site = Site(root)
class CannedRequest(Request):
"""
Convenient L{Request} derivative which has canned values for all
of C{requestReceived}'s arguments.
"""
def requestReceived(
self, command=b"GET", path=b"/res", version=b"1.1"):
return Request.requestReceived(
self, command=command, path=path, version=version)
request = CannedRequest(channel, queued=False)
request.gotLength(0) # Initialize buffer for request body.
return request, result
class EnvironTests(WSGITestsMixin, TestCase):
"""
Tests for the values in the C{environ} C{dict} passed to the application
object by L{twisted.web.wsgi.WSGIResource}.
"""
def environKeyEqual(self, key, value):
def assertEnvironKeyEqual(result):
environ, startResponse = result
self.assertEqual(environ[key], value)
return value
return assertEnvironKeyEqual
def test_environIsDict(self):
"""
L{WSGIResource} calls the application object with an C{environ}
parameter which is exactly of type C{dict}.
"""
d = self.render('GET', '1.1', [], [''])
def cbRendered(result):
environ, startResponse = result
self.assertIdentical(type(environ), dict)
# Environment keys are always native strings.
for name in environ:
self.assertIsInstance(name, str)
d.addCallback(cbRendered)
return d
def test_requestMethod(self):
"""
The C{'REQUEST_METHOD'} key of the C{environ} C{dict} passed to the
application contains the HTTP method in the request (RFC 3875, section
4.1.12).
"""
get = self.render('GET', '1.1', [], [''])
get.addCallback(self.environKeyEqual('REQUEST_METHOD', 'GET'))
# Also make sure a different request method shows up as a different
# value in the environ dict.
post = self.render('POST', '1.1', [], [''])
post.addCallback(self.environKeyEqual('REQUEST_METHOD', 'POST'))
return gatherResults([get, post])
def test_requestMethodIsNativeString(self):
"""
The C{'REQUEST_METHOD'} key of the C{environ} C{dict} passed to the
application is always a native string.
"""
for method in b"GET", u"GET":
request, result = self.prepareRequest()
request.requestReceived(method)
result.addCallback(self.environKeyEqual('REQUEST_METHOD', 'GET'))
self.assertIsInstance(self.successResultOf(result), str)
def test_scriptName(self):
"""
The C{'SCRIPT_NAME'} key of the C{environ} C{dict} passed to the
application contains the I{abs_path} (RFC 2396, section 3) to this
resource (RFC 3875, section 4.1.13).
"""
root = self.render('GET', '1.1', [], [''])
root.addCallback(self.environKeyEqual('SCRIPT_NAME', ''))
emptyChild = self.render('GET', '1.1', [''], [''])
emptyChild.addCallback(self.environKeyEqual('SCRIPT_NAME', '/'))
leaf = self.render('GET', '1.1', ['foo'], ['foo'])
leaf.addCallback(self.environKeyEqual('SCRIPT_NAME', '/foo'))
container = self.render('GET', '1.1', ['foo', ''], ['foo', ''])
container.addCallback(self.environKeyEqual('SCRIPT_NAME', '/foo/'))
internal = self.render('GET', '1.1', ['foo'], ['foo', 'bar'])
internal.addCallback(self.environKeyEqual('SCRIPT_NAME', '/foo'))
unencoded = self.render(
'GET', '1.1', ['foo', '/', b'bar\xff'], ['foo', '/', b'bar\xff'])
# The RFC says "(not URL-encoded)", even though that makes
# interpretation of SCRIPT_NAME ambiguous.
unencoded.addCallback(
self.environKeyEqual('SCRIPT_NAME', '/foo///bar\xff'))
return gatherResults([
root, emptyChild, leaf, container, internal, unencoded])
def test_scriptNameIsNativeString(self):
"""
The C{'SCRIPT_NAME'} key of the C{environ} C{dict} passed to the
application is always a native string.
"""
request, result = self.prepareRequest()
request.requestReceived(path=b"/res")
result.addCallback(self.environKeyEqual('SCRIPT_NAME', '/res'))
self.assertIsInstance(self.successResultOf(result), str)
if _PY3:
# Native strings are rejected by Request.requestReceived() before
# t.w.wsgi has any say in the matter.
request, result = self.prepareRequest()
self.assertRaises(TypeError, request.requestReceived, path=u"/res")
else:
request, result = self.prepareRequest()
request.requestReceived(path=u"/res")
result.addCallback(self.environKeyEqual('SCRIPT_NAME', '/res'))
self.assertIsInstance(self.successResultOf(result), str)
def test_pathInfo(self):
"""
The C{'PATH_INFO'} key of the C{environ} C{dict} passed to the
application contains the suffix of the request URI path which is not
included in the value for the C{'SCRIPT_NAME'} key (RFC 3875, section
4.1.5).
"""
assertKeyEmpty = self.environKeyEqual('PATH_INFO', '')
root = self.render('GET', '1.1', [], [''])
root.addCallback(self.environKeyEqual('PATH_INFO', '/'))
emptyChild = self.render('GET', '1.1', [''], [''])
emptyChild.addCallback(assertKeyEmpty)
leaf = self.render('GET', '1.1', ['foo'], ['foo'])
leaf.addCallback(assertKeyEmpty)
container = self.render('GET', '1.1', ['foo', ''], ['foo', ''])
container.addCallback(assertKeyEmpty)
internalLeaf = self.render('GET', '1.1', ['foo'], ['foo', 'bar'])
internalLeaf.addCallback(self.environKeyEqual('PATH_INFO', '/bar'))
internalContainer = self.render('GET', '1.1', ['foo'], ['foo', ''])
internalContainer.addCallback(self.environKeyEqual('PATH_INFO', '/'))
unencoded = self.render('GET', '1.1', [], ['foo', '/', b'bar\xff'])
unencoded.addCallback(
self.environKeyEqual('PATH_INFO', '/foo///bar\xff'))
return gatherResults([
root, leaf, container, internalLeaf,
internalContainer, unencoded])
def test_pathInfoIsNativeString(self):
"""
The C{'PATH_INFO'} key of the C{environ} C{dict} passed to the
application is always a native string.
"""
request, result = self.prepareRequest()
request.requestReceived(path=b"/res/foo/bar")
result.addCallback(self.environKeyEqual('PATH_INFO', '/foo/bar'))
self.assertIsInstance(self.successResultOf(result), str)
if _PY3:
# Native strings are rejected by Request.requestReceived() before
# t.w.wsgi has any say in the matter.
request, result = self.prepareRequest()
self.assertRaises(
TypeError, request.requestReceived, path=u"/res/foo/bar")
else:
request, result = self.prepareRequest()
request.requestReceived(path=u"/res/foo/bar")
result.addCallback(self.environKeyEqual('PATH_INFO', '/foo/bar'))
self.assertIsInstance(self.successResultOf(result), str)
def test_queryString(self):
"""
The C{'QUERY_STRING'} key of the C{environ} C{dict} passed to the
application contains the portion of the request URI after the first
I{?} (RFC 3875, section 4.1.7).
"""
missing = self.render('GET', '1.1', [], [''], None)
missing.addCallback(self.environKeyEqual('QUERY_STRING', ''))
empty = self.render('GET', '1.1', [], [''], [])
empty.addCallback(self.environKeyEqual('QUERY_STRING', ''))
present = self.render('GET', '1.1', [], [''], [('foo', 'bar')])
present.addCallback(self.environKeyEqual('QUERY_STRING', 'foo=bar'))
unencoded = self.render('GET', '1.1', [], [''], [('/', '/')])
unencoded.addCallback(self.environKeyEqual('QUERY_STRING', '%2F=%2F'))
# "?" is reserved in the <searchpart> portion of a URL. However, it
# seems to be a common mistake of clients to forget to quote it. So,
# make sure we handle that invalid case.
doubleQuestion = self.render(
'GET', '1.1', [], [''], [('foo', '?bar')], safe='?')
doubleQuestion.addCallback(
self.environKeyEqual('QUERY_STRING', 'foo=?bar'))
return gatherResults([
missing, empty, present, unencoded, doubleQuestion])
def test_queryStringIsNativeString(self):
"""
The C{'QUERY_STRING'} key of the C{environ} C{dict} passed to the
application is always a native string.
"""
request, result = self.prepareRequest()
request.requestReceived(path=b"/res?foo=bar")
result.addCallback(self.environKeyEqual('QUERY_STRING', 'foo=bar'))
self.assertIsInstance(self.successResultOf(result), str)
if _PY3:
# Native strings are rejected by Request.requestReceived() before
# t.w.wsgi has any say in the matter.
request, result = self.prepareRequest()
self.assertRaises(
TypeError, request.requestReceived, path=u"/res?foo=bar")
else:
request, result = self.prepareRequest()
request.requestReceived(path=u"/res?foo=bar")
result.addCallback(self.environKeyEqual('QUERY_STRING', 'foo=bar'))
self.assertIsInstance(self.successResultOf(result), str)
def test_contentType(self):
"""
The C{'CONTENT_TYPE'} key of the C{environ} C{dict} passed to the
application contains the value of the I{Content-Type} request header
(RFC 3875, section 4.1.3).
"""
missing = self.render('GET', '1.1', [], [''])
missing.addCallback(self.environKeyEqual('CONTENT_TYPE', ''))
present = self.render(
'GET', '1.1', [], [''], None, [('content-type', 'x-foo/bar')])
present.addCallback(self.environKeyEqual('CONTENT_TYPE', 'x-foo/bar'))
return gatherResults([missing, present])
def test_contentTypeIsNativeString(self):
"""
The C{'CONTENT_TYPE'} key of the C{environ} C{dict} passed to the
application is always a native string.
"""
for contentType in b"x-foo/bar", u"x-foo/bar":
request, result = self.prepareRequest()
request.requestHeaders.addRawHeader(b"Content-Type", contentType)
request.requestReceived()
result.addCallback(self.environKeyEqual('CONTENT_TYPE', 'x-foo/bar'))
self.assertIsInstance(self.successResultOf(result), str)
def test_contentLength(self):
"""
The C{'CONTENT_LENGTH'} key of the C{environ} C{dict} passed to the
application contains the value of the I{Content-Length} request header
(RFC 3875, section 4.1.2).
"""
missing = self.render('GET', '1.1', [], [''])
missing.addCallback(self.environKeyEqual('CONTENT_LENGTH', ''))
present = self.render(
'GET', '1.1', [], [''], None, [('content-length', '1234')])
present.addCallback(self.environKeyEqual('CONTENT_LENGTH', '1234'))
return gatherResults([missing, present])
def test_contentLengthIsNativeString(self):
"""
The C{'CONTENT_LENGTH'} key of the C{environ} C{dict} passed to the
application is always a native string.
"""
for contentLength in b"1234", u"1234":
request, result = self.prepareRequest()
request.requestHeaders.addRawHeader(b"Content-Length", contentLength)
request.requestReceived()
result.addCallback(self.environKeyEqual('CONTENT_LENGTH', '1234'))
self.assertIsInstance(self.successResultOf(result), str)
def test_serverName(self):
"""
The C{'SERVER_NAME'} key of the C{environ} C{dict} passed to the
application contains the best determination of the server hostname
possible, using either the value of the I{Host} header in the request
or the address the server is listening on if that header is not
present (RFC 3875, section 4.1.14).
"""
missing = self.render('GET', '1.1', [], [''])
# 10.0.0.1 value comes from a bit far away -
# twisted.test.test_web.DummyChannel.transport.getHost().host
missing.addCallback(self.environKeyEqual('SERVER_NAME', '10.0.0.1'))
present = self.render(
'GET', '1.1', [], [''], None, [('host', 'example.org')])
present.addCallback(self.environKeyEqual('SERVER_NAME', 'example.org'))
return gatherResults([missing, present])
def test_serverNameIsNativeString(self):
"""
The C{'SERVER_NAME'} key of the C{environ} C{dict} passed to the
application is always a native string.
"""
for serverName in b"host.example.com", u"host.example.com":
request, result = self.prepareRequest()
# This is kind of a cheat; getRequestHostname() breaks in Python 3
# when the "Host" request header is set to a native string because
# it tries to split around b":", so we patch the method.
request.getRequestHostname = lambda: serverName
request.requestReceived()
result.addCallback(self.environKeyEqual('SERVER_NAME', 'host.example.com'))
self.assertIsInstance(self.successResultOf(result), str)
def test_serverPort(self):
"""
The C{'SERVER_PORT'} key of the C{environ} C{dict} passed to the
application contains the port number of the server which received the
request (RFC 3875, section 4.1.15).
"""
portNumber = 12354
def makeChannel():
channel = DummyChannel()
channel.transport = DummyChannel.TCP()
channel.transport.port = portNumber
return channel
self.channelFactory = makeChannel
d = self.render('GET', '1.1', [], [''])
d.addCallback(self.environKeyEqual('SERVER_PORT', str(portNumber)))
return d
def test_serverPortIsNativeString(self):
"""
The C{'SERVER_PORT'} key of the C{environ} C{dict} passed to the
application is always a native string.
"""
request, result = self.prepareRequest()
request.requestReceived()
result.addCallback(self.environKeyEqual('SERVER_PORT', '80'))
self.assertIsInstance(self.successResultOf(result), str)
def test_serverProtocol(self):
"""
The C{'SERVER_PROTOCOL'} key of the C{environ} C{dict} passed to the
application contains the HTTP version number received in the request
(RFC 3875, section 4.1.16).
"""
old = self.render('GET', '1.0', [], [''])
old.addCallback(self.environKeyEqual('SERVER_PROTOCOL', 'HTTP/1.0'))
new = self.render('GET', '1.1', [], [''])
new.addCallback(self.environKeyEqual('SERVER_PROTOCOL', 'HTTP/1.1'))
return gatherResults([old, new])
def test_serverProtocolIsNativeString(self):
"""
The C{'SERVER_PROTOCOL'} key of the C{environ} C{dict} passed to the
application is always a native string.
"""
for serverProtocol in b"1.1", u"1.1":
request, result = self.prepareRequest()
# In Python 3, native strings can be rejected by Request.write()
# which will cause a crash after the bit we're trying to test, so
# we patch write() out here to do nothing.
request.write = lambda data: None
request.requestReceived(version=b"1.1")
result.addCallback(self.environKeyEqual('SERVER_PROTOCOL', '1.1'))
self.assertIsInstance(self.successResultOf(result), str)
def test_remoteAddr(self):
"""
The C{'REMOTE_ADDR'} key of the C{environ} C{dict} passed to the
application contains the address of the client making the request.
"""
d = self.render('GET', '1.1', [], [''])
d.addCallback(self.environKeyEqual('REMOTE_ADDR', '192.168.1.1'))
return d
def test_headers(self):
"""
HTTP request headers are copied into the C{environ} C{dict} passed to
the application with a C{HTTP_} prefix added to their names.
"""
singleValue = self.render(
'GET', '1.1', [], [''], None, [('foo', 'bar'), ('baz', 'quux')])
def cbRendered(result):
environ, startResponse = result
self.assertEqual(environ['HTTP_FOO'], 'bar')
self.assertEqual(environ['HTTP_BAZ'], 'quux')
singleValue.addCallback(cbRendered)
multiValue = self.render(
'GET', '1.1', [], [''], None, [('foo', 'bar'), ('foo', 'baz')])
multiValue.addCallback(self.environKeyEqual('HTTP_FOO', 'bar,baz'))
withHyphen = self.render(
'GET', '1.1', [], [''], None, [('foo-bar', 'baz')])
withHyphen.addCallback(self.environKeyEqual('HTTP_FOO_BAR', 'baz'))
multiLine = self.render(
'GET', '1.1', [], [''], None, [('foo', 'bar\n\tbaz')])
multiLine.addCallback(self.environKeyEqual('HTTP_FOO', 'bar \tbaz'))
return gatherResults([singleValue, multiValue, withHyphen, multiLine])
def test_wsgiVersion(self):
"""
The C{'wsgi.version'} key of the C{environ} C{dict} passed to the
application has the value C{(1, 0)} indicating that this is a WSGI 1.0
container.
"""
versionDeferred = self.render('GET', '1.1', [], [''])
versionDeferred.addCallback(self.environKeyEqual('wsgi.version', (1, 0)))
return versionDeferred
def test_wsgiRunOnce(self):
"""
The C{'wsgi.run_once'} key of the C{environ} C{dict} passed to the
application is set to C{False}.
"""
once = self.render('GET', '1.1', [], [''])
once.addCallback(self.environKeyEqual('wsgi.run_once', False))
return once
def test_wsgiMultithread(self):
"""
The C{'wsgi.multithread'} key of the C{environ} C{dict} passed to the
application is set to C{True}.
"""
thread = self.render('GET', '1.1', [], [''])
thread.addCallback(self.environKeyEqual('wsgi.multithread', True))
return thread
def test_wsgiMultiprocess(self):
"""
The C{'wsgi.multiprocess'} key of the C{environ} C{dict} passed to the
application is set to C{False}.
"""
process = self.render('GET', '1.1', [], [''])
process.addCallback(self.environKeyEqual('wsgi.multiprocess', False))
return process
def test_wsgiURLScheme(self):
"""
The C{'wsgi.url_scheme'} key of the C{environ} C{dict} passed to the
application has the request URL scheme.
"""
# XXX Does this need to be different if the request is for an absolute
# URL?
def channelFactory():
channel = DummyChannel()
channel.transport = DummyChannel.SSL()
return channel
self.channelFactory = DummyChannel
httpDeferred = self.render('GET', '1.1', [], [''])
httpDeferred.addCallback(self.environKeyEqual('wsgi.url_scheme', 'http'))
self.channelFactory = channelFactory
httpsDeferred = self.render('GET', '1.1', [], [''])
httpsDeferred.addCallback(self.environKeyEqual('wsgi.url_scheme', 'https'))
return gatherResults([httpDeferred, httpsDeferred])
def test_wsgiErrors(self):
"""
The C{'wsgi.errors'} key of the C{environ} C{dict} passed to the
application is a file-like object (as defined in the U{Input and Errors
Streams<http://www.python.org/dev/peps/pep-0333/#input-and-error-streams>}
section of PEP 333) which converts bytes written to it into events for
the logging system.
"""
events = []
addObserver(events.append)
self.addCleanup(removeObserver, events.append)
errors = self.render('GET', '1.1', [], [''])
def cbErrors(result):
environ, startApplication = result
errors = environ['wsgi.errors']
errors.write('some message\n')
errors.writelines(['another\nmessage\n'])
errors.flush()
self.assertEqual(events[0]['message'], ('some message\n',))
self.assertEqual(events[0]['system'], 'wsgi')
self.assertTrue(events[0]['isError'])
self.assertEqual(events[1]['message'], ('another\nmessage\n',))
self.assertEqual(events[1]['system'], 'wsgi')
self.assertTrue(events[1]['isError'])
self.assertEqual(len(events), 2)
errors.addCallback(cbErrors)
return errors
def test_wsgiErrorsExpectsOnlyNativeStringsInPython2(self):
"""
The C{'wsgi.errors'} file-like object from the C{environ} C{dict}
expects writes of only native strings in Python 2. Some existing WSGI
applications may write non-native (i.e. C{unicode}) strings so, for
compatibility, these elicit only a warning in Python 2.
"""
if _PY3:
raise SkipTest("Not relevant in Python 3")
request, result = self.prepareRequest()
request.requestReceived()
environ, _ = self.successResultOf(result)
errors = environ["wsgi.errors"]
with warnings.catch_warnings(record=True) as caught:
errors.write(u"fred")
self.assertEqual(1, len(caught))
self.assertEqual(UnicodeWarning, caught[0].category)
self.assertEqual(
"write() argument should be str, not u'fred' (unicode)",
str(caught[0].message))
def test_wsgiErrorsAcceptsOnlyNativeStringsInPython3(self):
"""
The C{'wsgi.errors'} file-like object from the C{environ} C{dict}
permits writes of only native strings in Python 3, and raises
C{TypeError} for writes of non-native strings.
"""
if not _PY3:
raise SkipTest("Relevant only in Python 3")
request, result = self.prepareRequest()
request.requestReceived()
environ, _ = self.successResultOf(result)
errors = environ["wsgi.errors"]
error = self.assertRaises(TypeError, errors.write, b"fred")
self.assertEqual(
"write() argument must be str, not b'fred' (bytes)",
str(error))
class InputStreamTestMixin(WSGITestsMixin):
"""
A mixin for L{TestCase} subclasses which defines a number of tests against
L{_InputStream}. The subclass is expected to create a file-like object to
be wrapped by an L{_InputStream} under test.
"""
def getFileType(self):
raise NotImplementedError(
"%s.getFile must be implemented" % (self.__class__.__name__,))
def _renderAndReturnReaderResult(self, reader, content):
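        # Render a PUT request whose body is C{content}; the returned Deferred
        # fires with whatever C{reader} extracted from the application's
        # C{wsgi.input} stream.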
contentType = self.getFileType()
class CustomizedRequest(Request):
def gotLength(self, length):
# Always allocate a file of the specified type, instead of
# using the base behavior of selecting one depending on the
# length.
self.content = contentType()
def appFactoryFactory(reader):
result = Deferred()
def applicationFactory():
def application(*args):
environ, startResponse = args
result.callback(reader(environ['wsgi.input']))
startResponse('200 OK', [])
return iter(())
return application
return result, applicationFactory
d, appFactory = appFactoryFactory(reader)
self.lowLevelRender(
CustomizedRequest, appFactory, DummyChannel,
'PUT', '1.1', [], [''], None, [],
content)
return d
def test_readAll(self):
"""
Calling L{_InputStream.read} with no arguments returns the entire input
stream.
"""
bytes = b"some bytes are here"
d = self._renderAndReturnReaderResult(lambda input: input.read(), bytes)
d.addCallback(self.assertEqual, bytes)
return d
def test_readSome(self):
"""
Calling L{_InputStream.read} with an integer returns that many bytes
from the input stream, as long as it is less than or equal to the total
number of bytes available.
"""
bytes = b"hello, world."
d = self._renderAndReturnReaderResult(lambda input: input.read(3), bytes)
d.addCallback(self.assertEqual, b"hel")
return d
def test_readMoreThan(self):
"""
Calling L{_InputStream.read} with an integer that is greater than the
total number of bytes in the input stream returns all bytes in the
input stream.
"""
bytes = b"some bytes are here"
d = self._renderAndReturnReaderResult(
lambda input: input.read(len(bytes) + 3), bytes)
d.addCallback(self.assertEqual, bytes)
return d
def test_readTwice(self):
"""
Calling L{_InputStream.read} a second time returns bytes starting from
the position after the last byte returned by the previous read.
"""
bytes = b"some bytes, hello"
def read(input):
input.read(3)
return input.read()
d = self._renderAndReturnReaderResult(read, bytes)
d.addCallback(self.assertEqual, bytes[3:])
return d
def test_readNone(self):
"""
Calling L{_InputStream.read} with L{None} as an argument returns all
bytes in the input stream.
"""
bytes = b"the entire stream"
d = self._renderAndReturnReaderResult(
lambda input: input.read(None), bytes)
d.addCallback(self.assertEqual, bytes)
return d
def test_readNegative(self):
"""
Calling L{_InputStream.read} with a negative integer as an argument
returns all bytes in the input stream.
"""
bytes = b"all of the input"
d = self._renderAndReturnReaderResult(
lambda input: input.read(-1), bytes)
d.addCallback(self.assertEqual, bytes)
return d
def test_readline(self):
"""
Calling L{_InputStream.readline} with no argument returns one line from
the input stream.
"""
bytes = b"hello\nworld"
d = self._renderAndReturnReaderResult(
lambda input: input.readline(), bytes)
d.addCallback(self.assertEqual, b"hello\n")
return d
def test_readlineSome(self):
"""
Calling L{_InputStream.readline} with an integer returns at most that
many bytes, even if it is not enough to make up a complete line.
COMPATIBILITY NOTE: the size argument is excluded from the WSGI
specification, but is provided here anyhow, because useful libraries
such as python stdlib's cgi.py assume their input file-like-object
supports readline with a size argument. If you use it, be aware your
application may not be portable to other conformant WSGI servers.
"""
bytes = b"goodbye\nworld"
d = self._renderAndReturnReaderResult(
lambda input: input.readline(3), bytes)
d.addCallback(self.assertEqual, b"goo")
return d
def test_readlineMoreThan(self):
"""
Calling L{_InputStream.readline} with an integer which is greater than
the number of bytes in the next line returns only the next line.
"""
bytes = b"some lines\nof text"
d = self._renderAndReturnReaderResult(
lambda input: input.readline(20), bytes)
d.addCallback(self.assertEqual, b"some lines\n")
return d
def test_readlineTwice(self):
"""
Calling L{_InputStream.readline} a second time returns the line
following the line returned by the first call.
"""
bytes = b"first line\nsecond line\nlast line"
def readline(input):
input.readline()
return input.readline()
d = self._renderAndReturnReaderResult(readline, bytes)
d.addCallback(self.assertEqual, b"second line\n")
return d
def test_readlineNone(self):
"""
Calling L{_InputStream.readline} with L{None} as an argument returns
one line from the input stream.
"""
bytes = b"this is one line\nthis is another line"
d = self._renderAndReturnReaderResult(
lambda input: input.readline(None), bytes)
d.addCallback(self.assertEqual, b"this is one line\n")
return d
def test_readlineNegative(self):
"""
Calling L{_InputStream.readline} with a negative integer as an argument
returns one line from the input stream.
"""
bytes = b"input stream line one\nline two"
d = self._renderAndReturnReaderResult(
lambda input: input.readline(-1), bytes)
d.addCallback(self.assertEqual, b"input stream line one\n")
return d
def test_readlines(self):
"""
Calling L{_InputStream.readlines} with no arguments returns a list of
all lines from the input stream.
"""
bytes = b"alice\nbob\ncarol"
d = self._renderAndReturnReaderResult(
lambda input: input.readlines(), bytes)
d.addCallback(self.assertEqual, [b"alice\n", b"bob\n", b"carol"])
return d
def test_readlinesSome(self):
"""
Calling L{_InputStream.readlines} with an integer as an argument
returns a list of lines from the input stream with the argument serving
as an approximate bound on the total number of bytes to read.
"""
bytes = b"123\n456\n789\n0"
d = self._renderAndReturnReaderResult(
lambda input: input.readlines(5), bytes)
def cbLines(lines):
# Make sure we got enough lines to make 5 bytes. Anything beyond
# that is fine too.
self.assertEqual(lines[:2], [b"123\n", b"456\n"])
d.addCallback(cbLines)
return d
def test_readlinesMoreThan(self):
"""
Calling L{_InputStream.readlines} with an integer which is greater than
the total number of bytes in the input stream returns a list of all
lines from the input.
"""
bytes = b"one potato\ntwo potato\nthree potato"
d = self._renderAndReturnReaderResult(
lambda input: input.readlines(100), bytes)
d.addCallback(
self.assertEqual,
[b"one potato\n", b"two potato\n", b"three potato"])
return d
def test_readlinesAfterRead(self):
"""
Calling L{_InputStream.readlines} after a call to L{_InputStream.read}
returns lines starting at the byte after the last byte returned by the
C{read} call.
"""
bytes = b"hello\nworld\nfoo"
def readlines(input):
input.read(7)
return input.readlines()
d = self._renderAndReturnReaderResult(readlines, bytes)
d.addCallback(self.assertEqual, [b"orld\n", b"foo"])
return d
def test_readlinesNone(self):
"""
Calling L{_InputStream.readlines} with L{None} as an argument returns
all lines from the input.
"""
bytes = b"one fish\ntwo fish\n"
d = self._renderAndReturnReaderResult(
lambda input: input.readlines(None), bytes)
d.addCallback(self.assertEqual, [b"one fish\n", b"two fish\n"])
return d
def test_readlinesNegative(self):
"""
Calling L{_InputStream.readlines} with a negative integer as an
argument returns a list of all lines from the input.
"""
bytes = b"red fish\nblue fish\n"
d = self._renderAndReturnReaderResult(
lambda input: input.readlines(-1), bytes)
d.addCallback(self.assertEqual, [b"red fish\n", b"blue fish\n"])
return d
def test_iterable(self):
"""
Iterating over L{_InputStream} produces lines from the input stream.
"""
bytes = b"green eggs\nand ham\n"
d = self._renderAndReturnReaderResult(lambda input: list(input), bytes)
d.addCallback(self.assertEqual, [b"green eggs\n", b"and ham\n"])
return d
def test_iterableAfterRead(self):
"""
Iterating over L{_InputStream} after calling L{_InputStream.read}
produces lines from the input stream starting from the first byte after
the last byte returned by the C{read} call.
"""
bytes = b"green eggs\nand ham\n"
def iterate(input):
input.read(3)
return list(input)
d = self._renderAndReturnReaderResult(iterate, bytes)
d.addCallback(self.assertEqual, [b"en eggs\n", b"and ham\n"])
return d
class InputStreamStringIOTests(InputStreamTestMixin, TestCase):
"""
Tests for L{_InputStream} when it is wrapped around a
L{StringIO.StringIO}.
This is only available in Python 2.
"""
def getFileType(self):
try:
from StringIO import StringIO
except ImportError:
raise SkipTest("StringIO.StringIO is not available.")
else:
return StringIO
class InputStreamCStringIOTests(InputStreamTestMixin, TestCase):
"""
Tests for L{_InputStream} when it is wrapped around a
L{cStringIO.StringIO}.
This is only available in Python 2.
"""
def getFileType(self):
try:
from cStringIO import StringIO
except ImportError:
raise SkipTest("cStringIO.StringIO is not available.")
else:
return StringIO
class InputStreamBytesIOTests(InputStreamTestMixin, TestCase):
"""
Tests for L{_InputStream} when it is wrapped around an L{io.BytesIO}.
"""
def getFileType(self):
from io import BytesIO
return BytesIO
class InputStreamTemporaryFileTests(InputStreamTestMixin, TestCase):
"""
Tests for L{_InputStream} when it is wrapped around a L{tempfile.TemporaryFile}.
"""
def getFileType(self):
return tempfile.TemporaryFile
class StartResponseTests(WSGITestsMixin, TestCase):
"""
Tests for the I{start_response} parameter passed to the application object
by L{WSGIResource}.
"""
def test_status(self):
"""
The response status passed to the I{start_response} callable is written
as the status of the response to the request.
"""
channel = DummyChannel()
def applicationFactory():
def application(environ, startResponse):
startResponse('107 Strange message', [])
return iter(())
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertTrue(
channel.transport.written.getvalue().startswith(
b'HTTP/1.1 107 Strange message'))
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_statusMustBeNativeString(self):
"""
The response status passed to the I{start_response} callable MUST be a
native string in Python 2 and Python 3.
"""
status = b"200 OK" if _PY3 else u"200 OK"
def application(environ, startResponse):
startResponse(status, [])
return iter(())
request, result = self.prepareRequest(application)
request.requestReceived()
def checkMessage(error):
if _PY3:
self.assertEqual(
"status must be str, not b'200 OK' (bytes)", str(error))
else:
self.assertEqual(
"status must be str, not u'200 OK' (unicode)", str(error))
return self.assertFailure(result, TypeError).addCallback(checkMessage)
def _headersTest(self, appHeaders, expectedHeaders):
"""
Verify that if the response headers given by C{appHeaders} are passed
to the I{start_response} callable, then the response header lines given
by C{expectedHeaders} plus I{Server} and I{Date} header lines are
included in the response.
"""
# Make the Date header value deterministic
self.patch(http, 'datetimeToString', lambda: 'Tuesday')
channel = DummyChannel()
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', appHeaders)
return iter(())
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
response = channel.transport.written.getvalue()
headers, rest = response.split(b'\r\n\r\n', 1)
headerLines = headers.split(b'\r\n')[1:]
headerLines.sort()
allExpectedHeaders = expectedHeaders + [
b'Date: Tuesday',
b'Server: ' + version,
b'Transfer-Encoding: chunked']
allExpectedHeaders.sort()
self.assertEqual(headerLines, allExpectedHeaders)
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_headers(self):
"""
The headers passed to the I{start_response} callable are included in
the response as are the required I{Date} and I{Server} headers and the
necessary connection (hop to hop) header I{Transfer-Encoding}.
"""
return self._headersTest(
[('foo', 'bar'), ('baz', 'quux')],
[b'Baz: quux', b'Foo: bar'])
def test_headersMustBeSequence(self):
"""
The headers passed to the I{start_response} callable MUST be a
sequence.
"""
headers = [("key", "value")]
def application(environ, startResponse):
startResponse("200 OK", iter(headers))
return iter(())
request, result = self.prepareRequest(application)
request.requestReceived()
def checkMessage(error):
self.assertRegex(
str(error), "headers must be a list, not "
r"<(list_?|sequence)iterator .+> [(]\1iterator[)]")
return self.assertFailure(result, TypeError).addCallback(checkMessage)
def test_headersShouldBePlainList(self):
"""
According to PEP-3333, the headers passed to the I{start_response}
callable MUST be a plain list:
The response_headers argument ... must be a Python list; i.e.
type(response_headers) is ListType
However, for bug-compatibility, any sequence is accepted. In both
Python 2 and Python 3, only a warning is issued when a sequence other
than a list is encountered.
"""
def application(environ, startResponse):
startResponse("200 OK", (("not", "list"),))
return iter(())
request, result = self.prepareRequest(application)
with warnings.catch_warnings(record=True) as caught:
request.requestReceived()
result = self.successResultOf(result)
self.assertEqual(1, len(caught))
self.assertEqual(RuntimeWarning, caught[0].category)
self.assertEqual(
"headers should be a list, not (('not', 'list'),) (tuple)",
str(caught[0].message))
def test_headersMustEachBeSequence(self):
"""
Each header passed to the I{start_response} callable MUST be a
sequence.
"""
header = ("key", "value")
def application(environ, startResponse):
startResponse("200 OK", [iter(header)])
return iter(())
request, result = self.prepareRequest(application)
request.requestReceived()
def checkMessage(error):
self.assertRegex(
str(error), "header must be a [(]str, str[)] tuple, not "
r"<(tuple_?|sequence)iterator .+> [(]\1iterator[)]")
return self.assertFailure(result, TypeError).addCallback(checkMessage)
def test_headersShouldEachBeTuple(self):
"""
According to PEP-3333, each header passed to the I{start_response}
callable should be a tuple:
The response_headers argument is a list of (header_name,
header_value) tuples
However, for bug-compatibility, any 2 element sequence is also
accepted. In both Python 2 and Python 3, only a warning is issued when
a sequence other than a tuple is encountered.
"""
def application(environ, startResponse):
startResponse("200 OK", [["not", "tuple"]])
return iter(())
request, result = self.prepareRequest(application)
with warnings.catch_warnings(record=True) as caught:
request.requestReceived()
result = self.successResultOf(result)
self.assertEqual(1, len(caught))
self.assertEqual(RuntimeWarning, caught[0].category)
self.assertEqual(
"header should be a (str, str) tuple, not ['not', 'tuple'] (list)",
str(caught[0].message))
def test_headersShouldEachHaveKeyAndValue(self):
"""
Each header passed to the I{start_response} callable MUST hold a key
and a value, and ONLY a key and a value.
"""
def application(environ, startResponse):
startResponse("200 OK", [("too", "many", "cooks")])
return iter(())
request, result = self.prepareRequest(application)
request.requestReceived()
def checkMessage(error):
self.assertEqual(
"header must be a (str, str) tuple, not "
"('too', 'many', 'cooks')", str(error))
return self.assertFailure(result, TypeError).addCallback(checkMessage)
def test_headerKeyMustBeNativeString(self):
"""
        Each header key passed to the I{start_response} callable MUST be a
        native string in Python 2 and Python 3.
"""
key = b"key" if _PY3 else u"key"
def application(environ, startResponse):
startResponse("200 OK", [(key, "value")])
return iter(())
request, result = self.prepareRequest(application)
request.requestReceived()
def checkMessage(error):
self.assertEqual(
"header must be (str, str) tuple, not (%r, 'value')" % (key,),
str(error))
return self.assertFailure(result, TypeError).addCallback(checkMessage)
def test_headerValueMustBeNativeString(self):
"""
        Each header value passed to the I{start_response} callable MUST be a
        native string in Python 2 and Python 3.
"""
value = b"value" if _PY3 else u"value"
def application(environ, startResponse):
startResponse("200 OK", [("key", value)])
return iter(())
request, result = self.prepareRequest(application)
request.requestReceived()
def checkMessage(error):
self.assertEqual(
"header must be (str, str) tuple, not ('key', %r)" % (value,),
str(error))
return self.assertFailure(result, TypeError).addCallback(checkMessage)
def test_applicationProvidedContentType(self):
"""
If I{Content-Type} is included in the headers passed to the
I{start_response} callable, one I{Content-Type} header is included in
the response.
"""
return self._headersTest(
[('content-type', 'monkeys are great')],
[b'Content-Type: monkeys are great'])
def test_applicationProvidedServerAndDate(self):
"""
If either I{Server} or I{Date} is included in the headers passed to the
I{start_response} callable, they are disregarded.
"""
return self._headersTest(
[('server', 'foo'), ('Server', 'foo'),
('date', 'bar'), ('dATE', 'bar')],
[])
def test_delayedUntilReturn(self):
"""
Nothing is written in response to a request when the I{start_response}
callable is invoked. If the iterator returned by the application
object produces only empty strings, the response is written after the
last element is produced.
"""
channel = DummyChannel()
intermediateValues = []
def record():
intermediateValues.append(channel.transport.written.getvalue())
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [('foo', 'bar'), ('baz', 'quux')])
yield b''
record()
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertEqual(intermediateValues, [b''])
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_delayedUntilContent(self):
"""
Nothing is written in response to a request when the I{start_response}
callable is invoked. Once a non-empty string has been produced by the
iterator returned by the application object, the response status and
headers are written.
"""
channel = DummyChannel()
intermediateValues = []
def record():
intermediateValues.append(channel.transport.written.getvalue())
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [('foo', 'bar')])
yield b''
record()
yield b'foo'
record()
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertFalse(intermediateValues[0])
self.assertTrue(intermediateValues[1])
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_content(self):
"""
Content produced by the iterator returned by the application object is
written to the request as it is produced.
"""
channel = DummyChannel()
intermediateValues = []
def record():
intermediateValues.append(channel.transport.written.getvalue())
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [('content-length', '6')])
yield b'foo'
record()
yield b'bar'
record()
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertEqual(
self.getContentFromResponse(intermediateValues[0]),
b'foo')
self.assertEqual(
self.getContentFromResponse(intermediateValues[1]),
b'foobar')
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_multipleStartResponse(self):
"""
        If the I{start_response} callable is invoked multiple times before any
        data for the response body is produced, the values from the last call
are used.
"""
channel = DummyChannel()
def applicationFactory():
def application(environ, startResponse):
startResponse('100 Foo', [])
startResponse('200 Bar', [])
return iter(())
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertTrue(
channel.transport.written.getvalue().startswith(
b'HTTP/1.1 200 Bar\r\n'))
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_startResponseWithException(self):
"""
If the I{start_response} callable is invoked with a third positional
argument before the status and headers have been written to the
response, the status and headers become the newly supplied values.
"""
channel = DummyChannel()
def applicationFactory():
def application(environ, startResponse):
startResponse('100 Foo', [], (Exception, Exception("foo"), None))
return iter(())
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertTrue(
channel.transport.written.getvalue().startswith(
b'HTTP/1.1 100 Foo\r\n'))
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_startResponseWithExceptionTooLate(self):
"""
If the I{start_response} callable is invoked with a third positional
argument after the status and headers have been written to the
response, the supplied I{exc_info} values are re-raised to the
application.
"""
channel = DummyChannel()
class SomeException(Exception):
pass
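        # Raise and catch a real exception so we have a genuine exc_info triple
        # to hand to start_response after the response has been written.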
try:
raise SomeException()
except:
excInfo = exc_info()
reraised = []
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [])
yield b'foo'
try:
startResponse('500 ERR', [], excInfo)
except:
reraised.append(exc_info())
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertTrue(
channel.transport.written.getvalue().startswith(
b'HTTP/1.1 200 OK\r\n'))
self.assertEqual(reraised[0][0], excInfo[0])
self.assertEqual(reraised[0][1], excInfo[1])
# Show that the tracebacks end with the same stack frames.
tb1 = reraised[0][2].tb_next
tb2 = excInfo[2]
self.assertEqual(
                # On Python 2 (str is bytes) we only need to skip one stack
                # frame; on Python 3 we need to skip two.
traceback.extract_tb(tb1)[1 if str is bytes else 2],
traceback.extract_tb(tb2)[0]
)
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_write(self):
"""
I{start_response} returns the I{write} callable which can be used to
write bytes to the response body without buffering.
"""
channel = DummyChannel()
intermediateValues = []
def record():
intermediateValues.append(channel.transport.written.getvalue())
def applicationFactory():
def application(environ, startResponse):
write = startResponse('100 Foo', [('content-length', '6')])
write(b'foo')
record()
write(b'bar')
record()
return iter(())
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertEqual(
self.getContentFromResponse(intermediateValues[0]),
b'foo')
self.assertEqual(
self.getContentFromResponse(intermediateValues[1]),
b'foobar')
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_writeAcceptsOnlyByteStrings(self):
"""
The C{write} callable returned from C{start_response} only accepts
byte strings.
"""
def application(environ, startResponse):
write = startResponse("200 OK", [])
write(u"bogus")
return iter(())
request, result = self.prepareRequest(application)
request.requestReceived()
def checkMessage(error):
if _PY3:
self.assertEqual(
"Can only write bytes to a transport, not 'bogus'",
str(error))
else:
self.assertEqual(
"Can only write bytes to a transport, not u'bogus'",
str(error))
return self.assertFailure(result, TypeError).addCallback(checkMessage)
class ApplicationTests(WSGITestsMixin, TestCase):
"""
Tests for things which are done to the application object and the iterator
it returns.
"""
def enableThreads(self):
self.reactor = reactor
self.threadpool = ThreadPool()
self.threadpool.start()
self.addCleanup(self.threadpool.stop)
def test_close(self):
"""
If the application object returns an iterator which also has a I{close}
method, that method is called after iteration is complete.
"""
channel = DummyChannel()
class Result:
def __init__(self):
self.open = True
def __iter__(self):
for i in range(3):
if self.open:
yield intToBytes(i)
def close(self):
self.open = False
result = Result()
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [('content-length', '3')])
return result
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertEqual(
self.getContentFromResponse(
channel.transport.written.getvalue()),
b'012')
self.assertFalse(result.open)
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''])
return d
def test_applicationCalledInThread(self):
"""
The application object is invoked and iterated in a thread which is not
the reactor thread.
"""
self.enableThreads()
invoked = []
def applicationFactory():
def application(environ, startResponse):
def result():
for i in range(3):
invoked.append(getThreadID())
yield intToBytes(i)
invoked.append(getThreadID())
startResponse('200 OK', [('content-length', '3')])
return result()
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertNotIn(getThreadID(), invoked)
self.assertEqual(len(set(invoked)), 1)
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory,
DummyChannel, 'GET', '1.1', [], [''])
return d
def test_writeCalledFromThread(self):
"""
The I{write} callable returned by I{start_response} calls the request's
C{write} method in the reactor thread.
"""
self.enableThreads()
invoked = []
class ThreadVerifier(Request):
def write(self, bytes):
invoked.append(getThreadID())
return Request.write(self, bytes)
def applicationFactory():
def application(environ, startResponse):
write = startResponse('200 OK', [])
write(b'foo')
return iter(())
return application
d, requestFactory = self.requestFactoryFactory(ThreadVerifier)
def cbRendered(ignored):
self.assertEqual(set(invoked), set([getThreadID()]))
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory, DummyChannel,
'GET', '1.1', [], [''])
return d
def test_iteratedValuesWrittenFromThread(self):
"""
Strings produced by the iterator returned by the application object are
written to the request in the reactor thread.
"""
self.enableThreads()
invoked = []
class ThreadVerifier(Request):
def write(self, bytes):
invoked.append(getThreadID())
return Request.write(self, bytes)
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [])
yield b'foo'
return application
d, requestFactory = self.requestFactoryFactory(ThreadVerifier)
def cbRendered(ignored):
self.assertEqual(set(invoked), set([getThreadID()]))
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory, DummyChannel,
'GET', '1.1', [], [''])
return d
def test_statusWrittenFromThread(self):
"""
The response status is set on the request object in the reactor thread.
"""
self.enableThreads()
invoked = []
class ThreadVerifier(Request):
def setResponseCode(self, code, message):
invoked.append(getThreadID())
return Request.setResponseCode(self, code, message)
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [])
return iter(())
return application
d, requestFactory = self.requestFactoryFactory(ThreadVerifier)
def cbRendered(ignored):
self.assertEqual(set(invoked), set([getThreadID()]))
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory, DummyChannel,
'GET', '1.1', [], [''])
return d
def test_connectionClosedDuringIteration(self):
"""
If the request connection is lost while the application object is being
iterated, iteration is stopped.
"""
class UnreliableConnection(Request):
"""
This is a request which pretends its connection is lost immediately
after the first write is done to it.
"""
def write(self, bytes):
self.connectionLost(Failure(ConnectionLost("No more connection")))
self.badIter = False
def appIter():
yield b"foo"
self.badIter = True
raise Exception("Should not have gotten here")
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [])
return appIter()
return application
d, requestFactory = self.requestFactoryFactory(UnreliableConnection)
def cbRendered(ignored):
self.assertFalse(self.badIter, "Should not have resumed iteration")
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory, DummyChannel,
'GET', '1.1', [], [''])
return self.assertFailure(d, ConnectionLost)
def _internalServerErrorTest(self, application):
channel = DummyChannel()
def applicationFactory():
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
errors = self.flushLoggedErrors(RuntimeError)
self.assertEqual(len(errors), 1)
self.assertTrue(
channel.transport.written.getvalue().startswith(
b'HTTP/1.1 500 Internal Server Error'))
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_applicationExceptionBeforeStartResponse(self):
"""
If the application raises an exception before calling I{start_response}
then the response status is I{500} and the exception is logged.
"""
def application(environ, startResponse):
raise RuntimeError("This application had some error.")
return self._internalServerErrorTest(application)
def test_applicationExceptionAfterStartResponse(self):
"""
If the application calls I{start_response} but then raises an exception
before any data is written to the response then the response status is
I{500} and the exception is logged.
"""
def application(environ, startResponse):
startResponse('200 OK', [])
raise RuntimeError("This application had some error.")
return self._internalServerErrorTest(application)
def _connectionClosedTest(self, application, responseContent):
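        # Shared helper: render C{application}, then check that exactly one
        # error is logged, that the partial response (status line plus
        # C{responseContent}) was written, and that the transport is
        # disconnected.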
channel = DummyChannel()
def applicationFactory():
return application
d, requestFactory = self.requestFactoryFactory()
# Capture the request so we can disconnect it later on.
requests = []
def requestFactoryWrapper(*a, **kw):
requests.append(requestFactory(*a, **kw))
return requests[-1]
def ebRendered(ignored):
errors = self.flushLoggedErrors(RuntimeError)
self.assertEqual(len(errors), 1)
response = channel.transport.written.getvalue()
self.assertTrue(response.startswith(b'HTTP/1.1 200 OK'))
# Chunked transfer-encoding makes this a little messy.
self.assertIn(responseContent, response)
d.addErrback(ebRendered)
self.lowLevelRender(
requestFactoryWrapper, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
# By now the connection should be closed.
self.assertTrue(channel.transport.disconnected)
# Give it a little push to go the rest of the way.
requests[0].connectionLost(Failure(ConnectionLost("All gone")))
return d
def test_applicationExceptionAfterWrite(self):
"""
If the application raises an exception after the response status has
already been sent then the connection is closed and the exception is
logged.
"""
responseContent = (
b'Some bytes, triggering the server to start sending the response')
def application(environ, startResponse):
startResponse('200 OK', [])
yield responseContent
raise RuntimeError("This application had some error.")
return self._connectionClosedTest(application, responseContent)
def test_applicationCloseException(self):
"""
If the application returns a closeable iterator and the C{close} method
raises an exception when called then the connection is still closed and
the exception is logged.
"""
responseContent = b'foo'
class Application(object):
def __init__(self, environ, startResponse):
startResponse('200 OK', [])
def __iter__(self):
yield responseContent
def close(self):
raise RuntimeError("This application had some error.")
return self._connectionClosedTest(Application, responseContent)
| mit | 8,876,126,830,731,377,000 | 34.693225 | 87 | 0.594322 | false |
lupyuen/RaspberryPiImage | home/pi/GrovePi/Software/Python/others/temboo/Library/Foursquare/Users/PendingFriendRequests.py | 5 | 3448 | # -*- coding: utf-8 -*-
###############################################################################
#
# PendingFriendRequests
# Retrieves a list of pending friend requests for the authenticated user.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class PendingFriendRequests(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the PendingFriendRequests Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(PendingFriendRequests, self).__init__(temboo_session, '/Library/Foursquare/Users/PendingFriendRequests')
def new_input_set(self):
return PendingFriendRequestsInputSet()
def _make_result_set(self, result, path):
return PendingFriendRequestsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return PendingFriendRequestsChoreographyExecution(session, exec_id, path)
class PendingFriendRequestsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the PendingFriendRequests
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_OauthToken(self, value):
"""
Set the value of the OauthToken input for this Choreo. ((required, string) The Foursquare API OAuth token string.)
"""
super(PendingFriendRequestsInputSet, self)._set_input('OauthToken', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that response should be in. Can be set to xml or json. Defaults to json.)
"""
super(PendingFriendRequestsInputSet, self)._set_input('ResponseFormat', value)
class PendingFriendRequestsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the PendingFriendRequests Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Foursquare. Corresponds to the ResponseFormat input. Defaults to JSON.)
"""
return self._output.get('Response', None)
class PendingFriendRequestsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return PendingFriendRequestsResultSet(response, path)
| apache-2.0 | -8,875,550,126,312,098,000 | 38.632184 | 171 | 0.695186 | false |
MISP/MISP | app/files/scripts/stix2/misp2stix2.py | 1 | 89636 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2017-2018 CIRCL Computer Incident Response Center Luxembourg (smile gie)
# Copyright (C) 2017-2018 Christian Studer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import os
import re
import sys
import uuid
import misp2stix2_mapping
from datetime import datetime
from stix2.base import STIXJSONEncoder
from stix2.exceptions import InvalidValueError, TLPMarkingDefinitionError, AtLeastOnePropertyError
from stix2.properties import DictionaryProperty, ListProperty, StringProperty, TimestampProperty
from stix2.v20.common import MarkingDefinition, TLP_WHITE, TLP_GREEN, TLP_AMBER, TLP_RED
from stix2.v20.observables import SocketExt, WindowsPESection, WindowsRegistryValueType
from stix2.v20.sdo import AttackPattern, CourseOfAction, CustomObject, Identity, Indicator, IntrusionSet, Malware, ObservedData, Report, ThreatActor, Tool, Vulnerability
from stix2.v20.sro import Relationship
from collections import defaultdict
from copy import deepcopy
_MISP_event_tags = ['Threat-Report', 'misp:tool="misp2stix2"']
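# Timestamp property names used when converting attribute timestamps for each
# time-aware SDO type.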
_time_fields = {'indicator': ('valid_from', 'valid_until'),
'observed-data': ('first_observed', 'last_observed')}
class StixBuilder():
def __init__(self):
self.orgs = []
self.galaxies = []
self.ids = {}
self.custom_objects = {}
def loadEvent(self, args):
pathname = os.path.dirname(args[0])
filename = os.path.join(pathname, args[1])
with open(filename, 'rt', encoding='utf-8') as f:
self.json_event = self._get_event(json.loads(f.read()))
self.filename = filename
def buildEvent(self):
try:
stix_packages = self._get_packages()
outputfile = "{}.out".format(self.filename)
with open(outputfile, 'wt', encoding='utf-8') as f:
f.write(json.dumps(stix_packages, cls=STIXJSONEncoder))
print(json.dumps({'success': 1}))
except Exception as e:
print(json.dumps({'error': e.__str__()}))
@staticmethod
def _get_event(events):
if events.get('response'):
return {'response': [event['Event'] if event.get('Event') else event for event in events['response']]}
return events['Event'] if events.get('Event') else events
def _get_packages(self):
if self.json_event.get('response'):
return [sdo for event in self.json_event['response'] for sdo in self.handler(event)]
return self.handler(self.json_event)
def eventReport(self):
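        # A STIX Report requires at least one object reference, so if the event
        # only produced link attributes, promote the first link to a custom object.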
if not self.object_refs and self.links:
self.add_custom(self.links.pop(0))
external_refs = [self.__parse_link(link) for link in self.links]
report_args = {'type': 'report', 'id': self.report_id, 'name': self.misp_event['info'],
'created': datetime.strptime(self.misp_event['date'], '%Y-%m-%d'),
'published': self.get_datetime_from_timestamp(self.misp_event['publish_timestamp']),
'modified': self.get_datetime_from_timestamp(self.misp_event['timestamp']),
'created_by_ref': self.identity_id, 'interoperability': True}
labels = [tag for tag in _MISP_event_tags]
if self.misp_event.get('Tag'):
markings = []
for tag in self.misp_event['Tag']:
name = tag['name']
markings.append(name) if name.startswith('tlp:') else labels.append(name)
if markings:
report_args['object_marking_refs'] = self.handle_tags(markings)
report_args['labels'] = labels
if external_refs:
report_args['external_references'] = external_refs
self.add_all_markings()
self.add_all_relationships()
report_args['object_refs'] = self.object_refs
return Report(**report_args)
@staticmethod
def __parse_link(link):
url = link['value']
source = "url"
if link.get('comment'):
source += " - {}".format(link['comment'])
return {'source_name': source, 'url': url}
def add_all_markings(self):
for marking in self.markings.values():
self.append_object(marking)
def add_all_relationships(self):
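        # 'defined' maps STIX source ids to target ids that are already known
        # (e.g. galaxies); 'to_define' maps MISP object uuids to references whose
        # STIX ids still have to be resolved through self.ids.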
for source, targets in self.relationships['defined'].items():
if source.startswith('report'):
continue
source_type,_ = source.split('--')
for target in targets:
target_type,_ = target.split('--')
try:
relation = misp2stix2_mapping.relationshipsSpecifications[source_type][target_type]
except KeyError:
# custom relationship (suggested by iglocska)
relation = "has"
relationship = Relationship(source_ref=source, target_ref=target,
relationship_type=relation, interoperability=True)
self.append_object(relationship, id_mapping=False)
for source_uuid, references in self.relationships['to_define'].items():
for reference in references:
target_uuid, relationship_type = reference
try:
source = '{}--{}'.format(self.ids[source_uuid], source_uuid)
target = '{}--{}'.format(self.ids[target_uuid], target_uuid)
except KeyError:
continue
relationship = Relationship(source_ref=source, target_ref=target, interoperability=True,
relationship_type=relationship_type.strip())
self.append_object(relationship, id_mapping=False)
def __set_identity(self):
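        # Returns the number of SDOs appended (1 when a new Identity is created,
        # 0 otherwise) so the caller knows where to insert the Report in self.SDOs.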
org = self.misp_event['Orgc']
org_uuid = org['uuid']
identity_id = 'identity--{}'.format(org_uuid)
self.identity_id = identity_id
if org_uuid not in self.orgs:
identity = Identity(type="identity", id=identity_id, name=org["name"],
identity_class="organization", interoperability=True)
self.SDOs.append(identity)
self.orgs.append(org_uuid)
return 1
return 0
def handler(self, event):
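        # Convert a single MISP event: emit the author identity, translate every
        # attribute, object and galaxy into STIX SDOs, then wrap them in a Report.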
self.misp_event = event
self.report_id = "report--{}".format(self.misp_event['uuid'])
self.SDOs = []
self.object_refs = []
self.links = []
self.markings = {}
self.relationships = {'defined': defaultdict(list),
'to_define': {}}
i = self.__set_identity()
if self.misp_event.get('Attribute'):
for attribute in self.misp_event['Attribute']:
a_type = attribute['type']
to_call = self._get_function_to_call(a_type)
getattr(self, to_call)(attribute)
if self.misp_event.get('Object'):
self.objects_to_parse = defaultdict(dict)
for misp_object in self.misp_event['Object']:
name = misp_object['name']
if name == 'original-imported-file':
continue
to_ids = self.fetch_ids_flag(misp_object['Attribute'])
try:
getattr(self, misp2stix2_mapping.objectsMapping[name]['to_call'])(misp_object, to_ids)
except KeyError:
self.add_object_custom(misp_object, to_ids)
if misp_object.get('ObjectReference'):
self.relationships['to_define'][misp_object['uuid']] = tuple((r['referenced_uuid'], r['relationship_type']) for r in misp_object['ObjectReference'])
if self.objects_to_parse:
self.resolve_objects2parse()
if self.misp_event.get('Galaxy'):
for galaxy in self.misp_event['Galaxy']:
self.parse_galaxy(galaxy, self.report_id)
report = self.eventReport()
self.SDOs.insert(i, report)
return self.SDOs
def get_object_by_uuid(self, uuid):
for _object in self.misp_event['Object']:
if _object.get('uuid') and _object['uuid'] == uuid:
return _object
raise Exception('Object with uuid {} does not exist in this event.'.format(uuid))
def handle_person(self, attribute):
if attribute['category'] == "Person":
self.add_identity(attribute)
else:
self.add_custom(attribute)
def handle_usual_type(self, attribute):
try:
if attribute['to_ids']:
self.add_indicator(attribute)
else:
self.add_observed_data(attribute)
except (AtLeastOnePropertyError, InvalidValueError):
self.add_custom(attribute)
def handle_usual_object_name(self, misp_object, to_ids):
name = misp_object['name']
if name == 'file' and misp_object.get('ObjectReference'):
for reference in misp_object['ObjectReference']:
if reference['relationship_type'] in ('includes', 'included-in') and reference['Object']['name'] == "pe":
self.objects_to_parse[name][misp_object['uuid']] = to_ids, misp_object
return
try:
if to_ids or name == "stix2-pattern":
self.add_object_indicator(misp_object)
else:
self.add_object_observable(misp_object)
except Exception:
self.add_object_custom(misp_object, to_ids)
def handle_link(self, attribute):
self.links.append(attribute)
def populate_objects_to_parse(self, misp_object, to_ids):
self.objects_to_parse[misp_object['name']][misp_object['uuid']] = to_ids, misp_object
def resolve_objects2parse(self):
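        # Combine each stored file object with the PE object (and PE sections) it
        # references, emitting a single indicator pattern or observable that
        # carries a 'windows-pebinary-ext' extension instead of separate objects.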
for misp_object in self.objects_to_parse['file'].values():
to_ids_file, file_object = misp_object
file_id = "file--{}".format(file_object['uuid'])
to_ids_list = [to_ids_file]
for reference in file_object['ObjectReference']:
if reference['relationship_type'] in ("includes", "included-in") and reference['Object']['name'] == "pe":
pe_uuid = reference['referenced_uuid']
break
to_ids_pe, pe_object = self.objects_to_parse['pe'][pe_uuid]
to_ids_list.append(to_ids_pe)
sections = []
for reference in pe_object['ObjectReference']:
if reference['Object']['name'] == "pe-section" and reference['referenced_uuid'] in self.objects_to_parse['pe-section']:
to_ids_section, section_object = self.objects_to_parse['pe-section'][reference['referenced_uuid']]
to_ids_list.append(to_ids_section)
sections.append(section_object)
if True in to_ids_list:
patterns = self.resolve_file_pattern(file_object['Attribute'], file_id)
patterns.extend(self.parse_pe_extensions_pattern(pe_object, sections))
self.add_object_indicator(file_object, pattern_arg=f"[{' AND '.join(patterns)}]")
else:
observable = self.resolve_file_observable(file_object['Attribute'], file_id)
key = '0' if len(observable) == 1 else self._fetch_file_observable(observable)
pe_type = self._get_pe_type_from_filename(observable[key])
observable[key]['extensions'] = self.parse_pe_extensions_observable(pe_object, sections, pe_type)
self.add_object_observable(file_object, observable_arg=observable)
@staticmethod
def _create_pe_type_test(observable, extension):
return [
('name' in observable and observable['name'].endswith(f'.{extension}')),
('mime_type' in observable and re.compile(".* .+{0}.+ .*|.* {0} .*".format(extension)).match(observable['mime_type'].lower()))]
def _get_pe_type_from_filename(self, observable):
for extension in ('exe', 'dll'):
if any(self._create_pe_type_test(observable, extension)):
return extension
return 'sys'
@staticmethod
def _fetch_file_observable(observable_objects):
for key, observable in observable_objects.items():
if observable['type'] == 'file':
return key
return '0'
def parse_pe_extensions_observable(self, pe_object, sections, pe_type):
extension = defaultdict(list)
extension['pe_type'] = pe_type
for attribute in pe_object['Attribute']:
try:
extension[misp2stix2_mapping.peMapping[attribute['object_relation']]] = attribute['value']
except KeyError:
extension["x_misp_{}_{}".format(attribute['type'], attribute['object_relation'].replace('-', '_'))] = attribute['value']
for section in sections:
d_section = defaultdict(dict)
for attribute in section['Attribute']:
relation = attribute['object_relation']
if relation in misp2stix2_mapping.misp_hash_types:
d_section['hashes'][relation] = attribute['value']
else:
try:
d_section[misp2stix2_mapping.peSectionMapping[relation]] = attribute['value']
except KeyError:
continue
if 'name' not in d_section:
d_section['name'] = 'Section {}'.format(sections.index(section))
extension['sections'].append(WindowsPESection(**d_section))
if len(sections) != int(extension['number_of_sections']):
extension['number_of_sections'] = str(len(sections))
return {"windows-pebinary-ext": extension}
def parse_pe_extensions_pattern(self, pe_object, sections):
pattern = []
mapping = misp2stix2_mapping.objectsMapping['file']['pattern']
pe_mapping = "extensions.'windows-pebinary-ext'"
for attribute in pe_object['Attribute']:
try:
stix_type = f"{pe_mapping}.{misp2stix2_mapping.peMapping[attribute['object_relation']]}"
except KeyError:
stix_type = f"{pe_mapping}.x_misp_{attribute['type']}_{attribute['object_relation'].replace('-', '_')}"
pattern.append(mapping.format(stix_type, attribute['value']))
n_section = 0
for section in sections:
section_mapping = f"{pe_mapping}.sections[{str(n_section)}]"
for attribute in section['Attribute']:
relation = attribute['object_relation']
if relation in misp2stix2_mapping.misp_hash_types:
stix_type = "{}.hashes.'{}'".format(section_mapping, relation)
pattern.append(mapping.format(stix_type, attribute['value']))
else:
try:
stix_type = "{}.{}".format(section_mapping, misp2stix2_mapping.peSectionMapping[relation])
pattern.append(mapping.format(stix_type, attribute['value']))
except KeyError:
continue
n_section += 1
return pattern
def parse_galaxies(self, galaxies, source_id):
for galaxy in galaxies:
self.parse_galaxy(galaxy, source_id)
def parse_galaxy(self, galaxy, source_id):
galaxy_type = galaxy.get('type')
galaxy_uuid = galaxy['GalaxyCluster'][0]['collection_uuid']
try:
stix_type, to_call = misp2stix2_mapping.galaxies_mapping[galaxy_type]
except Exception:
return
if galaxy_uuid not in self.galaxies:
getattr(self, to_call)(galaxy)
self.galaxies.append(galaxy_uuid)
self.relationships['defined'][source_id].append("{}--{}".format(stix_type, galaxy_uuid))
def generate_galaxy_args(self, galaxy, b_killchain, b_alias, sdo_type):
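        # Build the keyword arguments shared by every SDO created from a MISP
        # galaxy cluster; kill chain phases and aliases are only added on request.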
cluster = galaxy['GalaxyCluster'][0]
try:
cluster_uuid = cluster['collection_uuid']
except KeyError:
cluster_uuid = cluster['uuid']
sdo_id = "{}--{}".format(sdo_type, cluster_uuid)
description = "{} | {}".format(galaxy['description'], cluster['description'])
labels = ['misp:name=\"{}\"'.format(galaxy['name'])]
sdo_args = {
'id': sdo_id,
'type': sdo_type,
'created': datetime.strptime(self.misp_event['date'], '%Y-%m-%d'),
'modified': self.get_datetime_from_timestamp(self.misp_event['timestamp']),
'name': cluster['value'],
'description': description,
'interoperability': True
}
if b_killchain:
killchain = [{'kill_chain_name': 'misp-category',
'phase_name': galaxy['type']}]
sdo_args['kill_chain_phases'] = killchain
if cluster['tag_name']:
labels.append(cluster.get('tag_name'))
meta = cluster.get('meta')
if 'synonyms' in meta and b_alias:
aliases = []
for a in meta['synonyms']:
aliases.append(a)
sdo_args['aliases'] = aliases
sdo_args['labels'] = labels
return sdo_args
def add_attack_pattern(self, galaxy):
a_p_args = self.generate_galaxy_args(galaxy, True, False, 'attack-pattern')
a_p_args['created_by_ref'] = self.identity_id
attack_pattern = AttackPattern(**a_p_args)
self.append_object(attack_pattern)
def add_attack_pattern_object(self, misp_object, to_ids):
attack_pattern_args = {'id': f'attack-pattern--{misp_object["uuid"]}', 'type': 'attack-pattern',
'created_by_ref': self.identity_id, 'interoperability': True}
attack_pattern_args.update(self.parse_attack_pattern_fields(misp_object['Attribute']))
attack_pattern_args['labels'] = self.create_object_labels(misp_object['name'], misp_object['meta-category'], to_ids)
attack_pattern = AttackPattern(**attack_pattern_args)
self.append_object(attack_pattern)
def add_course_of_action(self, misp_object):
coa_args= self.generate_galaxy_args(misp_object, False, False, 'course-of-action')
self.add_coa_stix_object(coa_args)
def add_course_of_action_from_object(self, misp_object, to_ids):
coa_id = 'course-of-action--{}'.format(misp_object['uuid'])
coa_args = {'id': coa_id, 'type': 'course-of-action', 'created_by_ref': self.identity_id}
coa_args['labels'] = self.create_object_labels(misp_object['name'], misp_object['meta-category'], to_ids)
for attribute in misp_object['Attribute']:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], coa_id)
relation = attribute['object_relation']
if relation in ('name', 'description'):
coa_args[relation] = attribute['value']
else:
coa_args[f'x_misp_{attribute["type"]}_{relation}'] = attribute['value']
if not 'name' in coa_args:
return
self.add_coa_stix_object(coa_args)
def add_coa_stix_object(self, coa_args):
coa_args['created_by_ref'] = self.identity_id
course_of_action = CourseOfAction(**coa_args, allow_custom=True)
self.append_object(course_of_action)
def add_custom(self, attribute):
attribute_type = attribute['type'].replace('|', '-').replace(' ', '-').lower()
custom_object_id = "x-misp-object-{}--{}".format(attribute_type, attribute['uuid'])
custom_object_type = "x-misp-object-{}".format(attribute_type)
labels, markings = self.create_labels(attribute)
stix_labels = ListProperty(StringProperty)
stix_labels.clean(labels)
stix_markings = ListProperty(StringProperty)
timestamp = self.get_datetime_from_timestamp(attribute['timestamp'])
custom_object_args = {'id': custom_object_id, 'x_misp_category': attribute['category'],
'created': timestamp, 'modified': timestamp, 'labels': labels,
'x_misp_value': attribute['value'], 'created_by_ref': self.identity_id}
if attribute.get('comment'):
custom_object_args['x_misp_comment'] = attribute['comment']
if markings:
markings = self.handle_tags(markings)
custom_object_args['object_marking_refs'] = markings
stix_markings.clean(markings)
if custom_object_type not in self.custom_objects:
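            # Declare a STIX custom object class for this MISP attribute type
            # once, then cache it so later attributes of the same type reuse it.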
@CustomObject(custom_object_type, [
('id', StringProperty(required=True)),
('labels', ListProperty(stix_labels, required=True)),
('x_misp_value', StringProperty(required=True)),
('created', TimestampProperty(required=True, precision='millisecond')),
('modified', TimestampProperty(required=True, precision='millisecond')),
('created_by_ref', StringProperty(required=True)),
('object_marking_refs', ListProperty(stix_markings)),
('x_misp_comment', StringProperty()),
('x_misp_category', StringProperty())
])
class Custom(object):
def __init__(self, **kwargs):
return
self.custom_objects[custom_object_type] = Custom
else:
Custom = self.custom_objects[custom_object_type]
custom_object = Custom(**custom_object_args)
self.append_object(custom_object)
def add_identity(self, attribute):
identity_id = "identity--{}".format(attribute['uuid'])
name = attribute['value']
labels, markings = self.create_labels(attribute)
identity_args = {'id': identity_id, 'type': 'identity', 'name': name, 'labels': labels,
'identity_class': 'individual', 'created_by_ref': self.identity_id,
'interoperability': True}
if attribute.get('comment'):
identity_args['description'] = attribute['comment']
if markings:
identity_args['object_marking_refs'] = self.handle_tags(markings)
identity = Identity(**identity_args)
self.append_object(identity)
def add_indicator(self, attribute):
indicator_id = "indicator--{}".format(attribute['uuid'])
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], indicator_id)
category = attribute['category']
killchain = self.create_killchain(category)
labels, markings = self.create_labels(attribute)
pattern = f'[{self.define_pattern(attribute)}]'
timestamp = self.get_datetime_from_timestamp(attribute['timestamp'])
indicator_args = {'id': indicator_id, 'type': 'indicator', 'labels': labels,
'kill_chain_phases': killchain, 'created_by_ref': self.identity_id,
'pattern': pattern, 'interoperability': True}
indicator_args.update(self.handle_time_fields(attribute, timestamp, 'indicator'))
if attribute.get('comment'):
indicator_args['description'] = attribute['comment']
if markings:
indicator_args['object_marking_refs'] = self.handle_tags(markings)
indicator = Indicator(**indicator_args)
self.append_object(indicator)
def add_intrusion_set(self, galaxy):
i_s_args = self.generate_galaxy_args(galaxy, False, True, 'intrusion-set')
i_s_args['created_by_ref'] = self.identity_id
intrusion_set = IntrusionSet(**i_s_args)
self.append_object(intrusion_set)
def add_malware(self, galaxy):
        malware_args = self.generate_galaxy_args(galaxy, True, False, 'malware')
malware_args['created_by_ref'] = self.identity_id
malware = Malware(**malware_args)
self.append_object(malware)
def add_observed_data(self, attribute):
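        # Export the attribute as a STIX ObservedData object wrapping the derived observable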
observed_data_id = "observed-data--{}".format(attribute['uuid'])
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], observed_data_id)
timestamp = self.get_datetime_from_timestamp(attribute['timestamp'])
labels, markings = self.create_labels(attribute)
observable = self.define_observable(attribute)
observed_data_args = {'id': observed_data_id, 'type': 'observed-data', 'number_observed': 1,
'objects': observable, 'created_by_ref': self.identity_id,
'labels': labels, 'interoperability': True}
observed_data_args.update(self.handle_time_fields(attribute, timestamp, 'observed-data'))
if markings:
observed_data_args['object_marking_refs'] = self.handle_tags(markings)
observed_data = ObservedData(**observed_data_args)
self.append_object(observed_data)
def add_threat_actor(self, galaxy):
t_a_args = self.generate_galaxy_args(galaxy, False, True, 'threat-actor')
t_a_args['created_by_ref'] = self.identity_id
threat_actor = ThreatActor(**t_a_args)
self.append_object(threat_actor)
def add_tool(self, galaxy):
tool_args = self.generate_galaxy_args(galaxy, True, False, 'tool')
tool_args['created_by_ref'] = self.identity_id
tool = Tool(**tool_args)
self.append_object(tool)
def add_vulnerability(self, attribute):
vulnerability_id = "vulnerability--{}".format(attribute['uuid'])
name = attribute['value']
vulnerability_data = [self._get_vulnerability_data(name)]
labels, markings = self.create_labels(attribute)
vulnerability_args = {'id': vulnerability_id, 'type': 'vulnerability',
'name': name, 'external_references': vulnerability_data,
'created_by_ref': self.identity_id, 'labels': labels,
'interoperability': True}
if markings:
vulnerability_args['object_marking_refs'] = self.handle_tags(markings)
vulnerability = Vulnerability(**vulnerability_args)
self.append_object(vulnerability)
def add_vulnerability_from_galaxy(self, attribute):
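        # A vulnerability galaxy cluster becomes a single Vulnerability with one CVE reference per name/alias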
vulnerability_id = "vulnerability--{}".format(attribute['uuid'])
cluster = attribute['GalaxyCluster'][0]
name = cluster['value']
vulnerability_names = [name]
if cluster.get('meta') and cluster['meta'].get('aliases'):
vulnerability_names.extend(cluster['meta']['aliases'])
vulnerability_data = [self._get_vulnerability_data(name) for name in vulnerability_names]
labels = ['misp:type=\"{}\"'.format(attribute.get('type'))]
if cluster['tag_name']:
labels.append(cluster['tag_name'])
description = "{} | {}".format(attribute.get('description'), cluster.get('description'))
vulnerability_args = {'id': vulnerability_id, 'type': 'vulnerability',
'name': name, 'external_references': vulnerability_data,
'created_by_ref': self.identity_id, 'labels': labels,
'description': description, 'interoperability': True}
vulnerability = Vulnerability(**vulnerability_args)
self.append_object(vulnerability)
def add_object_custom(self, misp_object, to_ids):
name = misp_object['name'].replace('_', '-')
custom_object_id = 'x-misp-object-{}--{}'.format(name, misp_object['uuid'])
custom_object_type = 'x-misp-object-{}'.format(name)
category = misp_object.get('meta-category')
labels = [
f'misp:type="{name}"',
f'misp:category="{category}"',
f'misp:to_ids="{to_ids}"',
'from_object'
]
stix_labels = ListProperty(StringProperty)
stix_labels.clean(labels)
values = self.fetch_custom_values(misp_object['Attribute'], custom_object_id)
timestamp = self.get_datetime_from_timestamp(misp_object['timestamp'])
custom_object_args = {'id': custom_object_id, 'x_misp_values': values,
'created': timestamp, 'modified': timestamp, 'labels': labels,
'x_misp_category': category, 'created_by_ref': self.identity_id}
        if misp_object.get('comment'):
custom_object_args['x_misp_comment'] = misp_object['comment']
if custom_object_type not in self.custom_objects:
@CustomObject(custom_object_type, [
('id', StringProperty(required=True)),
('labels', ListProperty(stix_labels, required=True)),
('x_misp_values', DictionaryProperty(required=True)),
('created', TimestampProperty(required=True, precision='millisecond')),
('modified', TimestampProperty(required=True, precision='millisecond')),
('created_by_ref', StringProperty(required=True)),
('x_misp_comment', StringProperty()),
('x_misp_category', StringProperty())
])
class Custom(object):
def __init__(self, **kwargs):
return
self.custom_objects[custom_object_type] = Custom
else:
Custom = self.custom_objects[custom_object_type]
custom_object = Custom(**custom_object_args)
self.append_object(custom_object)
def add_object_indicator(self, misp_object, pattern_arg=None):
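        # Export a MISP object as an Indicator; pattern_arg, when given, carries a pre-built pattern (Windows PE binary files)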
indicator_id = 'indicator--{}'.format(misp_object['uuid'])
if pattern_arg:
name = 'WindowsPEBinaryFile'
pattern = pattern_arg
else:
name = misp_object['name']
pattern = f"[{' AND '.join(getattr(self, misp2stix2_mapping.objects_mapping[name]['pattern'])(misp_object['Attribute'], indicator_id))}]"
category = misp_object.get('meta-category')
killchain = self.create_killchain(category)
labels = self.create_object_labels(name, category, True)
timestamp = self.get_datetime_from_timestamp(misp_object['timestamp'])
indicator_args = {'id': indicator_id, 'type': 'indicator',
'labels': labels, 'pattern': pattern,
'description': misp_object['description'], 'allow_custom': True,
'kill_chain_phases': killchain, 'interoperability': True,
'created_by_ref': self.identity_id}
indicator_args.update(self.handle_time_fields(misp_object, timestamp, 'indicator'))
indicator = Indicator(**indicator_args)
self.append_object(indicator)
def add_object_observable(self, misp_object, observable_arg=None):
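        # Export a MISP object as ObservedData; observable_arg, when given, carries pre-built observables (Windows PE binary files)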
observed_data_id = 'observed-data--{}'.format(misp_object['uuid'])
if observable_arg:
name = 'WindowsPEBinaryFile'
observable_objects = observable_arg
else:
name = misp_object['name']
observable_objects = getattr(self, misp2stix2_mapping.objects_mapping[name]['observable'])(misp_object['Attribute'], observed_data_id)
category = misp_object.get('meta-category')
labels = self.create_object_labels(name, category, False)
timestamp = self.get_datetime_from_timestamp(misp_object['timestamp'])
observed_data_args = {'id': observed_data_id, 'type': 'observed-data', 'labels': labels,
'number_observed': 1, 'objects': observable_objects, 'allow_custom': True,
'created_by_ref': self.identity_id, 'interoperability': True}
observed_data_args.update(self.handle_time_fields(misp_object, timestamp, 'observed-data'))
try:
observed_data = ObservedData(**observed_data_args)
except InvalidValueError:
observed_data = self.fix_enumeration_issues(name, observed_data_args)
self.append_object(observed_data)
@staticmethod
def fix_enumeration_issues(name, args):
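        # Work around strict STIX2 enumerations on socket extensions by falling back to AF_UNSPEC
        # and moving unsupported values into x_misp_* properties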
if name == 'network-socket':
socket_args = deepcopy(args)
n = None
for index, observable_object in socket_args['objects'].items():
if observable_object['type'] == 'network-traffic':
n = index
break
if n is not None:
extension = socket_args['objects'][n]['extensions']['socket-ext']
feature = 'address_family'
if feature not in extension:
extension[feature] = 'AF_UNSPEC'
elif extension[feature] not in SocketExt._properties[feature].allowed:
extension[f'x_misp_text_{feature}'] = extension[feature]
extension[feature] = 'AF_UNSPEC'
feature = 'protocol_family'
if feature in extension and extension[feature] not in SocketExt._properties[feature].allowed:
extension['x_misp_text_domain_family'] = extension.pop(feature)
return ObservedData(**socket_args)
# If there is still an issue at this point, well at least we tried to fix it
return ObservedData(**args)
def add_object_vulnerability(self, misp_object, to_ids):
vulnerability_id = 'vulnerability--{}'.format(misp_object['uuid'])
vulnerability_args = {'id': vulnerability_id, 'type': 'vulnerability',
'created_by_ref': self.identity_id, 'interoperability': True}
vulnerability_args.update(self.parse_vulnerability_fields(misp_object['Attribute']))
vulnerability_args['labels'] = self.create_object_labels(misp_object['name'], misp_object['meta-category'], to_ids)
vulnerability = Vulnerability(**vulnerability_args)
self.append_object(vulnerability)
def append_object(self, stix_object, id_mapping=True):
self.SDOs.append(stix_object)
self.object_refs.append(stix_object.id)
if id_mapping:
object_type, uuid = stix_object.id.split('--')
self.ids[uuid] = object_type
@staticmethod
def create_killchain(category):
return [{'kill_chain_name': 'misp-category', 'phase_name': category}]
@staticmethod
def create_labels(attribute):
labels = [f'misp:{feature}="{attribute[feature]}"' for feature in ('type', 'category', 'to_ids')]
markings = []
if attribute.get('Tag'):
for tag in attribute['Tag']:
name = tag['name']
markings.append(name) if name.startswith('tlp:') else labels.append(name)
return labels, markings
@staticmethod
def create_object_labels(name, category, to_ids):
return [
f'misp:type="{name}"',
f'misp:category="{category}"',
f'misp:to_ids="{to_ids}"',
'from_object'
]
def create_marking(self, tag):
if tag in misp2stix2_mapping.tlp_markings:
marking_definition = globals()[misp2stix2_mapping.tlp_markings[tag]]
self.markings[tag] = marking_definition
return marking_definition.id
marking_id = 'marking-definition--%s' % uuid.uuid4()
definition_type, definition = tag.split(':')
marking_definition = {'type': 'marking-definition', 'id': marking_id, 'definition_type': definition_type,
'definition': {definition_type: definition}}
try:
self.markings[tag] = MarkingDefinition(**marking_definition)
except (TLPMarkingDefinitionError, ValueError):
return
return marking_id
@staticmethod
def _parse_tag(namespace, predicate):
if '=' not in predicate:
return "{} = {}".format(namespace, predicate)
predicate, value = predicate.split('=')
return "({}) {} = {}".format(namespace, predicate, value.strip('"'))
def define_observable(self, attribute):
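        # Dispatch to the observable builder mapped to the attribute type in misp2stix2_mapping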
attribute_type = attribute['type']
attribute_value = attribute['value']
args = self._get_attribute_arguments(attribute)
observable = getattr(self, misp2stix2_mapping.mispTypesMapping[attribute_type]['observable'])(*args)
if attribute_type == 'port':
observable['0']['protocols'].append(misp2stix2_mapping.defineProtocols[attribute_value] if attribute_value in misp2stix2_mapping.defineProtocols else "tcp")
return observable
def define_pattern(self, attribute):
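        # Dispatch to the pattern builder mapped to the attribute type; quotes are swapped for placeholders to keep the pattern valid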
attribute_value = attribute['value']
if isinstance(attribute_value, str):
attribute['value'] = attribute_value.replace("'", '##APOSTROPHE##').replace('"', '##QUOTE##')
args = self._get_attribute_arguments(attribute)
return getattr(self, misp2stix2_mapping.mispTypesMapping[attribute['type']]['pattern'])(*args)
def fetch_custom_values(self, attributes, object_id):
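        # Collect object attribute values keyed as <type>_<object_relation>, collapsing single-value lists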
values = defaultdict(list)
for attribute in attributes:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
attribute_type = '{}_{}'.format(attribute['type'], attribute['object_relation'].replace('.', '_DOT_'))
values[attribute_type].append(attribute['value'])
return {attribute_type: value[0] if len(value) == 1 else value for attribute_type, value in values.items()}
@staticmethod
def fetch_ids_flag(attributes):
for attribute in attributes:
if attribute['to_ids']:
return True
return False
def handle_tags(self, tags):
marking_ids = []
for tag in tags:
marking_id = self.markings[tag]['id'] if tag in self.markings else self.create_marking(tag)
if marking_id:
marking_ids.append(marking_id)
return marking_ids
################################################################################
## MISP ATTRIBUTES PARSING FUNCTIONS. ##
################################################################################
@staticmethod
def _get_artifact_observable(data):
return {'type': 'artifact', 'payload_bin': data}
@staticmethod
def _get_artifact_pattern(data):
return f"file:content_ref.payload_bin = '{data}'"
def _get_as_observable(self, _, attribute_value):
stix_type = 'number'
return {'0': {'type': 'autonomous-system', stix_type: self._parse_as_attribute(stix_type, attribute_value)}}
def _get_as_pattern(self, _, attribute_value):
stix_type = 'number'
return f"autonomous-system:{stix_type} = '{self._parse_as_attribute(stix_type, attribute_value)}'"
def _get_attachment_observable(self, _, attribute_value, data=None):
observable = self._get_file_observable(_, attribute_value)
if data is not None:
observable['0']['content_ref'] = '0'
return {'0': self._get_artifact_observable(data), '1': observable['0']}
return observable
def _get_attachment_pattern(self, _, attribute_value, data=None):
pattern = self._get_file_pattern(_, attribute_value)
if data is not None:
pattern = f'{pattern} AND {self._get_artifact_pattern(data)}'
return pattern
def _get_domain_ip_observable(self, _, attribute_value):
domain_value, ip_value = attribute_value.split('|')
address_type = self._define_address_type(ip_value)
observable = self._get_domain_observable(None, domain_value)
observable['0']['resolves_to_refs'] = ['1']
observable['1'] = {'type': address_type, 'value': ip_value}
return observable
def _get_domain_ip_pattern(self, _, attribute_value):
domain_value, ip_value = attribute_value.split('|')
return f"{self._get_domain_pattern(None, domain_value)} AND domain-name:resolves_to_refs[*].value = '{ip_value}'"
@staticmethod
def _get_domain_observable(_, attribute_value):
return {'0': {'type': 'domain-name', 'value': attribute_value}}
@staticmethod
def _get_domain_pattern(_, attribute_value):
return f"domain-name:value = '{attribute_value}'"
@staticmethod
def _get_email_address_observable(attribute_type, attribute_value):
observable = {
'0': {
'type': 'email-addr',
'value': attribute_value
},
'1': {
'type': 'email-message',
'is_multipart': 'false'
}
}
if 'src' in attribute_type:
observable['1']['from_ref'] = '0'
else:
observable['1']['to_refs'] = ['0']
return observable
@staticmethod
def _get_email_address_pattern(attribute_type, attribute_value):
email_type = 'from_ref' if 'src' in attribute_type else 'to_refs[*]'
return f"email-message:{email_type}.value = '{attribute_value}'"
def _get_email_attachment_observable(self, _, attribute_value):
observable = self._get_file_observable(None, attribute_value)
        observable['1'] = {
'type': 'email-message',
'is_multipart': 'false',
'body_multipart': [{
'content_disposition': f"attachment; filename='{attribute_value}'",
'body_raw_ref': '0'
}]
}
return observable
@staticmethod
def _get_email_attachment_pattern(_, attribute_value):
return f"email-message:body_multipart[*].body_raw_ref.name = '{attribute_value}'"
@staticmethod
def _get_email_message_observable(attribute_type, attribute_value):
email_type = attribute_type.split('-')[1]
observable = {
'0': {
'type': 'email-message',
email_type: attribute_value,
'is_multipart': 'false'
}
}
return observable
@staticmethod
def _get_email_message_pattern(attribute_type, attribute_value):
email_type = attribute_type.split('-')[1]
return f"email-message:{email_type} = '{attribute_value}'"
@staticmethod
def _get_file_observable(_, attribute_value):
return {'0': {'type': 'file', 'name': attribute_value}}
@staticmethod
def _get_file_pattern(_, attribute_value):
return f"file:name = '{attribute_value}'"
def _get_file_hash_observable(self, attribute_type, attribute_value):
filename, hash_type, hash_value = self._split_composite_attribute(attribute_type, attribute_value, 1)
return {'0': {'type': 'file', 'name': filename, 'hashes': {hash_type: hash_value}}}
def _get_file_hash_pattern(self, attribute_type, attribute_value):
filename, hash_type, hash_value = self._split_composite_attribute(attribute_type, attribute_value, 1)
return f'{self._get_file_pattern(None, filename)} AND {self._get_hash_pattern(hash_type, hash_value)}'
@staticmethod
def _get_hash_observable(attribute_type, attribute_value):
return {'0': {'type': 'file', 'hashes': {attribute_type: attribute_value}}}
@staticmethod
def _get_hash_pattern(attribute_type, attribute_value):
return f"file:hashes.'{attribute_type}' = '{attribute_value}'"
def _get_hostname_port_observable(self, _, attribute_value):
hostname_value, port_value = attribute_value.split('|')
observable = self._get_domain_observable(None, hostname_value)
observable['1'] = self._get_port_observable(None, port_value)[0]
return observable
def _get_hostname_port_pattern(self, _, attribute_value):
hostname_value, port_value = attribute_value.split('|')
return f'{self._get_domain_pattern(None, hostname_value)} AND {self._get_port_pattern(None, port_value)}'
def _get_ip_observable(self, attribute_type, attribute_value):
address_type = self._define_address_type(attribute_value)
observable = {
'0': {
'type': address_type,
'value': attribute_value
},
'1': {
'type': 'network-traffic',
f'{attribute_type.split("-")[1]}_ref': '0',
'protocols': [address_type.split('-')[0]]
}
}
return observable
def _get_ip_pattern(self, attribute_type, attribute_value):
ip_type = attribute_type.split('-')[1]
address_type = self._define_address_type(attribute_value)
return f"network-traffic:{ip_type}_ref.type = '{address_type}' AND network-traffic:{ip_type}_ref.value = '{attribute_value}'"
def _get_ip_port_observable(self, attribute_type, attribute_value):
ip_value, ip_type, port_value = self._split_composite_attribute(attribute_type, attribute_value, 0)
observable = self._get_ip_observable(ip_type, ip_value)
observable['1'][f'{ip_type.split("-")[1]}_port'] = port_value
return observable
def _get_ip_port_pattern(self, attribute_type, attribute_value):
ip_value, ip_type, port_value = self._split_composite_attribute(attribute_type, attribute_value, 0)
port_type = f'{ip_type.split("-")[1]}_port'
return f"network-traffic:{port_type} = '{port_value}' AND {self._get_ip_pattern(ip_type, ip_value)}"
@staticmethod
def _get_mac_address_observable(_, attribute_value):
return {'0': {'type': 'mac-addr', 'value': attribute_value.lower()}}
@staticmethod
def _get_mac_address_pattern(_, attribute_value):
return f"mac-addr:value = '{attribute_value.lower()}'"
def _get_malware_sample_observable(self, _, attribute_value, data=None):
observable = self._get_file_hash_observable('filename|md5', attribute_value)
if data is not None:
observable['0']['content_ref'] = '0'
return {'0': self._get_artifact_observable(data), '1': observable['0']}
return observable
def _get_malware_sample_pattern(self, _, attribute_value, data=None):
pattern = self._get_file_hash_pattern('filename|md5', attribute_value)
if data is not None:
pattern = f'{pattern} AND {self._get_artifact_pattern(data)}'
return pattern
@staticmethod
def _get_mutex_observable(_, attribute_value):
return {'0': {'type': 'mutex', 'name': attribute_value}}
@staticmethod
def _get_mutex_pattern(_, attribute_value):
return f"mutex:name = '{attribute_value}'"
# Usually broken and replaced by a custom object, because the network-traffic
# object requires the protocols fields, and either a src or dst ref
@staticmethod
def _get_port_observable(_, attribute_value):
return {'0': {'type': 'network-traffic', 'dst_port': attribute_value, 'protocols': []}}
@staticmethod
def _get_port_pattern(_, attribute_value):
return f"network-traffic:dst_port = '{attribute_value}'"
@staticmethod
def _get_regkey_observable(_, attribute_value):
return {'0': {'type': 'windows-registry-key', 'key': attribute_value.strip()}}
@staticmethod
def _get_regkey_pattern(_, attribute_value):
if '\\\\' not in attribute_value:
attribute_value = attribute_value.replace('\\', '\\\\')
return f"windows-registry-key:key = '{attribute_value.strip()}'"
def _get_regkey_value_observable(self, _, attribute_value):
regkey, value = attribute_value.split('|')
observable = self._get_regkey_observable(None, regkey)
observable['0']['values'] = WindowsRegistryValueType(**{'data': value.strip(), 'name': ''})
return observable
def _get_regkey_value_pattern(self, _, attribute_value):
if '\\\\' not in attribute_value:
attribute_value = attribute_value.replace('\\', '\\\\')
regkey, value = attribute_value.split('|')
return f"{self._get_regkey_pattern(None, regkey)} AND windows-registry-key:values.data = '{value.strip()}'"
@staticmethod
def _get_reply_to_observable(_, attribute_value):
observable = {
'0': {
'type': 'email-message',
'is_multipart': 'false',
'additional_header_fields': {
'Reply-To': attribute_value
}
}
}
return observable
@staticmethod
def _get_reply_to_pattern(_, attribute_value):
return f"email-message:additional_header_fields.reply_to = '{attribute_value}'"
@staticmethod
def _get_url_observable(_, attribute_value):
return {'0': {'type': 'url', 'value': attribute_value}}
@staticmethod
def _get_url_pattern(_, attribute_value):
return f"url:value = '{attribute_value}'"
@staticmethod
def _get_vulnerability_data(vulnerability_name):
return {'source_name': 'cve', 'external_id': vulnerability_name}
@staticmethod
def _get_x509_observable(attribute_type, attribute_value):
hash_type = attribute_type.split('-')[-1]
return {'0': {'type': 'x509-certificate', 'hashes': {hash_type: attribute_value}}}
@staticmethod
def _get_x509_pattern(attribute_type, attribute_value):
hash_type = attribute_type.split('-')[-1]
return f"x509-certificate:hashes.'{hash_type}' = '{attribute_value}'"
@staticmethod
def _split_composite_attribute(attribute_type, attribute_value, index):
value1, value2 = attribute_value.split('|')
return value1, attribute_type.split('|')[index], value2
################################################################################
## MISP OBJECTS PARSING FUNCTIONS ##
################################################################################
def parse_attack_pattern_fields(self, attributes):
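        # Map object attributes onto attack-pattern properties, external references and x_misp_* custom fields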
attack_pattern = {}
weaknesses = []
references = []
for attribute in attributes:
relation = attribute['object_relation']
if relation in misp2stix2_mapping.attackPatternObjectMapping:
attack_pattern[misp2stix2_mapping.attackPatternObjectMapping[relation]] = attribute['value']
else:
if relation in ('id', 'references'):
references.append(self._parse_attack_pattern_reference(attribute))
elif relation == 'related-weakness':
weaknesses.append(attribute['value'])
else:
attack_pattern[f"x_misp_{attribute['type']}_{relation.replace('-', '_')}"] = attribute['value']
attack_pattern['allow_custom'] = True
if references:
attack_pattern['external_references'] = references
if weaknesses:
attack_pattern['x_misp_weakness_related_weakness'] = weaknesses[0] if len(weaknesses) == 1 else weaknesses
return attack_pattern
@staticmethod
def _parse_attack_pattern_reference(attribute):
object_relation = attribute['object_relation']
source_name, key = misp2stix2_mapping.attack_pattern_reference_mapping[object_relation]
value = attribute['value']
if object_relation == 'id' and 'CAPEC' not in value:
value = f'CAPEC-{value}'
return {'source_name': source_name, key: value}
@staticmethod
def parse_vulnerability_fields(attributes):
vulnerability = {}
references = []
custom_args = defaultdict(list)
for attribute in attributes:
relation = attribute['object_relation']
if relation in misp2stix2_mapping.vulnerabilityMapping:
vulnerability[misp2stix2_mapping.vulnerabilityMapping[relation]] = attribute['value']
else:
if relation == 'references':
references.append({'source_name': 'url', 'url': attribute['value']})
else:
custom_args[f"x_misp_{attribute['type']}_{relation.replace('-', '_')}"].append(attribute['value'])
vulnerability['allow_custom'] = True
if 'name' in vulnerability:
references.append({'source_name': 'cve', 'external_id': vulnerability['name']})
if references:
vulnerability['external_references'] = references
if custom_args:
vulnerability.update({key: value[0] if len(value) == 1 else value for key, value in custom_args.items()})
return vulnerability
def resolve_asn_observable(self, attributes, object_id):
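        # Build an autonomous-system observable; each announced subnet becomes an address object pointing back via belongs_to_refs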
asn = misp2stix2_mapping.objectsMapping['asn']['observable']
observable = {}
object_num = 0
for attribute in attributes:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
relation = attribute['object_relation']
try:
stix_type = misp2stix2_mapping.asnObjectMapping[relation]
except KeyError:
stix_type = "x_misp_{}_{}".format(attribute['type'], relation)
attribute_value = attribute['value']
if relation == "subnet-announced":
observable[str(object_num)] = {'type': self._define_address_type(attribute_value), 'value': attribute_value}
object_num += 1
else:
asn[stix_type] = self._parse_as_attribute(stix_type, attribute_value)
observable[str(object_num)] = asn
for n in range(object_num):
observable[str(n)]['belongs_to_refs'] = [str(object_num)]
return observable
def resolve_asn_pattern(self, attributes, object_id):
mapping = misp2stix2_mapping.objectsMapping['asn']['pattern']
pattern = []
for attribute in attributes:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
relation = attribute['object_relation']
try:
stix_type = misp2stix2_mapping.asnObjectMapping[relation]
except KeyError:
stix_type = "'x_misp_{}_{}'".format(attribute['type'], relation)
attribute_value = attribute['value']
if relation == "subnet-announced":
pattern.append("{0}:{1} = '{2}'".format(self._define_address_type(attribute_value), stix_type, attribute_value))
else:
pattern.append(mapping.format(stix_type, attribute_value))
return pattern
def resolve_credential_observable(self, attributes, object_id):
user_account = misp2stix2_mapping.objectsMapping['credential']['observable']
for attribute in attributes:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
relation = attribute['object_relation']
try:
stix_type = misp2stix2_mapping.credentialObjectMapping[relation]
except KeyError:
stix_type = "x_misp_{}_{}".format(attribute['type'], relation)
user_account[stix_type] = attribute['value']
return {'0': user_account}
def resolve_credential_pattern(self, attributes, object_id):
mapping = misp2stix2_mapping.objectsMapping['credential']['pattern']
pattern = []
for attribute in attributes:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
relation = attribute['object_relation']
try:
stix_type = misp2stix2_mapping.credentialObjectMapping[relation]
except KeyError:
stix_type = "x_misp_{}_{}".format(attribute['type'], relation)
pattern.append(mapping.format(stix_type, attribute['value']))
return pattern
def resolve_domain_ip_observable(self, attributes, object_id):
for attribute in attributes:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
if attribute['type'] == 'ip-dst':
ip_value = attribute['value']
elif attribute['type'] == 'domain':
domain_value = attribute['value']
domain_ip_value = "{}|{}".format(domain_value, ip_value)
return getattr(self, misp2stix2_mapping.mispTypesMapping['domain|ip']['observable'])(None, domain_ip_value)
def resolve_domain_ip_pattern(self, attributes, object_id):
mapping = misp2stix2_mapping.objectsMapping['domain-ip']['pattern']
pattern = []
for attribute in attributes:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
try:
stix_type = misp2stix2_mapping.domainIpObjectMapping[attribute['type']]
except KeyError:
continue
pattern.append(mapping.format(stix_type, attribute['value']))
return pattern
def resolve_email_object_observable(self, attributes, object_id):
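        # Assemble an email-message observable together with the referenced email-addr, artifact and file objects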
observable = {}
message = defaultdict(list)
additional_header = {}
object_num = 0
for attribute in attributes:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
relation = attribute['object_relation']
attribute_value = attribute['value']
try:
mapping = misp2stix2_mapping.emailObjectMapping[relation]['stix_type']
if relation in ('from', 'to', 'cc'):
object_str = str(object_num)
observable[object_str] = {'type': 'email-addr', 'value': attribute_value}
if relation == 'from':
message[mapping] = object_str
else:
message[mapping].append(object_str)
object_num += 1
elif relation in ('attachment', 'screenshot'):
object_str = str(object_num)
body = {"content_disposition": "{}; filename='{}'".format(relation, attribute_value),
"body_raw_ref": object_str}
message['body_multipart'].append(body)
observable[object_str] = {'type': 'artifact', 'payload_bin': attribute['data']} if 'data' in attribute and attribute['data'] else {'type': 'file', 'name': attribute_value}
object_num += 1
elif relation in ('x-mailer', 'reply-to'):
key = '-'.join([part.capitalize() for part in relation.split('-')])
additional_header[key] = attribute_value
else:
message[mapping] = attribute_value
except Exception:
mapping = "x_misp_{}_{}".format(attribute['type'], relation)
message[mapping] = {'value': attribute_value, 'data': attribute['data']} if relation == 'eml' else attribute_value
if additional_header:
message['additional_header_fields'] = additional_header
message['type'] = 'email-message'
if 'body_multipart' in message and len(message['body_multipart']) > 1:
message['is_multipart'] = True
else:
message['is_multipart'] = False
observable[str(object_num)] = dict(message)
return observable
def resolve_email_object_pattern(self, attributes, object_id):
pattern_mapping = misp2stix2_mapping.objectsMapping['email']['pattern']
pattern = []
n = 0
for attribute in attributes:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
relation = attribute['object_relation']
try:
mapping = misp2stix2_mapping.emailObjectMapping[relation]
email_type = mapping['email_type']
if relation in ('attachment', 'screenshot'):
stix_type = mapping['stix_type'].format(n)
if 'data' in attribute and attribute['data']:
pattern.append(pattern_mapping.format(email_type, 'body_multipart[{}].body_raw_ref.payload_bin'.format(n), attribute['data']))
n += 1
else:
stix_type = self._parse_email_stix_type(relation, mapping['stix_type'])
except KeyError:
email_type = 'message'
stix_type = "'x_misp_{}_{}'".format(attribute['type'], relation)
if relation == 'eml':
stix_type_data = "{}.data".format(stix_type)
pattern.append(pattern_mapping.format(email_type, stix_type_data, attribute['data']))
stix_type += ".value"
pattern.append(pattern_mapping.format(email_type, stix_type, attribute['value']))
return pattern
def resolve_file_observable(self, attributes, object_id):
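        # Assemble a file observable with optional directory, artifact (attachment / malware sample) and hash entries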
observable = {}
file_observable = defaultdict(dict)
file_observable['type'] = 'file'
n_object = 0
attributes_dict = self.create_file_attributes_dict(attributes, object_id)
for key, feature in misp2stix2_mapping.fileMapping.items():
if key in attributes_dict:
if key in misp2stix2_mapping.hash_types:
file_observable['hashes'][feature] = attributes_dict[key]
else:
file_observable[feature] = attributes_dict[key]
if 'filename' in attributes_dict:
file_observable['name'] = attributes_dict['filename'][0]
if len(attributes_dict['filename']) > 1:
self._handle_multiple_file_fields_observable(file_observable, attributes_dict['filename'][1:], 'filename')
if 'path' in attributes_dict:
observable[str(n_object)] = {'type': 'directory', 'path': attributes_dict['path'][0]}
file_observable['parent_directory_ref'] = str(n_object)
n_object += 1
if len(attributes_dict['path']) > 1:
self._handle_multiple_file_fields_observable(file_observable, attributes_dict['path'][1:], 'path')
if 'fullpath' in attributes_dict:
if 'parent_directory_ref' not in file_observable:
observable[str(n_object)] = {'type': 'directory', 'path': attributes_dict['fullpath'][0]}
file_observable['parent_directory_ref'] = str(n_object)
n_object += 1
                if len(attributes_dict['fullpath']) > 1:
self._handle_multiple_file_fields_observable(file_observable, attributes_dict['fullpath'][1:], 'fullpath')
else:
self._handle_multiple_file_fields_observable(file_observable, attributes_dict['fullpath'], 'fullpath')
if 'malware-sample' in attributes_dict:
artifact, value = self._create_artifact_observable(attributes_dict['malware-sample'])
filename, md5 = value.split('|')
artifact['name'] = filename
artifact['hashes'] = {'MD5': md5}
observable[str(n_object)] = artifact
file_observable['content_ref'] = str(n_object)
n_object += 1
if 'attachment' in attributes_dict:
artifact, value = self._create_artifact_observable(attributes_dict['attachment'])
artifact['name'] = value
observable[str(n_object)] = artifact
n_object += 1
observable[str(n_object)] = file_observable
return observable
def resolve_file_pattern(self, attributes, object_id):
patterns = []
pattern = misp2stix2_mapping.objectsMapping['file']['pattern']
attributes_dict = self.create_file_attributes_dict(attributes, object_id)
for key, feature in misp2stix2_mapping.fileMapping.items():
if key in attributes_dict:
if key in misp2stix2_mapping.hash_types:
feature = f"hashes.'{feature}'"
patterns.append(pattern.format(feature, attributes_dict[key]))
if 'filename' in attributes_dict:
self._handle_multiple_file_fields_pattern(patterns, attributes_dict['filename'], 'name')
for feature in ('path', 'fullpath'):
if feature in attributes_dict:
self._handle_multiple_file_fields_pattern(patterns, attributes_dict[feature], 'parent_directory_ref.path')
for feature, pattern_part in zip(('attachment', 'malware-sample'), ('artifact:', 'file:content_ref.')):
if feature in attributes_dict:
value = attributes_dict[feature]
if ' | ' in value:
value, data = value.split(' | ')
patterns.append(f"{pattern_part}payload_bin = '{data}'")
if feature == 'malware-sample':
value, md5 = value.split('|')
patterns.append(f"{pattern_part}hashes.'MD5' = '{md5}'")
patterns.append(f"{pattern_part}name = '{value}'")
else:
patterns.append(f"{pattern_part}x_misp_text_name = '{value}'")
return patterns
def resolve_ip_port_observable(self, attributes, object_id):
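        # Build a network-traffic observable referencing the ip address and/or domain objects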
observable = {'type': 'network-traffic', 'protocols': ['tcp']}
ip_address = {}
domain = {}
for attribute in attributes:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
relation = attribute['object_relation']
attribute_value = attribute['value']
if relation == 'ip':
ip_address['type'] = self._define_address_type(attribute_value)
ip_address['value'] = attribute_value
elif relation == 'domain':
domain['type'] = 'domain-name'
domain['value'] = attribute_value
else:
try:
observable_type = misp2stix2_mapping.ipPortObjectMapping[relation]
except KeyError:
continue
observable[observable_type] = attribute_value
ref_type = 'dst_ref'
main_observable = None
if 'src_port' in observable or 'dst_port' in observable:
for port in ('src_port', 'dst_port'):
try:
port_value = misp2stix2_mapping.defineProtocols[str(observable[port])]
if port_value not in observable['protocols']:
observable['protocols'].append(port_value)
except KeyError:
pass
main_observable = observable
else:
if domain:
ref_type = 'resolves_to_refs'
return self.ip_port_observable_to_return(ip_address, main_observable, domain, ref_type)
@staticmethod
def ip_port_observable_to_return(ip_address, d_object, domain, s_object):
observable = {}
o_id = 0
if ip_address:
observable['0'] = ip_address
o_id += 1
if d_object:
if ip_address:
d_object[s_object] = '0'
observable[str(o_id)] = d_object
o_id += 1
if domain:
if ip_address and not d_object:
domain[s_object] = '0'
observable[str(o_id)] = domain
return observable
def resolve_ip_port_pattern(self, attributes, object_id):
pattern = []
for attribute in attributes:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
relation = attribute['object_relation']
attribute_value = attribute['value']
if relation == 'domain':
mapping_type = 'domain-ip'
stix_type = misp2stix2_mapping.ipPortObjectMapping[relation]
elif relation == 'ip':
mapping_type = 'ip-port'
stix_type = misp2stix2_mapping.ipPortObjectMapping[relation].format('ref', self._define_address_type(attribute_value))
else:
try:
stix_type = misp2stix2_mapping.ipPortObjectMapping[relation]
mapping_type = 'ip-port'
except KeyError:
continue
pattern.append(misp2stix2_mapping.objectsMapping[mapping_type]['pattern'].format(stix_type, attribute_value))
return pattern
def resolve_network_connection_observable(self, attributes, object_id):
attributes = {attribute['object_relation']: attribute['value'] for attribute in attributes}
n, network_object, observable = self.create_network_observable(attributes)
protocols = [attributes[layer] for layer in ('layer3-protocol', 'layer4-protocol', 'layer7-protocol') if layer in attributes]
network_object['protocols'] = protocols if protocols else ['tcp']
observable[str(n)] = network_object
return observable
def resolve_network_connection_pattern(self, attributes, object_id):
mapping = misp2stix2_mapping.objectsMapping['network-connection']['pattern']
attributes = {attribute['object_relation']: attribute['value'] for attribute in attributes}
pattern = self.create_network_pattern(attributes, mapping)
protocols = [attributes[layer] for layer in ('layer3-protocol', 'layer4-protocol', 'layer7-protocol') if layer in attributes]
if protocols:
            for index, protocol in enumerate(protocols):
                pattern.append("network-traffic:protocols[{}] = '{}'".format(index, protocol))
return pattern
def resolve_network_socket_observable(self, attributes, object_id):
states, tmp_attributes = self.parse_network_socket_attributes(attributes, object_id)
n, network_object, observable = self.create_network_observable(tmp_attributes)
socket_extension = {misp2stix2_mapping.networkTrafficMapping[feature]: tmp_attributes[feature] for feature in ('address-family', 'domain-family') if feature in tmp_attributes}
for state in states:
state_type = "is_{}".format(state)
socket_extension[state_type] = True
network_object['protocols'] = [tmp_attributes['protocol']] if 'protocol' in tmp_attributes else ['tcp']
if socket_extension:
network_object['extensions'] = {'socket-ext': socket_extension}
observable[str(n)] = network_object
return observable
def resolve_network_socket_pattern(self, attributes, object_id):
mapping = misp2stix2_mapping.objectsMapping['network-socket']['pattern']
states, tmp_attributes = self.parse_network_socket_attributes(attributes, object_id)
pattern = self.create_network_pattern(tmp_attributes, mapping)
stix_type = "extensions.'socket-ext'.{}"
if "protocol" in tmp_attributes:
pattern.append("network-traffic:protocols[0] = '{}'".format(tmp_attributes['protocol']))
for feature in ('address-family', 'domain-family'):
if feature in tmp_attributes:
pattern.append(mapping.format(stix_type.format(misp2stix2_mapping.networkTrafficMapping[feature]), tmp_attributes[feature]))
for state in states:
state_type = "is_{}".format(state)
pattern.append(mapping.format(stix_type.format(state_type), True))
return pattern
def parse_network_socket_attributes(self, attributes, object_id):
states = []
tmp_attributes = {}
for attribute in attributes:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
relation = attribute['object_relation']
if relation == 'state':
states.append(attribute['value'])
else:
tmp_attributes[relation] = attribute['value']
return states, tmp_attributes
def resolve_process_observable(self, attributes, object_id):
observable = {}
current_process = defaultdict(list)
current_process['type'] = 'process'
n = 0
for attribute in attributes:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
relation = attribute['object_relation']
if relation == 'parent-pid':
str_n = str(n)
observable[str_n] = {'type': 'process', 'pid': attribute['value']}
current_process['parent_ref'] = str_n
n += 1
elif relation == 'child-pid':
str_n = str(n)
observable[str_n] = {'type': 'process', 'pid': attribute['value']}
current_process['child_refs'].append(str_n)
n += 1
elif relation == 'image':
str_n = str(n)
observable[str_n] = {'type': 'file', 'name': attribute['value']}
current_process['binary_ref'] = str_n
n += 1
else:
try:
current_process[misp2stix2_mapping.processMapping[relation]] = attribute['value']
except KeyError:
pass
observable[str(n)] = current_process
return observable
def resolve_process_pattern(self, attributes, object_id):
mapping = misp2stix2_mapping.objectsMapping['process']['pattern']
pattern = []
for attribute in attributes:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
try:
pattern.append(mapping.format(misp2stix2_mapping.processMapping[attribute['object_relation']], attribute['value']))
except KeyError:
continue
return pattern
def resolve_regkey_observable(self, attributes, object_id):
observable = {'type': 'windows-registry-key'}
values = {}
registry_value_types = ('data', 'data-type', 'name')
for attribute in attributes:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
relation = attribute['object_relation']
try:
stix_type = misp2stix2_mapping.regkeyMapping[relation]
except KeyError:
stix_type = "x_misp_{}_{}".format(attribute['type'], relation)
if relation in registry_value_types:
values[stix_type] = attribute['value']
else:
observable[stix_type] = attribute['value']
if values:
if 'name' not in values:
values['name'] = ''
observable['values'] = [values]
return {'0': observable}
def resolve_regkey_pattern(self, attributes, object_id):
mapping = misp2stix2_mapping.objectsMapping['registry-key']['pattern']
pattern = []
fields = ('key', 'value')
registry_value_types = ('data', 'data-type', 'name')
for attribute in attributes:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
relation = attribute['object_relation']
try:
stix_type = misp2stix2_mapping.regkeyMapping[relation]
except KeyError:
stix_type = "'x_misp_{}_{}'".format(attribute['type'], relation)
value = attribute['value'].strip().replace('\\', '\\\\') if relation in fields and '\\\\' not in attribute['value'] else attribute['value'].strip()
if relation in registry_value_types:
stix_type = "values.{}".format(stix_type)
pattern.append(mapping.format(stix_type, value))
return pattern
def create_network_observable(self, attributes):
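        # Shared helper for network objects: build the address/hostname objects and src/dst references plus ports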
n = 0
network_object = {'type': 'network-traffic'}
observable = {}
for feature in ('src', 'dst'):
ip_feature = 'ip-{}'.format(feature)
host_feature = 'hostname-{}'.format(feature)
refs = []
if host_feature in attributes:
str_n = str(n)
observable[str_n] = {'type': 'domain-name', 'value': attributes[host_feature]}
refs.append(str_n)
n += 1
if ip_feature in attributes:
feature_value = attributes[ip_feature]
str_n = str(n)
observable[str_n] = {'type': self._define_address_type(feature_value), 'value': feature_value}
refs.append(str_n)
                n += 1
if refs:
ref_str, ref_list = ('ref', refs[0]) if len(refs) == 1 else ('refs', refs)
network_object['{}_{}'.format(feature, ref_str)] = ref_list
for feature in ('src-port', 'dst-port'):
if feature in attributes:
network_object[misp2stix2_mapping.networkTrafficMapping[feature]] = attributes[feature]
return n, network_object, observable
def create_network_pattern(self, attributes, mapping):
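        # Shared helper for network objects: build the src/dst ref and port comparisons of the pattern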
pattern = []
features = ('ip-{}', 'hostname-{}')
for feature in ('src', 'dst'):
index = 0
references = {ftype: attributes[ftype] for ftype in (f_type.format(feature) for f_type in features) if ftype in attributes}
ref = 'ref' if len(references) == 1 else 'ref[{}]'
if f'ip-{feature}' in attributes:
value = references[f'ip-{feature}']
pattern.append(mapping.format(misp2stix2_mapping.networkTrafficMapping[f'ip-{feature}'].format(ref.format(index), self._define_address_type(value)), value))
index += 1
if f'hostname-{feature}' in attributes:
key = f'hostname-{feature}'
pattern.append(mapping.format(misp2stix2_mapping.networkTrafficMapping[key].format(ref.format(index), 'domain-name'), references[key]))
if f'{feature}-port' in attributes:
key = f'{feature}-port'
pattern.append(mapping.format(misp2stix2_mapping.networkTrafficMapping[key], attributes[key]))
return pattern
@staticmethod
def resolve_stix2_pattern(attributes, _):
for attribute in attributes:
if attribute['object_relation'] == 'stix2-pattern':
return attribute['value']
def resolve_url_observable(self, attributes, object_id):
url_args = {}
for attribute in attributes:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
if attribute['type'] == 'url':
# If we have the url (WE SHOULD), we return the observable supported atm with the url value
observable = {'0': {'type': 'url', 'value': attribute['value']}}
else:
# otherwise, we need to see if there is a port or domain value to parse
url_args[attribute['type']] = attribute['value']
if 'domain' in url_args:
observable['1'] = {'type': 'domain-name', 'value': url_args['domain']}
if 'port' in url_args:
port_value = url_args['port']
port = {'type': 'network-traffic', 'dst_ref': '1', 'protocols': ['tcp'], 'dst_port': port_value}
try:
port['protocols'].append(misp2stix2_mapping.defineProtocols[port_value])
except KeyError:
pass
if '1' in observable:
observable['2'] = port
else:
observable['1'] = port
return observable
def resolve_url_pattern(self, attributes, object_id):
pattern = []
for attribute in attributes:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
attribute_type = attribute['type']
try:
stix_type = misp2stix2_mapping.urlMapping[attribute_type]
except KeyError:
continue
if attribute_type == 'port':
mapping = 'ip-port'
elif attribute_type == 'domain':
mapping = 'domain-ip'
else:
mapping = attribute_type
pattern.append(misp2stix2_mapping.objectsMapping[mapping]['pattern'].format(stix_type, attribute['value']))
return pattern
def resolve_user_account_observable(self, attributes, object_id):
attributes = self.parse_user_account_attributes(attributes, object_id)
observable = {'type': 'user-account'}
extension = {}
for relation, value in attributes.items():
try:
observable[misp2stix2_mapping.userAccountMapping[relation]] = value
except KeyError:
try:
extension[misp2stix2_mapping.unixAccountExtensionMapping[relation]] = value
except KeyError:
continue
if extension:
observable['extensions'] = {'unix-account-ext': extension}
return {'0': observable}
def resolve_user_account_pattern(self, attributes, object_id):
mapping = misp2stix2_mapping.objectsMapping['user-account']['pattern']
extension_pattern = "extensions.'unix-account-ext'.{}"
attributes = self.parse_user_account_attributes(attributes, object_id)
pattern = []
        if 'group' in attributes:
            for index, group in enumerate(attributes.pop('group')):
                pattern.append(mapping.format(extension_pattern.format('groups[{}]'.format(index)), group))
for relation, value in attributes.items():
try:
pattern_part = mapping.format(misp2stix2_mapping.userAccountMapping[relation], value)
except KeyError:
try:
pattern_part = mapping.format(extension_pattern.format(misp2stix2_mapping.unixAccountExtensionMapping[relation]), value)
except KeyError:
continue
pattern.append(pattern_part)
return pattern
def parse_user_account_attributes(self, attributes, object_id):
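        # Normalize user-account attributes: groups stay a list, username fills in a missing user-id, free text is dropped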
tmp_attributes = defaultdict(list)
for attribute in attributes:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
relation = attribute['object_relation']
if relation == 'group':
tmp_attributes[relation].append(attribute['value'])
else:
tmp_attributes[relation] = attribute['value']
if 'user-id' not in tmp_attributes and 'username' in tmp_attributes:
tmp_attributes['user-id'] = tmp_attributes.pop('username')
if 'text' in tmp_attributes:
tmp_attributes.pop('text')
return tmp_attributes
def resolve_x509_observable(self, attributes, object_id):
observable = {'type': 'x509-certificate'}
hashes = {}
attributes2parse = defaultdict(list)
for attribute in attributes:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
relation = attribute['object_relation']
if relation in ("x509-fingerprint-md5", "x509-fingerprint-sha1", "x509-fingerprint-sha256"):
hashes[relation.split('-')[2]] = attribute['value']
else:
try:
observable[misp2stix2_mapping.x509mapping[relation]] = attribute['value']
except KeyError:
value = bool(attribute['value']) if attribute['type'] == 'boolean' else attribute['value']
attributes2parse["x_misp_{}_{}".format(attribute['type'], relation)].append(value)
if hashes:
observable['hashes'] = hashes
for stix_type, value in attributes2parse.items():
observable[stix_type] = value if len(value) > 1 else value[0]
return {'0': observable}
def resolve_x509_pattern(self, attributes, object_id):
mapping = misp2stix2_mapping.objectsMapping['x509']['pattern']
pattern = []
for attribute in attributes:
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
relation = attribute['object_relation']
if relation in ("x509-fingerprint-md5", "x509-fingerprint-sha1", "x509-fingerprint-sha256"):
stix_type = f"hashes.'{relation.split('-')[2]}'"
else:
try:
stix_type = misp2stix2_mapping.x509mapping[relation]
except KeyError:
stix_type = "'x_misp_{}_{}'".format(attribute['type'], relation)
value = bool(attribute['value']) if attribute['type'] == 'boolean' else attribute['value']
pattern.append(mapping.format(stix_type, value))
return pattern
################################################################################
## UTILITY FUNCTIONS. ##
################################################################################
@staticmethod
def _create_artifact_observable(value):
artifact = {'type': 'artifact'}
if ' | ' in value:
value, data = value.split(' | ')
artifact['payload_bin'] = data
return artifact, value
def create_file_attributes_dict(self, attributes, object_id):
multiple_fields = ('filename', 'path', 'fullpath')
attributes_dict = defaultdict(list)
for attribute in attributes:
attributes_dict[attribute['object_relation']].append(self._parse_attribute(attribute))
if attribute.get('Galaxy'):
self.parse_galaxies(attribute['Galaxy'], object_id)
return {key: value[0] if key not in multiple_fields and len(value) == 1 else value for key, value in attributes_dict.items()}
@staticmethod
def _define_address_type(address):
if ':' in address:
return 'ipv6-addr'
return 'ipv4-addr'
@staticmethod
def _get_attribute_arguments(attribute):
if attribute.get('data'):
return (attribute['type'], attribute['value'], attribute['data'])
return (attribute['type'], attribute['value'])
@staticmethod
def _get_function_to_call(attribute_type):
if attribute_type in misp2stix2_mapping.mispTypesMapping:
return 'handle_usual_type'
if attribute_type == 'link':
return 'handle_link'
if attribute_type == 'vulnerability':
return 'add_vulnerability'
return 'add_custom'
@staticmethod
def get_datetime_from_timestamp(timestamp):
return datetime.utcfromtimestamp(int(timestamp))
@staticmethod
def _handle_multiple_file_fields_observable(file_observable, values, feature):
if len(values) > 1:
file_observable[f'x_misp_multiple_{feature}s'] = values
else:
file_observable[f'x_misp_multiple_{feature}'] = values[0]
@staticmethod
def _handle_multiple_file_fields_pattern(patterns, values, feature):
if len(values) > 1:
patterns.extend([f"file:{feature} = '{value}'" for value in values])
else:
patterns.append(f"file:{feature} = '{values[0]}'")
@staticmethod
def handle_time_fields(attribute, timestamp, stix_type):
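        # Fill created/modified plus the stix_type-specific seen fields from first_seen/last_seen, defaulting to the attribute timestamp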
to_return = {'created': timestamp, 'modified': timestamp}
iso_timestamp = f"{timestamp.isoformat(timespec='milliseconds')}Z"
for misp_field, stix_field in zip(('first_seen', 'last_seen'), _time_fields[stix_type]):
to_return[stix_field] = datetime.strptime(attribute[misp_field].split('+')[0], '%Y-%m-%dT%H:%M:%S.%f') if attribute.get(misp_field) else iso_timestamp
return to_return
@staticmethod
def _parse_as_attribute(stix_type, attribute_value):
if stix_type == 'number' and attribute_value.startswith('AS'):
return attribute_value[2:]
return attribute_value
@staticmethod
def _parse_attribute(attribute):
if attribute['type'] in ('attachment', 'malware-sample') and attribute.get('data') is not None:
return f"{attribute['value'].replace(' | ', '|')} | {attribute['data']}"
return attribute['value']
@staticmethod
def _parse_email_stix_type(relation, mapping):
if relation == 'from':
return f'{mapping}.value'
if relation in ('to', 'cc'):
return f'{mapping}[*].value'
return mapping
def main(args):
stix_builder = StixBuilder()
stix_builder.loadEvent(args)
stix_builder.buildEvent()
if __name__ == "__main__":
main(sys.argv)
| agpl-3.0 | 985,739,337,408,242,000 | 47.269251 | 191 | 0.583906 | false |
matthijsvk/multimodalSR | code/Experiments/neon-master/examples/faster-rcnn/tests/test_anchor_target_layer.py | 1 | 3984 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from __future__ import division
import numpy as np
from neon.backends import gen_backend
import itertools as itt
from anchor_target_layer_ref import AnchorTargetLayer
from objectlocalization import ObjectLocalization, PASCALVOC
from neon.data.dataloader_transformers import TypeCast
from aeon import DataLoader
import os
MIN_SIZE = 600
MAX_SIZE = 1000
def pytest_generate_tests(metafunc):
if 'fargs' in metafunc.fixturenames:
height = [1000]
width = [1000]
fargs = itt.product(height, width)
metafunc.parametrize('fargs', fargs)
def test_anchor_target_layer(backend_default, fargs):
(height, width) = fargs
    manifest_path = os.environ.get('PASCAL_MANIFEST_PATH')
    assert manifest_path is not None, "Please set the PASCAL_MANIFEST_PATH variable."
    manifest_root = os.environ.get('PASCAL_MANIFEST_ROOT')
    assert manifest_root is not None, "Please set the PASCAL_MANIFEST_ROOT variable."
config = PASCALVOC(manifest_path, manifest_root, cache_dir='',
height=height, width=width, inference=False)
config['subset_fraction'] = 0.1
dl = DataLoader(config, backend_default)
dl = TypeCast(dl, index=0, dtype=np.float32)
train_set = ObjectLocalization(dl, frcn_rois_per_img=128)
for idx, (X, Y) in enumerate(train_set):
reference_test(train_set, X, Y)
def reference_test(dataloader, X, Y):
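    # Run the reference AnchorTargetLayer on the same inputs and check labels and bbox targets match the dataloader output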
bbtargets_mask = Y[1][1]
target = AnchorTargetLayer()
im_shape = dataloader.im_shape.get()
im_scale = dataloader.im_scale.get()[0][0]
num_gt_boxes = dataloader.num_gt_boxes.get()[0][0]
# prepare inputs
bottom = [0, 1, 2]
bottom[0] = np.zeros((dataloader.conv_height, dataloader.conv_width))
bottom[1] = dataloader.gt_boxes.get()[:num_gt_boxes]
bottom[2] = [im_shape[0], im_shape[1], im_scale]
# obtain forward pass output
top = [0, 1, 2, 3]
target.setup(bottom, top)
target.forward(bottom, top)
py_labels, py_bbtargets, py_iw, py_ow = top
label = bbtargets_mask.get().reshape((4, -1))[0, :]
# positive labels should match
if np.sum(label == 1) < 128:
# assert positive labels match since positives (usually) dont get under sampled
assert np.allclose(np.where(label == 1)[0],
np.where(py_labels.flatten() == 1)[0])
# our bboxes are in 4 * K, whereas reference is in K * 4 order, so reshape
bb = Y[1][0].get() * Y[1][1].get()
pybb = py_bbtargets * py_iw
pybb = pybb.reshape((1, 9, 4, dataloader.conv_height, dataloader.conv_width)) \
.transpose(0, 2, 1, 3, 4)
pybb = pybb.reshape(1, 36, dataloader.conv_height, dataloader.conv_width) \
.flatten()
# bounding box target locations and values must match
assert np.allclose(np.where(bb != 0)[0], np.where(pybb != 0)[0], atol=0.001)
assert np.allclose(bb[np.where(bb != 0)], pybb[np.where(pybb != 0)], atol=0.001)
if __name__ == "__main__":
be = gen_backend()
fargs = (1000, 1000) # height, width
test_anchor_target_layer(be, fargs)
| mit | 66,151,425,816,529,656 | 35.888889 | 92 | 0.61496 | false |
numerodix/spiderfetch | spiderfetch/spiderfetch.py | 1 | 10626 | #!/usr/bin/env python
#
# <desc> Web spider and fetcher </desc>
from __future__ import absolute_import
import os
import shutil
import sys
import traceback
import time
import ansicolor
from spiderfetch import fetch
from spiderfetch import ioutils
from spiderfetch import recipe
from spiderfetch import spider
from spiderfetch import urlrewrite
from spiderfetch import web
class Session(object):
def __init__(self, wb=None, queue=None, rules=None):
self.wb = wb
self.queue = queue
self.rules = rules
# well, not really, it may not have been saved yet
self.last_save = time.time()
self.save_interval = 60 * 30 # 30min
def save(self):
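        """Serialize the web (and the queue, while spidering is unfinished) under the root hostname."""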
hostname = urlrewrite.get_hostname(self.wb.root.url)
filename = urlrewrite.hostname_to_filename(hostname)
ioutils.write_err("Saving session to %s ..." %
ansicolor.yellow(filename + ".{web,session}"))
ioutils.serialize(self.wb, filename + ".web", dir=ioutils.LOGDIR)
if self.queue:
ioutils.serialize(self.queue, filename + ".session", dir=ioutils.LOGDIR)
# only web being saved, ie. spidering complete, remove old session
elif ioutils.file_exists(filename + ".session", dir=ioutils.LOGDIR):
ioutils.delete(filename + ".session", dir=ioutils.LOGDIR)
ioutils.write_err(ansicolor.green("done\n"))
def maybe_save(self):
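        """Save the session if more than save_interval seconds have passed since the last save."""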
t = time.time()
if self.last_save + self.save_interval < t:
self.save()
self.last_save = t
@classmethod
def restore(cls, url):
hostname = urlrewrite.get_hostname(url)
filename = urlrewrite.hostname_to_filename(hostname)
q, wb = None, None
if (ioutils.file_exists(filename + ".web", dir=ioutils.LOGDIR)):
ioutils.write_err("Restoring web from %s ..." %
ansicolor.yellow(filename + ".web"))
wb = ioutils.deserialize(filename + ".web", dir=ioutils.LOGDIR)
ioutils.write_err(ansicolor.green("done\n"))
if (ioutils.file_exists(filename + ".session", dir=ioutils.LOGDIR)):
ioutils.write_err("Restoring session from %s ..." %
ansicolor.yellow(filename + ".session"))
q = ioutils.deserialize(filename + ".session", dir=ioutils.LOGDIR)
q = recipe.overrule_records(q)
ioutils.write_err(ansicolor.green("done\n"))
return cls(wb=wb, queue=q)
class SpiderFetcher(object):
def __init__(self, session):
self.session = session
def log_exc(self, exc, url):
exc_filename = ioutils.safe_filename("exc", dir=ioutils.LOGDIR)
ioutils.serialize(exc, exc_filename, dir=ioutils.LOGDIR)
s = traceback.format_exc()
s += "\nBad url: |%s|\n" % url
node = self.session.wb.get(url)
for u in node.incoming.keys():
s += "Ref : |%s|\n" % u
s += "Exception object serialized to file: %s\n\n" % exc_filename
ioutils.savelog(s, "error_log", "a")
def get_url(self, fetcher, host_filter=False):
"""http 30x redirects produce a recursion with new urls that may or may not
have been seen before"""
while True:
try:
fetcher.launch_w_tries()
break
except fetch.ChangedUrlWarning as e:
url = urlrewrite.rewrite_urls(fetcher.url, [e.new_url]).next()
if url in self.session.wb:
raise fetch.DuplicateUrlWarning
if not recipe.apply_hostfilter(host_filter, url):
raise fetch.UrlRedirectsOffHost
self.session.wb.add_ref(fetcher.url, url)
fetcher.url = url
return fetcher.url
def qualify_urls(self, ref_url, urls, rule, newqueue):
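        """Match urls against the rule's dump/fetch/spider patterns, queue them accordingly and record them in the web."""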
for url in urls:
_dump, _fetch, _spider = False, False, False
# apply patterns to determine how to qualify url
if recipe.apply_mask(rule.get("dump"), url):
_dump = True
if recipe.apply_mask(rule.get("fetch"), url):
_fetch = True
if (recipe.apply_mask(rule.get("spider"), url) and
recipe.apply_hostfilter(rule.get("host_filter"), url)):
_spider = True
# build a record based on qualification
record = {"url": url}
if url not in self.session.wb:
if _dump:
ioutils.write_out("%s\n" % url)
if _fetch and _spider:
record["mode"] = fetch.Fetcher.SPIDER_FETCH
elif _fetch:
record["mode"] = fetch.Fetcher.FETCH
elif _spider:
record["mode"] = fetch.Fetcher.SPIDER
if _fetch or _spider:
newqueue.append(record)
# add url to web if it was matched by anything
if _dump or _fetch or _spider:
self.session.wb.add_url(ref_url, [url])
return newqueue
def process_records(self, rule):
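        """Fetch/spider every queued record under this rule and return the queue of newly discovered urls."""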
newqueue = []
for record in self.session.queue:
self.session.maybe_save()
url = record.get("url")
try:
(fp, filename) = ioutils.get_tempfile()
f = fetch.Fetcher(mode=record.get("mode"), url=url, filename=filename)
url = self.get_url(f, host_filter=rule.get("host_filter"))
filename = f.filename
# consider retrying the fetch if it failed
if f.error and fetch.err.is_temporal(f.error):
if not record.get("retry"):
record["retry"] = True
self.session.queue.append(record)
if record.get("mode") == fetch.Fetcher.SPIDER:
data = open(filename, 'r').read()
urls = spider.unbox_it_to_ss(spider.findall(data, url))
urls = urlrewrite.rewrite_urls(url, urls)
newqueue = self.qualify_urls(url, urls, rule, newqueue)
if record.get("mode") == fetch.Fetcher.FETCH:
shutil.move(filename,
ioutils.safe_filename(urlrewrite.url_to_filename(url)))
except (fetch.DuplicateUrlWarning, fetch.UrlRedirectsOffHost):
pass
except KeyboardInterrupt:
q = self.session.queue[self.session.queue.index(record):]
q.extend(newqueue)
self.session.queue = q
self.session.save()
sys.exit(1)
except Exception as exc:
self.log_exc(exc, url)
finally:
try:
if filename and os.path.exists(filename):
os.unlink(filename)
if fp:
os.close(fp)
except (NameError, OSError):
pass
pause = os.environ.get('PAUSE')
if pause:
time.sleep(int(pause))
return newqueue
def split_queue(self, lastrule=False):
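        """Split the queue into fetch-only records and, unless this is the last rule, records left to spider."""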
fetch_queue, spider_queue = [], []
for record in self.session.queue:
mode = record.get("mode")
if mode == fetch.Fetcher.FETCH or mode == fetch.Fetcher.SPIDER_FETCH:
r = record.copy()
r["mode"] = fetch.Fetcher.FETCH
fetch_queue.append(r)
# if this isn't the last rule, defer remaining spidering to the
# next rule
if not lastrule:
if mode == fetch.Fetcher.SPIDER or mode == fetch.Fetcher.SPIDER_FETCH:
r = record.copy()
r["mode"] = fetch.Fetcher.SPIDER
spider_queue.append(r)
return fetch_queue, spider_queue
def main(self):
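        """Apply each rule to the queue, spidering up to the rule's depth, then save the session."""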
outer_queue = self.session.queue
for rule in self.session.rules:
depth = rule.get("depth", 1)
# queue will be exhausted in inner loop, but once depth is reached
# the contents to spider will fall through to outer_queue
outer_queue, self.session.queue = [], outer_queue
while self.session.queue:
if depth > 0:
depth -= 1
# There may still be records in the queue, but since depth is reached
# no more spidering is allowed, so we allow one more iteration, but
# only for fetching
elif depth == 0:
self.session.queue, outer_queue = self.split_queue(
self.session.rules.index(rule) == len(self.session.rules) - 1)
self.session.queue = self.process_records(rule)
self.session.save()
def run_script():
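    """Command-line entry point: parse options, build the rules and session, then run the spider/fetcher."""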
(parser, a) = ioutils.init_opts("<url> ['<pattern>'] [options]")
a("--recipe", metavar="<recipe>", dest="recipe", help="Use a spidering recipe")
a("--fetch", action="store_true", help="Fetch urls, don't dump")
a("--dump", action="store_true", help="Dump urls, don't fetch")
a("--host", action="store_true", help="Only spider this host")
a("--pause", type="int", metavar="<pause>", dest="pause", help="Pause for x seconds between requests")
a("--depth", type="int", metavar="<depth>", dest="depth", help="Spider to this depth")
(opts, args) = ioutils.parse_args(parser)
try:
if opts.fetch:
os.environ["FETCH_ALL"] = "1"
elif opts.dump:
os.environ["DUMP_ALL"] = "1"
if opts.host:
os.environ["HOST_FILTER"] = "1"
if opts.pause:
os.environ["PAUSE"] = str(opts.pause)
if opts.depth:
os.environ["DEPTH"] = str(opts.depth)
url = args[0]
if opts.recipe:
rules = recipe.load_recipe(opts.recipe, url)
else:
pattern = args[1]
rules = recipe.get_recipe(pattern, url)
session = Session.restore(url)
session.rules = rules
if session.queue is None:
session.queue = recipe.get_queue(url, mode=fetch.Fetcher.SPIDER)
if session.wb is None:
session.wb = web.Web(url)
except recipe.PatternError as e:
ioutils.write_err(ansicolor.red("%s\n" % e))
sys.exit(1)
except IndexError:
ioutils.opts_help(None, None, None, parser)
spiderfetcher = SpiderFetcher(session)
spiderfetcher.main()
if __name__ == "__main__":
run_script()
| gpl-2.0 | 3,621,005,735,312,478,000 | 36.814947 | 106 | 0.547619 | false |
BrainTech/openbci | obci/logic/configs/config_robot_3_dron.py | 1 | 1172 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class Config(object):
def __init__(self):
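        # a single state with 8 decision slots; only left/right/forward are bound to actions below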
self.number_of_decisions = 8
self.number_of_states = 1
self.states_configs = ['state', 'letters', 'actions',
'letters_solver', 'actions_solver']
self.other_configs = []
self.state = self.number_of_states * [self.number_of_decisions * [0]]
self.state[0] = [0, 0, 0, 0, 0, 0, 0, 0]
self.letters = self.number_of_states * [self.number_of_decisions * [""]]
self.letters[0] = [u"Left",u"", u"Right",u"",u"", u"Fwd",u"",u""]
self.letters_solver = self.number_of_states * [self.number_of_decisions
* [""]]
self.actions = self.number_of_states * [self.number_of_decisions * [""]]
self.actions[0] = [u"robot('left')" ,u"", u"robot('right')",u"",u"",
u"robot('forward')",u"",u""]
self.actions_solver = self.number_of_states * [self.number_of_decisions
* [""]]
def _finish_action(self):
return "finish()"
| gpl-3.0 | 9,074,573,015,595,318,000 | 49.956522 | 80 | 0.482082 | false |
IECS/MansOS | tools/IDE/src/newMote.py | 1 | 3693 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2012 the MansOS team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import wx
from motelist import Motelist
from Translater import localize
class NewMote(wx.Dialog):
def __init__(self, parent, API):
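        # build a small dialog with name/port fields plus add/close buttons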
self.API = API
super(NewMote, self).__init__(parent = parent, title = localize("Add new mote"))
self.main = wx.BoxSizer(wx.VERTICAL)
self.controls = wx.GridBagSizer(10, 10)
self.newMote = wx.Button(self, label = localize("Add mote"))
self.close = wx.Button(self, label = localize("Close"))
self.controls.Add(self.close, (2, 0), flag = wx.EXPAND | wx.ALL)
self.controls.Add(self.newMote, (2, 1), flag = wx.EXPAND | wx.ALL)
nameText = wx.StaticText(self, label = localize("Mote name") + ":")
self.controls.Add(nameText, (0, 0), flag = wx.EXPAND | wx.ALL)
self.name = wx.TextCtrl(self)
self.controls.Add(self.name, (0, 1), flag = wx.EXPAND | wx.ALL)
portText = wx.StaticText(self, label = localize("Mote port") + ":")
self.controls.Add(portText, (1, 0), flag = wx.EXPAND | wx.ALL)
self.port = wx.TextCtrl(self)
self.controls.Add(self.port, (1, 1), flag = wx.EXPAND | wx.ALL)
self.portError = wx.StaticText(self, label = "")
self.controls.Add(self.portError, (1, 2), flag = wx.EXPAND | wx.ALL)
self.main.Add(self.controls, 0, wx.EXPAND | wx.ALL, 3);
self.Bind(wx.EVT_BUTTON, self.addNewMote, self.newMote)
self.Bind(wx.EVT_BUTTON, self.doClose, self.close)
self.SetSizerAndFit(self.main)
self.SetAutoLayout(1)
self.Show()
def doClose(self, event):
self.Close()
def addNewMote(self, event):
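        # only add the mote if a device is present on the given port and the port is not already listed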
if not Motelist.portExists(self.port.GetValue()):
self.portError.SetLabel(localize("No device found on this port") + "!")
self.SetSizerAndFit(self.main)
self.SetAutoLayout(1)
self.Show()
else:
if Motelist.addMote(self.port.GetValue(), self.name.GetValue(), "User defined"):
self.API.updateUserMotes()
self.Close()
else:
self.portError.SetLabel(localize("There already is device on that port in list") + "!")
self.SetSizerAndFit(self.main)
self.SetAutoLayout(1)
self.Show()
| mit | 4,573,243,836,435,364,000 | 40.494382 | 103 | 0.661251 | false |
Thor77/TeamspeakStats | tsstats/client.py | 1 | 4656 | # -*- coding: utf-8 -*-
import datetime
import logging
from collections import MutableMapping
logger = logging.getLogger('tsstats')
class Clients(MutableMapping):
'''
A high-level-interface to multiple Client-objects
'''
def __init__(self, ident_map=None, *args, **kwargs):
'''
Initialize a new Client-collection
:param ident_map: Identity-map (see :doc:`identmap`)
:type ident_map: dict
'''
self.ident_map = ident_map or {}
self.store = dict()
self.update(dict(*args, **kwargs))
def apply_events(self, events):
'''
Apply events to this Client-collection
:param events: list of events to apply
:type events: list
'''
for event in events:
# find corresponding client
client = self.setdefault(
event.identifier,
Client(self.ident_map.get(event.identifier, event.identifier))
)
if event.action == 'set_nick':
client.nick = event.arg
continue
if event.arg_is_client:
# if arg is client, replace identifier with Client-obj
event = event._replace(
arg=self.setdefault(event.arg, Client(event.arg))
)
client.__getattribute__(event.action)(event.arg)
def __add__(self, client):
'''
Add a Client to the collection
:param client: Client to add to the collection
        :type client: Client
'''
identifier = client.identifier
self.store[self.ident_map.get(identifier, identifier)] = client
return self
def __iter__(self):
'''
Yield all Client-objects from the collection
'''
return iter(self.store.keys())
def __getitem__(self, key):
return self.store[self.ident_map.get(key, key)]
def __delitem__(self, key):
del self.store[key]
def __len__(self):
return len(self.store)
def __setitem__(self, key, value):
self.store[self.ident_map.get(key, key)] = value
def __str__(self):
return str(list(map(str, self)))
class Client(object):
'''
Client provides high-level-access to a Teamspeak-Client
'''
def __init__(self, identifier, nick=None):
'''
Initialize a new Client
:param identifier: Identifier of the client
:type identifier: int or str
'''
# public
self.identifier = identifier
self._nick = nick
self.nick_history = set()
self.connected = 0
self.onlinetime = datetime.timedelta()
self.kicks = 0
self.pkicks = 0
self.bans = 0
self.pbans = 0
self.last_seen = None
# private
self._last_connect = 0
@property
def nick(self):
return self._nick
@nick.setter
def nick(self, new_nick):
if self._nick and new_nick != self._nick:
# add old nick to history
self.nick_history.add(self._nick)
# set new nick
self._nick = new_nick
def connect(self, timestamp):
'''
Connect client at `timestamp`
:param timestamp: time of connect
:type timestamp: int
'''
logger.debug('[%s] CONNECT %s', timestamp, self)
self.connected += 1
self._last_connect = timestamp
def disconnect(self, timestamp):
'''
Disconnect client at `timestamp`
:param timestamp: time of disconnect
:type timestamp: int
'''
logger.debug('[%s] DISCONNECT %s', timestamp, self)
if not self.connected:
logger.debug('^ disconnect before connect')
return
self.connected -= 1
session_time = timestamp - self._last_connect
logger.debug('Session lasted %s', session_time)
self.onlinetime += session_time
self.last_seen = timestamp
def kick(self, target):
'''
Let client kick `target`
:param target: client to kick
:type target: Client
'''
logger.debug('KICK %s -> %s', self, target)
target.pkicks += 1
self.kicks += 1
def ban(self, target):
'''
Let client ban `target`
:param target: client to ban
:type target: Client
'''
logger.debug('BAN %s -> %s', self, target)
target.pbans += 1
self.bans += 1
def __str__(self):
return u'<{}, {}>'.format(self.identifier, self.nick)
def __repr__(self):
return self.__str__()
| mit | -7,876,739,570,331,043,000 | 25.913295 | 78 | 0.545318 | false |
angelverde/evadoc | models/menu.py | 2 | 1321 | # -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
response.KEY = KEY = 'youshallnotpass'
#########################################################################
## this is the main application menu add/remove items as required
#########################################################################
response.menu = [
]
es_nuevo = not auth.is_logged_in() and not session.id_facultad
response.nav = auth.navbar(mode="dropdown")
if es_nuevo:
    # if the user is not logged in and has no faculty set as an anonymous visitor
link = response.nav.element('a')
link.components[0] = 'Iniciar'
e = response.nav.element('.dropdown-menu')
e.insert(0, '')
e[0]['_class'] = 'divider'
e.insert(0, A(I(_class='icon-question-sign'),' Anónimo',
_href='#modalFacultad', **{'_data-toggle': 'modal'}))
elif not auth.is_logged_in() and session.id_facultad:
    # if the user is not logged in but already has a faculty set as an anonymous visitor
link = response.nav.element('a')
link.components[0] = 'Anónimo'
e = response.nav.element('.dropdown-menu')
e.insert(0, '')
e[0]['_class'] = 'divider'
e.insert(0, A(I(_class='icon-question-sign'),' Olvidame',
_href=URL('utilidades','borrar_session',hmac_key=KEY)))
if "auth" in locals(): auth.wikimenu()
| gpl-3.0 | 6,759,180,747,864,015,000 | 38.969697 | 79 | 0.571645 | false |
safwanrahman/kitsune | kitsune/kbforums/forms.py | 5 | 3535 | from django import forms
from django.utils.translation import ugettext_lazy as _lazy
from kitsune.kbforums.models import Thread, Post
from kitsune.sumo.form_fields import StrippedCharField
MSG_TITLE_REQUIRED = _lazy(u'Please provide a title.')
MSG_TITLE_SHORT = _lazy(u'Your title is too short (%(show_value)s '
u'characters). It must be at least %(limit_value)s '
u'characters.')
MSG_TITLE_LONG = _lazy(u'Please keep the length of your title to '
u'%(limit_value)s characters or less. It is '
u'currently %(show_value)s characters.')
MSG_CONTENT_REQUIRED = _lazy(u'Please provide a message.')
MSG_CONTENT_SHORT = _lazy(u'Your message is too short (%(show_value)s '
u'characters). It must be at least %(limit_value)s '
u'characters.')
MSG_CONTENT_LONG = _lazy(u'Please keep the length of your message to '
u'%(limit_value)s characters or less. It is '
u'currently %(show_value)s characters.')
class ReplyForm(forms.ModelForm):
"""Reply form for forum threads."""
content = StrippedCharField(
label=_lazy(u'Content:'),
min_length=5,
max_length=10000,
widget=forms.Textarea(attrs={'rows': 10, 'cols': 80}),
error_messages={'required': MSG_CONTENT_REQUIRED,
'min_length': MSG_CONTENT_SHORT,
'max_length': MSG_CONTENT_LONG})
class Meta:
model = Post
fields = ('content', )
class NewThreadForm(forms.Form):
"""Form to start a new thread."""
title = StrippedCharField(min_length=5, max_length=255,
label=_lazy(u'Title:'),
widget=forms.TextInput(attrs={'size': 80}),
error_messages={'required': MSG_TITLE_REQUIRED,
'min_length': MSG_TITLE_SHORT,
'max_length': MSG_TITLE_LONG})
content = StrippedCharField(
label=_lazy(u'Content:'),
min_length=5,
max_length=10000,
widget=forms.Textarea(attrs={'rows': 30, 'cols': 76}),
error_messages={'required': MSG_CONTENT_REQUIRED,
'min_length': MSG_CONTENT_SHORT,
'max_length': MSG_CONTENT_LONG})
class EditThreadForm(forms.ModelForm):
"""Form to start a new thread."""
title = StrippedCharField(min_length=5, max_length=255,
label=_lazy(u'Title:'),
widget=forms.TextInput(attrs={'size': 80}),
error_messages={'required': MSG_TITLE_REQUIRED,
'min_length': MSG_TITLE_SHORT,
'max_length': MSG_TITLE_LONG})
class Meta:
model = Thread
fields = ('title',)
class EditPostForm(forms.Form):
"""Form to edit an existing post."""
content = StrippedCharField(
label=_lazy(u'Content:'),
min_length=5,
max_length=10000,
widget=forms.Textarea(attrs={'rows': 30, 'cols': 76}),
error_messages={'required': MSG_CONTENT_REQUIRED,
'min_length': MSG_CONTENT_SHORT,
'max_length': MSG_CONTENT_LONG})
class Meta:
model = Post
exclude = ('thread', 'author', 'updated', 'created', 'updated_by')
| bsd-3-clause | 3,205,294,624,544,372,000 | 40.588235 | 78 | 0.534936 | false |
tomjelinek/pcs | pcs_test/tier0/lib/cib/test_resource_operations.py | 3 | 17581 | from functools import partial
from unittest import mock, TestCase
from lxml import etree
from pcs_test.tools import fixture
from pcs_test.tools.assertions import assert_report_item_list_equal
from pcs_test.tools.custom_mock import MockLibraryReportProcessor
from pcs_test.tools.misc import create_patcher
from pcs.common.reports import codes as report_codes
from pcs.lib.cib.resource import operations
from pcs.common.reports import ReportItemSeverity as severities
from pcs.lib.validate import ValuePair
# pylint: disable=no-self-use
patch_operations = create_patcher("pcs.lib.cib.resource.operations")
@patch_operations("get_remaining_defaults")
@patch_operations("complete_all_intervals")
@patch_operations("validate_different_intervals")
@patch_operations("validate_operation_list")
@patch_operations("normalized_to_operations")
@patch_operations("operations_to_normalized")
class Prepare(TestCase):
def test_prepare(
self,
operations_to_normalized,
normalized_to_operations,
validate_operation_list,
validate_different_intervals,
complete_all_intervals,
get_remaining_defaults,
):
validate_operation_list.return_value = ["options_report"]
validate_different_intervals.return_value = [
"different_interval_report"
]
operations_to_normalized.return_value = [
{"name": ValuePair("Start", "start")},
{"name": ValuePair("Monitor", "monitor")},
]
normalized_to_operations.return_value = [
{"name": "start"},
{"name": "monitor"},
]
report_processor = mock.MagicMock()
report_processor.report_list.return_value = report_processor
report_processor.has_errors = False
raw_operation_list = [
{"name": "Start"},
{"name": "Monitor"},
]
default_operation_list = [
{"name": "stop"},
]
allowed_operation_name_list = ["start", "stop", "monitor"]
allow_invalid = True
operations.prepare(
report_processor,
raw_operation_list,
default_operation_list,
allowed_operation_name_list,
allow_invalid,
)
operations_to_normalized.assert_called_once_with(raw_operation_list)
normalized_to_operations.assert_called_once_with(
operations_to_normalized.return_value
)
validate_operation_list.assert_called_once_with(
operations_to_normalized.return_value,
allowed_operation_name_list,
allow_invalid,
)
validate_different_intervals.assert_called_once_with(
normalized_to_operations.return_value
)
complete_all_intervals.assert_called_once_with(
normalized_to_operations.return_value
)
get_remaining_defaults.assert_called_once_with(
report_processor,
normalized_to_operations.return_value,
default_operation_list,
)
report_processor.report_list.assert_called_once_with(
[
"options_report",
"different_interval_report",
]
)
class ValidateDifferentIntervals(TestCase):
def test_return_empty_reports_on_empty_list(self):
operations.validate_different_intervals([])
def test_return_empty_reports_on_operations_without_duplication(self):
operations.validate_different_intervals(
[
{"name": "monitor", "interval": "10s"},
{"name": "monitor", "interval": "5s"},
{"name": "start", "interval": "5s"},
]
)
def test_return_report_on_duplicated_intervals(self):
assert_report_item_list_equal(
operations.validate_different_intervals(
[
{"name": "monitor", "interval": "3600s"},
{"name": "monitor", "interval": "60m"},
{"name": "monitor", "interval": "1h"},
{"name": "monitor", "interval": "60s"},
{"name": "monitor", "interval": "1m"},
{"name": "monitor", "interval": "5s"},
]
),
[
(
severities.ERROR,
report_codes.RESOURCE_OPERATION_INTERVAL_DUPLICATION,
{
"duplications": {
"monitor": [
["3600s", "60m", "1h"],
["60s", "1m"],
],
},
},
)
],
)
class MakeUniqueIntervals(TestCase):
def setUp(self):
self.report_processor = MockLibraryReportProcessor()
self.run = partial(
operations.make_unique_intervals, self.report_processor
)
def test_return_copy_input_when_no_interval_duplication(self):
operation_list = [
{"name": "monitor", "interval": "10s"},
{"name": "monitor", "interval": "5s"},
{
"name": "monitor",
},
{"name": "monitor", "interval": ""},
{"name": "start", "interval": "5s"},
]
self.assertEqual(operation_list, self.run(operation_list))
    def test_adopt_duplicated_values(self):
self.assertEqual(
self.run(
[
{"name": "monitor", "interval": "60s"},
{"name": "monitor", "interval": "1m"},
{"name": "monitor", "interval": "5s"},
{"name": "monitor", "interval": "6s"},
{"name": "monitor", "interval": "5s"},
{"name": "start", "interval": "5s"},
]
),
[
{"name": "monitor", "interval": "60s"},
{"name": "monitor", "interval": "61"},
{"name": "monitor", "interval": "5s"},
{"name": "monitor", "interval": "6s"},
{"name": "monitor", "interval": "7"},
{"name": "start", "interval": "5s"},
],
)
assert_report_item_list_equal(
self.report_processor.report_item_list,
[
(
severities.WARNING,
report_codes.RESOURCE_OPERATION_INTERVAL_ADAPTED,
{
"operation_name": "monitor",
"original_interval": "1m",
"adapted_interval": "61",
},
),
(
severities.WARNING,
report_codes.RESOURCE_OPERATION_INTERVAL_ADAPTED,
{
"operation_name": "monitor",
"original_interval": "5s",
"adapted_interval": "7",
},
),
],
)
    def test_keep_duplicated_values_when_not_valid_intervals(self):
self.assertEqual(
self.run(
[
{"name": "monitor", "interval": "some"},
{"name": "monitor", "interval": "some"},
]
),
[
{"name": "monitor", "interval": "some"},
{"name": "monitor", "interval": "some"},
],
)
class Normalize(TestCase):
def test_return_operation_with_the_same_values(self):
operation = {
"name": "monitor",
"role": "Master",
"timeout": "10",
}
self.assertEqual(
operation,
{
key: operations.normalize(key, value)
for key, value in operation.items()
},
)
def test_return_operation_with_normalized_values(self):
self.assertEqual(
{
"name": "monitor",
"role": "Master",
"timeout": "10",
"on-fail": "ignore",
"record-pending": "true",
"enabled": "1",
},
{
key: operations.normalize(key, value)
for key, value in {
"name": "monitor",
"role": "master",
"timeout": "10",
"on-fail": "Ignore",
"record-pending": "True",
"enabled": "1",
}.items()
},
)
class ValidateOperation(TestCase):
def assert_operation_produces_report(self, operation, report_list):
assert_report_item_list_equal(
operations.validate_operation_list(
[operation],
["monitor"],
),
report_list,
)
def test_return_empty_report_on_valid_operation(self):
self.assert_operation_produces_report(
{"name": "monitor", "role": "Master"}, []
)
def test_validate_all_individual_options(self):
self.assert_operation_produces_report(
{
"name": "monitro",
"role": "a",
"on-fail": "b",
"record-pending": "c",
"enabled": "d",
"id": "0",
"unknown": "invalid",
},
[
fixture.error(
report_codes.INVALID_OPTIONS,
option_names=["unknown"],
option_type="resource operation",
allowed=[
"OCF_CHECK_LEVEL",
"description",
"enabled",
"id",
"interval",
"interval-origin",
"name",
"on-fail",
"record-pending",
"role",
"start-delay",
"timeout",
],
allowed_patterns=[],
),
fixture.error(
report_codes.INVALID_OPTION_VALUE,
force_code=report_codes.FORCE_OPTIONS,
option_value="monitro",
option_name="operation name",
allowed_values=["monitor"],
cannot_be_empty=False,
forbidden_characters=None,
),
fixture.error(
report_codes.INVALID_OPTION_VALUE,
option_value="a",
option_name="role",
allowed_values=("Master", "Slave", "Started", "Stopped"),
cannot_be_empty=False,
forbidden_characters=None,
),
fixture.error(
report_codes.INVALID_OPTION_VALUE,
option_value="b",
option_name="on-fail",
allowed_values=[
"block",
"demote",
"fence",
"ignore",
"restart",
"restart-container",
"standby",
"stop",
],
cannot_be_empty=False,
forbidden_characters=None,
),
fixture.error(
report_codes.INVALID_OPTION_VALUE,
option_value="c",
option_name="record-pending",
allowed_values=["0", "1", "true", "false"],
cannot_be_empty=False,
forbidden_characters=None,
),
fixture.error(
report_codes.INVALID_OPTION_VALUE,
option_value="d",
option_name="enabled",
allowed_values=["0", "1", "true", "false"],
cannot_be_empty=False,
forbidden_characters=None,
),
fixture.error(
report_codes.INVALID_ID_BAD_CHAR,
id="0",
id_description="operation id",
is_first_char=True,
invalid_character="0",
),
],
)
def test_return_error_when_unknown_operation_attribute(self):
self.assert_operation_produces_report(
{
"name": "monitor",
"unknown": "invalid",
},
[
(
severities.ERROR,
report_codes.INVALID_OPTIONS,
{
"option_names": ["unknown"],
"option_type": "resource operation",
"allowed": sorted(operations.ATTRIBUTES),
"allowed_patterns": [],
},
None,
),
],
)
    def test_return_error_when_missing_key_name(self):
self.assert_operation_produces_report(
{"role": "Master"},
[
(
severities.ERROR,
report_codes.REQUIRED_OPTIONS_ARE_MISSING,
{
"option_names": ["name"],
"option_type": "resource operation",
},
None,
),
],
)
def test_return_error_when_both_interval_origin_and_start_delay(self):
self.assert_operation_produces_report(
{
"name": "monitor",
"interval-origin": "a",
"start-delay": "b",
},
[
(
severities.ERROR,
report_codes.MUTUALLY_EXCLUSIVE_OPTIONS,
{
"option_names": ["interval-origin", "start-delay"],
"option_type": "resource operation",
},
None,
),
],
)
def test_return_error_on_invalid_id(self):
self.assert_operation_produces_report(
{
"name": "monitor",
"id": "a#b",
},
[
(
severities.ERROR,
report_codes.INVALID_ID_BAD_CHAR,
{
"id": "a#b",
"id_description": "operation id",
"invalid_character": "#",
"is_first_char": False,
},
None,
),
],
)
class GetRemainingDefaults(TestCase):
@mock.patch("pcs.lib.cib.resource.operations.make_unique_intervals")
    def test_returns_remaining_operations(self, make_unique_intervals):
make_unique_intervals.side_effect = (
lambda report_processor, operations: operations
)
self.assertEqual(
operations.get_remaining_defaults(
report_processor=None,
operation_list=[{"name": "monitor"}],
default_operation_list=[{"name": "monitor"}, {"name": "start"}],
),
[{"name": "start"}],
)
class GetResourceOperations(TestCase):
resource_el = etree.fromstring(
"""
<primitive class="ocf" id="dummy" provider="pacemaker" type="Stateful">
<operations>
<op id="dummy-start" interval="0s" name="start" timeout="20"/>
<op id="dummy-stop" interval="0s" name="stop" timeout="20"/>
<op id="dummy-monitor-m" interval="10" name="monitor"
role="Master" timeout="20"/>
<op id="dummy-monitor-s" interval="11" name="monitor"
role="Slave" timeout="20"/>
</operations>
</primitive>
"""
)
resource_noop_el = etree.fromstring(
"""
<primitive class="ocf" id="dummy" provider="pacemaker" type="Stateful">
</primitive>
"""
)
def assert_op_list(self, op_list, expected_ids):
self.assertEqual([op.attrib.get("id") for op in op_list], expected_ids)
def test_all_operations(self):
self.assert_op_list(
operations.get_resource_operations(self.resource_el),
["dummy-start", "dummy-stop", "dummy-monitor-m", "dummy-monitor-s"],
)
def test_filter_operations(self):
self.assert_op_list(
operations.get_resource_operations(self.resource_el, ["start"]),
["dummy-start"],
)
def test_filter_more_operations(self):
self.assert_op_list(
operations.get_resource_operations(
self.resource_el, ["monitor", "stop"]
),
["dummy-stop", "dummy-monitor-m", "dummy-monitor-s"],
)
def test_filter_none(self):
self.assert_op_list(
operations.get_resource_operations(self.resource_el, ["promote"]),
[],
)
def test_no_operations(self):
self.assert_op_list(
operations.get_resource_operations(self.resource_noop_el), []
)
| gpl-2.0 | -154,898,943,181,782,850 | 33.005803 | 80 | 0.451567 | false |
jostep/tensorflow | tensorflow/python/framework/tensor_util_test.py | 7 | 32860 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for tensor_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class TensorUtilTest(test.TestCase):
def testFloat(self):
value = 10.0
t = tensor_util.make_tensor_proto(value)
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape {}
float_val: %.1f
""" % value, t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array(value, dtype=np.float32), a)
def testFloatN(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0])
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatTyped(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], dtype=dtypes.float32)
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatTypeCoerce(self):
t = tensor_util.make_tensor_proto([10, 20, 30], dtype=dtypes.float32)
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatTypeCoerceNdarray(self):
arr = np.asarray([10, 20, 30], dtype="int")
t = tensor_util.make_tensor_proto(arr, dtype=dtypes.float32)
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatSizes(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[1, 3])
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([[10.0, 20.0, 30.0]], dtype=np.float32), a)
def testFloatSizes2(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[3, 1])
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } dim { size: 1 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } dim { size: 1 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([[10.0], [20.0], [30.0]], dtype=np.float32), a)
def testFloatSizesLessValues(self):
t = tensor_util.make_tensor_proto(10.0, shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
float_val: 10.0
""", t)
# No conversion to Ndarray for this one: not enough values.
def testFloatNpArrayFloat64(self):
t = tensor_util.make_tensor_proto(
np.array([[10.0, 20.0, 30.0]], dtype=np.float64))
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_DOUBLE
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "@$\000\000\000\000\000\000@4\000\000\000\000\000\000@>\000\000\000\000\000\000"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_DOUBLE
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000\000\000\000\000$@\000\000\000\000\000\0004@\000\000\000\000\000\000>@"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float64, a.dtype)
self.assertAllClose(
np.array([[10.0, 20.0, 30.0]], dtype=np.float64),
tensor_util.MakeNdarray(t))
def testFloatTypesWithImplicitRepeat(self):
for dtype, nptype in [(dtypes.float32, np.float32),
(dtypes.float64, np.float64)]:
t = tensor_util.make_tensor_proto([10.0], shape=[3, 4], dtype=dtype)
a = tensor_util.MakeNdarray(t)
self.assertAllClose(
np.array(
[[10.0, 10.0, 10.0, 10.0],
[10.0, 10.0, 10.0, 10.0],
[10.0, 10.0, 10.0, 10.0]],
dtype=nptype),
a)
def testHalf(self):
t = tensor_util.make_tensor_proto(np.array([10.0, 20.0], dtype=np.float16))
self.assertProtoEquals("""
dtype: DT_HALF
tensor_shape {
dim {
size: 2
}
}
half_val: 18688
half_val: 19712
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float16, a.dtype)
self.assertAllClose(np.array([10.0, 20.0], dtype=np.float16), a)
def testInt(self):
t = tensor_util.make_tensor_proto(10)
self.assertProtoEquals("""
dtype: DT_INT32
tensor_shape {}
int_val: 10
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int32, a.dtype)
self.assertAllClose(np.array(10, dtype=np.int32), a)
def testLargeInt(self):
value = np.iinfo(np.int64).max
t = tensor_util.make_tensor_proto(value)
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: %d
""" % value, t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array(value, dtype=np.int64), a)
def testLargeNegativeInt(self):
# We don't use the min np.int64 value here
# because it breaks np.abs().
#
# np.iinfo(np.int64).min = -9223372036854775808
# np.iinfo(np.int64).max = 9223372036854775807
# np.abs(-9223372036854775808) = -9223372036854775808
value = np.iinfo(np.int64).min + 1
t = tensor_util.make_tensor_proto(value)
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: %d
""" % value, t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array(value, dtype=np.int64), a)
def testIntNDefaultType(self):
t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_INT32
tensor_shape { dim { size: 2 } dim { size: 2 } }
tensor_content: "\000\000\000\\n\000\000\000\024\000\000\000\036\000\000\000("
""", t)
else:
self.assertProtoEquals("""
dtype: DT_INT32
tensor_shape { dim { size: 2 } dim { size: 2 } }
tensor_content: "\\n\000\000\000\024\000\000\000\036\000\000\000(\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int32, a.dtype)
self.assertAllClose(np.array([[10, 20], [30, 40]], dtype=np.int32), a)
def testIntTypes(self):
for dtype, nptype in [(dtypes.int32, np.int32),
(dtypes.uint8, np.uint8),
(dtypes.uint16, np.uint16),
(dtypes.int16, np.int16),
(dtypes.int8, np.int8)]:
# Test with array.
t = tensor_util.make_tensor_proto([10, 20, 30], dtype=dtype)
self.assertEquals(dtype, t.dtype)
self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
a = tensor_util.MakeNdarray(t)
self.assertEquals(nptype, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
# Test with ndarray.
t = tensor_util.make_tensor_proto(np.array([10, 20, 30], dtype=nptype))
self.assertEquals(dtype, t.dtype)
self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
a = tensor_util.MakeNdarray(t)
self.assertEquals(nptype, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
def testIntTypesWithImplicitRepeat(self):
for dtype, nptype in [(dtypes.int64, np.int64),
(dtypes.int32, np.int32),
(dtypes.uint8, np.uint8),
(dtypes.uint16, np.uint16),
(dtypes.int16, np.int16),
(dtypes.int8, np.int8)]:
self.assertAllEqual(
np.array(
[[10, 10, 10, 10],
[10, 10, 10, 10],
[10, 10, 10, 10]],
dtype=nptype),
tensor_util.MakeNdarray(
tensor_util.make_tensor_proto(
[10],
shape=[3, 4],
dtype=dtype)))
def testLong(self):
t = tensor_util.make_tensor_proto(10, dtype=dtypes.int64)
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: 10
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array(10, dtype=np.int64), a)
def testLongN(self):
t = tensor_util.make_tensor_proto(
[10, 20, 30], shape=[1, 3], dtype=dtypes.int64)
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000\000\000\000\000\000\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array([[10, 20, 30]], dtype=np.int64), a)
def testLongNpArray(self):
t = tensor_util.make_tensor_proto(np.array([10, 20, 30]))
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000\000\000\000\000\000\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape { dim { size: 3 } }
tensor_content: "\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=np.int64), a)
def testQuantizedTypes(self):
# Test with array.
data = [(21,), (22,), (23,)]
t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint32)
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_QINT32
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000\000\025\000\000\000\026\000\000\000\027"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_QINT32
tensor_shape { dim { size: 3 } }
tensor_content: "\025\000\000\000\026\000\000\000\027\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(dtypes.qint32.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.quint8)
self.assertProtoEquals("""
dtype: DT_QUINT8
tensor_shape { dim { size: 3 } }
tensor_content: "\025\026\027"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(dtypes.quint8.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint8)
self.assertProtoEquals("""
dtype: DT_QINT8
tensor_shape { dim { size: 3 } }
tensor_content: "\025\026\027"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(dtypes.qint8.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.quint16)
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_QUINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\000\025\000\026\000\027"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_QUINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\025\000\026\000\027\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(dtypes.quint16.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint16)
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_QINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\000\025\000\026\000\027"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_QINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\025\000\026\000\027\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(dtypes.qint16.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
def testString(self):
t = tensor_util.make_tensor_proto("foo")
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape {}
string_val: "foo"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertEquals([b"foo"], a)
def testStringWithImplicitRepeat(self):
t = tensor_util.make_tensor_proto("f", shape=[3, 4])
a = tensor_util.MakeNdarray(t)
self.assertAllEqual(np.array([[b"f"] * 4] * 3, dtype=np.object), a)
def testStringN(self):
t = tensor_util.make_tensor_proto([b"foo", b"bar", b"baz"], shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 1 } dim { size: 3 } }
string_val: "foo"
string_val: "bar"
string_val: "baz"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array([[b"foo", b"bar", b"baz"]]), a)
def testStringNpArray(self):
t = tensor_util.make_tensor_proto(
np.array([[b"a", b"ab"], [b"abc", b"abcd"]]))
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 2 } dim { size: 2 } }
string_val: "a"
string_val: "ab"
string_val: "abc"
string_val: "abcd"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array([[b"a", b"ab"], [b"abc", b"abcd"]]), a)
def testStringTuple(self):
t = tensor_util.make_tensor_proto((b"a", b"ab", b"abc", b"abcd"))
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 4 } }
string_val: "a"
string_val: "ab"
string_val: "abc"
string_val: "abcd"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array((b"a", b"ab", b"abc", b"abcd")), a)
def testStringNestedTuple(self):
t = tensor_util.make_tensor_proto(((b"a", b"ab"), (b"abc", b"abcd")))
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 2 } dim { size: 2 } }
string_val: "a"
string_val: "ab"
string_val: "abc"
string_val: "abcd"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array(((b"a", b"ab"), (b"abc", b"abcd"))), a)
def testComplex64(self):
t = tensor_util.make_tensor_proto((1 + 2j), dtype=dtypes.complex64)
self.assertProtoEquals("""
dtype: DT_COMPLEX64
tensor_shape {}
scomplex_val: 1
scomplex_val: 2
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(np.array(1 + 2j), a)
def testComplex128(self):
t = tensor_util.make_tensor_proto((1 + 2j), dtype=dtypes.complex128)
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape {}
dcomplex_val: 1
dcomplex_val: 2
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex128, a.dtype)
self.assertAllEqual(np.array(1 + 2j), a)
def testComplexWithImplicitRepeat(self):
for dtype, np_dtype in [(dtypes.complex64, np.complex64),
(dtypes.complex128, np.complex128)]:
t = tensor_util.make_tensor_proto((1 + 1j), shape=[3, 4], dtype=dtype)
a = tensor_util.MakeNdarray(t)
self.assertAllClose(
np.array(
[[(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)],
[(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)],
[(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)]],
dtype=np_dtype),
a)
def testComplex64N(self):
t = tensor_util.make_tensor_proto(
[(1 + 2j), (3 + 4j), (5 + 6j)], shape=[1, 3], dtype=dtypes.complex64)
self.assertProtoEquals("""
dtype: DT_COMPLEX64
tensor_shape { dim { size: 1 } dim { size: 3 } }
scomplex_val: 1
scomplex_val: 2
scomplex_val: 3
scomplex_val: 4
scomplex_val: 5
scomplex_val: 6
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(np.array([[(1 + 2j), (3 + 4j), (5 + 6j)]]), a)
def testComplex128N(self):
t = tensor_util.make_tensor_proto(
[(1 + 2j), (3 + 4j), (5 + 6j)], shape=[1, 3], dtype=dtypes.complex128)
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape { dim { size: 1 } dim { size: 3 } }
dcomplex_val: 1
dcomplex_val: 2
dcomplex_val: 3
dcomplex_val: 4
dcomplex_val: 5
dcomplex_val: 6
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex128, a.dtype)
self.assertAllEqual(np.array([[(1 + 2j), (3 + 4j), (5 + 6j)]]), a)
def testComplex64NpArray(self):
t = tensor_util.make_tensor_proto(
np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]),
dtype=dtypes.complex64)
# scomplex_val are real_0, imag_0, real_1, imag_1, ...
self.assertProtoEquals("""
dtype: DT_COMPLEX64
tensor_shape { dim { size: 2 } dim { size: 2 } }
scomplex_val: 1
scomplex_val: 2
scomplex_val: 3
scomplex_val: 4
scomplex_val: 5
scomplex_val: 6
scomplex_val: 7
scomplex_val: 8
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(
np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]), a)
def testComplex128NpArray(self):
t = tensor_util.make_tensor_proto(
np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]),
dtype=dtypes.complex128)
# scomplex_val are real_0, imag_0, real_1, imag_1, ...
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape { dim { size: 2 } dim { size: 2 } }
dcomplex_val: 1
dcomplex_val: 2
dcomplex_val: 3
dcomplex_val: 4
dcomplex_val: 5
dcomplex_val: 6
dcomplex_val: 7
dcomplex_val: 8
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex128, a.dtype)
self.assertAllEqual(
np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]), a)
def testUnsupportedDTypes(self):
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto(np.array([1]), 0)
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto(3, dtype=dtypes.qint8)
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto([3], dtype=dtypes.qint8)
# Validate the helpful error message when trying to convert an
# unconvertible list as strings.
with self.assertRaisesRegexp(TypeError, "Failed to convert object"):
tensor_util.make_tensor_proto([tensor_shape.Dimension(1)])
def testTensorShapeVerification(self):
array = np.array([[1], [2]])
correct_shape = (2, 1)
incorrect_shape = (1, 2)
tensor_util.make_tensor_proto(array, shape=correct_shape, verify_shape=True)
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto(
array, shape=incorrect_shape, verify_shape=True)
def testShapeTooLarge(self):
with self.assertRaises(ValueError):
tensor_util.make_tensor_proto(np.array([1, 2]), shape=[1])
def testLowRankSupported(self):
t = tensor_util.make_tensor_proto(np.array(7))
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: 7
""", t)
def testShapeEquals(self):
t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
self.assertTrue(tensor_util.ShapeEquals(t, [2, 2]))
self.assertTrue(tensor_util.ShapeEquals(t, (2, 2)))
self.assertTrue(
tensor_util.ShapeEquals(t, tensor_shape.as_shape([2, 2]).as_proto()))
self.assertFalse(tensor_util.ShapeEquals(t, [5, 3]))
self.assertFalse(tensor_util.ShapeEquals(t, [1, 4]))
self.assertFalse(tensor_util.ShapeEquals(t, [4]))
def testMockArray(self):
class MockArray(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return np.asarray(self.array, dtype)
with self.test_session() as sess:
ma = MockArray(np.array([10, 20, 30]))
t = ops.convert_to_tensor(ma)
a = sess.run(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=np.int64), a)
class ConstantValueTest(test.TestCase):
def testConstant(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = constant_op.constant(np_val)
self.assertAllClose(np_val, tensor_util.constant_value(tf_val))
np_val = np.random.rand(3, 0, 7).astype(np.float32)
tf_val = constant_op.constant(np_val)
self.assertAllClose(np_val, tensor_util.constant_value(tf_val))
def testUnknown(self):
tf_val = gen_state_ops._variable(
shape=[3, 4, 7],
dtype=dtypes.float32,
name="tf_val",
container="",
shared_name="")
self.assertIs(None, tensor_util.constant_value(tf_val))
def testShape(self):
np_val = np.array([1, 2, 3], dtype=np.int32)
tf_val = array_ops.shape(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(np_val, c_val)
self.assertEqual(np.int32, c_val.dtype)
def testFill(self):
np_val = np.array([-1, -1, -1], dtype=np.float32)
tf_val = array_ops.fill([3], constant_op.constant(-1.0))
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(np_val, c_val)
self.assertEqual(np.float32, c_val.dtype)
def testSize(self):
tf_val = array_ops.size(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(6, c_val)
def testSizeOfScalar(self):
tf_val = array_ops.size(constant_op.constant(0.0))
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(1, c_val)
self.assertEqual(np.ndarray, type(c_val))
def testRank(self):
tf_val = array_ops.rank(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(np.ndarray, type(c_val))
self.assertEqual((), c_val.shape)
self.assertEqual(3, c_val)
# Repeat test using array_ops.rank_internal to avoid the optimization that
# happens in the rank function.
tf_val = array_ops.rank_internal(
constant_op.constant(
0.0, shape=[1, 2, 3]), optimize=False)
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(np.ndarray, type(c_val))
self.assertEqual((), c_val.shape)
self.assertEqual(3, c_val)
self.assertEqual([3], c_val)
def testCast(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = math_ops.cast(constant_op.constant(np_val), dtypes.float64)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val.astype(np.float64), c_val)
np_val = np.random.rand(3, 0, 7).astype(np.float32)
tf_val = math_ops.cast(constant_op.constant(np_val), dtypes.float64)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val.astype(np.float64), c_val)
def testConcat(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = array_ops.concat(
[np_val[0:1, :, :], np_val[1:2, :, :], np_val[2:3, :, :]], 0)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val, c_val)
tf_val = array_ops.concat(
[np_val[0, :, :], np_val[1, :, :], np_val[2, :, :]],
array_ops.placeholder(dtypes.int32))
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
tf_val = array_ops.concat([
np_val[0, :, :], array_ops.placeholder(dtypes.float32), np_val[2, :, :]
], 1)
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
def testPack(self):
inputs = [np.random.rand(4, 7) for _ in range(3)]
np_val = np.array(inputs)
tf_val = array_ops.stack(inputs)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val, c_val)
tf_val = array_ops.stack(
[inputs[0], array_ops.placeholder(dtypes.float32), inputs[2]])
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
def testPack_Partial(self):
input_ = np.random.rand(4, 7)
tf_val = array_ops.stack([input_, array_ops.placeholder(dtypes.float32)])
c_val = tensor_util.constant_value(tf_val, partial=True)
self.assertAllClose(input_, c_val[0])
self.assertIsNone(c_val[1])
class ConstantValueAsShapeTest(test.TestCase):
def testConstant(self):
np_val = np.random.rand(3).astype(np.int32)
tf_val = constant_op.constant(np_val)
self.assertEqual(
tensor_shape.TensorShape(np_val),
tensor_util.constant_value_as_shape(tf_val))
tf_val = constant_op.constant([], dtype=dtypes.int32)
self.assertEqual(
tensor_shape.TensorShape([]),
tensor_util.constant_value_as_shape(tf_val))
def testShape(self):
tf_val = array_ops.shape(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual(tensor_shape.TensorShape([1, 2, 3]), c_val)
def testPack(self):
tf_val = array_ops.stack(
[constant_op.constant(16), 37, array_ops.placeholder(dtypes.int32)])
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 37, None], c_val.as_list())
def testConcat(self):
tf_val = array_ops.concat(
[[16, 37], array_ops.placeholder(
dtypes.int32, shape=(2,))], 0)
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 37, None, None], c_val.as_list())
tf_val = array_ops.concat(
[[16, 37], array_ops.placeholder(
dtypes.int32, shape=(1,)), [48]], 0)
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 37, None, 48], c_val.as_list())
def testSlice(self):
tf_val = array_ops.placeholder(dtypes.int32, shape=(4,))[0:2]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([None, None], c_val.as_list())
# begin:end
tf_val = constant_op.constant([10, 20, 30])[1:3]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([20, 30], c_val.as_list())
# begin:end:stride
tf_val = array_ops.strided_slice(
constant_op.constant([10, 20, 30]), [1], [3], strides=[2])
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([20], c_val.as_list())
# [1, 2, 16, 37, None, 48]
tf_val_orig = array_ops.concat(
[[1, 2, 16, 37], array_ops.placeholder(
dtypes.int32, shape=(1,)), [48]], 0)
# begin: no end
tf_val = tf_val_orig[2:]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 37, None, 48], c_val.as_list())
# begin::negative slice
tf_val = tf_val_orig[2::-1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 2, 1], c_val.as_list())
# :end:negative slice
tf_val = tf_val_orig[:1:-2]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([48, 37], c_val.as_list())
# begin:end:negative slice
tf_val = tf_val_orig[3:1:-1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([37, 16], c_val.as_list())
# begin:negative end:slice
tf_val = tf_val_orig[1:-3:1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([2, 16], c_val.as_list())
# negative begin::slice
tf_val = tf_val_orig[-3::1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([37, None, 48], c_val.as_list())
# negative begin::negative slice
tf_val = tf_val_orig[-3::-1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([37, 16, 2, 1], c_val.as_list())
# negative begin:negative end:negative slice
tf_val = tf_val_orig[-3:-5:-1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([37, 16], c_val.as_list())
# Do not support shape inference for additional arguments
tf_val = constant_op.constant([10, 20, 30])[...]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([None, None, None], c_val.as_list())
# Do not support shape inference for tensor slices.
tf_val = constant_op.constant([10, 20, 30])[
array_ops.placeholder(dtypes.int32, shape=()):]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual(tensor_shape.unknown_shape(), c_val)
# Do not support shape inference for higher rank
with self.assertRaises(ValueError):
tf_val = constant_op.constant([[10], [20], [30]])[:, 0:]
c_val = tensor_util.constant_value_as_shape(tf_val)
if __name__ == "__main__":
test.main()
| apache-2.0 | 8,769,789,769,079,617,000 | 34.67861 | 123 | 0.601795 | false |
dklyle/trove-dashboard | trove_dashboard/content/database_backups/views.py | 2 | 4329 | # Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for displaying database backups.
"""
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables as horizon_tables
from horizon.utils import filters
from horizon import views as horizon_views
from horizon import workflows as horizon_workflows
from trove_dashboard import api
from trove_dashboard.content.database_backups import tables
from trove_dashboard.content.database_backups \
import workflows
class IndexView(horizon_tables.DataTableView):
table_class = tables.BackupsTable
template_name = 'project/database_backups/index.html'
page_title = _("Backups")
def _get_extra_data(self, backup):
"""Apply extra info to the backup."""
instance_id = backup.instance_id
# TODO(rdopieralski) It's not clear where this attribute is supposed
# to come from. At first glance it looks like it will always be {}.
if not hasattr(self, '_instances'):
self._instances = {}
instance = self._instances.get(instance_id)
if instance is None:
try:
instance = api.trove.instance_get(self.request, instance_id)
except Exception:
instance = _('Not Found')
backup.instance = instance
return backup
def get_data(self):
# TODO(rmyers) Add pagination support after it is available
# https://blueprints.launchpad.net/trove/+spec/paginate-backup-list
try:
backups = api.trove.backup_list(self.request)
backups = map(self._get_extra_data, backups)
except Exception:
backups = []
msg = _('Error getting database backup list.')
exceptions.handle(self.request, msg)
return backups
class BackupView(horizon_workflows.WorkflowView):
workflow_class = workflows.CreateBackup
template_name = "project/database_backups/backup.html"
page_title = _("Backup Database")
def get_context_data(self, **kwargs):
context = super(BackupView, self).get_context_data(**kwargs)
context["instance_id"] = kwargs.get("instance_id")
self._instance = context['instance_id']
return context
class DetailView(horizon_views.APIView):
template_name = "project/database_backups/details.html"
page_title = _("Backup Details: {{ backup.name }}")
def get_data(self, request, context, *args, **kwargs):
backup_id = kwargs.get("backup_id")
try:
backup = api.trove.backup_get(request, backup_id)
created_at = filters.parse_isotime(backup.created)
updated_at = filters.parse_isotime(backup.updated)
backup.duration = updated_at - created_at
except Exception:
redirect = reverse('horizon:project:database_backups:index')
msg = _('Unable to retrieve details for backup: %s') % backup_id
exceptions.handle(self.request, msg, redirect=redirect)
try:
if(hasattr(backup, 'parent_id') and backup.parent_id is not None):
backup.parent = api.trove.backup_get(request, backup.parent_id)
except Exception:
redirect = reverse('horizon:project:database_backups:index')
msg = (_('Unable to retrieve details for parent backup: %s')
% backup.parent_id)
exceptions.handle(self.request, msg, redirect=redirect)
try:
instance = api.trove.instance_get(request, backup.instance_id)
except Exception:
instance = None
context['backup'] = backup
context['instance'] = instance
return context
| apache-2.0 | -4,265,234,797,580,215,300 | 38.354545 | 79 | 0.657889 | false |
showerst/openstates | openstates/or/legislators.py | 1 | 4537 | from billy.scrape.legislators import LegislatorScraper, Legislator
from openstates.utils import LXMLMixin
import re
def itergraphs(elements, break_):
buf = []
for element in elements:
if element.tag == break_:
yield buf
buf = []
continue
buf.append(element)
if buf:
yield buf
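# itergraphs() chunks a flat sequence of lxml elements into groups separated by a given
# tag name. For example, for elements whose tags are ['a', 'strong', 'br', 'a', 'br', 'em'],
# itergraphs(elements, 'br') yields [<a>, <strong>], then [<a>], then [<em>]. The scraper
# below relies on this to split each profile <p> block into key/value runs delimited by <br>.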
class ORLegislatorScraper(LegislatorScraper, LXMLMixin):
jurisdiction = 'or'
URLs = {
"lower": "http://www.oregonlegislature.gov/house/Pages/RepresentativesAll.aspx",
"upper": "http://www.oregonlegislature.gov/senate/Pages/SenatorsAll.aspx",
}
def scrape(self, chamber, term):
url = self.URLs[chamber]
page = self.lxmlize(url)
for block in page.xpath("//div[@class='ms-rtestate-field']")[1:-1]:
# Each legislator block.
photo_block = block.xpath("ancestor::td/preceding-sibling::td")
if len(photo_block) == 0:
continue
h2s = block.xpath(".//h2/a")
if len(h2s) != 1:
# We've got a Vacant person.
print("Found a Vacant position. Skipping block.")
continue
h2, = h2s
# Need to remove weird Unicode spaces from their names
name = " ".join(h2.text.split())
name = re.sub(r'^\W?(Senator|Representative)\W?(?=[A-Z])', "", name)
photo_block, = photo_block
# (The <td> before ours was the photo)
img, = photo_block.xpath("*")
img = img.attrib['src']
info = {}
# Right, now let's get info out of their little profile box.
for entry in block.xpath(".//p"):
key = None
for kvpair in itergraphs(entry.xpath("./*"), 'br'):
# OK. We either get the tail or the next element
# (usually an <a> tag)
if len(kvpair) == 1:
key, = kvpair
value = key.tail.strip() if key.tail else None
if value:
value = re.sub("\s+", " ", value).strip()
elif len(kvpair) == 2:
key, value = kvpair
if value.text_content().strip() == "arty:":
key = value
value = value.tail
elif len(kvpair) == 3:
k1, k2, value = kvpair
# As seen with a <stong><strong>Email:</strong></strong>
t = lambda x: x.text_content().strip()
assert t(k1) == "" or t(k2) == ""
if t(k1) != "":
key = k1
else:
key = k2
else:
# Never seen text + an <a> tag, perhaps this can happen.
raise ValueError("Too many elements. Something changed")
key = key.text_content().strip(" :")
if value is None:
# A page has the value in a <strong> tag. D'oh.
key, value = (x.strip() for x in key.rsplit(":", 1))
key = re.sub("\s+", " ", key).strip()
key = key.replace(":", "")
if key == "arty":
key = "Party"
info[key] = value
info['District'] = info['District'].encode(
'ascii', 'ignore').strip()
info['Party'] = info['Party'].strip(": ").replace(u"\u00a0","")
leg = Legislator(term=term,
url=h2.attrib['href'],
chamber=chamber,
full_name=name,
party=info['Party'],
district=info['District'],
photo_url=img)
leg.add_source(url)
phone = info.get('Capitol Phone', info.get('apitol Phone'))
if hasattr(phone, 'text_content'):
phone = phone.text_content()
leg.add_office(type='capitol',
name='Capitol Office',
address=info['Capitol Address'],
phone=phone,
email=info['Email'].attrib['href'].replace("mailto:",""))
self.save_legislator(leg)
| gpl-3.0 | 4,962,277,494,263,386,000 | 37.12605 | 88 | 0.438836 | false |
schreiberx/sweet | tests/50_test_sphere_sph_solver_real_and_complex/test.py | 1 | 1312 | #! /usr/bin/env python3
import sys
import os
os.chdir(os.path.dirname(sys.argv[0]))
from mule_local.JobMule import *
from itertools import product
from mule.exec_program import *
exec_program('mule.benchmark.cleanup_all', catch_output=False)
jg = JobGeneration()
jg.compile.unit_test="test_sphere_sph_solver_real_and_complex"
jg.compile.plane_spectral_space="disable"
jg.compile.sphere_spectral_space="enable"
jg.compile.mode = "release"
jg.runtime.sphere_radius = 1
jg.runtime.sphere_rotating_coriolis_omega = 1
unique_id_filter = []
unique_id_filter.append('compile')
jg.unique_id_filter = unique_id_filter
#params_runtime_mode_res = [64, 128, 256, 512, 1024, 2048]
params_runtime_mode_res = [64, 128, 256, 512, 1024]
params_runtime_r = [1, 1e3, 1e6]
params_runtime_f = [1, 1e-3, 1e-6]
jg.runtime.verbosity = 5
for (
jg.runtime.space_res_spectral,
jg.runtime.sphere_radius,
jg.runtime.sphere_rotating_coriolis_omega,
) in product(
params_runtime_mode_res,
params_runtime_r,
params_runtime_f,
):
jg.gen_jobscript_directory()
exitcode = exec_program('mule.benchmark.jobs_run_directly', catch_output=False)
if exitcode != 0:
sys.exit(exitcode)
print("Benchmarks successfully finished")
exec_program('mule.benchmark.cleanup_all', catch_output=False)
| mit | 4,791,248,669,977,047,000 | 22.428571 | 79 | 0.720274 | false |
caelan/pyplanners | retired/planners/connectivity_graph/relaxed_connectivity_graph.py | 1 | 12764 | from .connectivity_graph import *
from misc.utils import *
from .utils import PRUNE_INACTIVE, CYCLE_INACTIVE, UPDATE_INACTIVE
# TODO
# - make the levels based on subproblem switches (use problem_reachable_sources). Don't count things moved by the same controller
# - flag to terminate costs/levels when the goals are reached (sometimes wouldn't want that though?)
# - support dynamically updating costs. Currently, my compute_costs/levels method assumes costs are INF
# - move these to another file to compute the costs generically?
# - cost per instance of the variable returned as a function of each independent problem?
# TODO
# - active edge stuff
# Could include reachability checks that don't propagate to things that go through the goal (but what's really the point)
# TODO - call delete-relaxation graph
# Make related plan graph extend delete relation graph
# Incorporate set-additive
class RelaxedConnectivityGraph(ConnectivityGraph):
  # Need to call this when the goal starts to open problems on the queue
# Start opening problems for actions as soon as they have all intermediates reachable
# Just the connected parts of the connector to do opening of problem when solving things and stuff
# TODO - is it easier to deactivate by searching from node to or searching from to node
# TODO - Mark attempted edges to discount for growing stuff
# TODO - max depth of the search?
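  # grow() expands the relaxed graph from a start state: vertices whose substate is
  # included in start are marked reachable, the goal connector is activated, and queued
  # subplanners are repeatedly invoked (subject to the max_time/max_iterations/max_cycles/
  # max_generations budgets) until the goal becomes reachable (when greedy) or the queue
  # empties. Returns a (reachable, exhausted) pair, where exhausted means the queue ran dry.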
def grow(self, start, max_time=INF, max_iterations=INF, max_cycles=INF, max_generations=INF, greedy=True):
#if max_time == INF and max_iterations == INF and max_cycles == INF and not greedy:
# raise RuntimeError('RelaxedPlanGraph.grow() will never terminate')
start_vertices, start_connectors, start_edges = len(self.vertices), len(self.connectors), len(self.edges)
start_time, iterations, cycles = time(), 0, 0
self.start = start
self.greedy = greedy
self.queue = deque([None])
for connector in self.connectors.values(): connector.reset()
for edge in self.edges.values(): edge.reset()
for vertex in self.vertices.values():
vertex.reset()
if self.start.includes(vertex.substate):
vertex.initial = True
#print(start, '\n')
for vertex in self.initial_vertices():
vertex.set_reachable()
#print(self.queue)
#print([c for c in self.connectors.values() if c.active], '\n')
self.goal.set_active()
#print(self.queue)
#print([c for c in self.connectors.values() if c.active], '\n')
#self.graph('start.pdf', reachable=False)
while time() - start_time <= max_time and iterations <= max_iterations and cycles <= max_cycles:
if self.greedy and self.goal.reachable: break
subplanner = self.queue.popleft()
if subplanner is None: # Dummy node to count the number of cycles
cycles += 1
if len(self.queue) == 0: break
self.queue.append(None)
continue
if not subplanner.queued or subplanner.generation_history[self.state_uses[start]] >= max_generations: continue # NOTE - treating queued as whether it should be in the queue
subplanner.queued = False
#print(subplanner.goal_connector.active, self.is_connector_active(subplanner.goal_connector))
if UPDATE_INACTIVE: self.update_active()
if CYCLE_INACTIVE: self.is_connector_active(subplanner.goal_connector)
if not subplanner.goal_connector.active: continue
iterations += 1 # Iterations of planner processing
#print(subplanner.goal_connector#, [e for e in subplanner.goal_connector.edges if e.active])
#raw_input()
#temp_vertices, temp_connectors, temp_edges = len(self.vertices), len(self.connectors), len(self.edges)
subplanner()
#print(len(self.vertices) - temp_vertices, len(self.connectors) - temp_connectors, len(self.edges) - temp_edges)
subplanner.generation_history[self.state_uses[start]] += 1 # NOTE - This should also be updated on the first call
if subplanner.exhausted:
# TODO - something special if the problem is exhausted
continue
if subplanner.goal_connector.active and not subplanner.queued:
self.queue.append(subplanner); subplanner.queued = True
#nodes = self.vertices.values() + self.edges.values() + self.connectors.values()
#for n in nodes: if n.active: print n,
#print
#print sum(n.active for n in nodes), '/', len(nodes)
#print 'Goal reachable:', self.goal.reachable
#print 'Goal active:', self.goal.active
#self.graph('end.pdf', reachable=False)
#raw_input()
#print
#print 'Done'
#print len(self.vertices) - start_vertices, len(self.connectors) - start_connectors, len(self.edges) - start_edges
#print iterations, cycles
reachable = self.goal.reachable
exhausted = len(self.queue) == 0
self.state_uses[start] += 1
self.total_time += time() - start_time; self.total_iterations += iterations; self.total_cycles += cycles
return reachable, exhausted
###########################################################################
# TODO - costs dependent on mode switches
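  # costs() propagates costs and levels through the relaxed graph with a Dijkstra-style
  # sweep from the initial vertices. With the default op=max this gives an h_max-like
  # estimate of the delete relaxation, while op=sum gives an additive, h_add-like estimate;
  # unit=True charges every edge a cost of 1 instead of its action cost, and greedy=True
  # stops as soon as the goal edge receives a finite cost.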
def costs(self, op=max, unit=False, greedy=True):
self.op, self.unit = op, unit
for node in self.vertices.values() + self.edges.values() + self.connectors.values():
node.cost = INF; node.level = INF
for edge in self.edges.values():
edge.num_finite_cost = 0
queue = []
for vertex in self.initial_vertices():
vertex.cost, vertex.level = 0, 0
heappush(queue, (vertex.cost, vertex))
processed = set()
while len(queue) != 0:
_, vertex = heappop(queue)
if vertex in processed: continue
processed.add(vertex)
for sink_vertex, edge in vertex.sinks: # Sink Vertex
if edge.cost == INF: continue
new_cost = op(vertex.cost, edge.cost) + (edge.value.cost if not unit else 1)
        if new_cost < sink_vertex.cost:
sink_vertex.cost, sink_vertex.level = new_cost, max(vertex.level, edge.level) + 1
heappush(queue, (sink_vertex.cost, sink_vertex))
for connector in vertex.connectors: # Connector
if connector.cost == INF:
connector.cost, connector.level = vertex.cost, vertex.level
for edge in connector.edges: # Edge
edge.num_finite_cost += 1
if edge.num_finite_cost == len(edge.value.conditions):
edge.cost, edge.level = op(c.cost for c in edge.connectors), max(c.level for c in edge.connectors)
if greedy and edge == self.goal: return
for source_vertex, sink_vertex in edge.mappings: # Vertex
source_cost, source_level = edge.cost, edge.level
if source_vertex is not None:
if source_vertex not in processed: continue
source_cost, source_level = op(source_cost, source_vertex.cost), max(source_vertex.level, source_level)
new_cost = source_cost + (edge.value.cost if not unit else 1)
if new_cost < sink_vertex.cost:
sink_vertex.cost, sink_vertex.level = new_cost, source_level + 1
heappush(queue, (sink_vertex.cost, sink_vertex))
###########################################################################
#def direct_vertex_achievers(self, vertex, l_fn):
# return filter(lambda (v, e): l_fn(e) == l_fn(vertex)-1 and (v is None or l_fn(v)==l_fn(vertex)-1), vertex.sources) # TODO - fix
def all_vertex_achievers(self, vertex, l_fn):
return list(filter(lambda item: l_fn(item[1]) < l_fn(vertex) and (item[0] is None or l_fn(item[0]) < l_fn(vertex)), vertex.sources))
def all_connector_achievers(self, connector, l_fn):
return list(filter(lambda v: l_fn(v) <= l_fn(connector), connector.vertices))
def discounted_vertex_cost(self, vertex, l_fn, h_fn):
if vertex in self.relaxed_plan_vertices[l_fn(vertex)]: return 0
return h_fn(vertex)
def discounted_edge_cost(self, edge, l_fn, h_fn, op=sum): # TODO - get rid of op
return op(min(self.discounted_vertex_cost(v, l_fn, h_fn) for v in self.all_connector_achievers(c, l_fn)) for c in edge.connectors)
def easiest_edge(self, vertex, l_fn, h_fn): # TODO - factor in v when computing the cost and level
return argmin(lambda item: self.discounted_edge_cost(item[1], l_fn, h_fn), self.all_vertex_achievers(vertex, l_fn))
def easiest_vertex(self, connector, l_fn, h_fn):
return argmin(lambda v: self.discounted_vertex_cost(v, l_fn, h_fn), self.all_connector_achievers(connector, l_fn))
def random_layer(self, layer): return randomize(layer)
def easy_layer(self, layer, h_fn): return sorted(layer, key=h_fn)
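  # linearize_plan() extracts a relaxed plan in the spirit of FF: it seeds the top level
  # with the cheapest achieving vertex of each goal connector, then walks the levels
  # backwards, committing the easiest achieving edge for each unmarked vertex (vertices
  # already chosen at a level are discounted to cost 0) and recording the chosen vertices
  # and edges per level in relaxed_plan_vertices and relaxed_plan_edges.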
def linearize_plan(self, l_fn=lambda n: n.level, h_fn=lambda n: n.cost):
self.relaxed_plan_vertices = [set() for _ in range(l_fn(self.goal) + 1)]
self.relaxed_plan_edges = [set() for _ in range(l_fn(self.goal))]
for new_vertex in (self.easiest_vertex(c, l_fn, h_fn) for c in self.goal.connectors):
self.relaxed_plan_vertices[l_fn(new_vertex)].add(new_vertex)
marked_vertices = [set() for _ in range(l_fn(self.goal) + 1)]
for level in reversed(range(1, len(self.relaxed_plan_vertices))):
for vertex in self.easy_layer(self.relaxed_plan_vertices[level] - marked_vertices[level], h_fn):
source, edge = self.easiest_edge(vertex, l_fn, h_fn)
self.relaxed_plan_edges[l_fn(edge)].add(edge)
for new_vertex in [self.easiest_vertex(c, l_fn, h_fn) for c in edge.connectors] + ([source] if source is not None else []):
if new_vertex not in marked_vertices[level-1]:
self.relaxed_plan_vertices[l_fn(new_vertex)].add(new_vertex)
for _, sink_vertex in edge.mappings:
marked_vertices[level].add(sink_vertex)
marked_vertices[level-1].add(sink_vertex) # Assumes that actions are sequenced in the order they are selected
# NOTE - achievers_dag does this recursively
#def linearize_helpful(self, l_fn=lambda n: n.level):
# self.helpful_vertices = [set() for _ in range(l_fn(self.goal) + 1)]
# self.helpful_edges = [set() for _ in range(l_fn(self.goal))]
#
# self.vertex_arcs = defaultdict(set)
# self.edge_arcs = defaultdict(set)
# for c in self.goal.connectors:
# for v in self.all_connector_achievers(c, l_fn):
# self.helpful_vertices[l_fn(v)].add(v)
# self.vertex_arcs[v].add(self.goal)
#
# for level in reversed(range(1, len(self.helpful_vertices))):
# for vertex in self.helpful_vertices[level]:
# for source, edge in self.all_vertex_achievers(vertex, l_fn):
# self.helpful_edges[l_fn(edge)].add(edge)
# self.edge_arcs[edge].add(vertex)
# for c in edge.connectors:
# for v in self.all_connector_achievers(c, l_fn):
# self.helpful_vertices[l_fn(v)].add(v)
# self.vertex_arcs[v].add(edge)
# #if source is not None:
# # self.helpful_vertices[l_fn(source)].add(source)
###########################################################################
def graph_rpg(self, filename, reachable=True):
from pygraphviz import AGraph # NOTE - LIS machines do not have pygraphviz
graph = AGraph(strict=True, directed=True)
for vertex in self.vertices.values():
for connector in vertex.connectors:
if vertex.level <= connector.level and (not reachable or (vertex.reachable and connector.reachable)):
graphviz_connect(graph, vertex, connector)
for connector in self.connectors.values():
for edge in connector.edges:
if connector.level <= edge.level and (not reachable or (connector.reachable and edge.reachable)):
graphviz_connect(graph, connector, edge)
for edge in self.edges.values():
for _, sink_vertex in edge.mappings:
if edge.level <= sink_vertex.level and (not reachable or (edge.reachable and sink_vertex.reachable)):
graphviz_connect(graph, edge, sink_vertex)
graph.draw(filename, prog='dot')
def str_rpg(self):
vertex_levels = defaultdict(set)
for vertex in self.vertices.values():
vertex_levels[vertex.level].add(vertex)
connector_levels = defaultdict(set)
for connector in self.connectors.values():
connector_levels[connector.level].add(connector)
edge_levels = defaultdict(set)
for edge in self.edges.values():
edge_levels[edge.level].add(edge)
s = self.__class__.__name__
for level in sorted(set(vertex_levels.keys() + connector_levels.keys() + edge_levels.keys())):
s += '\n\n---Level ' + str(level) + '---\n' + \
'Vertices: ' + str_object(vertex_levels.get(level, [])) + '\n' + \
'Connectors: ' + str_object(connector_levels.get(level, [])) + '\n' + \
'Edges: ' + str_object(edge_levels.get(level, []))
return s
| mit | -6,384,865,114,852,407,000 | 48.092308 | 178 | 0.655672 | false |
kxliugang/edx-platform | lms/djangoapps/teams/views.py | 8 | 49796 | """HTTP endpoints for the Teams API."""
import logging
from django.shortcuts import get_object_or_404, render_to_response
from django.http import Http404
from django.conf import settings
from django.core.paginator import Paginator
from django.views.generic.base import View
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
from rest_framework.authentication import (
SessionAuthentication,
OAuth2Authentication
)
from rest_framework import status
from rest_framework import permissions
from django.db.models import Count
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth.models import User
from django_countries import countries
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from openedx.core.lib.api.parsers import MergePatchParser
from openedx.core.lib.api.permissions import IsStaffOrReadOnly
from openedx.core.lib.api.view_utils import (
RetrievePatchAPIView,
add_serializer_errors,
build_api_error,
ExpandableFieldViewMixin
)
from openedx.core.lib.api.serializers import PaginationSerializer
from openedx.core.lib.api.paginators import paginate_search_results
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from courseware.courses import get_course_with_access, has_access
from student.models import CourseEnrollment, CourseAccessRole
from student.roles import CourseStaffRole
from django_comment_client.utils import has_discussion_privileges
from teams import is_feature_enabled
from util.model_utils import truncate_fields
from .models import CourseTeam, CourseTeamMembership
from .serializers import (
CourseTeamSerializer,
CourseTeamCreationSerializer,
TopicSerializer,
PaginatedTopicSerializer,
BulkTeamCountPaginatedTopicSerializer,
MembershipSerializer,
PaginatedMembershipSerializer,
add_team_count
)
from .search_indexes import CourseTeamIndexer
from .errors import AlreadyOnTeamInCourse, ElasticSearchConnectionError, NotEnrolledInCourseForTeam
from .utils import emit_team_event
TEAM_MEMBERSHIPS_PER_PAGE = 2
TOPICS_PER_PAGE = 12
MAXIMUM_SEARCH_SIZE = 100000
log = logging.getLogger(__name__)
@receiver(post_save, sender=CourseTeam)
def team_post_save_callback(sender, instance, **kwargs): # pylint: disable=unused-argument
""" Emits signal after the team is saved. """
changed_fields = instance.field_tracker.changed()
# Don't emit events when we are first creating the team.
if not kwargs['created']:
for field in changed_fields:
if field not in instance.FIELD_BLACKLIST:
truncated_fields = truncate_fields(unicode(changed_fields[field]), unicode(getattr(instance, field)))
truncated_fields['team_id'] = instance.team_id
truncated_fields['field'] = field
emit_team_event(
'edx.team.changed',
instance.course_id,
truncated_fields
)
class TeamsDashboardView(View):
"""
View methods related to the teams dashboard.
"""
def get(self, request, course_id):
"""
Renders the teams dashboard, which is shown on the "Teams" tab.
Raises a 404 if the course specified by course_id does not exist, the
user is not registered for the course, or the teams feature is not enabled.
"""
course_key = CourseKey.from_string(course_id)
course = get_course_with_access(request.user, "load", course_key)
if not is_feature_enabled(course):
raise Http404
if not CourseEnrollment.is_enrolled(request.user, course.id) and \
not has_access(request.user, 'staff', course, course.id):
raise Http404
# Even though sorting is done outside of the serializer, sort_order needs to be passed
# to the serializer so that the paginated results indicate how they were sorted.
sort_order = 'name'
topics = get_alphabetical_topics(course)
topics_page = Paginator(topics, TOPICS_PER_PAGE).page(1)
# BulkTeamCountPaginatedTopicSerializer will add team counts to the topics in a single
# bulk operation per page.
topics_serializer = BulkTeamCountPaginatedTopicSerializer(
instance=topics_page,
context={'course_id': course.id, 'sort_order': sort_order}
)
user = request.user
team_memberships = CourseTeamMembership.get_memberships(request.user.username, [course.id])
team_memberships_page = Paginator(team_memberships, TEAM_MEMBERSHIPS_PER_PAGE).page(1)
team_memberships_serializer = PaginatedMembershipSerializer(
instance=team_memberships_page,
context={'expand': ('team', 'user'), 'request': request},
)
context = {
"course": course,
"topics": topics_serializer.data,
# It is necessary to pass both privileged and staff because only privileged users can
# administer discussion threads, but both privileged and staff users are allowed to create
# multiple teams (since they are not automatically added to teams upon creation).
"user_info": {
"username": user.username,
"privileged": has_discussion_privileges(user, course_key),
"staff": bool(has_access(user, 'staff', course_key)),
"team_memberships_data": team_memberships_serializer.data,
},
"topic_url": reverse(
'topics_detail', kwargs={'topic_id': 'topic_id', 'course_id': str(course_id)}, request=request
),
"topics_url": reverse('topics_list', request=request),
"teams_url": reverse('teams_list', request=request),
"teams_detail_url": reverse('teams_detail', args=['team_id']),
"team_memberships_url": reverse('team_membership_list', request=request),
"team_membership_detail_url": reverse('team_membership_detail', args=['team_id', user.username]),
"languages": [[lang[0], _(lang[1])] for lang in settings.ALL_LANGUAGES], # pylint: disable=translation-of-non-string
"countries": list(countries),
"disable_courseware_js": True,
"teams_base_url": reverse('teams_dashboard', request=request, kwargs={'course_id': course_id}),
}
return render_to_response("teams/teams.html", context)
def has_team_api_access(user, course_key, access_username=None):
"""Returns True if the user has access to the Team API for the course
given by `course_key`. The user must either be enrolled in the course,
be course staff, be global staff, or have discussion privileges.
Args:
user (User): The user to check access for.
course_key (CourseKey): The key to the course which we are checking access to.
        access_username (string): If provided, access_username must match user.username for non-staff access.
Returns:
bool: True if the user has access, False otherwise.
"""
if user.is_staff:
return True
if CourseStaffRole(course_key).has_user(user):
return True
if has_discussion_privileges(user, course_key):
return True
if not access_username or access_username == user.username:
return CourseEnrollment.is_enrolled(user, course_key)
return False
class TeamsListView(ExpandableFieldViewMixin, GenericAPIView):
"""
**Use Cases**
Get or create a course team.
**Example Requests**:
GET /api/team/v0/teams
POST /api/team/v0/teams
**Query Parameters for GET**
* course_id: Filters the result to teams belonging to the given
course. Required.
* topic_id: Filters the result to teams associated with the given
topic.
* text_search: Searches for full word matches on the name, description,
country, and language fields. NOTES: Search is on full names for countries
and languages, not the ISO codes. Text_search cannot be requested along with
          order_by.
        * order_by: Cannot be called along with text_search. Must be one of the following:
* name: Orders results by case insensitive team name (default).
* open_slots: Orders results by most open slots (for tie-breaking,
last_activity_at is used, with most recent first).
* last_activity_at: Orders result by team activity, with most active first
(for tie-breaking, open_slots is used, with most open slots first).
* page_size: Number of results to return per page.
* page: Page number to retrieve.
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
If the user is logged in and enrolled, the response contains:
* count: The total number of teams matching the request.
* next: The URL to the next page of results, or null if this is the
last page.
* previous: The URL to the previous page of results, or null if this
is the first page.
* num_pages: The total number of pages in the result.
* results: A list of the teams matching the request.
* id: The team's unique identifier.
* discussion_topic_id: The unique id of the comments service
discussion topic associated with this team.
* name: The name of the team.
* course_id: The identifier for the course this team belongs to.
* topic_id: Optionally specifies which topic the team is associated
with.
* date_created: Date and time when the team was created.
* description: A description of the team.
* country: Optionally specifies which country the team is
associated with.
* language: Optionally specifies which language the team is
associated with.
* last_activity_at: The date of the last activity of any team member
within the team.
* membership: A list of the users that are members of the team.
See membership endpoint for more detail.
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in, a 401 error is returned.
If the user is not enrolled in the course specified by course_id or
is not course or global staff, a 403 error is returned.
If the specified course_id is not valid or the user attempts to
use an unsupported query parameter, a 400 error is returned.
        If the requested resource does not exist, a 404 error is returned. For
example, the course_id may not reference a real course or the page
number may be beyond the last page.
If the server is unable to connect to Elasticsearch, and
the text_search parameter is supplied, a 503 error is returned.
**Response Values for POST**
Any logged in user who has verified their email address can create
a team. The format mirrors that of a GET for an individual team,
but does not include the id, date_created, or membership fields.
id is automatically computed based on name.
If the user is not logged in, a 401 error is returned.
If the user is not enrolled in the course, is not course or
global staff, or does not have discussion privileges a 403 error
is returned.
If the course_id is not valid or extra fields are included in the
request, a 400 error is returned.
If the specified course does not exist, a 404 error is returned.
"""
# OAuth2Authentication must come first to return a 401 for unauthenticated users
authentication_classes = (OAuth2Authentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
paginate_by = 10
paginate_by_param = 'page_size'
pagination_serializer_class = PaginationSerializer
serializer_class = CourseTeamSerializer
def get(self, request):
"""GET /api/team/v0/teams/"""
result_filter = {}
if 'course_id' in request.QUERY_PARAMS:
course_id_string = request.QUERY_PARAMS['course_id']
try:
course_key = CourseKey.from_string(course_id_string)
# Ensure the course exists
course_module = modulestore().get_course(course_key)
if course_module is None:
return Response(status=status.HTTP_404_NOT_FOUND)
result_filter.update({'course_id': course_key})
except InvalidKeyError:
error = build_api_error(
ugettext_noop("The supplied course id {course_id} is not valid."),
course_id=course_id_string,
)
return Response(error, status=status.HTTP_400_BAD_REQUEST)
if not has_team_api_access(request.user, course_key):
return Response(status=status.HTTP_403_FORBIDDEN)
else:
return Response(
build_api_error(ugettext_noop("course_id must be provided")),
status=status.HTTP_400_BAD_REQUEST
)
text_search = request.QUERY_PARAMS.get('text_search', None)
if text_search and request.QUERY_PARAMS.get('order_by', None):
return Response(
build_api_error(ugettext_noop("text_search and order_by cannot be provided together")),
status=status.HTTP_400_BAD_REQUEST
)
topic_id = request.QUERY_PARAMS.get('topic_id', None)
if topic_id is not None:
if topic_id not in [topic['id'] for topic in course_module.teams_configuration['topics']]:
error = build_api_error(
ugettext_noop('The supplied topic id {topic_id} is not valid'),
topic_id=topic_id
)
return Response(error, status=status.HTTP_400_BAD_REQUEST)
result_filter.update({'topic_id': topic_id})
if text_search and CourseTeamIndexer.search_is_enabled():
try:
search_engine = CourseTeamIndexer.engine()
except ElasticSearchConnectionError:
return Response(
build_api_error(ugettext_noop('Error connecting to elasticsearch')),
status=status.HTTP_503_SERVICE_UNAVAILABLE
)
result_filter.update({'course_id': course_id_string})
search_results = search_engine.search(
query_string=text_search,
field_dictionary=result_filter,
size=MAXIMUM_SEARCH_SIZE,
)
paginated_results = paginate_search_results(
CourseTeam,
search_results,
self.get_paginate_by(),
self.get_page()
)
serializer = self.get_pagination_serializer(paginated_results)
emit_team_event('edx.team.searched', course_key, {
"number_of_results": search_results['total'],
"search_text": text_search,
"topic_id": topic_id,
})
else:
queryset = CourseTeam.objects.filter(**result_filter)
order_by_input = request.QUERY_PARAMS.get('order_by', 'name')
if order_by_input == 'name':
# MySQL does case-insensitive order_by.
queryset = queryset.order_by('name')
elif order_by_input == 'open_slots':
queryset = queryset.order_by('team_size', '-last_activity_at')
elif order_by_input == 'last_activity_at':
queryset = queryset.order_by('-last_activity_at', 'team_size')
else:
return Response({
'developer_message': "unsupported order_by value {ordering}".format(ordering=order_by_input),
# Translators: 'ordering' is a string describing a way
# of ordering a list. For example, {ordering} may be
# 'name', indicating that the user wants to sort the
# list by lower case name.
'user_message': _(u"The ordering {ordering} is not supported").format(ordering=order_by_input),
}, status=status.HTTP_400_BAD_REQUEST)
page = self.paginate_queryset(queryset)
serializer = self.get_pagination_serializer(page)
serializer.context.update({'sort_order': order_by_input}) # pylint: disable=maybe-no-member
return Response(serializer.data) # pylint: disable=maybe-no-member
def post(self, request):
"""POST /api/team/v0/teams/"""
field_errors = {}
course_key = None
course_id = request.DATA.get('course_id')
try:
course_key = CourseKey.from_string(course_id)
# Ensure the course exists
if not modulestore().has_course(course_key):
return Response(status=status.HTTP_404_NOT_FOUND)
except InvalidKeyError:
field_errors['course_id'] = build_api_error(
ugettext_noop('The supplied course_id {course_id} is not valid.'),
course_id=course_id
)
return Response({
'field_errors': field_errors,
}, status=status.HTTP_400_BAD_REQUEST)
# Course and global staff, as well as discussion "privileged" users, will not automatically
# be added to a team when they create it. They are allowed to create multiple teams.
team_administrator = (has_access(request.user, 'staff', course_key)
or has_discussion_privileges(request.user, course_key))
if not team_administrator and CourseTeamMembership.user_in_team_for_course(request.user, course_key):
error_message = build_api_error(
ugettext_noop('You are already in a team in this course.'),
course_id=course_id
)
return Response(error_message, status=status.HTTP_400_BAD_REQUEST)
if course_key and not has_team_api_access(request.user, course_key):
return Response(status=status.HTTP_403_FORBIDDEN)
data = request.DATA.copy()
data['course_id'] = course_key
serializer = CourseTeamCreationSerializer(data=data)
add_serializer_errors(serializer, data, field_errors)
if field_errors:
return Response({
'field_errors': field_errors,
}, status=status.HTTP_400_BAD_REQUEST)
else:
team = serializer.save()
emit_team_event('edx.team.created', course_key, {
'team_id': team.team_id
})
if not team_administrator:
# Add the creating user to the team.
team.add_user(request.user)
emit_team_event(
'edx.team.learner_added',
course_key,
{
'team_id': team.team_id,
'user_id': request.user.id,
'add_method': 'added_on_create'
}
)
return Response(CourseTeamSerializer(team).data)
def get_page(self):
""" Returns page number specified in args, params, or defaults to 1. """
# This code is taken from within the GenericAPIView#paginate_queryset method.
        # We need access to the page outside of that method for our paginate_search_results method
page_kwarg = self.kwargs.get(self.page_kwarg)
page_query_param = self.request.QUERY_PARAMS.get(self.page_kwarg)
return page_kwarg or page_query_param or 1
class IsEnrolledOrIsStaff(permissions.BasePermission):
"""Permission that checks to see if the user is enrolled in the course or is staff."""
def has_object_permission(self, request, view, obj):
"""Returns true if the user is enrolled or is staff."""
return has_team_api_access(request.user, obj.course_id)
class IsStaffOrPrivilegedOrReadOnly(IsStaffOrReadOnly):
"""
Permission that checks to see if the user is global staff, course
staff, or has discussion privileges. If none of those conditions are
met, only read access will be granted.
"""
def has_object_permission(self, request, view, obj):
return (
has_discussion_privileges(request.user, obj.course_id) or
super(IsStaffOrPrivilegedOrReadOnly, self).has_object_permission(request, view, obj)
)
class TeamsDetailView(ExpandableFieldViewMixin, RetrievePatchAPIView):
"""
**Use Cases**
Get, update, or delete a course team's information. Updates are supported
only through merge patch.
**Example Requests**:
GET /api/team/v0/teams/{team_id}}
PATCH /api/team/v0/teams/{team_id} "application/merge-patch+json"
DELETE /api/team/v0/teams/{team_id}
**Query Parameters for GET**
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
If the user is logged in, the response contains the following fields:
* id: The team's unique identifier.
* discussion_topic_id: The unique id of the comments service
discussion topic associated with this team.
* name: The name of the team.
* course_id: The identifier for the course this team belongs to.
* topic_id: Optionally specifies which topic the team is
associated with.
* date_created: Date and time when the team was created.
* description: A description of the team.
* country: Optionally specifies which country the team is
associated with.
* language: Optionally specifies which language the team is
associated with.
* membership: A list of the users that are members of the team. See
membership endpoint for more detail.
* last_activity_at: The date of the last activity of any team member
within the team.
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in, a 401 error is returned.
If the user is not course or global staff, a 403 error is returned.
If the specified team does not exist, a 404 error is returned.
**Response Values for PATCH**
Only staff can patch teams.
If the user is anonymous or inactive, a 401 is returned.
If the user is logged in and the team does not exist, a 404 is returned.
If the user is not course or global staff, does not have discussion
privileges, and the team does exist, a 403 is returned.
If "application/merge-patch+json" is not the specified content type,
a 415 error is returned.
If the update could not be completed due to validation errors, this
method returns a 400 error with all error messages in the
"field_errors" field of the returned JSON.
**Response Values for DELETE**
Only staff can delete teams. When a team is deleted, all
team memberships associated with that team are also
deleted. Returns 204 on successful deletion.
If the user is anonymous or inactive, a 401 is returned.
If the user is not course or global staff and does not
have discussion privileges, a 403 is returned.
If the user is logged in and the team does not exist, a 404 is returned.
"""
authentication_classes = (OAuth2Authentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated, IsStaffOrPrivilegedOrReadOnly, IsEnrolledOrIsStaff,)
lookup_field = 'team_id'
serializer_class = CourseTeamSerializer
parser_classes = (MergePatchParser,)
def get_queryset(self):
"""Returns the queryset used to access the given team."""
return CourseTeam.objects.all()
def delete(self, request, team_id):
"""DELETE /api/team/v0/teams/{team_id}"""
team = get_object_or_404(CourseTeam, team_id=team_id)
self.check_object_permissions(request, team)
        # Note: list() forces the queryset to be evaluated before delete()
memberships = list(CourseTeamMembership.get_memberships(team_id=team_id))
# Note: also deletes all team memberships associated with this team
team.delete()
log.info('user %d deleted team %s', request.user.id, team_id)
emit_team_event('edx.team.deleted', team.course_id, {
'team_id': team_id,
})
for member in memberships:
emit_team_event('edx.team.learner_removed', team.course_id, {
'team_id': team_id,
'remove_method': 'team_deleted',
'user_id': member.user_id
})
return Response(status=status.HTTP_204_NO_CONTENT)
class TopicListView(GenericAPIView):
"""
**Use Cases**
Retrieve a list of topics associated with a single course.
**Example Requests**
GET /api/team/v0/topics/?course_id={course_id}
**Query Parameters for GET**
* course_id: Filters the result to topics belonging to the given
course (required).
* order_by: Orders the results. Currently only 'name' and 'team_count' are supported;
the default value is 'name'. If 'team_count' is specified, topics are returned first sorted
by number of teams per topic (descending), with a secondary sort of 'name'.
* page_size: Number of results to return per page.
* page: Page number to retrieve.
**Response Values for GET**
If the user is not logged in, a 401 error is returned.
If the course_id is not given or an unsupported value is passed for
order_by, returns a 400 error.
If the user is not logged in, is not enrolled in the course, or is
not course or global staff, returns a 403 error.
If the course does not exist, returns a 404 error.
Otherwise, a 200 response is returned containing the following
fields:
* count: The total number of topics matching the request.
* next: The URL to the next page of results, or null if this is the
last page.
* previous: The URL to the previous page of results, or null if this
is the first page.
* num_pages: The total number of pages in the result.
* results: A list of the topics matching the request.
* id: The topic's unique identifier.
* name: The name of the topic.
* description: A description of the topic.
"""
authentication_classes = (OAuth2Authentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
paginate_by = TOPICS_PER_PAGE
paginate_by_param = 'page_size'
def get(self, request):
"""GET /api/team/v0/topics/?course_id={course_id}"""
course_id_string = request.QUERY_PARAMS.get('course_id', None)
if course_id_string is None:
return Response({
'field_errors': {
'course_id': build_api_error(
ugettext_noop("The supplied course id {course_id} is not valid."),
course_id=course_id_string
)
}
}, status=status.HTTP_400_BAD_REQUEST)
try:
course_id = CourseKey.from_string(course_id_string)
except InvalidKeyError:
return Response(status=status.HTTP_404_NOT_FOUND)
# Ensure the course exists
course_module = modulestore().get_course(course_id)
if course_module is None: # course is None if not found
return Response(status=status.HTTP_404_NOT_FOUND)
if not has_team_api_access(request.user, course_id):
return Response(status=status.HTTP_403_FORBIDDEN)
ordering = request.QUERY_PARAMS.get('order_by', 'name')
if ordering not in ['name', 'team_count']:
return Response({
'developer_message': "unsupported order_by value {ordering}".format(ordering=ordering),
# Translators: 'ordering' is a string describing a way
# of ordering a list. For example, {ordering} may be
# 'name', indicating that the user wants to sort the
# list by lower case name.
'user_message': _(u"The ordering {ordering} is not supported").format(ordering=ordering),
}, status=status.HTTP_400_BAD_REQUEST)
# Always sort alphabetically, as it will be used as secondary sort
# in the case of "team_count".
topics = get_alphabetical_topics(course_module)
if ordering == 'team_count':
add_team_count(topics, course_id)
topics.sort(key=lambda t: t['team_count'], reverse=True)
page = self.paginate_queryset(topics)
# Since team_count has already been added to all the topics, use PaginatedTopicSerializer.
# Even though sorting is done outside of the serializer, sort_order needs to be passed
# to the serializer so that the paginated results indicate how they were sorted.
serializer = PaginatedTopicSerializer(page, context={'course_id': course_id, 'sort_order': ordering})
else:
page = self.paginate_queryset(topics)
# Use the serializer that adds team_count in a bulk operation per page.
serializer = BulkTeamCountPaginatedTopicSerializer(
page, context={'course_id': course_id, 'sort_order': ordering}
)
return Response(serializer.data)
def get_alphabetical_topics(course_module):
"""Return a list of team topics sorted alphabetically.
Arguments:
course_module (xmodule): the course which owns the team topics
Returns:
list: a list of sorted team topics
"""
return sorted(course_module.teams_topics, key=lambda t: t['name'].lower())
class TopicDetailView(APIView):
"""
**Use Cases**
Retrieve a single topic from a course.
**Example Requests**
GET /api/team/v0/topics/{topic_id},{course_id}
**Query Parameters for GET**
* topic_id: The ID of the topic to retrieve (required).
* course_id: The ID of the course to retrieve the topic from
(required).
**Response Values for GET**
If the user is not logged in, a 401 error is returned.
        If the topic_id or course_id is not given, or an unsupported value is
passed for order_by, returns a 400 error.
If the user is not enrolled in the course, or is not course or
global staff, returns a 403 error.
If the course does not exist, returns a 404 error.
Otherwise, a 200 response is returned containing the following fields:
* id: The topic's unique identifier.
* name: The name of the topic.
* description: A description of the topic.
"""
authentication_classes = (OAuth2Authentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
def get(self, request, topic_id, course_id):
"""GET /api/team/v0/topics/{topic_id},{course_id}/"""
try:
course_id = CourseKey.from_string(course_id)
except InvalidKeyError:
return Response(status=status.HTTP_404_NOT_FOUND)
# Ensure the course exists
course_module = modulestore().get_course(course_id)
if course_module is None:
return Response(status=status.HTTP_404_NOT_FOUND)
if not has_team_api_access(request.user, course_id):
return Response(status=status.HTTP_403_FORBIDDEN)
topics = [t for t in course_module.teams_topics if t['id'] == topic_id]
if len(topics) == 0:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = TopicSerializer(topics[0], context={'course_id': course_id})
return Response(serializer.data)
class MembershipListView(ExpandableFieldViewMixin, GenericAPIView):
"""
**Use Cases**
List course team memberships or add a user to a course team.
**Example Requests**:
GET /api/team/v0/team_membership
POST /api/team/v0/team_membership
**Query Parameters for GET**
At least one of username and team_id must be provided.
* username: Returns membership records only for the specified user.
If the requesting user is not staff then only memberships for
teams associated with courses in which the requesting user is
enrolled are returned.
* team_id: Returns only membership records associated with the
specified team. The requesting user must be staff or enrolled in
the course associated with the team.
* course_id: Returns membership records only for the specified
          course. The user given by username must have access to this course, or
          else team_id must belong to this course.
* page_size: Number of results to return per page.
* page: Page number to retrieve.
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
If the user is logged in and enrolled, the response contains:
* count: The total number of memberships matching the request.
* next: The URL to the next page of results, or null if this is the
last page.
* previous: The URL to the previous page of results, or null if this
is the first page.
* num_pages: The total number of pages in the result.
* results: A list of the memberships matching the request.
* user: The user associated with the membership. This field may
contain an expanded or collapsed representation.
* team: The team associated with the membership. This field may
contain an expanded or collapsed representation.
* date_joined: The date and time the membership was created.
* last_activity_at: The date of the last activity of the user
within the team.
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in and active, a 401 error is returned.
If neither team_id nor username are provided, a 400 error is
returned.
If team_id is provided but the team does not exist, a 404 error is
returned.
If the specified course_id is invalid, a 404 error is returned.
This endpoint uses 404 error codes to avoid leaking information
about team or user existence. Specifically, a 404 error will be
returned if a logged in user specifies a team_id for a course
they are not enrolled in.
Additionally, when username is specified the list of returned
memberships will be filtered to memberships in teams associated
with courses that the requesting user is enrolled in.
If the course specified by course_id does not contain the team
specified by team_id, a 400 error is returned.
If the user is not enrolled in the course specified by course_id,
and does not have staff access to the course, a 400 error is
returned.
**Response Values for POST**
Any logged in user enrolled in a course can enroll themselves in a
team in the course. Course staff, global staff, and discussion
privileged users can enroll any user in a team, with a few
exceptions noted below.
If the user is not logged in and active, a 401 error is returned.
        If username and team_id are not provided in the posted JSON, a 400
error is returned describing the missing fields.
If the specified team does not exist, a 404 error is returned.
If the user is not staff, does not have discussion privileges,
and is not enrolled in the course associated with the team they
are trying to join, or if they are trying to add a user other
than themselves to a team, a 404 error is returned. This is to
prevent leaking information about the existence of teams and users.
If the specified user does not exist, a 404 error is returned.
If the user is already a member of a team in the course associated
with the team they are trying to join, a 400 error is returned.
This applies to both staff and students.
If the user is not enrolled in the course associated with the team
they are trying to join, a 400 error is returned. This can occur
when a staff or discussion privileged user posts a request adding
another user to a team.
"""
authentication_classes = (OAuth2Authentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
serializer_class = MembershipSerializer
paginate_by = 10
paginate_by_param = 'page_size'
pagination_serializer_class = PaginationSerializer
def get(self, request):
"""GET /api/team/v0/team_membership"""
specified_username_or_team = False
username = None
team_id = None
requested_course_id = None
requested_course_key = None
accessible_course_ids = None
if 'course_id' in request.QUERY_PARAMS:
requested_course_id = request.QUERY_PARAMS['course_id']
try:
requested_course_key = CourseKey.from_string(requested_course_id)
except InvalidKeyError:
return Response(status=status.HTTP_404_NOT_FOUND)
if 'team_id' in request.QUERY_PARAMS:
specified_username_or_team = True
team_id = request.QUERY_PARAMS['team_id']
try:
team = CourseTeam.objects.get(team_id=team_id)
except CourseTeam.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if requested_course_key is not None and requested_course_key != team.course_id:
return Response(status=status.HTTP_400_BAD_REQUEST)
if not has_team_api_access(request.user, team.course_id):
return Response(status=status.HTTP_404_NOT_FOUND)
if 'username' in request.QUERY_PARAMS:
specified_username_or_team = True
username = request.QUERY_PARAMS['username']
if not request.user.is_staff:
enrolled_courses = (
CourseEnrollment.enrollments_for_user(request.user).values_list('course_id', flat=True)
)
staff_courses = (
CourseAccessRole.objects.filter(user=request.user, role='staff').values_list('course_id', flat=True)
)
accessible_course_ids = [item for sublist in (enrolled_courses, staff_courses) for item in sublist]
if requested_course_id is not None and requested_course_id not in accessible_course_ids:
return Response(status=status.HTTP_400_BAD_REQUEST)
if not specified_username_or_team:
return Response(
build_api_error(ugettext_noop("username or team_id must be specified.")),
status=status.HTTP_400_BAD_REQUEST
)
course_keys = None
if requested_course_key is not None:
course_keys = [requested_course_key]
elif accessible_course_ids is not None:
course_keys = [CourseKey.from_string(course_string) for course_string in accessible_course_ids]
queryset = CourseTeamMembership.get_memberships(username, course_keys, team_id)
page = self.paginate_queryset(queryset)
serializer = self.get_pagination_serializer(page)
return Response(serializer.data) # pylint: disable=maybe-no-member
def post(self, request):
"""POST /api/team/v0/team_membership"""
field_errors = {}
if 'username' not in request.DATA:
field_errors['username'] = build_api_error(ugettext_noop("Username is required."))
if 'team_id' not in request.DATA:
field_errors['team_id'] = build_api_error(ugettext_noop("Team id is required."))
if field_errors:
return Response({
'field_errors': field_errors,
}, status=status.HTTP_400_BAD_REQUEST)
try:
team = CourseTeam.objects.get(team_id=request.DATA['team_id'])
except CourseTeam.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
username = request.DATA['username']
if not has_team_api_access(request.user, team.course_id, access_username=username):
return Response(status=status.HTTP_404_NOT_FOUND)
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
course_module = modulestore().get_course(team.course_id)
if course_module.teams_max_size is not None and team.users.count() >= course_module.teams_max_size:
return Response(
build_api_error(ugettext_noop("This team is already full.")),
status=status.HTTP_400_BAD_REQUEST
)
try:
membership = team.add_user(user)
emit_team_event(
'edx.team.learner_added',
team.course_id,
{
'team_id': team.team_id,
'user_id': user.id,
'add_method': 'joined_from_team_view' if user == request.user else 'added_by_another_user'
}
)
except AlreadyOnTeamInCourse:
return Response(
build_api_error(
ugettext_noop("The user {username} is already a member of a team in this course."),
username=username
),
status=status.HTTP_400_BAD_REQUEST
)
except NotEnrolledInCourseForTeam:
return Response(
build_api_error(
ugettext_noop("The user {username} is not enrolled in the course associated with this team."),
username=username
),
status=status.HTTP_400_BAD_REQUEST
)
serializer = self.get_serializer(instance=membership)
return Response(serializer.data)
class MembershipDetailView(ExpandableFieldViewMixin, GenericAPIView):
"""
**Use Cases**
Gets individual course team memberships or removes a user from a course team.
**Example Requests**:
GET /api/team/v0/team_membership/{team_id},{username}
DELETE /api/team/v0/team_membership/{team_id},{username}
**Query Parameters for GET**
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
If the user is logged in and enrolled, or is course or global staff
the response contains:
* user: The user associated with the membership. This field may
contain an expanded or collapsed representation.
* team: The team associated with the membership. This field may
contain an expanded or collapsed representation.
* date_joined: The date and time the membership was created.
* last_activity_at: The date of the last activity of any team member
within the team.
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in and active, a 401 error is returned.
If specified team does not exist, a 404 error is returned.
If the user is logged in but is not enrolled in the course
associated with the specified team, or is not staff, a 404 error is
returned. This avoids leaking information about course or team
existence.
If the membership does not exist, a 404 error is returned.
**Response Values for DELETE**
Any logged in user enrolled in a course can remove themselves from
a team in the course. Course staff, global staff, and discussion
privileged users can remove any user from a team. Successfully
deleting a membership will return a 204 response with no content.
If the user is not logged in and active, a 401 error is returned.
If the specified team or username does not exist, a 404 error is
returned.
If the user is not staff or a discussion privileged user and is
attempting to remove another user from a team, a 404 error is
returned. This prevents leaking information about team and user
existence.
If the membership does not exist, a 404 error is returned.
"""
authentication_classes = (OAuth2Authentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
serializer_class = MembershipSerializer
def get_team(self, team_id):
"""Returns the team with team_id, or throws Http404 if it does not exist."""
try:
return CourseTeam.objects.get(team_id=team_id)
except CourseTeam.DoesNotExist:
raise Http404
def get_membership(self, username, team):
"""Returns the membership for the given user and team, or throws Http404 if it does not exist."""
try:
return CourseTeamMembership.objects.get(user__username=username, team=team)
except CourseTeamMembership.DoesNotExist:
raise Http404
def get(self, request, team_id, username):
"""GET /api/team/v0/team_membership/{team_id},{username}"""
team = self.get_team(team_id)
if not has_team_api_access(request.user, team.course_id):
return Response(status=status.HTTP_404_NOT_FOUND)
membership = self.get_membership(username, team)
serializer = self.get_serializer(instance=membership)
return Response(serializer.data)
def delete(self, request, team_id, username):
"""DELETE /api/team/v0/team_membership/{team_id},{username}"""
team = self.get_team(team_id)
if has_team_api_access(request.user, team.course_id, access_username=username):
membership = self.get_membership(username, team)
removal_method = 'self_removal'
if 'admin' in request.QUERY_PARAMS:
removal_method = 'removed_by_admin'
membership.delete()
emit_team_event(
'edx.team.learner_removed',
team.course_id,
{
'team_id': team.team_id,
'user_id': membership.user.id,
'remove_method': removal_method
}
)
return Response(status=status.HTTP_204_NO_CONTENT)
else:
return Response(status=status.HTTP_404_NOT_FOUND)
| agpl-3.0 | 2,759,747,251,989,360,600 | 39.950658 | 129 | 0.619267 | false |
jacksonwilliams/arsenalsuite | cpp/lib/PyQt4/pyuic/uic/port_v3/invoke.py | 7 | 1837 | #############################################################################
##
## Copyright (c) 2011 Riverbank Computing Limited <[email protected]>
##
## This file is part of PyQt.
##
## This file may be used under the terms of the GNU General Public
## License versions 2.0 or 3.0 as published by the Free Software
## Foundation and appearing in the files LICENSE.GPL2 and LICENSE.GPL3
## included in the packaging of this file. Alternatively you may (at
## your option) use any later version of the GNU General Public
## License if such license has been publicly approved by Riverbank
## Computing Limited (or its successors, if any) and the KDE Free Qt
## Foundation. In addition, as a special exception, Riverbank gives you
## certain additional rights. These rights are described in the Riverbank
## GPL Exception version 1.1, which can be found in the file
## GPL_EXCEPTION.txt in this package.
##
## If you are unsure which license is appropriate for your use, please
## contact the sales department at [email protected].
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
from PyQt4.uic.exceptions import NoSuchWidgetError
def invoke(driver):
""" Invoke the given command line driver. Return the exit status to be
passed back to the parent process.
"""
exit_status = 1
try:
exit_status = driver.invoke()
except IOError as e:
driver.on_IOError(e)
except SyntaxError as e:
driver.on_SyntaxError(e)
except NoSuchWidgetError as e:
driver.on_NoSuchWidgetError(e)
except Exception as e:
driver.on_Exception(e)
return exit_status
| gpl-2.0 | 601,557,978,863,623,200 | 33.660377 | 79 | 0.663038 | false |
thejordan95/Groovebot2 | plugins/admin.py | 1 | 7255 | from util import hook
import os, sys
import re
import json
import time
import subprocess
@hook.command(autohelp=False, permissions=["permissions_users"])
def permissions(inp, bot=None, notice=None):
"""permissions [group] -- lists the users and their permission level who have permissions."""
permissions = bot.config.get("permissions", [])
groups = []
if inp:
for k in permissions:
if inp == k:
groups.append(k)
else:
for k in permissions:
groups.append(k)
if not groups:
notice("{} is not a group with permissions".format(inp))
return None
for v in groups:
members = ""
for value in permissions[v]["users"]:
members = members + value + ", "
if members:
notice("the members in the {} group are..".format(v))
notice(members[:-2])
else:
notice("there are no members in the {} group".format(v))
@hook.command(permissions=["permissions_users"])
def deluser(inp, bot=None, notice=None):
"""deluser [user] [group] -- removes elevated permissions from [user].
If [group] is specified, they will only be removed from [group]."""
permissions = bot.config.get("permissions", [])
inp = inp.split(" ")
groups = []
try:
specgroup = inp[1]
except IndexError:
specgroup = None
for k in permissions:
groups.append(k)
else:
for k in permissions:
if specgroup == k:
groups.append(k)
if not groups:
notice("{} is not a group with permissions".format(inp[1]))
return None
removed = 0
for v in groups:
users = permissions[v]["users"]
for value in users:
if inp[0] == value:
users.remove(inp[0])
removed = 1
notice("{} has been removed from the group {}".format(inp[0], v))
json.dump(bot.config, open('config', 'w'), sort_keys=True, indent=2)
if specgroup:
if removed == 0:
notice("{} is not in the group {}".format(inp[0], specgroup))
else:
if removed == 0:
notice("{} is not in any groups".format(inp[0]))
@hook.command(permissions=["permissions_users"])
def adduser(inp, bot=None, notice=None):
"""adduser [user] [group] -- adds elevated permissions to [user].
[group] must be specified."""
permissions = bot.config.get("permissions", [])
inp = inp.split(" ")
try:
user = inp[0]
targetgroup = inp[1]
except IndexError:
notice("the group must be specified")
return None
if not re.search('.+!.+@.+', user):
notice("the user must be in the form of \"nick!user@host\"")
return None
try:
users = permissions[targetgroup]["users"]
except KeyError:
notice("no such group as {}".format(targetgroup))
return None
if user in users:
notice("{} is already in {}".format(user, targetgroup))
return None
users.append(user)
notice("{} has been added to the group {}".format(user, targetgroup))
users.sort()
json.dump(bot.config, open('config', 'w'), sort_keys=True, indent=2)
@hook.command("quit", autohelp=False, permissions=["botcontrol"])
@hook.command(autohelp=False, permissions=["botcontrol"])
def stop(inp, nick=None, conn=None):
"""stop [reason] -- Kills the bot with [reason] as its quit message."""
if inp:
conn.cmd("QUIT", ["Killed by {} ({})".format(nick, inp)])
else:
conn.cmd("QUIT", ["Killed by {}.".format(nick)])
time.sleep(5)
os.execl("./cloudbot", "cloudbot", "stop")
@hook.command(autohelp=False, permissions=["botcontrol"])
def restart(inp, nick=None, conn=None, bot=None):
"""restart [reason] -- Restarts the bot with [reason] as its quit message."""
for botcon in bot.conns:
if inp:
bot.conns[botcon].cmd("QUIT", ["Restarted by {} ({})".format(nick, inp)])
else:
bot.conns[botcon].cmd("QUIT", ["Restarted by {}.".format(nick)])
time.sleep(5)
#os.execl("./cloudbot", "cloudbot", "restart")
args = sys.argv[:]
args.insert(0, sys.executable)
os.execv(sys.executable, args)
@hook.command(autohelp=False, permissions=["botcontrol"])
def clearlogs(inp, input=None):
"""clearlogs -- Clears the bots log(s)."""
subprocess.call(["./cloudbot", "clear"])
@hook.command(permissions=["botcontrol"])
def join(inp, conn=None, notice=None):
"""join <channel> -- Joins <channel>."""
notice("Attempting to join {}...".format(inp))
conn.join(inp)
@hook.command(autohelp=False, permissions=["botcontrol"])
def part(inp, conn=None, chan=None, notice=None):
"""part <channel> -- Leaves <channel>.
If [channel] is blank the bot will leave the
channel the command was used in."""
if inp:
target = inp
else:
target = chan
notice("Attempting to leave {}...".format(target))
conn.part(target)
@hook.command(autohelp=False, permissions=["botcontrol"])
def cycle(inp, conn=None, chan=None, notice=None):
"""cycle <channel> -- Cycles <channel>.
If [channel] is blank the bot will cycle the
channel the command was used in."""
if inp:
target = inp
else:
target = chan
notice("Attempting to cycle {}...".format(target))
conn.part(target)
conn.join(target)
@hook.command(permissions=["botcontrol"])
def nick(inp, notice=None, conn=None):
"""nick <nick> -- Changes the bots nickname to <nick>."""
if not re.match("^[A-Za-z0-9_|.-\]\[]*$", inp.lower()):
notice("Invalid username!")
return
notice("Attempting to change nick to \"{}\"...".format(inp))
conn.set_nick(inp)
@hook.command(permissions=["botcontrol"])
def raw(inp, conn=None, notice=None):
"""raw <command> -- Sends a RAW IRC command."""
notice("Raw command sent.")
conn.send(inp)
@hook.command(permissions=["botcontrol"])
def say(inp, conn=None, chan=None):
"""say [channel] <message> -- Makes the bot say <message> in [channel].
If [channel] is blank the bot will say the <message> in the channel
the command was used in."""
inp = inp.split(" ")
if inp[0][0] == "#":
message = " ".join(inp[1:])
out = "PRIVMSG {} :{}".format(inp[0], message)
else:
message = " ".join(inp[0:])
out = "PRIVMSG {} :{}".format(chan, message)
conn.send(out)
@hook.command("act", permissions=["botcontrol"])
@hook.command(permissions=["botcontrol"])
def me(inp, conn=None, chan=None):
"""me [channel] <action> -- Makes the bot act out <action> in [channel].
If [channel] is blank the bot will act the <action> in the channel the
command was used in."""
inp = inp.split(" ")
if inp[0][0] == "#":
message = ""
for x in inp[1:]:
message = message + x + " "
message = message[:-1]
out = "PRIVMSG {} :\x01ACTION {}\x01".format(inp[0], message)
else:
message = ""
for x in inp[0:]:
message = message + x + " "
message = message[:-1]
out = "PRIVMSG {} :\x01ACTION {}\x01".format(chan, message)
conn.send(out)
| gpl-3.0 | 6,485,051,281,526,788,000 | 31.828054 | 97 | 0.587043 | false |
RedhawkSDR/integration-gnuhawk | gnuradio/gr-digital/examples/ofdm/benchmark_tx.py | 15 | 4302 | #!/usr/bin/env python
#
# Copyright 2005,2006,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import time, struct, sys
from gnuradio import digital
# from current dir
from transmit_path import transmit_path
from uhd_interface import uhd_transmitter
class my_top_block(gr.top_block):
def __init__(self, options):
gr.top_block.__init__(self)
if(options.tx_freq is not None):
self.sink = uhd_transmitter(options.args,
options.bandwidth,
options.tx_freq, options.tx_gain,
options.spec, options.antenna,
options.verbose)
elif(options.to_file is not None):
self.sink = gr.file_sink(gr.sizeof_gr_complex, options.to_file)
else:
self.sink = gr.null_sink(gr.sizeof_gr_complex)
# do this after for any adjustments to the options that may
# occur in the sinks (specifically the UHD sink)
self.txpath = transmit_path(options)
self.connect(self.txpath, self.sink)
# /////////////////////////////////////////////////////////////////////////////
# main
# /////////////////////////////////////////////////////////////////////////////
def main():
def send_pkt(payload='', eof=False):
return tb.txpath.send_pkt(payload, eof)
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
expert_grp = parser.add_option_group("Expert")
parser.add_option("-s", "--size", type="eng_float", default=400,
help="set packet size [default=%default]")
parser.add_option("-M", "--megabytes", type="eng_float", default=1.0,
help="set megabytes to transmit [default=%default]")
parser.add_option("","--discontinuous", action="store_true", default=False,
help="enable discontinuous mode")
parser.add_option("","--from-file", default=None,
help="use intput file for packet contents")
parser.add_option("","--to-file", default=None,
help="Output file for modulated samples")
transmit_path.add_options(parser, expert_grp)
digital.ofdm_mod.add_options(parser, expert_grp)
uhd_transmitter.add_options(parser)
(options, args) = parser.parse_args ()
# build the graph
tb = my_top_block(options)
r = gr.enable_realtime_scheduling()
if r != gr.RT_OK:
print "Warning: failed to enable realtime scheduling"
tb.start() # start flow graph
# generate and send packets
nbytes = int(1e6 * options.megabytes)
n = 0
pktno = 0
pkt_size = int(options.size)
while n < nbytes:
if options.from_file is None:
data = (pkt_size - 2) * chr(pktno & 0xff)
else:
data = source_file.read(pkt_size - 2)
if data == '':
break;
payload = struct.pack('!H', pktno & 0xffff) + data
send_pkt(payload)
n += len(payload)
sys.stderr.write('.')
if options.discontinuous and pktno % 5 == 4:
time.sleep(1)
pktno += 1
send_pkt(eof=True)
tb.wait() # wait for it to finish
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 | -3,181,240,704,205,923,300 | 34.262295 | 79 | 0.579265 | false |
andrewyoung1991/abjad | abjad/tools/rhythmmakertools/AccelerandoRhythmMaker.py | 1 | 169281 | # -*- encoding: utf-8 -*-
import math
from abjad.tools import datastructuretools
from abjad.tools import durationtools
from abjad.tools import scoretools
from abjad.tools import selectiontools
from abjad.tools.rhythmmakertools.RhythmMaker import RhythmMaker
from abjad.tools.topleveltools import attach
from abjad.tools.topleveltools import detach
from abjad.tools.topleveltools import inspect_
from abjad.tools.topleveltools import override
class AccelerandoRhythmMaker(RhythmMaker):
r'''Accelerando rhythm-maker.
.. container:: example
**Example 1.** Makes accelerando for each input division:
::
>>> maker = rhythmmakertools.AccelerandoRhythmMaker(
... beam_specifier=rhythmmakertools.BeamSpecifier(
... use_feather_beams=True,
... ),
... interpolation_specifiers=rhythmmakertools.InterpolationSpecifier(
... start_duration=Duration(1, 8),
... stop_duration=Duration(1, 20),
... written_duration=Duration(1, 16),
... ),
... tuplet_spelling_specifier=rhythmmakertools.TupletSpellingSpecifier(
... use_note_duration_bracket=True,
... ),
... )
::
>>> divisions = [(5, 8), (3, 8), (5, 8), (3, 8)]
>>> music = maker(divisions)
>>> lilypond_file = rhythmmakertools.make_lilypond_file(
... music,
... divisions,
... )
>>> show(lilypond_file) # doctest: +SKIP
.. doctest::
>>> staff = maker._get_rhythmic_staff(lilypond_file)
>>> print(format(staff))
\new RhythmicStaff {
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ]
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 117/64 [
c'16 * 99/64
c'16 * 69/64
c'16 * 13/16
c'16 * 47/64 ]
}
\revert TupletNumber #'text
}
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ]
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 117/64 [
c'16 * 99/64
c'16 * 69/64
c'16 * 13/16
c'16 * 47/64 ]
}
\revert TupletNumber #'text
}
}
.. container:: example
**Example 2.** Makes ritardando for each input division:
::
>>> maker = rhythmmakertools.AccelerandoRhythmMaker(
... beam_specifier=rhythmmakertools.BeamSpecifier(
... use_feather_beams=True,
... ),
... interpolation_specifiers=rhythmmakertools.InterpolationSpecifier(
... start_duration=Duration(1, 20),
... stop_duration=Duration(1, 8),
... written_duration=Duration(1, 16),
... ),
... tuplet_spelling_specifier=rhythmmakertools.TupletSpellingSpecifier(
... use_note_duration_bracket=True,
... ),
... )
::
>>> divisions = [(5, 8), (3, 8), (5, 8), (3, 8)]
>>> music = maker(divisions)
>>> lilypond_file = rhythmmakertools.make_lilypond_file(
... music,
... divisions,
... )
>>> show(lilypond_file) # doctest: +SKIP
.. doctest::
>>> staff = maker._get_rhythmic_staff(lilypond_file)
>>> print(format(staff))
\new RhythmicStaff {
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #left
c'16 * 45/64 [
c'16 * 23/32
c'16 * 25/32
c'16 * 55/64
c'16 * 1
c'16 * 75/64
c'16 * 89/64
c'16 * 103/64
c'16 * 113/64 ]
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #left
c'16 * 5/8 [
c'16 * 43/64
c'16 * 51/64
c'16 * 65/64
c'16 * 85/64
c'16 * 25/16 ]
}
\revert TupletNumber #'text
}
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #left
c'16 * 45/64 [
c'16 * 23/32
c'16 * 25/32
c'16 * 55/64
c'16 * 1
c'16 * 75/64
c'16 * 89/64
c'16 * 103/64
c'16 * 113/64 ]
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #left
c'16 * 5/8 [
c'16 * 43/64
c'16 * 51/64
c'16 * 65/64
c'16 * 85/64
c'16 * 25/16 ]
}
\revert TupletNumber #'text
}
}
Set `written_duration` to `1/16` or less for multiple beams.
Usage follows the two-step configure-once / call-repeatedly pattern shown
here.
'''
### CLASS VARIABLES ###
__slots__ = (
'_exponent',
'_interpolation_specifiers',
)
_class_name_abbreviation = 'Acc'
_human_readable_class_name = 'accelerando rhythm-maker'
### INITIALIZER ###
def __init__(
self,
beam_specifier=None,
duration_spelling_specifier=None,
interpolation_specifiers=None,
output_masks=None,
tie_specifier=None,
tuplet_spelling_specifier=None,
):
RhythmMaker.__init__(
self,
beam_specifier=beam_specifier,
duration_spelling_specifier=duration_spelling_specifier,
output_masks=output_masks,
tie_specifier=tie_specifier,
tuplet_spelling_specifier=tuplet_spelling_specifier,
)
self._interpolation_specifiers = interpolation_specifiers
### SPECIAL METHODS ###
def __call__(self, divisions, rotation=None):
        r'''Calls accelerando rhythm-maker on `divisions`.
Ignores `rotation`.
Returns list of selections.
'''
return RhythmMaker.__call__(
self,
divisions,
rotation=rotation,
)
### PRIVATE METHODS ###
def _fix_rounding_error(
self,
selection,
total_duration,
interpolation_specifier,
):
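        # replace the multiplier on the last note so that the selection's
        # duration matches the requested total duration exactly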
selection_duration = selection.get_duration()
if not selection_duration == total_duration:
needed_duration = total_duration - selection[:-1].get_duration()
multiplier = needed_duration / \
interpolation_specifier.written_duration
multiplier = durationtools.Multiplier(multiplier)
detach(durationtools.Multiplier, selection[-1])
attach(multiplier, selection[-1])
def _get_interpolation_specifiers(self):
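        # normalize `interpolation_specifiers` into a cyclic tuple, falling
        # back to a single default specifier when none is given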
from abjad.tools import rhythmmakertools
specifiers = self.interpolation_specifiers
if specifiers is None:
specifiers = datastructuretools.CyclicTuple([
rhythmmakertools.InterpolationSpecifier(),
])
elif isinstance(specifiers, rhythmmakertools.InterpolationSpecifier):
specifiers = datastructuretools.CyclicTuple([specifiers])
else:
specifiers = datastructuretools.CyclicTuple(specifiers)
return specifiers
@staticmethod
def _interpolate_cosine(y1, y2, mu):
        r'''Performs cosine interpolation of `y1` and `y2` with `mu` ``[0, 1]``
normalized:
::
>>> rhythmmakertools.AccelerandoRhythmMaker._interpolate_cosine(
... y1=0,
... y2=1,
... mu=0.5,
... )
0.49999999999999994
Returns float.
'''
mu2 = (1 - math.cos(mu * math.pi)) / 2
return (y1 * (1 - mu2) + y2 * mu2)
@staticmethod
def _interpolate_divide(
total_duration,
start_duration,
stop_duration,
exponent='cosine',
):
r'''Divides `total_duration` into durations computed from interpolating
between `start_duration` and `stop_duration`:
::
>>> rhythmmakertools.AccelerandoRhythmMaker._interpolate_divide(
... total_duration=10,
... start_duration=1,
... stop_duration=1,
... exponent=1,
... )
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
>>> sum(_)
10.0
::
>>> rhythmmakertools.AccelerandoRhythmMaker._interpolate_divide(
... total_duration=10,
... start_duration=5,
... stop_duration=1,
... )
[4.798..., 2.879..., 1.326..., 0.995...]
>>> sum(_)
10.0
Set `exponent` to ``'cosine'`` for cosine interpolation.
Set `exponent` to a numeric value for exponential interpolation with
`exponent` as the exponent.
Scales resulting durations so that their sum equals `total_duration`
exactly.
Returns a list of floats.
'''
if total_duration <= 0:
message = "Total duration must be positive."
raise ValueError(message)
if start_duration <= 0 or stop_duration <= 0:
message = "Both 'start_duration' and 'stop_duration'"
message += ' must be positive.'
raise ValueError(message)
if total_duration < (stop_duration + start_duration):
return 'too small'
durations = []
total_duration = float(total_duration)
partial_sum = 0
while partial_sum < total_duration:
if exponent == 'cosine':
duration = AccelerandoRhythmMaker._interpolate_cosine(
start_duration,
stop_duration,
partial_sum / total_duration,
)
else:
duration = AccelerandoRhythmMaker._interpolate_exponential(
start_duration,
stop_duration,
partial_sum / total_duration,
exponent,
)
durations.append(duration)
partial_sum += duration
        # scale result to fit total exactly
durations = [_ * total_duration / sum(durations) for _ in durations]
return durations
@staticmethod
def _interpolate_divide_multiple(
total_durations,
reference_durations,
exponent='cosine',
):
'''Interpolates `reference_durations` such that the sum of the
resulting interpolated values equals the given `total_durations`:
::
>>> durations = rhythmmakertools.AccelerandoRhythmMaker._interpolate_divide_multiple(
... total_durations=[100, 50],
... reference_durations=[20, 10, 20],
... )
>>> for duration in durations:
... duration
19.448...
18.520...
16.227...
13.715...
11.748...
10.487...
9.8515...
9.5130...
10.421...
13.073...
16.991...
        The operation is the same as the ``_interpolate_divide()`` method
        implemented on this class, except that this function takes multiple
        total durations and multiple reference durations at one time.
        Precondition: ``len(total_durations) == len(reference_durations) - 1``.
        Set `exponent` to ``'cosine'`` for cosine interpolation. Set `exponent`
        to a numeric value for exponential interpolation.
Returns a list of floats.
'''
assert len(total_durations) == len(reference_durations) - 1
durations = []
for i in range(len(total_durations)):
durations_ = AccelerandoRhythmMaker._interpolate_divide(
total_durations[i],
reference_durations[i],
reference_durations[i + 1],
exponent,
)
# we want a flat list
durations.extend(durations_)
return durations
@staticmethod
def _interpolate_exponential(y1, y2, mu, exponent=1):
r'''Performs exponential interpolation from `y1` to `y2` with `mu`
``[0, 1]`` normalized:
::
>>> rhythmmakertools.AccelerandoRhythmMaker._interpolate_exponential(
... y1=0,
... y2=1,
... mu=0.5,
... exponent=4,
... )
0.0625
Set `exponent` equal to the exponent of interpolation.
Returns float.
'''
result = (y1 * (1 - mu ** exponent) + y2 * mu ** exponent)
return result
def _is_accelerando(self, selection):
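        # an accelerando when the last note is shorter than the first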
first_duration = inspect_(selection[0]).get_duration()
last_duration = inspect_(selection[-1]).get_duration()
if last_duration < first_duration:
return True
return False
def _is_ritardando(self, selection):
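        # a ritardando when the first note is shorter than the last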
first_duration = inspect_(selection[0]).get_duration()
last_duration = inspect_(selection[-1]).get_duration()
if first_duration < last_duration:
return True
return False
def _make_accelerando(self, total_duration, index):
        r'''Makes notes with LilyPond multipliers.
        Makes as many notes as necessary to fill the `total_duration`
        requested.
        Computes duration multipliers interpolated from the start duration to
        the stop duration of the interpolation specifier at `index`.
        Sets the written duration of each note to the specifier's written
        duration and attaches the interpolated multiplier to the note.
        Returns a selection containing a single tuplet, or plain notes when
        the division is too short to interpolate.
'''
from abjad.tools import rhythmmakertools
total_duration = durationtools.Duration(total_duration)
interpolation_specifiers = self._get_interpolation_specifiers()
interpolation_specifier = interpolation_specifiers[index]
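        # interpolate raw (float) note durations from the specifier's start
        # duration to its stop duration across the division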
durations = AccelerandoRhythmMaker._interpolate_divide(
total_duration=total_duration,
start_duration=interpolation_specifier.start_duration,
stop_duration=interpolation_specifier.stop_duration,
)
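        # fall back to unmodified (possibly tied) notes when the division is
        # too short to hold both the start and stop durations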
if durations == 'too small':
notes = scoretools.make_notes([0], [total_duration])
return notes
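        # quantize the raw durations to the nearest 1/1024 of a whole note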
durations = [
durationtools.Duration(int(round(_ * 2**10)), 2**10)
for _ in durations
]
notes = []
for i, duration in enumerate(durations):
note = scoretools.Note(0, interpolation_specifier.written_duration)
multiplier = duration / interpolation_specifier.written_duration
multiplier = durationtools.Multiplier(multiplier)
attach(multiplier, note)
notes.append(note)
selection = selectiontools.Selection(notes)
self._fix_rounding_error(
selection,
total_duration,
interpolation_specifier,
)
pair = (selection.get_duration(), total_duration)
assert pair[0] == pair[1], repr(pair)
beam_specifier = self._get_beam_specifier()
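        # when feather beams are in use, grow the beam to the right for
        # accelerandi and to the left for ritardandi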
if not beam_specifier.use_feather_beams:
pass
elif self._is_accelerando(selection):
override(selection[0]).beam.grow_direction = Right
elif self._is_ritardando(selection):
override(selection[0]).beam.grow_direction = Left
tuplet = scoretools.Tuplet((1, 1), selection)
tuplet_spelling_specifier = self._get_tuplet_spelling_specifier()
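        # optionally replace the tuplet number with scaled markup showing the
        # division's total duration in note values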
if tuplet_spelling_specifier.use_note_duration_bracket:
tuplet.force_times_command = True
duration = inspect_(tuplet).get_duration()
markup = duration.to_score_markup()
markup = markup.scale((0.75, 0.75))
override(tuplet).tuplet_number.text = markup
selection = selectiontools.Selection([tuplet])
return selection
def _make_music(self, divisions, rotation):
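        # make one accelerando (or ritardando) selection per division, then
        # apply beaming and any output masks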
selections = []
for index, division in enumerate(divisions):
accelerando = self._make_accelerando(division, index)
selections.append(accelerando)
self._apply_beam_specifier(selections)
selections = self._apply_output_masks(selections, rotation)
return selections
### PUBLIC PROPERTIES ###
@property
def beam_specifier(self):
r'''Gets beam specifier of accelerando rhythm-maker.
.. container:: example
**Example 1.** Feather beams each division:
::
>>> maker = rhythmmakertools.AccelerandoRhythmMaker(
... beam_specifier=rhythmmakertools.BeamSpecifier(
... beam_each_division=True,
... use_feather_beams=True,
... ),
... interpolation_specifiers=rhythmmakertools.InterpolationSpecifier(
... start_duration=Duration(1, 8),
... stop_duration=Duration(1, 20),
... written_duration=Duration(1, 16),
... ),
... tuplet_spelling_specifier=rhythmmakertools.TupletSpellingSpecifier(
... use_note_duration_bracket=True,
... ),
... )
::
>>> divisions = [(5, 8), (3, 8), (5, 8), (3, 8)]
>>> music = maker(divisions)
>>> lilypond_file = rhythmmakertools.make_lilypond_file(
... music,
... divisions,
... )
>>> show(lilypond_file) # doctest: +SKIP
.. doctest::
>>> staff = maker._get_rhythmic_staff(lilypond_file)
>>> print(format(staff))
\new RhythmicStaff {
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ]
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 117/64 [
c'16 * 99/64
c'16 * 69/64
c'16 * 13/16
c'16 * 47/64 ]
}
\revert TupletNumber #'text
}
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ]
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 117/64 [
c'16 * 99/64
c'16 * 69/64
c'16 * 13/16
c'16 * 47/64 ]
}
\revert TupletNumber #'text
}
}
.. container:: example
**Example 2.** Beams divisions together (without feathering):
::
>>> maker = rhythmmakertools.AccelerandoRhythmMaker(
... beam_specifier=rhythmmakertools.BeamSpecifier(
... beam_divisions_together=True,
... use_feather_beams=False,
... ),
... interpolation_specifiers=rhythmmakertools.InterpolationSpecifier(
... start_duration=Duration(1, 8),
... stop_duration=Duration(1, 20),
... written_duration=Duration(1, 16),
... ),
... tuplet_spelling_specifier=rhythmmakertools.TupletSpellingSpecifier(
... use_note_duration_bracket=True,
... ),
... )
::
>>> divisions = [(5, 8), (3, 8), (5, 8), (3, 8)]
>>> music = maker(divisions)
>>> lilypond_file = rhythmmakertools.make_lilypond_file(
... music,
... divisions,
... )
>>> show(lilypond_file) # doctest: +SKIP
.. doctest::
>>> staff = maker._get_rhythmic_staff(lilypond_file)
>>> print(format(staff))
\new RhythmicStaff {
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\set stemLeftBeamCount = #0
\set stemRightBeamCount = #2
c'16 * 61/32 [
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #2
c'16 * 115/64
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #2
c'16 * 49/32
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #2
c'16 * 5/4
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #2
c'16 * 33/32
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #2
c'16 * 57/64
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #2
c'16 * 13/16
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #1
c'16 * 25/32
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\set stemLeftBeamCount = #1
\set stemRightBeamCount = #2
c'16 * 117/64
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #2
c'16 * 99/64
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #2
c'16 * 69/64
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #2
c'16 * 13/16
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #1
c'16 * 47/64
}
\revert TupletNumber #'text
}
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\set stemLeftBeamCount = #1
\set stemRightBeamCount = #2
c'16 * 61/32
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #2
c'16 * 115/64
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #2
c'16 * 49/32
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #2
c'16 * 5/4
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #2
c'16 * 33/32
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #2
c'16 * 57/64
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #2
c'16 * 13/16
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #1
c'16 * 25/32
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\set stemLeftBeamCount = #1
\set stemRightBeamCount = #2
c'16 * 117/64
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #2
c'16 * 99/64
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #2
c'16 * 69/64
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #2
c'16 * 13/16
\set stemLeftBeamCount = #2
\set stemRightBeamCount = #0
c'16 * 47/64 ]
}
\revert TupletNumber #'text
}
}
It is important to leave feathering turned off here
because LilyPond feathers conjoint beams poorly.
.. container:: example
**Example 3.** Makes no beams:
::
>>> maker = rhythmmakertools.AccelerandoRhythmMaker(
... beam_specifier=rhythmmakertools.BeamSpecifier(
... beam_divisions_together=False,
... beam_each_division=False,
... ),
... interpolation_specifiers=rhythmmakertools.InterpolationSpecifier(
... start_duration=Duration(1, 8),
... stop_duration=Duration(1, 20),
... written_duration=Duration(1, 16),
... ),
... tuplet_spelling_specifier=rhythmmakertools.TupletSpellingSpecifier(
... use_note_duration_bracket=True,
... ),
... )
::
>>> divisions = [(5, 8), (3, 8), (5, 8), (3, 8)]
>>> music = maker(divisions)
>>> lilypond_file = rhythmmakertools.make_lilypond_file(
... music,
... divisions,
... )
>>> show(lilypond_file) # doctest: +SKIP
.. doctest::
>>> staff = maker._get_rhythmic_staff(lilypond_file)
>>> print(format(staff))
\new RhythmicStaff {
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
c'16 * 61/32
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
c'16 * 117/64
c'16 * 99/64
c'16 * 69/64
c'16 * 13/16
c'16 * 47/64
}
\revert TupletNumber #'text
}
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
c'16 * 61/32
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
c'16 * 117/64
c'16 * 99/64
c'16 * 69/64
c'16 * 13/16
c'16 * 47/64
}
\revert TupletNumber #'text
}
}
Returns beam specifier.
'''
superclass = super(AccelerandoRhythmMaker, self)
return superclass.beam_specifier
@property
def interpolation_specifiers(self):
r'''Gets interpolation specifier of accelerando rhythm-maker.
.. container:: example
**Example 1.** Makes accelerando for each input division:
::
>>> maker = rhythmmakertools.AccelerandoRhythmMaker(
... beam_specifier=rhythmmakertools.BeamSpecifier(
... use_feather_beams=True,
... ),
... interpolation_specifiers=rhythmmakertools.InterpolationSpecifier(
... start_duration=Duration(1, 8),
... stop_duration=Duration(1, 20),
... written_duration=Duration(1, 16),
... ),
... tuplet_spelling_specifier=rhythmmakertools.TupletSpellingSpecifier(
... use_note_duration_bracket=True,
... ),
... )
::
>>> divisions = [(5, 8), (3, 8), (5, 8), (3, 8)]
>>> music = maker(divisions)
>>> lilypond_file = rhythmmakertools.make_lilypond_file(
... music,
... divisions,
... )
>>> show(lilypond_file) # doctest: +SKIP
.. doctest::
>>> staff = maker._get_rhythmic_staff(lilypond_file)
>>> print(format(staff))
\new RhythmicStaff {
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ]
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 117/64 [
c'16 * 99/64
c'16 * 69/64
c'16 * 13/16
c'16 * 47/64 ]
}
\revert TupletNumber #'text
}
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ]
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 117/64 [
c'16 * 99/64
c'16 * 69/64
c'16 * 13/16
c'16 * 47/64 ]
}
\revert TupletNumber #'text
}
}
.. container:: example
**Example 2.** Makes accelerandi and ritardandi on alternate
divisions:
::
>>> maker = rhythmmakertools.AccelerandoRhythmMaker(
... beam_specifier=rhythmmakertools.BeamSpecifier(
... use_feather_beams=True,
... ),
... interpolation_specifiers=[
... rhythmmakertools.InterpolationSpecifier(
... start_duration=Duration(1, 8),
... stop_duration=Duration(1, 20),
... written_duration=Duration(1, 16),
... ),
... rhythmmakertools.InterpolationSpecifier(
... start_duration=Duration(1, 20),
... stop_duration=Duration(1, 8),
... written_duration=Duration(1, 16),
... ),
... ],
... tuplet_spelling_specifier=rhythmmakertools.TupletSpellingSpecifier(
... use_note_duration_bracket=True,
... ),
... )
::
>>> divisions = [(5, 8), (3, 8), (5, 8), (3, 8)]
>>> music = maker(divisions)
>>> lilypond_file = rhythmmakertools.make_lilypond_file(
... music,
... divisions,
... )
>>> show(lilypond_file) # doctest: +SKIP
.. doctest::
>>> staff = maker._get_rhythmic_staff(lilypond_file)
>>> print(format(staff))
\new RhythmicStaff {
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ]
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #left
c'16 * 5/8 [
c'16 * 43/64
c'16 * 51/64
c'16 * 65/64
c'16 * 85/64
c'16 * 25/16 ]
}
\revert TupletNumber #'text
}
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ]
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #left
c'16 * 5/8 [
c'16 * 43/64
c'16 * 51/64
c'16 * 65/64
c'16 * 85/64
c'16 * 25/16 ]
}
\revert TupletNumber #'text
}
}
.. container:: example
**Example 3.** Makes a single note in the case that interpolation
would take too long for a given division:
::
>>> maker = rhythmmakertools.AccelerandoRhythmMaker(
... beam_specifier=rhythmmakertools.BeamSpecifier(
... use_feather_beams=True,
... ),
... interpolation_specifiers=rhythmmakertools.InterpolationSpecifier(
... start_duration=Duration(1, 8),
... stop_duration=Duration(1, 20),
... written_duration=Duration(1, 16),
... ),
... tuplet_spelling_specifier=rhythmmakertools.TupletSpellingSpecifier(
... use_note_duration_bracket=True,
... ),
... )
::
>>> divisions = [(5, 8), (3, 8), (1, 8)]
>>> music = maker(divisions)
>>> lilypond_file = rhythmmakertools.make_lilypond_file(
... music,
... divisions,
... )
>>> show(lilypond_file) # doctest: +SKIP
.. doctest::
>>> staff = maker._get_rhythmic_staff(lilypond_file)
>>> print(format(staff))
\new RhythmicStaff {
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ]
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 117/64 [
c'16 * 99/64
c'16 * 69/64
c'16 * 13/16
c'16 * 47/64 ]
}
\revert TupletNumber #'text
}
{
\time 1/8
c'8
}
}
Defaults to none.
Set to interpolation specifier or none.
Returns interpolation specifier or none.
'''
return self._interpolation_specifiers
@property
def output_masks(self):
r'''Gets output masks of accelerando rhythm-maker.
.. container:: example
**Example 1.** No output masks:
::
>>> maker = rhythmmakertools.AccelerandoRhythmMaker(
... beam_specifier=rhythmmakertools.BeamSpecifier(
... use_feather_beams=True,
... ),
... interpolation_specifiers=rhythmmakertools.InterpolationSpecifier(
... start_duration=Duration(1, 8),
... stop_duration=Duration(1, 20),
... written_duration=Duration(1, 16),
... ),
... output_masks=None,
... tuplet_spelling_specifier=rhythmmakertools.TupletSpellingSpecifier(
... use_note_duration_bracket=True,
... ),
... )
::
>>> divisions = [(5, 8), (3, 8), (5, 8), (3, 8)]
>>> music = maker(divisions)
>>> lilypond_file = rhythmmakertools.make_lilypond_file(
... music,
... divisions,
... )
>>> show(lilypond_file) # doctest: +SKIP
.. doctest::
>>> staff = maker._get_rhythmic_staff(lilypond_file)
>>> print(format(staff))
\new RhythmicStaff {
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ]
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 117/64 [
c'16 * 99/64
c'16 * 69/64
c'16 * 13/16
c'16 * 47/64 ]
}
\revert TupletNumber #'text
}
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ]
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 117/64 [
c'16 * 99/64
c'16 * 69/64
c'16 * 13/16
c'16 * 47/64 ]
}
\revert TupletNumber #'text
}
}
.. container:: example
**Example 2.** Silences every other division:
::
>>> maker = rhythmmakertools.AccelerandoRhythmMaker(
... beam_specifier=rhythmmakertools.BeamSpecifier(
... use_feather_beams=True,
... ),
... interpolation_specifiers=rhythmmakertools.InterpolationSpecifier(
... start_duration=Duration(1, 8),
... stop_duration=Duration(1, 20),
... written_duration=Duration(1, 16),
... ),
... output_masks=[
... rhythmmakertools.SilenceMask(
... indices=[1],
... period=2,
... ),
... ],
... tuplet_spelling_specifier=rhythmmakertools.TupletSpellingSpecifier(
... use_note_duration_bracket=True,
... ),
... )
::
>>> divisions = [(5, 8), (3, 8), (5, 8), (3, 8)]
>>> music = maker(divisions)
>>> lilypond_file = rhythmmakertools.make_lilypond_file(
... music,
... divisions,
... )
>>> show(lilypond_file) # doctest: +SKIP
.. doctest::
>>> staff = maker._get_rhythmic_staff(lilypond_file)
>>> print(format(staff))
\new RhythmicStaff {
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ]
}
\revert TupletNumber #'text
}
{
\time 3/8
r4.
}
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ]
}
\revert TupletNumber #'text
}
{
\time 3/8
r4.
}
}
'''
superclass = super(AccelerandoRhythmMaker, self)
return superclass.output_masks
@property
def tie_specifier(self):
r'''Gets tie specifier of rhythm-maker.
.. container:: example
**Example 1.** Does not tie across divisions:
::
>>> maker = rhythmmakertools.AccelerandoRhythmMaker(
... beam_specifier=rhythmmakertools.BeamSpecifier(
... use_feather_beams=True,
... ),
... interpolation_specifiers=rhythmmakertools.InterpolationSpecifier(
... start_duration=Duration(1, 8),
... stop_duration=Duration(1, 20),
... written_duration=Duration(1, 16),
... ),
... tie_specifier=rhythmmakertools.TieSpecifier(
... tie_across_divisions=False,
... ),
... tuplet_spelling_specifier=rhythmmakertools.TupletSpellingSpecifier(
... use_note_duration_bracket=True,
... ),
... )
::
>>> divisions = [(5, 8), (3, 8), (5, 8), (3, 8)]
>>> music = maker(divisions)
>>> lilypond_file = rhythmmakertools.make_lilypond_file(
... music,
... divisions,
... )
>>> show(lilypond_file) # doctest: +SKIP
.. doctest::
>>> staff = maker._get_rhythmic_staff(lilypond_file)
>>> print(format(staff))
\new RhythmicStaff {
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ]
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 117/64 [
c'16 * 99/64
c'16 * 69/64
c'16 * 13/16
c'16 * 47/64 ]
}
\revert TupletNumber #'text
}
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ]
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 117/64 [
c'16 * 99/64
c'16 * 69/64
c'16 * 13/16
c'16 * 47/64 ]
}
\revert TupletNumber #'text
}
}
.. container:: example
**Example 2.** Ties across divisions:
::
>>> maker = rhythmmakertools.AccelerandoRhythmMaker(
... beam_specifier=rhythmmakertools.BeamSpecifier(
... use_feather_beams=True,
... ),
... interpolation_specifiers=rhythmmakertools.InterpolationSpecifier(
... start_duration=Duration(1, 8),
... stop_duration=Duration(1, 20),
... written_duration=Duration(1, 16),
... ),
... tie_specifier=rhythmmakertools.TieSpecifier(
... tie_across_divisions=True,
... ),
... tuplet_spelling_specifier=rhythmmakertools.TupletSpellingSpecifier(
... use_note_duration_bracket=True,
... ),
... )
::
>>> divisions = [(5, 8), (3, 8), (5, 8), (3, 8)]
>>> music = maker(divisions)
>>> lilypond_file = rhythmmakertools.make_lilypond_file(
... music,
... divisions,
... )
>>> show(lilypond_file) # doctest: +SKIP
.. doctest::
>>> staff = maker._get_rhythmic_staff(lilypond_file)
>>> print(format(staff))
\new RhythmicStaff {
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ~ ]
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 117/64 [
c'16 * 99/64
c'16 * 69/64
c'16 * 13/16
c'16 * 47/64 ~ ]
}
\revert TupletNumber #'text
}
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ~ ]
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 117/64 [
c'16 * 99/64
c'16 * 69/64
c'16 * 13/16
c'16 * 47/64 ]
}
\revert TupletNumber #'text
}
}
.. container:: example
**Example 3.** Patterns ties across divisions:
::
>>> pattern = rhythmmakertools.BooleanPattern(
... indices=[0],
... period=2,
... )
>>> maker = rhythmmakertools.AccelerandoRhythmMaker(
... beam_specifier=rhythmmakertools.BeamSpecifier(
... use_feather_beams=True,
... ),
... interpolation_specifiers=rhythmmakertools.InterpolationSpecifier(
... start_duration=Duration(1, 8),
... stop_duration=Duration(1, 20),
... written_duration=Duration(1, 16),
... ),
... tie_specifier=rhythmmakertools.TieSpecifier(
... tie_across_divisions=pattern,
... ),
... tuplet_spelling_specifier=rhythmmakertools.TupletSpellingSpecifier(
... use_note_duration_bracket=True,
... ),
... )
::
>>> divisions = [(5, 8), (3, 8), (5, 8), (3, 8)]
>>> music = maker(divisions)
>>> lilypond_file = rhythmmakertools.make_lilypond_file(
... music,
... divisions,
... )
>>> show(lilypond_file) # doctest: +SKIP
.. doctest::
>>> staff = maker._get_rhythmic_staff(lilypond_file)
>>> print(format(staff))
\new RhythmicStaff {
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ~ ]
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 117/64 [
c'16 * 99/64
c'16 * 69/64
c'16 * 13/16
c'16 * 47/64 ]
}
\revert TupletNumber #'text
}
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ~ ]
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 117/64 [
c'16 * 99/64
c'16 * 69/64
c'16 * 13/16
c'16 * 47/64 ]
}
\revert TupletNumber #'text
}
}
Returns tie specifier.
'''
superclass = super(AccelerandoRhythmMaker, self)
return superclass.tie_specifier
@property
def tuplet_spelling_specifier(self):
r'''Gets tuplet spelling specifier of accelerando rhythm-maker.
.. container:: example
**Example 1.** Tuplets use note duration bracket:
::
>>> maker = rhythmmakertools.AccelerandoRhythmMaker(
... beam_specifier=rhythmmakertools.BeamSpecifier(
... use_feather_beams=True,
... ),
... interpolation_specifiers=rhythmmakertools.InterpolationSpecifier(
... start_duration=Duration(1, 8),
... stop_duration=Duration(1, 20),
... written_duration=Duration(1, 16),
... ),
... tie_specifier=rhythmmakertools.TieSpecifier(
... tie_across_divisions=False,
... ),
... tuplet_spelling_specifier=rhythmmakertools.TupletSpellingSpecifier(
... use_note_duration_bracket=True,
... ),
... )
::
>>> divisions = [(5, 8), (3, 8), (5, 8), (3, 8)]
>>> music = maker(divisions)
>>> lilypond_file = rhythmmakertools.make_lilypond_file(
... music,
... divisions,
... )
>>> show(lilypond_file) # doctest: +SKIP
.. doctest::
>>> staff = maker._get_rhythmic_staff(lilypond_file)
>>> print(format(staff))
\new RhythmicStaff {
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ]
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 117/64 [
c'16 * 99/64
c'16 * 69/64
c'16 * 13/16
c'16 * 47/64 ]
}
\revert TupletNumber #'text
}
{
\time 5/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'2 ~
c'8
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ]
}
\revert TupletNumber #'text
}
{
\time 3/8
\override TupletNumber #'text = \markup {
\scale
#'(0.75 . 0.75)
\score
{
\new Score \with {
\override SpacingSpanner #'spacing-increment = #0.5
proportionalNotationDuration = ##f
} <<
\new RhythmicStaff \with {
\remove Time_signature_engraver
\remove Staff_symbol_engraver
\override Stem #'direction = #up
\override Stem #'length = #5
\override TupletBracket #'bracket-visibility = ##t
\override TupletBracket #'direction = #up
\override TupletBracket #'padding = #1.25
\override TupletBracket #'shorten-pair = #'(-1 . -1.5)
\override TupletNumber #'text = #tuplet-number::calc-fraction-text
tupletFullLength = ##t
} {
c'4.
}
>>
\layout {
indent = #0
ragged-right = ##t
}
}
}
\times 1/1 {
\once \override Beam #'grow-direction = #right
c'16 * 117/64 [
c'16 * 99/64
c'16 * 69/64
c'16 * 13/16
c'16 * 47/64 ]
}
\revert TupletNumber #'text
}
}
.. container:: example
**Example 2.** Tuplets do not use note duration bracket:
::
>>> maker = rhythmmakertools.AccelerandoRhythmMaker(
... beam_specifier=rhythmmakertools.BeamSpecifier(
... use_feather_beams=True,
... ),
... interpolation_specifiers=rhythmmakertools.InterpolationSpecifier(
... start_duration=Duration(1, 8),
... stop_duration=Duration(1, 20),
... written_duration=Duration(1, 16),
... ),
... tie_specifier=rhythmmakertools.TieSpecifier(
... tie_across_divisions=False,
... ),
... tuplet_spelling_specifier=rhythmmakertools.TupletSpellingSpecifier(
... use_note_duration_bracket=False,
... ),
... )
::
>>> divisions = [(5, 8), (3, 8), (5, 8), (3, 8)]
>>> music = maker(divisions)
>>> lilypond_file = rhythmmakertools.make_lilypond_file(
... music,
... divisions,
... )
>>> show(lilypond_file) # doctest: +SKIP
.. doctest::
>>> staff = maker._get_rhythmic_staff(lilypond_file)
>>> print(format(staff))
\new RhythmicStaff {
{
\time 5/8
{
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ]
}
}
{
\time 3/8
{
\once \override Beam #'grow-direction = #right
c'16 * 117/64 [
c'16 * 99/64
c'16 * 69/64
c'16 * 13/16
c'16 * 47/64 ]
}
}
{
\time 5/8
{
\once \override Beam #'grow-direction = #right
c'16 * 61/32 [
c'16 * 115/64
c'16 * 49/32
c'16 * 5/4
c'16 * 33/32
c'16 * 57/64
c'16 * 13/16
c'16 * 25/32 ]
}
}
{
\time 3/8
{
\once \override Beam #'grow-direction = #right
c'16 * 117/64 [
c'16 * 99/64
c'16 * 69/64
c'16 * 13/16
c'16 * 47/64 ]
}
}
}
Returns tuplet spelling specifier or none.
'''
superclass = super(AccelerandoRhythmMaker, self)
return superclass.tuplet_spelling_specifier | gpl-3.0 | 3,388,766,652,809,431,600 | 48.744931 | 114 | 0.290482 | false |
momikey/pyrge | music.py | 1 | 5751 | import pygame
__doc__ = """Module for streamed sound or music.
The L{Music} class can be used to play any sound, but it is particularly
useful for the playback of large music files, since it is optimized for
low memory usage. This object builds on the functionality provided by Pygame,
mainly by supporting more than a single music track. Only one track can be
played at any time, but multiple Music objects can be created.
The Music object supports a variety of formats, including both sample-based
(MP3, OGG) and pattern-based (MOD, XM) formats. Also, there are methods for
playback control, including pause, stop, rewind, looping, and volume control.
"""
__all__ = ['Music']
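# Hedged usage sketch (not part of the original module): "theme.ogg" is a
# hypothetical file name; the rest of the game setup is assumed to happen
# elsewhere.
def _example_usage():
    pygame.mixer.init()
    background = Music("theme.ogg")
    background.play(times=-1, volume=0.5)  # loop indefinitely at half volume
    background.pause()                      # remembers the playback position
    background.unpause()                    # resumes from that position
    background.stop()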
class Music(object):
"""A background music object. The file passed to the constructor will
not be loaded into memory yet, and playback will not be started. This
allows for multiple music streams, though, due to a limitation in the
underlying system, only one Music object can be playing at a time.
The music file can be in any format supported by Pygame and SDL,
including MP3, OGG, and MOD.
@cvar current: The filename or file object of the currently-playing Music.
@param musicfile: The filename or file object of the music.
"""
current = None
def __init__(self, musicfile):
self.filename = musicfile
self.__pausetime = 0.0
self.__volume = None
self.__endevent = None
def play(self, times=0, startpos=0.0, volume=None, endevent=None):
"""Starts playback of this object's music file.
@param times: The number of times this music will be played.
0 will play music once, -1 will loop indefinitely, and any
other number will cause the music to loop that many times.
@param startpos: The starting position of music playback.
This is in seconds for sample-based music formats like MP3,
or in patterns for pattern-based formats such as MOD.
@param volume: The volume level of the music, from 0.0 to 1.0.
@param endevent: The type of Pygame event that will be posted
when playback is finished. (If looping, the event will be posted
after each loop.)
"""
if not self.active:
pygame.mixer.music.load(self.filename)
Music.current = self.filename
self.__pausetime = startpos
# restore volume and end event state
if volume is not None:
self.volume = volume
elif self.volume is not None:
pygame.mixer.music.set_volume(self.volume)
if endevent is not None:
self.endevent = endevent
elif self.endevent is not None:
pygame.mixer.music.set_endevent(self.endevent)
pygame.mixer.music.play(times, startpos)
def loop(self):
"""Play this music repeatedly. This is the same as calling play()
with times = -1, but the intent is clearer."""
self.play(-1)
def pause(self):
"""Pause the music playback. Calling unpause() will restart playback
from the position where it was paused."""
self.__pausetime = pygame.mixer.music.get_pos() / 1000.
pygame.mixer.music.pause()
def unpause(self):
"""Resumes playback a paused music stream. This method is aware of
multiple music streams, but it can't keep track of loops."""
if self.active:
pygame.mixer.music.unpause()
else:
self.play(0, self.__pausetime)
def stop(self):
"""Completely stops playback of this music."""
self.__pausetime = 0.0
pygame.mixer.music.stop()
def rewind(self):
"""Resets playback of this music to the beginning."""
self.__pausetime = 0.0
if self.active:
pygame.mixer.music.rewind()
@property
def playing(self):
"""Whether this music object is currently playing"""
return self.active and pygame.mixer.music.get_busy()
@property
def active(self):
"""Whether this music object is "active" (i.e., loaded and ready to play)"""
return Music.current == self.filename
@property
def position(self):
"""The amount of time that this music object has been playing.
This is the time (in milliseconds) that this object has been playing,
0.0 if the object is currently stopped, or None if the object is not
the active music track.
@note: Using the C{startpos} argument of the L{play} method will
cause this property to be measured relative to the starting position
of playback, not to the start of the file.
"""
if not self.active:
return None
elif not self.playing:
return 0.0
else:
return pygame.mixer.music.get_pos()
def __get_volume(self):
return self.__volume
def __set_volume(self, vol):
self.__volume = vol
if Music.current == self.filename:
pygame.mixer.music.set_volume(self.__volume)
volume = property(__get_volume, __set_volume, \
doc="The volume level of this object's music")
def __get_endevent(self):
return self.__endevent
def __set_endevent(self, evttype):
self.__endevent = evttype
if Music.current == self.filename:
if evttype is not None:
pygame.mixer.music.set_endevent(evttype)
else:
pygame.mixer.music.set_endevent()
endevent = property(__get_endevent, __set_endevent, \
doc="The type of pygame Event that will be posted after playback")
| lgpl-2.1 | -4,195,474,969,560,348,700 | 36.344156 | 90 | 0.62563 | false |
Maccimo/intellij-community | python/helpers/pydev/pydev_tests_python/test_bytecode_utils.py | 10 | 1254 | from __future__ import print_function
import pytest
from _pydevd_bundle import pydevd_bytecode_utils
@pytest.fixture
def inner_decorator_code():
def power(exponent):
def outer(f):
def inner(*args):
result = f(*args)
return exponent ** result
return inner
return outer
return power.__code__.co_consts[1].co_consts[1]
@pytest.fixture
def function_with_try_except_code():
def f():
try:
1 / 0
except ZeroDivisionError:
print("Can't divide by zero!")
else:
print("Everything is fine.")
return f.__code__
def test_candidates_for_inner_decorator(inner_decorator_code):
variants = pydevd_bytecode_utils.get_smart_step_into_candidates(inner_decorator_code)
assert len(variants) == 2
assert variants[0].argval == 'f'
assert variants[1].argval == '__pow__'
def test_candidates_for_function_with_try_except(function_with_try_except_code):
variants = pydevd_bytecode_utils.get_smart_step_into_candidates(function_with_try_except_code)
assert len(variants) == 3
assert variants[0].argval == '__div__'
assert variants[1].argval == 'print'
assert variants[2].argval == 'print'
| apache-2.0 | -3,164,033,886,191,206,400 | 28.162791 | 98 | 0.633174 | false |
savoirfairelinux/django | django/contrib/gis/utils/ogrinspect.py | 36 | 8918 | """
This module is for inspecting OGR data sources and generating either
models for GeoDjango and/or mapping dictionaries for use with the
`LayerMapping` utility.
"""
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTInteger64, OFTReal, OFTString,
OFTTime,
)
def mapping(data_source, geom_name='geom', layer_key=0, multi_geom=False):
"""
Given a DataSource, generate a dictionary that may be used
for invoking the LayerMapping utility.
Keyword Arguments:
`geom_name` => The name of the geometry field to use for the model.
`layer_key` => The key for specifying which layer in the DataSource to use;
defaults to 0 (the first layer). May be an integer index or a string
identifier for the layer.
`multi_geom` => Boolean (default: False) - specify as multigeometry.
"""
if isinstance(data_source, str):
# Instantiating the DataSource from the string.
data_source = DataSource(data_source)
elif isinstance(data_source, DataSource):
pass
else:
raise TypeError('Data source parameter must be a string or a DataSource object.')
# Creating the dictionary.
_mapping = {}
# Generating the field name for each field in the layer.
for field in data_source[layer_key].fields:
mfield = field.lower()
if mfield[-1:] == '_':
mfield += 'field'
_mapping[mfield] = field
gtype = data_source[layer_key].geom_type
if multi_geom:
gtype.to_multi()
_mapping[geom_name] = str(gtype).upper()
return _mapping
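# Hedged usage sketch (not part of the original module): the shapefile path
# and the WorldBorder model are hypothetical; the dictionary returned by
# mapping() is what LayerMapping expects as its mapping argument.
def _example_mapping_usage(shp_path='/data/world_borders.shp'):
    from django.contrib.gis.utils import LayerMapping
    from myapp.models import WorldBorder  # hypothetical model
    mapping_dict = mapping(shp_path, geom_name='geom', multi_geom=True)
    lm = LayerMapping(WorldBorder, shp_path, mapping_dict)
    lm.save(verbose=True)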
def ogrinspect(*args, **kwargs):
"""
Given a data source (either a string or a DataSource object) and a string
model name this function will generate a GeoDjango model.
Usage:
>>> from django.contrib.gis.utils import ogrinspect
>>> ogrinspect('/path/to/shapefile.shp','NewModel')
...will print the model definition to stdout
or put this in a Python script and redirect the output to a new
model file like:
$ python generate_model.py > myapp/models.py
# generate_model.py
from django.contrib.gis.utils import ogrinspect
shp_file = 'data/mapping_hacks/world_borders.shp'
model_name = 'WorldBorders'
print(ogrinspect(shp_file, model_name, multi_geom=True, srid=4326,
geom_name='shapes', blank=True))
Required Arguments
`datasource` => string or DataSource object to file pointer
`model name` => string of name of new model class to create
Optional Keyword Arguments
`geom_name` => For specifying the model name for the Geometry Field.
Otherwise will default to `geom`
`layer_key` => The key for specifying which layer in the DataSource to use;
defaults to 0 (the first layer). May be an integer index or a string
identifier for the layer.
`srid` => The SRID to use for the Geometry Field. If it can be determined,
the SRID of the datasource is used.
`multi_geom` => Boolean (default: False) - specify as multigeometry.
`name_field` => String - specifies a field name to return for the
__str__() method (which will be generated if specified).
`imports` => Boolean (default: True) - set to False to omit the
`from django.contrib.gis.db import models` code from the
autogenerated models thus avoiding duplicated imports when building
more than one model by batching ogrinspect()
`decimal` => Boolean or sequence (default: False). When set to True
all generated model fields corresponding to the `OFTReal` type will
be `DecimalField` instead of `FloatField`. A sequence of specific
field names to generate as `DecimalField` may also be used.
`blank` => Boolean or sequence (default: False). When set to True all
generated model fields will have `blank=True`. If the user wants to
give specific fields to have blank, then a list/tuple of OGR field
names may be used.
`null` => Boolean (default: False) - When set to True all generated
model fields will have `null=True`. If the user wants to give
specific fields to have null, then a list/tuple of OGR field
names may be used.
Note: Call the _ogrinspect() helper to do the heavy lifting.
"""
return '\n'.join(s for s in _ogrinspect(*args, **kwargs))
def _ogrinspect(data_source, model_name, geom_name='geom', layer_key=0, srid=None,
multi_geom=False, name_field=None, imports=True,
decimal=False, blank=False, null=False):
"""
Helper routine for `ogrinspect` that generates GeoDjango models corresponding
to the given data source. See the `ogrinspect` docstring for more details.
"""
# Getting the DataSource
if isinstance(data_source, str):
data_source = DataSource(data_source)
elif isinstance(data_source, DataSource):
pass
else:
raise TypeError('Data source parameter must be a string or a DataSource object.')
# Getting the layer corresponding to the layer key and getting
# a string listing of all OGR fields in the Layer.
layer = data_source[layer_key]
ogr_fields = layer.fields
# Creating lists from the `null`, `blank`, and `decimal`
# keyword arguments.
def process_kwarg(kwarg):
if isinstance(kwarg, (list, tuple)):
return [s.lower() for s in kwarg]
elif kwarg:
return [s.lower() for s in ogr_fields]
else:
return []
null_fields = process_kwarg(null)
blank_fields = process_kwarg(blank)
decimal_fields = process_kwarg(decimal)
# Gets the `null` and `blank` keywords for the given field name.
def get_kwargs_str(field_name):
kwlist = []
if field_name.lower() in null_fields:
kwlist.append('null=True')
if field_name.lower() in blank_fields:
kwlist.append('blank=True')
if kwlist:
return ', ' + ', '.join(kwlist)
else:
return ''
# For those wishing to disable the imports.
if imports:
yield '# This is an auto-generated Django model module created by ogrinspect.'
yield 'from django.contrib.gis.db import models'
yield ''
yield 'class %s(models.Model):' % model_name
for field_name, width, precision, field_type in zip(
ogr_fields, layer.field_widths, layer.field_precisions, layer.field_types):
# The model field name.
mfield = field_name.lower()
if mfield[-1:] == '_':
mfield += 'field'
# Getting the keyword args string.
kwargs_str = get_kwargs_str(field_name)
if field_type is OFTReal:
# By default OFTReals are mapped to `FloatField`, however, they
# may also be mapped to `DecimalField` if specified in the
# `decimal` keyword.
if field_name.lower() in decimal_fields:
yield ' %s = models.DecimalField(max_digits=%d, decimal_places=%d%s)' % (
mfield, width, precision, kwargs_str
)
else:
yield ' %s = models.FloatField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTInteger:
yield ' %s = models.IntegerField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTInteger64:
yield ' %s = models.BigIntegerField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTString:
yield ' %s = models.CharField(max_length=%s%s)' % (mfield, width, kwargs_str)
elif field_type is OFTDate:
yield ' %s = models.DateField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTDateTime:
yield ' %s = models.DateTimeField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTTime:
yield ' %s = models.TimeField(%s)' % (mfield, kwargs_str[2:])
else:
raise TypeError('Unknown field type %s in %s' % (field_type, mfield))
# TODO: Autodetection of multigeometry types (see #7218).
gtype = layer.geom_type
if multi_geom:
gtype.to_multi()
geom_field = gtype.django
# Setting up the SRID keyword string.
if srid is None:
if layer.srs is None:
srid_str = 'srid=-1'
else:
srid = layer.srs.srid
if srid is None:
srid_str = 'srid=-1'
elif srid == 4326:
# WGS84 is already the default.
srid_str = ''
else:
srid_str = 'srid=%s' % srid
else:
srid_str = 'srid=%s' % srid
yield ' %s = models.%s(%s)' % (geom_name, geom_field, srid_str)
if name_field:
yield ''
yield ' def __str__(self): return self.%s' % name_field
| bsd-3-clause | -7,136,833,307,658,238,000 | 36.788136 | 92 | 0.623122 | false |
pieleric/odemis | src/odemis/acq/stitching/_simple.py | 2 | 3937 | # -*- coding: utf-8 -*-
'''
Created on 19 Jul 2017
@author: Éric Piel, Philip Winkler
Copyright © 2017 Éric Piel, Philip Winkler, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with Odemis. If not, see http://www.gnu.org/licenses/.
'''
from __future__ import division
import copy
from odemis import model
from odemis.acq.stitching._constants import REGISTER_GLOBAL_SHIFT, REGISTER_SHIFT, \
REGISTER_IDENTITY, WEAVER_MEAN, WEAVER_COLLAGE, WEAVER_COLLAGE_REVERSE
from odemis.acq.stitching._registrar import ShiftRegistrar, IdentityRegistrar, GlobalShiftRegistrar
from odemis.acq.stitching._weaver import MeanWeaver, CollageWeaver, CollageWeaverReverse
def register(tiles, method=REGISTER_GLOBAL_SHIFT):
"""
tiles (list of DataArray of shape YX or tuples of DataArrays): The tiles to compute the registration.
If it's tuples, the first tile of each tuple is the “main tile”, and the following ones are
dependent tiles.
method (REGISTER_*): REGISTER_GLOBAL_SHIFT → GlobalShiftRegistrar, REGISTER_SHIFT → ShiftRegistrar, REGISTER_IDENTITY → IdentityRegistrar
returns:
tiles (list of DataArray of shape YX or tuples of DataArrays): The tiles as passed, but with updated
MD_POS metadata
"""
if method == REGISTER_SHIFT:
registrar = ShiftRegistrar()
elif method == REGISTER_IDENTITY:
registrar = IdentityRegistrar()
elif method == REGISTER_GLOBAL_SHIFT:
registrar = GlobalShiftRegistrar()
else:
raise ValueError("Invalid registrar %s" % (method,))
# Register tiles
updatedTiles = []
for ts in tiles:
# Separate tile and dependent_tiles
if isinstance(ts, tuple):
tile = ts[0]
dep_tiles = ts[1:]
else:
tile = ts
dep_tiles = None
registrar.addTile(tile, dep_tiles)
# Update positions
for i, ts in enumerate(tiles):
# Return tuple of positions if dependent tiles are present
if isinstance(ts, tuple):
tile = ts[0]
dep_tiles = ts[1:]
# Update main tile
md = copy.deepcopy(tile.metadata)
md[model.MD_POS] = registrar.getPositions()[0][i]
tileUpd = model.DataArray(tile, md)
# Update dependent tiles
tilesNew = [tileUpd]
for j, dt in enumerate(dep_tiles):
md = copy.deepcopy(dt.metadata)
md[model.MD_POS] = registrar.getPositions()[1][i][j]
tilesNew.append(model.DataArray(dt, md))
tileUpd = tuple(tilesNew)
else:
md = copy.deepcopy(ts.metadata)
md[model.MD_POS] = registrar.getPositions()[0][i]
tileUpd = model.DataArray(ts, md)
updatedTiles.append(tileUpd)
return updatedTiles
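# Hedged usage sketch (not part of the original module): main_tiles and
# dep_tiles are assumed to be matching lists of DataArrays with MD_POS and
# MD_PIXEL_SIZE metadata; dependent tiles receive the same position
# correction as their main tile.
def _example_register_with_dependents(main_tiles, dep_tiles):
    tiles = list(zip(main_tiles, dep_tiles))
    return register(tiles, method=REGISTER_GLOBAL_SHIFT)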
def weave(tiles, method=WEAVER_MEAN):
"""
tiles (list of DataArray of shape YX): The tiles to draw
method (WEAVER_*): WEAVER_MEAN → MeanWeaver, WEAVER_COLLAGE → CollageWeaver, WEAVER_COLLAGE_REVERSE → CollageWeaverReverse
return:
image (DataArray of shape Y'X'): A large image containing all the tiles
"""
if method == WEAVER_MEAN:
weaver = MeanWeaver()
elif method == WEAVER_COLLAGE:
weaver = CollageWeaver()
elif method == WEAVER_COLLAGE_REVERSE:
weaver = CollageWeaverReverse()
else:
raise ValueError("Invalid weaver %s" % (method,))
for t in tiles:
weaver.addTile(t)
stitched_image = weaver.getFullImage()
return stitched_image
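# Hedged sketch of the full stitching pipeline (not part of the original
# module): tiles are registered first so that weaving blends them using the
# corrected MD_POS values.
def _example_stitch(tiles):
    registered = register(tiles, method=REGISTER_GLOBAL_SHIFT)
    return weave(registered, method=WEAVER_MEAN)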
| gpl-2.0 | -4,060,947,858,652,214,300 | 34.017857 | 226 | 0.657828 | false |
UTSA-ICS/python-openstackclient-SID | openstackclient/object/v1/object.py | 3 | 4440 | # Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Object v1 action implementations"""
import logging
import six
from cliff import lister
from cliff import show
from openstackclient.common import utils
from openstackclient.object.v1.lib import object as lib_object
class ListObject(lister.Lister):
"""List objects"""
log = logging.getLogger(__name__ + '.ListObject')
def get_parser(self, prog_name):
parser = super(ListObject, self).get_parser(prog_name)
parser.add_argument(
"container",
metavar="<container-name>",
help="List contents of container-name",
)
parser.add_argument(
"--prefix",
metavar="<prefix>",
help="Filter list using <prefix>",
)
parser.add_argument(
"--delimiter",
metavar="<delimiter>",
help="Roll up items with <delimiter>",
)
parser.add_argument(
"--marker",
metavar="<marker>",
help="Anchor for paging",
)
parser.add_argument(
"--end-marker",
metavar="<end-marker>",
help="End anchor for paging",
)
parser.add_argument(
"--limit",
metavar="<limit>",
type=int,
help="Limit the number of objects returned",
)
parser.add_argument(
'--long',
action='store_true',
default=False,
help='List additional fields in output',
)
parser.add_argument(
'--all',
action='store_true',
default=False,
help='List all objects in container (default is 10000)',
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
if parsed_args.long:
columns = (
'Name',
'Bytes',
'Hash',
'Content Type',
'Last Modified',
)
else:
columns = ('Name',)
kwargs = {}
if parsed_args.prefix:
kwargs['prefix'] = parsed_args.prefix
if parsed_args.delimiter:
kwargs['delimiter'] = parsed_args.delimiter
if parsed_args.marker:
kwargs['marker'] = parsed_args.marker
if parsed_args.end_marker:
kwargs['end_marker'] = parsed_args.end_marker
if parsed_args.limit:
kwargs['limit'] = parsed_args.limit
if parsed_args.all:
kwargs['full_listing'] = True
data = lib_object.list_objects(
self.app.restapi,
self.app.client_manager.object_store.endpoint,
parsed_args.container,
**kwargs
)
return (columns,
(utils.get_dict_properties(
s, columns,
formatters={},
) for s in data))
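# Hedged CLI sketch (container name is hypothetical): this class backs the
# "object list" command, e.g.
#   openstack object list my-container --prefix logs/ --long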
class ShowObject(show.ShowOne):
"""Show object information"""
log = logging.getLogger(__name__ + '.ShowObject')
def get_parser(self, prog_name):
parser = super(ShowObject, self).get_parser(prog_name)
parser.add_argument(
'container',
metavar='<container>',
help='Container name for object to display',
)
parser.add_argument(
'object',
metavar='<object>',
help='Object name to display',
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
data = lib_object.show_object(
self.app.restapi,
self.app.client_manager.object_store.endpoint,
parsed_args.container,
parsed_args.object,
)
return zip(*sorted(six.iteritems(data)))
| apache-2.0 | -3,496,593,673,283,125,000 | 28.210526 | 77 | 0.545495 | false |
JackNokia/robotframework | src/robot/result/keywordremover.py | 22 | 5204 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
from robot.model import SuiteVisitor, TagPattern
from robot.utils import Matcher, plural_or_not
def KeywordRemover(how):
upper = how.upper()
if upper.startswith('NAME:'):
return ByNameKeywordRemover(pattern=how[5:])
if upper.startswith('TAG:'):
return ByTagKeywordRemover(pattern=how[4:])
try:
return {'ALL': AllKeywordsRemover,
'PASSED': PassedKeywordRemover,
'FOR': ForLoopItemsRemover,
'WUKS': WaitUntilKeywordSucceedsRemover}[upper]()
except KeyError:
raise DataError("Expected 'ALL', 'PASSED', 'NAME:<pattern>', 'FOR', "
"or 'WUKS' but got '%s'." % how)
class _KeywordRemover(SuiteVisitor):
_message = 'Keyword data removed using --RemoveKeywords option.'
def __init__(self):
self._removal_message = RemovalMessage(self._message)
def _clear_content(self, kw):
kw.keywords = []
kw.messages = []
self._removal_message.set(kw)
def _failed_or_warning_or_error(self, item):
return not item.passed or self._warning_or_error(item)
def _warning_or_error(self, item):
finder = WarningAndErrorFinder()
item.visit(finder)
return finder.found
class AllKeywordsRemover(_KeywordRemover):
def visit_keyword(self, keyword):
self._clear_content(keyword)
class PassedKeywordRemover(_KeywordRemover):
def start_suite(self, suite):
if not suite.statistics.all.failed:
for keyword in suite.keywords:
if not self._warning_or_error(keyword):
self._clear_content(keyword)
def visit_test(self, test):
if not self._failed_or_warning_or_error(test):
for keyword in test.keywords:
self._clear_content(keyword)
def visit_keyword(self, keyword):
pass
class ByNameKeywordRemover(_KeywordRemover):
def __init__(self, pattern):
_KeywordRemover.__init__(self)
self._matcher = Matcher(pattern, ignore='_')
def start_keyword(self, kw):
if self._matcher.match(kw.name) and not self._warning_or_error(kw):
self._clear_content(kw)
class ByTagKeywordRemover(_KeywordRemover):
def __init__(self, pattern):
_KeywordRemover.__init__(self)
self._pattern = TagPattern(pattern)
def start_keyword(self, kw):
if self._pattern.match(kw.tags) and not self._warning_or_error(kw):
self._clear_content(kw)
class ForLoopItemsRemover(_KeywordRemover):
_message = '%d passing step%s removed using --RemoveKeywords option.'
def start_keyword(self, kw):
if kw.type == kw.FOR_LOOP_TYPE:
before = len(kw.keywords)
kw.keywords = self._remove_keywords(kw.keywords)
self._removal_message.set_if_removed(kw, before)
def _remove_keywords(self, keywords):
return [kw for kw in keywords
if self._failed_or_warning_or_error(kw) or kw is keywords[-1]]
class WaitUntilKeywordSucceedsRemover(_KeywordRemover):
_message = '%d failing step%s removed using --RemoveKeywords option.'
def start_keyword(self, kw):
if kw.name == 'BuiltIn.Wait Until Keyword Succeeds' and kw.keywords:
before = len(kw.keywords)
kw.keywords = self._remove_keywords(list(kw.keywords))
self._removal_message.set_if_removed(kw, before)
def _remove_keywords(self, keywords):
include_from_end = 2 if keywords[-1].passed else 1
return self._kws_with_warnings(keywords[:-include_from_end]) \
+ keywords[-include_from_end:]
def _kws_with_warnings(self, keywords):
return [kw for kw in keywords if self._warning_or_error(kw)]
class WarningAndErrorFinder(SuiteVisitor):
def __init__(self):
self.found = False
def start_suite(self, suite):
return not self.found
def start_test(self, test):
return not self.found
def start_keyword(self, keyword):
return not self.found
def visit_message(self, msg):
if msg.level in ('WARN', 'ERROR'):
self.found = True
class RemovalMessage(object):
def __init__(self, message):
self._message = message
def set_if_removed(self, kw, len_before):
removed = len_before - len(kw.keywords)
if removed:
self.set(kw, self._message % (removed, plural_or_not(removed)))
def set(self, kw, message=None):
kw.doc = ('%s\n\n_%s_' % (kw.doc, message or self._message)).strip()
| apache-2.0 | -224,594,458,913,545,280 | 30.92638 | 78 | 0.640277 | false |
mikedh/trimesh | tests/test_ray.py | 1 | 9508 | try:
from . import generic as g
except BaseException:
import generic as g
class RayTests(g.unittest.TestCase):
def test_rays(self):
meshes = [g.get_mesh(**k)
for k in g.data['ray_data']['load_kwargs']]
rays = g.data['ray_data']['rays']
names = [m.metadata['file_name'] for m in meshes]
hit_id = []
hit_loc = []
hit_any = []
for m in meshes:
name = m.metadata['file_name']
hit_any.append(m.ray.intersects_any(**rays[name]))
hit_loc.append(m.ray.intersects_location(**rays[name])[0])
hit_id.append(m.ray.intersects_id(**rays[name]))
hit_any = g.np.array(hit_any, dtype=g.np.int64)
for i in g.trimesh.grouping.group(
g.np.unique(names, return_inverse=True)[1]):
broken = hit_any[i].astype(g.np.int64).ptp(axis=0).sum()
assert broken == 0
def test_rps(self):
for use_embree in [True, False]:
dimension = (10000, 3)
sphere = g.get_mesh('unit_sphere.STL',
use_embree=use_embree)
ray_origins = g.np.random.random(dimension)
ray_directions = g.np.tile([0, 0, 1], (dimension[0], 1))
ray_origins[:, 2] = -5
# force ray object to allocate tree before timing it
# tree = sphere.ray.tree
tic = [g.time.time()]
a = sphere.ray.intersects_id(
ray_origins, ray_directions)
tic.append(g.time.time())
b = sphere.ray.intersects_location(
ray_origins, ray_directions)
tic.append(g.time.time())
# make sure ray functions always return numpy arrays
assert all(len(i.shape) >= 0 for i in a)
assert all(len(i.shape) >= 0 for i in b)
rps = dimension[0] / g.np.diff(tic)
g.log.info('Measured %s rays/second with embree %d',
str(rps),
use_embree)
def test_empty(self):
"""
Test queries with no hits
"""
for use_embree in [True, False]:
dimension = (100, 3)
sphere = g.get_mesh('unit_sphere.STL',
use_embree=use_embree)
# should never hit the sphere
ray_origins = g.np.random.random(dimension)
ray_directions = g.np.tile([0, 1, 0], (dimension[0], 1))
ray_origins[:, 2] = -5
# make sure ray functions always return numpy arrays
# these functions return multiple results all of which
# should always be a numpy array
assert all(len(i.shape) >= 0 for i in
sphere.ray.intersects_id(
ray_origins, ray_directions))
assert all(len(i.shape) >= 0 for i in
sphere.ray.intersects_location(
ray_origins, ray_directions))
def test_contains(self):
scale = 1.5
for use_embree in [True, False]:
mesh = g.get_mesh('unit_cube.STL', use_embree=use_embree)
g.log.info('Contains test ray engine: ' + str(mesh.ray.__class__))
test_on = mesh.ray.contains_points(mesh.vertices) # NOQA
test_in = mesh.ray.contains_points(mesh.vertices * (1.0 / scale))
assert test_in.all()
test_out = mesh.ray.contains_points(mesh.vertices * scale)
assert not test_out.any()
points_way_out = (
g.np.random.random(
(30, 3)) * 100) + 1.0 + mesh.bounds[1]
test_way_out = mesh.ray.contains_points(points_way_out)
assert not test_way_out.any()
test_centroid = mesh.ray.contains_points([mesh.center_mass])
assert test_centroid.all()
def test_on_vertex(self):
for use_embree in [True, False]:
            # use the backend selected by the loop variable rather than a hard-coded value
            m = g.trimesh.primitives.Box(use_embree=use_embree)
origins = g.np.zeros_like(m.vertices)
vectors = m.vertices.copy()
assert m.ray.intersects_any(ray_origins=origins,
ray_directions=vectors).all()
(locations,
index_ray,
index_tri) = m.ray.intersects_location(ray_origins=origins,
ray_directions=vectors)
hit_count = g.np.bincount(index_ray,
minlength=len(origins))
assert (hit_count == 1).all()
def test_on_edge(self):
for use_embree in [True, False]:
            # pass the loop's backend selection through to the loader
            m = g.get_mesh('7_8ths_cube.stl', use_embree=use_embree)
points = [[4.5, 0, -23], [4.5, 0, -2], [0, 0, -1e-6], [0, 0, -1]]
truth = [False, True, True, True]
result = g.trimesh.ray.ray_util.contains_points(m.ray, points)
assert (result == truth).all()
def test_multiple_hits(self):
"""
"""
# Set camera focal length (in pixels)
f = g.np.array([1000., 1000.])
h, w = 256, 256
# Set up a list of ray directions - one for each pixel in our (256,
# 256) output image.
ray_directions = g.trimesh.util.grid_arange(
[[-h / 2, -w / 2],
[h / 2, w / 2]],
step=2.0)
ray_directions = g.np.column_stack(
(ray_directions,
g.np.ones(len(ray_directions)) * f[0]))
# Initialize the camera origin to be somewhere behind the cube.
cam_t = g.np.array([0, 0, -15.])
# Duplicate to ensure we have an camera_origin per ray direction
ray_origins = g.np.tile(cam_t, (ray_directions.shape[0], 1))
for use_embree in [True, False]:
# Generate a 1 x 1 x 1 cube using the trimesh box primitive
cube_mesh = g.trimesh.primitives.Box(extents=[2, 2, 2],
use_embree=use_embree)
# Perform 256 * 256 raycasts, one for each pixel on the image
# plane. We only want the 'first' hit.
index_triangles, index_ray = cube_mesh.ray.intersects_id(
ray_origins=ray_origins,
ray_directions=ray_directions,
multiple_hits=False)
assert len(g.np.unique(index_triangles)) == 2
index_triangles, index_ray = cube_mesh.ray.intersects_id(
ray_origins=ray_origins,
ray_directions=ray_directions,
multiple_hits=True)
assert len(g.np.unique(index_triangles)) > 2
def test_contain_single(self):
# not watertight
mesh = g.get_mesh("teapot.stl", use_embree=False)
# sample a grid of points (n,3)
points = mesh.bounding_box.sample_grid(step=2.0)
# to a contains check on every point
contained = mesh.ray.contains_points(points)
assert len(points) == len(contained)
# not contained and should surface a bug
for point in mesh.bounding_box.vertices:
mesh.ray.contains_points([point])
def test_box(self):
"""
        Run box/ray intersection along Z and make sure XY match
ray origin XY.
"""
for kwargs in [{'use_embree': True},
{'use_embree': False}]:
mesh = g.get_mesh('unit_cube.STL', **kwargs)
# grid is across meshes XY profile
origins = g.trimesh.util.grid_linspace(mesh.bounds[:, :2] +
g.np.reshape(
[-.02, .02], (-1, 1)),
100)
origins = g.np.column_stack((
origins,
g.np.ones(len(origins)) * -100))
# all vectors are along Z axis
vectors = g.np.ones((len(origins), 3)) * [0, 0, 1.0]
# (n,3) float intersection position in space
# (n,) int, index of original ray
# (m,) int, index of mesh.faces
pos, ray, tri = mesh.ray.intersects_location(
ray_origins=origins,
ray_directions=vectors)
for p, r in zip(pos, ray):
# intersect location XY should match ray origin XY
assert g.np.allclose(p[:2], origins[r][:2])
# the Z of the hit should be on the cube's
# top or bottom face
assert g.np.isclose(p[2], mesh.bounds[:, 2]).any()
def test_broken(self):
"""
Test a mesh with badly defined face normals
"""
ray_origins = g.np.array([[0.12801793, 24.5030052, -5.],
[0.12801793, 24.5030052, -5.]])
ray_directions = g.np.array([[-0.13590759, -0.98042506, 0.],
[0.13590759, 0.98042506, -0.]])
for kwargs in [{'use_embree': True},
{'use_embree': False}]:
mesh = g.get_mesh('broken.STL', **kwargs)
locations, index_ray, index_tri = mesh.ray.intersects_location(
ray_origins=ray_origins, ray_directions=ray_directions)
# should be same number of location hits
assert len(locations) == len(ray_origins)
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
| mit | -286,884,637,848,557,950 | 37.184739 | 79 | 0.505154 | false |
loli/sklearn-ensembletrees | benchmarks/bench_plot_omp_lars.py | 31 | 4457 | """Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but has a fat, infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
it = 0
results = dict()
lars = np.empty((len(features_range), len(samples_range)))
lars_gram = lars.copy()
omp = lars.copy()
omp_gram = lars.copy()
max_it = len(samples_range) * len(features_range)
for i_s, n_samples in enumerate(samples_range):
for i_f, n_features in enumerate(features_range):
it += 1
n_informative = n_features / 10
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
# dataset_kwargs = {
# 'n_train_samples': n_samples,
# 'n_test_samples': 2,
# 'n_features': n_features,
# 'n_informative': n_informative,
# 'effective_rank': min(n_samples, n_features) / 10,
# #'effective_rank': None,
# 'bias': 0.0,
# }
dataset_kwargs = {
'n_samples': 1,
'n_components': n_features,
'n_features': n_samples,
'n_nonzero_coefs': n_informative,
'random_state': 0
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
X = np.asfortranarray(X)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, Gram=None, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (with Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute_gram=True,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (without Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute_gram=False,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp[i_f, i_s] = delta
results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
return results
if __name__ == '__main__':
samples_range = np.linspace(1000, 5000, 5).astype(np.int)
features_range = np.linspace(1000, 5000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(np.max(t) for t in results.values())
import pylab as pl
fig = pl.figure('scikit-learn OMP vs. LARS benchmark results')
for i, (label, timings) in enumerate(sorted(results.iteritems())):
        ax = fig.add_subplot(1, 2, i + 1)  # subplot numbering is 1-based
vmax = max(1 - timings.min(), -1 + timings.max())
pl.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
ax.set_xticklabels([''] + map(str, samples_range))
ax.set_yticklabels([''] + map(str, features_range))
pl.xlabel('n_samples')
pl.ylabel('n_features')
pl.title(label)
pl.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
ax = pl.axes([0.1, 0.08, 0.8, 0.06])
pl.colorbar(cax=ax, orientation='horizontal')
pl.show()
| bsd-3-clause | 3,028,455,366,158,458,400 | 35.235772 | 76 | 0.528158 | false |
mmcdermo/helpinghand | server/venv/lib/python2.7/site-packages/django/contrib/admin/util.py | 7 | 15672 | from __future__ import unicode_literals
import datetime
import decimal
from django.contrib.auth import get_permission_codename
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.related import RelatedObject
from django.forms.forms import pretty_name
from django.utils import formats
from django.utils.html import format_html
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.encoding import force_str, force_text, smart_text
from django.utils import six
from django.utils.translation import ungettext
from django.core.urlresolvers import reverse
def lookup_needs_distinct(opts, lookup_path):
"""
Returns True if 'distinct()' should be used to query the given lookup path.
"""
field_name = lookup_path.split('__', 1)[0]
field = opts.get_field_by_name(field_name)[0]
if ((hasattr(field, 'rel') and
isinstance(field.rel, models.ManyToManyRel)) or
(isinstance(field, models.related.RelatedObject) and
not field.field.unique)):
return True
return False
def prepare_lookup_value(key, value):
"""
Returns a lookup value prepared to be used in queryset filtering.
"""
# if key ends with __in, split parameter into separate values
if key.endswith('__in'):
value = value.split(',')
# if key ends with __isnull, special case '' and the string literals 'false' and '0'
if key.endswith('__isnull'):
if value.lower() in ('', 'false', '0'):
value = False
else:
value = True
return value
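# Illustrative behaviour (not part of the original Django source):
#   prepare_lookup_value('pk__in', '1,2,3')        -> ['1', '2', '3']
#   prepare_lookup_value('author__isnull', '0')    -> False
#   prepare_lookup_value('author__isnull', 'true') -> True
#   prepare_lookup_value('title', 'foo')           -> 'foo' (unchanged)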
def quote(s):
"""
Ensure that primary key values do not confuse the admin URLs by escaping
any '/', '_' and ':' and similarly problematic characters.
Similar to urllib.quote, except that the quoting is slightly different so
that it doesn't get automatically unquoted by the Web browser.
"""
if not isinstance(s, six.string_types):
return s
res = list(s)
for i in range(len(res)):
c = res[i]
if c in """:/_#?;@&=+$,"<>%\\""":
res[i] = '_%02X' % ord(c)
return ''.join(res)
def unquote(s):
"""
Undo the effects of quote(). Based heavily on urllib.unquote().
"""
mychr = chr
myatoi = int
list = s.split('_')
res = [list[0]]
myappend = res.append
del list[0]
for item in list:
if item[1:2]:
try:
myappend(mychr(myatoi(item[:2], 16)) + item[2:])
except ValueError:
myappend('_' + item)
else:
myappend('_' + item)
return "".join(res)
def flatten_fieldsets(fieldsets):
"""Returns a list of field names from an admin fieldsets structure."""
field_names = []
for name, opts in fieldsets:
for field in opts['fields']:
if isinstance(field, (list, tuple)):
field_names.extend(field)
else:
field_names.append(field)
return field_names
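# Illustrative example (not part of the original Django source): nested field
# tuples inside a fieldset are flattened in declaration order, e.g.
#   flatten_fieldsets([(None, {'fields': ['a', ('b', 'c')]})]) -> ['a', 'b', 'c']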
def get_deleted_objects(objs, opts, user, admin_site, using):
"""
Find all objects related to ``objs`` that should also be deleted. ``objs``
    must be a homogeneous iterable of objects (e.g. a QuerySet).
Returns a nested list of strings suitable for display in the
template with the ``unordered_list`` filter.
"""
collector = NestedObjects(using=using)
collector.collect(objs)
perms_needed = set()
def format_callback(obj):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
if has_admin:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.model_name),
None, (quote(obj._get_pk_val()),))
p = '%s.%s' % (opts.app_label,
get_permission_codename('delete', opts))
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return format_html('{0}: <a href="{1}">{2}</a>',
capfirst(opts.verbose_name),
admin_url,
obj)
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return '%s: %s' % (capfirst(opts.verbose_name),
force_text(obj))
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
return to_delete, perms_needed, protected
class NestedObjects(Collector):
def __init__(self, *args, **kwargs):
super(NestedObjects, self).__init__(*args, **kwargs)
self.edges = {} # {from_instance: [to_instances]}
self.protected = set()
def add_edge(self, source, target):
self.edges.setdefault(source, []).append(target)
def collect(self, objs, source_attr=None, **kwargs):
for obj in objs:
if source_attr:
self.add_edge(getattr(obj, source_attr), obj)
else:
self.add_edge(None, obj)
try:
return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
except models.ProtectedError as e:
self.protected.update(e.protected_objects)
def related_objects(self, related, objs):
qs = super(NestedObjects, self).related_objects(related, objs)
return qs.select_related(related.field.name)
def _nested(self, obj, seen, format_callback):
if obj in seen:
return []
seen.add(obj)
children = []
for child in self.edges.get(obj, ()):
children.extend(self._nested(child, seen, format_callback))
if format_callback:
ret = [format_callback(obj)]
else:
ret = [obj]
if children:
ret.append(children)
return ret
def nested(self, format_callback=None):
"""
Return the graph as a nested list.
"""
seen = set()
roots = []
for root in self.edges.get(None, ()):
roots.extend(self._nested(root, seen, format_callback))
return roots
def can_fast_delete(self, *args, **kwargs):
"""
We always want to load the objects into memory so that we can display
them to the user in confirm page.
"""
return False
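# Illustrative shape of the output (not part of the original Django source);
# the Book/Chapter models below are hypothetical. For a Book instance whose two
# Chapter rows would be cascade-deleted with it, collector.nested() returns
#   [<Book: b>, [<Chapter: c1>, <Chapter: c2>]]
# which the admin delete-confirmation template renders with the
# ``unordered_list`` filter.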
def model_format_dict(obj):
"""
Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
typically for use with string formatting.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
"""
if isinstance(obj, (models.Model, models.base.ModelBase)):
opts = obj._meta
elif isinstance(obj, models.query.QuerySet):
opts = obj.model._meta
else:
opts = obj
return {
'verbose_name': force_text(opts.verbose_name),
'verbose_name_plural': force_text(opts.verbose_name_plural)
}
def model_ngettext(obj, n=None):
"""
Return the appropriate `verbose_name` or `verbose_name_plural` value for
`obj` depending on the count `n`.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
If `obj` is a `QuerySet` instance, `n` is optional and the length of the
`QuerySet` is used.
"""
if isinstance(obj, models.query.QuerySet):
if n is None:
n = obj.count()
obj = obj.model
d = model_format_dict(obj)
singular, plural = d["verbose_name"], d["verbose_name_plural"]
return ungettext(singular, plural, n or 0)
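# Illustrative usage (not part of the original Django source); Book is a
# hypothetical model whose Meta defines verbose_name/verbose_name_plural:
#   model_ngettext(Book, 1)  -> the singular verbose_name
#   model_ngettext(Book, 3)  -> the verbose_name_plural
#   model_ngettext(queryset) -> uses queryset.count() to pick the form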
def lookup_field(name, obj, model_admin=None):
opts = obj._meta
try:
f = opts.get_field(name)
except models.FieldDoesNotExist:
# For non-field values, the value is either a method, property or
# returned via a callable.
if callable(name):
attr = name
value = attr(obj)
elif (model_admin is not None and hasattr(model_admin, name) and
not name == '__str__' and not name == '__unicode__'):
attr = getattr(model_admin, name)
value = attr(obj)
else:
attr = getattr(obj, name)
if callable(attr):
value = attr()
else:
value = attr
f = None
else:
attr = None
value = getattr(obj, name)
return f, attr, value
def label_for_field(name, model, model_admin=None, return_attr=False):
"""
Returns a sensible label for a field name. The name can be a callable,
property (but not created with @property decorator) or the name of an
object's attribute, as well as a genuine fields. If return_attr is
True, the resolved attribute (which could be a callable) is also returned.
This will be None if (and only if) the name refers to a field.
"""
attr = None
try:
field = model._meta.get_field_by_name(name)[0]
if isinstance(field, RelatedObject):
label = field.opts.verbose_name
else:
label = field.verbose_name
except models.FieldDoesNotExist:
if name == "__unicode__":
label = force_text(model._meta.verbose_name)
attr = six.text_type
elif name == "__str__":
label = force_str(model._meta.verbose_name)
attr = bytes
else:
if callable(name):
attr = name
elif model_admin is not None and hasattr(model_admin, name):
attr = getattr(model_admin, name)
elif hasattr(model, name):
attr = getattr(model, name)
else:
message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
if model_admin:
message += " or %s" % (model_admin.__class__.__name__,)
raise AttributeError(message)
if hasattr(attr, "short_description"):
label = attr.short_description
elif (isinstance(attr, property) and
hasattr(attr, "fget") and
hasattr(attr.fget, "short_description")):
label = attr.fget.short_description
elif callable(attr):
if attr.__name__ == "<lambda>":
label = "--"
else:
label = pretty_name(attr.__name__)
else:
label = pretty_name(name)
if return_attr:
return (label, attr)
else:
return label
def help_text_for_field(name, model):
help_text = ""
try:
field_data = model._meta.get_field_by_name(name)
except models.FieldDoesNotExist:
pass
else:
field = field_data[0]
if not isinstance(field, RelatedObject):
help_text = field.help_text
return smart_text(help_text)
def display_for_field(value, field):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if field.flatchoices:
return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
# NullBooleanField needs special-case null-handling, so it comes
# before the general null test.
elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(field, models.DateTimeField):
return formats.localize(timezone.template_localtime(value))
elif isinstance(field, (models.DateField, models.TimeField)):
return formats.localize(value)
elif isinstance(field, models.DecimalField):
return formats.number_format(value, field.decimal_places)
elif isinstance(field, models.FloatField):
return formats.number_format(value)
else:
return smart_text(value)
def display_for_value(value, boolean=False):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if boolean:
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(value, datetime.datetime):
return formats.localize(timezone.template_localtime(value))
elif isinstance(value, (datetime.date, datetime.time)):
return formats.localize(value)
elif isinstance(value, six.integer_types + (decimal.Decimal, float)):
return formats.number_format(value)
else:
return smart_text(value)
class NotRelationField(Exception):
pass
def get_model_from_relation(field):
if isinstance(field, models.related.RelatedObject):
return field.model
elif getattr(field, 'rel'): # or isinstance?
return field.rel.to
else:
raise NotRelationField
def reverse_field_path(model, path):
""" Create a reversed field path.
E.g. Given (Order, "user__groups"),
return (Group, "user__order").
Final field must be a related model, not a data field.
"""
reversed_path = []
parent = model
pieces = path.split(LOOKUP_SEP)
for piece in pieces:
field, model, direct, m2m = parent._meta.get_field_by_name(piece)
# skip trailing data field if extant:
if len(reversed_path) == len(pieces)-1: # final iteration
try:
get_model_from_relation(field)
except NotRelationField:
break
if direct:
related_name = field.related_query_name()
parent = field.rel.to
else:
related_name = field.field.name
parent = field.model
reversed_path.insert(0, related_name)
return (parent, LOOKUP_SEP.join(reversed_path))
def get_fields_from_path(model, path):
""" Return list of Fields given path relative to model.
e.g. (ModelX, "user__groups__name") -> [
<django.db.models.fields.related.ForeignKey object at 0x...>,
<django.db.models.fields.related.ManyToManyField object at 0x...>,
<django.db.models.fields.CharField object at 0x...>,
]
"""
pieces = path.split(LOOKUP_SEP)
fields = []
for piece in pieces:
if fields:
parent = get_model_from_relation(fields[-1])
else:
parent = model
fields.append(parent._meta.get_field_by_name(piece)[0])
return fields
def remove_trailing_data_field(fields):
""" Discard trailing non-relation field if extant. """
try:
get_model_from_relation(fields[-1])
except NotRelationField:
fields = fields[:-1]
return fields
def get_limit_choices_to_from_path(model, path):
""" Return Q object for limiting choices if applicable.
If final model in path is linked via a ForeignKey or ManyToManyField which
has a `limit_choices_to` attribute, return it as a Q object.
"""
fields = get_fields_from_path(model, path)
fields = remove_trailing_data_field(fields)
limit_choices_to = (
fields and hasattr(fields[-1], 'rel') and
getattr(fields[-1].rel, 'limit_choices_to', None))
if not limit_choices_to:
return models.Q() # empty Q
elif isinstance(limit_choices_to, models.Q):
return limit_choices_to # already a Q
else:
return models.Q(**limit_choices_to) # convert dict to Q
| mit | -2,093,303,613,755,990,800 | 32.630901 | 94 | 0.597882 | false |
js0701/chromium-crosswalk | tools/telemetry/telemetry/internal/backends/chrome_inspector/inspector_websocket.py | 10 | 5732 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import errno
import json
import logging
import socket
import time
from telemetry.core import exceptions
from telemetry.internal.backends.chrome_inspector import websocket
class WebSocketDisconnected(exceptions.Error):
"""An attempt was made to use a web socket after it had been disconnected."""
pass
class InspectorWebsocket(object):
# See http://www.jsonrpc.org/specification#error_object.
METHOD_NOT_FOUND_CODE = -32601
def __init__(self):
"""Create a websocket handler for communicating with Inspectors."""
self._socket = None
self._cur_socket_timeout = 0
self._next_request_id = 0
self._domain_handlers = {}
self._pending_callbacks = dict()
def RegisterDomain(self, domain_name, notification_handler):
"""Registers a given domain for handling notification methods.
For example, given inspector_backend:
def OnConsoleNotification(msg):
if msg['method'] == 'Console.messageAdded':
print msg['params']['message']
inspector_backend.RegisterDomain('Console', OnConsoleNotification)
Args:
domain_name: The devtools domain name. E.g., 'Tracing', 'Memory', 'Page'.
notification_handler: Handler for devtools notification. Will be
called if a devtools notification with matching domain is received
          via DispatchNotifications. The handler accepts a single parameter:
the JSON object representing the notification.
"""
assert domain_name not in self._domain_handlers
self._domain_handlers[domain_name] = notification_handler
def UnregisterDomain(self, domain_name):
"""Unregisters a previously registered domain."""
assert domain_name in self._domain_handlers
del self._domain_handlers[domain_name]
def Connect(self, url, timeout=10):
"""Connects the websocket.
Raises:
websocket.WebSocketException
socket.error
"""
assert not self._socket
self._socket = websocket.create_connection(url, timeout=timeout)
self._cur_socket_timeout = 0
self._next_request_id = 0
def Disconnect(self):
"""Disconnects the inspector websocket.
Raises:
websocket.WebSocketException
socket.error
"""
if self._socket:
self._socket.close()
self._socket = None
def SendAndIgnoreResponse(self, req):
"""Sends a request without waiting for a response.
Raises:
websocket.WebSocketException: Error from websocket library.
socket.error: Error from websocket library.
exceptions.WebSocketDisconnected: The socket was disconnected.
"""
self._SendRequest(req)
def _SendRequest(self, req):
if not self._socket:
raise WebSocketDisconnected()
req['id'] = self._next_request_id
self._next_request_id += 1
data = json.dumps(req)
self._socket.send(data)
if logging.getLogger().isEnabledFor(logging.DEBUG):
logging.debug('sent [%s]', json.dumps(req, indent=2, sort_keys=True))
def SyncRequest(self, req, timeout=10):
"""Sends a request and waits for a response.
Raises:
websocket.WebSocketException: Error from websocket library.
socket.error: Error from websocket library.
exceptions.WebSocketDisconnected: The socket was disconnected.
"""
self._SendRequest(req)
while True:
res = self._Receive(timeout)
if 'id' in res and res['id'] == req['id']:
return res
def AsyncRequest(self, req, callback):
"""Sends an async request and returns immediately.
Response will be handled in the |callback| later when DispatchNotifications
is invoked.
Args:
callback: a function that takes inspector's response as the argument.
"""
self._SendRequest(req)
self._pending_callbacks[req['id']] = callback
def DispatchNotifications(self, timeout=10):
"""Waits for responses from the websocket, dispatching them as necessary.
Raises:
websocket.WebSocketException: Error from websocket library.
socket.error: Error from websocket library.
exceptions.WebSocketDisconnected: The socket was disconnected.
"""
self._Receive(timeout)
def _SetTimeout(self, timeout):
if self._cur_socket_timeout != timeout:
self._socket.settimeout(timeout)
self._cur_socket_timeout = timeout
def _Receive(self, timeout=10):
if not self._socket:
raise WebSocketDisconnected()
self._SetTimeout(timeout)
while True:
try:
data = self._socket.recv()
except socket.error, e:
if e.errno == errno.EAGAIN:
# Resource is temporarily unavailable. Try again.
# See https://code.google.com/p/chromium/issues/detail?id=545853#c3
# for more details.
time.sleep(0.1)
else:
raise
else:
break
result = json.loads(data)
if logging.getLogger().isEnabledFor(logging.DEBUG):
logging.debug(
'got [%s]', json.dumps(result, indent=2, sort_keys=True))
if 'method' in result:
self._HandleNotification(result)
elif 'id' in result:
self._HandleAsyncResponse(result)
return result
def _HandleNotification(self, result):
mname = result['method']
dot_pos = mname.find('.')
domain_name = mname[:dot_pos]
if not domain_name in self._domain_handlers:
logging.warn('Unhandled inspector message: %s', result)
return
self._domain_handlers[domain_name](result)
def _HandleAsyncResponse(self, result):
callback = self._pending_callbacks.pop(result['id'], None)
if callback:
callback(result)
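# Illustrative usage sketch (not part of the original module); the DevTools
# websocket URL below is hypothetical and 'handler' stands for any callable
# taking the notification dict:
#   ws = InspectorWebsocket()
#   ws.Connect('ws://127.0.0.1:9222/devtools/page/1')
#   ws.RegisterDomain('Console', handler)
#   response = ws.SyncRequest({'method': 'Page.enable'}, timeout=10)
#   ws.DispatchNotifications(timeout=10)  # delivers notifications to handler
#   ws.Disconnect()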
| bsd-3-clause | -6,294,678,083,037,360,000 | 30.152174 | 79 | 0.679693 | false |
eklitzke/envoy | tools/envoy_collect/envoy_collect.py | 6 | 9564 | #!/usr/bin/env python
"""Wrapper for Envoy command-line that collects stats/log/profile.
Example use:
./tools/envoy_collect.py --output-path=./envoy.tar -c
./configs/google_com_proxy.v2.yaml --service-node foo
<Ctrl-C>
tar -tvf ./envoy.tar
-rw------- htuch/eng 0 2017-08-13 21:13 access_0.log
-rw------- htuch/eng 876 2017-08-13 21:13 clusters.txt
-rw------- htuch/eng 19 2017-08-13 21:13 listeners.txt
-rw------- htuch/eng 70 2017-08-13 21:13 server_info.txt
-rw------- htuch/eng 8443 2017-08-13 21:13 stats.txt
-rw------- htuch/eng 1551 2017-08-13 21:13 config.json
-rw------- htuch/eng 32681 2017-08-13 21:13 envoy.log
The Envoy process will execute as normal and will terminate when interrupted
with SIGINT (ctrl-c on stdin), collecting the various stats/log/profile in the
--output-path tarball.
TODO(htuch):
- Generate the full perf trace as well, since we may have a different version
of perf local vs. remote.
- Add a Bazel run wrapper.
- Support v2 proto config in ModifyEnvoyConfig().
- Flamegraph generation in post-processing.
- Support other modes of data collection (e.g. snapshotting on SIGUSR,
periodic).
- Validate in performance mode that we're using an opt binary.
- Consider handling other signals.
- Optional real time logging while Envoy process is running.
- bz2 compress tarball.
- Use freeze or something similar to build a static binary with embedded
Python, ending need to have Python on remote host (and care about version).
"""
from __future__ import print_function
import argparse
import ctypes
import ctypes.util
import datetime
import json
import os
import pipes
import shutil
import signal
import subprocess as sp
import sys
import tarfile
import tempfile
from six.moves import urllib
DEFAULT_ENVOY_PATH = os.getenv('ENVOY_PATH', 'bazel-bin/source/exe/envoy-static')
PERF_PATH = os.getenv('PERF_PATH', 'perf')
PR_SET_PDEATHSIG = 1  # See prctl(2).
DUMP_HANDLERS = ['clusters', 'listeners', 'server_info', 'stats']
def fetch_url(url):
return urllib.request.urlopen(url).read().decode('utf-8')
def modify_envoy_config(config_path, perf, output_directory):
"""Modify Envoy config to support gathering logs, etc.
Args:
config_path: the command-line specified Envoy config path.
perf: boolean indicating whether in performance mode.
output_directory: directory path for additional generated files.
Returns:
(modified Envoy config path, list of additional files to collect)
"""
# No modifications yet when in performance profiling mode.
if perf:
return config_path, []
# Load original Envoy config.
with open(config_path, 'r') as f:
envoy_config = json.loads(f.read())
# Add unconditional access logs for all listeners.
access_log_paths = []
for n, listener in enumerate(envoy_config['listeners']):
for network_filter in listener['filters']:
if network_filter['name'] == 'http_connection_manager':
config = network_filter['config']
access_log_path = os.path.join(output_directory, 'access_%d.log' % n)
access_log_config = {'path': access_log_path}
if 'access_log' in config:
config['access_log'].append(access_log_config)
else:
config['access_log'] = [access_log_config]
access_log_paths.append(access_log_path)
# Write out modified Envoy config.
modified_envoy_config_path = os.path.join(output_directory, 'config.json')
with open(modified_envoy_config_path, 'w') as f:
f.write(json.dumps(envoy_config, indent=2))
return modified_envoy_config_path, access_log_paths
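# Illustrative effect (not part of the original script): for a v1 JSON config
# whose single listener carries an http_connection_manager filter,
# modify_envoy_config() appends {'path': '<output_directory>/access_0.log'} to
# that filter's "access_log" list, writes the result to
# <output_directory>/config.json and returns that path together with
# ['<output_directory>/access_0.log']. In --performance mode the original
# config path is returned untouched along with an empty list.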
def run_envoy(envoy_shcmd_args, envoy_log_path, admin_address_path, dump_handlers_paths):
"""Run Envoy subprocess and trigger admin endpoint gathering on SIGINT.
Args:
envoy_shcmd_args: list of Envoy subprocess args.
envoy_log_path: path to write Envoy stderr log to.
admin_address_path: path to where admin address is written by Envoy.
dump_handlers_paths: map from admin endpoint handler to path to where the respective contents
are to be written.
Returns:
The Envoy subprocess exit code.
"""
envoy_shcmd = ' '.join(map(pipes.quote, envoy_shcmd_args))
print(envoy_shcmd)
# Some process setup stuff to ensure the child process gets cleaned up properly if the
# collector dies and doesn't get its signals implicitly.
def envoy_preexec_fn():
os.setpgrp()
libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
libc.prctl(PR_SET_PDEATHSIG, signal.SIGTERM)
# Launch Envoy, register for SIGINT, and wait for the child process to exit.
with open(envoy_log_path, 'w') as envoy_log:
envoy_proc = sp.Popen(envoy_shcmd,
stdin=sp.PIPE,
stderr=envoy_log,
preexec_fn=envoy_preexec_fn,
shell=True)
def signal_handler(signum, frame):
# The read is deferred until the signal so that the Envoy process gets a
# chance to write the file out.
with open(admin_address_path, 'r') as f:
admin_address = 'http://%s' % f.read()
# Fetch from the admin endpoint.
for handler, path in dump_handlers_paths.items():
handler_url = '%s/%s' % (admin_address, handler)
print('Fetching %s' % handler_url)
with open(path, 'w') as f:
f.write(fetch_url(handler_url))
# Send SIGINT to Envoy process, it should exit and execution will
# continue from the envoy_proc.wait() below.
print('Sending Envoy process (PID=%d) SIGINT...' % envoy_proc.pid)
envoy_proc.send_signal(signal.SIGINT)
signal.signal(signal.SIGINT, signal_handler)
return envoy_proc.wait()
def envoy_collect(parse_result, unknown_args):
"""Run Envoy and collect its artifacts.
Args:
parse_result: Namespace object with envoy_collect.py's args.
unknown_args: list of remaining args to pass to Envoy binary.
"""
# Are we in performance mode? Otherwise, debug.
perf = parse_result.performance
return_code = 1 # Non-zero default return.
envoy_tmpdir = tempfile.mkdtemp(prefix='envoy-collect-tmp-')
# Try and do stuff with envoy_tmpdir, rm -rf regardless of success/failure.
try:
# Setup Envoy config and determine the paths of the files we're going to
# generate.
modified_envoy_config_path, access_log_paths = modify_envoy_config(
parse_result.config_path, perf, envoy_tmpdir)
dump_handlers_paths = {h: os.path.join(envoy_tmpdir, '%s.txt' % h) for h in DUMP_HANDLERS}
envoy_log_path = os.path.join(envoy_tmpdir, 'envoy.log')
# The manifest of files that will be placed in the output .tar.
manifest = access_log_paths + list(
dump_handlers_paths.values()) + [modified_envoy_config_path, envoy_log_path]
# This is where we will find out where the admin endpoint is listening.
admin_address_path = os.path.join(envoy_tmpdir, 'admin_address.txt')
# Only run under 'perf record' in performance mode.
if perf:
perf_data_path = os.path.join(envoy_tmpdir, 'perf.data')
manifest.append(perf_data_path)
perf_record_args = [
PERF_PATH,
'record',
'-o',
perf_data_path,
'-g',
'--',
]
else:
perf_record_args = []
# This is how we will invoke the wrapped envoy.
envoy_shcmd_args = perf_record_args + [
parse_result.envoy_binary,
'-c',
modified_envoy_config_path,
'-l',
'error' if perf else 'trace',
'--admin-address-path',
admin_address_path,
] + unknown_args[1:]
# Run the Envoy process (under 'perf record' if needed).
return_code = run_envoy(envoy_shcmd_args, envoy_log_path, admin_address_path,
dump_handlers_paths)
# Collect manifest files and tar them.
with tarfile.TarFile(parse_result.output_path, 'w') as output_tar:
for path in manifest:
if os.path.exists(path):
print('Adding %s to archive' % path)
output_tar.add(path, arcname=os.path.basename(path))
else:
print('%s not found' % path)
print('Wrote Envoy artifacts to %s' % parse_result.output_path)
finally:
shutil.rmtree(envoy_tmpdir)
return return_code
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Envoy wrapper to collect stats/log/profile.')
default_output_path = 'envoy-%s.tar' % datetime.datetime.now().isoformat('-')
parser.add_argument('--output-path', default=default_output_path, help='path to output .tar.')
# We either need to interpret or override these, so we declare them in
# envoy_collect.py and always parse and present them again when invoking
# Envoy.
parser.add_argument('--config-path',
'-c',
required=True,
help='Path to Envoy configuration file.')
parser.add_argument('--log-level',
'-l',
help='Envoy log level. This will be overridden when invoking Envoy.')
# envoy_collect specific args.
parser.add_argument('--performance',
action='store_true',
help='Performance mode (collect perf trace, minimize log verbosity).')
parser.add_argument('--envoy-binary',
default=DEFAULT_ENVOY_PATH,
help='Path to Envoy binary (%s by default).' % DEFAULT_ENVOY_PATH)
sys.exit(envoy_collect(*parser.parse_known_args(sys.argv)))
| apache-2.0 | 398,129,151,264,080,500 | 37.720648 | 97 | 0.658616 | false |
Dourv/tornado-mongo | src/forms/permissions.py | 1 | 4485 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from bson.objectid import ObjectId
from pymongo import MongoClient
from validate_email import validate_email
from views.base import base
import config
import hashlib
class permissions():
@property
def db(self):
if config.debug == True:
client = MongoClient('localhost', 27017)
else:
			# getattr() avoids Python's name mangling of double-underscore names
			# inside a class body (config.__user would otherwise be looked up as
			# config._permissions__user and fail)
			client = MongoClient('mongodb://' + getattr(config, '__user') + ':' + getattr(config, '__psw') + '@' + getattr(config, '__host'), getattr(config, '__port'))
return client[config.database]
def form(self):
form = {
'config' : {
'method': 'POST',
'action' : '/admin/permissions',
'class' : 'form-horizontal',
'error-class' : ''
},
'fields': [
{
'required':True,
'widget':'text',
'attributes': {
'data-hint':'Escriba el nombre del permiso',
'name': 'permission_name',
'placeholder': 'Nombre del Permiso',
'class': 'form-control floating-label',
},
'form-group-class': 'col-md-12',
},
{
'widget':'submit',
'attributes':{
'name': 'submit',
'class': 'btn btn-primary',
'value': 'Crear Permiso'
},
'form-group-class': 'col-md-6'
}
]
}
return form
def form_edit(self,id):
data = self.db.permissions.find_one({'_id':ObjectId(id)})
print str(data['name']).decode('utf-8')
form = {
'config' : {
'method': 'POST',
'action' : '/admin/permissions/edit/'+id,
'class' : 'form-horizontal',
'error-class' : ''
},
'fields': [
{
'required':True,
'widget':'text',
'attributes': {
'data-hint' :'Escriba el nombre del permiso',
'class': 'form-control floating-label',
'name': 'permission_name',
'placeholder': 'Nombre del Permiso',
'value': data['name']
},
'form-group-class': 'col-md-12'
},
{
'widget':'submit',
'attributes':{
'name': 'submit',
'class': 'btn btn-primary',
'value': 'Guardar Permiso'
},
'form-group-class': 'col-md-6'
},
{
'widget':'hidden',
'attributes': {
'name':'id',
'value': id
}
}
]
}
return form
def validation(self,data,edit=False):
validation = {'status':True, 'errors': list() }
if 'permission_name' in data:
if len(data['permission_name']) < 3:
validation['status'] = False
validation['errors'].append('El Nombre del permiso debe poseer al menos 3 caracteres')
permission = self.db.permissions.find_one({'name': data['permission_name']})
if permission != None and edit == False:
validation['status'] = False
validation['errors'].append('Este permiso ya existe')
else:
if edit == True and permission != None:
if str(data['id']) != str(permission['_id']):
validation['status'] = False
validation['errors'].append('Este permiso ya existe')
else:
validation['status'] = False
validation['errors'].append('El campo nombre del permiso es Obligatorio.')
if edit == True:
_q = self.db.permissions.find_one({'_id':ObjectId(data['id'])})
if _q == None:
validation['status'] = False
validation['errors'].append('El id de permiso a editar no existe.')
if validation['status'] == True:
if edit == False:
self.insert(data)
return 'Nuevo Permiso Creado'
else:
return self.edit(data)
else:
return validation
def insert(self,data):
_INSERT = {
'name': data['permission_name'],
}
self.db.permissions.insert(_INSERT)
def edit(self,data):
old_data = self.db.permissions.find_one({'_id':ObjectId(data['id'])})
if 'block' in old_data and old_data['block'] == True:
return {'status':False, 'errors':['Este Permiso no puede ser editado.']}
else:
new_data = {
'name' : data['permission_name'],
'_id' : ObjectId(data['id'])
}
self.db.permissions.update(old_data,new_data)
return 'Permiso '+old_data['name']+' editado correctamente.'
def delete(self,id):
data = self.db.permissions.find_one({'_id':ObjectId(id)})
if data != None:
if 'block' in data and data['block'] == True:
return False
else:
self.db.permissions.remove(data)
rols = self.db.rols.find()
for rol in rols:
tmp = rol
if data['name'] in rol['permissions']:
for x in range(0,len(rol['permissions'])):
if rol['permissions'][x] == data['name']:
rol['permissions'].pop(x)
print x
break
self.db.rols.update({'_id':rol['_id']},tmp)
return 'Eliminado '+data['name'] | mit | -2,924,168,554,575,277,600 | 23.648352 | 102 | 0.585507 | false |
analyseuc3m/ANALYSE-v1 | common/djangoapps/external_auth/tests/test_shib.py | 35 | 30342 | # -*- coding: utf-8 -*-
"""
Tests for Shibboleth Authentication
@jbau
"""
import unittest
from ddt import ddt, data
from django.conf import settings
from django.http import HttpResponseRedirect
from django.test import TestCase
from django.test.client import RequestFactory, Client as DjangoTestClient
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import AnonymousUser, User
from importlib import import_module
from edxmako.tests import mako_middleware_process_request
from external_auth.models import ExternalAuthMap
from external_auth.views import (
shib_login, course_specific_login, course_specific_register, _flatten_to_ascii
)
from mock import patch
from urllib import urlencode
from student.views import create_account, change_enrollment
from student.models import UserProfile, CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore import ModuleStoreEnum
# Shib is supposed to provide 'REMOTE_USER', 'givenName', 'sn', 'mail', 'Shib-Identity-Provider'
# attributes via request.META. We can count on 'Shib-Identity-Provider', and 'REMOTE_USER' being present
# b/c of how mod_shib works but should test the behavior with the rest of the attributes present/missing
# For the sake of python convention we'll make all of these variable names ALL_CAPS
# These values would all returned from request.META, so they need to be str, not unicode
IDP = 'https://idp.stanford.edu/'
REMOTE_USER = '[email protected]'
MAILS = [None, '', '[email protected]'] # unicode shouldn't be in emails, would fail django's email validator
DISPLAYNAMES = [None, '', 'Jason 包']
GIVENNAMES = [None, '', 'jasön; John; bob'] # At Stanford, the givenNames can be a list delimited by ';'
SNS = [None, '', '包; smith'] # At Stanford, the sns can be a list delimited by ';'
def gen_all_identities():
"""
A generator for all combinations of test inputs.
Each generated item is a dict that represents what a shib IDP
could potentially pass to django via request.META, i.e.
setting (or not) request.META['givenName'], etc.
"""
def _build_identity_dict(mail, display_name, given_name, surname):
""" Helper function to return a dict of test identity """
meta_dict = {'Shib-Identity-Provider': IDP,
'REMOTE_USER': REMOTE_USER}
if display_name is not None:
meta_dict['displayName'] = display_name
if mail is not None:
meta_dict['mail'] = mail
if given_name is not None:
meta_dict['givenName'] = given_name
if surname is not None:
meta_dict['sn'] = surname
return meta_dict
for mail in MAILS:
for given_name in GIVENNAMES:
for surname in SNS:
for display_name in DISPLAYNAMES:
yield _build_identity_dict(mail, display_name, given_name, surname)
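# Illustrative output (not part of the original test file): with every optional
# attribute populated, gen_all_identities() yields dicts of the form
#   {'Shib-Identity-Provider': IDP, 'REMOTE_USER': REMOTE_USER,
#    'mail': '[email protected]', 'displayName': ..., 'givenName': ..., 'sn': ...}
# A key is omitted entirely only when its test value is None; the empty-string
# cases keep the key with an empty value.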
@ddt
@override_settings(SESSION_ENGINE='django.contrib.sessions.backends.cache')
class ShibSPTest(SharedModuleStoreTestCase):
"""
Tests for the Shibboleth SP, which communicates via request.META
(Apache environment variables set by mod_shib)
"""
request_factory = RequestFactory()
def setUp(self):
super(ShibSPTest, self).setUp()
self.test_user_id = ModuleStoreEnum.UserID.test
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_exception_shib_login(self):
"""
Tests that we get the error page when there is no REMOTE_USER
or Shib-Identity-Provider in request.META
"""
no_remote_user_request = self.request_factory.get('/shib-login')
no_remote_user_request.META.update({'Shib-Identity-Provider': IDP})
no_remote_user_request.user = AnonymousUser()
mako_middleware_process_request(no_remote_user_request)
no_remote_user_response = shib_login(no_remote_user_request)
self.assertEqual(no_remote_user_response.status_code, 403)
self.assertIn("identity server did not return your ID information", no_remote_user_response.content)
no_idp_request = self.request_factory.get('/shib-login')
no_idp_request.META.update({'REMOTE_USER': REMOTE_USER})
no_idp_response = shib_login(no_idp_request)
self.assertEqual(no_idp_response.status_code, 403)
self.assertIn("identity server did not return your ID information", no_idp_response.content)
def _assert_shib_login_is_logged(self, audit_log_call, remote_user):
"""Asserts that shibboleth login attempt is being logged"""
remote_user = _flatten_to_ascii(remote_user) # django usernames have to be ascii
method_name, args, _kwargs = audit_log_call
self.assertEquals(method_name, 'info')
self.assertEquals(len(args), 1)
self.assertIn(u'logged in via Shibboleth', args[0])
self.assertIn(remote_user, args[0])
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_shib_login(self):
"""
Tests that:
* shib credentials that match an existing ExternalAuthMap with a linked active user logs the user in
* shib credentials that match an existing ExternalAuthMap with a linked inactive user shows error page
* shib credentials that match an existing ExternalAuthMap without a linked user and also match the email
          of an existing user without an existing ExternalAuthMap links the two and logs the user in
* shib credentials that match an existing ExternalAuthMap without a linked user and also match the email
of an existing user that already has an ExternalAuthMap causes an error (403)
* shib credentials that do not match an existing ExternalAuthMap causes the registration form to appear
"""
user_w_map = UserFactory.create(email='[email protected]')
extauth = ExternalAuthMap(external_id='[email protected]',
external_email='',
external_domain='shib:https://idp.stanford.edu/',
external_credentials="",
user=user_w_map)
user_wo_map = UserFactory.create(email='[email protected]')
user_w_map.save()
user_wo_map.save()
extauth.save()
inactive_user = UserFactory.create(email='[email protected]')
inactive_user.is_active = False
inactive_extauth = ExternalAuthMap(external_id='[email protected]',
external_email='',
external_domain='shib:https://idp.stanford.edu/',
external_credentials="",
user=inactive_user)
inactive_user.save()
inactive_extauth.save()
idps = ['https://idp.stanford.edu/', 'https://someother.idp.com/']
remote_users = ['[email protected]', '[email protected]',
'testuser2@someother_idp.com', '[email protected]']
for idp in idps:
for remote_user in remote_users:
request = self.request_factory.get('/shib-login')
request.session = import_module(settings.SESSION_ENGINE).SessionStore() # empty session
request.META.update({'Shib-Identity-Provider': idp,
'REMOTE_USER': remote_user,
'mail': remote_user})
request.user = AnonymousUser()
mako_middleware_process_request(request)
with patch('external_auth.views.AUDIT_LOG') as mock_audit_log:
response = shib_login(request)
audit_log_calls = mock_audit_log.method_calls
if idp == "https://idp.stanford.edu/" and remote_user == '[email protected]':
self.assertIsInstance(response, HttpResponseRedirect)
self.assertEqual(request.user, user_w_map)
self.assertEqual(response['Location'], '/dashboard')
# verify logging:
self.assertEquals(len(audit_log_calls), 2)
self._assert_shib_login_is_logged(audit_log_calls[0], remote_user)
method_name, args, _kwargs = audit_log_calls[1]
self.assertEquals(method_name, 'info')
self.assertEquals(len(args), 1)
self.assertIn(u'Login success', args[0])
self.assertIn(remote_user, args[0])
elif idp == "https://idp.stanford.edu/" and remote_user == '[email protected]':
self.assertEqual(response.status_code, 403)
self.assertIn("Account not yet activated: please look for link in your email", response.content)
# verify logging:
self.assertEquals(len(audit_log_calls), 2)
self._assert_shib_login_is_logged(audit_log_calls[0], remote_user)
method_name, args, _kwargs = audit_log_calls[1]
self.assertEquals(method_name, 'warning')
self.assertEquals(len(args), 1)
self.assertIn(u'is not active after external login', args[0])
# self.assertEquals(remote_user, args[1])
elif idp == "https://idp.stanford.edu/" and remote_user == '[email protected]':
self.assertIsNotNone(ExternalAuthMap.objects.get(user=user_wo_map))
self.assertIsInstance(response, HttpResponseRedirect)
self.assertEqual(request.user, user_wo_map)
self.assertEqual(response['Location'], '/dashboard')
# verify logging:
self.assertEquals(len(audit_log_calls), 2)
self._assert_shib_login_is_logged(audit_log_calls[0], remote_user)
method_name, args, _kwargs = audit_log_calls[1]
self.assertEquals(method_name, 'info')
self.assertEquals(len(args), 1)
self.assertIn(u'Login success', args[0])
self.assertIn(remote_user, args[0])
elif idp == "https://someother.idp.com/" and remote_user in \
['[email protected]', '[email protected]', '[email protected]']:
self.assertEqual(response.status_code, 403)
self.assertIn("You have already created an account using an external login", response.content)
# no audit logging calls
self.assertEquals(len(audit_log_calls), 0)
else:
self.assertEqual(response.status_code, 200)
self.assertContains(response,
("Preferences for {platform_name}"
.format(platform_name=settings.PLATFORM_NAME)))
# no audit logging calls
self.assertEquals(len(audit_log_calls), 0)
def _base_test_extauth_auto_activate_user_with_flag(self, log_user_string="[email protected]"):
"""
Tests that FEATURES['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'] means extauth automatically
linked users, activates them, and logs them in
"""
inactive_user = UserFactory.create(email='[email protected]')
inactive_user.is_active = False
inactive_user.save()
request = self.request_factory.get('/shib-login')
request.session = import_module(settings.SESSION_ENGINE).SessionStore() # empty session
request.META.update({
'Shib-Identity-Provider': 'https://idp.stanford.edu/',
'REMOTE_USER': '[email protected]',
'mail': '[email protected]'
})
request.user = AnonymousUser()
with patch('external_auth.views.AUDIT_LOG') as mock_audit_log:
response = shib_login(request)
audit_log_calls = mock_audit_log.method_calls
# reload user from db, since the view function works via db side-effects
inactive_user = User.objects.get(id=inactive_user.id)
self.assertIsNotNone(ExternalAuthMap.objects.get(user=inactive_user))
self.assertTrue(inactive_user.is_active)
self.assertIsInstance(response, HttpResponseRedirect)
self.assertEqual(request.user, inactive_user)
self.assertEqual(response['Location'], '/dashboard')
# verify logging:
self.assertEquals(len(audit_log_calls), 3)
self._assert_shib_login_is_logged(audit_log_calls[0], log_user_string)
method_name, args, _kwargs = audit_log_calls[2]
self.assertEquals(method_name, 'info')
self.assertEquals(len(args), 1)
self.assertIn(u'Login success', args[0])
self.assertIn(log_user_string, args[0])
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@patch.dict(settings.FEATURES, {'BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH': True, 'SQUELCH_PII_IN_LOGS': False})
def test_extauth_auto_activate_user_with_flag_no_squelch(self):
"""
Wrapper to run base_test_extauth_auto_activate_user_with_flag with {'SQUELCH_PII_IN_LOGS': False}
"""
self._base_test_extauth_auto_activate_user_with_flag(log_user_string="[email protected]")
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@patch.dict(settings.FEATURES, {'BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH': True, 'SQUELCH_PII_IN_LOGS': True})
def test_extauth_auto_activate_user_with_flag_squelch(self):
"""
Wrapper to run base_test_extauth_auto_activate_user_with_flag with {'SQUELCH_PII_IN_LOGS': True}
"""
self._base_test_extauth_auto_activate_user_with_flag(log_user_string="user.id: 1")
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@data(*gen_all_identities())
def test_registration_form(self, identity):
"""
Tests the registration form showing up with the proper parameters.
Uses django test client for its session support
"""
client = DjangoTestClient()
# identity k/v pairs will show up in request.META
response = client.get(path='/shib-login/', data={}, follow=False, **identity)
self.assertEquals(response.status_code, 200)
mail_input_HTML = '<input class="" id="email" type="email" name="email"'
if not identity.get('mail'):
self.assertContains(response, mail_input_HTML)
else:
self.assertNotContains(response, mail_input_HTML)
sn_empty = not identity.get('sn')
given_name_empty = not identity.get('givenName')
displayname_empty = not identity.get('displayName')
fullname_input_html = '<input id="name" type="text" name="name"'
if sn_empty and given_name_empty and displayname_empty:
self.assertContains(response, fullname_input_html)
else:
self.assertNotContains(response, fullname_input_html)
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@data(*gen_all_identities())
def test_registration_form_submit(self, identity):
"""
Tests user creation after the registration form that pops is submitted. If there is no shib
ExternalAuthMap in the session, then the created user should take the username and email from the
request.
Uses django test client for its session support
"""
# First we pop the registration form
client = DjangoTestClient()
response1 = client.get(path='/shib-login/', data={}, follow=False, **identity)
# Then we have the user answer the registration form
# These are unicode because request.POST returns unicode
postvars = {'email': u'[email protected]',
'username': u'post_username', # django usernames can't be unicode
'password': u'post_pássword',
'name': u'post_náme',
'terms_of_service': u'true',
'honor_code': u'true'}
# use RequestFactory instead of TestClient here because we want access to request.user
request2 = self.request_factory.post('/create_account', data=postvars)
request2.session = client.session
request2.user = AnonymousUser()
mako_middleware_process_request(request2)
with patch('student.views.AUDIT_LOG') as mock_audit_log:
_response2 = create_account(request2)
user = request2.user
mail = identity.get('mail')
# verify logging of login happening during account creation:
audit_log_calls = mock_audit_log.method_calls
self.assertEquals(len(audit_log_calls), 3)
method_name, args, _kwargs = audit_log_calls[0]
self.assertEquals(method_name, 'info')
self.assertEquals(len(args), 1)
self.assertIn(u'Login success on new account creation', args[0])
self.assertIn(u'post_username', args[0])
method_name, args, _kwargs = audit_log_calls[1]
self.assertEquals(method_name, 'info')
self.assertEquals(len(args), 2)
self.assertIn(u'User registered with external_auth', args[0])
self.assertEquals(u'post_username', args[1])
method_name, args, _kwargs = audit_log_calls[2]
self.assertEquals(method_name, 'info')
self.assertEquals(len(args), 3)
self.assertIn(u'Updated ExternalAuthMap for ', args[0])
self.assertEquals(u'post_username', args[1])
self.assertEquals(u'[email protected]', args[2].external_id)
# check that the created user has the right email, either taken from shib or user input
if mail:
self.assertEqual(user.email, mail)
self.assertEqual(list(User.objects.filter(email=postvars['email'])), [])
self.assertIsNotNone(User.objects.get(email=mail)) # get enforces only 1 such user
else:
self.assertEqual(user.email, postvars['email'])
self.assertEqual(list(User.objects.filter(email=mail)), [])
self.assertIsNotNone(User.objects.get(email=postvars['email'])) # get enforces only 1 such user
# check that the created user profile has the right name, either taken from shib or user input
profile = UserProfile.objects.get(user=user)
sn_empty = not identity.get('sn')
given_name_empty = not identity.get('givenName')
displayname_empty = not identity.get('displayName')
if displayname_empty:
if sn_empty and given_name_empty:
self.assertEqual(profile.name, postvars['name'])
else:
self.assertEqual(profile.name, request2.session['ExternalAuthMap'].external_name)
self.assertNotIn(u';', profile.name)
else:
self.assertEqual(profile.name, request2.session['ExternalAuthMap'].external_name)
self.assertEqual(profile.name, identity.get('displayName').decode('utf-8'))
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@SharedModuleStoreTestCase.modifies_courseware
@data(None, "", "shib:https://idp.stanford.edu/")
def test_course_specific_login_and_reg(self, domain):
"""
Tests that the correct course specific login and registration urls work for shib
"""
course = CourseFactory.create(
org='MITx',
number='999',
display_name='Robot Super Course',
user_id=self.test_user_id,
)
# Test for cases where course is found
# set domains
# temporarily set the branch to draft-preferred so we can update the course
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course.id):
course.enrollment_domain = domain
self.store.update_item(course, self.test_user_id)
# setting location to test that GET params get passed through
login_request = self.request_factory.get('/course_specific_login/MITx/999/Robot_Super_Course' +
'?course_id=MITx/999/Robot_Super_Course' +
'&enrollment_action=enroll')
_reg_request = self.request_factory.get('/course_specific_register/MITx/999/Robot_Super_Course' +
'?course_id=MITx/999/course/Robot_Super_Course' +
'&enrollment_action=enroll')
login_response = course_specific_login(login_request, 'MITx/999/Robot_Super_Course')
reg_response = course_specific_register(login_request, 'MITx/999/Robot_Super_Course')
if domain and "shib" in domain:
self.assertIsInstance(login_response, HttpResponseRedirect)
self.assertEqual(login_response['Location'],
reverse('shib-login') +
'?course_id=MITx/999/Robot_Super_Course' +
'&enrollment_action=enroll')
self.assertIsInstance(login_response, HttpResponseRedirect)
self.assertEqual(reg_response['Location'],
reverse('shib-login') +
'?course_id=MITx/999/Robot_Super_Course' +
'&enrollment_action=enroll')
else:
self.assertIsInstance(login_response, HttpResponseRedirect)
self.assertEqual(login_response['Location'],
reverse('signin_user') +
'?course_id=MITx/999/Robot_Super_Course' +
'&enrollment_action=enroll')
self.assertIsInstance(login_response, HttpResponseRedirect)
self.assertEqual(reg_response['Location'],
reverse('register_user') +
'?course_id=MITx/999/Robot_Super_Course' +
'&enrollment_action=enroll')
# Now test for non-existent course
# setting location to test that GET params get passed through
login_request = self.request_factory.get('/course_specific_login/DNE/DNE/DNE' +
'?course_id=DNE/DNE/DNE' +
'&enrollment_action=enroll')
_reg_request = self.request_factory.get('/course_specific_register/DNE/DNE/DNE' +
'?course_id=DNE/DNE/DNE/Robot_Super_Course' +
'&enrollment_action=enroll')
login_response = course_specific_login(login_request, 'DNE/DNE/DNE')
reg_response = course_specific_register(login_request, 'DNE/DNE/DNE')
self.assertIsInstance(login_response, HttpResponseRedirect)
self.assertEqual(login_response['Location'],
reverse('signin_user') +
'?course_id=DNE/DNE/DNE' +
'&enrollment_action=enroll')
self.assertIsInstance(login_response, HttpResponseRedirect)
self.assertEqual(reg_response['Location'],
reverse('register_user') +
'?course_id=DNE/DNE/DNE' +
'&enrollment_action=enroll')
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@SharedModuleStoreTestCase.modifies_courseware
def test_enrollment_limit_by_domain(self):
"""
Tests that the enrollmentDomain setting is properly limiting enrollment to those who have
the proper external auth
"""
# create 2 course, one with limited enrollment one without
shib_course = CourseFactory.create(
org='Stanford',
number='123',
display_name='Shib Only',
enrollment_domain='shib:https://idp.stanford.edu/',
user_id=self.test_user_id,
)
open_enroll_course = CourseFactory.create(
org='MITx',
number='999',
display_name='Robot Super Course',
enrollment_domain='',
user_id=self.test_user_id,
)
# create 3 kinds of students, external_auth matching shib_course, external_auth not matching, no external auth
shib_student = UserFactory.create()
shib_student.save()
extauth = ExternalAuthMap(external_id='[email protected]',
external_email='',
external_domain='shib:https://idp.stanford.edu/',
external_credentials="",
user=shib_student)
extauth.save()
other_ext_student = UserFactory.create()
other_ext_student.username = "teststudent2"
other_ext_student.email = "[email protected]"
other_ext_student.save()
extauth = ExternalAuthMap(external_id='[email protected]',
external_email='',
external_domain='shib:https://other.edu/',
external_credentials="",
user=other_ext_student)
extauth.save()
int_student = UserFactory.create()
int_student.username = "teststudent3"
int_student.email = "[email protected]"
int_student.save()
# Tests the two case for courses, limited and not
for course in [shib_course, open_enroll_course]:
for student in [shib_student, other_ext_student, int_student]:
request = self.request_factory.post('/change_enrollment')
request.POST.update({'enrollment_action': 'enroll',
'course_id': course.id.to_deprecated_string()})
request.user = student
response = change_enrollment(request)
# If course is not limited or student has correct shib extauth then enrollment should be allowed
if course is open_enroll_course or student is shib_student:
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseEnrollment.is_enrolled(student, course.id))
else:
self.assertEqual(response.status_code, 400)
self.assertFalse(CourseEnrollment.is_enrolled(student, course.id))
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@SharedModuleStoreTestCase.modifies_courseware
def test_shib_login_enrollment(self):
"""
A functionality test that a student with an existing shib login
        can auto-enroll in a class with GET or POST params. Also tests the redirection functionality of
the 'next' GET/POST param
"""
student = UserFactory.create()
extauth = ExternalAuthMap(external_id='[email protected]',
external_email='',
external_domain='shib:https://idp.stanford.edu/',
external_credentials="",
internal_password="password",
user=student)
student.set_password("password")
student.save()
extauth.save()
course = CourseFactory.create(
org='Stanford',
number='123',
display_name='Shib Only',
enrollment_domain='shib:https://idp.stanford.edu/',
user_id=self.test_user_id,
)
# use django test client for sessions and url processing
# no enrollment before trying
self.assertFalse(CourseEnrollment.is_enrolled(student, course.id))
self.client.logout()
params = [
('course_id', course.id.to_deprecated_string()),
('enrollment_action', 'enroll'),
('next', '/testredirect')
]
request_kwargs = {'path': '/shib-login/',
'data': dict(params),
'follow': False,
'REMOTE_USER': '[email protected]',
'Shib-Identity-Provider': 'https://idp.stanford.edu/'}
response = self.client.get(**request_kwargs)
# successful login is a redirect to the URL that handles auto-enrollment
self.assertEqual(response.status_code, 302)
self.assertEqual(response['location'], 'http://testserver/account/finish_auth?{}'.format(urlencode(params)))
class ShibUtilFnTest(TestCase):
"""
Tests util functions in shib module
"""
def test__flatten_to_ascii(self):
DIACRITIC = u"àèìòùÀÈÌÒÙáéíóúýÁÉÍÓÚÝâêîôûÂÊÎÔÛãñõÃÑÕäëïöüÿÄËÏÖÜŸåÅçÇ" # pylint: disable=invalid-name
STR_DIACRI = "àèìòùÀÈÌÒÙáéíóúýÁÉÍÓÚÝâêîôûÂÊÎÔÛãñõÃÑÕäëïöüÿÄËÏÖÜŸåÅçÇ" # pylint: disable=invalid-name
FLATTENED = u"aeiouAEIOUaeiouyAEIOUYaeiouAEIOUanoANOaeiouyAEIOUYaAcC" # pylint: disable=invalid-name
        self.assertEqual(_flatten_to_ascii('jasön'), 'jason') # umlaut
        self.assertEqual(_flatten_to_ascii('Jason包'), 'Jason') # mandarin, so it just gets dropped
self.assertEqual(_flatten_to_ascii('abc'), 'abc') # pass through
unicode_test = _flatten_to_ascii(DIACRITIC)
self.assertEqual(unicode_test, FLATTENED)
self.assertIsInstance(unicode_test, unicode)
str_test = _flatten_to_ascii(STR_DIACRI)
self.assertEqual(str_test, FLATTENED)
self.assertIsInstance(str_test, str)
| agpl-3.0 | -8,130,467,474,458,833,000 | 50.227119 | 118 | 0.608291 | false |
kanagasabapathi/python-for-android | python-modules/twisted/twisted/trial/test/test_runner.py | 49 | 29815 | # Copyright (c) 2005-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
#
# Maintainer: Jonathan Lange
# Author: Robert Collins
import StringIO, os, sys
from zope.interface import implements
from zope.interface.verify import verifyObject
from twisted.trial.itrial import IReporter, ITestCase
from twisted.trial import unittest, runner, reporter, util
from twisted.python import failure, log, reflect, filepath
from twisted.python.filepath import FilePath
from twisted.scripts import trial
from twisted.plugins import twisted_trial
from twisted import plugin
from twisted.internet import defer
pyunit = __import__('unittest')
class CapturingDebugger(object):
def __init__(self):
self._calls = []
def runcall(self, *args, **kwargs):
self._calls.append('runcall')
args[0](*args[1:], **kwargs)
class CapturingReporter(object):
"""
Reporter that keeps a log of all actions performed on it.
"""
implements(IReporter)
stream = None
tbformat = None
args = None
separator = None
testsRun = None
def __init__(self, stream=None, tbformat=None, rterrors=None,
publisher=None):
"""
Create a capturing reporter.
"""
self._calls = []
self.shouldStop = False
self._stream = stream
self._tbformat = tbformat
self._rterrors = rterrors
self._publisher = publisher
def startTest(self, method):
"""
Report the beginning of a run of a single test method
@param method: an object that is adaptable to ITestMethod
"""
self._calls.append('startTest')
def stopTest(self, method):
"""
Report the status of a single test method
@param method: an object that is adaptable to ITestMethod
"""
self._calls.append('stopTest')
def cleanupErrors(self, errs):
"""called when the reactor has been left in a 'dirty' state
@param errs: a list of L{twisted.python.failure.Failure}s
"""
self._calls.append('cleanupError')
def addSuccess(self, test):
self._calls.append('addSuccess')
def done(self):
"""
Do nothing. These tests don't care about done.
"""
class TrialRunnerTestsMixin:
"""
Mixin defining tests for L{runner.TrialRunner}.
"""
def tearDown(self):
self.runner._tearDownLogFile()
def test_empty(self):
"""
Empty test method, used by the other tests.
"""
def _getObservers(self):
return log.theLogPublisher.observers
def test_addObservers(self):
"""
Any log system observers L{TrialRunner.run} adds are removed by the
time it returns.
"""
originalCount = len(self._getObservers())
self.runner.run(self.test)
newCount = len(self._getObservers())
self.assertEqual(newCount, originalCount)
def test_logFileAlwaysActive(self):
"""
Test that a new file is opened on each run.
"""
oldSetUpLogFile = self.runner._setUpLogFile
l = []
def setUpLogFile():
oldSetUpLogFile()
l.append(self.runner._logFileObserver)
self.runner._setUpLogFile = setUpLogFile
self.runner.run(self.test)
self.runner.run(self.test)
self.failUnlessEqual(len(l), 2)
self.failIf(l[0] is l[1], "Should have created a new file observer")
def test_logFileGetsClosed(self):
"""
Test that file created is closed during the run.
"""
oldSetUpLogFile = self.runner._setUpLogFile
l = []
def setUpLogFile():
oldSetUpLogFile()
l.append(self.runner._logFileObject)
self.runner._setUpLogFile = setUpLogFile
self.runner.run(self.test)
self.failUnlessEqual(len(l), 1)
self.failUnless(l[0].closed)
class TestTrialRunner(TrialRunnerTestsMixin, unittest.TestCase):
"""
Tests for L{runner.TrialRunner} with the feature to turn unclean errors
into warnings disabled.
"""
def setUp(self):
self.stream = StringIO.StringIO()
self.runner = runner.TrialRunner(CapturingReporter, stream=self.stream)
self.test = TestTrialRunner('test_empty')
def test_publisher(self):
"""
The reporter constructed by L{runner.TrialRunner} is passed
L{twisted.python.log} as the value for the C{publisher} parameter.
"""
result = self.runner._makeResult()
self.assertIdentical(result._publisher, log)
class TrialRunnerWithUncleanWarningsReporter(TrialRunnerTestsMixin,
unittest.TestCase):
"""
Tests for the TrialRunner's interaction with an unclean-error suppressing
reporter.
"""
def setUp(self):
self.stream = StringIO.StringIO()
self.runner = runner.TrialRunner(CapturingReporter, stream=self.stream,
uncleanWarnings=True)
self.test = TestTrialRunner('test_empty')
class DryRunMixin(object):
suppress = [util.suppress(
category=DeprecationWarning,
message="Test visitors deprecated in Twisted 8.0")]
def setUp(self):
self.log = []
self.stream = StringIO.StringIO()
self.runner = runner.TrialRunner(CapturingReporter,
runner.TrialRunner.DRY_RUN,
stream=self.stream)
self.makeTestFixtures()
def makeTestFixtures(self):
"""
Set C{self.test} and C{self.suite}, where C{self.suite} is an empty
TestSuite.
"""
def test_empty(self):
"""
If there are no tests, the reporter should not receive any events to
report.
"""
result = self.runner.run(runner.TestSuite())
self.assertEqual(result._calls, [])
def test_singleCaseReporting(self):
"""
If we are running a single test, check the reporter starts, passes and
then stops the test during a dry run.
"""
result = self.runner.run(self.test)
self.assertEqual(result._calls, ['startTest', 'addSuccess', 'stopTest'])
def test_testsNotRun(self):
"""
When we are doing a dry run, the tests should not actually be run.
"""
self.runner.run(self.test)
self.assertEqual(self.log, [])
class DryRunTest(DryRunMixin, unittest.TestCase):
"""
Check that 'dry run' mode works well with Trial tests.
"""
def makeTestFixtures(self):
class MockTest(unittest.TestCase):
def test_foo(test):
self.log.append('test_foo')
self.test = MockTest('test_foo')
self.suite = runner.TestSuite()
class PyUnitDryRunTest(DryRunMixin, unittest.TestCase):
"""
Check that 'dry run' mode works well with stdlib unittest tests.
"""
def makeTestFixtures(self):
class PyunitCase(pyunit.TestCase):
def test_foo(self):
pass
self.test = PyunitCase('test_foo')
self.suite = pyunit.TestSuite()
class TestRunner(unittest.TestCase):
def setUp(self):
self.config = trial.Options()
# whitebox hack a reporter in, because plugins are CACHED and will
# only reload if the FILE gets changed.
parts = reflect.qual(CapturingReporter).split('.')
package = '.'.join(parts[:-1])
klass = parts[-1]
plugins = [twisted_trial._Reporter(
"Test Helper Reporter",
package,
description="Utility for unit testing.",
longOpt="capturing",
shortOpt=None,
klass=klass)]
# XXX There should really be a general way to hook the plugin system
# for tests.
def getPlugins(iface, *a, **kw):
self.assertEqual(iface, IReporter)
return plugins + list(self.original(iface, *a, **kw))
self.original = plugin.getPlugins
plugin.getPlugins = getPlugins
self.standardReport = ['startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest']
def tearDown(self):
plugin.getPlugins = self.original
def parseOptions(self, args):
self.config.parseOptions(args)
def getRunner(self):
r = trial._makeRunner(self.config)
r.stream = StringIO.StringIO()
# XXX The runner should always take care of cleaning this up itself.
# It's not clear why this is necessary. The runner always tears down
# its log file.
self.addCleanup(r._tearDownLogFile)
# XXX The runner should always take care of cleaning this up itself as
# well. It's necessary because TrialRunner._setUpTestdir might raise
# an exception preventing Reporter.done from being run, leaving the
# observer added by Reporter.__init__ still present in the system.
# Something better needs to happen inside
# TrialRunner._runWithoutDecoration to remove the need for this cludge.
r._log = log.LogPublisher()
return r
def test_runner_can_get_reporter(self):
self.parseOptions([])
result = self.config['reporter']
runner = self.getRunner()
self.assertEqual(result, runner._makeResult().__class__)
def test_runner_get_result(self):
self.parseOptions([])
runner = self.getRunner()
result = runner._makeResult()
self.assertEqual(result.__class__, self.config['reporter'])
def test_uncleanWarningsOffByDefault(self):
"""
By default Trial sets the 'uncleanWarnings' option on the runner to
False. This means that dirty reactor errors will be reported as
errors. See L{test_reporter.TestDirtyReactor}.
"""
self.parseOptions([])
runner = self.getRunner()
self.assertNotIsInstance(runner._makeResult(),
reporter.UncleanWarningsReporterWrapper)
def test_getsUncleanWarnings(self):
"""
Specifying '--unclean-warnings' on the trial command line will cause
reporters to be wrapped in a device which converts unclean errors to
warnings. See L{test_reporter.TestDirtyReactor} for implications.
"""
self.parseOptions(['--unclean-warnings'])
runner = self.getRunner()
self.assertIsInstance(runner._makeResult(),
reporter.UncleanWarningsReporterWrapper)
def test_runner_working_directory(self):
self.parseOptions(['--temp-directory', 'some_path'])
runner = self.getRunner()
self.assertEquals(runner.workingDirectory, 'some_path')
def test_concurrentImplicitWorkingDirectory(self):
"""
If no working directory is explicitly specified and the default
working directory is in use by another runner, L{TrialRunner.run}
selects a different default working directory to use.
"""
self.parseOptions([])
# Make sure we end up with the same working directory after this test
# as we had before it.
self.addCleanup(os.chdir, os.getcwd())
# Make a new directory and change into it. This isolates us from state
# that other tests might have dumped into this process's temp
# directory.
runDirectory = FilePath(self.mktemp())
runDirectory.makedirs()
os.chdir(runDirectory.path)
firstRunner = self.getRunner()
secondRunner = self.getRunner()
where = {}
class ConcurrentCase(unittest.TestCase):
def test_first(self):
"""
Start a second test run which will have a default working
directory which is the same as the working directory of the
test run already in progress.
"""
# Change the working directory to the value it had before this
# test suite was started.
where['concurrent'] = subsequentDirectory = os.getcwd()
os.chdir(runDirectory.path)
self.addCleanup(os.chdir, subsequentDirectory)
secondRunner.run(ConcurrentCase('test_second'))
def test_second(self):
"""
Record the working directory for later analysis.
"""
where['record'] = os.getcwd()
result = firstRunner.run(ConcurrentCase('test_first'))
bad = result.errors + result.failures
if bad:
self.fail(bad[0][1])
self.assertEqual(
where, {
'concurrent': runDirectory.child('_trial_temp').path,
'record': runDirectory.child('_trial_temp-1').path})
def test_concurrentExplicitWorkingDirectory(self):
"""
If a working directory which is already in use is explicitly specified,
L{TrialRunner.run} raises L{_WorkingDirectoryBusy}.
"""
self.parseOptions(['--temp-directory', os.path.abspath(self.mktemp())])
initialDirectory = os.getcwd()
self.addCleanup(os.chdir, initialDirectory)
firstRunner = self.getRunner()
secondRunner = self.getRunner()
class ConcurrentCase(unittest.TestCase):
def test_concurrent(self):
"""
Try to start another runner in the same working directory and
assert that it raises L{_WorkingDirectoryBusy}.
"""
self.assertRaises(
util._WorkingDirectoryBusy,
secondRunner.run, ConcurrentCase('test_failure'))
def test_failure(self):
"""
Should not be called, always fails.
"""
self.fail("test_failure should never be called.")
result = firstRunner.run(ConcurrentCase('test_concurrent'))
bad = result.errors + result.failures
if bad:
self.fail(bad[0][1])
def test_runner_normal(self):
self.parseOptions(['--temp-directory', self.mktemp(),
'--reporter', 'capturing',
'twisted.trial.test.sample'])
my_runner = self.getRunner()
loader = runner.TestLoader()
suite = loader.loadByName('twisted.trial.test.sample', True)
result = my_runner.run(suite)
self.assertEqual(self.standardReport, result._calls)
def test_runner_debug(self):
self.parseOptions(['--reporter', 'capturing',
'--debug', 'twisted.trial.test.sample'])
my_runner = self.getRunner()
debugger = CapturingDebugger()
def get_debugger():
return debugger
my_runner._getDebugger = get_debugger
loader = runner.TestLoader()
suite = loader.loadByName('twisted.trial.test.sample', True)
result = my_runner.run(suite)
self.assertEqual(self.standardReport, result._calls)
self.assertEqual(['runcall'], debugger._calls)
class RemoveSafelyTests(unittest.TestCase):
"""
Tests for L{_removeSafely}.
"""
def test_removeSafelyNoTrialMarker(self):
"""
If a path doesn't contain a node named C{"_trial_marker"}, that path is
not removed by L{runner._removeSafely} and a L{runner._NoTrialMarker}
exception is raised instead.
"""
directory = self.mktemp()
os.mkdir(directory)
dirPath = filepath.FilePath(directory)
self.assertRaises(util._NoTrialMarker, util._removeSafely, dirPath)
def test_removeSafelyRemoveFailsMoveSucceeds(self):
"""
If an L{OSError} is raised while removing a path in
L{runner._removeSafely}, an attempt is made to move the path to a new
name.
"""
def dummyRemove():
"""
Raise an C{OSError} to emulate the branch of L{runner._removeSafely}
in which path removal fails.
"""
raise OSError()
# Patch stdout so we can check the print statements in _removeSafely
out = StringIO.StringIO()
self.patch(sys, 'stdout', out)
# Set up a trial directory with a _trial_marker
directory = self.mktemp()
os.mkdir(directory)
dirPath = filepath.FilePath(directory)
dirPath.child('_trial_marker').touch()
# Ensure that path.remove() raises an OSError
dirPath.remove = dummyRemove
util._removeSafely(dirPath)
self.assertIn("could not remove FilePath", out.getvalue())
def test_removeSafelyRemoveFailsMoveFails(self):
"""
If an L{OSError} is raised while removing a path in
L{runner._removeSafely}, an attempt is made to move the path to a new
name. If that attempt fails, the L{OSError} is re-raised.
"""
def dummyRemove():
"""
Raise an C{OSError} to emulate the branch of L{runner._removeSafely}
in which path removal fails.
"""
raise OSError("path removal failed")
def dummyMoveTo(path):
"""
Raise an C{OSError} to emulate the branch of L{runner._removeSafely}
in which path movement fails.
"""
raise OSError("path movement failed")
# Patch stdout so we can check the print statements in _removeSafely
out = StringIO.StringIO()
self.patch(sys, 'stdout', out)
# Set up a trial directory with a _trial_marker
directory = self.mktemp()
os.mkdir(directory)
dirPath = filepath.FilePath(directory)
dirPath.child('_trial_marker').touch()
# Ensure that path.remove() and path.moveTo() both raise OSErrors
dirPath.remove = dummyRemove
dirPath.moveTo = dummyMoveTo
error = self.assertRaises(OSError, util._removeSafely, dirPath)
self.assertEquals(str(error), "path movement failed")
self.assertIn("could not remove FilePath", out.getvalue())
class TestTrialSuite(unittest.TestCase):
def test_imports(self):
# FIXME, HTF do you test the reactor can be cleaned up ?!!!
from twisted.trial.runner import TrialSuite
class TestUntilFailure(unittest.TestCase):
class FailAfter(unittest.TestCase):
"""
A test case that fails when run 3 times in a row.
"""
count = []
def test_foo(self):
self.count.append(None)
if len(self.count) == 3:
self.fail('Count reached 3')
def setUp(self):
TestUntilFailure.FailAfter.count = []
self.test = TestUntilFailure.FailAfter('test_foo')
self.stream = StringIO.StringIO()
self.runner = runner.TrialRunner(reporter.Reporter, stream=self.stream)
def test_runUntilFailure(self):
"""
        Test that the runUntilFailure method of the runner actually fails after
a few runs.
"""
result = self.runner.runUntilFailure(self.test)
self.failUnlessEqual(result.testsRun, 1)
self.failIf(result.wasSuccessful())
self.assertEquals(self._getFailures(result), 1)
def _getFailures(self, result):
"""
Get the number of failures that were reported to a result.
"""
return len(result.failures)
def test_runUntilFailureDecorate(self):
"""
        C{runUntilFailure} doesn't decorate the tests uselessly: it decorates
        them only once, when the run starts, not on every iteration.
"""
decorated = []
def decorate(test, interface):
decorated.append((test, interface))
return test
self.patch(unittest, "decorate", decorate)
result = self.runner.runUntilFailure(self.test)
self.failUnlessEqual(result.testsRun, 1)
self.assertEquals(len(decorated), 1)
self.assertEquals(decorated, [(self.test, ITestCase)])
def test_runUntilFailureForceGCDecorate(self):
"""
C{runUntilFailure} applies the force-gc decoration after the standard
L{ITestCase} decoration, but only one time.
"""
decorated = []
def decorate(test, interface):
decorated.append((test, interface))
return test
self.patch(unittest, "decorate", decorate)
self.runner._forceGarbageCollection = True
result = self.runner.runUntilFailure(self.test)
self.failUnlessEqual(result.testsRun, 1)
self.assertEquals(len(decorated), 2)
self.assertEquals(decorated,
[(self.test, ITestCase),
(self.test, unittest._ForceGarbageCollectionDecorator)])
class UncleanUntilFailureTests(TestUntilFailure):
"""
Test that the run-until-failure feature works correctly with the unclean
error suppressor.
"""
def setUp(self):
TestUntilFailure.setUp(self)
self.runner = runner.TrialRunner(reporter.Reporter, stream=self.stream,
uncleanWarnings=True)
def _getFailures(self, result):
"""
Get the number of failures that were reported to a result that
is wrapped in an UncleanFailureWrapper.
"""
return len(result._originalReporter.failures)
class BreakingSuite(runner.TestSuite):
"""
A L{TestSuite} that logs an error when it is run.
"""
def run(self, result):
try:
raise RuntimeError("error that occurs outside of a test")
except RuntimeError:
log.err(failure.Failure())
class TestLoggedErrors(unittest.TestCase):
"""
It is possible for an error generated by a test to be logged I{outside} of
any test. The log observers constructed by L{TestCase} won't catch these
errors. Here we try to generate such errors and ensure they are reported to
a L{TestResult} object.
"""
def tearDown(self):
self.flushLoggedErrors(RuntimeError)
def test_construct(self):
"""
Check that we can construct a L{runner.LoggedSuite} and that it
starts empty.
"""
suite = runner.LoggedSuite()
self.assertEqual(suite.countTestCases(), 0)
def test_capturesError(self):
"""
        Check that a L{LoggedSuite} reports any logged errors to its result.
"""
result = reporter.TestResult()
suite = runner.LoggedSuite([BreakingSuite()])
suite.run(result)
self.assertEqual(len(result.errors), 1)
self.assertEqual(result.errors[0][0].id(), runner.NOT_IN_TEST)
self.failUnless(result.errors[0][1].check(RuntimeError))
class TestTestHolder(unittest.TestCase):
def setUp(self):
self.description = "description"
self.holder = runner.TestHolder(self.description)
def test_holder(self):
"""
Check that L{runner.TestHolder} takes a description as a parameter
and that this description is returned by the C{id} and
C{shortDescription} methods.
"""
self.assertEqual(self.holder.id(), self.description)
self.assertEqual(self.holder.shortDescription(), self.description)
def test_holderImplementsITestCase(self):
"""
L{runner.TestHolder} implements L{ITestCase}.
"""
self.assertIdentical(self.holder, ITestCase(self.holder))
self.assertTrue(
verifyObject(ITestCase, self.holder),
"%r claims to provide %r but does not do so correctly."
% (self.holder, ITestCase))
def test_runsWithStandardResult(self):
"""
A L{runner.TestHolder} can run against the standard Python
C{TestResult}.
"""
result = pyunit.TestResult()
self.holder.run(result)
self.assertTrue(result.wasSuccessful())
self.assertEquals(1, result.testsRun)
class TestErrorHolder(TestTestHolder):
"""
Test L{runner.ErrorHolder} shares behaviour with L{runner.TestHolder}.
"""
def setUp(self):
self.description = "description"
# make a real Failure so we can construct ErrorHolder()
try:
1/0
except ZeroDivisionError:
error = failure.Failure()
self.holder = runner.ErrorHolder(self.description, error)
def test_runsWithStandardResult(self):
"""
A L{runner.ErrorHolder} can run against the standard Python
C{TestResult}.
"""
result = pyunit.TestResult()
self.holder.run(result)
self.assertFalse(result.wasSuccessful())
self.assertEquals(1, result.testsRun)
class TestMalformedMethod(unittest.TestCase):
"""
    Test how trial handles test methods that don't have correct signatures.
"""
class ContainMalformed(unittest.TestCase):
"""
This TestCase holds malformed test methods that trial should handle.
"""
def test_foo(self, blah):
pass
def test_bar():
pass
test_spam = defer.deferredGenerator(test_bar)
def _test(self, method):
"""
Wrapper for one of the test method of L{ContainMalformed}.
"""
stream = StringIO.StringIO()
trialRunner = runner.TrialRunner(reporter.Reporter, stream=stream)
test = TestMalformedMethod.ContainMalformed(method)
result = trialRunner.run(test)
self.failUnlessEqual(result.testsRun, 1)
self.failIf(result.wasSuccessful())
self.failUnlessEqual(len(result.errors), 1)
def test_extraArg(self):
"""
Test when the method has extra (useless) arguments.
"""
self._test('test_foo')
def test_noArg(self):
"""
Test when the method doesn't have even self as argument.
"""
self._test('test_bar')
def test_decorated(self):
"""
Test a decorated method also fails.
"""
self._test('test_spam')
class DestructiveTestSuiteTestCase(unittest.TestCase):
"""
Test for L{runner.DestructiveTestSuite}.
"""
def test_basic(self):
"""
        The destructive test suite should run the tests normally.
"""
called = []
class MockTest(unittest.TestCase):
def test_foo(test):
called.append(True)
test = MockTest('test_foo')
result = reporter.TestResult()
suite = runner.DestructiveTestSuite([test])
self.assertEquals(called, [])
suite.run(result)
self.assertEquals(called, [True])
self.assertEquals(suite.countTestCases(), 0)
def test_shouldStop(self):
"""
Test the C{shouldStop} management: raising a C{KeyboardInterrupt} must
interrupt the suite.
"""
called = []
class MockTest(unittest.TestCase):
def test_foo1(test):
called.append(1)
def test_foo2(test):
raise KeyboardInterrupt()
def test_foo3(test):
called.append(2)
result = reporter.TestResult()
loader = runner.TestLoader()
loader.suiteFactory = runner.DestructiveTestSuite
suite = loader.loadClass(MockTest)
self.assertEquals(called, [])
suite.run(result)
self.assertEquals(called, [1])
# The last test shouldn't have been run
self.assertEquals(suite.countTestCases(), 1)
def test_cleanup(self):
"""
        Checks that the test suite cleans up its tests during the run, so that
it ends empty.
"""
class MockTest(unittest.TestCase):
def test_foo(test):
pass
test = MockTest('test_foo')
result = reporter.TestResult()
suite = runner.DestructiveTestSuite([test])
self.assertEquals(suite.countTestCases(), 1)
suite.run(result)
self.assertEquals(suite.countTestCases(), 0)
class TestRunnerDeprecation(unittest.TestCase):
class FakeReporter(reporter.Reporter):
"""
Fake reporter that does *not* implement done() but *does* implement
printErrors, separator, printSummary, stream, write and writeln
without deprecations.
"""
done = None
separator = None
stream = None
def printErrors(self, *args):
pass
def printSummary(self, *args):
pass
def write(self, *args):
pass
def writeln(self, *args):
pass
def test_reporterDeprecations(self):
"""
The runner emits a warning if it is using a result that doesn't
implement 'done'.
"""
trialRunner = runner.TrialRunner(None)
result = self.FakeReporter()
trialRunner._makeResult = lambda: result
def f():
# We have to use a pyunit test, otherwise we'll get deprecation
# warnings about using iterate() in a test.
trialRunner.run(pyunit.TestCase('id'))
self.assertWarns(
DeprecationWarning,
"%s should implement done() but doesn't. Falling back to "
"printErrors() and friends." % reflect.qual(result.__class__),
__file__, f)
| apache-2.0 | 5,046,429,572,721,015,000 | 30.550265 | 80 | 0.603622 | false |
Kobzol/debug-visualizer | debugger/mi/mi_debugger.py | 1 | 6700 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Jakub Beranek
#
# This file is part of Devi.
#
# Devi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License, or
# (at your option) any later version.
#
# Devi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Devi. If not, see <http://www.gnu.org/licenses/>.
#
import os
import threading
import debugger.util as util
from debugger.debugger_api import StartupInfo
from debugger.enums import ProcessState, DebuggerState
from debugger.mi.breakpoint_manager import BreakpointManager
from debugger.mi.communicator import Communicator
from debugger.mi.file_manager import FileManager
from debugger.mi.heap_manager import HeapManager
from debugger.mi.io_manager import IOManager
from debugger.mi.thread_manager import ThreadManager
from debugger.mi.variable_manager import VariableManager
from debugger import debugger_api
shlib_path = util.get_root_path("build/debugger/liballochook.so")
if not os.path.isfile(shlib_path):
raise BaseException(
"liballochook.so is missing in {}. Please run install.sh."
"".format(os.path.dirname(shlib_path))
)
class MiDebugger(debugger_api.Debugger):
def __init__(self):
super(MiDebugger, self).__init__()
self.communicator = Communicator()
self.communicator.on_process_change.subscribe(
self._handle_process_state)
self.io_manager = IOManager()
self.breakpoint_manager = BreakpointManager(self)
self.file_manager = FileManager(self)
self.thread_manager = ThreadManager(self)
self.variable_manager = VariableManager(self)
self.heap_manager = HeapManager(self)
self.exit_lock = threading.RLock()
self.binary_path = None
def _handle_process_state(self, output):
"""
@type output: mi.communicator.StateOutput
"""
util.Logger.debug("Process state changed: {0}".format(output.state))
self.process_state = output.state
if output.state == ProcessState.Exited:
self._cleanup_program()
self._on_program_ended(output.exit_code)
elif output.state == ProcessState.Stopped:
self.on_process_state_changed.notify(
output.state,
debugger_api.ProcessStoppedEventData(output.reason)
)
else:
self.on_process_state_changed.notify(output.state, None)
def require_state(self, required_state):
if not self.get_state().is_set(required_state):
raise util.BadStateError(required_state, self.state)
def get_state(self):
return self.state
def get_process_state(self):
return self.process_state
def load_binary(self, binary_path):
binary_path = os.path.abspath(binary_path)
self.communicator.start_gdb()
result = self.communicator.send(
"-file-exec-and-symbols {0}".format(binary_path))
util.Logger.debug("Loading program binary {0} succeeded: {1}".format(
binary_path, result.is_success()))
if result.is_success():
self.state.set(DebuggerState.BinaryLoaded)
self.communicator.send("-gdb-set mi-async on")
self.binary_path = binary_path
return True
else:
return False
def launch(self, startup_info=None):
"""
@type startup_info: StartupInfo | None
@rtype: bool
"""
if startup_info is None:
startup_info = StartupInfo()
if startup_info.working_directory == "":
startup_info.working_directory = os.path.dirname(self.binary_path)
self.require_state(DebuggerState.BinaryLoaded)
stdin, stdout, stderr = self.io_manager.handle_io()
alloc_file = self.heap_manager.watch()
startup_info.env_vars.append(("DEVI_ALLOC_FILE_PATH", alloc_file))
startup_info.env_vars.append(("LD_PRELOAD", shlib_path))
for env_var in startup_info.env_vars:
self.communicator.send("set environment {}={}".format(
env_var[0], env_var[1]
))
self.communicator.send("cd {}".format(startup_info.working_directory))
self.on_process_state_changed.notify(ProcessState.Launching, None)
result = self.communicator.send("run 1>{0} 2>{1} <{2} {3}".format(
stdout,
stderr,
stdin,
startup_info.cmd_arguments
))
util.Logger.debug("Launching program: {0}".format(result))
if result:
self.state.set(DebuggerState.Running)
return result.is_success()
def exec_continue(self):
self.require_state(DebuggerState.Running)
self.communicator.send("-exec-continue")
def exec_pause(self):
self.require_state(DebuggerState.Running)
self.communicator.pause_program()
self.communicator.send("interrupt")
def exec_step_over(self):
self.require_state(DebuggerState.Running)
self.communicator.send("-exec-next")
def exec_step_in(self):
self.require_state(DebuggerState.Running)
self.communicator.send("-exec-step")
def exec_step_out(self):
self.require_state(DebuggerState.Running)
self.communicator.send("-exec-finish")
def quit_program(self, return_code=1):
if not self.state.is_set(DebuggerState.Running):
return
self.communicator.quit_program()
self._cleanup_program()
self._on_program_ended(return_code)
def terminate(self):
self.quit_program()
self.communicator.kill()
def _cleanup_program(self):
self.exit_lock.acquire()
try:
if not self.state.is_set(DebuggerState.Running):
return
util.Logger.debug("Cleaning debugged process")
self.state.unset(DebuggerState.Running)
self.io_manager.stop_io()
self.heap_manager.stop()
finally:
self.exit_lock.release()
def _on_program_ended(self, return_code):
self.process_state = ProcessState.Exited
self.on_process_state_changed.notify(
ProcessState.Exited,
debugger_api.ProcessExitedEventData(return_code))
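# Minimal usage sketch (added for illustration; the binary path is a
# placeholder and only methods defined above are used):
#
#   dbg = MiDebugger()
#   if dbg.load_binary('./build/my_program'):
#       dbg.launch()          # starts the program with a default StartupInfo
#       dbg.exec_continue()   # resume execution after a stop
#       dbg.quit_program()
#   dbg.terminate()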
| gpl-3.0 | 1,696,932,445,310,678,000 | 31.682927 | 78 | 0.641791 | false |
alexmogavero/home-assistant | homeassistant/components/media_player/nadtcp.py | 4 | 5642 | """
Support for NAD digital amplifiers which can be remotely controlled via TCP/IP.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.nadtcp/
"""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_MUTE, SUPPORT_TURN_ON, SUPPORT_TURN_OFF,
SUPPORT_VOLUME_STEP, SUPPORT_SELECT_SOURCE, MediaPlayerDevice,
PLATFORM_SCHEMA)
from homeassistant.const import (
CONF_NAME, STATE_OFF, STATE_ON)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['nad_receiver==0.0.6']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'NAD amplifier'
DEFAULT_MIN_VOLUME = -60
DEFAULT_MAX_VOLUME = -10
DEFAULT_VOLUME_STEP = 4
SUPPORT_NAD = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | SUPPORT_TURN_ON | \
SUPPORT_TURN_OFF | SUPPORT_VOLUME_STEP | SUPPORT_SELECT_SOURCE
CONF_MIN_VOLUME = 'min_volume'
CONF_MAX_VOLUME = 'max_volume'
CONF_VOLUME_STEP = 'volume_step'
CONF_HOST = 'host'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MIN_VOLUME, default=DEFAULT_MIN_VOLUME): int,
vol.Optional(CONF_MAX_VOLUME, default=DEFAULT_MAX_VOLUME): int,
vol.Optional(CONF_VOLUME_STEP, default=DEFAULT_VOLUME_STEP): int,
})
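# Example configuration.yaml entry for this platform (illustrative sketch; the
# host value is a placeholder). The keys correspond to PLATFORM_SCHEMA above:
#
#   media_player:
#     - platform: nadtcp
#       host: 192.168.1.78
#       min_volume: -60
#       max_volume: -10
#       volume_step: 4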
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the NAD platform."""
from nad_receiver import NADReceiverTCP
add_devices([NADtcp(
NADReceiverTCP(config.get(CONF_HOST)),
config.get(CONF_NAME),
config.get(CONF_MIN_VOLUME),
config.get(CONF_MAX_VOLUME),
config.get(CONF_VOLUME_STEP),
)])
class NADtcp(MediaPlayerDevice):
"""Representation of a NAD Digital amplifier."""
def __init__(self, nad_device, name, min_volume, max_volume, volume_step):
"""Initialize the amplifier."""
self._name = name
self.nad_device = nad_device
self._min_vol = (min_volume + 90) * 2 # from dB to nad vol (0-200)
self._max_vol = (max_volume + 90) * 2 # from dB to nad vol (0-200)
self._volume_step = volume_step
self._state = None
self._mute = None
self._nad_volume = None
self._volume = None
self._source = None
self._source_list = self.nad_device.available_sources()
self.update()
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
def update(self):
"""Get the latest details from the device."""
try:
nad_status = self.nad_device.status()
except OSError:
return
if nad_status is None:
return
# Update on/off state
if nad_status['power']:
self._state = STATE_ON
else:
self._state = STATE_OFF
# Update current volume
self._volume = self.nad_vol_to_internal_vol(nad_status['volume'])
self._nad_volume = nad_status['volume']
# Update muted state
self._mute = nad_status['muted']
# Update current source
self._source = nad_status['source']
def nad_vol_to_internal_vol(self, nad_volume):
"""Convert nad volume range (0-200) to internal volume range.
Takes into account configured min and max volume.
"""
        if nad_volume < self._min_vol:
            volume_internal = 0.0
        elif nad_volume > self._max_vol:
            volume_internal = 1.0
        else:
            volume_internal = (nad_volume - self._min_vol) / \
                              (self._max_vol - self._min_vol)
return volume_internal
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_NAD
def turn_off(self):
"""Turn the media player off."""
self.nad_device.power_off()
def turn_on(self):
"""Turn the media player on."""
self.nad_device.power_on()
def volume_up(self):
"""Step volume up in the configured increments."""
self.nad_device.set_volume(self._nad_volume + 2 * self._volume_step)
def volume_down(self):
"""Step volume down in the configured increments."""
self.nad_device.set_volume(self._nad_volume - 2 * self._volume_step)
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
nad_volume_to_set = \
int(round(volume * (self._max_vol - self._min_vol) +
self._min_vol))
self.nad_device.set_volume(nad_volume_to_set)
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
if mute:
self.nad_device.mute()
else:
self.nad_device.unmute()
def select_source(self, source):
"""Select input source."""
self.nad_device.select_source(source)
@property
def source(self):
"""Name of the current input source."""
return self._source
@property
def source_list(self):
"""List of available input sources."""
return self.nad_device.available_sources()
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._mute
| apache-2.0 | -601,563,004,172,533,100 | 30.171271 | 78 | 0.61184 | false |
Vrekrer/magdynlab | utils/VNA_FMR_A.py | 1 | 7347 | # coding=utf-8
# Author: Diego Gonzalez Chavez
# email : [email protected] / [email protected]
#
# magdynlab
# Analysis routines for the data measured with:
# experiments/VNA_FMR_1P
# experiments/VNA_FMR_2P
#
# TODO:
# Make documentation
import numpy
import matplotlib
import matplotlib.pyplot as plt
class _FMR_P(object):
'''
Broadband FMR measurement common functions
'''
def getHi(self, h):
return numpy.argmin(numpy.abs(self.h - h))
def getFi(self, f):
return numpy.argmin(numpy.abs(self.f - f))
def getOutH(self, h):
hi = numpy.argmin(numpy.abs(self.h - h))
return self.ColorMapData[hi]
def getOutF(self, f):
fi = numpy.argmin(numpy.abs(self.f - f))
return self.ColorMapData[:,fi]
def Calc(self, Sfunct, *args, **kargs):
Sfunct(self, *args, **kargs)
def plot_ColorMap(self, fig='Auto'):
if fig == 'Auto':
fig = self.file.split('/')[-1] + '_CM'
data = self.ColorMapData
if self.sweepDir == '-1':
data = data[::-1]
extent = self.extent
if self.ColorMapData.dtype != 'complex':
fig = plt.figure(fig, (4,4))
fig.clear()
plt.xlim(*extent[[0,1]])
plt.ylim(*extent[[2,3]])
plt.xlabel('Field (Oe)')
plt.ylabel('Freq (GHz)')
plt.imshow(data.T,
aspect = 'auto',
origin = 'lower',
extent = extent)
fig.tight_layout()
fig.canvas.draw()
else:
fig = plt.figure(fig, (7,4))
fig.clear()
for pl in [121, 122]:
plt.subplot(pl)
plt.xlim(*extent[[0,1]])
plt.ylim(*extent[[2,3]])
plt.xlabel('Field (Oe)')
plt.ylabel('Freq (GHz)')
ax = fig.axes[0]
ax.imshow(data.real.T,
aspect = 'auto',
origin = 'lower',
extent = extent)
ax = fig.axes[1]
ax.imshow(data.imag.T,
aspect = 'auto',
origin = 'lower',
extent = extent)
fig.tight_layout()
fig.canvas.draw()
def plotH(self, h, fig='Auto'):
if fig == 'Auto':
fig = self.file.split('/')[-1] + '_H'
data = self.getOutH(h)
fig = plt.figure(fig, (4, 3))
plt.plot(self.f/1E9, data.real, '-')
plt.grid(True)
plt.xlabel('Freq (GHz)')
if data.dtype == 'complex':
plt.plot(self.f/1E9, data.imag, '--')
fig.tight_layout()
def plotF(self, f, fig='Auto'):
if fig == 'Auto':
fig = self.file.split('/')[-1] + '_F'
data = self.getOutF(f)
fig = plt.figure(fig, (4, 3))
plt.plot(self.h, data.real, '-')
plt.grid(True)
plt.xlabel('Field (Oe)')
if data.dtype == 'complex':
plt.plot(self.h, data.imag, '--')
fig.tight_layout()
def To_PowSpectra(self, file_name, info=''):
numpy.savez_compressed(file_name + '.PowerSpectra',
info=info,
outArray=self.ColorMapData.real,
h=self.h, f=self.f)
@property
def extent(self):
return numpy.array([self.h.min(), self.h.max(),
self.f.min()/1E9, self.f.max()/1E9])
class FMR_1P(_FMR_P):
'''
Broadband FMR measurement 1 port
'''
def __init__(self, file_name):
npz_file = numpy.load(file_name+'.VNA_1P_Raw.npz')
self.Info = str(npz_file['Info'])
self.DateTime = str(npz_file['DateTime'])
self.f = npz_file['f']
self.h = npz_file['h']
self.S11 = npz_file['S11']
self.S11_Ref = npz_file['S11_Ref']
self.file = file_name
self.sweepDir = '+1'
if self.h[0]>self.h[-1]:
self.sweepDir = '-1'
        #Pabs = 1 - |S11|²
        #Pref = 1 - |S11_ref|²
        #CM = Pabs - Pref = |S11_ref|² - |S11|²
self.ColorMapData = numpy.abs(self.S11_Ref)**2 - numpy.abs(self.S11)**2
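# Usage sketch (the file name below is a placeholder, not from the original code):
#
#   fmr = FMR_1P('data/sample_A')   # loads data/sample_A.VNA_1P_Raw.npz
#   fmr.plot_ColorMap()             # absorbed power vs field and frequency
#   fmr.plotF(5e9)                  # absorption vs field at the frequency closest to 5 GHz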
class FMR_2P(_FMR_P):
'''
Broadband FMR measurement 2 ports
'''
def __init__(self, file_name):
npz_file = numpy.load(file_name+'.VNA_2P_Raw.npz')
self.Info = str(npz_file['Info'])
self.DateTime = str(npz_file['DateTime'])
self.f = npz_file['f']
self.h = npz_file['h']
self.S11 = npz_file['S11']
self.S21 = npz_file['S21']
self.S22 = npz_file['S22']
self.S12 = npz_file['S12']
self.S11_Ref = npz_file['S11_Ref']
self.S21_Ref = npz_file['S21_Ref']
self.S22_Ref = npz_file['S22_Ref']
self.S12_Ref = npz_file['S12_Ref']
self.file = file_name
self.sweepDir = '+1'
if self.h[0]>self.h[-1]:
self.sweepDir = '-1'
        #Pabs = 1 - |S11|² - |S21|²
        #Pref = 1 - |S11_ref|² - |S21_ref|²
        #CM = Pabs - Pref = |S11_ref|² + |S21_ref|² - |S11|² - |S21|²
self.ColorMapData = + numpy.abs(self.S11_Ref)**2 \
+ numpy.abs(self.S21_Ref)**2 \
- numpy.abs(self.S11)**2 \
- numpy.abs(self.S21)**2
class FMR_dP_dH():
'''
FMR differential measurement
'''
def __init__(self, file_name):
self.file = file_name
x, y = numpy.loadtxt(file_name+'.dPxH', unpack=True)
self.h = x
self.dp_dh = y*1000
with open(file_name+'.dPxH', 'r') as text_file:
lines = text_file.readlines()
for line in lines:
if 'Frequency' in line:
self.f = float(line.split(':')[-1].split('G')[0])*1E9
elif 'Osc Field' in line:
self.h_ac = float(line.split(':')[-1].split('O')[0])
break
def plot(self, fig='Auto', stl='.-'):
if fig == 'Auto':
fig = self.file.split('/')[-1]
fig = plt.figure(fig, (4, 3))
plt.plot(self.h, self.dp_dh, stl, label='%0.3f GHz'%(self.f/1E9))
plt.legend()
plt.xlabel('Field (Oe)')
plt.ylabel('dp/dh (a.u.)')
plt.grid(True)
fig.tight_layout()
def fit(self):
pass
_dp_dh_script = '''
def dp_dh_fitfunct(h, %(prefix)sK1, %(prefix)sK2, %(prefix)sH_FMR, %(prefix)sDH):
#Fitting function for FMR
#Ref: Thesis (page 51) Georg Woltersdorf 2004 SIMON FRASER UNIVERSITY
#"SPIN-PUMPING AND TWO-MAGNON SCATTERING IN MAGNETIC MULTILAYERS"
K1 = %(prefix)sK1
K2 = %(prefix)sK2
H_FMR = %(prefix)sH_FMR
DH = %(prefix)sDH
dh = h - H_FMR
denom = (DH**2 + dh**2)**2
return (-K1*2*dh*DH - K2*(DH**2-dh**2))/denom
'''
def dp_dh_model(prefix=''):
import lmfit
expr = 'dp_dh_fitfunct(x, %(prefix)sK1, %(prefix)sK2, %(prefix)sH_FMR, %(prefix)sDH)' % {'prefix':prefix}
script = _dp_dh_script % {'prefix':prefix}
return lmfit.models.ExpressionModel(expr,
independent_vars=['x'],
init_script=script,
nan_policy='omit')
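# Fitting sketch (added for illustration): the file name and initial parameter
# values below are assumptions, not values from any measurement.
#
#   meas = FMR_dP_dH('sample01')        # reads sample01.dPxH
#   model = dp_dh_model()
#   params = model.make_params(K1=1.0, K2=0.0, H_FMR=meas.h.mean(), DH=10.0)
#   result = model.fit(meas.dp_dh, params, x=meas.h)
#   print(result.fit_report())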
| mit | 1,611,695,563,202,316,500 | 30.080508 | 109 | 0.485617 | false |
nre/Doxhooks | setup.py | 1 | 1498 | #!/usr/bin/env python3
from setuptools import setup
from doxhooks import __version__
with open("README.rst") as readme:
lines = list(readme)
for line_no, line in enumerate(lines):
if line.startswith("Doxhooks helps you"):
long_description = "".join(lines[line_no:])
break
else:
raise RuntimeError("Cannot find long description in README.")
setup(
name="Doxhooks",
version=__version__,
description=(
"Abstract away the content and maintenance of files in your project."
),
long_description=long_description,
license="MIT",
platforms=["any"],
url="https://github.com/nre/doxhooks",
author="Nick Evans",
author_email="[email protected]",
keywords=(
"abstract build code document file hook "
"preprocessor project resource source text"
),
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development :: Build Tools",
"Topic :: Software Development :: Pre-processors",
],
packages=["doxhooks"],
zip_safe=True,
)
| mit | 1,392,875,262,442,896,000 | 25.75 | 77 | 0.616155 | false |
npiganeau/odoo | addons/account/account_invoice.py | 12 | 78556 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import itertools
from lxml import etree
from openerp import models, fields, api, _
from openerp.exceptions import except_orm, Warning, RedirectWarning
import openerp.addons.decimal_precision as dp
# mapping invoice type to journal type
TYPE2JOURNAL = {
'out_invoice': 'sale',
'in_invoice': 'purchase',
'out_refund': 'sale_refund',
'in_refund': 'purchase_refund',
}
# mapping invoice type to refund type
TYPE2REFUND = {
'out_invoice': 'out_refund', # Customer Invoice
'in_invoice': 'in_refund', # Supplier Invoice
'out_refund': 'out_invoice', # Customer Refund
'in_refund': 'in_invoice', # Supplier Refund
}
MAGIC_COLUMNS = ('id', 'create_uid', 'create_date', 'write_uid', 'write_date')
class account_invoice(models.Model):
_name = "account.invoice"
_inherit = ['mail.thread']
_description = "Invoice"
_order = "number desc, id desc"
_track = {
'type': {
},
'state': {
'account.mt_invoice_paid': lambda self, cr, uid, obj, ctx=None: obj.state == 'paid' and obj.type in ('out_invoice', 'out_refund'),
'account.mt_invoice_validated': lambda self, cr, uid, obj, ctx=None: obj.state == 'open' and obj.type in ('out_invoice', 'out_refund'),
},
}
@api.one
@api.depends('invoice_line.price_subtotal', 'tax_line.amount')
def _compute_amount(self):
self.amount_untaxed = sum(line.price_subtotal for line in self.invoice_line)
self.amount_tax = sum(line.amount for line in self.tax_line)
self.amount_total = self.amount_untaxed + self.amount_tax
@api.model
def _default_journal(self):
inv_type = self._context.get('type', 'out_invoice')
inv_types = inv_type if isinstance(inv_type, list) else [inv_type]
company_id = self._context.get('company_id', self.env.user.company_id.id)
domain = [
('type', 'in', filter(None, map(TYPE2JOURNAL.get, inv_types))),
('company_id', '=', company_id),
]
return self.env['account.journal'].search(domain, limit=1)
@api.model
def _default_currency(self):
journal = self._default_journal()
return journal.currency or journal.company_id.currency_id
@api.model
@api.returns('account.analytic.journal')
def _get_journal_analytic(self, inv_type):
""" Return the analytic journal corresponding to the given invoice type. """
journal_type = TYPE2JOURNAL.get(inv_type, 'sale')
journal = self.env['account.analytic.journal'].search([('type', '=', journal_type)], limit=1)
if not journal:
raise except_orm(_('No Analytic Journal!'),
_("You must define an analytic journal of type '%s'!") % (journal_type,))
return journal
@api.one
@api.depends('account_id', 'move_id.line_id.account_id', 'move_id.line_id.reconcile_id')
def _compute_reconciled(self):
self.reconciled = self.test_paid()
if not self.reconciled and self.state == 'paid':
self.signal_workflow('open_test')
@api.model
def _get_reference_type(self):
return [('none', _('Free Reference'))]
@api.one
@api.depends(
'state', 'currency_id', 'invoice_line.price_subtotal',
'move_id.line_id.account_id.type',
'move_id.line_id.amount_residual',
'move_id.line_id.amount_residual_currency',
'move_id.line_id.currency_id',
'move_id.line_id.reconcile_partial_id.line_partial_ids.invoice.type',
)
def _compute_residual(self):
nb_inv_in_partial_rec = max_invoice_id = 0
self.residual = 0.0
for line in self.move_id.line_id:
if line.account_id.type in ('receivable', 'payable'):
if line.currency_id == self.currency_id:
self.residual += line.amount_residual_currency
else:
# ahem, shouldn't we use line.currency_id here?
from_currency = line.company_id.currency_id.with_context(date=line.date)
self.residual += from_currency.compute(line.amount_residual, self.currency_id)
# we check if the invoice is partially reconciled and if there
# are other invoices involved in this partial reconciliation
for pline in line.reconcile_partial_id.line_partial_ids:
if pline.invoice and self.type == pline.invoice.type:
nb_inv_in_partial_rec += 1
# store the max invoice id as for this invoice we will
# make a balance instead of a simple division
max_invoice_id = max(max_invoice_id, pline.invoice.id)
if nb_inv_in_partial_rec:
# if there are several invoices in a partial reconciliation, we
# split the residual by the number of invoices to have a sum of
# residual amounts that matches the partner balance
new_value = self.currency_id.round(self.residual / nb_inv_in_partial_rec)
if self.id == max_invoice_id:
# if it's the last the invoice of the bunch of invoices
# partially reconciled together, we make a balance to avoid
# rounding errors
self.residual = self.residual - ((nb_inv_in_partial_rec - 1) * new_value)
else:
self.residual = new_value
# prevent the residual amount on the invoice to be less than 0
self.residual = max(self.residual, 0.0)
@api.one
@api.depends(
'move_id.line_id.account_id',
'move_id.line_id.reconcile_id.line_id',
'move_id.line_id.reconcile_partial_id.line_partial_ids',
)
def _compute_move_lines(self):
# Give Journal Items related to the payment reconciled to this invoice.
# Return partial and total payments related to the selected invoice.
self.move_lines = self.env['account.move.line']
if not self.move_id:
return
data_lines = self.move_id.line_id.filtered(lambda l: l.account_id == self.account_id)
partial_lines = self.env['account.move.line']
for data_line in data_lines:
if data_line.reconcile_id:
lines = data_line.reconcile_id.line_id
elif data_line.reconcile_partial_id:
lines = data_line.reconcile_partial_id.line_partial_ids
else:
                lines = self.env['account.move.line']
partial_lines += data_line
self.move_lines = lines - partial_lines
@api.one
@api.depends(
'move_id.line_id.reconcile_id.line_id',
'move_id.line_id.reconcile_partial_id.line_partial_ids',
)
def _compute_payments(self):
partial_lines = lines = self.env['account.move.line']
for line in self.move_id.line_id:
if line.reconcile_id:
lines |= line.reconcile_id.line_id
elif line.reconcile_partial_id:
lines |= line.reconcile_partial_id.line_partial_ids
partial_lines += line
self.payment_ids = (lines - partial_lines).sorted()
name = fields.Char(string='Reference/Description', index=True,
readonly=True, states={'draft': [('readonly', False)]})
origin = fields.Char(string='Source Document',
help="Reference of the document that produced this invoice.",
readonly=True, states={'draft': [('readonly', False)]})
supplier_invoice_number = fields.Char(string='Supplier Invoice Number',
help="The reference of this invoice as provided by the supplier.",
readonly=True, states={'draft': [('readonly', False)]})
type = fields.Selection([
('out_invoice','Customer Invoice'),
('in_invoice','Supplier Invoice'),
('out_refund','Customer Refund'),
('in_refund','Supplier Refund'),
], string='Type', readonly=True, index=True, change_default=True,
default=lambda self: self._context.get('type', 'out_invoice'),
track_visibility='always')
number = fields.Char(related='move_id.name', store=True, readonly=True, copy=False)
internal_number = fields.Char(string='Invoice Number', readonly=True,
default=False, copy=False,
help="Unique number of the invoice, computed automatically when the invoice is created.")
reference = fields.Char(string='Invoice Reference',
help="The partner reference of this invoice.")
reference_type = fields.Selection('_get_reference_type', string='Payment Reference',
required=True, readonly=True, states={'draft': [('readonly', False)]},
default='none')
comment = fields.Text('Additional Information')
state = fields.Selection([
('draft','Draft'),
('proforma','Pro-forma'),
('proforma2','Pro-forma'),
('open','Open'),
('paid','Paid'),
('cancel','Cancelled'),
], string='Status', index=True, readonly=True, default='draft',
track_visibility='onchange', copy=False,
        help=" * The 'Draft' status is used when a user is encoding a new and unconfirmed invoice.\n"
             " * The 'Pro-forma' status is used when the invoice is pro-forma and does not yet have an invoice number.\n"
             " * The 'Open' status is used once the user validates the invoice: an invoice number is generated and it remains open until the invoice is paid.\n"
             " * The 'Paid' status is set automatically when the invoice is paid. Its related journal entries may or may not be reconciled.\n"
             " * The 'Cancelled' status is used when the user cancels the invoice.")
sent = fields.Boolean(readonly=True, default=False, copy=False,
help="It indicates that the invoice has been sent.")
date_invoice = fields.Date(string='Invoice Date',
readonly=True, states={'draft': [('readonly', False)]}, index=True,
help="Keep empty to use the current date", copy=False)
date_due = fields.Date(string='Due Date',
readonly=True, states={'draft': [('readonly', False)]}, index=True, copy=False,
help="If you use payment terms, the due date will be computed automatically at the generation "
"of accounting entries. The payment term may compute several due dates, for example 50% "
"now and 50% in one month, but if you want to force a due date, make sure that the payment "
"term is not set on the invoice. If you keep the payment term and the due date empty, it "
"means direct payment.")
partner_id = fields.Many2one('res.partner', string='Partner', change_default=True,
required=True, readonly=True, states={'draft': [('readonly', False)]},
track_visibility='always')
payment_term = fields.Many2one('account.payment.term', string='Payment Terms',
readonly=True, states={'draft': [('readonly', False)]},
help="If you use payment terms, the due date will be computed automatically at the generation "
"of accounting entries. If you keep the payment term and the due date empty, it means direct payment. "
"The payment term may compute several due dates, for example 50% now, 50% in one month.")
period_id = fields.Many2one('account.period', string='Force Period',
domain=[('state', '!=', 'done')], copy=False,
help="Keep empty to use the period of the validation(invoice) date.",
readonly=True, states={'draft': [('readonly', False)]})
account_id = fields.Many2one('account.account', string='Account',
required=True, readonly=True, states={'draft': [('readonly', False)]},
help="The partner account used for this invoice.")
invoice_line = fields.One2many('account.invoice.line', 'invoice_id', string='Invoice Lines',
readonly=True, states={'draft': [('readonly', False)]}, copy=True)
tax_line = fields.One2many('account.invoice.tax', 'invoice_id', string='Tax Lines',
readonly=True, states={'draft': [('readonly', False)]}, copy=True)
move_id = fields.Many2one('account.move', string='Journal Entry',
readonly=True, index=True, ondelete='restrict', copy=False,
help="Link to the automatically generated Journal Items.")
amount_untaxed = fields.Float(string='Subtotal', digits=dp.get_precision('Account'),
store=True, readonly=True, compute='_compute_amount', track_visibility='always')
amount_tax = fields.Float(string='Tax', digits=dp.get_precision('Account'),
store=True, readonly=True, compute='_compute_amount')
amount_total = fields.Float(string='Total', digits=dp.get_precision('Account'),
store=True, readonly=True, compute='_compute_amount')
currency_id = fields.Many2one('res.currency', string='Currency',
required=True, readonly=True, states={'draft': [('readonly', False)]},
default=_default_currency, track_visibility='always')
journal_id = fields.Many2one('account.journal', string='Journal',
required=True, readonly=True, states={'draft': [('readonly', False)]},
default=_default_journal,
domain="[('type', 'in', {'out_invoice': ['sale'], 'out_refund': ['sale_refund'], 'in_refund': ['purchase_refund'], 'in_invoice': ['purchase']}.get(type, [])), ('company_id', '=', company_id)]")
company_id = fields.Many2one('res.company', string='Company', change_default=True,
required=True, readonly=True, states={'draft': [('readonly', False)]},
default=lambda self: self.env['res.company']._company_default_get('account.invoice'))
check_total = fields.Float(string='Verification Total', digits=dp.get_precision('Account'),
readonly=True, states={'draft': [('readonly', False)]}, default=0.0)
reconciled = fields.Boolean(string='Paid/Reconciled',
store=True, readonly=True, compute='_compute_reconciled',
help="It indicates that the invoice has been paid and the journal entry of the invoice has been reconciled with one or several journal entries of payment.")
partner_bank_id = fields.Many2one('res.partner.bank', string='Bank Account',
help='Bank Account Number to which the invoice will be paid. A Company bank account if this is a Customer Invoice or Supplier Refund, otherwise a Partner bank account number.',
readonly=True, states={'draft': [('readonly', False)]})
move_lines = fields.Many2many('account.move.line', string='Entry Lines',
compute='_compute_move_lines')
residual = fields.Float(string='Balance', digits=dp.get_precision('Account'),
compute='_compute_residual', store=True,
help="Remaining amount due.")
payment_ids = fields.Many2many('account.move.line', string='Payments',
compute='_compute_payments')
move_name = fields.Char(string='Journal Entry', readonly=True,
states={'draft': [('readonly', False)]}, copy=False)
user_id = fields.Many2one('res.users', string='Salesperson', track_visibility='onchange',
readonly=True, states={'draft': [('readonly', False)]},
default=lambda self: self.env.user)
fiscal_position = fields.Many2one('account.fiscal.position', string='Fiscal Position',
readonly=True, states={'draft': [('readonly', False)]})
commercial_partner_id = fields.Many2one('res.partner', string='Commercial Entity',
related='partner_id.commercial_partner_id', store=True, readonly=True,
help="The commercial entity that will be used on Journal Entries for this invoice")
_sql_constraints = [
('number_uniq', 'unique(number, company_id, journal_id, type)',
'Invoice Number must be unique per Company!'),
]
@api.model
def fields_view_get(self, view_id=None, view_type=False, toolbar=False, submenu=False):
context = self._context
if context.get('active_model') == 'res.partner' and context.get('active_ids'):
partner = self.env['res.partner'].browse(context['active_ids'])[0]
if not view_type:
view_id = self.env['ir.ui.view'].search([('name', '=', 'account.invoice.tree')]).id
view_type = 'tree'
elif view_type == 'form':
if partner.supplier and not partner.customer:
view_id = self.env['ir.ui.view'].search([('name', '=', 'account.invoice.supplier.form')]).id
elif partner.customer and not partner.supplier:
view_id = self.env['ir.ui.view'].search([('name', '=', 'account.invoice.form')]).id
res = super(account_invoice, self).fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
# adapt selection of field journal_id
for field in res['fields']:
            if field == 'journal_id' and context.get('type'):
                journal_select = self.env['account.journal']._name_search('', [('type', '=', context['type'])], name_get_uid=1)
res['fields'][field]['selection'] = journal_select
doc = etree.XML(res['arch'])
if context.get('type'):
for node in doc.xpath("//field[@name='partner_bank_id']"):
if context['type'] == 'in_refund':
node.set('domain', "[('partner_id.ref_companies', 'in', [company_id])]")
elif context['type'] == 'out_refund':
node.set('domain', "[('partner_id', '=', partner_id)]")
if view_type == 'search':
if context.get('type') in ('out_invoice', 'out_refund'):
for node in doc.xpath("//group[@name='extended filter']"):
doc.remove(node)
if view_type == 'tree':
partner_string = _('Customer')
if context.get('type') in ('in_invoice', 'in_refund'):
partner_string = _('Supplier')
for node in doc.xpath("//field[@name='reference']"):
node.set('invisible', '0')
for node in doc.xpath("//field[@name='partner_id']"):
node.set('string', partner_string)
res['arch'] = etree.tostring(doc)
return res
@api.multi
def invoice_print(self):
""" Print the invoice and mark it as sent, so that we can see more
easily the next step of the workflow
"""
assert len(self) == 1, 'This option should only be used for a single id at a time.'
self.sent = True
return self.env['report'].get_action(self, 'account.report_invoice')
@api.multi
def action_invoice_sent(self):
""" Open a window to compose an email, with the edi invoice template
message loaded by default
"""
assert len(self) == 1, 'This option should only be used for a single id at a time.'
template = self.env.ref('account.email_template_edi_invoice', False)
compose_form = self.env.ref('mail.email_compose_message_wizard_form', False)
ctx = dict(
default_model='account.invoice',
default_res_id=self.id,
default_use_template=bool(template),
default_template_id=template.id,
default_composition_mode='comment',
mark_invoice_as_sent=True,
)
return {
'name': _('Compose Email'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form.id, 'form')],
'view_id': compose_form.id,
'target': 'new',
'context': ctx,
}
@api.multi
def confirm_paid(self):
return self.write({'state': 'paid'})
@api.multi
def unlink(self):
for invoice in self:
if invoice.state not in ('draft', 'cancel'):
raise Warning(_('You cannot delete an invoice which is not draft or cancelled. You should refund it instead.'))
elif invoice.internal_number:
raise Warning(_('You cannot delete an invoice after it has been validated (and received a number). You can set it back to "Draft" state and modify its content, then re-confirm it.'))
return super(account_invoice, self).unlink()
@api.multi
def onchange_partner_id(self, type, partner_id, date_invoice=False,
payment_term=False, partner_bank_id=False, company_id=False):
account_id = False
payment_term_id = False
fiscal_position = False
bank_id = False
if partner_id:
p = self.env['res.partner'].browse(partner_id)
rec_account = p.property_account_receivable
pay_account = p.property_account_payable
if company_id:
if p.property_account_receivable.company_id and \
p.property_account_receivable.company_id.id != company_id and \
p.property_account_payable.company_id and \
p.property_account_payable.company_id.id != company_id:
prop = self.env['ir.property']
rec_dom = [('name', '=', 'property_account_receivable'), ('company_id', '=', company_id)]
pay_dom = [('name', '=', 'property_account_payable'), ('company_id', '=', company_id)]
res_dom = [('res_id', '=', 'res.partner,%s' % partner_id)]
rec_prop = prop.search(rec_dom + res_dom) or prop.search(rec_dom)
pay_prop = prop.search(pay_dom + res_dom) or prop.search(pay_dom)
rec_account = rec_prop.get_by_record(rec_prop)
pay_account = pay_prop.get_by_record(pay_prop)
if not rec_account and not pay_account:
action = self.env.ref('account.action_account_config')
msg = _('Cannot find a chart of accounts for this company, You should configure it. \nPlease go to Account Configuration.')
raise RedirectWarning(msg, action.id, _('Go to the configuration panel'))
if type in ('out_invoice', 'out_refund'):
account_id = rec_account.id
payment_term_id = p.property_payment_term.id
else:
account_id = pay_account.id
payment_term_id = p.property_supplier_payment_term.id
fiscal_position = p.property_account_position.id
bank_id = p.bank_ids.id
result = {'value': {
'account_id': account_id,
'payment_term': payment_term_id,
'fiscal_position': fiscal_position,
}}
if type in ('in_invoice', 'in_refund'):
result['value']['partner_bank_id'] = bank_id
if payment_term != payment_term_id:
if payment_term_id:
to_update = self.onchange_payment_term_date_invoice(payment_term_id, date_invoice)
result['value'].update(to_update.get('value', {}))
else:
result['value']['date_due'] = False
if partner_bank_id != bank_id:
to_update = self.onchange_partner_bank(bank_id)
result['value'].update(to_update.get('value', {}))
return result
@api.multi
def onchange_journal_id(self, journal_id=False):
if journal_id:
journal = self.env['account.journal'].browse(journal_id)
return {
'value': {
'currency_id': journal.currency.id or journal.company_id.currency_id.id,
'company_id': journal.company_id.id,
}
}
return {}
@api.multi
def onchange_payment_term_date_invoice(self, payment_term_id, date_invoice):
if not date_invoice:
date_invoice = fields.Date.today()
if not payment_term_id:
            # When no payment term is defined, keep the due date already
            # entered by the user, falling back to the invoice date.
return {'value': {'date_due': self.date_due or date_invoice}}
pterm = self.env['account.payment.term'].browse(payment_term_id)
pterm_list = pterm.compute(value=1, date_ref=date_invoice)[0]
if pterm_list:
return {'value': {'date_due': max(line[0] for line in pterm_list)}}
else:
raise except_orm(_('Insufficient Data!'),
_('The payment term of supplier does not have a payment term line.'))
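    # Usage sketch (values assumed): with a payment term of "30% now, 70% in 30
    # days" and date_invoice = '2014-01-01', pterm.compute(value=1,
    # date_ref=date_invoice)[0] returns pairs such as
    # [('2014-01-01', 0.3), ('2014-01-31', 0.7)]; the due date proposed above is
    # the latest of those dates, here '2014-01-31'.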
@api.multi
def onchange_invoice_line(self, lines):
return {}
@api.multi
def onchange_partner_bank(self, partner_bank_id=False):
return {'value': {}}
@api.multi
def onchange_company_id(self, company_id, part_id, type, invoice_line, currency_id):
# TODO: add the missing context parameter when forward-porting in trunk
# so we can remove this hack!
self = self.with_context(self.env['res.users'].context_get())
values = {}
domain = {}
if company_id and part_id and type:
p = self.env['res.partner'].browse(part_id)
if p.property_account_payable and p.property_account_receivable and \
p.property_account_payable.company_id.id != company_id and \
p.property_account_receivable.company_id.id != company_id:
prop = self.env['ir.property']
rec_dom = [('name', '=', 'property_account_receivable'), ('company_id', '=', company_id)]
pay_dom = [('name', '=', 'property_account_payable'), ('company_id', '=', company_id)]
res_dom = [('res_id', '=', 'res.partner,%s' % part_id)]
rec_prop = prop.search(rec_dom + res_dom) or prop.search(rec_dom)
pay_prop = prop.search(pay_dom + res_dom) or prop.search(pay_dom)
rec_account = rec_prop.get_by_record(rec_prop)
pay_account = pay_prop.get_by_record(pay_prop)
if not rec_account and not pay_account:
action = self.env.ref('account.action_account_config')
msg = _('Cannot find a chart of accounts for this company, You should configure it. \nPlease go to Account Configuration.')
raise RedirectWarning(msg, action.id, _('Go to the configuration panel'))
if type in ('out_invoice', 'out_refund'):
acc_id = rec_account.id
else:
acc_id = pay_account.id
values= {'account_id': acc_id}
if self:
if company_id:
for line in self.invoice_line:
if not line.account_id:
continue
if line.account_id.company_id.id == company_id:
continue
accounts = self.env['account.account'].search([('name', '=', line.account_id.name), ('company_id', '=', company_id)])
if not accounts:
action = self.env.ref('account.action_account_config')
msg = _('Cannot find a chart of accounts for this company, You should configure it. \nPlease go to Account Configuration.')
raise RedirectWarning(msg, action.id, _('Go to the configuration panel'))
line.write({'account_id': accounts[-1].id})
else:
for line_cmd in invoice_line or []:
if len(line_cmd) >= 3 and isinstance(line_cmd[2], dict):
line = self.env['account.account'].browse(line_cmd[2]['account_id'])
if line.company_id.id != company_id:
raise except_orm(
_('Configuration Error!'),
_("Invoice line account's company and invoice's company does not match.")
)
if company_id and type:
journal_type = TYPE2JOURNAL[type]
journals = self.env['account.journal'].search([('type', '=', journal_type), ('company_id', '=', company_id)])
if journals:
values['journal_id'] = journals[0].id
journal_defaults = self.env['ir.values'].get_defaults_dict('account.invoice', 'type=%s' % type)
if 'journal_id' in journal_defaults:
values['journal_id'] = journal_defaults['journal_id']
if not values.get('journal_id'):
field_desc = journals.fields_get(['journal_id'])
type_label = next(t for t, label in field_desc['journal_id']['selection'] if t == journal_type)
action = self.env.ref('account.action_account_journal_form')
msg = _('Cannot find any account journal of type "%s" for this company, You should create one.\n Please go to Journal Configuration') % type_label
raise RedirectWarning(msg, action.id, _('Go to the configuration panel'))
domain = {'journal_id': [('id', 'in', journals.ids)]}
return {'value': values, 'domain': domain}
@api.multi
def action_cancel_draft(self):
# go from canceled state to draft state
self.write({'state': 'draft'})
self.delete_workflow()
self.create_workflow()
return True
@api.one
@api.returns('ir.ui.view')
def get_formview_id(self):
""" Update form view id of action to open the invoice """
if self.type == 'in_invoice':
return self.env.ref('account.invoice_supplier_form')
else:
return self.env.ref('account.invoice_form')
@api.multi
def move_line_id_payment_get(self):
# return the move line ids with the same account as the invoice self
if not self.id:
return []
query = """ SELECT l.id
FROM account_move_line l, account_invoice i
WHERE i.id = %s AND l.move_id = i.move_id AND l.account_id = i.account_id
"""
self._cr.execute(query, (self.id,))
return [row[0] for row in self._cr.fetchall()]
@api.multi
def test_paid(self):
# check whether all corresponding account move lines are reconciled
line_ids = self.move_line_id_payment_get()
if not line_ids:
return False
query = "SELECT reconcile_id FROM account_move_line WHERE id IN %s"
self._cr.execute(query, (tuple(line_ids),))
return all(row[0] for row in self._cr.fetchall())
@api.multi
def button_reset_taxes(self):
account_invoice_tax = self.env['account.invoice.tax']
ctx = dict(self._context)
for invoice in self:
self._cr.execute("DELETE FROM account_invoice_tax WHERE invoice_id=%s AND manual is False", (invoice.id,))
self.invalidate_cache()
partner = invoice.partner_id
if partner.lang:
ctx['lang'] = partner.lang
for taxe in account_invoice_tax.compute(invoice).values():
account_invoice_tax.create(taxe)
# dummy write on self to trigger recomputations
return self.with_context(ctx).write({'invoice_line': []})
@api.multi
def button_compute(self, set_total=False):
self.button_reset_taxes()
for invoice in self:
if set_total:
invoice.check_total = invoice.amount_total
return True
@staticmethod
def _convert_ref(ref):
return (ref or '').replace('/','')
@api.multi
def _get_analytic_lines(self):
""" Return a list of dict for creating analytic lines for self[0] """
company_currency = self.company_id.currency_id
sign = 1 if self.type in ('out_invoice', 'in_refund') else -1
iml = self.env['account.invoice.line'].move_line_get(self.id)
for il in iml:
if il['account_analytic_id']:
if self.type in ('in_invoice', 'in_refund'):
ref = self.reference
else:
ref = self._convert_ref(self.number)
if not self.journal_id.analytic_journal_id:
raise except_orm(_('No Analytic Journal!'),
_("You have to define an analytic journal on the '%s' journal!") % (self.journal_id.name,))
currency = self.currency_id.with_context(date=self.date_invoice)
il['analytic_lines'] = [(0,0, {
'name': il['name'],
'date': self.date_invoice,
'account_id': il['account_analytic_id'],
'unit_amount': il['quantity'],
'amount': currency.compute(il['price'], company_currency) * sign,
'product_id': il['product_id'],
'product_uom_id': il['uos_id'],
'general_account_id': il['account_id'],
'journal_id': self.journal_id.analytic_journal_id.id,
'ref': ref,
})]
return iml
@api.multi
def action_date_assign(self):
for inv in self:
res = inv.onchange_payment_term_date_invoice(inv.payment_term.id, inv.date_invoice)
if res and res.get('value'):
inv.write(res['value'])
return True
@api.multi
def finalize_invoice_move_lines(self, move_lines):
""" finalize_invoice_move_lines(move_lines) -> move_lines
Hook method to be overridden in additional modules to verify and
possibly alter the move lines to be created by an invoice, for
special cases.
:param move_lines: list of dictionaries with the account.move.lines (as for create())
:return: the (possibly updated) final move_lines to create for this invoice
"""
return move_lines
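    # Sketch of a possible override from another module (the module and the
    # behaviour below are hypothetical, not part of this file):
    #
    #   class my_invoice(models.Model):
    #       _inherit = 'account.invoice'
    #
    #       @api.multi
    #       def finalize_invoice_move_lines(self, move_lines):
    #           move_lines = super(my_invoice, self).finalize_invoice_move_lines(move_lines)
    #           for command in move_lines:   # each entry is a (0, 0, vals) tuple
    #               command[2]['name'] = '[AUTO] ' + command[2]['name']
    #           return move_lines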
@api.multi
def check_tax_lines(self, compute_taxes):
account_invoice_tax = self.env['account.invoice.tax']
company_currency = self.company_id.currency_id
if not self.tax_line:
for tax in compute_taxes.values():
account_invoice_tax.create(tax)
else:
tax_key = []
for tax in self.tax_line:
if tax.manual:
continue
key = (tax.tax_code_id.id, tax.base_code_id.id, tax.account_id.id)
tax_key.append(key)
if key not in compute_taxes:
raise except_orm(_('Warning!'), _('Global taxes defined, but they are not in invoice lines !'))
base = compute_taxes[key]['base']
if abs(base - tax.base) > company_currency.rounding:
raise except_orm(_('Warning!'), _('Tax base different!\nClick on compute to update the tax base.'))
for key in compute_taxes:
if key not in tax_key:
raise except_orm(_('Warning!'), _('Taxes are missing!\nClick on compute button.'))
@api.multi
def compute_invoice_totals(self, company_currency, ref, invoice_move_lines):
total = 0
total_currency = 0
for line in invoice_move_lines:
if self.currency_id != company_currency:
currency = self.currency_id.with_context(date=self.date_invoice or fields.Date.today())
line['currency_id'] = currency.id
line['amount_currency'] = line['price']
line['price'] = currency.compute(line['price'], company_currency)
else:
line['currency_id'] = False
line['amount_currency'] = False
line['ref'] = ref
if self.type in ('out_invoice','in_refund'):
total += line['price']
total_currency += line['amount_currency'] or line['price']
line['price'] = - line['price']
else:
total -= line['price']
total_currency -= line['amount_currency'] or line['price']
return total, total_currency, invoice_move_lines
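    # Sign convention illustrated (figures assumed): for a customer invoice
    # ('out_invoice') line with price 100.0, the loop above adds +100.0 to
    # `total` and then flips the line to price = -100.0, so product lines end up
    # as credits while the single 'dest' counterpart built later in
    # action_move_create() carries the +100.0 debit on the receivable account;
    # supplier invoices take the opposite branch.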
def inv_line_characteristic_hashcode(self, invoice_line):
"""Overridable hashcode generation for invoice lines. Lines having the same hashcode
will be grouped together if the journal has the 'group line' option. Of course a module
can add fields to invoice lines that would need to be tested too before merging lines
or not."""
return "%s-%s-%s-%s-%s" % (
invoice_line['account_id'],
invoice_line.get('tax_code_id', 'False'),
invoice_line.get('product_id', 'False'),
invoice_line.get('analytic_account_id', 'False'),
invoice_line.get('date_maturity', 'False'),
)
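    # Illustrative sketch (ids assumed): a line dict such as
    # {'account_id': 7, 'tax_code_id': 3, 'product_id': 12,
    #  'analytic_account_id': False, 'date_maturity': False}
    # hashes to "7-3-12-False-False"; group_lines() below merges lines sharing
    # the same hashcode (netting debit/credit and summing tax_amount and
    # analytic_lines) when the journal has "group invoice lines" enabled.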
    def group_lines(self, iml, line):
        """Merge account move lines (and hence analytic lines) if invoice line hashcodes are equal"""
if self.journal_id.group_invoice_lines:
line2 = {}
for x, y, l in line:
tmp = self.inv_line_characteristic_hashcode(l)
if tmp in line2:
am = line2[tmp]['debit'] - line2[tmp]['credit'] + (l['debit'] - l['credit'])
line2[tmp]['debit'] = (am > 0) and am or 0.0
line2[tmp]['credit'] = (am < 0) and -am or 0.0
line2[tmp]['tax_amount'] += l['tax_amount']
line2[tmp]['analytic_lines'] += l['analytic_lines']
else:
line2[tmp] = l
line = []
for key, val in line2.items():
line.append((0,0,val))
return line
@api.multi
def action_move_create(self):
""" Creates invoice related analytics and financial move lines """
account_invoice_tax = self.env['account.invoice.tax']
account_move = self.env['account.move']
for inv in self:
if not inv.journal_id.sequence_id:
raise except_orm(_('Error!'), _('Please define sequence on the journal related to this invoice.'))
if not inv.invoice_line:
raise except_orm(_('No Invoice Lines!'), _('Please create some invoice lines.'))
if inv.move_id:
continue
ctx = dict(self._context, lang=inv.partner_id.lang)
if not inv.date_invoice:
inv.with_context(ctx).write({'date_invoice': fields.Date.context_today(self)})
date_invoice = inv.date_invoice
company_currency = inv.company_id.currency_id
# create the analytical lines, one move line per invoice line
iml = inv._get_analytic_lines()
# check if taxes are all computed
compute_taxes = account_invoice_tax.compute(inv)
inv.check_tax_lines(compute_taxes)
# I disabled the check_total feature
group_check_total = self.env.ref('account.group_supplier_inv_check_total')
if self.env.user in group_check_total.users:
if inv.type in ('in_invoice', 'in_refund') and abs(inv.check_total - inv.amount_total) >= (inv.currency_id.rounding / 2.0):
raise except_orm(_('Bad Total!'), _('Please verify the price of the invoice!\nThe encoded total does not match the computed total.'))
if inv.payment_term:
total_fixed = total_percent = 0
for line in inv.payment_term.line_ids:
if line.value == 'fixed':
total_fixed += line.value_amount
if line.value == 'procent':
total_percent += line.value_amount
total_fixed = (total_fixed * 100) / (inv.amount_total or 1.0)
if (total_fixed + total_percent) > 100:
raise except_orm(_('Error!'), _("Cannot create the invoice.\nThe related payment term is probably misconfigured as it gives a computed amount greater than the total invoiced amount. In order to avoid rounding issues, the latest line of your payment term must be of type 'balance'."))
# one move line per tax line
iml += account_invoice_tax.move_line_get(inv.id)
if inv.type in ('in_invoice', 'in_refund'):
ref = inv.reference
else:
ref = self._convert_ref(inv.number)
diff_currency = inv.currency_id != company_currency
# create one move line for the total and possibly adjust the other lines amount
total, total_currency, iml = inv.with_context(ctx).compute_invoice_totals(company_currency, ref, iml)
name = inv.name or inv.supplier_invoice_number or '/'
totlines = []
if inv.payment_term:
totlines = inv.with_context(ctx).payment_term.compute(total, date_invoice)[0]
if totlines:
res_amount_currency = total_currency
ctx['date'] = date_invoice
for i, t in enumerate(totlines):
if inv.currency_id != company_currency:
amount_currency = company_currency.with_context(ctx).compute(t[1], inv.currency_id)
else:
amount_currency = False
# last line: add the diff
res_amount_currency -= amount_currency or 0
if i + 1 == len(totlines):
amount_currency += res_amount_currency
iml.append({
'type': 'dest',
'name': name,
'price': t[1],
'account_id': inv.account_id.id,
'date_maturity': t[0],
'amount_currency': diff_currency and amount_currency,
'currency_id': diff_currency and inv.currency_id.id,
'ref': ref,
})
else:
iml.append({
'type': 'dest',
'name': name,
'price': total,
'account_id': inv.account_id.id,
'date_maturity': inv.date_due,
'amount_currency': diff_currency and total_currency,
'currency_id': diff_currency and inv.currency_id.id,
'ref': ref
})
date = date_invoice
part = self.env['res.partner']._find_accounting_partner(inv.partner_id)
line = [(0, 0, self.line_get_convert(l, part.id, date)) for l in iml]
line = inv.group_lines(iml, line)
journal = inv.journal_id.with_context(ctx)
if journal.centralisation:
raise except_orm(_('User Error!'),
_('You cannot create an invoice on a centralized journal. Uncheck the centralized counterpart box in the related journal from the configuration menu.'))
line = inv.finalize_invoice_move_lines(line)
move_vals = {
'ref': inv.reference or inv.name,
'line_id': line,
'journal_id': journal.id,
'date': date,
'narration': inv.comment,
'company_id': inv.company_id.id,
}
ctx['company_id'] = inv.company_id.id
period = inv.period_id
if not period:
period = period.with_context(ctx).find(date_invoice)[:1]
if period:
move_vals['period_id'] = period.id
for i in line:
i[2]['period_id'] = period.id
ctx['invoice'] = inv
move = account_move.with_context(ctx).create(move_vals)
# make the invoice point to that move
vals = {
'move_id': move.id,
'period_id': period.id,
'move_name': move.name,
}
inv.with_context(ctx).write(vals)
# Pass invoice in context in method post: used if you want to get the same
# account move reference when creating the same invoice after a cancelled one:
move.post()
self._log_event()
return True
@api.multi
def invoice_validate(self):
return self.write({'state': 'open'})
@api.model
def line_get_convert(self, line, part, date):
return {
'date_maturity': line.get('date_maturity', False),
'partner_id': part,
'name': line['name'][:64],
'date': date,
'debit': line['price']>0 and line['price'],
'credit': line['price']<0 and -line['price'],
'account_id': line['account_id'],
'analytic_lines': line.get('analytic_lines', []),
'amount_currency': line['price']>0 and abs(line.get('amount_currency', False)) or -abs(line.get('amount_currency', False)),
'currency_id': line.get('currency_id', False),
'tax_code_id': line.get('tax_code_id', False),
'tax_amount': line.get('tax_amount', False),
'ref': line.get('ref', False),
'quantity': line.get('quantity',1.00),
'product_id': line.get('product_id', False),
'product_uom_id': line.get('uos_id', False),
'analytic_account_id': line.get('account_analytic_id', False),
}
@api.multi
def action_number(self):
        # TODO: not a correct fix, but we need fresh values before reading them.
self.write({})
for inv in self:
self.write({'internal_number': inv.number})
if inv.type in ('in_invoice', 'in_refund'):
if not inv.reference:
ref = self._convert_ref(inv.number)
else:
ref = inv.reference
else:
ref = self._convert_ref(inv.number)
self._cr.execute(""" UPDATE account_move SET ref=%s
WHERE id=%s AND (ref IS NULL OR ref = '')""",
(ref, inv.move_id.id))
self._cr.execute(""" UPDATE account_move_line SET ref=%s
WHERE move_id=%s AND (ref IS NULL OR ref = '')""",
(ref, inv.move_id.id))
self._cr.execute(""" UPDATE account_analytic_line SET ref=%s
FROM account_move_line
WHERE account_move_line.move_id = %s AND
account_analytic_line.move_id = account_move_line.id""",
(ref, inv.move_id.id))
self.invalidate_cache()
return True
@api.multi
def action_cancel(self):
moves = self.env['account.move']
for inv in self:
if inv.move_id:
moves += inv.move_id
if inv.payment_ids:
for move_line in inv.payment_ids:
if move_line.reconcile_partial_id.line_partial_ids:
raise except_orm(_('Error!'), _('You cannot cancel an invoice which is partially paid. You need to unreconcile related payment entries first.'))
# First, set the invoices as cancelled and detach the move ids
self.write({'state': 'cancel', 'move_id': False})
if moves:
# second, invalidate the move(s)
moves.button_cancel()
# delete the move this invoice was pointing to
# Note that the corresponding move_lines and move_reconciles
# will be automatically deleted too
moves.unlink()
self._log_event(-1.0, 'Cancel Invoice')
return True
###################
@api.multi
def _log_event(self, factor=1.0, name='Open Invoice'):
#TODO: implement messages system
return True
@api.multi
def name_get(self):
TYPES = {
'out_invoice': _('Invoice'),
'in_invoice': _('Supplier Invoice'),
'out_refund': _('Refund'),
'in_refund': _('Supplier Refund'),
}
result = []
for inv in self:
result.append((inv.id, "%s %s" % (inv.number or TYPES[inv.type], inv.name or '')))
return result
@api.model
def name_search(self, name, args=None, operator='ilike', limit=100):
args = args or []
recs = self.browse()
if name:
recs = self.search([('number', '=', name)] + args, limit=limit)
if not recs:
recs = self.search([('name', operator, name)] + args, limit=limit)
return recs.name_get()
@api.model
def _refund_cleanup_lines(self, lines):
""" Convert records to dict of values suitable for one2many line creation
:param recordset lines: records to convert
            :return: list of command tuples for one2many line creation [(0, 0, dict of values), ...]
"""
result = []
for line in lines:
values = {}
for name, field in line._fields.iteritems():
if name in MAGIC_COLUMNS:
continue
elif field.type == 'many2one':
values[name] = line[name].id
elif field.type not in ['many2many', 'one2many']:
values[name] = line[name]
elif name == 'invoice_line_tax_id':
values[name] = [(6, 0, line[name].ids)]
result.append((0, 0, values))
return result
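    # Example of the returned structure (values assumed): one invoice line is
    # turned into a one2many "create" command such as
    #   (0, 0, {'name': 'Service', 'quantity': 1.0, 'price_unit': 100.0,
    #           'account_id': 7, 'invoice_line_tax_id': [(6, 0, [2])]})
    # i.e. many2one fields are flattened to ids, the taxes many2many becomes a
    # (6, 0, ids) replace command, and magic/one2many columns are dropped.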
@api.model
def _prepare_refund(self, invoice, date=None, period_id=None, description=None, journal_id=None):
""" Prepare the dict of values to create the new refund from the invoice.
This method may be overridden to implement custom
refund generation (making sure to call super() to establish
a clean extension chain).
:param record invoice: invoice to refund
:param string date: refund creation date from the wizard
:param integer period_id: force account.period from the wizard
:param string description: description of the refund from the wizard
:param integer journal_id: account.journal from the wizard
:return: dict of value to create() the refund
"""
values = {}
for field in ['name', 'reference', 'comment', 'date_due', 'partner_id', 'company_id',
'account_id', 'currency_id', 'payment_term', 'user_id', 'fiscal_position']:
if invoice._fields[field].type == 'many2one':
values[field] = invoice[field].id
else:
values[field] = invoice[field] or False
values['invoice_line'] = self._refund_cleanup_lines(invoice.invoice_line)
tax_lines = filter(lambda l: l.manual, invoice.tax_line)
values['tax_line'] = self._refund_cleanup_lines(tax_lines)
if journal_id:
journal = self.env['account.journal'].browse(journal_id)
elif invoice['type'] == 'in_invoice':
journal = self.env['account.journal'].search([('type', '=', 'purchase_refund')], limit=1)
else:
journal = self.env['account.journal'].search([('type', '=', 'sale_refund')], limit=1)
values['journal_id'] = journal.id
values['type'] = TYPE2REFUND[invoice['type']]
values['date_invoice'] = date or fields.Date.today()
values['state'] = 'draft'
values['number'] = False
if period_id:
values['period_id'] = period_id
if description:
values['name'] = description
return values
@api.multi
@api.returns('self')
def refund(self, date=None, period_id=None, description=None, journal_id=None):
new_invoices = self.browse()
for invoice in self:
# create the new invoice
values = self._prepare_refund(invoice, date=date, period_id=period_id,
description=description, journal_id=journal_id)
new_invoices += self.create(values)
return new_invoices
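    # Usage sketch (records assumed): crediting an open customer invoice from
    # server-side code could look like
    #   credit_note = invoice.refund(date=fields.Date.today(),
    #                                description='Goods returned')
    #   credit_note.signal_workflow('invoice_open')
    # the refund is created in 'draft' on the matching *_refund journal and
    # still has to be validated (and reconciled) separately.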
@api.v8
def pay_and_reconcile(self, pay_amount, pay_account_id, period_id, pay_journal_id,
writeoff_acc_id, writeoff_period_id, writeoff_journal_id, name=''):
# TODO check if we can use different period for payment and the writeoff line
assert len(self)==1, "Can only pay one invoice at a time."
# Take the seq as name for move
SIGN = {'out_invoice': -1, 'in_invoice': 1, 'out_refund': 1, 'in_refund': -1}
direction = SIGN[self.type]
# take the chosen date
date = self._context.get('date_p') or fields.Date.today()
# Take the amount in currency and the currency of the payment
if self._context.get('amount_currency') and self._context.get('currency_id'):
amount_currency = self._context['amount_currency']
currency_id = self._context['currency_id']
else:
amount_currency = False
currency_id = False
pay_journal = self.env['account.journal'].browse(pay_journal_id)
if self.type in ('in_invoice', 'in_refund'):
ref = self.reference
else:
ref = self._convert_ref(self.number)
partner = self.partner_id._find_accounting_partner(self.partner_id)
name = name or self.invoice_line.name or self.number
# Pay attention to the sign for both debit/credit AND amount_currency
l1 = {
'name': name,
'debit': direction * pay_amount > 0 and direction * pay_amount,
'credit': direction * pay_amount < 0 and -direction * pay_amount,
'account_id': self.account_id.id,
'partner_id': partner.id,
'ref': ref,
'date': date,
'currency_id': currency_id,
'amount_currency': direction * (amount_currency or 0.0),
'company_id': self.company_id.id,
}
l2 = {
'name': name,
'debit': direction * pay_amount < 0 and -direction * pay_amount,
'credit': direction * pay_amount > 0 and direction * pay_amount,
'account_id': pay_account_id,
'partner_id': partner.id,
'ref': ref,
'date': date,
'currency_id': currency_id,
'amount_currency': -direction * (amount_currency or 0.0),
'company_id': self.company_id.id,
}
move = self.env['account.move'].create({
'ref': ref,
'line_id': [(0, 0, l1), (0, 0, l2)],
'journal_id': pay_journal_id,
'period_id': period_id,
'date': date,
})
move_ids = (move | self.move_id).ids
self._cr.execute("SELECT id FROM account_move_line WHERE move_id IN %s",
(tuple(move_ids),))
lines = self.env['account.move.line'].browse([r[0] for r in self._cr.fetchall()])
lines2rec = lines.browse()
total = 0.0
for line in itertools.chain(lines, self.payment_ids):
if line.account_id == self.account_id:
lines2rec += line
total += (line.debit or 0.0) - (line.credit or 0.0)
inv_id, name = self.name_get()[0]
if not round(total, self.env['decimal.precision'].precision_get('Account')) or writeoff_acc_id:
lines2rec.reconcile('manual', writeoff_acc_id, writeoff_period_id, writeoff_journal_id)
else:
code = self.currency_id.symbol
# TODO: use currency's formatting function
msg = _("Invoice partially paid: %s%s of %s%s (%s%s remaining).") % \
(pay_amount, code, self.amount_total, code, total, code)
self.message_post(body=msg)
lines2rec.reconcile_partial('manual')
# Update the stored value (fields.function), so we write to trigger recompute
return self.write({})
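    # Usage sketch (records assumed): registering a full payment of an open
    # invoice through a bank journal could look like
    #   invoice.pay_and_reconcile(invoice.amount_total,
    #                             bank_journal.default_credit_account_id.id,
    #                             period.id, bank_journal.id,
    #                             False, False, False)
    # with no write-off account, the lines are fully reconciled only when the
    # paid amount matches the residual, otherwise reconcile_partial() is used.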
@api.v7
def pay_and_reconcile(self, cr, uid, ids, pay_amount, pay_account_id, period_id, pay_journal_id,
writeoff_acc_id, writeoff_period_id, writeoff_journal_id, context=None, name=''):
recs = self.browse(cr, uid, ids, context)
return recs.pay_and_reconcile(pay_amount, pay_account_id, period_id, pay_journal_id,
writeoff_acc_id, writeoff_period_id, writeoff_journal_id, name=name)
class account_invoice_line(models.Model):
_name = "account.invoice.line"
_description = "Invoice Line"
_order = "invoice_id,sequence,id"
@api.one
@api.depends('price_unit', 'discount', 'invoice_line_tax_id', 'quantity',
'product_id', 'invoice_id.partner_id', 'invoice_id.currency_id')
def _compute_price(self):
price = self.price_unit * (1 - (self.discount or 0.0) / 100.0)
taxes = self.invoice_line_tax_id.compute_all(price, self.quantity, product=self.product_id, partner=self.invoice_id.partner_id)
self.price_subtotal = taxes['total']
if self.invoice_id:
self.price_subtotal = self.invoice_id.currency_id.round(self.price_subtotal)
@api.model
def _default_price_unit(self):
if not self._context.get('check_total'):
return 0
total = self._context['check_total']
for l in self._context.get('invoice_line', []):
if isinstance(l, (list, tuple)) and len(l) >= 3 and l[2]:
vals = l[2]
price = vals.get('price_unit', 0) * (1 - vals.get('discount', 0) / 100.0)
total = total - (price * vals.get('quantity'))
taxes = vals.get('invoice_line_tax_id')
if taxes and len(taxes[0]) >= 3 and taxes[0][2]:
taxes = self.env['account.tax'].browse(taxes[0][2])
tax_res = taxes.compute_all(price, vals.get('quantity'),
product=vals.get('product_id'), partner=self._context.get('partner_id'))
for tax in tax_res['taxes']:
total = total - tax['amount']
return total
@api.model
def _default_account(self):
# XXX this gets the default account for the user's company,
# it should get the default account for the invoice's company
# however, the invoice's company does not reach this point
if self._context.get('type') in ('out_invoice', 'out_refund'):
return self.env['ir.property'].get('property_account_income_categ', 'product.category')
else:
return self.env['ir.property'].get('property_account_expense_categ', 'product.category')
name = fields.Text(string='Description', required=True)
origin = fields.Char(string='Source Document',
help="Reference of the document that produced this invoice.")
sequence = fields.Integer(string='Sequence', default=10,
help="Gives the sequence of this line when displaying the invoice.")
invoice_id = fields.Many2one('account.invoice', string='Invoice Reference',
ondelete='cascade', index=True)
uos_id = fields.Many2one('product.uom', string='Unit of Measure',
ondelete='set null', index=True)
product_id = fields.Many2one('product.product', string='Product',
ondelete='set null', index=True)
account_id = fields.Many2one('account.account', string='Account',
required=True, domain=[('type', 'not in', ['view', 'closed'])],
default=_default_account,
help="The income or expense account related to the selected product.")
price_unit = fields.Float(string='Unit Price', required=True,
digits= dp.get_precision('Product Price'),
default=_default_price_unit)
price_subtotal = fields.Float(string='Amount', digits= dp.get_precision('Account'),
store=True, readonly=True, compute='_compute_price')
quantity = fields.Float(string='Quantity', digits= dp.get_precision('Product Unit of Measure'),
required=True, default=1)
discount = fields.Float(string='Discount (%)', digits= dp.get_precision('Discount'),
default=0.0)
invoice_line_tax_id = fields.Many2many('account.tax',
'account_invoice_line_tax', 'invoice_line_id', 'tax_id',
string='Taxes', domain=[('parent_id', '=', False)])
account_analytic_id = fields.Many2one('account.analytic.account',
string='Analytic Account')
company_id = fields.Many2one('res.company', string='Company',
related='invoice_id.company_id', store=True, readonly=True)
partner_id = fields.Many2one('res.partner', string='Partner',
related='invoice_id.partner_id', store=True, readonly=True)
@api.model
def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
res = super(account_invoice_line, self).fields_view_get(
view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
if self._context.get('type'):
doc = etree.XML(res['arch'])
for node in doc.xpath("//field[@name='product_id']"):
if self._context['type'] in ('in_invoice', 'in_refund'):
node.set('domain', "[('purchase_ok', '=', True)]")
else:
node.set('domain', "[('sale_ok', '=', True)]")
res['arch'] = etree.tostring(doc)
return res
@api.multi
def product_id_change(self, product, uom_id, qty=0, name='', type='out_invoice',
partner_id=False, fposition_id=False, price_unit=False, currency_id=False,
context=None, company_id=None):
context = context or {}
company_id = company_id if company_id is not None else context.get('company_id', False)
self = self.with_context(company_id=company_id, force_company=company_id)
if not partner_id:
raise except_orm(_('No Partner Defined!'), _("You must first select a partner!"))
if not product:
if type in ('in_invoice', 'in_refund'):
return {'value': {}, 'domain': {'product_uom': []}}
else:
return {'value': {'price_unit': 0.0}, 'domain': {'product_uom': []}}
values = {}
part = self.env['res.partner'].browse(partner_id)
fpos = self.env['account.fiscal.position'].browse(fposition_id)
if part.lang:
self = self.with_context(lang=part.lang)
product = self.env['product.product'].browse(product)
values['name'] = product.partner_ref
if type in ('out_invoice', 'out_refund'):
account = product.property_account_income or product.categ_id.property_account_income_categ
else:
account = product.property_account_expense or product.categ_id.property_account_expense_categ
account = fpos.map_account(account)
if account:
values['account_id'] = account.id
if type in ('out_invoice', 'out_refund'):
taxes = product.taxes_id or account.tax_ids
if product.description_sale:
values['name'] += '\n' + product.description_sale
else:
taxes = product.supplier_taxes_id or account.tax_ids
if product.description_purchase:
values['name'] += '\n' + product.description_purchase
taxes = fpos.map_tax(taxes)
values['invoice_line_tax_id'] = taxes.ids
if type in ('in_invoice', 'in_refund'):
values['price_unit'] = price_unit or product.standard_price
else:
values['price_unit'] = product.list_price
values['uos_id'] = uom_id or product.uom_id.id
domain = {'uos_id': [('category_id', '=', product.uom_id.category_id.id)]}
company = self.env['res.company'].browse(company_id)
currency = self.env['res.currency'].browse(currency_id)
if company and currency:
if company.currency_id != currency:
if type in ('in_invoice', 'in_refund'):
values['price_unit'] = product.standard_price
values['price_unit'] = values['price_unit'] * currency.rate
if values['uos_id'] and values['uos_id'] != product.uom_id.id:
values['price_unit'] = self.env['product.uom']._compute_price(
product.uom_id.id, values['price_unit'], values['uos_id'])
return {'value': values, 'domain': domain}
@api.multi
def uos_id_change(self, product, uom, qty=0, name='', type='out_invoice', partner_id=False,
fposition_id=False, price_unit=False, currency_id=False, context=None, company_id=None):
context = context or {}
        company_id = company_id if company_id is not None else context.get('company_id', False)
self = self.with_context(company_id=company_id)
result = self.product_id_change(
product, uom, qty, name, type, partner_id, fposition_id, price_unit,
currency_id, context=context, company_id=company_id,
)
warning = {}
if not uom:
result['value']['price_unit'] = 0.0
if product and uom:
prod = self.env['product.product'].browse(product)
prod_uom = self.env['product.uom'].browse(uom)
if prod.uom_id.category_id != prod_uom.category_id:
warning = {
'title': _('Warning!'),
'message': _('The selected unit of measure is not compatible with the unit of measure of the product.'),
}
result['value']['uos_id'] = prod.uom_id.id
if warning:
result['warning'] = warning
return result
@api.model
def move_line_get(self, invoice_id):
inv = self.env['account.invoice'].browse(invoice_id)
currency = inv.currency_id.with_context(date=inv.date_invoice)
company_currency = inv.company_id.currency_id
res = []
for line in inv.invoice_line:
mres = self.move_line_get_item(line)
if not mres:
continue
res.append(mres)
tax_code_found = False
taxes = line.invoice_line_tax_id.compute_all(
(line.price_unit * (1.0 - (line.discount or 0.0) / 100.0)),
line.quantity, line.product_id, inv.partner_id)['taxes']
for tax in taxes:
if inv.type in ('out_invoice', 'in_invoice'):
tax_code_id = tax['base_code_id']
tax_amount = line.price_subtotal * tax['base_sign']
else:
tax_code_id = tax['ref_base_code_id']
tax_amount = line.price_subtotal * tax['ref_base_sign']
if tax_code_found:
if not tax_code_id:
continue
res.append(dict(mres))
res[-1]['price'] = 0.0
res[-1]['account_analytic_id'] = False
elif not tax_code_id:
continue
tax_code_found = True
res[-1]['tax_code_id'] = tax_code_id
res[-1]['tax_amount'] = currency.compute(tax_amount, company_currency)
return res
@api.model
def move_line_get_item(self, line):
return {
'type': 'src',
'name': line.name.split('\n')[0][:64],
'price_unit': line.price_unit,
'quantity': line.quantity,
'price': line.price_subtotal,
'account_id': line.account_id.id,
'product_id': line.product_id.id,
'uos_id': line.uos_id.id,
'account_analytic_id': line.account_analytic_id.id,
'taxes': line.invoice_line_tax_id,
}
#
# Set the tax field according to the account and the fiscal position
#
@api.multi
def onchange_account_id(self, product_id, partner_id, inv_type, fposition_id, account_id):
if not account_id:
return {}
unique_tax_ids = []
account = self.env['account.account'].browse(account_id)
if not product_id:
fpos = self.env['account.fiscal.position'].browse(fposition_id)
unique_tax_ids = fpos.map_tax(account.tax_ids).ids
else:
product_change_result = self.product_id_change(product_id, False, type=inv_type,
partner_id=partner_id, fposition_id=fposition_id, company_id=account.company_id.id)
if 'invoice_line_tax_id' in product_change_result.get('value', {}):
unique_tax_ids = product_change_result['value']['invoice_line_tax_id']
return {'value': {'invoice_line_tax_id': unique_tax_ids}}
class account_invoice_tax(models.Model):
_name = "account.invoice.tax"
_description = "Invoice Tax"
_order = 'sequence'
@api.one
@api.depends('base', 'base_amount', 'amount', 'tax_amount')
def _compute_factors(self):
self.factor_base = self.base_amount / self.base if self.base else 1.0
self.factor_tax = self.tax_amount / self.amount if self.amount else 1.0
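    # Illustrative figures (assumed): with base = 100.0 in the invoice currency
    # and base_amount = 120.0 already expressed in the company currency,
    # factor_base is 1.2; base_change()/amount_change() below reuse these
    # factors so that manually editing the base or amount keeps the tax
    # declaration amounts proportional in the company currency.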
invoice_id = fields.Many2one('account.invoice', string='Invoice Line',
ondelete='cascade', index=True)
name = fields.Char(string='Tax Description',
required=True)
account_id = fields.Many2one('account.account', string='Tax Account',
required=True, domain=[('type', 'not in', ['view', 'income', 'closed'])])
account_analytic_id = fields.Many2one('account.analytic.account', string='Analytic account')
base = fields.Float(string='Base', digits=dp.get_precision('Account'))
amount = fields.Float(string='Amount', digits=dp.get_precision('Account'))
manual = fields.Boolean(string='Manual', default=True)
sequence = fields.Integer(string='Sequence',
help="Gives the sequence order when displaying a list of invoice tax.")
base_code_id = fields.Many2one('account.tax.code', string='Base Code',
help="The account basis of the tax declaration.")
base_amount = fields.Float(string='Base Code Amount', digits=dp.get_precision('Account'),
default=0.0)
tax_code_id = fields.Many2one('account.tax.code', string='Tax Code',
help="The tax basis of the tax declaration.")
tax_amount = fields.Float(string='Tax Code Amount', digits=dp.get_precision('Account'),
default=0.0)
company_id = fields.Many2one('res.company', string='Company',
related='account_id.company_id', store=True, readonly=True)
    factor_base = fields.Float(string='Multiplication factor for Base code',
compute='_compute_factors')
    factor_tax = fields.Float(string='Multiplication factor for Tax code',
compute='_compute_factors')
@api.multi
def base_change(self, base, currency_id=False, company_id=False, date_invoice=False):
factor = self.factor_base if self else 1
company = self.env['res.company'].browse(company_id)
if currency_id and company.currency_id:
currency = self.env['res.currency'].browse(currency_id)
currency = currency.with_context(date=date_invoice or fields.Date.today())
base = currency.compute(base * factor, company.currency_id, round=False)
return {'value': {'base_amount': base}}
@api.multi
def amount_change(self, amount, currency_id=False, company_id=False, date_invoice=False):
factor = self.factor_tax if self else 1
company = self.env['res.company'].browse(company_id)
if currency_id and company.currency_id:
currency = self.env['res.currency'].browse(currency_id)
currency = currency.with_context(date=date_invoice or fields.Date.today())
amount = currency.compute(amount * factor, company.currency_id, round=False)
return {'value': {'tax_amount': amount}}
@api.v8
def compute(self, invoice):
tax_grouped = {}
currency = invoice.currency_id.with_context(date=invoice.date_invoice or fields.Date.today())
company_currency = invoice.company_id.currency_id
for line in invoice.invoice_line:
taxes = line.invoice_line_tax_id.compute_all(
(line.price_unit * (1 - (line.discount or 0.0) / 100.0)),
line.quantity, line.product_id, invoice.partner_id)['taxes']
for tax in taxes:
val = {
'invoice_id': invoice.id,
'name': tax['name'],
'amount': tax['amount'],
'manual': False,
'sequence': tax['sequence'],
'base': currency.round(tax['price_unit'] * line['quantity']),
}
if invoice.type in ('out_invoice','in_invoice'):
val['base_code_id'] = tax['base_code_id']
val['tax_code_id'] = tax['tax_code_id']
val['base_amount'] = currency.compute(val['base'] * tax['base_sign'], company_currency, round=False)
val['tax_amount'] = currency.compute(val['amount'] * tax['tax_sign'], company_currency, round=False)
val['account_id'] = tax['account_collected_id'] or line.account_id.id
val['account_analytic_id'] = tax['account_analytic_collected_id']
else:
val['base_code_id'] = tax['ref_base_code_id']
val['tax_code_id'] = tax['ref_tax_code_id']
val['base_amount'] = currency.compute(val['base'] * tax['ref_base_sign'], company_currency, round=False)
val['tax_amount'] = currency.compute(val['amount'] * tax['ref_tax_sign'], company_currency, round=False)
val['account_id'] = tax['account_paid_id'] or line.account_id.id
val['account_analytic_id'] = tax['account_analytic_paid_id']
# If the taxes generate moves on the same financial account as the invoice line
# and no default analytic account is defined at the tax level, propagate the
# analytic account from the invoice line to the tax line. This is necessary
                # in situations where (part of) the taxes cannot be reclaimed,
# to ensure the tax move is allocated to the proper analytic account.
if not val.get('account_analytic_id') and line.account_analytic_id and val['account_id'] == line.account_id.id:
val['account_analytic_id'] = line.account_analytic_id.id
key = (val['tax_code_id'], val['base_code_id'], val['account_id'])
                if key not in tax_grouped:
tax_grouped[key] = val
else:
tax_grouped[key]['base'] += val['base']
tax_grouped[key]['amount'] += val['amount']
tax_grouped[key]['base_amount'] += val['base_amount']
tax_grouped[key]['tax_amount'] += val['tax_amount']
for t in tax_grouped.values():
t['base'] = currency.round(t['base'])
t['amount'] = currency.round(t['amount'])
t['base_amount'] = currency.round(t['base_amount'])
t['tax_amount'] = currency.round(t['tax_amount'])
return tax_grouped
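    # Grouping sketch (figures assumed): two lines of 100.0 and 50.0 sharing a
    # 10% tax with the same tax/base codes and account fall under one
    # (tax_code_id, base_code_id, account_id) key, yielding a single
    # account.invoice.tax entry with base 150.0 and amount 15.0, rounded in the
    # invoice currency at the end of the loop.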
@api.v7
def compute(self, cr, uid, invoice_id, context=None):
recs = self.browse(cr, uid, [], context)
invoice = recs.env['account.invoice'].browse(invoice_id)
return recs.compute(invoice)
@api.model
def move_line_get(self, invoice_id):
res = []
self._cr.execute(
'SELECT * FROM account_invoice_tax WHERE invoice_id = %s',
(invoice_id,)
)
for row in self._cr.dictfetchall():
if not (row['amount'] or row['tax_code_id'] or row['tax_amount']):
continue
res.append({
'type': 'tax',
'name': row['name'],
'price_unit': row['amount'],
'quantity': 1,
'price': row['amount'] or 0.0,
'account_id': row['account_id'],
'tax_code_id': row['tax_code_id'],
'tax_amount': row['tax_amount'],
'account_analytic_id': row['account_analytic_id'],
})
return res
class res_partner(models.Model):
# Inherits partner and adds invoice information in the partner form
_inherit = 'res.partner'
invoice_ids = fields.One2many('account.invoice', 'partner_id', string='Invoices',
readonly=True)
def _find_accounting_partner(self, partner):
'''
Find the partner for which the accounting entries will be created
'''
return partner.commercial_partner_id
class mail_compose_message(models.Model):
_inherit = 'mail.compose.message'
@api.multi
def send_mail(self):
context = self._context
if context.get('default_model') == 'account.invoice' and \
context.get('default_res_id') and context.get('mark_invoice_as_sent'):
invoice = self.env['account.invoice'].browse(context['default_res_id'])
invoice = invoice.with_context(mail_post_autofollow=True)
invoice.write({'sent': True})
invoice.message_post(body=_("Invoice sent"))
return super(mail_compose_message, self).send_mail()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -8,695,336,161,790,248,000 | 46.929225 | 303 | 0.570497 | false |
bitcoinknots/bitcoin | test/functional/feature_help.py | 28 | 2499 | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Verify that starting bitcoin with -h works as expected."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class HelpTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes)
# Don't start the node
def get_node_output(self, *, ret_code_expected):
ret_code = self.nodes[0].process.wait(timeout=5)
assert_equal(ret_code, ret_code_expected)
self.nodes[0].stdout.seek(0)
self.nodes[0].stderr.seek(0)
out = self.nodes[0].stdout.read()
err = self.nodes[0].stderr.read()
self.nodes[0].stdout.close()
self.nodes[0].stderr.close()
# Clean up TestNode state
self.nodes[0].running = False
self.nodes[0].process = None
self.nodes[0].rpc_connected = False
self.nodes[0].rpc = None
return out, err
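    # get_node_output() above waits (up to 5 seconds) for bitcoind to exit,
    # asserts the expected exit code, returns the captured (stdout, stderr)
    # pair and resets the TestNode bookkeeping so the node can be started again.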
def run_test(self):
self.log.info("Start bitcoin with -h for help text")
self.nodes[0].start(extra_args=['-h'])
# Node should exit immediately and output help to stdout.
output, _ = self.get_node_output(ret_code_expected=0)
assert b'Options' in output
self.log.info("Help text received: {} (...)".format(output[0:60]))
self.log.info("Start bitcoin with -version for version information")
self.nodes[0].start(extra_args=['-version'])
# Node should exit immediately and output version to stdout.
output, _ = self.get_node_output(ret_code_expected=0)
assert b'version' in output
self.log.info("Version text received: {} (...)".format(output[0:60]))
# Test that arguments not in the help results in an error
self.log.info("Start bitcoind with -fakearg to make sure it does not start")
self.nodes[0].start(extra_args=['-fakearg'])
# Node should exit immediately and output an error to stderr
_, output = self.get_node_output(ret_code_expected=1)
assert b'Error parsing command line arguments' in output
self.log.info("Error message received: {} (...)".format(output[0:60]))
if __name__ == '__main__':
HelpTest().main()
| mit | -5,410,731,314,816,989,000 | 39.306452 | 84 | 0.643858 | false |
haikuginger/urllib3 | test/with_dummyserver/test_https.py | 2 | 21320 | import datetime
import logging
import ssl
import sys
import unittest
import warnings
import mock
from nose.plugins.skip import SkipTest
from dummyserver.testcase import (
HTTPSDummyServerTestCase, IPV6HTTPSDummyServerTestCase
)
from dummyserver.server import (DEFAULT_CA, DEFAULT_CA_BAD, DEFAULT_CERTS,
NO_SAN_CERTS, NO_SAN_CA, DEFAULT_CA_DIR,
IPV6_ADDR_CERTS, IPV6_ADDR_CA, HAS_IPV6)
from test import (
onlyPy26OrOlder,
onlyPy279OrNewer,
requires_network,
TARPIT_HOST,
clear_warnings,
)
from urllib3 import HTTPSConnectionPool
from urllib3.connection import (
VerifiedHTTPSConnection,
UnverifiedHTTPSConnection,
RECENT_DATE,
)
from urllib3.exceptions import (
SSLError,
ReadTimeoutError,
ConnectTimeoutError,
InsecureRequestWarning,
SystemTimeWarning,
InsecurePlatformWarning,
)
from urllib3.packages import six
from urllib3.util.timeout import Timeout
import urllib3.util as util
ResourceWarning = getattr(
six.moves.builtins,
'ResourceWarning', type('ResourceWarning', (), {}))
log = logging.getLogger('urllib3.connectionpool')
log.setLevel(logging.NOTSET)
log.addHandler(logging.StreamHandler(sys.stdout))
class TestHTTPS(HTTPSDummyServerTestCase):
def setUp(self):
self._pool = HTTPSConnectionPool(self.host, self.port)
def test_simple(self):
r = self._pool.request('GET', '/')
self.assertEqual(r.status, 200, r.data)
def test_set_ssl_version_to_tlsv1(self):
self._pool.ssl_version = ssl.PROTOCOL_TLSv1
r = self._pool.request('GET', '/')
self.assertEqual(r.status, 200, r.data)
def test_verified(self):
https_pool = HTTPSConnectionPool(self.host, self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=DEFAULT_CA)
conn = https_pool._new_conn()
self.assertEqual(conn.__class__, VerifiedHTTPSConnection)
with mock.patch('warnings.warn') as warn:
r = https_pool.request('GET', '/')
self.assertEqual(r.status, 200)
# Modern versions of Python, or systems using PyOpenSSL, don't
# emit warnings.
if sys.version_info >= (2, 7, 9) or util.IS_PYOPENSSL:
self.assertFalse(warn.called, warn.call_args_list)
else:
self.assertTrue(warn.called)
if util.HAS_SNI:
call = warn.call_args_list[0]
else:
call = warn.call_args_list[1]
error = call[0][1]
self.assertEqual(error, InsecurePlatformWarning)
def test_verified_with_context(self):
ctx = util.ssl_.create_urllib3_context(cert_reqs=ssl.CERT_REQUIRED)
ctx.load_verify_locations(cafile=DEFAULT_CA)
https_pool = HTTPSConnectionPool(self.host, self.port,
ssl_context=ctx)
conn = https_pool._new_conn()
self.assertEqual(conn.__class__, VerifiedHTTPSConnection)
with mock.patch('warnings.warn') as warn:
r = https_pool.request('GET', '/')
self.assertEqual(r.status, 200)
# Modern versions of Python, or systems using PyOpenSSL, don't
# emit warnings.
if sys.version_info >= (2, 7, 9) or util.IS_PYOPENSSL:
self.assertFalse(warn.called, warn.call_args_list)
else:
self.assertTrue(warn.called)
if util.HAS_SNI:
call = warn.call_args_list[0]
else:
call = warn.call_args_list[1]
error = call[0][1]
self.assertEqual(error, InsecurePlatformWarning)
def test_context_combines_with_ca_certs(self):
ctx = util.ssl_.create_urllib3_context(cert_reqs=ssl.CERT_REQUIRED)
https_pool = HTTPSConnectionPool(self.host, self.port,
ca_certs=DEFAULT_CA,
ssl_context=ctx)
conn = https_pool._new_conn()
self.assertEqual(conn.__class__, VerifiedHTTPSConnection)
with mock.patch('warnings.warn') as warn:
r = https_pool.request('GET', '/')
self.assertEqual(r.status, 200)
# Modern versions of Python, or systems using PyOpenSSL, don't
# emit warnings.
if sys.version_info >= (2, 7, 9) or util.IS_PYOPENSSL:
self.assertFalse(warn.called, warn.call_args_list)
else:
self.assertTrue(warn.called)
if util.HAS_SNI:
call = warn.call_args_list[0]
else:
call = warn.call_args_list[1]
error = call[0][1]
self.assertEqual(error, InsecurePlatformWarning)
@onlyPy279OrNewer
def test_ca_dir_verified(self):
https_pool = HTTPSConnectionPool(self.host, self.port,
cert_reqs='CERT_REQUIRED',
ca_cert_dir=DEFAULT_CA_DIR)
conn = https_pool._new_conn()
self.assertEqual(conn.__class__, VerifiedHTTPSConnection)
with mock.patch('warnings.warn') as warn:
r = https_pool.request('GET', '/')
self.assertEqual(r.status, 200)
if sys.version_info >= (2, 7, 9):
self.assertFalse(warn.called, warn.call_args_list)
else:
self.assertTrue(warn.called)
call, = warn.call_args_list
error = call[0][1]
self.assertEqual(error, InsecurePlatformWarning)
def test_invalid_common_name(self):
https_pool = HTTPSConnectionPool('127.0.0.1', self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=DEFAULT_CA)
try:
https_pool.request('GET', '/')
self.fail("Didn't raise SSL invalid common name")
except SSLError as e:
self.assertTrue("doesn't match" in str(e))
def test_verified_with_bad_ca_certs(self):
https_pool = HTTPSConnectionPool(self.host, self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=DEFAULT_CA_BAD)
try:
https_pool.request('GET', '/')
self.fail("Didn't raise SSL error with bad CA certs")
except SSLError as e:
self.assertTrue('certificate verify failed' in str(e),
"Expected 'certificate verify failed',"
"instead got: %r" % e)
def test_verified_without_ca_certs(self):
# default is cert_reqs=None which is ssl.CERT_NONE
https_pool = HTTPSConnectionPool(self.host, self.port,
cert_reqs='CERT_REQUIRED')
try:
https_pool.request('GET', '/')
self.fail("Didn't raise SSL error with no CA certs when"
"CERT_REQUIRED is set")
except SSLError as e:
# there is a different error message depending on whether or
# not pyopenssl is injected
self.assertTrue('No root certificates specified' in str(e) or
'certificate verify failed' in str(e),
"Expected 'No root certificates specified' or "
"'certificate verify failed', "
"instead got: %r" % e)
def test_no_ssl(self):
pool = HTTPSConnectionPool(self.host, self.port)
pool.ConnectionCls = None
self.assertRaises(SSLError, pool._new_conn)
self.assertRaises(SSLError, pool.request, 'GET', '/')
def test_unverified_ssl(self):
""" Test that bare HTTPSConnection can connect, make requests """
pool = HTTPSConnectionPool(self.host, self.port)
pool.ConnectionCls = UnverifiedHTTPSConnection
with mock.patch('warnings.warn') as warn:
r = pool.request('GET', '/')
self.assertEqual(r.status, 200)
self.assertTrue(warn.called)
# Modern versions of Python, or systems using PyOpenSSL, only emit
# the unverified warning. Older systems may also emit other
# warnings, which we want to ignore here.
calls = warn.call_args_list
if sys.version_info >= (2, 7, 9) or util.IS_PYOPENSSL:
category = calls[0][0][1]
elif util.HAS_SNI:
category = calls[1][0][1]
else:
category = calls[2][0][1]
self.assertEqual(category, InsecureRequestWarning)
def test_ssl_unverified_with_ca_certs(self):
pool = HTTPSConnectionPool(self.host, self.port,
cert_reqs='CERT_NONE',
ca_certs=DEFAULT_CA_BAD)
with mock.patch('warnings.warn') as warn:
r = pool.request('GET', '/')
self.assertEqual(r.status, 200)
self.assertTrue(warn.called)
# Modern versions of Python, or systems using PyOpenSSL, only emit
# the unverified warning. Older systems may also emit other
# warnings, which we want to ignore here.
calls = warn.call_args_list
if sys.version_info >= (2, 7, 9) or util.IS_PYOPENSSL:
category = calls[0][0][1]
elif util.HAS_SNI:
category = calls[1][0][1]
else:
category = calls[2][0][1]
self.assertEqual(category, InsecureRequestWarning)
def test_assert_hostname_false(self):
https_pool = HTTPSConnectionPool('localhost', self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=DEFAULT_CA)
https_pool.assert_hostname = False
https_pool.request('GET', '/')
def test_assert_specific_hostname(self):
https_pool = HTTPSConnectionPool('localhost', self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=DEFAULT_CA)
https_pool.assert_hostname = 'localhost'
https_pool.request('GET', '/')
def test_assert_fingerprint_md5(self):
https_pool = HTTPSConnectionPool('localhost', self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=DEFAULT_CA)
https_pool.assert_fingerprint = 'F2:06:5A:42:10:3F:45:1C:17:FE:E6:' \
'07:1E:8A:86:E5'
https_pool.request('GET', '/')
def test_assert_fingerprint_sha1(self):
https_pool = HTTPSConnectionPool('localhost', self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=DEFAULT_CA)
https_pool.assert_fingerprint = '92:81:FE:85:F7:0C:26:60:EC:D6:B3:' \
'BF:93:CF:F9:71:CC:07:7D:0A'
https_pool.request('GET', '/')
def test_assert_fingerprint_sha256(self):
https_pool = HTTPSConnectionPool('localhost', self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=DEFAULT_CA)
https_pool.assert_fingerprint = ('C5:4D:0B:83:84:89:2E:AE:B4:58:BB:12:'
'F7:A6:C4:76:05:03:88:D8:57:65:51:F3:'
'1E:60:B0:8B:70:18:64:E6')
https_pool.request('GET', '/')
def test_assert_invalid_fingerprint(self):
https_pool = HTTPSConnectionPool('127.0.0.1', self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=DEFAULT_CA)
https_pool.assert_fingerprint = 'AA:AA:AA:AA:AA:AAAA:AA:AAAA:AA:' \
'AA:AA:AA:AA:AA:AA:AA:AA:AA'
self.assertRaises(SSLError, https_pool.request, 'GET', '/')
https_pool._get_conn()
# Uneven length
https_pool.assert_fingerprint = 'AA:A'
self.assertRaises(SSLError, https_pool.request, 'GET', '/')
https_pool._get_conn()
# Invalid length
https_pool.assert_fingerprint = 'AA'
self.assertRaises(SSLError, https_pool.request, 'GET', '/')
def test_verify_none_and_bad_fingerprint(self):
https_pool = HTTPSConnectionPool('127.0.0.1', self.port,
cert_reqs='CERT_NONE',
ca_certs=DEFAULT_CA_BAD)
https_pool.assert_fingerprint = 'AA:AA:AA:AA:AA:AAAA:AA:AAAA:AA:' \
'AA:AA:AA:AA:AA:AA:AA:AA:AA'
self.assertRaises(SSLError, https_pool.request, 'GET', '/')
def test_verify_none_and_good_fingerprint(self):
https_pool = HTTPSConnectionPool('127.0.0.1', self.port,
cert_reqs='CERT_NONE',
ca_certs=DEFAULT_CA_BAD)
https_pool.assert_fingerprint = '92:81:FE:85:F7:0C:26:60:EC:D6:B3:' \
'BF:93:CF:F9:71:CC:07:7D:0A'
https_pool.request('GET', '/')
def test_good_fingerprint_and_hostname_mismatch(self):
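        # A matching fingerprint is accepted even though '127.0.0.1' does not match the certificate hostname.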
https_pool = HTTPSConnectionPool('127.0.0.1', self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=DEFAULT_CA)
https_pool.assert_fingerprint = '92:81:FE:85:F7:0C:26:60:EC:D6:B3:' \
'BF:93:CF:F9:71:CC:07:7D:0A'
https_pool.request('GET', '/')
@requires_network
def test_https_timeout(self):
timeout = Timeout(connect=0.001)
https_pool = HTTPSConnectionPool(TARPIT_HOST, self.port,
timeout=timeout, retries=False,
cert_reqs='CERT_REQUIRED')
timeout = Timeout(total=None, connect=0.001)
https_pool = HTTPSConnectionPool(TARPIT_HOST, self.port,
timeout=timeout, retries=False,
cert_reqs='CERT_REQUIRED')
self.assertRaises(ConnectTimeoutError, https_pool.request, 'GET', '/')
timeout = Timeout(read=0.001)
https_pool = HTTPSConnectionPool(self.host, self.port,
timeout=timeout, retries=False,
cert_reqs='CERT_REQUIRED')
https_pool.ca_certs = DEFAULT_CA
https_pool.assert_fingerprint = '92:81:FE:85:F7:0C:26:60:EC:D6:B3:' \
'BF:93:CF:F9:71:CC:07:7D:0A'
timeout = Timeout(total=None)
https_pool = HTTPSConnectionPool(self.host, self.port, timeout=timeout,
cert_reqs='CERT_NONE')
https_pool.request('GET', '/')
def test_tunnel(self):
""" test the _tunnel behavior """
timeout = Timeout(total=None)
https_pool = HTTPSConnectionPool(self.host, self.port, timeout=timeout,
cert_reqs='CERT_NONE')
conn = https_pool._new_conn()
try:
conn.set_tunnel(self.host, self.port)
except AttributeError: # python 2.6
conn._set_tunnel(self.host, self.port)
conn._tunnel = mock.Mock()
https_pool._make_request(conn, 'GET', '/')
conn._tunnel.assert_called_once_with()
@onlyPy26OrOlder
def test_tunnel_old_python(self):
"""HTTPSConnection can still make connections if _tunnel_host isn't set
The _tunnel_host attribute was added in 2.6.3 - because our test runners
generally use the latest Python 2.6, we simulate the old version by
deleting the attribute from the HTTPSConnection.
"""
conn = self._pool._new_conn()
del conn._tunnel_host
self._pool._make_request(conn, 'GET', '/')
@requires_network
def test_enhanced_timeout(self):
def new_pool(timeout, cert_reqs='CERT_REQUIRED'):
https_pool = HTTPSConnectionPool(TARPIT_HOST, self.port,
timeout=timeout,
retries=False,
cert_reqs=cert_reqs)
return https_pool
https_pool = new_pool(Timeout(connect=0.001))
conn = https_pool._new_conn()
self.assertRaises(ConnectTimeoutError, https_pool.request, 'GET', '/')
self.assertRaises(ConnectTimeoutError, https_pool._make_request, conn,
'GET', '/')
https_pool = new_pool(Timeout(connect=5))
self.assertRaises(ConnectTimeoutError, https_pool.request, 'GET', '/',
timeout=Timeout(connect=0.001))
t = Timeout(total=None)
https_pool = new_pool(t)
conn = https_pool._new_conn()
self.assertRaises(ConnectTimeoutError, https_pool.request, 'GET', '/',
timeout=Timeout(total=None, connect=0.001))
def test_enhanced_ssl_connection(self):
fingerprint = '92:81:FE:85:F7:0C:26:60:EC:D6:B3:BF:93:CF:F9:71:CC:07:7D:0A'
conn = VerifiedHTTPSConnection(self.host, self.port)
https_pool = HTTPSConnectionPool(self.host, self.port,
cert_reqs='CERT_REQUIRED', ca_certs=DEFAULT_CA,
assert_fingerprint=fingerprint)
https_pool._make_request(conn, 'GET', '/')
def test_ssl_correct_system_time(self):
self._pool.cert_reqs = 'CERT_REQUIRED'
self._pool.ca_certs = DEFAULT_CA
w = self._request_without_resource_warnings('GET', '/')
self.assertEqual([], w)
def test_ssl_wrong_system_time(self):
self._pool.cert_reqs = 'CERT_REQUIRED'
self._pool.ca_certs = DEFAULT_CA
with mock.patch('urllib3.connection.datetime') as mock_date:
mock_date.date.today.return_value = datetime.date(1970, 1, 1)
w = self._request_without_resource_warnings('GET', '/')
self.assertEqual(len(w), 1)
warning = w[0]
self.assertEqual(SystemTimeWarning, warning.category)
self.assertTrue(str(RECENT_DATE) in warning.message.args[0])
def _request_without_resource_warnings(self, method, url):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self._pool.request(method, url)
return [x for x in w if not isinstance(x.message, ResourceWarning)]
class TestHTTPS_TLSv1(HTTPSDummyServerTestCase):
certs = DEFAULT_CERTS.copy()
certs['ssl_version'] = ssl.PROTOCOL_TLSv1
def setUp(self):
self._pool = HTTPSConnectionPool(self.host, self.port)
def test_set_ssl_version_to_sslv3(self):
self._pool.ssl_version = ssl.PROTOCOL_SSLv3
self.assertRaises(SSLError, self._pool.request, 'GET', '/')
def test_ssl_version_as_string(self):
self._pool.ssl_version = 'PROTOCOL_SSLv3'
self.assertRaises(SSLError, self._pool.request, 'GET', '/')
def test_ssl_version_as_short_string(self):
self._pool.ssl_version = 'SSLv3'
self.assertRaises(SSLError, self._pool.request, 'GET', '/')
def test_discards_connection_on_sslerror(self):
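        # The connection that failed the handshake must be discarded so the retry below gets a fresh one.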
self._pool.cert_reqs = 'CERT_REQUIRED'
self.assertRaises(SSLError, self._pool.request, 'GET', '/')
self._pool.ca_certs = DEFAULT_CA
self._pool.request('GET', '/')
def test_set_cert_default_cert_required(self):
conn = VerifiedHTTPSConnection(self.host, self.port)
conn.set_cert(ca_certs=DEFAULT_CA)
self.assertEqual(conn.cert_reqs, 'CERT_REQUIRED')
class TestHTTPS_NoSAN(HTTPSDummyServerTestCase):
certs = NO_SAN_CERTS
def test_warning_for_certs_without_a_san(self):
"""Ensure that a warning is raised when the cert from the server has
no Subject Alternative Name."""
with mock.patch('warnings.warn') as warn:
https_pool = HTTPSConnectionPool(self.host, self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=NO_SAN_CA)
r = https_pool.request('GET', '/')
self.assertEqual(r.status, 200)
self.assertTrue(warn.called)
class TestHTTPS_IPv6Addr(IPV6HTTPSDummyServerTestCase):
certs = IPV6_ADDR_CERTS
def test_strip_square_brackets_before_validating(self):
"""Test that the fix for #760 works."""
if not HAS_IPV6:
raise SkipTest("Only runs on IPv6 systems")
https_pool = HTTPSConnectionPool('[::1]', self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=IPV6_ADDR_CA)
r = https_pool.request('GET', '/')
self.assertEqual(r.status, 200)
if __name__ == '__main__':
unittest.main()
| mit | 1,013,436,522,286,272,900 | 39.455408 | 83 | 0.548921 | false |
jtyuan/racetrack | configs/My/se.py | 1 | 8964 | # Copyright (c) 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Simple test script
#
# "m5 test.py"
import optparse
import sys
import os
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal
addToPath('../common')
addToPath('../ruby')
import Options
import Ruby
import Simulation
import CacheConfig
import MemConfig
from Caches import *
from cpu2000 import *
import spec2006
def get_processes(options):
"""Interprets provided options and returns a list of processes"""
multiprocesses = []
inputs = []
outputs = []
errouts = []
pargs = []
workloads = options.cmd.split(';')
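    # Workloads and their per-process inputs/outputs/options are ';'-separated, one entry per process.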
if options.input != "":
inputs = options.input.split(';')
if options.output != "":
outputs = options.output.split(';')
if options.errout != "":
errouts = options.errout.split(';')
if options.options != "":
pargs = options.options.split(';')
idx = 0
for wrkld in workloads:
process = LiveProcess()
process.executable = wrkld
process.cwd = os.getcwd()
if len(pargs) > idx:
process.cmd = [wrkld] + pargs[idx].split()
else:
process.cmd = [wrkld]
if len(inputs) > idx:
process.input = inputs[idx]
if len(outputs) > idx:
process.output = outputs[idx]
if len(errouts) > idx:
process.errout = errouts[idx]
multiprocesses.append(process)
idx += 1
if options.smt:
assert(options.cpu_type == "detailed" or options.cpu_type == "inorder")
return multiprocesses, idx
else:
return multiprocesses, 1
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addSEOptions(parser)
if '--ruby' in sys.argv:
Ruby.define_options(parser)
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
multiprocesses = []
numThreads = 1
if options.bench:
apps = options.bench.split("-")
if len(apps) != options.num_cpus:
print "number of benchmarks not equal to set num_cpus!"
sys.exit(1)
apps = options.bench.split("-")
for app in apps:
if app == 'bzip2':
process = spec2006.bzip2()
elif app == 'gcc':
process = spec2006.gcc()
elif app == 'bwaves':
process = spec2006.bwaves()
else:
print "unkown benchamarks"
sys.exit(1)
multiprocesses.append(process)
elif options.cmd:
multiprocesses, numThreads = get_processes(options)
else:
print >> sys.stderr, "No workload specified. Exiting!\n"
sys.exit(1)
(CPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
CPUClass.numThreads = numThreads
# Check -- do not allow SMT with multiple CPUs
if options.smt and options.num_cpus > 1:
fatal("You cannot use SMT with multiple CPUs!")
np = options.num_cpus
system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)],
mem_mode = test_mem_mode,
mem_ranges = [AddrRange(options.mem_size)],
cache_line_size = options.cacheline_size)
# Create a top-level voltage domain
system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
# Create a source clock for the system and set the clock period
system.clk_domain = SrcClockDomain(clock = options.sys_clock,
voltage_domain = system.voltage_domain)
# Create a CPU voltage domain
system.cpu_voltage_domain = VoltageDomain()
# Create a separate clock domain for the CPUs
system.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
voltage_domain =
system.cpu_voltage_domain)
# All cpus belong to a common cpu_clk_domain, therefore running at a common
# frequency.
for cpu in system.cpu:
cpu.clk_domain = system.cpu_clk_domain
# Sanity check
if options.fastmem:
if CPUClass != AtomicSimpleCPU:
fatal("Fastmem can only be used with atomic CPU!")
if (options.caches or options.l2cache):
fatal("You cannot use fastmem in combination with caches!")
if options.simpoint_profile:
if not options.fastmem:
# Atomic CPU checked with fastmem option already
fatal("SimPoint generation should be done with atomic cpu and fastmem")
if np > 1:
fatal("SimPoint generation not supported with more than one CPUs")
for i in xrange(np):
if options.smt:
system.cpu[i].workload = multiprocesses
elif len(multiprocesses) == 1:
system.cpu[i].workload = multiprocesses[0]
else:
system.cpu[i].workload = multiprocesses[i]
if options.fastmem:
system.cpu[i].fastmem = True
if options.simpoint_profile:
system.cpu[i].simpoint_profile = True
system.cpu[i].simpoint_interval = options.simpoint_interval
if options.checker:
system.cpu[i].addCheckerCpu()
system.cpu[i].createThreads()
if options.ruby:
if not (options.cpu_type == "detailed" or options.cpu_type == "timing"):
print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!"
sys.exit(1)
# Use SimpleMemory with the null option since this memory is only used
# for determining which addresses are within the range of the memory.
# No space allocation is required.
system.physmem = SimpleMemory(range=AddrRange(options.mem_size),
null = True)
options.use_map = True
Ruby.create_system(options, system)
assert(options.num_cpus == len(system.ruby._cpu_ports))
for i in xrange(np):
ruby_port = system.ruby._cpu_ports[i]
# Create the interrupt controller and connect its ports to Ruby
# Note that the interrupt controller is always present but only
# in x86 does it have message ports that need to be connected
system.cpu[i].createInterruptController()
# Connect the cpu's cache ports to Ruby
system.cpu[i].icache_port = ruby_port.slave
system.cpu[i].dcache_port = ruby_port.slave
if buildEnv['TARGET_ISA'] == 'x86':
system.cpu[i].interrupts.pio = ruby_port.master
system.cpu[i].interrupts.int_master = ruby_port.slave
system.cpu[i].interrupts.int_slave = ruby_port.master
system.cpu[i].itb.walker.port = ruby_port.slave
system.cpu[i].dtb.walker.port = ruby_port.slave
else:
MemClass = Simulation.setMemClass(options)
system.membus = CoherentBus()
system.system_port = system.membus.slave
CacheConfig.config_cache(options, system)
MemConfig.config_mem(options, system)
root = Root(full_system = False, system = system)
Simulation.run(options, root, system, FutureClass)
| bsd-3-clause | 2,185,763,618,451,419,400 | 33.610039 | 79 | 0.680054 | false |
bpsinc-native/src_third_party_chromite | scripts/cros_list_modified_packages.py | 2 | 8815 | # Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Calculate what workon packages have changed since the last build.
A workon package is treated as changed if any of the below are true:
1) The package is not installed.
2) A file exists in the associated repository which has a newer modification
time than the installed package.
3) The source ebuild has a newer modification time than the installed package.
Some caveats:
- We do not look at eclasses. This replicates the existing behavior of the
commit queue, which also does not look at eclass changes.
- We do not try to fallback to the non-workon package if the local tree is
unmodified. This is probably a good thing, since developers who are
"working on" a package want to compile it locally.
- Portage only stores the time that a package finished building, so we
aren't able to detect when users modify source code during builds.
"""
import errno
import logging
import multiprocessing
import optparse
import os
try:
import Queue
except ImportError:
# Python-3 renamed to "queue". We still use Queue to avoid collisions
# with naming variables as "queue". Maybe we'll transition at some point.
# pylint: disable=F0401
import queue as Queue
from chromite.cbuildbot import constants
from chromite.cbuildbot import portage_utilities
from chromite.lib import cros_build_lib
from chromite.lib import git
from chromite.lib import osutils
from chromite.lib import parallel
class WorkonProjectsMonitor(object):
"""Class for monitoring the last modification time of workon projects.
Members:
_tasks: A list of the (project, path) pairs to check.
_result_queue: A queue. When GetProjectModificationTimes is called,
(project, mtime) tuples are pushed onto the end of this queue.
"""
def __init__(self, projects):
"""Create a new object for checking what projects were modified and when.
Args:
projects: A list of the project names we are interested in monitoring.
"""
manifest = git.ManifestCheckout.Cached(constants.SOURCE_ROOT)
self._tasks = []
for project in set(projects).intersection(manifest.checkouts_by_name):
for checkout in manifest.FindCheckouts(project):
self._tasks.append((project, checkout.GetPath(absolute=True)))
self._result_queue = multiprocessing.Queue(len(self._tasks))
def _EnqueueProjectModificationTime(self, project, path):
"""Calculate the last time that this project was modified, and enqueue it.
Args:
project: The project to look at.
path: The path associated with the specified project.
"""
if os.path.isdir(path):
self._result_queue.put((project, self._LastModificationTime(path)))
def _LastModificationTime(self, path):
"""Calculate the last time a directory subtree was modified.
Args:
path: Directory to look at.
"""
cmd = 'find . -name .git -prune -o -printf "%T@\n" | sort -nr | head -n1'
ret = cros_build_lib.RunCommand(cmd, cwd=path, shell=True, print_cmd=False,
capture_output=True)
return float(ret.output) if ret.output else 0
def GetProjectModificationTimes(self):
"""Get the last modification time of each specified project.
Returns:
A dictionary mapping project names to last modification times.
"""
task = self._EnqueueProjectModificationTime
parallel.RunTasksInProcessPool(task, self._tasks)
# Create a dictionary mapping project names to last modification times.
# All of the workon projects are already stored in the queue, so we can
# retrieve them all without waiting any longer.
mtimes = {}
while True:
try:
project, mtime = self._result_queue.get_nowait()
except Queue.Empty:
break
mtimes[project] = mtime
return mtimes
class WorkonPackageInfo(object):
"""Class for getting information about workon packages.
Members:
cp: The package name (e.g. chromeos-base/power_manager).
mtime: The modification time of the installed package.
project: The project associated with the installed package.
src_ebuild_mtime: The modification time of the source ebuild.
"""
def __init__(self, cp, mtime, projects, src_ebuild_mtime):
self.cp = cp
self.pkg_mtime = int(mtime)
self.projects = projects
self.src_ebuild_mtime = src_ebuild_mtime
def ListWorkonPackages(board, host, all_opt=False):
"""List the packages that are currently being worked on.
Args:
board: The board to look at. If host is True, this should be set to None.
host: Whether to look at workon packages for the host.
all_opt: Pass --all to cros_workon. For testing purposes.
"""
cmd = [os.path.join(constants.CROSUTILS_DIR, 'cros_workon'), 'list']
cmd.extend(['--host'] if host else ['--board', board])
if all_opt:
cmd.append('--all')
result = cros_build_lib.RunCommand(cmd, print_cmd=False, capture_output=True)
return result.output.split()
def ListWorkonPackagesInfo(board, host):
"""Find the specified workon packages for the specified board.
Args:
board: The board to look at. If host is True, this should be set to None.
host: Whether to look at workon packages for the host.
Returns:
A list of unique packages being worked on.
"""
# Import portage late so that this script can be imported outside the chroot.
# pylint: disable=F0401
import portage.const
packages = ListWorkonPackages(board, host)
if not packages:
return []
results = {}
install_root = cros_build_lib.GetSysroot(board=board)
vdb_path = os.path.join(install_root, portage.const.VDB_PATH)
buildroot, both = constants.SOURCE_ROOT, constants.BOTH_OVERLAYS
for overlay in portage_utilities.FindOverlays(both, board, buildroot):
for filename, projects in portage_utilities.GetWorkonProjectMap(overlay,
packages):
# chromeos-base/power_manager/power_manager-9999
# cp = chromeos-base/power_manager
# cpv = chromeos-base/power_manager-9999
category, pn, p = portage_utilities.SplitEbuildPath(filename)
cp = '%s/%s' % (category, pn)
cpv = '%s/%s' % (category, p)
# Get the time the package finished building. TODO(build): Teach Portage
# to store the time the package started building and use that here.
pkg_mtime_file = os.path.join(vdb_path, cpv, 'BUILD_TIME')
try:
pkg_mtime = int(osutils.ReadFile(pkg_mtime_file))
except EnvironmentError as ex:
if ex.errno != errno.ENOENT:
raise
pkg_mtime = 0
      # Get the modification time of the ebuild in the overlay.
src_ebuild_mtime = os.lstat(os.path.join(overlay, filename)).st_mtime
# Write info into the results dictionary, overwriting any previous
# values. This ensures that overlays override appropriately.
results[cp] = WorkonPackageInfo(cp, pkg_mtime, projects, src_ebuild_mtime)
return results.values()
def ListModifiedWorkonPackages(board, host):
"""List the workon packages that need to be rebuilt.
Args:
board: The board to look at. If host is True, this should be set to None.
host: Whether to look at workon packages for the host.
"""
packages = ListWorkonPackagesInfo(board, host)
if packages:
projects = []
for info in packages:
projects.extend(info.projects)
mtimes = WorkonProjectsMonitor(projects).GetProjectModificationTimes()
for info in packages:
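      # Rebuild when any source checkout or the ebuild itself is newer than the installed package.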
mtime = int(max([mtimes.get(p, 0) for p in info.projects] +
[info.src_ebuild_mtime]))
if mtime >= info.pkg_mtime:
yield info.cp
def _ParseArguments(argv):
parser = optparse.OptionParser(usage='USAGE: %prog [options]')
parser.add_option('--board', default=None,
dest='board',
help='Board name')
parser.add_option('--host', default=False,
dest='host', action='store_true',
help='Look at host packages instead of board packages')
flags, remaining_arguments = parser.parse_args(argv)
if not flags.board and not flags.host:
parser.print_help()
cros_build_lib.Die('--board or --host is required')
if flags.board is not None and flags.host:
parser.print_help()
cros_build_lib.Die('--board and --host are mutually exclusive')
if remaining_arguments:
parser.print_help()
cros_build_lib.Die('Invalid arguments')
return flags
def main(argv):
logging.getLogger().setLevel(logging.INFO)
flags = _ParseArguments(argv)
modified = ListModifiedWorkonPackages(flags.board, flags.host)
print ' '.join(sorted(modified))
| bsd-3-clause | -8,551,598,722,907,524,000 | 35.882845 | 80 | 0.693364 | false |
Jumpscale/web | pythonlib/eve/tests/utils.py | 1 | 11277 | # -*- coding: utf-8 -*-
import copy
import hashlib
from bson.json_util import dumps
from datetime import datetime, timedelta
from eve.tests import TestBase
from eve.utils import parse_request, str_to_date, config, weak_date, \
date_to_str, querydef, document_etag, extract_key_values, \
debug_error_message, resource_uri, home_uri
class TestUtils(TestBase):
""" collection, document and home_link methods (and resource_uri, which is
used by all of them) are tested in 'tests.methods' since we need an active
flaskapp context
"""
def setUp(self):
super(TestUtils, self).setUp()
self.dt_fmt = config.DATE_FORMAT
self.datestr = 'Tue, 18 Sep 2012 10:12:30 GMT'
self.valid = datetime.strptime(self.datestr, self.dt_fmt)
self.etag = '56eaadbbd9fa287e7270cf13a41083c94f52ab9b'
def test_parse_request_where(self):
self.app.config['DOMAIN'][self.known_resource]['allowed_filters'] = \
['ref']
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).where, None)
with self.app.test_request_context('/?where=hello'):
self.assertEqual(parse_request(self.known_resource).where, 'hello')
def test_parse_request_sort(self):
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).sort, None)
with self.app.test_request_context('/?sort=hello'):
self.assertEqual(parse_request(self.known_resource).sort, 'hello')
def test_parse_request_page(self):
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).page, 1)
with self.app.test_request_context('/?page=2'):
self.assertEqual(parse_request(self.known_resource).page, 2)
with self.app.test_request_context('/?page=-1'):
self.assertEqual(parse_request(self.known_resource).page, 1)
with self.app.test_request_context('/?page=0'):
self.assertEqual(parse_request(self.known_resource).page, 1)
with self.app.test_request_context('/?page=1.1'):
self.assertEqual(parse_request(self.known_resource).page, 1)
with self.app.test_request_context('/?page=string'):
self.assertEqual(parse_request(self.known_resource).page, 1)
def test_parse_request_max_results(self):
default = config.PAGINATION_DEFAULT
limit = config.PAGINATION_LIMIT
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=%d' % (limit + 1)):
self.assertEqual(parse_request(self.known_resource).max_results,
limit)
with self.app.test_request_context('/?max_results=2'):
self.assertEqual(parse_request(self.known_resource).max_results, 2)
with self.app.test_request_context('/?max_results=-1'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=0'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=1.1'):
self.assertEqual(parse_request(self.known_resource).max_results, 1)
with self.app.test_request_context('/?max_results=string'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
def test_parse_request_max_results_disabled_pagination(self):
self.app.config['DOMAIN'][self.known_resource]['pagination'] = False
default = 0
limit = config.PAGINATION_LIMIT
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=%d' % (limit + 1)):
self.assertEqual(parse_request(self.known_resource).max_results,
limit + 1)
with self.app.test_request_context('/?max_results=2'):
self.assertEqual(parse_request(self.known_resource).max_results, 2)
with self.app.test_request_context('/?max_results=-1'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=0'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=1.1'):
self.assertEqual(parse_request(self.known_resource).max_results, 1)
with self.app.test_request_context('/?max_results=string'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
def test_parse_request_if_modified_since(self):
ims = 'If-Modified-Since'
with self.app.test_request_context():
self.assertEqual(parse_request(
self.known_resource).if_modified_since, None)
with self.app.test_request_context(headers=None):
self.assertEqual(
parse_request(self.known_resource).if_modified_since, None)
with self.app.test_request_context(headers={ims: self.datestr}):
self.assertEqual(
parse_request(self.known_resource).if_modified_since,
self.valid + timedelta(seconds=1))
with self.app.test_request_context(headers={ims: 'not-a-date'}):
self.assertRaises(ValueError, parse_request, self.known_resource)
with self.app.test_request_context(
headers={ims:
self.datestr.replace('GMT', 'UTC')}):
self.assertRaises(ValueError, parse_request, self.known_resource)
self.assertRaises(ValueError, parse_request, self.known_resource)
def test_parse_request_if_none_match(self):
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).if_none_match,
None)
with self.app.test_request_context(headers=None):
self.assertEqual(parse_request(self.known_resource).if_none_match,
None)
with self.app.test_request_context(headers={'If-None-Match':
self.etag}):
self.assertEqual(parse_request(self.known_resource).if_none_match,
self.etag)
def test_parse_request_if_match(self):
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).if_match, None)
with self.app.test_request_context(headers=None):
self.assertEqual(parse_request(self.known_resource).if_match, None)
with self.app.test_request_context(headers={'If-Match': self.etag}):
self.assertEqual(parse_request(self.known_resource).if_match,
self.etag)
def test_weak_date(self):
self.assertEqual(weak_date(self.datestr), self.valid +
timedelta(seconds=1))
def test_str_to_date(self):
self.assertEqual(str_to_date(self.datestr), self.valid)
self.assertRaises(ValueError, str_to_date, 'not-a-date')
self.assertRaises(ValueError, str_to_date,
self.datestr.replace('GMT', 'UTC'))
def test_date_to_str(self):
self.assertEqual(date_to_str(self.valid), self.datestr)
def test_querydef(self):
self.assertEqual(querydef(max_results=10), '?max_results=10')
self.assertEqual(querydef(page=10), '?page=10')
self.assertEqual(querydef(where='wherepart'), '?where=wherepart')
self.assertEqual(querydef(sort='sortpart'), '?sort=sortpart')
self.assertEqual(querydef(where='wherepart', sort='sortpart'),
'?where=wherepart&sort=sortpart')
self.assertEqual(querydef(max_results=10, sort='sortpart'),
'?max_results=10&sort=sortpart')
def test_document_etag(self):
test = {'key1': 'value1', 'another': 'value2'}
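        # The ETag must be the SHA1 hex digest of the sorted-keys JSON serialization of the document.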
challenge = dumps(test, sort_keys=True).encode('utf-8')
self.assertEqual(hashlib.sha1(challenge).hexdigest(),
document_etag(test))
def test_extract_key_values(self):
test = {
'key1': 'value1',
'key2': {
'key1': 'value2',
'nested': {
'key1': 'value3'
}
}
}
self.assertEqual(list(extract_key_values('key1', test)),
['value1', 'value2', 'value3'])
def test_debug_error_message(self):
with self.app.test_request_context():
self.app.config['DEBUG'] = False
self.assertEqual(debug_error_message('An error message'), None)
self.app.config['DEBUG'] = True
self.assertEqual(debug_error_message('An error message'),
'An error message')
def test_resource_uri(self):
with self.app.test_request_context():
self.app.config['URL_PROTOCOL'] = 'http'
self.app.config['SERVER_NAME'] = '0.0.0.0:5000'
self.assertEqual(resource_uri('users'),
'http://0.0.0.0:5000/users')
def test_home_uri(self):
with self.app.test_request_context():
self.app.config['URL_PROTOCOL'] = 'http'
self.app.config['SERVER_NAME'] = '0.0.0.0:5000'
self.assertEqual(home_uri(), 'http://0.0.0.0:5000')
class DummyEvent(object):
"""
    Event handler that records the call parameters and asserts a check
Usage::
app = Eve()
app.on_my_event = DummyEvent(element_not_deleted)
In the test::
assert app.on_my_event.called[0] == expected_param_0
"""
def __init__(self, check, deepcopy=False):
"""
:param check: method checking the state of something during the event.
:type: check: callable returning bool
:param deepcopy: Do we need to store a copy of the argument calls? In
some events arguments are changed after the event, so keeping a
reference to the original object doesn't allow a test to check what
was passed. The default is False.
:type deepcopy: bool
"""
self.__called = None
self.__check = check
self.__deepcopy = deepcopy
def __call__(self, *args):
assert self.__check()
# In some method the arguments are changed after the events
if self.__deepcopy:
args = copy.deepcopy(args)
self.__called = args
@property
def called(self):
"""
The results of the call to the event.
:rtype: It returns None if the event hasn't been called or a tuple with
the positional arguments of the last call if called.
"""
return self.__called
| apache-2.0 | 8,861,762,488,242,893,000 | 44.289157 | 79 | 0.601224 | false |
nistormihai/superdesk-core | superdesk/io/feeding_services/email.py | 3 | 3126 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import socket
import imaplib
from flask import current_app as app
from superdesk.errors import IngestEmailError
from superdesk.io.registry import register_feeding_service
from superdesk.io.feeding_services import FeedingService
from superdesk.upload import url_for_media
class EmailFeedingService(FeedingService):
"""
    Feeding Service class which can read the article(s) from a configured mailbox.
"""
NAME = 'email'
ERRORS = [IngestEmailError.emailError().get_error_description(),
IngestEmailError.emailLoginError().get_error_description()]
label = 'Email'
def _test(self, provider):
self._update(provider, update=None, test=True)
def _update(self, provider, update, test=False):
config = provider.get('config', {})
server = config.get('server', '')
port = int(config.get('port', 993))
new_items = []
try:
try:
socket.setdefaulttimeout(app.config.get('EMAIL_TIMEOUT', 10))
imap = imaplib.IMAP4_SSL(host=server, port=port)
except (socket.gaierror, OSError) as e:
raise IngestEmailError.emailHostError(exception=e)
try:
imap.login(config.get('user', None), config.get('password', None))
except imaplib.IMAP4.error:
raise IngestEmailError.emailLoginError(imaplib.IMAP4.error, provider)
try:
rv, data = imap.select(config.get('mailbox', None), readonly=False)
if rv != 'OK':
raise IngestEmailError.emailMailboxError()
try:
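                    # Only messages matching the configured IMAP filter (unread by default) are ingested.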
rv, data = imap.search(None, config.get('filter', '(UNSEEN)'))
if rv != 'OK':
raise IngestEmailError.emailFilterError()
for num in data[0].split():
rv, data = imap.fetch(num, '(RFC822)')
if rv == 'OK' and not test:
try:
parser = self.get_feed_parser(provider, data)
new_items.append(parser.parse(data, provider))
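                                # Flag the message as seen so it is not picked up again on the next poll.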
rv, data = imap.store(num, '+FLAGS', '\\Seen')
except IngestEmailError:
continue
finally:
imap.close()
finally:
imap.logout()
except IngestEmailError:
raise
except Exception as ex:
raise IngestEmailError.emailError(ex, provider)
return new_items
def prepare_href(self, href, mimetype=None):
return url_for_media(href, mimetype)
register_feeding_service(EmailFeedingService.NAME, EmailFeedingService(), EmailFeedingService.ERRORS)
| agpl-3.0 | -4,501,573,990,264,255,500 | 36.214286 | 101 | 0.578375 | false |
chassing/pytest-asyncio | setup.py | 2 | 1457 | import codecs
import os
import re
from setuptools import setup, find_packages
with open('README.rst') as f:
readme = f.read()
def read(*parts):
here = os.path.abspath(os.path.dirname(__file__))
return codecs.open(os.path.join(here, *parts), 'r').read()
def find_version(*file_paths):
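    # Read __version__ straight from the package source so setup.py never has to import the package.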
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(
name='pytest-asyncio',
version=find_version('pytest_asyncio', '__init__.py'),
packages=find_packages(),
url='https://github.com/pytest-dev/pytest-asyncio',
license='Apache 2.0',
author='Tin Tvrtkovic',
author_email='[email protected]',
description='Pytest support for asyncio.',
long_description=readme,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Software Development :: Testing",
],
install_requires=[
'pytest',
],
extras_require={
':python_version == "3.3"': ['asyncio']
},
entry_points={
'pytest11': ['asyncio = pytest_asyncio.plugin'],
},
)
| apache-2.0 | 8,396,818,782,576,419,000 | 27.568627 | 68 | 0.597117 | false |
onkelpit/i3pystatus | i3pystatus/pomodoro.py | 6 | 3405 | import os
import subprocess
import locale
from datetime import datetime, timedelta
from i3pystatus import IntervalModule
class Pomodoro(IntervalModule):
"""
    This plugin shows a Pomodoro timer.
    Left click starts/restarts the timer.
    Right click stops it.
"""
settings = (
('sound',
'Path to sound file to play as alarm. Played by "aplay" utility'),
('pomodoro_duration',
'Working (pomodoro) interval duration in seconds'),
('break_duration', 'Short break duration in seconds'),
('long_break_duration', 'Long break duration in seconds'),
('short_break_count', 'Short break count before first long break'),
('format', 'format string, available formatters: current_pomodoro, '
'total_pomodoro, time')
)
required = ('sound',)
color_stopped = '#2ECCFA'
color_running = '#FFFF00'
color_break = '#37FF00'
interval = 1
short_break_count = 3
format = '☯ {current_pomodoro}/{total_pomodoro} {time}'
pomodoro_duration = 25 * 60
break_duration = 5 * 60
long_break_duration = 15 * 60
on_rightclick = "stop"
on_leftclick = "start"
def init(self):
# state could be either running/break or stopped
self.state = 'stopped'
self.breaks = 0
self.time = None
def run(self):
if self.time and datetime.now() >= self.time:
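            # The current interval has elapsed: flip between work and break, then sound the alarm.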
if self.state == 'running':
self.state = 'break'
if self.breaks == self.short_break_count:
self.time = datetime.now() + \
timedelta(seconds=self.long_break_duration)
self.breaks = 0
else:
self.time = datetime.now() + \
timedelta(seconds=self.break_duration)
self.breaks += 1
text = 'Go for a break!'
else:
self.state = 'running'
self.time = datetime.now() + \
timedelta(seconds=self.pomodoro_duration)
text = 'Back to work!'
self._alarm(text)
if self.state == 'running' or self.state == 'break':
min, sec = divmod((self.time - datetime.now()).total_seconds(), 60)
text = '{:02}:{:02}'.format(int(min), int(sec))
color = self.color_running if self.state == 'running' else self.color_break
else:
self.output = {
'full_text': 'Stopped',
'color': self.color_stopped
}
return
sdict = {
'time': text,
'current_pomodoro': self.breaks,
'total_pomodoro': self.short_break_count + 1,
}
self.output = {
'full_text': self.format.format(**sdict),
'color': color
}
def start(self):
self.state = 'running'
self.time = datetime.now() + timedelta(seconds=self.pomodoro_duration)
self.breaks = 0
def stop(self):
self.state = 'stopped'
self.time = None
def _alarm(self, text):
subprocess.call(['notify-send',
'Alarm!',
text])
subprocess.Popen(['aplay',
self.sound,
'-q'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
| mit | 238,373,914,549,527,940 | 30.220183 | 87 | 0.523656 | false |
petroswork/pydantic | pydantic/utils.py | 1 | 3273 | import re
import email_validator
from importlib import import_module
from typing import Tuple
PRETTY_REGEX = re.compile(r'([\w ]*?) *<(.*)> *')
def validate_email(value) -> Tuple[str, str]:
"""
Brutally simple email address validation. Note unlike most email address validation
* raw ip address (literal) domain parts are not allowed.
* "John Doe <[email protected]>" style "pretty" email addresses are processed
* the local part check is extremely basic. This raises the possibility of unicode spoofing, but no better
solution is really possible.
    * spaces are stripped from the beginning and end of addresses but no error is raised
See RFC 5322 but treat it with suspicion, there seems to exist no universally acknowledged test for a valid email!
"""
m = PRETTY_REGEX.fullmatch(value)
if m:
name, value = m.groups()
else:
name = None
email = value.strip()
try:
email_validator.validate_email(email, check_deliverability=False)
except email_validator.EmailSyntaxError as e:
        raise ValueError('Email address is not valid') from e
return name or email[:email.index('@')], email.lower()
def _rfc_1738_quote(text):
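    # Percent-encode ':', '@' and '/' so they can appear safely inside the user/password parts of a DSN.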
return re.sub(r'[:@/]', lambda m: '%{:X}'.format(ord(m.group(0))), text)
def make_dsn(*,
driver: str,
user: str=None,
password: str=None,
host: str=None,
port: str=None,
name: str=None,
query: str=None):
"""
    Create a DSN from connection settings.
Stolen approximately from sqlalchemy/engine/url.py:URL.
"""
s = driver + '://'
if user is not None:
s += _rfc_1738_quote(user)
if password is not None:
s += ':' + _rfc_1738_quote(password)
s += '@'
if host is not None:
if ':' in host:
s += '[{}]'.format(host)
else:
s += host
if port is not None:
s += ':{}'.format(int(port))
if name is not None:
s += '/' + name
query = query or {}
if query:
keys = list(query)
keys.sort()
s += '?' + '&'.join('{}={}'.format(k, query[k]) for k in keys)
return s
def import_string(dotted_path):
"""
Stolen approximately from django. Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import fails.
"""
try:
module_path, class_name = dotted_path.strip(' ').rsplit('.', 1)
except ValueError as e:
raise ImportError(f'"{dotted_path}" doesn\'t look like a module path') from e
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError as e:
raise ImportError(f'Module "{module_path}" does not define a "{class_name}" attribute') from e
def truncate(v, *, max_len=80):
"""
Truncate a value and add a unicode ellipsis (three dots) to the end if it was too long
"""
if isinstance(v, str) and len(v) > (max_len - 2):
# -3 so quote + string + … + quote has correct length
return repr(v[:(max_len - 3)] + '…')
v = repr(v)
if len(v) > max_len:
v = v[:max_len - 1] + '…'
return v
| mit | 2,759,232,664,444,075,500 | 31.346535 | 118 | 0.595654 | false |
defaultnamehere/grr | lib/flows/cron/filestore_stats_test.py | 3 | 3088 | #!/usr/bin/env python
"""Tests for grr.lib.flows.cron.filestore_stats."""
# pylint: disable=unused-import, g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import, g-bad-import-order
from grr.lib import aff4
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.flows.cron import filestore_stats
class FilestoreStatsCronFlowTest(test_lib.FlowTestsBaseclass):
def setUp(self):
super(FilestoreStatsCronFlowTest, self).setUp()
for i in range(0, 10):
newfd = aff4.FACTORY.Create("aff4:/files/hash/generic/sha256/fsi%s" % i,
"FileStoreImage", token=self.token)
newfd.size = i * 1e6
for j in range(0, i):
newfd.AddIndex("aff4:/C.000000000000000%s/fs/os/blah%s" % (j, j))
newfd.Close()
newfd = aff4.FACTORY.Create("aff4:/files/hash/generic/sha256/blobbig",
"FileStoreImage", token=self.token)
newfd.size = 1e12
newfd.AddIndex("aff4:/C.0000000000000001/fs/os/1")
newfd.AddIndex("aff4:/C.0000000000000001/fs/os/2")
newfd.Close()
newfd = aff4.FACTORY.Create("aff4:/files/hash/generic/sha256/blobtiny",
"FileStoreImage", token=self.token)
newfd.size = 12
newfd.AddIndex("aff4:/C.0000000000000001/fs/os/1")
newfd.Close()
def testFileTypes(self):
for _ in test_lib.TestFlowHelper("FilestoreStatsCronFlow",
token=self.token):
pass
fd = aff4.FACTORY.Open(
filestore_stats.FilestoreStatsCronFlow.FILESTORE_STATS_URN,
token=self.token)
filetypes = fd.Get(fd.Schema.FILESTORE_FILETYPES)
self.assertEqual(len(filetypes), 1)
self.assertEqual(filetypes.data[0].label, "FileStoreImage")
self.assertEqual(filetypes.data[0].y_value, 12)
filetype_size = fd.Get(fd.Schema.FILESTORE_FILETYPES_SIZE)
self.assertEqual(filetype_size.data[0].label, "FileStoreImage")
self.assertEqual(filetype_size.data[0].y_value, 931.364501953125)
filesizes = fd.Get(fd.Schema.FILESTORE_FILESIZE_HISTOGRAM)
self.assertEqual(filesizes.data[0].x_value, 0)
self.assertEqual(filesizes.data[0].y_value, 1)
self.assertEqual(filesizes.data[1].x_value, 2)
self.assertEqual(filesizes.data[1].y_value, 1)
self.assertEqual(filesizes.data[8].x_value, 1000000)
self.assertEqual(filesizes.data[8].y_value, 4)
self.assertEqual(filesizes.data[9].x_value, 5000000)
self.assertEqual(filesizes.data[9].y_value, 5)
self.assertEqual(filesizes.data[-1].y_value, 1)
clientcount = fd.Get(fd.Schema.FILESTORE_CLIENTCOUNT_HISTOGRAM)
self.assertEqual(clientcount.data[0].x_value, 0)
self.assertEqual(clientcount.data[0].y_value, 1)
self.assertEqual(clientcount.data[1].x_value, 1)
self.assertEqual(clientcount.data[1].y_value, 6)
self.assertEqual(clientcount.data[2].x_value, 5)
self.assertEqual(clientcount.data[2].y_value, 5)
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| apache-2.0 | 672,511,809,003,904,600 | 36.204819 | 78 | 0.680376 | false |