repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (string, 990 classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
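Each row below pairs one source file's metadata (repo_name, path, copies, size) with the file's full text in the content cell, followed by a trailing cell carrying the license, hash and line/character statistics. As a minimal sketch of how such rows might be screened, assuming they have been parsed into dicts keyed by the column names above (the helper names and thresholds are arbitrary illustrations, not part of this dump):

# Illustrative sketch only: assumes the rows above have been parsed into
# dicts keyed by the header's column names, with autogenerated as a real bool.
def keep_row(row):
    """Apply simple quality screens based on the per-file statistics."""
    if row["autogenerated"]:                # skip generated sources
        return False
    if float(row["alpha_frac"]) < 0.3:      # mostly non-alphabetic content
        return False
    if int(row["line_max"]) > 1000:         # extremely long single lines
        return False
    return True

def iter_python_files(rows):
    """Yield (repo_name, path, content) for rows that pass the screens."""
    for row in rows:
        if row["path"].endswith(".py") and keep_row(row):
            yield row["repo_name"], row["path"], row["content"]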
nycholas/ask-undrgz | src/ask-undrgz/django/contrib/gis/tests/geoapp/feeds.py | 326 | 1856 | from django.contrib.gis import feeds
from django.contrib.gis.tests.utils import mysql
from models import City, Country
class TestGeoRSS1(feeds.Feed):
link = '/city/'
title = 'Test GeoDjango Cities'
def items(self):
return City.objects.all()
def item_link(self, item):
return '/city/%s/' % item.pk
def item_geometry(self, item):
return item.point
class TestGeoRSS2(TestGeoRSS1):
def geometry(self, obj):
# This should attach a <georss:box> element for the extent
# of the cities in the database. This tuple came from
# calling `City.objects.extent()` -- we can't do that call here
# because `extent` is not implemented for MySQL/Oracle.
return (-123.30, -41.32, 174.78, 48.46)
def item_geometry(self, item):
# Returning a simple tuple for the geometry.
return item.point.x, item.point.y
class TestGeoAtom1(TestGeoRSS1):
feed_type = feeds.GeoAtom1Feed
class TestGeoAtom2(TestGeoRSS2):
feed_type = feeds.GeoAtom1Feed
def geometry(self, obj):
# This time we'll use a 2-tuple of coordinates for the box.
return ((-123.30, -41.32), (174.78, 48.46))
class TestW3CGeo1(TestGeoRSS1):
feed_type = feeds.W3CGeoFeed
# The following feeds are invalid, and will raise exceptions.
class TestW3CGeo2(TestGeoRSS2):
feed_type = feeds.W3CGeoFeed
class TestW3CGeo3(TestGeoRSS1):
feed_type = feeds.W3CGeoFeed
def item_geometry(self, item):
from django.contrib.gis.geos import Polygon
return Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
# The feed dictionary to use for URLs.
feed_dict = {
'rss1' : TestGeoRSS1,
'rss2' : TestGeoRSS2,
'atom1' : TestGeoAtom1,
'atom2' : TestGeoAtom2,
'w3cgeo1' : TestW3CGeo1,
'w3cgeo2' : TestW3CGeo2,
'w3cgeo3' : TestW3CGeo3,
}
| bsd-3-clause | 7,496,078,380,103,000,000 | 28.460317 | 71 | 0.656789 | false |
DimensionDataCBUSydney/libcloud | libcloud/test/test_file_fixtures.py | 50 | 1229 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.test.file_fixtures import ComputeFileFixtures
class FileFixturesTests(unittest.TestCase):
def test_success(self):
f = ComputeFileFixtures('meta')
self.assertEqual("Hello, World!", f.load('helloworld.txt'))
def test_failure(self):
f = ComputeFileFixtures('meta')
self.assertRaises(IOError, f.load, 'nil')
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 | 5,021,350,735,820,555,000 | 37.40625 | 74 | 0.738812 | false |
jazkarta/edx-platform-for-isc | cms/djangoapps/contentstore/management/commands/git_export.py | 18 | 2804 | """
This command exports a course from CMS to a git repository.
It takes as arguments the course id to export (e.g. MITx/999/2020) and
the repository to commit to. It takes username as an option for identifying
the commit, as well as a directory path to place the git repository.
By default it will use settings.GIT_REPO_EXPORT_DIR/repo_name as the cloned
directory. It is branch aware, but will reset all local changes to the
repository before attempting to export the XML, add, and commit changes if
any have taken place.
This functionality is also available as an export view in studio if the giturl
attribute is set and the FEATURE['ENABLE_EXPORT_GIT'] is set.
"""
import logging
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import ugettext as _
import contentstore.git_export_utils as git_export_utils
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys import InvalidKeyError
from contentstore.git_export_utils import GitExportError
from opaque_keys.edx.keys import CourseKey
log = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Take a course from studio and export it to a git repository.
"""
option_list = BaseCommand.option_list + (
make_option('--username', '-u', dest='user',
help=('Specify a username from LMS/Studio to be used '
'as the commit author.')),
make_option('--repo_dir', '-r', dest='repo',
help='Specify existing git repo directory.'),
)
help = _('Take the specified course and attempt to '
'export it to a git repository\n. Course directory '
'must already be a git repository. Usage: '
' git_export <course_loc> <git_url>')
def handle(self, *args, **options):
"""
Checks arguments and runs export function if they are good
"""
if len(args) != 2:
raise CommandError('This script requires exactly two arguments: '
'course_loc and git_url')
# Rethrow GitExportError as CommandError for SystemExit
try:
course_key = CourseKey.from_string(args[0])
except InvalidKeyError:
try:
course_key = SlashSeparatedCourseKey.from_deprecated_string(args[0])
except InvalidKeyError:
raise CommandError(_(GitExportError.BAD_COURSE))
try:
git_export_utils.export_to_git(
course_key,
args[1],
options.get('user', ''),
options.get('repo', None)  # matches dest='repo' of the --repo_dir/-r option above
)
except git_export_utils.GitExportError as ex:
raise CommandError(_(ex.message))
| agpl-3.0 | 2,256,731,160,365,950,700 | 36.386667 | 84 | 0.646576 | false |
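For context, the command above runs as a CMS management command with two positional arguments (the course location and the git URL) plus the optional --username and --repo_dir flags described in its docstring. A hedged sketch of invoking it programmatically follows; the course id, repository URL and username are placeholders, and driving it through call_command assumes a configured Django/edX environment:

# Placeholder values throughout; the rough shell equivalent would be
#   python manage.py cms git_export MITx/999/2020 git@example.com:org/course.git -u staff
from django.core.management import call_command

call_command(
    'git_export',
    'MITx/999/2020',                    # course location (first positional argument)
    'git@example.com:org/course.git',   # target git repository (second positional argument)
    user='staff',                       # --username, recorded as the commit author
)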
mrshelly/openerp71313 | openerp/addons/mail/tests/test_mail_message.py | 1 | 23446 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.mail.tests.test_mail_base import TestMailBase
from openerp.osv.orm import except_orm
from openerp.tools import mute_logger
class test_mail_access_rights(TestMailBase):
def setUp(self):
super(test_mail_access_rights, self).setUp()
cr, uid = self.cr, self.uid
# Test mail.group: public to provide access to everyone
self.group_jobs_id = self.mail_group.create(cr, uid, {'name': 'Jobs', 'public': 'public'})
# Test mail.group: private to restrict access
self.group_priv_id = self.mail_group.create(cr, uid, {'name': 'Private', 'public': 'private'})
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.osv.orm')
def test_00_mail_group_access_rights(self):
""" Testing mail_group access rights and basic mail_thread features """
cr, uid, user_bert_id, user_raoul_id = self.cr, self.uid, self.user_bert_id, self.user_raoul_id
# Do: Bert reads Jobs -> ok, public
self.mail_group.read(cr, user_bert_id, [self.group_jobs_id])
# Do: Bert read Pigs -> ko, restricted to employees
self.assertRaises(except_orm, self.mail_group.read,
cr, user_bert_id, [self.group_pigs_id])
# Do: Raoul read Pigs -> ok, belong to employees
self.mail_group.read(cr, user_raoul_id, [self.group_pigs_id])
# Do: Bert creates a group -> ko, no access rights
self.assertRaises(except_orm, self.mail_group.create,
cr, user_bert_id, {'name': 'Test'})
# Do: Raoul creates a restricted group -> ok
new_group_id = self.mail_group.create(cr, user_raoul_id, {'name': 'Test'})
# Do: Bert added in followers, read -> ok, in followers
self.mail_group.message_subscribe_users(cr, uid, [new_group_id], [user_bert_id])
self.mail_group.read(cr, user_bert_id, [new_group_id])
# Do: Raoul reads Priv -> ko, private
self.assertRaises(except_orm, self.mail_group.read,
cr, user_raoul_id, [self.group_priv_id])
# Do: Raoul added in follower, read -> ok, in followers
self.mail_group.message_subscribe_users(cr, uid, [self.group_priv_id], [user_raoul_id])
self.mail_group.read(cr, user_raoul_id, [self.group_priv_id])
# Do: Raoul write on Jobs -> ok
self.mail_group.write(cr, user_raoul_id, [self.group_priv_id], {'name': 'modified'})
# Do: Bert cannot write on Private -> ko (read but no write)
self.assertRaises(except_orm, self.mail_group.write,
cr, user_bert_id, [self.group_priv_id], {'name': 're-modified'})
# Test: Bert cannot unlink the group
self.assertRaises(except_orm,
self.mail_group.unlink,
cr, user_bert_id, [self.group_priv_id])
# Do: Raoul unlinks the group, there are no followers and messages left
self.mail_group.unlink(cr, user_raoul_id, [self.group_priv_id])
fol_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.group'), ('res_id', '=', self.group_priv_id)])
self.assertFalse(fol_ids, 'unlinked document should not have any followers left')
msg_ids = self.mail_message.search(cr, uid, [('model', '=', 'mail.group'), ('res_id', '=', self.group_priv_id)])
self.assertFalse(msg_ids, 'unlinked document should not have any messages left')
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.osv.orm')
def test_10_mail_message_search_access_rights(self):
""" Testing mail_message.search() using specific _search implementation """
cr, uid, group_pigs_id = self.cr, self.uid, self.group_pigs_id
# Data: comment subtype for mail.message creation
ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'mail', 'mt_comment')
subtype_id = ref and ref[1] or False
# Data: Birds group, private
group_birds_id = self.mail_group.create(self.cr, self.uid, {'name': 'Birds', 'public': 'private'})
# Data: Raoul is member of Pigs
self.mail_group.message_subscribe(cr, uid, [group_pigs_id], [self.partner_raoul_id])
# Data: various author_ids, partner_ids, documents
msg_id1 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A', 'subtype_id': subtype_id})
msg_id2 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A+B', 'partner_ids': [(6, 0, [self.partner_bert_id])], 'subtype_id': subtype_id})
msg_id3 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A Pigs', 'model': 'mail.group', 'res_id': group_pigs_id, 'subtype_id': subtype_id})
msg_id4 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A+B Pigs', 'model': 'mail.group', 'res_id': group_pigs_id, 'partner_ids': [(6, 0, [self.partner_bert_id])], 'subtype_id': subtype_id})
msg_id5 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A+R Pigs', 'model': 'mail.group', 'res_id': group_pigs_id, 'partner_ids': [(6, 0, [self.partner_raoul_id])], 'subtype_id': subtype_id})
msg_id6 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A Birds', 'model': 'mail.group', 'res_id': group_birds_id, 'subtype_id': subtype_id})
msg_id7 = self.mail_message.create(cr, self.user_raoul_id, {'subject': '_Test', 'body': 'B', 'subtype_id': subtype_id})
msg_id8 = self.mail_message.create(cr, self.user_raoul_id, {'subject': '_Test', 'body': 'B+R', 'partner_ids': [(6, 0, [self.partner_raoul_id])], 'subtype_id': subtype_id})
# Test: Bert: 2 messages that have Bert in partner_ids
msg_ids = self.mail_message.search(cr, self.user_bert_id, [('subject', 'like', '_Test')])
self.assertEqual(set([msg_id2, msg_id4]), set(msg_ids), 'mail_message search failed')
# Test: Raoul: 3 messages on Pigs Raoul can read (employee can read group with default values), 0 on Birds (private group)
msg_ids = self.mail_message.search(cr, self.user_raoul_id, [('subject', 'like', '_Test'), ('body', 'like', 'A')])
self.assertEqual(set([msg_id3, msg_id4, msg_id5]), set(msg_ids), 'mail_message search failed')
# Test: Raoul: 3 messages on Pigs Raoul can read (employee can read group with default values), 0 on Birds (private group) + 2 messages as author
msg_ids = self.mail_message.search(cr, self.user_raoul_id, [('subject', 'like', '_Test')])
self.assertEqual(set([msg_id3, msg_id4, msg_id5, msg_id7, msg_id8]), set(msg_ids), 'mail_message search failed')
# Test: Admin: all messages
msg_ids = self.mail_message.search(cr, uid, [('subject', 'like', '_Test')])
self.assertEqual(set([msg_id1, msg_id2, msg_id3, msg_id4, msg_id5, msg_id6, msg_id7, msg_id8]), set(msg_ids), 'mail_message search failed')
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.osv.orm')
def test_15_mail_message_check_access_rule(self):
""" Testing mail_message.check_access_rule() """
cr, uid = self.cr, self.uid
partner_bert_id, partner_raoul_id = self.partner_bert_id, self.partner_raoul_id
user_bert_id, user_raoul_id = self.user_bert_id, self.user_raoul_id
# Prepare groups: Pigs (employee), Jobs (public)
pigs_msg_id = self.mail_group.message_post(cr, uid, self.group_pigs_id, body='Message')
priv_msg_id = self.mail_group.message_post(cr, uid, self.group_priv_id, body='Message')
# prepare an attachment
attachment_id = self.ir_attachment.create(cr, uid, {'datas': 'My attachment'.encode('base64'), 'name': 'doc.txt', 'datas_fname': 'doc.txt'})
# ----------------------------------------
# CASE1: read
# ----------------------------------------
# Do: create a new mail.message
message_id = self.mail_message.create(cr, uid, {'body': 'My Body', 'attachment_ids': [(4, attachment_id)]})
# Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc
self.assertRaises(except_orm, self.mail_message.read,
cr, user_bert_id, message_id)
# Do: message is pushed to Bert
notif_id = self.mail_notification.create(cr, uid, {'message_id': message_id, 'partner_id': partner_bert_id})
# Test: Bert reads the message, ok because notification pushed
self.mail_message.read(cr, user_bert_id, message_id)
# Test: Bert downloads attachment, ok because he can read message
self.mail_message.download_attachment(cr, user_bert_id, message_id, attachment_id)
# Do: remove notification
self.mail_notification.unlink(cr, uid, notif_id)
# Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc
self.assertRaises(except_orm, self.mail_message.read,
cr, self.user_bert_id, message_id)
# Test: Bert downloads attachment, crash because he can't read message
self.assertRaises(except_orm, self.mail_message.download_attachment,
cr, user_bert_id, message_id, attachment_id)
# Do: Bert is now the author
self.mail_message.write(cr, uid, [message_id], {'author_id': partner_bert_id})
# Test: Bert reads the message, ok because Bert is the author
self.mail_message.read(cr, user_bert_id, message_id)
# Do: Bert is not the author anymore
self.mail_message.write(cr, uid, [message_id], {'author_id': partner_raoul_id})
# Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc
self.assertRaises(except_orm, self.mail_message.read,
cr, user_bert_id, message_id)
# Do: message is attached to a document Bert can read, Jobs
self.mail_message.write(cr, uid, [message_id], {'model': 'mail.group', 'res_id': self.group_jobs_id})
# Test: Bert reads the message, ok because linked to a doc he is allowed to read
self.mail_message.read(cr, user_bert_id, message_id)
# Do: message is attached to a document Bert cannot read, Pigs
self.mail_message.write(cr, uid, [message_id], {'model': 'mail.group', 'res_id': self.group_pigs_id})
# Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc
self.assertRaises(except_orm, self.mail_message.read,
cr, user_bert_id, message_id)
# ----------------------------------------
# CASE2: create
# ----------------------------------------
# Do: Bert creates a message on Pigs -> ko, no creation rights
self.assertRaises(except_orm, self.mail_message.create,
cr, user_bert_id, {'model': 'mail.group', 'res_id': self.group_pigs_id, 'body': 'Test'})
# Do: Bert creates a message on Jobs -> ko, no creation rights
self.assertRaises(except_orm, self.mail_message.create,
cr, user_bert_id, {'model': 'mail.group', 'res_id': self.group_jobs_id, 'body': 'Test'})
# Do: Bert creates a private message -> ko, no creation rights
self.assertRaises(except_orm, self.mail_message.create,
cr, user_bert_id, {'body': 'Test'})
# Do: Raoul creates a message on Jobs -> ok, write access to the related document
self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_jobs_id, 'body': 'Test'})
# Do: Raoul creates a message on Priv -> ko, no write access to the related document
self.assertRaises(except_orm, self.mail_message.create,
cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_priv_id, 'body': 'Test'})
# Do: Raoul creates a private message -> ok
self.mail_message.create(cr, user_raoul_id, {'body': 'Test'})
# Do: Raoul creates a reply to a message on Priv -> ko
self.assertRaises(except_orm, self.mail_message.create,
cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_priv_id, 'body': 'Test', 'parent_id': priv_msg_id})
# Do: Raoul creates a reply to a message on Priv-> ok if has received parent
self.mail_notification.create(cr, uid, {'message_id': priv_msg_id, 'partner_id': self.partner_raoul_id})
self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_priv_id, 'body': 'Test', 'parent_id': priv_msg_id})
def test_20_message_set_star(self):
""" Tests for starring messages and its related access rights """
cr, uid = self.cr, self.uid
# Data: post a message on Pigs
msg_id = self.group_pigs.message_post(body='My Body', subject='1')
msg = self.mail_message.browse(cr, uid, msg_id)
msg_raoul = self.mail_message.browse(cr, self.user_raoul_id, msg_id)
# Do: Admin stars msg
self.mail_message.set_message_starred(cr, uid, [msg.id], True)
msg.refresh()
# Test: notification exists
notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_admin_id), ('message_id', '=', msg.id)])
self.assertEqual(len(notif_ids), 1, 'mail_message set_message_starred: more than one notification created')
# Test: notification starred
notif = self.mail_notification.browse(cr, uid, notif_ids[0])
self.assertTrue(notif.starred, 'mail_notification starred failed')
self.assertTrue(msg.starred, 'mail_message starred failed')
# Do: Raoul stars msg
self.mail_message.set_message_starred(cr, self.user_raoul_id, [msg.id], True)
msg_raoul.refresh()
# Test: notification exists
notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_raoul_id), ('message_id', '=', msg.id)])
self.assertEqual(len(notif_ids), 1, 'mail_message set_message_starred: more than one notification created')
# Test: notification starred
notif = self.mail_notification.browse(cr, uid, notif_ids[0])
self.assertTrue(notif.starred, 'mail_notification starred failed')
self.assertTrue(msg_raoul.starred, 'mail_message starred failed')
# Do: Admin unstars msg
self.mail_message.set_message_starred(cr, uid, [msg.id], False)
msg.refresh()
msg_raoul.refresh()
# Test: msg unstarred for Admin, starred for Raoul
self.assertFalse(msg.starred, 'mail_message starred failed')
self.assertTrue(msg_raoul.starred, 'mail_message starred failed')
def test_30_message_set_read(self):
""" Tests for reading messages and its related access rights """
cr, uid = self.cr, self.uid
# Data: post a message on Pigs
msg_id = self.group_pigs.message_post(body='My Body', subject='1')
msg = self.mail_message.browse(cr, uid, msg_id)
msg_raoul = self.mail_message.browse(cr, self.user_raoul_id, msg_id)
# Do: Admin reads msg
self.mail_message.set_message_read(cr, uid, [msg.id], True)
msg.refresh()
# Test: notification exists
notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_admin_id), ('message_id', '=', msg.id)])
self.assertEqual(len(notif_ids), 1, 'mail_message set_message_read: more than one notification created')
# Test: notification read
notif = self.mail_notification.browse(cr, uid, notif_ids[0])
self.assertTrue(notif.read, 'mail_notification read failed')
self.assertFalse(msg.to_read, 'mail_message read failed')
# Do: Raoul reads msg
self.mail_message.set_message_read(cr, self.user_raoul_id, [msg.id], True)
msg_raoul.refresh()
# Test: notification exists
notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_raoul_id), ('message_id', '=', msg.id)])
self.assertEqual(len(notif_ids), 1, 'mail_message set_message_read: more than one notification created')
# Test: notification read
notif = self.mail_notification.browse(cr, uid, notif_ids[0])
self.assertTrue(notif.read, 'mail_notification read failed')
self.assertFalse(msg_raoul.to_read, 'mail_message read failed')
# Do: Admin unreads msg
self.mail_message.set_message_read(cr, uid, [msg.id], False)
msg.refresh()
msg_raoul.refresh()
# Test: msg unread for Admin, read for Raoul
self.assertTrue(msg.to_read, 'mail_message read failed')
self.assertFalse(msg_raoul.to_read, 'mail_message read failed')
def test_40_message_vote(self):
""" Test designed for the vote/unvote feature. """
cr, uid = self.cr, self.uid
# Data: post a message on Pigs
msg_id = self.group_pigs.message_post(body='My Body', subject='1')
msg = self.mail_message.browse(cr, uid, msg_id)
msg_raoul = self.mail_message.browse(cr, self.user_raoul_id, msg_id)
# Do: Admin vote for msg
self.mail_message.vote_toggle(cr, uid, [msg.id])
msg.refresh()
# Test: msg has Admin as voter
self.assertEqual(set(msg.vote_user_ids), set([self.user_admin]), 'mail_message vote: after voting, Admin should be in the voter')
# Do: Raoul votes for msg
self.mail_message.vote_toggle(cr, self.user_raoul_id, [msg.id])
msg_raoul.refresh()
# Test: msg has Admin and Raoul as voters
self.assertEqual(set(msg_raoul.vote_user_ids), set([self.user_admin, self.user_raoul]), 'mail_message vote: after voting, Admin and Raoul should be in the voters')
# Do: Admin unvote for msg
self.mail_message.vote_toggle(cr, uid, [msg.id])
msg.refresh()
msg_raoul.refresh()
# Test: msg has Raoul as voter
self.assertEqual(set(msg.vote_user_ids), set([self.user_raoul]), 'mail_message vote: after unvoting, Raoul should be in the voter')
self.assertEqual(set(msg_raoul.vote_user_ids), set([self.user_raoul]), 'mail_message vote: after unvoting, Raoul should be in the voter')
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.osv.orm')
def test_50_mail_flow_access_rights(self):
""" Test a Chatter-looks alike flow to test access rights """
cr, uid = self.cr, self.uid
mail_compose = self.registry('mail.compose.message')
partner_bert_id, partner_raoul_id = self.partner_bert_id, self.partner_raoul_id
user_bert_id, user_raoul_id = self.user_bert_id, self.user_raoul_id
# Prepare groups: Pigs (employee), Jobs (public)
pigs_msg_id = self.mail_group.message_post(cr, uid, self.group_pigs_id, body='Message', partner_ids=[(4, self.partner_admin_id)])
jobs_msg_id = self.mail_group.message_post(cr, uid, self.group_jobs_id, body='Message', partner_ids=[(4, self.partner_admin_id)])
# ----------------------------------------
# CASE1: Bert, without groups
# ----------------------------------------
# Do: Bert reads Jobs basic fields, ok because public = read access on the group
self.mail_group.read(cr, user_bert_id, self.group_jobs_id, ['name', 'description'])
# Do: Bert reads Jobs messages, ok because read access on the group => read access on its messages
jobs_message_ids = self.mail_group.read(cr, user_bert_id, self.group_jobs_id, ['message_ids'])['message_ids']
self.mail_message.read(cr, user_bert_id, jobs_message_ids)
# Do: Bert browses Jobs, ok (no direct browse of partners), ok for messages, ko for followers (accessible to employees or partner manager)
bert_jobs = self.mail_group.browse(cr, user_bert_id, self.group_jobs_id)
trigger_read = bert_jobs.name
for message in bert_jobs.message_ids:
trigger_read = message.subject
for partner in bert_jobs.message_follower_ids:
with self.assertRaises(except_orm):
trigger_read = partner.name
# Do: Bert comments Jobs, ko because no creation right
self.assertRaises(except_orm,
self.mail_group.message_post,
cr, user_bert_id, self.group_jobs_id, body='I love Pigs')
# Do: Bert writes on its own profile, ko because no message create access
with self.assertRaises(except_orm):
self.res_users.message_post(cr, user_bert_id, user_bert_id, body='I love Bert')
self.res_partner.message_post(cr, user_bert_id, partner_bert_id, body='I love Bert')
# ----------------------------------------
# CASE2: Raoul, employee
# ----------------------------------------
# Do: Raoul browses Jobs -> ok, ok for message_ids, ok for message_follower_ids
raoul_jobs = self.mail_group.browse(cr, user_raoul_id, self.group_jobs_id)
trigger_read = raoul_jobs.name
for message in raoul_jobs.message_ids:
trigger_read = message.subject
for partner in raoul_jobs.message_follower_ids:
trigger_read = partner.name
# Do: Raoul comments Jobs, ok
self.mail_group.message_post(cr, user_raoul_id, self.group_jobs_id, body='I love Pigs')
# Do: Raoul creates a mail.compose.message record on Jobs, because he uses the wizard
compose_id = mail_compose.create(cr, user_raoul_id,
{'subject': 'Subject', 'body': 'Body text', 'partner_ids': []},
{'default_composition_mode': 'comment', 'default_model': 'mail.group', 'default_res_id': self.group_jobs_id})
mail_compose.send_mail(cr, user_raoul_id, [compose_id])
# Do: Raoul replies to a Jobs message using the composer
compose_id = mail_compose.create(cr, user_raoul_id,
{'subject': 'Subject', 'body': 'Body text'},
{'default_composition_mode': 'reply', 'default_parent_id': pigs_msg_id})
mail_compose.send_mail(cr, user_raoul_id, [compose_id])
# Do: Raoul writes on its own profile, ok because follower of its partner
self.res_users.message_post(cr, user_raoul_id, user_raoul_id, body='I love Raoul')
self.res_partner.message_post(cr, user_raoul_id, partner_raoul_id, body='I love Raoul')
compose_id = mail_compose.create(cr, user_raoul_id,
{'subject': 'Subject', 'body': 'Body text', 'partner_ids': []},
{'default_composition_mode': 'comment', 'default_model': 'res.users', 'default_res_id': self.user_raoul_id})
mail_compose.send_mail(cr, user_raoul_id, [compose_id])
| agpl-3.0 | 8,600,911,126,959,699,000 | 61.522667 | 217 | 0.625053 | false |
inflector/singnet | agent/sn_agent/api/poller.py | 6 | 1373 | import asyncio
import datetime
import logging
from contextlib import suppress
from aiohttp import web
logger = logging.getLogger(__file__)
class Periodic:
def __init__(self, func, time):
self.func = func
self.time = time
self.is_started = False
self._task = None
async def start(self):
logger.debug('Starting periodic task')
if not self.is_started:
self.is_started = True
# Start task to call func periodically:
self._task = asyncio.ensure_future(self._run())
async def stop(self):
logger.debug('Stopping periodic task')
if self.is_started:
self.is_started = False
# Stop task and await it stopped:
self._task.cancel()
with suppress(asyncio.CancelledError):
await self._task
async def _run(self):
while True:
await asyncio.sleep(self.time)
self.func()
def task_to_run():
print('Periodic Task: %s' % datetime.datetime.now())
async def startup(app: web.Application):
poller = Periodic(task_to_run, 5)
await poller.start()
app['eth_client_poller'] = poller
async def cleanup(app: web.Application):
await app['eth_client_poller'].stop()
def setup_poller(app):
app.on_startup.append(startup)
app.on_cleanup.append(cleanup)
| mit | 8,070,426,383,488,599,000 | 23.517857 | 59 | 0.612527 | false |
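The Periodic helper above schedules a plain callable on the event loop at a fixed interval and is attached to the aiohttp application through the on_startup/on_cleanup hooks. It can also be driven directly; a small sketch, assuming Periodic is importable from the module above and with an arbitrary callable and interval:

import asyncio

async def demo():
    # Assumes Periodic is importable from the module above.
    ticker = Periodic(lambda: print('tick'), 1)   # call the function roughly once per second
    await ticker.start()
    await asyncio.sleep(3.5)                      # let it fire a few times
    await ticker.stop()

asyncio.get_event_loop().run_until_complete(demo())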
echodaemon/Empire | lib/common/messages.py | 8 | 16901 | """
Common terminal messages used across Empire.
Titles, agent displays, listener displays, etc.
"""
import os
import time
import textwrap
# Empire imports
import helpers
###############################################################
#
# Messages
#
###############################################################
def title(version):
"""
Print the tool title, with version.
"""
os.system('clear')
print "================================================================"
# print ' [Empire] PowerShell/Python post-exploitation framework'
print " [Empire] Post-Exploitation Framework"
print '================================================================'
print " [Version] %s | [Web] https://github.com/empireProject/Empire" % (version)
print '================================================================'
print """
_______ .___ ___. .______ __ .______ _______
| ____|| \/ | | _ \ | | | _ \ | ____|
| |__ | \ / | | |_) | | | | |_) | | |__
| __| | |\/| | | ___/ | | | / | __|
| |____ | | | | | | | | | |\ \----.| |____
|_______||__| |__| | _| |__| | _| `._____||_______|
"""
def loading():
"""
Print an ASCII loading screen.
"""
print """
`````````
``````.--::///+
````-+sydmmmNNNNNNN
``./ymmNNNNNNNNNNNNNN
``-ymmNNNNNNNNNNNNNNNNN
```ommmmNNNNNNNNNNNNNNNNN
``.ydmNNNNNNNNNNNNNNNNNNNN
```odmmNNNNNNNNNNNNNNNNNNNN
```/hmmmNNNNNNNNNNNNNNNNMNNN
````+hmmmNNNNNNNNNNNNNNNNNMMN
````..ymmmNNNNNNNNNNNNNNNNNNNN
````:.+so+//:---.......----::-
`````.`````````....----:///++++
``````.-/osy+////:::---...-dNNNN
````:sdyyydy` ```:mNNNNM
````-hmmdhdmm:` ``.+hNNNNNNM
```.odNNmdmmNNo````.:+yNNNNNNNNNN
```-sNNNmdh/dNNhhdNNNNNNNNNNNNNNN
```-hNNNmNo::mNNNNNNNNNNNNNNNNNNN
```-hNNmdNo--/dNNNNNNNNNNNNNNNNNN
````:dNmmdmd-:+NNNNNNNNNNNNNNNNNNm
```/hNNmmddmd+mNNNNNNNNNNNNNNds++o
``/dNNNNNmmmmmmmNNNNNNNNNNNmdoosydd
`sNNNNdyydNNNNmmmmmmNNNNNmyoymNNNNN
:NNmmmdso++dNNNNmmNNNNNdhymNNNNNNNN
-NmdmmNNdsyohNNNNmmNNNNNNNNNNNNNNNN
`sdhmmNNNNdyhdNNNNNNNNNNNNNNNNNNNNN
/yhmNNmmNNNNNNNNNNNNNNNNNNNNNNmhh
`+yhmmNNNNNNNNNNNNNNNNNNNNNNmh+:
`./dmmmmNNNNNNNNNNNNNNNNmmd.
`ommmmmNNNNNNNmNmNNNNmmd:
:dmmmmNNNNNmh../oyhhhy:
`sdmmmmNNNmmh/++-.+oh.
`/dmmmmmmmmdo-:/ossd:
`/ohhdmmmmmmdddddmh/
`-/osyhdddddhyo:
``.----.`
Welcome to the Empire"""
time.sleep(3)
os.system('clear')
def wrap_string(data, width=40, indent=32, indentAll=False, followingHeader=None):
"""
Print an option description message in a nicely
wrapped and formatted paragraph.
followingHeader -> text that also goes on the first line
"""
data = str(data)
if len(data) > width:
lines = textwrap.wrap(textwrap.dedent(data).strip(), width=width)
if indentAll:
returnString = ' ' * indent + lines[0]
if followingHeader:
returnString += " " + followingHeader
else:
returnString = lines[0]
if followingHeader:
returnString += " " + followingHeader
i = 1
while i < len(lines):
returnString += "\n" + ' ' * indent + (lines[i]).strip()
i += 1
return returnString
else:
return data.strip()
def wrap_columns(col1, col2, width1=24, width2=40, indent=31):
"""
Takes two strings of text and turns them into nicely formatted column output.
Used by display_module()
"""
lines1 = textwrap.wrap(textwrap.dedent(col1).strip(), width=width1)
lines2 = textwrap.wrap(textwrap.dedent(col2).strip(), width=width2)
result = ''
limit = max(len(lines1), len(lines2))
for x in xrange(limit):
if x < len(lines1):
if x != 0:
result += ' ' * indent
result += '{line: <0{width}s}'.format(width=width1, line=lines1[x])
else:
if x == 0:
result += ' ' * width1
else:
result += ' ' * (indent + width1)
if x < len(lines2):
result += ' ' + '{line: <0{width}s}'.format(width=width2, line=lines2[x])
if x != limit-1:
result += "\n"
return result
def display_options(options, color=True):
"""
Take a dictionary and display it nicely.
"""
for key in options:
if color:
print "\t%s\t%s" % (helpers.color('{0: <16}'.format(key), "green"), wrap_string(options[key]))
else:
print "\t%s\t%s" % ('{0: <16}'.format(key), wrap_string(options[key]))
def display_agents(agents):
"""
Take a dictionary of agents and build the display for the main menu.
"""
if len(agents) > 0:
print ''
print helpers.color("[*] Active agents:\n")
print " Name Lang Internal IP Machine Name Username Process Delay Last Seen"
print " --------- ---- ----------- ------------ --------- ------- ----- --------------------"
for agent in agents:
if str(agent['high_integrity']) == '1':
# add a * to the username if it's high integrity
agent['username'] = '*' + str(agent['username'])
if not agent['language'] or agent['language'] == '':
agent['language'] = 'X'
elif agent['language'].lower() == 'powershell':
agent['language'] = 'ps'
elif agent['language'].lower() == 'python':
agent['language'] = 'py'
else:
agent['language'] = 'X'
print " %.16s%.6s%.16s%.16s%.20s%.20s%.9s%.20s" % ('{0: <16}'.format(agent['name']), '{0: <6}'.format(agent['language']), '{0: <16}'.format(agent['internal_ip']), '{0: <16}'.format(agent['hostname']), '{0: <20}'.format(agent['username']), '{0: <20}'.format(str(agent['process_name']) + "/" + str(agent['process_id'])), '{0: <9}'.format(str(agent['delay']) + "/" +str(agent['jitter'])), agent['lastseen_time'])
print ''
else:
print helpers.color('[!] No agents currently registered')
def display_agent(agent, returnAsString=False):
"""
Display an agent all nice-like.
Takes in the tuple of the raw agent database results.
"""
if returnAsString:
agentString = "\n[*] Agent info:\n"
for key, value in agent.iteritems():
if key != 'functions' and key != 'takings' and key != 'results':
agentString += " %s\t%s\n" % ('{0: <16}'.format(key), wrap_string(value, width=70))
return agentString + '\n'
else:
print helpers.color("\n[*] Agent info:\n")
for key, value in agent.iteritems():
if key != 'functions' and key != 'takings' and key != 'results':
print "\t%s\t%s" % (helpers.color('{0: <16}'.format(key), "blue"), wrap_string(value, width=70))
print ''
def display_active_listeners(listeners):
"""
Take an active listeners list and display everything nicely.
"""
if len(listeners) > 0:
print ''
print helpers.color("[*] Active listeners:\n")
print " Name Module Host Delay/Jitter KillDate"
print " ---- ------ ---- ------------ --------"
for listenerName, listener in listeners.iteritems():
moduleName = listener['moduleName']
if 'Host' in listener['options']:
host = listener['options']['Host']['Value']
else:
host = ''
if 'DefaultDelay' in listener['options']:
defaultDelay = listener['options']['DefaultDelay']['Value']
else:
defaultDelay = 'n/a'
if 'DefaultJitter' in listener['options']:
defaultJitter = listener['options']['DefaultJitter']['Value']
else:
defaultJitter = ''
if defaultDelay == 'n/a':
connectInterval = 'n/a'
else:
connectInterval = "%s/%s" % (defaultDelay, defaultJitter)
if 'KillDate' in listener['options']:
killDate = listener['options']['KillDate']['Value']
else:
killDate = 'n/a'
print " %s%s%s%s%s" % ('{0: <18}'.format(listenerName), '{0: <16}'.format(moduleName), '{0: <37}'.format(host), '{0: <15}'.format(connectInterval), '{0: <12}'.format(killDate))
print ''
else:
print helpers.color("[!] No listeners currently active ")
def display_active_listener(listener):
"""
Displays an active listener's information structure.
"""
print "\n%s Options:\n" % (listener['options']['Name']['Value'])
print " Name Required Value Description"
print " ---- -------- ------- -----------"
for option, values in listener['options'].iteritems():
# if there's a long value length, wrap it
if len(str(values['Value'])) > 33:
print " %s%s%s" % ('{0: <18}'.format(option), '{0: <12}'.format(("True" if values['Required'] else "False")), '{0: <33}'.format(wrap_string(values['Value'], width=32, indent=32, followingHeader=values['Description'])))
else:
print " %s%s%s%s" % ('{0: <18}'.format(option), '{0: <12}'.format(("True" if values['Required'] else "False")), '{0: <33}'.format(values['Value']), values['Description'])
print "\n"
def display_listener_module(listener):
"""
Displays a listener module's information structure.
"""
print '\n{0: >10}'.format("Name: ") + str(listener.info['Name'])
print '{0: >10}'.format("Category: ") + str(listener.info['Category'])
print "\nAuthors:"
for author in listener.info['Author']:
print " " +author
print "\nDescription:"
desc = wrap_string(listener.info['Description'], width=60, indent=2, indentAll=True)
if len(desc.splitlines()) == 1:
print " " + str(desc)
else:
print desc
if 'Comments' in listener.info:
comments = listener.info['Comments']
if isinstance(comments, list):
comments = ' '.join(comments)
if comments.strip() != '':
print "\nComments:"
if isinstance(comments, list):
comments = ' '.join(comments)
comment = wrap_string(comments, width=60, indent=2, indentAll=True)
if len(comment.splitlines()) == 1:
print " " + str(comment)
else:
print comment
print "\n%s Options:\n" % (listener.info['Name'])
print " Name Required Value Description"
print " ---- -------- ------- -----------"
for option, values in listener.options.iteritems():
# if there's a long value length, wrap it
if len(str(values['Value'])) > 33:
print " %s%s%s" % ('{0: <18}'.format(option), '{0: <12}'.format(("True" if values['Required'] else "False")), '{0: <33}'.format(wrap_string(values['Value'], width=32, indent=32, followingHeader=values['Description'])))
else:
print " %s%s%s%s" % ('{0: <18}'.format(option), '{0: <12}'.format(("True" if values['Required'] else "False")), '{0: <33}'.format(values['Value']), values['Description'])
print "\n"
def display_stager(stager):
"""
Displays a stager's information structure.
"""
print "\nName: " + stager.info['Name']
print "\nDescription:"
desc = wrap_string(stager.info['Description'], width=50, indent=2, indentAll=True)
if len(desc.splitlines()) == 1:
print " " + str(desc)
else:
print desc
# print out any options, if present
if stager.options:
print "\nOptions:\n"
print " Name Required Value Description"
print " ---- -------- ------- -----------"
for option, values in stager.options.iteritems():
print " %s%s%s%s" % ('{0: <17}'.format(option), '{0: <12}'.format(("True" if values['Required'] else "False")), '{0: <18}'.format(values['Value']), wrap_string(values['Description'], indent=49))
print "\n"
def display_module(moduleName, module):
"""
Displays a module's information structure.
"""
print '\n{0: >20}'.format("Name: ") + str(module.info['Name'])
print '{0: >20}'.format("Module: ") + str(moduleName)
if 'NeedsAdmin' in module.info:
print '{0: >20}'.format("NeedsAdmin: ") + ("True" if module.info['NeedsAdmin'] else "False")
if 'OpsecSafe' in module.info:
print '{0: >20}'.format("OpsecSafe: ") + ("True" if module.info['OpsecSafe'] else "False")
if 'Language' in module.info:
print '{0: >20}'.format("Language: ") + str(module.info['Language'])
if 'MinLanguageVersion' in module.info:
print '{0: >20}'.format("MinLanguageVersion: ") + str(module.info['MinLanguageVersion'])
if 'Background' in module.info:
print '{0: >20}'.format("Background: ") + ("True" if module.info['Background'] else "False")
if 'OutputExtension' in module.info:
print '{0: >20}'.format("OutputExtension: ") + (str(module.info['OutputExtension']) if module.info['OutputExtension'] else "None")
print "\nAuthors:"
for author in module.info['Author']:
print " " +author
print "\nDescription:"
desc = wrap_string(module.info['Description'], width=60, indent=2, indentAll=True)
if len(desc.splitlines()) == 1:
print " " + str(desc)
else:
print desc
if 'Comments' in module.info:
comments = module.info['Comments']
if isinstance(comments, list):
comments = ' '.join(comments)
if comments.strip() != '':
print "\nComments:"
if isinstance(comments, list):
comments = ' '.join(comments)
comment = wrap_string(comments, width=60, indent=2, indentAll=True)
if len(comment.splitlines()) == 1:
print " " + str(comment)
else:
print comment
# print out any options, if present
if module.options:
# get the size for the first column
maxNameLen = len(max(module.options.keys(), key=len))
print "\nOptions:\n"
print " %sRequired Value Description" %('{:<{}s}'.format("Name", maxNameLen+1))
print " %s-------- ------- -----------" %('{:<{}s}'.format("----", maxNameLen+1))
for option, values in module.options.iteritems():
print " %s%s%s" % ('{:<{}s}'.format(str(option), maxNameLen+1), '{0: <12}'.format(("True" if values['Required'] else "False")), wrap_columns(str(values['Value']), str(values['Description']), indent=(31 + (maxNameLen-16))))
print ''
def display_module_search(moduleName, module):
"""
Displays the name/description of a module for search results.
"""
# Suffix modules requring elevated context with '*'
if module.info['NeedsAdmin']:
print " %s*\n" % (helpers.color(moduleName, 'blue'))
else:
print " %s\n" % (helpers.color(moduleName, 'blue'))
# width=40, indent=32, indentAll=False,
lines = textwrap.wrap(textwrap.dedent(module.info['Description']).strip(), width=70)
for line in lines:
print "\t" + line
print "\n"
def display_credentials(creds):
"""
Take a credential array and display everything nicely.
"""
print helpers.color("\nCredentials:\n", "blue")
print " CredID CredType Domain UserName Host Password"
print " ------ -------- ------ -------- ---- --------"
for cred in creds:
# (id, credtype, domain, username, password, host, notes, sid)
credID = cred[0]
credType = cred[1]
domain = cred[2]
username = cred[3]
password = cred[4]
host = cred[5]
print " %s%s%s%s%s%s" % ('{0: <8}'.format(credID), '{0: <11}'.format(credType), '{0: <25}'.format(domain), '{0: <17}'.format(username), '{0: <17}'.format(host), password)
print ''
| bsd-3-clause | -5,717,281,891,315,895,000 | 35.424569 | 423 | 0.503816 | false |
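Most of the column layout above comes from wrap_string and wrap_columns. A quick sketch of how they behave; the sample text and widths are arbitrary and chosen small so the wrapping is visible:

desc = ("Restrict the agent to only communicate with the team server "
        "between the hours specified in the working-hours option.")

# Long values wrap onto continuation lines indented by `indent` spaces.
print(wrap_string(desc, width=30, indent=8))

# Two strings rendered side by side as fixed-width columns.
print(wrap_columns("WorkingHours", desc, width1=16, width2=40, indent=4))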
augustozuniga/arisgames | zxing-master/cpp/scons/scons-local-2.0.0.final.0/SCons/compat/_scons_collections.py | 34 | 1869 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
collections compatibility module for older (pre-2.4) Python versions
This does not not NOT (repeat, *NOT*) provide complete collections
functionality. It only wraps the portions of collections functionality
used by SCons, in an interface that looks enough like collections for
our purposes.
"""
__revision__ = "src/engine/SCons/compat/_scons_collections.py 5023 2010/06/14 22:05:46 scons"
# Use exec to hide old names from fixers.
exec("""if True:
from UserDict import UserDict
from UserList import UserList
from UserString import UserString""")
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | 8,439,368,118,038,856,000 | 40.533333 | 95 | 0.751204 | false |
ychfan/tensorflow | tensorflow/python/profiler/profile_context_test.py | 32 | 4282 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.profiler import option_builder
# pylint: disable=g-bad-import-order
from tensorflow.python.profiler import profile_context
from tensorflow.python.profiler.internal import model_analyzer_testlib as lib
builder = option_builder.ProfileOptionBuilder
class ProfilerContextTest(test.TestCase):
def testBasics(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), "dump")
opts = builder(builder.time_and_memory()
).with_file_output(outfile).build()
x = lib.BuildFullModel()
profile_str = None
profile_step100 = os.path.join(test.get_temp_dir(), "profile_100")
with profile_context.ProfileContext(test.get_temp_dir()) as pctx:
pctx.add_auto_profiling("op", options=opts, profile_steps=[15, 50, 100])
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
total_steps = 101
for i in range(total_steps):
sess.run(x)
if i == 14 or i == 49:
self.assertTrue(gfile.Exists(outfile))
gfile.Remove(outfile)
if i == 99:
self.assertTrue(gfile.Exists(profile_step100))
with gfile.Open(outfile, "r") as f:
profile_str = f.read()
gfile.Remove(outfile)
with lib.ProfilerFromFile(
os.path.join(test.get_temp_dir(), "profile_100")) as profiler:
profiler.profile_operations(options=opts)
with gfile.Open(outfile, "r") as f:
self.assertEqual(profile_str, f.read())
def testAutoTracingInDebugMode(self):
ops.reset_default_graph()
x = lib.BuildFullModel()
with profile_context.ProfileContext(test.get_temp_dir(), debug=True):
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
for _ in range(10):
sess.run(x)
for f in gfile.ListDirectory(test.get_temp_dir()):
# Warm up, no tracing.
self.assertFalse("run_meta" in f)
sess.run(x)
self.assertTrue(
gfile.Exists(os.path.join(test.get_temp_dir(), "run_meta_11")))
gfile.Remove(os.path.join(test.get_temp_dir(), "run_meta_11"))
# fetched already.
sess.run(x)
for f in gfile.ListDirectory(test.get_temp_dir()):
self.assertFalse("run_meta" in f)
def testDisabled(self):
ops.reset_default_graph()
x = lib.BuildFullModel()
with profile_context.ProfileContext(test.get_temp_dir(),
enabled=False) as pctx:
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
for _ in range(10):
sess.run(x)
self.assertTrue(pctx.profiler is None)
self.assertTrue(
getattr(session.BaseSession, "profile_context", None) is None)
with profile_context.ProfileContext(test.get_temp_dir()) as pctx:
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
for _ in range(10):
sess.run(x)
self.assertFalse(pctx.profiler is None)
self.assertFalse(
getattr(session.BaseSession, "profile_context", None) is None)
if __name__ == "__main__":
test.main()
| apache-2.0 | 8,503,255,060,501,801,000 | 36.234783 | 80 | 0.652265 | false |
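The test above exercises tf.profiler's ProfileContext: wrapping session.run calls in the context records run metadata and dumps op-level profiles at the requested steps. A condensed usage sketch along the same lines; the toy graph, output paths and step numbers are placeholders, and it assumes a TensorFlow 1.x graph-mode environment:

import tensorflow as tf
from tensorflow.python.profiler import option_builder, profile_context

# Toy stand-in graph: a variable incremented once per training step.
v = tf.Variable(0.0)
train_op = tf.assign_add(v, 1.0)

opts = option_builder.ProfileOptionBuilder(
    option_builder.ProfileOptionBuilder.time_and_memory()
).with_file_output('/tmp/profile_dump').build()

with profile_context.ProfileContext('/tmp/profiles') as pctx:
    # Emit an op-level time/memory profile at steps 15, 50 and 100.
    pctx.add_auto_profiling('op', options=opts, profile_steps=[15, 50, 100])
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(101):
            sess.run(train_op)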
pjurik2/pykarma | feeds/rss.py | 1 | 3205 | import os, sys
import random
import time
import feedparser
import itertools
import HTMLParser
from feed import Feed
if os.getcwd().rstrip(os.sep).endswith('feeds'):
os.chdir('..')
sys.path.insert(0, os.getcwd())
from gui_client import new_rpc
import web
import reddit
class RSSFeed(Feed):
def __init__(self):
self.title = 'RSS Feed'
self.streams = []
self.wait_range = (60, 70)
self.max_error_wait = 600
self.max_subs = 0
self.urls = set()
def configure(self):
pass
def watch(self, new_streams=None):
self.configure()
self.web = web.Web()
try:
self.rpc = new_rpc(self.title)
except:
self.rpc = None
print 'Warning: Running without RPC'
if new_streams is None:
new_streams = []
streams = self.streams + new_streams
for url in itertools.cycle(streams):
print url
self.check_feed(url)
time.sleep(random.randint(*self.wait_range))
def check_feed(self, url):
for fail_count in itertools.count():
try:
datad = feedparser.parse(url)
except:
print 'Parse error for', url
time.sleep(min(2 ** fail_count, self.max_error_wait))
else:
break
try:
posts = datad['items']
except:
print 'No items field for', url
posts = []
for post in posts:
self.check_post(post)
def check_post(self, post):
if ('link' not in post):
return False
url = self.url_pre_filter(post['link'])
try:
req = self.web.get(url)
url = req.geturl()
except:
print 'URL retrieval error for ', url
return False
url = self.url_post_filter(url)
if (url in self.urls) or not url.startswith('http://'):
return False
self.urls.add(url)
feed_title = self.default_title_filter(post.get('title', ''))
page_title = self.default_title_filter(self.web.title(req))
title = self.title_filter(page_title, feed_title)
if self.rpc is not None:
subreddit = self.rpc.get_title_subreddit(title)
keywords = self.rpc.get_title_keywords(title)
if self.rpc.get_link_posted_count(url, title) <= self.max_subs:
stats = self.rpc.get_learned_stats(title, keywords)
self.rpc.gui_link_add(self.title, title, url, subreddit, keywords, **stats)
try:
req.close()
except:
pass
print title
print url
def url_pre_filter(self, url):
return url
def url_post_filter(self, url):
return url
def default_title_filter(self, title):
h = HTMLParser.HTMLParser()
return h.unescape(title)
def title_filter(self, page_title, feed_title):
return page_title
if __name__ == '__main__':
f = RSSFeed()
f.watch(['http://www.physorg.com/rss-feed/'])
| mit | 5,330,201,188,933,551,000 | 24.64 | 91 | 0.531669 | false |
aman-iitj/scipy | scipy/linalg/tests/test_special_matrices.py | 36 | 22800 | """Tests for functions in special_matrices.py."""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import arange, add, array, eye, copy, sqrt
from numpy.testing import (TestCase, run_module_suite, assert_raises,
assert_equal, assert_array_equal, assert_array_almost_equal,
assert_allclose)
from scipy._lib.six import xrange
from scipy.special import comb
from scipy.linalg import (toeplitz, hankel, circulant, hadamard, leslie,
companion, tri, triu, tril, kron, block_diag,
helmert, hilbert, invhilbert, pascal, invpascal, dft)
from scipy.fftpack import fft
from numpy.linalg import cond
def get_mat(n):
data = arange(n)
data = add.outer(data,data)
return data
class TestTri(TestCase):
def test_basic(self):
assert_equal(tri(4),array([[1,0,0,0],
[1,1,0,0],
[1,1,1,0],
[1,1,1,1]]))
assert_equal(tri(4,dtype='f'),array([[1,0,0,0],
[1,1,0,0],
[1,1,1,0],
[1,1,1,1]],'f'))
def test_diag(self):
assert_equal(tri(4,k=1),array([[1,1,0,0],
[1,1,1,0],
[1,1,1,1],
[1,1,1,1]]))
assert_equal(tri(4,k=-1),array([[0,0,0,0],
[1,0,0,0],
[1,1,0,0],
[1,1,1,0]]))
def test_2d(self):
assert_equal(tri(4,3),array([[1,0,0],
[1,1,0],
[1,1,1],
[1,1,1]]))
assert_equal(tri(3,4),array([[1,0,0,0],
[1,1,0,0],
[1,1,1,0]]))
def test_diag2d(self):
assert_equal(tri(3,4,k=2),array([[1,1,1,0],
[1,1,1,1],
[1,1,1,1]]))
assert_equal(tri(4,3,k=-2),array([[0,0,0],
[0,0,0],
[1,0,0],
[1,1,0]]))
class TestTril(TestCase):
def test_basic(self):
a = (100*get_mat(5)).astype('l')
b = a.copy()
for k in range(5):
for l in range(k+1,5):
b[k,l] = 0
assert_equal(tril(a),b)
def test_diag(self):
a = (100*get_mat(5)).astype('f')
b = a.copy()
for k in range(5):
for l in range(k+3,5):
b[k,l] = 0
assert_equal(tril(a,k=2),b)
b = a.copy()
for k in range(5):
for l in range(max((k-1,0)),5):
b[k,l] = 0
assert_equal(tril(a,k=-2),b)
class TestTriu(TestCase):
def test_basic(self):
a = (100*get_mat(5)).astype('l')
b = a.copy()
for k in range(5):
for l in range(k+1,5):
b[l,k] = 0
assert_equal(triu(a),b)
def test_diag(self):
a = (100*get_mat(5)).astype('f')
b = a.copy()
for k in range(5):
for l in range(max((k-1,0)),5):
b[l,k] = 0
assert_equal(triu(a,k=2),b)
b = a.copy()
for k in range(5):
for l in range(k+3,5):
b[l,k] = 0
assert_equal(triu(a,k=-2),b)
class TestToeplitz(TestCase):
def test_basic(self):
y = toeplitz([1,2,3])
assert_array_equal(y,[[1,2,3],[2,1,2],[3,2,1]])
y = toeplitz([1,2,3],[1,4,5])
assert_array_equal(y,[[1,4,5],[2,1,4],[3,2,1]])
def test_complex_01(self):
data = (1.0 + arange(3.0)) * (1.0 + 1.0j)
x = copy(data)
t = toeplitz(x)
# Calling toeplitz should not change x.
assert_array_equal(x, data)
# According to the docstring, x should be the first column of t.
col0 = t[:,0]
assert_array_equal(col0, data)
assert_array_equal(t[0,1:], data[1:].conj())
def test_scalar_00(self):
"""Scalar arguments still produce a 2D array."""
t = toeplitz(10)
assert_array_equal(t, [[10]])
t = toeplitz(10, 20)
assert_array_equal(t, [[10]])
def test_scalar_01(self):
c = array([1,2,3])
t = toeplitz(c, 1)
assert_array_equal(t, [[1],[2],[3]])
def test_scalar_02(self):
c = array([1,2,3])
t = toeplitz(c, array(1))
assert_array_equal(t, [[1],[2],[3]])
def test_scalar_03(self):
c = array([1,2,3])
t = toeplitz(c, array([1]))
assert_array_equal(t, [[1],[2],[3]])
def test_scalar_04(self):
r = array([10,2,3])
t = toeplitz(1, r)
assert_array_equal(t, [[1,2,3]])
class TestHankel(TestCase):
def test_basic(self):
y = hankel([1,2,3])
assert_array_equal(y, [[1,2,3], [2,3,0], [3,0,0]])
y = hankel([1,2,3], [3,4,5])
assert_array_equal(y, [[1,2,3], [2,3,4], [3,4,5]])
class TestCirculant(TestCase):
def test_basic(self):
y = circulant([1,2,3])
assert_array_equal(y, [[1,3,2], [2,1,3], [3,2,1]])
class TestHadamard(TestCase):
def test_basic(self):
y = hadamard(1)
assert_array_equal(y, [[1]])
y = hadamard(2, dtype=float)
assert_array_equal(y, [[1.0, 1.0], [1.0, -1.0]])
y = hadamard(4)
assert_array_equal(y, [[1,1,1,1], [1,-1,1,-1], [1,1,-1,-1], [1,-1,-1,1]])
assert_raises(ValueError, hadamard, 0)
assert_raises(ValueError, hadamard, 5)
class TestLeslie(TestCase):
def test_bad_shapes(self):
assert_raises(ValueError, leslie, [[1,1],[2,2]], [3,4,5])
assert_raises(ValueError, leslie, [3,4,5], [[1,1],[2,2]])
assert_raises(ValueError, leslie, [1,2], [1,2])
assert_raises(ValueError, leslie, [1], [])
def test_basic(self):
a = leslie([1, 2, 3], [0.25, 0.5])
expected = array([
[1.0, 2.0, 3.0],
[0.25, 0.0, 0.0],
[0.0, 0.5, 0.0]])
assert_array_equal(a, expected)
class TestCompanion(TestCase):
def test_bad_shapes(self):
assert_raises(ValueError, companion, [[1,1],[2,2]])
assert_raises(ValueError, companion, [0,4,5])
assert_raises(ValueError, companion, [1])
assert_raises(ValueError, companion, [])
def test_basic(self):
c = companion([1, 2, 3])
expected = array([
[-2.0, -3.0],
[1.0, 0.0]])
assert_array_equal(c, expected)
c = companion([2.0, 5.0, -10.0])
expected = array([
[-2.5, 5.0],
[1.0, 0.0]])
assert_array_equal(c, expected)
class TestBlockDiag:
def test_basic(self):
x = block_diag(eye(2), [[1,2], [3,4], [5,6]], [[1, 2, 3]])
assert_array_equal(x, [[1, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 2, 0, 0, 0],
[0, 0, 3, 4, 0, 0, 0],
[0, 0, 5, 6, 0, 0, 0],
[0, 0, 0, 0, 1, 2, 3]])
def test_dtype(self):
x = block_diag([[1.5]])
assert_equal(x.dtype, float)
x = block_diag([[True]])
assert_equal(x.dtype, bool)
def test_mixed_dtypes(self):
actual = block_diag([[1]], [[1j]])
desired = np.array([[1, 0], [0, 1j]])
assert_array_equal(actual, desired)
def test_scalar_and_1d_args(self):
a = block_diag(1)
assert_equal(a.shape, (1,1))
assert_array_equal(a, [[1]])
a = block_diag([2,3], 4)
assert_array_equal(a, [[2, 3, 0], [0, 0, 4]])
def test_bad_arg(self):
assert_raises(ValueError, block_diag, [[[1]]])
def test_no_args(self):
a = block_diag()
assert_equal(a.ndim, 2)
assert_equal(a.nbytes, 0)
def test_empty_matrix_arg(self):
# regression test for gh-4596: check the shape of the result for empty matrix inputs
a = block_diag([[1, 0], [0, 1]],
[],
[[2, 3], [4, 5], [6, 7]])
assert_array_equal(a, [[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 2, 3],
[0, 0, 4, 5],
[0, 0, 6, 7]])
class TestKron:
def test_basic(self):
a = kron(array([[1, 2], [3, 4]]), array([[1, 1, 1]]))
assert_array_equal(a, array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]]))
m1 = array([[1, 2], [3, 4]])
m2 = array([[10], [11]])
a = kron(m1, m2)
expected = array([[10, 20],
[11, 22],
[30, 40],
[33, 44]])
assert_array_equal(a, expected)
class TestHelmert(TestCase):
def test_orthogonality(self):
for n in range(1, 7):
H = helmert(n, full=True)
I = np.eye(n)
assert_allclose(H.dot(H.T), I, atol=1e-12)
assert_allclose(H.T.dot(H), I, atol=1e-12)
def test_subspace(self):
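# The rows of the partial matrix (and rows 1..n-1 of the full matrix) span the subspace orthogonal to the all-ones vector, so U.dot(U.T) equals the centering matrix I - ones/n and U.T.dot(U) is the (n-1)-dimensional identity.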
for n in range(2, 7):
H_full = helmert(n, full=True)
H_partial = helmert(n)
for U in H_full[1:, :].T, H_partial.T:
C = np.eye(n) - np.ones((n, n)) / n
assert_allclose(U.dot(U.T), C)
assert_allclose(U.T.dot(U), np.eye(n-1), atol=1e-12)
class TestHilbert(TestCase):
def test_basic(self):
h3 = array([[1.0, 1/2., 1/3.],
[1/2., 1/3., 1/4.],
[1/3., 1/4., 1/5.]])
assert_array_almost_equal(hilbert(3), h3)
assert_array_equal(hilbert(1), [[1.0]])
h0 = hilbert(0)
assert_equal(h0.shape, (0,0))
class TestInvHilbert(TestCase):
def test_basic(self):
invh1 = array([[1]])
assert_array_equal(invhilbert(1, exact=True), invh1)
assert_array_equal(invhilbert(1), invh1)
invh2 = array([[4, -6],
[-6, 12]])
assert_array_equal(invhilbert(2, exact=True), invh2)
assert_array_almost_equal(invhilbert(2), invh2)
invh3 = array([[9, -36, 30],
[-36, 192, -180],
[30, -180, 180]])
assert_array_equal(invhilbert(3, exact=True), invh3)
assert_array_almost_equal(invhilbert(3), invh3)
invh4 = array([[16, -120, 240, -140],
[-120, 1200, -2700, 1680],
[240, -2700, 6480, -4200],
[-140, 1680, -4200, 2800]])
assert_array_equal(invhilbert(4, exact=True), invh4)
assert_array_almost_equal(invhilbert(4), invh4)
invh5 = array([[25, -300, 1050, -1400, 630],
[-300, 4800, -18900, 26880, -12600],
[1050, -18900, 79380, -117600, 56700],
[-1400, 26880, -117600, 179200, -88200],
[630, -12600, 56700, -88200, 44100]])
assert_array_equal(invhilbert(5, exact=True), invh5)
assert_array_almost_equal(invhilbert(5), invh5)
invh17 = array([
[289, -41616, 1976760, -46124400, 629598060, -5540462928,
33374693352, -143034400080, 446982500250, -1033026222800,
1774926873720, -2258997839280, 2099709530100, -1384423866000,
613101997800, -163493866080, 19835652870],
[-41616, 7990272, -426980160, 10627061760, -151103534400, 1367702848512,
-8410422724704, 36616806420480, -115857864064800, 270465047424000,
-468580694662080, 600545887119360, -561522320049600, 372133135180800,
-165537539406000, 44316454993920, -5395297580640],
[1976760, -426980160, 24337869120, -630981792000, 9228108708000,
-85267724461920, 532660105897920, -2348052711713280, 7504429831470000,
-17664748409880000, 30818191841236800, -39732544853164800,
37341234283298400, -24857330514030000, 11100752642520000,
-2982128117299200, 364182586693200],
[-46124400, 10627061760, -630981792000, 16826181120000,
-251209625940000, 2358021022156800, -14914482965141760,
66409571644416000, -214015221119700000, 507295338950400000,
-890303319857952000, 1153715376477081600, -1089119333262870000,
727848632044800000, -326170262829600000, 87894302404608000,
-10763618673376800],
[629598060, -151103534400, 9228108708000,
-251209625940000, 3810012660090000, -36210360321495360,
231343968720664800, -1038687206500944000, 3370739732635275000,
-8037460526495400000, 14178080368737885600, -18454939322943942000,
17489975175339030000, -11728977435138600000, 5272370630081100000,
-1424711708039692800, 174908803442373000],
[-5540462928, 1367702848512, -85267724461920, 2358021022156800,
-36210360321495360, 347619459086355456, -2239409617216035264,
10124803292907663360, -33052510749726468000, 79217210949138662400,
-140362995650505067440, 183420385176741672960, -174433352415381259200,
117339159519533952000, -52892422160973595200, 14328529177999196160,
-1763080738699119840],
[33374693352, -8410422724704, 532660105897920,
-14914482965141760, 231343968720664800, -2239409617216035264,
14527452132196331328, -66072377044391477760, 216799987176909536400,
-521925895055522958000, 928414062734059661760, -1217424500995626443520,
1161358898976091015200, -783401860847777371200, 354015418167362952000,
-96120549902411274240, 11851820521255194480],
[-143034400080, 36616806420480, -2348052711713280, 66409571644416000,
-1038687206500944000, 10124803292907663360, -66072377044391477760,
302045152202932469760, -995510145200094810000, 2405996923185123840000,
-4294704507885446054400, 5649058909023744614400,
-5403874060541811254400, 3654352703663101440000,
-1655137020003255360000, 450325202737117593600, -55630994283442749600],
[446982500250, -115857864064800, 7504429831470000, -214015221119700000,
3370739732635275000, -33052510749726468000, 216799987176909536400,
-995510145200094810000, 3293967392206196062500,
-7988661659013106500000, 14303908928401362270000,
-18866974090684772052000, 18093328327706957325000,
-12263364009096700500000, 5565847995255512250000,
-1517208935002984080000, 187754605706619279900],
[-1033026222800, 270465047424000, -17664748409880000,
507295338950400000, -8037460526495400000, 79217210949138662400,
-521925895055522958000, 2405996923185123840000,
-7988661659013106500000, 19434404971634224000000,
-34894474126569249192000, 46141453390504792320000,
-44349976506971935800000, 30121928988527376000000,
-13697025107665828500000, 3740200989399948902400,
-463591619028689580000],
[1774926873720, -468580694662080,
30818191841236800, -890303319857952000, 14178080368737885600,
-140362995650505067440, 928414062734059661760, -4294704507885446054400,
14303908928401362270000, -34894474126569249192000,
62810053427824648545600, -83243376594051600326400,
80177044485212743068000, -54558343880470209780000,
24851882355348879230400, -6797096028813368678400, 843736746632215035600],
[-2258997839280, 600545887119360, -39732544853164800,
1153715376477081600, -18454939322943942000, 183420385176741672960,
-1217424500995626443520, 5649058909023744614400,
-18866974090684772052000, 46141453390504792320000,
-83243376594051600326400, 110552468520163390156800,
-106681852579497947388000, 72720410752415168870400,
-33177973900974346080000, 9087761081682520473600,
-1129631016152221783200],
[2099709530100, -561522320049600, 37341234283298400,
-1089119333262870000, 17489975175339030000, -174433352415381259200,
1161358898976091015200, -5403874060541811254400,
18093328327706957325000, -44349976506971935800000,
80177044485212743068000, -106681852579497947388000,
103125790826848015808400, -70409051543137015800000,
32171029219823375700000, -8824053728865840192000,
1098252376814660067000],
[-1384423866000, 372133135180800,
-24857330514030000, 727848632044800000, -11728977435138600000,
117339159519533952000, -783401860847777371200, 3654352703663101440000,
-12263364009096700500000, 30121928988527376000000,
-54558343880470209780000, 72720410752415168870400,
-70409051543137015800000, 48142941226076592000000,
-22027500987368499000000, 6049545098753157120000,
-753830033789944188000],
[613101997800, -165537539406000,
11100752642520000, -326170262829600000, 5272370630081100000,
-52892422160973595200, 354015418167362952000, -1655137020003255360000,
5565847995255512250000, -13697025107665828500000,
24851882355348879230400, -33177973900974346080000,
32171029219823375700000, -22027500987368499000000,
10091416708498869000000, -2774765838662800128000, 346146444087219270000],
[-163493866080, 44316454993920, -2982128117299200, 87894302404608000,
-1424711708039692800, 14328529177999196160, -96120549902411274240,
450325202737117593600, -1517208935002984080000, 3740200989399948902400,
-6797096028813368678400, 9087761081682520473600,
-8824053728865840192000, 6049545098753157120000,
-2774765838662800128000, 763806510427609497600, -95382575704033754400],
[19835652870, -5395297580640, 364182586693200, -10763618673376800,
174908803442373000, -1763080738699119840, 11851820521255194480,
-55630994283442749600, 187754605706619279900, -463591619028689580000,
843736746632215035600, -1129631016152221783200, 1098252376814660067000,
-753830033789944188000, 346146444087219270000, -95382575704033754400,
11922821963004219300]
])
assert_array_equal(invhilbert(17, exact=True), invh17)
assert_allclose(invhilbert(17), invh17.astype(float), rtol=1e-12)
def test_inverse(self):
for n in xrange(1, 10):
a = hilbert(n)
b = invhilbert(n)
# The Hilbert matrix is increasingly badly conditioned,
# so take that into account in the test
c = cond(a)
assert_allclose(a.dot(b), eye(n), atol=1e-15*c, rtol=1e-15*c)
class TestPascal(TestCase):
cases = [
(1, array([[1]]), array([[1]])),
(2, array([[1, 1],
[1, 2]]),
array([[1, 0],
[1, 1]])),
(3, array([[1, 1, 1],
[1, 2, 3],
[1, 3, 6]]),
array([[1, 0, 0],
[1, 1, 0],
[1, 2, 1]])),
(4, array([[1, 1, 1, 1],
[1, 2, 3, 4],
[1, 3, 6, 10],
[1, 4, 10, 20]]),
array([[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 2, 1, 0],
[1, 3, 3, 1]])),
]
def check_case(self, n, sym, low):
assert_array_equal(pascal(n), sym)
assert_array_equal(pascal(n, kind='lower'), low)
assert_array_equal(pascal(n, kind='upper'), low.T)
assert_array_almost_equal(pascal(n, exact=False), sym)
assert_array_almost_equal(pascal(n, exact=False, kind='lower'), low)
assert_array_almost_equal(pascal(n, exact=False, kind='upper'), low.T)
def test_cases(self):
for n, sym, low in self.cases:
self.check_case(n, sym, low)
def test_big(self):
p = pascal(50)
assert_equal(p[-1, -1], comb(98, 49, exact=True))
def test_threshold(self):
# Regression test. An early version of `pascal` returned an
# array of type np.uint64 for n=35, but that data type is too small
# to hold p[-1, -1]. The second assert_equal below would fail
# because p[-1, -1] overflowed.
p = pascal(34)
assert_equal(2*p.item(-1, -2), p.item(-1, -1), err_msg="n = 34")
p = pascal(35)
assert_equal(2*p.item(-1, -2), p.item(-1, -1), err_msg="n = 35")
def test_invpascal():
def check_invpascal(n, kind, exact):
ip = invpascal(n, kind=kind, exact=exact)
p = pascal(n, kind=kind, exact=exact)
# Matrix-multiply ip and p, and check that we get the identity matrix.
# We can't use the simple expression e = ip.dot(p), because when
# n < 35 and exact is True, p.dtype is np.uint64 and ip.dtype is
# np.int64. The product of those dtypes is np.float64, which loses
# precision when n is greater than 18. Instead we'll cast both to
# object arrays, and then multiply.
e = ip.astype(object).dot(p.astype(object))
assert_array_equal(e, eye(n), err_msg="n=%d kind=%r exact=%r" %
(n, kind, exact))
kinds = ['symmetric', 'lower', 'upper']
ns = [1, 2, 5, 18]
for n in ns:
for kind in kinds:
for exact in [True, False]:
yield check_invpascal, n, kind, exact
ns = [19, 34, 35, 50]
for n in ns:
for kind in kinds:
yield check_invpascal, n, kind, True
def test_dft():
m = dft(2)
expected = array([[1.0, 1.0], [1.0, -1.0]])
yield (assert_array_almost_equal, m, expected)
m = dft(2, scale='n')
yield (assert_array_almost_equal, m, expected/2.0)
m = dft(2, scale='sqrtn')
yield (assert_array_almost_equal, m, expected/sqrt(2.0))
x = array([0, 1, 2, 3, 4, 5, 0, 1])
m = dft(8)
mx = m.dot(x)
fx = fft(x)
yield (assert_array_almost_equal, mx, fx)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | 662,025,206,139,913,100 | 38.041096 | 92 | 0.539649 | false |
leedm777/ansible-modules-core | files/acl.py | 47 | 9778 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: acl
version_added: "1.4"
short_description: Sets and retrieves file ACL information.
description:
- Sets and retrieves file ACL information.
options:
name:
required: true
default: null
description:
- The full path of the file or object.
aliases: ['path']
state:
required: false
default: query
choices: [ 'query', 'present', 'absent' ]
description:
- defines whether the ACL should be present or not. The C(query) state gets the current acl without changing it, for use in 'register' operations.
follow:
required: false
default: yes
choices: [ 'yes', 'no' ]
description:
- whether to follow symlinks on the path if a symlink is encountered.
default:
version_added: "1.5"
required: false
default: no
choices: [ 'yes', 'no' ]
description:
- if the target is a directory, setting this to yes will make it the default acl for entities created inside the directory. It causes an error if name is a file.
entity:
version_added: "1.5"
required: false
description:
- actual user or group that the ACL applies to when matching entity types user or group are selected.
etype:
version_added: "1.5"
required: false
default: null
choices: [ 'user', 'group', 'mask', 'other' ]
description:
- the entity type of the ACL to apply, see setfacl documentation for more info.
permissions:
version_added: "1.5"
required: false
default: null
description:
- Permissions to apply/remove can be any combination of r, w and x (read, write and execute respectively)
entry:
required: false
default: null
description:
- DEPRECATED. The acl to set or remove. This must always be quoted in the form of '<etype>:<qualifier>:<perms>'. The qualifier may be empty for some types, but the type and perms are always required. '-' can be used as a placeholder when you do not care about permissions. This is now superseded by entity, type and permissions fields.
author: "Brian Coca (@bcoca)"
notes:
- The "acl" module requires that acls are enabled on the target filesystem and that the setfacl and getfacl binaries are installed.
'''
EXAMPLES = '''
# Grant user Joe read access to a file
- acl: name=/etc/foo.conf entity=joe etype=user permissions="r" state=present
# Removes the acl for Joe on a specific file
- acl: name=/etc/foo.conf entity=joe etype=user state=absent
# Sets default acl for joe on foo.d
- acl: name=/etc/foo.d entity=joe etype=user permissions=rw default=yes state=present
# Same as previous but using entry shorthand
- acl: name=/etc/foo.d entry="default:user:joe:rw-" state=present
# Obtain the acl for a specific file
- acl: name=/etc/foo.conf
register: acl_info
'''
RETURN = '''
acl:
description: Current acl on provided path (after changes, if any)
returned: success
type: list
sample: [ "user::rwx", "group::rwx", "other::rwx" ]
'''
def normalize_permissions(p):
perms = ['-','-','-']
for char in p:
if char == 'r':
perms[0] = 'r'
if char == 'w':
perms[1] = 'w'
if char == 'x':
perms[2] = 'x'
if char == 'X':
if perms[2] != 'x': # 'x' is more permissive
perms[2] = 'X'
return ''.join(perms)
def split_entry(entry):
''' splits entry and ensures normalized return'''
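# An entry has the form "[default:]<etype>:<entity>:<perms>". After splitting on ':' and reversing, the pieces are [perms, entity, etype, default]; a missing default field is padded with False so the unpacking below always gets four values.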
a = entry.split(':')
a.reverse()
if len(a) == 3:
a.append(False)
try:
p,e,t,d = a
except ValueError:
# Malformed entry; re-raise without printing to stdout, which would corrupt the module's JSON output.
raise
if d:
d = True
if t.startswith("u"):
t = "user"
elif t.startswith("g"):
t = "group"
elif t.startswith("m"):
t = "mask"
elif t.startswith("o"):
t = "other"
else:
t = None
p = normalize_permissions(p)
return [d,t,e,p]
def get_acls(module,path,follow):
cmd = [ module.get_bin_path('getfacl', True) ]
if not follow:
cmd.append('-h')
# prevents absolute path warnings and removes headers
cmd.append('--omit-header')
cmd.append('--absolute-names')
cmd.append(path)
return _run_acl(module,cmd)
def set_acl(module,path,entry,follow,default):
cmd = [ module.get_bin_path('setfacl', True) ]
if not follow:
cmd.append('-h')
if default:
cmd.append('-d')
cmd.append('-m "%s"' % entry)
cmd.append(path)
return _run_acl(module,cmd)
def rm_acl(module,path,entry,follow,default):
cmd = [ module.get_bin_path('setfacl', True) ]
if not follow:
cmd.append('-h')
if default:
cmd.append('-k')
entry = entry[0:entry.rfind(':')]
cmd.append('-x "%s"' % entry)
cmd.append(path)
return _run_acl(module,cmd,False)
def _run_acl(module,cmd,check_rc=True):
try:
(rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc)
except Exception, e:
module.fail_json(msg=e.strerror)
# trim last line as it is always empty
ret = out.splitlines()
return ret[0:len(ret)-1]
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True,aliases=['path'], type='str'),
entry = dict(required=False, type='str'),
entity = dict(required=False, type='str', default=''),
etype = dict(required=False, choices=['other', 'user', 'group', 'mask'], type='str'),
permissions = dict(required=False, type='str'),
state = dict(required=False, default='query', choices=[ 'query', 'present', 'absent' ], type='str'),
follow = dict(required=False, type='bool', default=True),
default= dict(required=False, type='bool', default=False),
),
supports_check_mode=True,
)
path = os.path.expanduser(module.params.get('name'))
entry = module.params.get('entry')
entity = module.params.get('entity')
etype = module.params.get('etype')
permissions = module.params.get('permissions')
state = module.params.get('state')
follow = module.params.get('follow')
default = module.params.get('default')
if permissions:
permissions = normalize_permissions(permissions)
if not os.path.exists(path):
module.fail_json(msg="path not found or not accessible!")
if state in ['present','absent']:
if not entry and not etype:
module.fail_json(msg="%s requires either etype and permissions or just entry be set" % state)
if entry:
if etype or entity or permissions:
module.fail_json(msg="entry and another incompatible field (entity, etype or permissions) are also set")
if entry.count(":") not in [2,3]:
module.fail_json(msg="Invalid entry: '%s', it requires 3 or 4 sections divided by ':'" % entry)
default, etype, entity, permissions = split_entry(entry)
changed=False
msg = ""
currentacls = get_acls(module,path,follow)
if (state == 'present'):
matched = False
for oldentry in currentacls:
if oldentry.count(":") == 0:
continue
old_default, old_type, old_entity, old_permissions = split_entry(oldentry)
if old_default == default:
if old_type == etype:
if etype in ['user', 'group']:
if old_entity == entity:
matched = True
if not old_permissions == permissions:
changed = True
break
else:
matched = True
if not old_permissions == permissions:
changed = True
break
if not matched:
changed=True
if changed and not module.check_mode:
set_acl(module,path,':'.join([etype, str(entity), permissions]),follow,default)
msg="%s is present" % ':'.join([etype, str(entity), permissions])
elif state == 'absent':
for oldentry in currentacls:
if oldentry.count(":") == 0:
continue
old_default, old_type, old_entity, old_permissions = split_entry(oldentry)
if old_default == default:
if old_type == etype:
if etype in ['user', 'group']:
if old_entity == entity:
changed=True
break
else:
changed=True
break
if changed and not module.check_mode:
rm_acl(module,path,':'.join([etype, entity, '---']),follow,default)
msg="%s is absent" % ':'.join([etype, entity, '---'])
else:
msg="current acl"
if changed:
currentacls = get_acls(module,path,follow)
module.exit_json(changed=changed, msg=msg, acl=currentacls)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | -573,065,690,727,570,600 | 30.954248 | 341 | 0.595521 | false |
tashaxe/Red-DiscordBot | lib/discord/ext/commands/bot.py | 17 | 27409 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2016 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import discord
import inspect
import importlib
import sys
import traceback
import re
from .core import GroupMixin, Command, command
from .view import StringView
from .context import Context
from .errors import CommandNotFound, CommandError
from .formatter import HelpFormatter
def _get_variable(name):
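# Walk up the caller's stack frames looking for a local variable with the given name; the say/whisper/reply/upload/type helpers use this to recover the channel and author of the message being processed.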
stack = inspect.stack()
try:
for frames in stack:
try:
frame = frames[0]
current_locals = frame.f_locals
if name in current_locals:
return current_locals[name]
finally:
del frame
finally:
del stack
def when_mentioned(bot, msg):
"""A callable that implements a command prefix equivalent
to being mentioned, e.g. ``@bot ``."""
server = msg.server
if server is not None:
return '{0.me.mention} '.format(server)
return '{0.user.mention} '.format(bot)
def when_mentioned_or(*prefixes):
"""A callable that implements when mentioned or other prefixes provided.
Example
--------
.. code-block:: python
bot = commands.Bot(command_prefix=commands.when_mentioned_or('!'))
See Also
----------
:func:`when_mentioned`
"""
def inner(bot, msg):
r = list(prefixes)
r.append(when_mentioned(bot, msg))
return r
return inner
_mentions_transforms = {
'@everyone': '@\u200beveryone',
'@here': '@\u200bhere'
}
_mention_pattern = re.compile('|'.join(_mentions_transforms.keys()))
@asyncio.coroutine
def _default_help_command(ctx, *commands : str):
"""Shows this message."""
bot = ctx.bot
destination = ctx.message.author if bot.pm_help else ctx.message.channel
def repl(obj):
return _mentions_transforms.get(obj.group(0), '')
# help by itself just lists our own commands.
if len(commands) == 0:
pages = bot.formatter.format_help_for(ctx, bot)
elif len(commands) == 1:
# try to see if it is a cog name
name = _mention_pattern.sub(repl, commands[0])
command = None
if name in bot.cogs:
command = bot.cogs[name]
else:
command = bot.commands.get(name)
if command is None:
yield from bot.send_message(destination, bot.command_not_found.format(name))
return
pages = bot.formatter.format_help_for(ctx, command)
else:
name = _mention_pattern.sub(repl, commands[0])
command = bot.commands.get(name)
if command is None:
yield from bot.send_message(destination, bot.command_not_found.format(name))
return
for key in commands[1:]:
try:
key = _mention_pattern.sub(repl, key)
command = command.commands.get(key)
if command is None:
yield from bot.send_message(destination, bot.command_not_found.format(key))
return
except AttributeError:
yield from bot.send_message(destination, bot.command_has_no_subcommands.format(command, key))
return
pages = bot.formatter.format_help_for(ctx, command)
if bot.pm_help is None:
characters = sum(map(lambda l: len(l), pages))
# modify destination based on length of pages.
if characters > 1000:
destination = ctx.message.author
for page in pages:
yield from bot.send_message(destination, page)
class Bot(GroupMixin, discord.Client):
"""Represents a discord bot.
This class is a subclass of :class:`discord.Client` and as a result
anything that you can do with a :class:`discord.Client` you can do with
this bot.
This class also subclasses :class:`GroupMixin` to provide the functionality
to manage commands.
Attributes
-----------
command_prefix
The command prefix is what the message content must contain initially
to have a command invoked. This prefix could either be a string to
indicate what the prefix should be, or a callable that takes in the bot
as its first parameter and :class:`discord.Message` as its second
parameter and returns the prefix. This is to facilitate "dynamic"
command prefixes. This callable can be either a regular function or
a coroutine.
The command prefix could also be a list or a tuple indicating that
multiple checks for the prefix should be used and the first one to
match will be the invocation prefix. You can get this prefix via
:attr:`Context.prefix`.
description : str
The content prefixed into the default help message.
self_bot : bool
If ``True``, the bot will only listen to commands invoked by itself rather
than ignoring itself. If ``False`` (the default) then the bot will ignore
itself. This cannot be changed once initialised.
formatter : :class:`HelpFormatter`
The formatter used to format the help message. By default, it uses a
the :class:`HelpFormatter`. Check it for more info on how to override it.
If you want to change the help command completely (add aliases, etc) then
a call to :meth:`remove_command` with 'help' as the argument would do the
trick.
pm_help : Optional[bool]
A tribool that indicates if the help command should PM the user instead of
sending it to the channel it received it from. If the boolean is set to
``True``, then all help output is PM'd. If ``False``, none of the help
output is PM'd. If ``None``, then the bot will only PM when the help
message becomes too long (dictated by more than 1000 characters).
Defaults to ``False``.
help_attrs : dict
A dictionary of options to pass in for the construction of the help command.
This allows you to change the command behaviour without actually changing
the implementation of the command. The attributes will be the same as the
ones passed in the :class:`Command` constructor. Note that ``pass_context``
will always be set to ``True`` regardless of what you pass in.
command_not_found : str
The format string used when the help command is invoked with a command that
is not found. Useful for i18n. Defaults to ``"No command called {} found."``.
The only format argument is the name of the command passed.
command_has_no_subcommands : str
The format string used when the help command is invoked with requests for a
subcommand but the command does not have any subcommands. Defaults to
``"Command {0.name} has no subcommands."``. The first format argument is the
:class:`Command` attempted to get a subcommand and the second is the name.
"""
def __init__(self, command_prefix, formatter=None, description=None, pm_help=False, **options):
super().__init__(**options)
self.command_prefix = command_prefix
self.extra_events = {}
self.cogs = {}
self.extensions = {}
self._checks = []
self.description = inspect.cleandoc(description) if description else ''
self.pm_help = pm_help
self.command_not_found = options.pop('command_not_found', 'No command called "{}" found.')
self.command_has_no_subcommands = options.pop('command_has_no_subcommands', 'Command {0.name} has no subcommands.')
self._skip_check = discord.User.__ne__ if options.pop('self_bot', False) else discord.User.__eq__
self.help_attrs = options.pop('help_attrs', {})
self.help_attrs['pass_context'] = True
if 'name' not in self.help_attrs:
self.help_attrs['name'] = 'help'
if formatter is not None:
if not isinstance(formatter, HelpFormatter):
raise discord.ClientException('Formatter must be a subclass of HelpFormatter')
self.formatter = formatter
else:
self.formatter = HelpFormatter()
# pay no mind to this ugliness.
self.command(**self.help_attrs)(_default_help_command)
# internal helpers
@asyncio.coroutine
def _get_prefix(self, message):
prefix = self.command_prefix
if callable(prefix):
ret = prefix(self, message)
if asyncio.iscoroutine(ret):
ret = yield from ret
return ret
else:
return prefix
@asyncio.coroutine
def _run_extra(self, coro, event_name, *args, **kwargs):
try:
yield from coro(*args, **kwargs)
except asyncio.CancelledError:
pass
except Exception:
try:
yield from self.on_error(event_name, *args, **kwargs)
except asyncio.CancelledError:
pass
def dispatch(self, event_name, *args, **kwargs):
super().dispatch(event_name, *args, **kwargs)
ev = 'on_' + event_name
if ev in self.extra_events:
for event in self.extra_events[ev]:
coro = self._run_extra(event, event_name, *args, **kwargs)
discord.compat.create_task(coro, loop=self.loop)
@asyncio.coroutine
def close(self):
for extension in tuple(self.extensions):
try:
self.unload_extension(extension)
except:
pass
for cog in tuple(self.cogs):
try:
self.remove_cog(cog)
except:
pass
yield from super().close()
@asyncio.coroutine
def on_command_error(self, exception, context):
"""|coro|
The default command error handler provided by the bot.
By default this prints to ``sys.stderr`` however it could be
overridden to have a different implementation.
This only fires if you do not specify any listeners for command error.
"""
if self.extra_events.get('on_command_error', None):
return
if hasattr(context.command, "on_error"):
return
print('Ignoring exception in command {}'.format(context.command), file=sys.stderr)
traceback.print_exception(type(exception), exception, exception.__traceback__, file=sys.stderr)
# utility "send_*" functions
@asyncio.coroutine
def _augmented_msg(self, coro, **kwargs):
msg = yield from coro
delete_after = kwargs.get('delete_after')
if delete_after is not None:
@asyncio.coroutine
def delete():
yield from asyncio.sleep(delete_after, loop=self.loop)
yield from self.delete_message(msg)
discord.compat.create_task(delete(), loop=self.loop)
return msg
def say(self, *args, **kwargs):
"""|coro|
A helper function that is equivalent to doing
.. code-block:: python
self.send_message(message.channel, *args, **kwargs)
The following keyword arguments are "extensions" that augment the
behaviour of the standard wrapped call.
Parameters
------------
delete_after: float
Number of seconds to wait before automatically deleting the
message.
See Also
---------
:meth:`Client.send_message`
"""
destination = _get_variable('_internal_channel')
extensions = ('delete_after',)
params = {
k: kwargs.pop(k, None) for k in extensions
}
coro = self.send_message(destination, *args, **kwargs)
return self._augmented_msg(coro, **params)
def whisper(self, *args, **kwargs):
"""|coro|
A helper function that is equivalent to doing
.. code-block:: python
self.send_message(message.author, *args, **kwargs)
The following keyword arguments are "extensions" that augment the
behaviour of the standard wrapped call.
Parameters
------------
delete_after: float
Number of seconds to wait before automatically deleting the
message.
See Also
---------
:meth:`Client.send_message`
"""
destination = _get_variable('_internal_author')
extensions = ('delete_after',)
params = {
k: kwargs.pop(k, None) for k in extensions
}
coro = self.send_message(destination, *args, **kwargs)
return self._augmented_msg(coro, **params)
def reply(self, content, *args, **kwargs):
"""|coro|
A helper function that is equivalent to doing
.. code-block:: python
msg = '{0.mention}, {1}'.format(message.author, content)
self.send_message(message.channel, msg, *args, **kwargs)
The following keyword arguments are "extensions" that augment the
behaviour of the standard wrapped call.
Parameters
------------
delete_after: float
Number of seconds to wait before automatically deleting the
message.
See Also
---------
:meth:`Client.send_message`
"""
author = _get_variable('_internal_author')
destination = _get_variable('_internal_channel')
fmt = '{0.mention}, {1}'.format(author, str(content))
extensions = ('delete_after',)
params = {
k: kwargs.pop(k, None) for k in extensions
}
coro = self.send_message(destination, fmt, *args, **kwargs)
return self._augmented_msg(coro, **params)
def upload(self, *args, **kwargs):
"""|coro|
A helper function that is equivalent to doing
.. code-block:: python
self.send_file(message.channel, *args, **kwargs)
The following keyword arguments are "extensions" that augment the
behaviour of the standard wrapped call.
Parameters
------------
delete_after: float
Number of seconds to wait before automatically deleting the
message.
See Also
---------
:meth:`Client.send_file`
"""
destination = _get_variable('_internal_channel')
extensions = ('delete_after',)
params = {
k: kwargs.pop(k, None) for k in extensions
}
coro = self.send_file(destination, *args, **kwargs)
return self._augmented_msg(coro, **params)
def type(self):
"""|coro|
A helper function that is equivalent to doing
.. code-block:: python
self.send_typing(message.channel)
See Also
---------
The :meth:`Client.send_typing` function.
"""
destination = _get_variable('_internal_channel')
return self.send_typing(destination)
# global check registration
def check(self, func):
"""A decorator that adds a global check to the bot.
A global check is similar to a :func:`check` that is applied
on a per command basis except it is run before any command checks
have been verified and applies to every command the bot has.
.. warning::
This function must be a *regular* function and not a coroutine.
Similar to a command :func:`check`\, this takes a single parameter
of type :class:`Context` and can only raise exceptions derived from
:exc:`CommandError`.
Example
---------
.. code-block:: python
@bot.check
def whitelist(ctx):
return ctx.message.author.id in my_whitelist
"""
self.add_check(func)
return func
def add_check(self, func):
"""Adds a global check to the bot.
This is the non-decorator interface to :meth:`check`.
Parameters
-----------
func
The function that was used as a global check.
"""
self._checks.append(func)
def remove_check(self, func):
"""Removes a global check from the bot.
This function is idempotent and will not raise an exception
if the function is not in the global checks.
Parameters
-----------
func
The function to remove from the global checks.
"""
try:
self._checks.remove(func)
except ValueError:
pass
def can_run(self, ctx):
return all(f(ctx) for f in self._checks)
# listener registration
def add_listener(self, func, name=None):
"""The non decorator alternative to :meth:`listen`.
Parameters
-----------
func : coroutine
The extra event to listen to.
name : Optional[str]
The name of the command to use. Defaults to ``func.__name__``.
Example
--------
.. code-block:: python
async def on_ready(): pass
async def my_message(message): pass
bot.add_listener(on_ready)
bot.add_listener(my_message, 'on_message')
"""
name = func.__name__ if name is None else name
if not asyncio.iscoroutinefunction(func):
raise discord.ClientException('Listeners must be coroutines')
if name in self.extra_events:
self.extra_events[name].append(func)
else:
self.extra_events[name] = [func]
def remove_listener(self, func, name=None):
"""Removes a listener from the pool of listeners.
Parameters
-----------
func
The function that was used as a listener to remove.
name
The name of the event we want to remove. Defaults to
``func.__name__``.
"""
name = func.__name__ if name is None else name
if name in self.extra_events:
try:
self.extra_events[name].remove(func)
except ValueError:
pass
def listen(self, name=None):
"""A decorator that registers another function as an external
event listener. Basically this allows you to listen to multiple
events from different places e.g. such as :func:`discord.on_ready`
The functions being listened to must be a coroutine.
Example
--------
.. code-block:: python
@bot.listen()
async def on_message(message):
print('one')
# in some other file...
@bot.listen('on_message')
async def my_message(message):
print('two')
Would print one and two in an unspecified order.
Raises
-------
discord.ClientException
The function being listened to is not a coroutine.
"""
def decorator(func):
self.add_listener(func, name)
return func
return decorator
# cogs
def add_cog(self, cog):
"""Adds a "cog" to the bot.
A cog is a class that has its own event listeners and commands.
They are meant as a way to organize multiple relevant commands
into a singular class that shares some state or no state at all.
The cog can also have a ``__check`` member function that allows
you to define a global check. See :meth:`check` for more info.
More information will be documented soon.
Parameters
-----------
cog
The cog to register to the bot.
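Example
---------
A minimal sketch of a cog (assumes ``from discord.ext import commands``;
``Greetings`` and its command are illustrative names only):
.. code-block:: python
    class Greetings:
        def __init__(self, bot):
            self.bot = bot
        @commands.command()
        async def hello(self):
            await self.bot.say('Hello!')
    bot.add_cog(Greetings(bot))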
"""
self.cogs[type(cog).__name__] = cog
try:
check = getattr(cog, '_{.__class__.__name__}__check'.format(cog))
except AttributeError:
pass
else:
self.add_check(check)
members = inspect.getmembers(cog)
for name, member in members:
# register commands the cog has
if isinstance(member, Command):
if member.parent is None:
self.add_command(member)
continue
# register event listeners the cog has
if name.startswith('on_'):
self.add_listener(member)
def get_cog(self, name):
"""Gets the cog instance requested.
If the cog is not found, ``None`` is returned instead.
Parameters
-----------
name : str
The name of the cog you are requesting.
"""
return self.cogs.get(name)
def remove_cog(self, name):
"""Removes a cog from the bot.
All registered commands and event listeners that the
cog has registered will be removed as well.
If no cog is found then ``None`` is returned, otherwise
the cog instance that is being removed is returned.
If the cog defines a special member function named ``__unload``
then it is called when removal has completed. This function
**cannot** be a coroutine. It must be a regular function.
Parameters
-----------
name : str
The name of the cog to remove.
"""
cog = self.cogs.pop(name, None)
if cog is None:
return cog
members = inspect.getmembers(cog)
for name, member in members:
# remove commands the cog has
if isinstance(member, Command):
if member.parent is None:
self.remove_command(member.name)
continue
# remove event listeners the cog has
if name.startswith('on_'):
self.remove_listener(member)
try:
check = getattr(cog, '_{0.__class__.__name__}__check'.format(cog))
except AttributeError:
pass
else:
self.remove_check(check)
unloader_name = '_{0.__class__.__name__}__unload'.format(cog)
try:
unloader = getattr(cog, unloader_name)
except AttributeError:
pass
else:
unloader()
del cog
# extensions
def load_extension(self, name):
if name in self.extensions:
return
lib = importlib.import_module(name)
if not hasattr(lib, 'setup'):
del lib
del sys.modules[name]
raise discord.ClientException('extension does not have a setup function')
lib.setup(self)
self.extensions[name] = lib
def unload_extension(self, name):
lib = self.extensions.get(name)
if lib is None:
return
# find all references to the module
# remove the cogs registered from the module
for cogname, cog in self.cogs.copy().items():
if inspect.getmodule(cog) is lib:
self.remove_cog(cogname)
# first remove all the commands from the module
for command in self.commands.copy().values():
if command.module is lib:
command.module = None
if isinstance(command, GroupMixin):
command.recursively_remove_all_commands()
self.remove_command(command.name)
# then remove all the listeners from the module
for event_list in self.extra_events.copy().values():
remove = []
for index, event in enumerate(event_list):
if inspect.getmodule(event) is lib:
remove.append(index)
for index in reversed(remove):
del event_list[index]
try:
func = getattr(lib, 'teardown')
except AttributeError:
pass
else:
try:
func(self)
except:
pass
finally:
# finally remove the import..
del lib
del self.extensions[name]
del sys.modules[name]
# command processing
@asyncio.coroutine
def process_commands(self, message):
"""|coro|
This function processes the commands that have been registered
to the bot and other groups. Without this coroutine, none of the
commands will be triggered.
By default, this coroutine is called inside the :func:`on_message`
event. If you choose to override the :func:`on_message` event, then
you should invoke this coroutine as well.
Warning
--------
This function is necessary for :meth:`say`, :meth:`whisper`,
:meth:`type`, :meth:`reply`, and :meth:`upload` to work due to the
way they are written. It is also required for the :func:`on_command`
and :func:`on_command_completion` events.
Parameters
-----------
message : discord.Message
The message to process commands for.
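Example
--------
A minimal sketch of an :func:`on_message` override that still lets
commands run (the ``print`` call is only a placeholder for your own
handling):
.. code-block:: python
    @bot.event
    async def on_message(message):
        print(message.content)
        await bot.process_commands(message)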
"""
_internal_channel = message.channel
_internal_author = message.author
view = StringView(message.content)
if self._skip_check(message.author, self.user):
return
prefix = yield from self._get_prefix(message)
invoked_prefix = prefix
if not isinstance(prefix, (tuple, list)):
if not view.skip_string(prefix):
return
else:
invoked_prefix = discord.utils.find(view.skip_string, prefix)
if invoked_prefix is None:
return
invoker = view.get_word()
tmp = {
'bot': self,
'invoked_with': invoker,
'message': message,
'view': view,
'prefix': invoked_prefix
}
ctx = Context(**tmp)
del tmp
if invoker in self.commands:
command = self.commands[invoker]
self.dispatch('command', command, ctx)
try:
yield from command.invoke(ctx)
except CommandError as e:
ctx.command.dispatch_error(e, ctx)
else:
self.dispatch('command_completion', command, ctx)
elif invoker:
exc = CommandNotFound('Command "{}" is not found'.format(invoker))
self.dispatch('command_error', exc, ctx)
@asyncio.coroutine
def on_message(self, message):
yield from self.process_commands(message)
| gpl-3.0 | 1,922,032,840,912,959,700 | 30.982497 | 123 | 0.587654 | false |
slank/ansible | lib/ansible/modules/system/hostname.py | 12 | 23676 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Hiroaki Nakamura <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = '''
---
module: hostname
author:
- "Adrian Likins (@alikins)"
- "Hideki Saito (@saito-hideki)"
version_added: "1.4"
short_description: Manage hostname
requirements: [ hostname ]
description:
- Set system's hostname.
- Currently implemented on Debian, Ubuntu, Fedora, RedHat, openSUSE, Linaro, ScientificLinux, Arch, CentOS, AMI, Alpine Linux.
- Any distribution that uses systemd as their init system.
- Note, this module does *NOT* modify /etc/hosts. You need to modify it yourself using other modules like template or replace.
options:
name:
required: true
description:
- Name of the host
'''
EXAMPLES = '''
- hostname:
name: web01
'''
import socket
from distutils.version import LooseVersion
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.facts import *
from ansible.module_utils._text import to_bytes, to_native
class UnimplementedStrategy(object):
def __init__(self, module):
self.module = module
def update_current_and_permanent_hostname(self):
self.unimplemented_error()
def update_current_hostname(self):
self.unimplemented_error()
def update_permanent_hostname(self):
self.unimplemented_error()
def get_current_hostname(self):
self.unimplemented_error()
def set_current_hostname(self, name):
self.unimplemented_error()
def get_permanent_hostname(self):
self.unimplemented_error()
def set_permanent_hostname(self, name):
self.unimplemented_error()
def unimplemented_error(self):
platform = get_platform()
distribution = get_distribution()
if distribution is not None:
msg_platform = '%s (%s)' % (platform, distribution)
else:
msg_platform = platform
self.module.fail_json(
msg='hostname module cannot be used on platform %s' % msg_platform)
class Hostname(object):
"""
This is a generic Hostname manipulation class that is subclassed
based on platform.
A subclass may wish to set a different strategy instance on self.strategy.
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
strategy_class = UnimplementedStrategy
def __new__(cls, *args, **kwargs):
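# Dispatch to the platform/distribution specific subclass (e.g. DebianHostname, FreeBSDHostname) that matches the running system.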
return load_platform_subclass(Hostname, args, kwargs)
def __init__(self, module):
self.module = module
self.name = module.params['name']
if self.platform == 'Linux' and Facts(module).is_systemd_managed():
self.strategy = SystemdStrategy(module)
else:
self.strategy = self.strategy_class(module)
def update_current_and_permanent_hostname(self):
return self.strategy.update_current_and_permanent_hostname()
def get_current_hostname(self):
return self.strategy.get_current_hostname()
def set_current_hostname(self, name):
self.strategy.set_current_hostname(name)
def get_permanent_hostname(self):
return self.strategy.get_permanent_hostname()
def set_permanent_hostname(self, name):
self.strategy.set_permanent_hostname(name)
class GenericStrategy(object):
"""
This is a generic Hostname manipulation strategy class.
A subclass may wish to override some or all of these methods.
- get_current_hostname()
- get_permanent_hostname()
- set_current_hostname(name)
- set_permanent_hostname(name)
"""
def __init__(self, module):
self.module = module
self.hostname_cmd = self.module.get_bin_path('hostname', True)
self.changed = False
def update_current_and_permanent_hostname(self):
self.update_current_hostname()
self.update_permanent_hostname()
return self.changed
def update_current_hostname(self):
name = self.module.params['name']
current_name = self.get_current_hostname()
if current_name != name:
self.set_current_hostname(name)
self.changed = True
def update_permanent_hostname(self):
name = self.module.params['name']
permanent_name = self.get_permanent_hostname()
if permanent_name != name:
self.set_permanent_hostname(name)
self.changed = True
def get_current_hostname(self):
cmd = [self.hostname_cmd]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
return to_native(out).strip()
def set_current_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
def get_permanent_hostname(self):
return None
def set_permanent_hostname(self, name):
pass
# ===========================================
class DebianStrategy(GenericStrategy):
"""
This is a Debian family Hostname manipulation strategy class - it edits
the /etc/hostname file.
"""
HOSTNAME_FILE = '/etc/hostname'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError:
err = get_exception()
self.module.fail_json(msg="failed to write file: %s" %
str(err))
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to read hostname: %s" %
str(err))
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to update hostname: %s" %
str(err))
# ===========================================
class SLESStrategy(GenericStrategy):
"""
This is a SLES Hostname strategy class - it edits the
/etc/HOSTNAME file.
"""
HOSTNAME_FILE = '/etc/HOSTNAME'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError:
err = get_exception()
self.module.fail_json(msg="failed to write file: %s" %
str(err))
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to read hostname: %s" %
str(err))
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to update hostname: %s" %
str(err))
# ===========================================
class RedHatStrategy(GenericStrategy):
"""
This is a Redhat Hostname strategy class - it edits the
/etc/sysconfig/network file.
"""
NETWORK_FILE = '/etc/sysconfig/network'
def get_permanent_hostname(self):
try:
f = open(self.NETWORK_FILE, 'rb')
try:
for line in f.readlines():
if line.startswith('HOSTNAME'):
k, v = line.split('=')
return v.strip()
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to read hostname: %s" %
str(err))
def set_permanent_hostname(self, name):
try:
lines = []
found = False
f = open(self.NETWORK_FILE, 'rb')
try:
for line in f.readlines():
if line.startswith('HOSTNAME'):
lines.append("HOSTNAME=%s\n" % name)
found = True
else:
lines.append(line)
finally:
f.close()
if not found:
lines.append("HOSTNAME=%s\n" % name)
f = open(self.NETWORK_FILE, 'w+')
try:
f.writelines(lines)
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to update hostname: %s" %
str(err))
# ===========================================
class AlpineStrategy(GenericStrategy):
"""
This is an Alpine Linux Hostname manipulation strategy class - it edits
the /etc/hostname file, then runs hostname -F /etc/hostname.
"""
HOSTNAME_FILE = '/etc/hostname'
def update_current_and_permanent_hostname(self):
self.update_permanent_hostname()
self.update_current_hostname()
return self.changed
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError:
err = get_exception()
self.module.fail_json(msg="failed to write file: %s" %
str(err))
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to read hostname: %s" %
str(err))
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to update hostname: %s" %
str(err))
def set_current_hostname(self, name):
cmd = [self.hostname_cmd, '-F', self.HOSTNAME_FILE]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
# ===========================================
class SystemdStrategy(GenericStrategy):
"""
This is a Systemd hostname manipulation strategy class - it uses
the hostnamectl command.
"""
def get_current_hostname(self):
cmd = ['hostname']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
return to_native(out).strip()
def set_current_hostname(self, name):
if len(name) > 64:
self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
cmd = ['hostnamectl', '--transient', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
def get_permanent_hostname(self):
cmd = ['hostnamectl', '--static', 'status']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
return to_native(out).strip()
def set_permanent_hostname(self, name):
if len(name) > 64:
self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
cmd = ['hostnamectl', '--pretty', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
cmd = ['hostnamectl', '--static', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
# ===========================================
class OpenRCStrategy(GenericStrategy):
"""
This is a Gentoo (OpenRC) Hostname manipulation strategy class - it edits
the /etc/conf.d/hostname file.
"""
HOSTNAME_FILE = '/etc/conf.d/hostname'
def get_permanent_hostname(self):
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
for line in f:
line = line.strip()
if line.startswith('hostname='):
return line[10:].strip('"')
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to read hostname: %s" % str(err))
finally:
f.close()
return None
def set_permanent_hostname(self, name):
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
lines = [x.strip() for x in f]
for i, line in enumerate(lines):
if line.startswith('hostname='):
lines[i] = 'hostname="%s"' % name
break
f.close()
f = open(self.HOSTNAME_FILE, 'w')
f.write('\n'.join(lines) + '\n')
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to update hostname: %s" % str(err))
finally:
f.close()
# ===========================================
class OpenBSDStrategy(GenericStrategy):
"""
This is an OpenBSD family Hostname manipulation strategy class - it edits
the /etc/myname file.
"""
HOSTNAME_FILE = '/etc/myname'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError:
err = get_exception()
self.module.fail_json(msg="failed to write file: %s" %
str(err))
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to read hostname: %s" %
str(err))
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to update hostname: %s" %
str(err))
# ===========================================
class SolarisStrategy(GenericStrategy):
"""
This is a Solaris11 or later Hostname manipulation strategy class - it
executes the hostname command.
"""
def set_current_hostname(self, name):
cmd_option = '-t'
cmd = [self.hostname_cmd, cmd_option, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
def get_permanent_hostname(self):
fmri = 'svc:/system/identity:node'
pattern = 'config/nodename'
cmd = '/usr/sbin/svccfg -s %s listprop -o value %s' % (fmri, pattern)
rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
return to_native(out).strip()
def set_permanent_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
# ===========================================
class FreeBSDStrategy(GenericStrategy):
"""
This is a FreeBSD hostname manipulation strategy class - it edits
the /etc/rc.conf.d/hostname file.
"""
HOSTNAME_FILE = '/etc/rc.conf.d/hostname'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("hostname=temporarystub\n")
except IOError:
err = get_exception()
self.module.fail_json(msg="failed to write file: %s" %
str(err))
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
for line in f:
line = line.strip()
if line.startswith('hostname='):
return line[10:].strip('"')
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to read hostname: %s" % str(err))
finally:
f.close()
return None
def set_permanent_hostname(self, name):
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
lines = [x.strip() for x in f]
for i, line in enumerate(lines):
if line.startswith('hostname='):
lines[i] = 'hostname="%s"' % name
break
f.close()
f = open(self.HOSTNAME_FILE, 'w')
f.write('\n'.join(lines) + '\n')
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to update hostname: %s" % str(err))
finally:
f.close()
# ===========================================
class FedoraHostname(Hostname):
platform = 'Linux'
distribution = 'Fedora'
strategy_class = SystemdStrategy
class SLESHostname(Hostname):
platform = 'Linux'
distribution = 'Suse linux enterprise server '
distribution_version = get_distribution_version()
if distribution_version and LooseVersion("10") <= LooseVersion(distribution_version) <= LooseVersion("12"):
strategy_class = SLESStrategy
else:
strategy_class = UnimplementedStrategy
class OpenSUSEHostname(Hostname):
platform = 'Linux'
distribution = 'Opensuse '
strategy_class = SystemdStrategy
class ArchHostname(Hostname):
platform = 'Linux'
distribution = 'Arch'
strategy_class = SystemdStrategy
class RedHat5Hostname(Hostname):
platform = 'Linux'
distribution = 'Redhat'
strategy_class = RedHatStrategy
class RedHatServerHostname(Hostname):
platform = 'Linux'
distribution = 'Red hat enterprise linux server'
strategy_class = RedHatStrategy
class RedHatWorkstationHostname(Hostname):
platform = 'Linux'
distribution = 'Red hat enterprise linux workstation'
strategy_class = RedHatStrategy
class CentOSHostname(Hostname):
platform = 'Linux'
distribution = 'Centos'
strategy_class = RedHatStrategy
class CentOSLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Centos linux'
strategy_class = RedHatStrategy
class ScientificHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific'
strategy_class = RedHatStrategy
class ScientificLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific linux'
strategy_class = RedHatStrategy
class ScientificLinuxCERNHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific linux cern slc'
strategy_class = RedHatStrategy
class OracleLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Oracle linux server'
strategy_class = RedHatStrategy
class AmazonLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Amazon'
strategy_class = RedHatStrategy
class DebianHostname(Hostname):
platform = 'Linux'
distribution = 'Debian'
strategy_class = DebianStrategy
class KaliHostname(Hostname):
platform = 'Linux'
distribution = 'Kali'
strategy_class = DebianStrategy
class UbuntuHostname(Hostname):
platform = 'Linux'
distribution = 'Ubuntu'
strategy_class = DebianStrategy
class LinuxmintHostname(Hostname):
platform = 'Linux'
distribution = 'Linuxmint'
strategy_class = DebianStrategy
class LinaroHostname(Hostname):
platform = 'Linux'
distribution = 'Linaro'
strategy_class = DebianStrategy
class GentooHostname(Hostname):
platform = 'Linux'
distribution = 'Gentoo base system'
strategy_class = OpenRCStrategy
class ALTLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Altlinux'
strategy_class = RedHatStrategy
class AlpineLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Alpine'
strategy_class = AlpineStrategy
class OpenBSDHostname(Hostname):
platform = 'OpenBSD'
distribution = None
strategy_class = OpenBSDStrategy
class SolarisHostname(Hostname):
platform = 'SunOS'
distribution = None
strategy_class = SolarisStrategy
class FreeBSDHostname(Hostname):
platform = 'FreeBSD'
distribution = None
strategy_class = FreeBSDStrategy
class NetBSDHostname(Hostname):
platform = 'NetBSD'
distribution = None
strategy_class = FreeBSDStrategy
# ===========================================
def main():
module = AnsibleModule(
argument_spec = dict(
name=dict(required=True)
)
)
hostname = Hostname(module)
name = module.params['name']
changed = hostname.update_current_and_permanent_hostname()
module.exit_json(changed=changed, name=name,
ansible_facts=dict(ansible_hostname=name.split('.')[0],
ansible_nodename=name,
ansible_fqdn=socket.getfqdn(),
ansible_domain='.'.join(socket.getfqdn().split('.')[1:])))
if __name__ == '__main__':
main()
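# Illustrative usage (not part of the original module): a minimal play task,
# shown here only as a comment since task files are YAML rather than Python:
#
#   - name: set the machine hostname
#     hostname: name=web01.example.com
#
# On success main() above also returns ansible_hostname, ansible_nodename,
# ansible_fqdn and ansible_domain as facts.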
| gpl-3.0 | -5,561,625,591,487,425,000 | 30.31746 | 130 | 0.559765 | false |
qtekfun/htcDesire820Kernel | external/chromium_org/chrome/test/mini_installer/verifier.py | 85 | 2063 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class Verifier:
"""Verifies that the current machine states match the expectation."""
def VerifyInput(self, verifier_input, variable_expander):
"""Verifies that the current machine states match |verifier_input|.
Args:
verifier_input: An input to the verifier. It is a dictionary where each
key is an expectation name and the associated value is an expectation
dictionary. The expectation dictionary may contain an optional
'condition' property, a string that determines whether the expectation
should be verified. Each subclass can specify a different expectation
name and expectation dictionary.
variable_expander: A VariableExpander object.
"""
for expectation_name, expectation in verifier_input.iteritems():
if 'condition' in expectation:
condition = variable_expander.Expand(expectation['condition'])
if not self._EvaluateCondition(condition):
continue
self._VerifyExpectation(expectation_name, expectation, variable_expander)
def _VerifyExpectation(self, expectation_name, expectation,
variable_expander):
"""Verifies that the current machine states match |verifier_input|.
This is an abstract method for subclasses to override.
Args:
expectation_name: An expectation name. Each subclass can specify a
different expectation name format.
expectation: An expectation dictionary. Each subclass can specify a
different expectation dictionary format.
variable_expander: A VariableExpander object.
"""
raise NotImplementedError()
def _EvaluateCondition(self, condition):
"""Evaluates |condition| using eval().
Args:
condition: A condition string.
Returns:
The result of the evaluated condition.
"""
return eval(condition, {'__builtins__': None}, None)
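# Illustrative sketch (not part of the original file; the names are
# hypothetical and assume os is imported and a variable_expander exists):
# a subclass implements _VerifyExpectation, and VerifyInput only dispatches
# to it when the optional 'condition' string evaluates truthy.
#
#   class FileVerifier(Verifier):
#     def _VerifyExpectation(self, expectation_name, expectation,
#                            variable_expander):
#       path = variable_expander.Expand(expectation_name)
#       assert os.path.exists(path) == expectation['exists']
#
#   verifier_input = {
#       '$PROGRAM_FILES\\Chrome\\chrome.exe': {
#           'condition': '1 == 1',
#           'exists': True,
#       },
#   }
#   FileVerifier().VerifyInput(verifier_input, variable_expander)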
| gpl-2.0 | -8,644,935,867,121,428,000 | 38.673077 | 80 | 0.705768 | false |
jpike88/crosswalk | tools/reflection_generator/java_method.py | 3 | 29922 | #!/usr/bin/env python
# Copyright (c) 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from collections import OrderedDict
from string import Template
def ConvertClassExpressionToClassType(class_name):
""" Turn "final HashMap<String>" to HashMap.class. """
return '%s.class' % class_name.split()[-1].split('<')[0]
def ConvertPrimitiveTypeToObject(class_name):
primitive_map = {
'byte': 'Byte',
'short': 'Short',
'int': 'Integer',
'long': 'Long',
'float': 'Float',
'double': 'Double',
'char': 'Character',
'boolean': 'Boolean',
}
return primitive_map.get(class_name, class_name)
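# Illustrative examples for the two helpers above (not in the original file):
#   ConvertClassExpressionToClassType('final HashMap<String>')  -> 'HashMap.class'
#   ConvertClassExpressionToClassType('int')                    -> 'int.class'
#   ConvertPrimitiveTypeToObject('int')                         -> 'Integer'
#   ConvertPrimitiveTypeToObject('XWalkViewInternal')           -> 'XWalkViewInternal'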
class ParamType(object):
"""Internal representation of the type of a parameter of a method."""
def __init__(self, expression, class_loader):
self._expression = expression
self._modifier = ''
self._generic_type = ''
self._generic_type_parameters = []
self._contains_internal_class = False
self.ParseType(class_loader)
self._contains_internal_class = self._contains_internal_class or\
class_loader.IsInternalClass(self._generic_type)
def ParseType(self, class_loader):
param_type_re = re.compile('(?P<modifier>(\w+ )*)'
'(?P<generic>(\w+))(?P<type_params>(<.*>)?)')
for match in re.finditer(param_type_re, self._expression):
self._modifier = match.group('modifier')
self._generic_type = match.group('generic')
type_params = match.group('type_params')
if len(type_params) > 1:
type_params = type_params[1:-1]
self._generic_type_parameters = [ParamType(param.strip(),
class_loader) for param in type_params.split(',')]
for type_param in self._generic_type_parameters:
if self.generic_type == 'ValueCallback':
print 'value callback with %s' % type_param.generic_type
if type_param.contains_internal_class:
self._contains_internal_class = True
break
@property
def expression(self):
return self._expression
@property
def modifier(self):
return self._modifier
@property
def generic_type(self):
return self._generic_type
@property
def generic_type_parameters(self):
return self._generic_type_parameters
@property
def contains_internal_class(self):
return self._contains_internal_class
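# Illustrative example (not in the original file): assuming a class_loader
# that reports XWalkViewInternal as an internal class, parsing the type
# expression 'final ValueCallback<XWalkViewInternal>' yields
#   modifier                -> 'final '
#   generic_type            -> 'ValueCallback'
#   generic_type_parameters -> [ParamType for 'XWalkViewInternal']
#   contains_internal_class -> True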
class ParamStringType(object):
INTERNAL_DECLARE = 1
BRIDGE_DECLARE = 2
BRIDGE_DECLARE_FOR_WRAPPER = 3
BRIDGE_PASS_TO_SUPER = 4
BRIDGE_PASS_TO_WRAPPER = 5
INTERNAL_PASS_TO_BRIDGE = 6
BRIDGE_OVERRIDE_CONDITION = 7
WRAPPER_DECLARE = 8
WRAPPER_DECLARE_FOR_BRIDGE = 9
WRAPPER_PASS_TO_BRIDGE = 10
class MethodStringType(object):
BRIDGE_CONSTRUCTOR = 1
BRIDGE_STATIC = 2
BRIDGE_SUPER = 3
BRIDGE_OVERRIDE = 4
BRIDGE_WRAPPER = 5
WRAPPER_CONSTRUCTOR = 6
WRAPPER_STATIC = 7
WRAPPER_BRIDGE = 8
WRAPPER_INTERFACE = 9
class Method(object):
"""Internal representaion of a method."""
ANNOTATION_PRE_WRAPLINE = 'preWrapperLines'
ANNOTATION_POST_WRAPLINE = 'postWrapperLines'
def __init__(self, class_name, class_loader,
is_constructor, is_static, is_abstract,
method_name, method_return, params, annotation, doc=''):
self._class_name = class_name
self._class_loader = class_loader
self._is_constructor = is_constructor
self._is_static = is_static
self._is_abstract = is_abstract
self._method_name = method_name
self._method_return = method_return
self._params = OrderedDict() # Use OrderedDict to avoid parameter misorder.
self._typed_params = OrderedDict()
self._method_annotations = {}
self._method_doc = doc
self._class_java_data = ''
self._method_declare_name = ''
self._internal_params_declare = ''
self._bridge_params_declare = ''
self._bridge_params_declare_for_wrapper = ''
self._bridge_params_pass_to_super = ''
self._bridge_params_pass_to_wrapper = ''
self._internal_params_pass_to_bridge = ''
self._bridge_override_condition = ''
self._wrapper_params_declare = ''
self._wrapper_params_declare_for_bridge = ''
self._wrapper_params_pass_to_bridge = ''
self._is_reservable = False
self.ParseMethodParams(params)
self.ParseMethodAnnotation(annotation)
def IsInternalClass(self, clazz):
return self._class_loader.IsInternalClass(clazz)
def GetJavaData(self, clazz):
return self._class_loader.GetJavaData(clazz)
def GenerateDoc(self, doc):
return self._class_loader.GenerateDoc(doc)
@property
def is_constructor(self):
return self._is_constructor
@property
def is_static(self):
return self._is_static
@property
def is_abstract(self):
return self._is_abstract
@property
def is_reservable(self):
return self._is_reservable
@property
def method_name(self):
return self._method_name
@property
def method_return(self):
return self._method_return
@property
def params(self):
return self._params
@property
def typed_params(self):
return self._typed_params
@property
def method_annotations(self):
return self._method_annotations
@property
def method_doc(self):
return self._method_doc
def ParseMethodParams(self, params):
# TODO(shouqun): Currently, generic parameters are not supported.
# The support of generic types should be added if such cases happen.
if not params or params == '':
return
for param in params.split(','):
param = param.strip()
param_list = param.split()
param_type = ' '.join(param_list[:-1]) # To handle modifiers
param_name = param_list[-1]
self._params[param_name] = param_type
self._typed_params[param_name] = ParamType(param_type, self._class_loader)
def ParseMethodAnnotation(self, annotation):
if annotation.find('reservable = true') >= 0:
self._is_reservable = True
pre_wrapline_re = re.compile('preWrapperLines\s*=\s*\{\s*('
'?P<pre_wrapline>(".*")(,\s*".*")*)\s*\}')
for match in re.finditer(pre_wrapline_re, annotation):
pre_wrapline = self.FormatWrapperLine(match.group('pre_wrapline'))
self._method_annotations[self.ANNOTATION_PRE_WRAPLINE] = pre_wrapline
post_wrapline_re = re.compile('postWrapperLines\s*=\s*\{\s*('
'?P<post_wrapline>(".*")(,\s*".*")*)\s*\}')
for match in re.finditer(post_wrapline_re, annotation):
post_wrapline = self.FormatWrapperLine(match.group('post_wrapline'))
self._method_annotations[self.ANNOTATION_POST_WRAPLINE] = post_wrapline
def FormatWrapperLine(self, annotation_value):
""" annotaion_value is a java string array which each element is an
individual line. Probably like: ' "line1",\n "line2"'
This method is turnning it to ' line1\n line2'
"""
lines = []
exec('lines = [%s]' % annotation_value.replace('\n', ''))
template = Template('\n'.join(lines))
values = {}
for arg in range(1, len(self.params.keys())+1):
values['param%d' % arg] = self.params.keys()[arg-1]
return template.substitute(values)
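# Illustrative example (not in the original file): for a method foo(String bar),
# params.keys() is ['bar'], so an annotation line written as
#   "${param1}.length();"
# is expanded by the Template above into
#   "bar.length();"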
def PrepareStrings(self):
self._class_java_data = self.GetJavaData(self._class_name)
self._method_declare_name = self.GenerateMethodDeclareName()
self._internal_params_declare = ', '.join(
self.GetFormattedParamArray(ParamStringType.INTERNAL_DECLARE))
self._bridge_params_declare = ', '.join(
self.GetFormattedParamArray(ParamStringType.BRIDGE_DECLARE))
self._bridge_params_declare_for_wrapper = ', '.join(
self.GetFormattedParamArray(
ParamStringType.BRIDGE_DECLARE_FOR_WRAPPER, insert_empty=True))
self._bridge_params_pass_to_super = ', '.join(
self.GetFormattedParamArray(ParamStringType.BRIDGE_PASS_TO_SUPER))
self._bridge_params_pass_to_wrapper = ', '.join(
self.GetFormattedParamArray(ParamStringType.BRIDGE_PASS_TO_WRAPPER))
self._internal_params_pass_to_bridge = ', '.join(
self.GetFormattedParamArray(ParamStringType.INTERNAL_PASS_TO_BRIDGE))
self._bridge_override_condition = ' && '.join(
self.GetFormattedParamArray(ParamStringType.BRIDGE_OVERRIDE_CONDITION))
self._wrapper_params_declare = ', '.join(
self.GetFormattedParamArray(ParamStringType.WRAPPER_DECLARE))
self._wrapper_params_declare_for_bridge = ', '.join(
self.GetFormattedParamArray(
ParamStringType.WRAPPER_DECLARE_FOR_BRIDGE, insert_empty=True))
self._wrapper_params_pass_to_bridge = ', '.join(
self.GetFormattedParamArray(ParamStringType.WRAPPER_PASS_TO_BRIDGE))
def GetFormattedParamArray(self, param_string_type,
append_empty=False, insert_empty=False):
""" Return the array of params with specified format.
append or insert an empty string on demand for cases
that need extra splitter when using the array.
"""
formatted_params = []
for param_name in self._params:
param_type = self._params[param_name]
formatted_param = self.FormatSingleParam(
param_type, param_name, param_string_type)
if formatted_param:
formatted_params.append(formatted_param)
if append_empty:
formatted_params.append('')
if insert_empty:
formatted_params.insert(0, '')
return formatted_params
def FormatSingleParam(self, param_type, param_name, param_string_type):
is_internal_class = self.IsInternalClass(param_type)
if is_internal_class:
java_data = self.GetJavaData(param_type)
typed_param = self._typed_params[param_name]
if param_string_type == ParamStringType.INTERNAL_DECLARE:
# the way internal declares its params, will be used in bridge's override
# call.
# XWalkViewInternal view => XWalkViewInternal view
return '%s %s' % (param_type, param_name)
elif param_string_type == ParamStringType.BRIDGE_DECLARE:
# the way bridge declares its params, will be used in bridge's wrapper
# call and super call.
# XWalkViewInternal view => XWalkViewBridge view
if is_internal_class:
return '%s %s'% (java_data.GetBridgeName(), param_name)
else:
return '%s %s' % (param_type, param_name)
elif param_string_type == ParamStringType.BRIDGE_DECLARE_FOR_WRAPPER:
# the way bridge declares its params for wrapper, will turn the param
# type to class<?> value for reflection to use.
# XWalkViewInternal view => coreBridge.getWrapperClass("XWalkView")
# DirectionInternal direnction =>
# coreBridge.getWrapperClass("XWalkView$Direction")
# String name => String.class
if is_internal_class:
return 'coreBridge.getWrapperClass("%s")' % java_data.GetWrapperName()
else:
# TODO(wang16): Here only detects enum declared in the same class as
# the method itself. Using enum across class is not supported.
if param_type in self._class_java_data.enums:
return ('coreBridge.getWrapperClass("%s")' %
self._class_java_data.GetWrapperName(param_type))
else:
return ConvertClassExpressionToClassType(param_type)
elif param_string_type == ParamStringType.BRIDGE_PASS_TO_SUPER:
# the way bridge passes the param to super
# XWalkViewInternal view => view
if is_internal_class:
return java_data.UseAsInstanceInBridgeSuperCall(param_name)
else:
return param_name
elif param_string_type == ParamStringType.BRIDGE_PASS_TO_WRAPPER:
# the way bridge passes the param to wrapper
# XWalkViewInternal view => view.getWrapper()
# DirectionInternal direction => ConvertDirectionInternal(direction)
if is_internal_class:
return java_data.UseAsInstanceInBridgeCall(param_name)
elif (typed_param.generic_type == 'ValueCallback' and
typed_param.contains_internal_class):
assert len(typed_param.generic_type_parameters) == 1
internal_generic_type_param = typed_param.generic_type_parameters[0]
internal_generic_type_class = self.GetJavaData(
internal_generic_type_param.generic_type)
return ('new ValueCallback<Object>() {\n' +
' @Override\n' +
' public void onReceiveValue(Object value) {\n' +
' %sFinal.onReceiveValue((%s) ' % (
param_name, internal_generic_type_class.bridge_name) +
'coreBridge.getBridgeObject(value));\n' +
' }\n' +
' }')
else:
# TODO(wang16): Here only detects enum declared in the same class as
# the method itself. Using enum across class is not supported.
if param_type in self._class_java_data.enums:
return 'Convert%s(%s)' % (param_type, param_name)
else:
return param_name
elif param_string_type == ParamStringType.INTERNAL_PASS_TO_BRIDGE:
# the way bridge accepts param from internal
# XWalkViewInternal view => (XWalkViewBridge) view
if is_internal_class:
return java_data.UseAsInstanceInBridgeOverrideCall(param_name)
else:
return param_name
elif param_string_type == ParamStringType.BRIDGE_OVERRIDE_CONDITION:
# the way bridge uses as the condition for whether call super or
# call wrapper in override call
# XWalkViewInternal view => (view instanceof XWalkViewBridge)
if (is_internal_class and
not java_data.HasInstanceCreateInternallyAnnotation()):
return '(%s instanceof %s)' % (param_name, java_data.GetBridgeName())
else:
return None
elif param_string_type == ParamStringType.WRAPPER_DECLARE:
# the way wrapper declare the param
# XWalkViewInternal view => XWalkView view
# DirectionInternal direction => Direction direction
if is_internal_class:
return '%s %s' % (java_data.UseAsTypeInWrapperCall(), param_name)
elif param_type in self._class_java_data.enums:
# TODO(wang16): Here only detects enum declared in the same class as
# the method itself. Using enum across class is not supported.
return '%s %s' % (param_type.replace('Internal', ''), param_name)
else:
return '%s %s' % (param_type, param_name)
elif param_string_type == ParamStringType.WRAPPER_DECLARE_FOR_BRIDGE:
# the way wrapper declares its params for bridge, will turn the param
# type to class<?> value for reflection to use.
# XWalkViewInternal view =>
# coreWrapper.getBridgeClass("XWalkViewBridge")
# DirectionInternal direction => enumDirectionClass
# String name => String.class
# TODO(wang16): Currently there is no internal classes for static method.
# Need to support it in future.
if is_internal_class:
return 'coreWrapper.getBridgeClass("%s")' % java_data.GetBridgeName()
else:
# TODO(wang16): Here only detects enum declared in the same class as
# the method itself. Using enum across class is not supported.
enums = self._class_java_data.enums
if param_type in enums:
return ('coreWrapper.getBridgeClass("%s")' %
self._class_java_data.GetBridgeName(param_type))
else:
return ConvertClassExpressionToClassType(param_type)
elif param_string_type == ParamStringType.WRAPPER_PASS_TO_BRIDGE:
# the way wrapper passes param to bridge
# XWalkViewInternal view => view.getBridge()
# DirectionInternal direction => ConvertDirection(direction)
if is_internal_class:
return java_data.UseAsInstanceInWrapperCall(param_name)
elif param_type in self._class_java_data.enums:
# TODO(wang16): Here only detects enum declared in the same class as
# the method itself. Using enum across class is not supported.
return 'Convert%s(%s)' % (param_type.replace('Internal', ''),
param_name)
else:
return param_name
else:
pass
def GenerateMethodDeclareName(self):
name = self.method_name
for param_name in self.params:
# Remove modifier and generic type.
name += ConvertClassExpressionToClassType(
self.params[param_name]).replace('.class', '')
if self._is_constructor:
return '%sConstructor' % name
else:
return '%sMethod' % name
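# Illustrative example (not in the original file): a method declared as
#   void loadUrl(String url, int flags)
# yields 'loadUrlStringintMethod' (constructors get a 'Constructor' suffix
# instead); the name is only used as an identifier for the generated
# ReflectMethod fields emitted below.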
def GenerateBridgeConstructor(self):
template = Template("""\
public ${NAME}(${PARAMS}, Object wrapper) {
super(${PARAMS_PASSING});
this.wrapper = wrapper;
reflectionInit();
}
""")
value = {'NAME': self._class_java_data.bridge_name,
'PARAMS': self._bridge_params_declare,
'PARAMS_PASSING': self._bridge_params_pass_to_super}
return template.substitute(value)
def GenerateBridgeStaticMethod(self):
template = Template("""\
public static ${RETURN_TYPE} ${NAME}($PARAMS) {
${RETURN}${CLASS_NAME}.${NAME}(${PARAMS_PASSING});
}
""")
value = {'RETURN_TYPE': self.method_return,
'NAME': self.method_name,
'PARAMS': self._bridge_params_declare,
'RETURN': '' if self._method_return == 'void' else 'return ',
'CLASS_NAME': self._class_name,
'PARAMS_PASSING': self._bridge_params_pass_to_super}
return template.substitute(value)
def GenerateBridgeOverrideMethod(self):
if not self._bridge_override_condition:
return ' @Override'
template = Template("""\
@Override
public ${RETURN_TYPE} ${NAME}(${PARAMS}) {
if (${IF_CONDITION}) {
${RETURN}${NAME}(${BRIDGE_PARAMS_PASSING});
} else {
${RETURN}super.${NAME}(${PARAMS_PASSING});
}
}
""")
value = {'NAME': self.method_name,
'RETURN_TYPE': self.method_return,
'PARAMS': self._internal_params_declare,
'RETURN': '' if self._method_return == 'void' else 'return ',
'IF_CONDITION': self._bridge_override_condition,
'PARAMS_PASSING': self._bridge_params_pass_to_super,
'BRIDGE_PARAMS_PASSING': self._internal_params_pass_to_bridge}
return template.substitute(value)
def GenerateBridgeWrapperMethod(self):
return_is_internal = self.IsInternalClass(self._method_return)
if return_is_internal:
return_type_java_data = self.GetJavaData(self._method_return)
if return_is_internal:
template = Template("""\
public ${RETURN_TYPE} ${NAME}(${PARAMS}) {
${GENERIC_TYPE_DECLARE}${RETURN}coreBridge.getBridgeObject(\
${METHOD_DECLARE_NAME}.invoke(${PARAMS_PASSING}));
}
""")
else:
template = Template("""\
public ${RETURN_TYPE} ${NAME}(${PARAMS}) {
${GENERIC_TYPE_DECLARE}${RETURN}${METHOD_DECLARE_NAME}.invoke(\
${PARAMS_PASSING});
}
""")
if self._method_return == 'void':
return_statement = ''
elif return_is_internal:
return_statement = 'return (%s)' % return_type_java_data.bridge_name
else:
return_statement = ('return (%s)' %
ConvertPrimitiveTypeToObject(self.method_return))
# Handling generic types, current only ValueCallback will be handled.
generic_type_declare = ''
for param_name in self._typed_params:
typed_param = self._typed_params[param_name]
if typed_param.generic_type != 'ValueCallback':
continue
if typed_param.contains_internal_class:
generic_type_declare += 'final %s %sFinal = %s;\n ' % (
typed_param.expression, param_name, param_name)
value = {'RETURN_TYPE': self.method_return,
'NAME': self.method_name,
'METHOD_DECLARE_NAME': self._method_declare_name,
'PARAMS': self._bridge_params_declare,
'RETURN': return_statement,
'GENERIC_TYPE_DECLARE': generic_type_declare,
'PARAMS_PASSING': self._bridge_params_pass_to_wrapper}
return template.substitute(value)
def GenerateBridgeSuperMethod(self):
no_return_value = self._method_return == 'void'
return_is_internal = self.IsInternalClass(self._method_return)
if return_is_internal:
return_type_java_data = self.GetJavaData(self._method_return)
if self._is_abstract:
return ''
if self._class_java_data.HasCreateInternallyAnnotation():
if no_return_value:
template = Template("""\
public void ${NAME}Super(${PARAMS}) {
if (internal == null) {
super.${NAME}(${PARAM_PASSING});
} else {
internal.${NAME}(${PARAM_PASSING});
}
}
""")
else:
template = Template("""\
public ${RETURN_TYPE} ${NAME}Super(${PARAMS}) {
${INTERNAL_RETURN_TYPE} ret;
if (internal == null) {
ret = super.${NAME}(${PARAM_PASSING});
} else {
ret = internal.${NAME}(${PARAM_PASSING});
}
${IF_NULL_RETURN_NULL}
return ${RETURN_VALUE};
}
""")
else:
if no_return_value:
template = Template("""\
public void ${NAME}Super(${PARAMS}) {
super.${NAME}(${PARAM_PASSING});
}
""")
else:
template = Template("""\
public ${RETURN_TYPE} ${NAME}Super(${PARAMS}) {
${INTERNAL_RETURN_TYPE} ret;
ret = super.${NAME}(${PARAM_PASSING});
${IF_NULL_RETURN_NULL}
return ${RETURN_VALUE};
}
""")
if return_is_internal:
return_value = return_type_java_data.UseAsReturnInBridgeSuperCall('ret')
method_return = return_type_java_data.bridge_name
else:
return_value = 'ret'
method_return = self._method_return
if ConvertPrimitiveTypeToObject(method_return) != method_return:
# it's returning a primitive type, so it can't be null.
if_null_return_null = ''
else:
if_null_return_null = 'if (ret == null) return null;'
value = {
'RETURN_TYPE': method_return,
'INTERNAL_RETURN_TYPE': self.method_return,
'NAME': self.method_name,
'PARAM_PASSING': self._bridge_params_pass_to_super,
'PARAMS': self._bridge_params_declare,
'IF_NULL_RETURN_NULL': if_null_return_null,
'RETURN_VALUE': return_value
}
return template.substitute(value)
def GenerateWrapperConstructor(self):
# TODO(wang16): Currently, only support pre/post wrapper lines for
# Constructors.
template = Template("""\
${DOC}
public ${CLASS_NAME}(${PARAMS}) {
${PRE_WRAP_LINES}
reflectionInit();
}
""")
pre_wrap_string = self._method_annotations.get(
self.ANNOTATION_PRE_WRAPLINE, '')
post_wrap_string = self._method_annotations.get(
self.ANNOTATION_POST_WRAPLINE, '')
if (pre_wrap_string != ''):
pre_wrap_string += "\n\n"
pre_wrap_string += " constructorTypes = new ArrayList<Object>();\n"
for param_type in self._wrapper_params_declare_for_bridge.split(', '):
if (param_type != ''):
param_type = param_type.replace('coreWrapper.getBridgeClass(', '')
param_type = param_type.replace(')', '')
pre_wrap_string += (" constructorTypes.add(%s);\n" % param_type)
pre_wrap_string += "\n"
pre_wrap_string += " constructorParams = new ArrayList<Object>();\n"
for param_name in self._wrapper_params_pass_to_bridge.split(', '):
param_name = param_name.replace('.getBridge()', '')
pre_wrap_string += " constructorParams.add(%s);\n" % param_name
if (post_wrap_string != ''):
pre_wrap_string += ("""
postWrapperMethod = new ReflectMethod(this,
\"post%s\");\n""" % self._method_declare_name)
value = {'DOC': self.GenerateDoc(self.method_doc),
'CLASS_NAME': self._class_java_data.wrapper_name,
'PARAMS': self._wrapper_params_declare,
'PRE_WRAP_LINES': pre_wrap_string}
ret = template.substitute(value)
if (post_wrap_string != ''):
template = Template("""\
public void post${POST_WRAP_METHOD}() {
${POST_WRAP_LINES}
}
""")
value = {'POST_WRAP_METHOD': self._method_declare_name,
'POST_WRAP_LINES': post_wrap_string}
ret += template.substitute(value)
return ret
def GenerateWrapperStaticMethod(self):
if self.is_reservable:
template = Template("""\
${DOC}
public static ${RETURN_TYPE} ${NAME}(${PARAMS}) {
reflectionInit();
if (${METHOD_DECLARE_NAME}.isNull()) {
${METHOD_DECLARE_NAME}.setArguments(${PARAMS_PASSING});
XWalkCoreWrapper.reserveReflectMethod(${METHOD_DECLARE_NAME});
return;
}
${RETURN}${METHOD_DECLARE_NAME}.invoke(${PARAMS_PASSING});
}
""")
else:
template = Template("""\
${DOC}
public static ${RETURN_TYPE} ${NAME}(${PARAMS}) {
reflectionInit();
${RETURN}${METHOD_DECLARE_NAME}.invoke(${PARAMS_PASSING});
}
""")
return_type = ConvertPrimitiveTypeToObject(self.method_return)
if self._method_return == 'void':
return_state = ''
else:
return_state = 'return (%s) ' % return_type
value = {'RETURN_TYPE': self.method_return,
'RETURN': return_state,
'DOC': self.GenerateDoc(self.method_doc),
'NAME': self.method_name,
'PARAMS': self._wrapper_params_declare,
'METHOD_DECLARE_NAME': self._method_declare_name,
'PARAMS_PASSING': self._wrapper_params_pass_to_bridge}
return template.substitute(value)
def GenerateWrapperBridgeMethod(self):
no_return_value = self._method_return == 'void'
return_is_internal = self.IsInternalClass(self._method_return)
if return_is_internal:
return_type_java_data = self.GetJavaData(self._method_return)
if self.is_abstract:
template = Template(
'${DOC}\n' +
' public abstract ${RETURN_TYPE} ${NAME}(${PARAMS});\n\n')
elif return_is_internal:
template = Template("""\
${DOC}
public ${RETURN_TYPE} ${NAME}(${PARAMS}) {
return (${RETURN_TYPE}) coreWrapper.getWrapperObject(\
${METHOD_DECLARE_NAME}.invoke(${PARAMS_PASSING}));
}
""")
elif self.is_reservable:
template = Template("""\
${DOC}
public ${RETURN_TYPE} ${NAME}(${PARAMS}) {
if (${METHOD_DECLARE_NAME}.isNull()) {
${METHOD_DECLARE_NAME}.setArguments(${PARAMS_RESERVING});
XWalkCoreWrapper.reserveReflectMethod(${METHOD_DECLARE_NAME});
return;
}
${RETURN}${METHOD_DECLARE_NAME}.invoke(${PARAMS_PASSING});
}
""")
else:
template = Template("""\
${DOC}
public ${RETURN_TYPE} ${NAME}(${PARAMS}) {
${RETURN}${METHOD_DECLARE_NAME}.invoke(${PARAMS_PASSING});
}
""")
if return_is_internal:
return_type = return_type_java_data.wrapper_name
else:
return_type = self.method_return
if no_return_value:
return_state = ''
else:
return_state = 'return (%s)' % ConvertPrimitiveTypeToObject(return_type)
params_reserving = []
for param in self._wrapper_params_pass_to_bridge.split(', '):
if (param.find("getBridge()") > 0):
param = param.replace('.getBridge()', '')
params_reserving.append(
'new ReflectMethod(%s, "getBridge")' % param)
else:
params_reserving.append(param)
value = {'RETURN_TYPE': return_type,
'RETURN': return_state,
'DOC': self.GenerateDoc(self.method_doc),
'NAME': self.method_name,
'PARAMS': re.sub(r'ValueCallback<([A-Za-z]+)Internal>',
r'ValueCallback<\1>', self._wrapper_params_declare),
'METHOD_DECLARE_NAME': self._method_declare_name,
'PARAMS_RESERVING': ', '.join(params_reserving),
'PARAMS_PASSING': self._wrapper_params_pass_to_bridge}
return template.substitute(value)
def GenerateWrapperInterface(self):
return_is_internal = self.IsInternalClass(self._method_return)
if return_is_internal:
return_type_java_data = self.GetJavaData(self._method_return)
template = Template(
'${DOC}\n' +
' public ${RETURN_TYPE} ${NAME}(${PARAMS});\n\n')
if return_is_internal:
return_type = return_type_java_data.wrapper_name
else:
return_type = self.method_return
value = {'RETURN_TYPE': return_type,
'DOC': self.GenerateDoc(self.method_doc),
'NAME': self.method_name,
'PARAMS': self._wrapper_params_declare}
return template.substitute(value)
def GenerateMethodsStringForBridge(self):
if self._is_constructor:
return self.GenerateBridgeConstructor()
elif self._is_static:
return self.GenerateBridgeStaticMethod()
else:
return '%s\n%s\n%s\n%s\n' % (
self.GenerateBridgeOverrideMethod(),
self.GenerateBridgeWrapperMethod(),
self.GenerateBridgeSuperMethod(),
' private ReflectMethod %s = new ReflectMethod(null, "%s");\n' %
(self._method_declare_name, self._method_name))
def GenerateMethodsStringForWrapper(self):
if self._is_constructor:
return self.GenerateWrapperConstructor()
elif self._is_static:
return '%s\n%s\n' % (
self.GenerateWrapperStaticMethod(), """\
private static ReflectMethod %s = new ReflectMethod(null, "%s");\n""" %
(self._method_declare_name, self._method_name))
elif self._is_abstract:
return self.GenerateWrapperBridgeMethod()
else:
return '%s\n%s\n' % (
self.GenerateWrapperBridgeMethod(),
' private ReflectMethod %s = new ReflectMethod(null, "%s");\n' %
(self._method_declare_name, self._method_name))
def GenerateMethodsStringForInterface(self):
return self.GenerateWrapperInterface()
| bsd-3-clause | 5,073,345,167,352,662,000 | 35.940741 | 80 | 0.632344 | false |
polypmer/obligarcy | obligarcy/migrations/0040_action.py | 1 | 1184 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
('obligarcy', '0039_auto_20160406_2124'),
]
operations = [
migrations.CreateModel(
name='Action',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('verb', models.CharField(max_length=255)),
('target_id', models.PositiveIntegerField(blank=True, null=True, db_index=True)),
('created', models.DateTimeField(db_index=True, auto_now_add=True)),
('actor', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='actions')),
('target_ct', models.ForeignKey(to='contenttypes.ContentType', related_name='target_obj', blank=True, null=True)),
],
options={
'ordering': ('-created',),
},
),
]
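# Illustrative sketch (an assumption, not taken from this repository): the model
# this migration creates would look roughly like the following in models.py,
# with target_ct/target_id usually paired through a generic foreign key:
#
#   class Action(models.Model):
#       actor = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='actions')
#       verb = models.CharField(max_length=255)
#       target_ct = models.ForeignKey('contenttypes.ContentType', blank=True,
#                                     null=True, related_name='target_obj')
#       target_id = models.PositiveIntegerField(blank=True, null=True, db_index=True)
#       created = models.DateTimeField(auto_now_add=True, db_index=True)
#
#       class Meta:
#           ordering = ('-created',)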
| gpl-3.0 | -7,166,020,798,836,472,000 | 37.193548 | 130 | 0.589527 | false |
pombredanne/disco | tests/test_util.py | 10 | 1840 | import os
from datetime import datetime
from disco.test import TestCase
from disco.util import flatten, iterify, urlsplit
def function(x):
return x + 0
sequence = 0, [1, [2, 3], [[4, [5, [6]]]]]
class UtilTestCase(TestCase):
def test_flatten(self):
self.assertEquals(list(range(7)), list(flatten(sequence)))
def test_iterify(self):
self.assertEquals([5], list(iterify(5)))
self.assertEquals([5], list(iterify([5])))
def test_urlsplit(self):
port = self.settings['DISCO_PORT']
ddfs = self.settings['DDFS_DATA']
data = self.settings['DISCO_DATA']
self.assertEquals(urlsplit('http://host/path'),
('http', ('host', ''), 'path'))
self.assertEquals(urlsplit('http://host:port/path'),
('http', ('host', 'port'), 'path'))
self.assertEquals(urlsplit('disco://master/long/path'),
('http', ('master', '{0}'.format(port)), 'long/path'))
self.assertEquals(urlsplit('disco://localhost/ddfs/path',
localhost='localhost',
ddfs_data=ddfs),
('file', ('', ''), os.path.join(ddfs, 'path')))
self.assertEquals(urlsplit('disco://localhost/data/path',
localhost='localhost',
disco_data=data),
('file', ('', ''), os.path.join(data, 'path')))
self.assertEquals(urlsplit('tag://tag', ''),
('tag', ('', ''), 'tag'))
self.assertEquals(urlsplit('tag://host/tag', ''),
('tag', ('host', ''), 'tag'))
self.assertEquals(urlsplit('tag://host:port/tag', ''),
('tag', ('host', 'port'), 'tag'))
| bsd-3-clause | -3,961,319,045,020,269,000 | 41.790698 | 80 | 0.488587 | false |
napsternxg/twitter_nlp | hbc/python/Vocab.py | 10 | 1046 | class Vocab:
def __init__(self, vocabFile=None):
self.nextId = 1
self.word2id = {}
self.id2word = {}
if vocabFile:
for line in open(vocabFile):
line = line.rstrip('\n')
(word, wid) = line.split('\t')
self.word2id[word] = int(wid)
self.id2word[wid] = word
self.nextId = max(self.nextId, int(wid) + 1)
def GetID(self, word):
if not self.word2id.has_key(word):
self.word2id[word] = self.nextId
self.nextId += 1
return self.word2id[word]
def HasWord(self, word):
return self.word2id.has_key(word)
def HasId(self, wid):
return self.id2word.has_key(wid)
def GetWord(self, wid):
return self.id2word[wid]
def SaveVocab(self, vocabFile):
fOut = open(vocabFile, 'w')
for word in self.word2id.keys():
fOut.write("%s\t%s\n" % (word, self.word2id[word]))
def GetVocabSize(self):
return self.nextId-1
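# Illustrative usage (not in the original file):
#   v = Vocab()
#   v.GetID('the')        # -> 1, ids are assigned in insertion order
#   v.GetID('cat')        # -> 2
#   v.GetID('the')        # -> 1, already known
#   v.HasWord('cat')      # -> True
#   v.GetVocabSize()      # -> 2
#   v.SaveVocab('vocab.txt')   # writes one "word\tid" line per entry
# Note: only ids read from a vocab file are registered in id2word, so
# GetWord()/HasId() apply to vocabularies loaded from disk.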
| gpl-3.0 | 3,255,228,867,409,189,400 | 28.055556 | 63 | 0.533461 | false |
michaelWagner/oppia | extensions/triggers/trigger_classes.py | 19 | 2484 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for defining triggers.
Although this module is in extensions/, it is not provided as an extension
framework for third-party developers. This is because reacting to triggers
involves changes to core code.
"""
from extensions import domain
class BaseTrigger(object):
"""Base trigger definition class.
This class is not meant to be user-editable. The only methods on it should
be get()-type methods.
"""
# Customization arg specifications for the trigger, including their
# descriptions, schemas and default values. Overridden in subclasses.
_customization_arg_specs = []
@classmethod
def get_trigger_type(cls):
return cls.__name__
@property
def customization_arg_specs(self):
return [
domain.CustomizationArgSpec(**cas)
for cas in self._customization_arg_specs]
class NthResubmission(BaseTrigger):
"""This trigger is invoked when an answer is submitted to the same state
for the nth time in succession, and the destination that would result due
to normal evaluation would cause a further loop-around to the same state.
"""
_customization_arg_specs = [{
'name': 'num_submits',
'description': (
'The number of submissions after which to react, if the last '
'submission would result in a further loop-around'),
'schema': {
'type': 'int'
},
'default_value': 3,
}]
class ClickButton(BaseTrigger):
"""The presence of this trigger adds a button to the UI. The trigger is
invoked when the learner clicks this button.
"""
_customization_arg_specs = [{
'name': 'button_text',
'description': 'The text of the button',
'schema': {
'type': 'unicode',
},
'default_value': 'Help, I\'m stuck',
}]
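# Illustrative usage (not in the original file): how the specs above surface
# through the BaseTrigger API.
#   NthResubmission.get_trigger_type()   # -> 'NthResubmission'
#   NthResubmission().customization_arg_specs
#   # -> a list of domain.CustomizationArgSpec objects built from the
#   #    _customization_arg_specs dicts above (field names assumed to mirror
#   #    the dict keys).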
| apache-2.0 | -3,891,276,192,605,168,000 | 31.684211 | 78 | 0.672303 | false |
jermowery/xos | xos/tosca/tests/computetest.py | 4 | 4688 | from basetest import BaseToscaTest
from core.models import Instance, Slice
class ComputeTest(BaseToscaTest):
tests = [ # "create_compute_m1_tiny", XXX m1.tiny does not exist on cloudlab
"create_compute_m1_small",
"create_compute_m1_large_8192MB",
"create_compute_m1_large_8GB",
"destroy_compute",
"create_compute_scalable",
"destroy_compute_scalable",
]
def cleanup(self):
self.try_to_delete(Instance, name="test_compute1")
self.try_to_delete(Instance, name="test_compute1-0")
self.try_to_delete(Instance, name="test_compute1-1")
self.try_to_delete(Instance, name="test_compute1-2")
self.try_to_delete(Instance, name="test_compute1-3")
self.try_to_delete(Slice, name="testsite_slice1")
def get_base_templates(self):
return self.make_nodetemplate("testsite", "tosca.nodes.Site") + \
self.make_nodetemplate("testsite_slice1", "tosca.nodes.Slice", reqs=[("testsite", "tosca.relationships.MemberOfSite")])
def create_compute_m1_tiny(self):
self.assert_noobj(Instance, "test_compute1")
self.execute(self.get_base_templates() +
self.make_compute("testsite_slice1", "test_compute1", disk_size="1 GB", mem_size="500 MB"))
instance = self.assert_obj(Instance, "test_compute1")
assert(instance.flavor.name == "m1.tiny")
def create_compute_m1_small(self):
self.assert_noobj(Instance, "test_compute1")
self.execute(self.get_base_templates() +
self.make_compute("testsite_slice1", "test_compute1", disk_size="1 GB", mem_size="513 MB"))
instance = self.assert_obj(Instance, "test_compute1")
assert(instance.flavor.name == "m1.small")
def create_compute_m1_large_8192MB(self):
self.assert_noobj(Instance, "test_compute1")
self.execute(self.get_base_templates() +
self.make_compute("testsite_slice1", "test_compute1", mem_size="8192 MB"))
instance = self.assert_obj(Instance, "test_compute1")
assert(instance.flavor.name == "m1.large")
def create_compute_m1_large_8GB(self):
self.assert_noobj(Instance, "test_compute1")
self.execute(self.get_base_templates() +
self.make_compute("testsite_slice1", "test_compute1", mem_size="8 GB"))
instance = self.assert_obj(Instance, "test_compute1")
assert(instance.flavor.name == "m1.large")
def destroy_compute(self):
self.execute(self.get_base_templates() +
self.make_compute("testsite_slice1", "test_compute1"))
self.assert_obj(Instance, "test_compute1")
self.destroy(self.get_base_templates() +
self.make_compute("testsite_slice1", "test_compute1"))
self.assert_noobj(Instance, "test_compute1")
def create_compute_scalable(self):
self.assert_noobj(Instance, "test_compute1-1")
self.assert_noobj(Instance, "test_compute1-2")
self.assert_noobj(Instance, "test_compute1-3")
self.execute(self.get_base_templates() +
self.make_compute("testsite_slice1", "test_compute1", mem_size="8 GB",
caps={"scalable": {"min_instances": 2, "max_instances": 3, "default_instances": 2}}))
# there should be two instances
instance0 = self.assert_obj(Instance, "test_compute1-0")
instance1 = self.assert_obj(Instance, "test_compute1-1")
self.assert_noobj(Instance, "test_compute1-2")
def destroy_compute_scalable(self):
self.assert_noobj(Instance, "test_compute1-1")
self.assert_noobj(Instance, "test_compute1-2")
self.assert_noobj(Instance, "test_compute1-3")
self.execute(self.get_base_templates() +
self.make_compute("testsite_slice1", "test_compute1", mem_size="8 GB",
caps={"scalable": {"min_instances": 2, "max_instances": 3, "default_instances": 2}}))
# there should be two instances
instance0 = self.assert_obj(Instance, "test_compute1-0")
instance1 = self.assert_obj(Instance, "test_compute1-1")
self.destroy(self.get_base_templates() +
self.make_compute("testsite_slice1", "test_compute1", mem_size="8 GB",
caps={"scalable": {"min_instances": 2, "max_instances": 3, "default_instances": 2}}))
self.assert_noobj(Instance, "test_compute1-0")
self.assert_noobj(Instance, "test_compute1-1")
if __name__ == "__main__":
ComputeTest()
| apache-2.0 | -3,877,471,315,966,018,000 | 47.833333 | 134 | 0.609002 | false |
yampiopl/Yamcoin | contrib/bitrpc/bitrpc.py | 2348 | 7835 | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
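# Illustrative usage (not part of the original script): pass the RPC command as
# the first argument, e.g.
#   python bitrpc.py getinfo
#   python bitrpc.py getbalance
# Any additional values a command needs are prompted for interactively below.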
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported" | mit | -8,455,746,696,829,122,000 | 23.185185 | 79 | 0.661774 | false |
ashray/VTK-EVM | ThirdParty/Twisted/twisted/web/test/test_static.py | 28 | 56293 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.static}.
"""
import inspect
import mimetypes
import os
import re
import StringIO
from zope.interface.verify import verifyObject
from twisted.internet import abstract, interfaces
from twisted.python.runtime import platform
from twisted.python.filepath import FilePath
from twisted.python import log
from twisted.trial.unittest import TestCase
from twisted.web import static, http, script, resource
from twisted.web.server import UnsupportedMethod
from twisted.web.test.test_web import DummyRequest
from twisted.web.test._util import _render
class StaticDataTests(TestCase):
"""
Tests for L{Data}.
"""
def test_headRequest(self):
"""
L{Data.render} returns an empty response body for a I{HEAD} request.
"""
data = static.Data("foo", "bar")
request = DummyRequest([''])
request.method = 'HEAD'
d = _render(data, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), "")
d.addCallback(cbRendered)
return d
def test_invalidMethod(self):
"""
L{Data.render} raises L{UnsupportedMethod} in response to a non-I{GET},
non-I{HEAD} request.
"""
data = static.Data("foo", "bar")
request = DummyRequest([''])
request.method = 'POST'
self.assertRaises(UnsupportedMethod, data.render, request)
class StaticFileTests(TestCase):
"""
Tests for the basic behavior of L{File}.
"""
def _render(self, resource, request):
return _render(resource, request)
def test_invalidMethod(self):
"""
L{File.render} raises L{UnsupportedMethod} in response to a non-I{GET},
non-I{HEAD} request.
"""
request = DummyRequest([''])
request.method = 'POST'
path = FilePath(self.mktemp())
path.setContent("foo")
file = static.File(path.path)
self.assertRaises(UnsupportedMethod, file.render, request)
def test_notFound(self):
"""
If a request is made which encounters a L{File} before a final segment
which does not correspond to any file in the path the L{File} was
created with, a not found response is sent.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest(['foobar'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_emptyChild(self):
"""
The C{''} child of a L{File} which corresponds to a directory in the
filesystem is a L{DirectoryLister}.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest([''])
child = resource.getChildForRequest(file, request)
self.assertIsInstance(child, static.DirectoryLister)
self.assertEqual(child.path, base.path)
def test_securityViolationNotFound(self):
"""
If a request is made which encounters a L{File} before a final segment
which cannot be looked up in the filesystem due to security
considerations, a not found response is sent.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest(['..'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_forbiddenResource(self):
"""
If the file in the filesystem which would satisfy a request cannot be
read, L{File.render} sets the HTTP response code to I{FORBIDDEN}.
"""
base = FilePath(self.mktemp())
base.setContent('')
# Make sure we can delete the file later.
self.addCleanup(base.chmod, 0700)
# Get rid of our own read permission.
base.chmod(0)
file = static.File(base.path)
request = DummyRequest([''])
d = self._render(file, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 403)
d.addCallback(cbRendered)
return d
if platform.isWindows():
test_forbiddenResource.skip = "Cannot remove read permission on Windows"
def test_indexNames(self):
"""
If a request is made which encounters a L{File} before a final empty
segment, a file in the L{File} instance's C{indexNames} list which
exists in the path the L{File} was created with is served as the
response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent("baz")
file = static.File(base.path)
file.indexNames = ['foo.bar']
request = DummyRequest([''])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), 'baz')
self.assertEqual(request.outgoingHeaders['content-length'], '3')
d.addCallback(cbRendered)
return d
def test_staticFile(self):
"""
If a request is made which encounters a L{File} before a final segment
which names a file in the path the L{File} was created with, that file
is served as the response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent("baz")
file = static.File(base.path)
request = DummyRequest(['foo.bar'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), 'baz')
self.assertEqual(request.outgoingHeaders['content-length'], '3')
d.addCallback(cbRendered)
return d
def test_staticFileDeletedGetChild(self):
"""
A L{static.File} created for a directory which does not exist should
return childNotFound from L{static.File.getChild}.
"""
staticFile = static.File(self.mktemp())
request = DummyRequest(['foo.bar'])
child = staticFile.getChild("foo.bar", request)
self.assertEqual(child, staticFile.childNotFound)
def test_staticFileDeletedRender(self):
"""
A L{static.File} created for a file which does not exist should render
its C{childNotFound} page.
"""
staticFile = static.File(self.mktemp())
request = DummyRequest(['foo.bar'])
request2 = DummyRequest(['foo.bar'])
d = self._render(staticFile, request)
d2 = self._render(staticFile.childNotFound, request2)
def cbRendered2(ignored):
def cbRendered(ignored):
self.assertEqual(''.join(request.written),
''.join(request2.written))
d.addCallback(cbRendered)
return d
d2.addCallback(cbRendered2)
return d2
def test_headRequest(self):
"""
L{static.File.render} returns an empty response body for I{HEAD}
requests.
"""
path = FilePath(self.mktemp())
path.setContent("foo")
file = static.File(path.path)
request = DummyRequest([''])
request.method = 'HEAD'
d = _render(file, request)
def cbRendered(ignored):
self.assertEqual("".join(request.written), "")
d.addCallback(cbRendered)
return d
def test_processors(self):
"""
If a request is made which encounters a L{File} before a final segment
which names a file with an extension which is in the L{File}'s
C{processors} mapping, the processor associated with that extension is
used to serve the response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent(
"from twisted.web.static import Data\n"
"resource = Data('dynamic world','text/plain')\n")
file = static.File(base.path)
file.processors = {'.bar': script.ResourceScript}
request = DummyRequest(["foo.bar"])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), 'dynamic world')
self.assertEqual(request.outgoingHeaders['content-length'], '13')
d.addCallback(cbRendered)
return d
def test_ignoreExt(self):
"""
The list of ignored extensions can be set by passing a value to
L{File.__init__} or by calling L{File.ignoreExt} later.
"""
file = static.File(".")
self.assertEqual(file.ignoredExts, [])
file.ignoreExt(".foo")
file.ignoreExt(".bar")
self.assertEqual(file.ignoredExts, [".foo", ".bar"])
file = static.File(".", ignoredExts=(".bar", ".baz"))
self.assertEqual(file.ignoredExts, [".bar", ".baz"])
def test_ignoredExtensionsIgnored(self):
"""
A request for the I{base} child of a L{File} succeeds with a resource
for the I{base<extension>} file in the path the L{File} was created
with if such a file exists and the L{File} has been configured to
ignore the I{<extension>} extension.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child('foo.bar').setContent('baz')
base.child('foo.quux').setContent('foobar')
file = static.File(base.path, ignoredExts=(".bar",))
request = DummyRequest(["foo"])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), 'baz')
d.addCallback(cbRendered)
return d
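# Illustrative note (not part of the original tests): outside the test suite the
# resource exercised above is typically used to publish a directory over HTTP,
# roughly as follows.
#
#   from twisted.internet import reactor
#   from twisted.web.server import Site
#   from twisted.web.static import File
#
#   root = File('/srv/www')              # directory to expose
#   root.indexNames = ['index.html']     # cf. test_indexNames above
#   reactor.listenTCP(8080, Site(root))
#   reactor.run()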
class StaticMakeProducerTests(TestCase):
"""
Tests for L{File.makeProducer}.
"""
def makeResourceWithContent(self, content, type=None, encoding=None):
"""
Make a L{static.File} resource that has C{content} for its content.
@param content: The bytes to use as the contents of the resource.
@param type: Optional value for the content type of the resource.
"""
fileName = self.mktemp()
fileObject = open(fileName, 'w')
fileObject.write(content)
fileObject.close()
resource = static.File(fileName)
resource.encoding = encoding
resource.type = type
return resource
def contentHeaders(self, request):
"""
Extract the content-* headers from the L{DummyRequest} C{request}.
This returns the subset of C{request.outgoingHeaders} of headers that
start with 'content-'.
"""
contentHeaders = {}
for k, v in request.outgoingHeaders.iteritems():
if k.startswith('content-'):
contentHeaders[k] = v
return contentHeaders
def test_noRangeHeaderGivesNoRangeStaticProducer(self):
"""
makeProducer when no Range header is set returns an instance of
NoRangeStaticProducer.
"""
resource = self.makeResourceWithContent('')
request = DummyRequest([])
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.NoRangeStaticProducer)
def test_noRangeHeaderSets200OK(self):
"""
makeProducer when no Range header is set sets the responseCode on the
request to 'OK'.
"""
resource = self.makeResourceWithContent('')
request = DummyRequest([])
resource.makeProducer(request, resource.openForReading())
self.assertEqual(http.OK, request.responseCode)
def test_noRangeHeaderSetsContentHeaders(self):
"""
makeProducer when no Range header is set sets the Content-* headers
for the response.
"""
length = 123
contentType = "text/plain"
contentEncoding = 'gzip'
resource = self.makeResourceWithContent(
'a'*length, type=contentType, encoding=contentEncoding)
request = DummyRequest([])
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-type': contentType, 'content-length': str(length),
'content-encoding': contentEncoding},
self.contentHeaders(request))
def test_singleRangeGivesSingleRangeStaticProducer(self):
"""
makeProducer when the Range header requests a single byte range
returns an instance of SingleRangeStaticProducer.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3'
resource = self.makeResourceWithContent('abcdef')
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.SingleRangeStaticProducer)
def test_singleRangeSets206PartialContent(self):
"""
makeProducer when the Range header requests a single, satisfiable byte
range sets the response code on the request to 'Partial Content'.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3'
resource = self.makeResourceWithContent('abcdef')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.PARTIAL_CONTENT, request.responseCode)
def test_singleRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single, satisfiable byte
range sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3'
contentType = "text/plain"
contentEncoding = 'gzip'
resource = self.makeResourceWithContent('abcdef', type=contentType, encoding=contentEncoding)
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-type': contentType, 'content-encoding': contentEncoding,
'content-range': 'bytes 1-3/6', 'content-length': '3'},
self.contentHeaders(request))
def test_singleUnsatisfiableRangeReturnsSingleRangeStaticProducer(self):
"""
makeProducer still returns an instance of L{SingleRangeStaticProducer}
when the Range header requests a single unsatisfiable byte range.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=4-10'
resource = self.makeResourceWithContent('abc')
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.SingleRangeStaticProducer)
    def test_singleUnsatisfiableRangeSets416RequestedRangeNotSatisfiable(self):
        """
        makeProducer sets the response code of the request to 'Requested
Range Not Satisfiable' when the Range header requests a single
unsatisfiable byte range.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=4-10'
resource = self.makeResourceWithContent('abc')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.REQUESTED_RANGE_NOT_SATISFIABLE, request.responseCode)
def test_singleUnsatisfiableRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single, unsatisfiable
byte range sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=4-10'
contentType = "text/plain"
resource = self.makeResourceWithContent('abc', type=contentType)
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-type': 'text/plain', 'content-length': '0',
'content-range': 'bytes */3'},
self.contentHeaders(request))
def test_singlePartiallyOverlappingRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single byte range that
partly overlaps the resource sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=2-10'
contentType = "text/plain"
resource = self.makeResourceWithContent('abc', type=contentType)
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-type': 'text/plain', 'content-length': '1',
'content-range': 'bytes 2-2/3'},
self.contentHeaders(request))
def test_multipleRangeGivesMultipleRangeStaticProducer(self):
"""
        makeProducer when the Range header requests multiple byte ranges
returns an instance of MultipleRangeStaticProducer.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3,5-6'
resource = self.makeResourceWithContent('abcdef')
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.MultipleRangeStaticProducer)
def test_multipleRangeSets206PartialContent(self):
"""
makeProducer when the Range header requests a multiple satisfiable
byte ranges sets the response code on the request to 'Partial
Content'.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3,5-6'
resource = self.makeResourceWithContent('abcdef')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.PARTIAL_CONTENT, request.responseCode)
    def test_multipleRangeSetsContentHeaders(self):
        """
        makeProducer when the Range header requests multiple satisfiable byte
        ranges sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3,5-6'
resource = self.makeResourceWithContent(
'abcdefghijkl', encoding='gzip')
producer = resource.makeProducer(request, resource.openForReading())
contentHeaders = self.contentHeaders(request)
# The only content-* headers set are content-type and content-length.
self.assertEqual(
set(['content-length', 'content-type']),
set(contentHeaders.keys()))
# The content-length depends on the boundary used in the response.
expectedLength = 5
for boundary, offset, size in producer.rangeInfo:
expectedLength += len(boundary)
self.assertEqual(expectedLength, contentHeaders['content-length'])
# Content-type should be set to a value indicating a multipart
# response and the boundary used to separate the parts.
self.assertIn('content-type', contentHeaders)
contentType = contentHeaders['content-type']
self.assertNotIdentical(
None, re.match(
'multipart/byteranges; boundary="[^"]*"\Z', contentType))
# Content-encoding is not set in the response to a multiple range
# response, which is a bit wussy but works well enough with the way
# static.File does content-encodings...
self.assertNotIn('content-encoding', contentHeaders)
def test_multipleUnsatisfiableRangesReturnsMultipleRangeStaticProducer(self):
"""
        makeProducer still returns an instance of L{MultipleRangeStaticProducer}
when the Range header requests multiple ranges, none of which are
satisfiable.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=10-12,15-20'
resource = self.makeResourceWithContent('abc')
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.MultipleRangeStaticProducer)
    def test_multipleUnsatisfiableRangesSets416RequestedRangeNotSatisfiable(self):
        """
        makeProducer sets the response code of the request to 'Requested
Range Not Satisfiable' when the Range header requests multiple ranges,
none of which are satisfiable.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=10-12,15-20'
resource = self.makeResourceWithContent('abc')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.REQUESTED_RANGE_NOT_SATISFIABLE, request.responseCode)
def test_multipleUnsatisfiableRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests multiple ranges, none of
which are satisfiable, sets the Content-* headers appropriately.
"""
        request = DummyRequest([])
        contentType = "text/plain"
        request.headers['range'] = 'bytes=10-12,15-20'
resource = self.makeResourceWithContent('abc', type=contentType)
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-length': '0', 'content-range': 'bytes */3'},
self.contentHeaders(request))
def test_oneSatisfiableRangeIsEnough(self):
"""
makeProducer when the Range header requests multiple ranges, at least
one of which matches, sets the response code to 'Partial Content'.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3,100-200'
resource = self.makeResourceWithContent('abcdef')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.PARTIAL_CONTENT, request.responseCode)
class StaticProducerTests(TestCase):
"""
Tests for the abstract L{StaticProducer}.
"""
def test_stopProducingClosesFile(self):
"""
L{StaticProducer.stopProducing} closes the file object the producer is
producing data from.
"""
fileObject = StringIO.StringIO()
producer = static.StaticProducer(None, fileObject)
producer.stopProducing()
self.assertTrue(fileObject.closed)
def test_stopProducingSetsRequestToNone(self):
"""
L{StaticProducer.stopProducing} sets the request instance variable to
None, which indicates to subclasses' resumeProducing methods that no
more data should be produced.
"""
fileObject = StringIO.StringIO()
producer = static.StaticProducer(DummyRequest([]), fileObject)
producer.stopProducing()
self.assertIdentical(None, producer.request)
class NoRangeStaticProducerTests(TestCase):
"""
Tests for L{NoRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{NoRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.NoRangeStaticProducer(None, None))
def test_resumeProducingProducesContent(self):
"""
L{NoRangeStaticProducer.resumeProducing} writes content from the
resource to the request.
"""
request = DummyRequest([])
content = 'abcdef'
producer = static.NoRangeStaticProducer(
request, StringIO.StringIO(content))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual(content, ''.join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{NoRangeStaticProducer.start} writes at most
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
"""
request = DummyRequest([])
bufferSize = abstract.FileDescriptor.bufferSize
content = 'a' * (2*bufferSize + 1)
producer = static.NoRangeStaticProducer(
request, StringIO.StringIO(content))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
expected = [
content[0:bufferSize],
content[bufferSize:2*bufferSize],
content[2*bufferSize:]
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{NoRangeStaticProducer.resumeProducing} calls finish() on the request
after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.NoRangeStaticProducer(
request, StringIO.StringIO('abcdef'))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class SingleRangeStaticProducerTests(TestCase):
"""
Tests for L{SingleRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{SingleRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.SingleRangeStaticProducer(None, None, None, None))
def test_resumeProducingProducesContent(self):
"""
L{SingleRangeStaticProducer.resumeProducing} writes the given amount
of content, starting at the given offset, from the resource to the
request.
"""
request = DummyRequest([])
content = 'abcdef'
producer = static.SingleRangeStaticProducer(
request, StringIO.StringIO(content), 1, 3)
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
self.assertEqual(content[1:4], ''.join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{SingleRangeStaticProducer.start} writes at most
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
"""
request = DummyRequest([])
bufferSize = abstract.FileDescriptor.bufferSize
content = 'abc' * bufferSize
producer = static.SingleRangeStaticProducer(
request, StringIO.StringIO(content), 1, bufferSize+10)
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
expected = [
content[1:bufferSize+1],
content[bufferSize+1:bufferSize+11],
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{SingleRangeStaticProducer.resumeProducing} calls finish() on the
request after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.SingleRangeStaticProducer(
request, StringIO.StringIO('abcdef'), 1, 1)
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class MultipleRangeStaticProducerTests(TestCase):
"""
Tests for L{MultipleRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{MultipleRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.MultipleRangeStaticProducer(None, None, None))
def test_resumeProducingProducesContent(self):
"""
L{MultipleRangeStaticProducer.resumeProducing} writes the requested
chunks of content from the resource to the request, with the supplied
boundaries in between each chunk.
"""
request = DummyRequest([])
content = 'abcdef'
producer = static.MultipleRangeStaticProducer(
request, StringIO.StringIO(content), [('1', 1, 3), ('2', 5, 1)])
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
self.assertEqual('1bcd2f', ''.join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{MultipleRangeStaticProducer.start} writes about
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
To be specific about the 'about' above: it can write slightly more,
for example in the case where the first boundary plus the first chunk
is less than C{bufferSize} but first boundary plus the first chunk
plus the second boundary is more, but this is unimportant as in
practice the boundaries are fairly small. On the other side, it is
important for performance to bundle up several small chunks into one
call to request.write.
"""
request = DummyRequest([])
content = '0123456789' * 2
producer = static.MultipleRangeStaticProducer(
request, StringIO.StringIO(content),
[('a', 0, 2), ('b', 5, 10), ('c', 0, 0)])
producer.bufferSize = 10
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
expected = [
'a' + content[0:2] + 'b' + content[5:11],
content[11:15] + 'c',
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{MultipleRangeStaticProducer.resumeProducing} calls finish() on the
request after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.MultipleRangeStaticProducer(
request, StringIO.StringIO('abcdef'), [('', 1, 2)])
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class RangeTests(TestCase):
"""
Tests for I{Range-Header} support in L{twisted.web.static.File}.
@type file: L{file}
@ivar file: Temporary (binary) file containing the content to be served.
@type resource: L{static.File}
@ivar resource: A leaf web resource using C{file} as content.
@type request: L{DummyRequest}
@ivar request: A fake request, requesting C{resource}.
@type catcher: L{list}
@ivar catcher: List which gathers all log information.
"""
def setUp(self):
"""
Create a temporary file with a fixed payload of 64 bytes. Create a
resource for that file and create a request which will be for that
resource. Each test can set a different range header to test different
aspects of the implementation.
"""
path = FilePath(self.mktemp())
# This is just a jumble of random stuff. It's supposed to be a good
# set of data for this test, particularly in order to avoid
# accidentally seeing the right result by having a byte sequence
# repeated at different locations or by having byte values which are
# somehow correlated with their position in the string.
self.payload = ('\xf8u\xf3E\x8c7\xce\x00\x9e\xb6a0y0S\xf0\xef\xac\xb7'
'\xbe\xb5\x17M\x1e\x136k{\x1e\xbe\x0c\x07\x07\t\xd0'
'\xbckY\xf5I\x0b\xb8\x88oZ\x1d\x85b\x1a\xcdk\xf2\x1d'
'&\xfd%\xdd\x82q/A\x10Y\x8b')
path.setContent(self.payload)
self.file = path.open()
self.resource = static.File(self.file.name)
self.resource.isLeaf = 1
self.request = DummyRequest([''])
self.request.uri = self.file.name
self.catcher = []
log.addObserver(self.catcher.append)
def tearDown(self):
"""
Clean up the resource file and the log observer.
"""
self.file.close()
log.removeObserver(self.catcher.append)
def _assertLogged(self, expected):
"""
Asserts that a given log message occurred with an expected message.
"""
logItem = self.catcher.pop()
self.assertEqual(logItem["message"][0], expected)
self.assertEqual(
            self.catcher, [], "An additional log occurred: %r" % (logItem,))
def test_invalidRanges(self):
"""
L{File._parseRangeHeader} raises L{ValueError} when passed
syntactically invalid byte ranges.
"""
f = self.resource._parseRangeHeader
# there's no =
self.assertRaises(ValueError, f, 'bytes')
# unknown isn't a valid Bytes-Unit
self.assertRaises(ValueError, f, 'unknown=1-2')
# there's no - in =stuff
self.assertRaises(ValueError, f, 'bytes=3')
# both start and end are empty
self.assertRaises(ValueError, f, 'bytes=-')
# start isn't an integer
self.assertRaises(ValueError, f, 'bytes=foo-')
# end isn't an integer
self.assertRaises(ValueError, f, 'bytes=-foo')
# end isn't equal to or greater than start
self.assertRaises(ValueError, f, 'bytes=5-4')
def test_rangeMissingStop(self):
"""
A single bytes range without an explicit stop position is parsed into a
two-tuple giving the start position and C{None}.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=0-'), [(0, None)])
def test_rangeMissingStart(self):
"""
A single bytes range without an explicit start position is parsed into
a two-tuple of C{None} and the end position.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=-3'), [(None, 3)])
def test_range(self):
"""
A single bytes range with explicit start and stop positions is parsed
into a two-tuple of those positions.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=2-5'), [(2, 5)])
def test_rangeWithSpace(self):
"""
A single bytes range with whitespace in allowed places is parsed in
the same way as it would be without the whitespace.
"""
self.assertEqual(
self.resource._parseRangeHeader(' bytes=1-2 '), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes =1-2 '), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes= 1-2'), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes=1 -2'), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes=1- 2'), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes=1-2 '), [(1, 2)])
def test_nullRangeElements(self):
"""
If there are multiple byte ranges but only one is non-null, the
non-null range is parsed and its start and stop returned.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=1-2,\r\n, ,\t'), [(1, 2)])
def test_multipleRanges(self):
"""
If multiple byte ranges are specified their starts and stops are
returned.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=1-2,3-4'),
[(1, 2), (3, 4)])
def test_bodyLength(self):
"""
A correct response to a range request is as long as the length of the
requested range.
"""
self.request.headers['range'] = 'bytes=0-43'
self.resource.render(self.request)
self.assertEqual(len(''.join(self.request.written)), 44)
def test_invalidRangeRequest(self):
"""
An incorrect range request (RFC 2616 defines a correct range request as
a Bytes-Unit followed by a '=' character followed by a specific range.
Only 'bytes' is defined) results in the range header value being logged
and a normal 200 response being sent.
"""
self.request.headers['range'] = range = 'foobar=0-43'
self.resource.render(self.request)
expected = "Ignoring malformed Range header %r" % (range,)
self._assertLogged(expected)
self.assertEqual(''.join(self.request.written), self.payload)
self.assertEqual(self.request.responseCode, http.OK)
self.assertEqual(
self.request.outgoingHeaders['content-length'],
str(len(self.payload)))
def parseMultipartBody(self, body, boundary):
"""
Parse C{body} as a multipart MIME response separated by C{boundary}.
        Note that this will fail the calling test on certain syntactic
problems.
"""
sep = "\r\n--" + boundary
parts = ''.join(body).split(sep)
self.assertEqual('', parts[0])
self.assertEqual('--\r\n', parts[-1])
parsed_parts = []
for part in parts[1:-1]:
before, header1, header2, blank, partBody = part.split('\r\n', 4)
headers = header1 + '\n' + header2
self.assertEqual('', before)
self.assertEqual('', blank)
partContentTypeValue = re.search(
'^content-type: (.*)$', headers, re.I|re.M).group(1)
start, end, size = re.search(
'^content-range: bytes ([0-9]+)-([0-9]+)/([0-9]+)$',
headers, re.I|re.M).groups()
parsed_parts.append(
{'contentType': partContentTypeValue,
'contentRange': (start, end, size),
'body': partBody})
return parsed_parts
def test_multipleRangeRequest(self):
"""
        The response to a request for multiple byte ranges is a MIME-ish
multipart response.
"""
startEnds = [(0, 2), (20, 30), (40, 50)]
rangeHeaderValue = ','.join(["%s-%s"%(s,e) for (s, e) in startEnds])
self.request.headers['range'] = 'bytes=' + rangeHeaderValue
self.resource.render(self.request)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
boundary = re.match(
'^multipart/byteranges; boundary="(.*)"$',
self.request.outgoingHeaders['content-type']).group(1)
parts = self.parseMultipartBody(''.join(self.request.written), boundary)
self.assertEqual(len(startEnds), len(parts))
for part, (s, e) in zip(parts, startEnds):
self.assertEqual(self.resource.type, part['contentType'])
start, end, size = part['contentRange']
self.assertEqual(int(start), s)
self.assertEqual(int(end), e)
self.assertEqual(int(size), self.resource.getFileSize())
self.assertEqual(self.payload[s:e+1], part['body'])
def test_multipleRangeRequestWithRangeOverlappingEnd(self):
"""
        The response to a request for multiple byte ranges is a MIME-ish
        multipart response, even when one of the ranges falls off the end of
the resource.
"""
startEnds = [(0, 2), (40, len(self.payload) + 10)]
rangeHeaderValue = ','.join(["%s-%s"%(s,e) for (s, e) in startEnds])
self.request.headers['range'] = 'bytes=' + rangeHeaderValue
self.resource.render(self.request)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
boundary = re.match(
'^multipart/byteranges; boundary="(.*)"$',
self.request.outgoingHeaders['content-type']).group(1)
parts = self.parseMultipartBody(''.join(self.request.written), boundary)
self.assertEqual(len(startEnds), len(parts))
for part, (s, e) in zip(parts, startEnds):
self.assertEqual(self.resource.type, part['contentType'])
start, end, size = part['contentRange']
self.assertEqual(int(start), s)
self.assertEqual(int(end), min(e, self.resource.getFileSize()-1))
self.assertEqual(int(size), self.resource.getFileSize())
self.assertEqual(self.payload[s:e+1], part['body'])
def test_implicitEnd(self):
"""
If the end byte position is omitted, then it is treated as if the
length of the resource was specified by the end byte position.
"""
self.request.headers['range'] = 'bytes=23-'
self.resource.render(self.request)
self.assertEqual(''.join(self.request.written), self.payload[23:])
self.assertEqual(len(''.join(self.request.written)), 41)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.outgoingHeaders['content-range'], 'bytes 23-63/64')
self.assertEqual(self.request.outgoingHeaders['content-length'], '41')
def test_implicitStart(self):
"""
If the start byte position is omitted but the end byte position is
supplied, then the range is treated as requesting the last -N bytes of
the resource, where N is the end byte position.
"""
self.request.headers['range'] = 'bytes=-17'
self.resource.render(self.request)
self.assertEqual(''.join(self.request.written), self.payload[-17:])
self.assertEqual(len(''.join(self.request.written)), 17)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.outgoingHeaders['content-range'], 'bytes 47-63/64')
self.assertEqual(self.request.outgoingHeaders['content-length'], '17')
def test_explicitRange(self):
"""
A correct response to a bytes range header request from A to B starts
with the A'th byte and ends with (including) the B'th byte. The first
byte of a page is numbered with 0.
"""
self.request.headers['range'] = 'bytes=3-43'
self.resource.render(self.request)
written = ''.join(self.request.written)
self.assertEqual(written, self.payload[3:44])
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.outgoingHeaders['content-range'], 'bytes 3-43/64')
self.assertEqual(
str(len(written)), self.request.outgoingHeaders['content-length'])
def test_explicitRangeOverlappingEnd(self):
"""
A correct response to a bytes range header request from A to B when B
is past the end of the resource starts with the A'th byte and ends
with the last byte of the resource. The first byte of a page is
numbered with 0.
"""
self.request.headers['range'] = 'bytes=40-100'
self.resource.render(self.request)
written = ''.join(self.request.written)
self.assertEqual(written, self.payload[40:])
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.outgoingHeaders['content-range'], 'bytes 40-63/64')
self.assertEqual(
str(len(written)), self.request.outgoingHeaders['content-length'])
def test_statusCodeRequestedRangeNotSatisfiable(self):
"""
If a range is syntactically invalid due to the start being greater than
the end, the range header is ignored (the request is responded to as if
it were not present).
"""
self.request.headers['range'] = 'bytes=20-13'
self.resource.render(self.request)
self.assertEqual(self.request.responseCode, http.OK)
self.assertEqual(''.join(self.request.written), self.payload)
self.assertEqual(
self.request.outgoingHeaders['content-length'],
str(len(self.payload)))
def test_invalidStartBytePos(self):
"""
If a range is unsatisfiable due to the start not being less than the
length of the resource, the response is 416 (Requested range not
satisfiable) and no data is written to the response body (RFC 2616,
section 14.35.1).
"""
self.request.headers['range'] = 'bytes=67-108'
self.resource.render(self.request)
self.assertEqual(
self.request.responseCode, http.REQUESTED_RANGE_NOT_SATISFIABLE)
self.assertEqual(''.join(self.request.written), '')
self.assertEqual(self.request.outgoingHeaders['content-length'], '0')
# Sections 10.4.17 and 14.16
self.assertEqual(
self.request.outgoingHeaders['content-range'],
'bytes */%d' % (len(self.payload),))
class DirectoryListerTest(TestCase):
"""
Tests for L{static.DirectoryLister}.
"""
def _request(self, uri):
request = DummyRequest([''])
request.uri = uri
return request
def test_renderHeader(self):
"""
        L{static.DirectoryLister} prints the request uri as the header of the
rendered content.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo'))
self.assertIn("<h1>Directory listing for foo</h1>", data)
self.assertIn("<title>Directory listing for foo</title>", data)
def test_renderUnquoteHeader(self):
"""
        L{static.DirectoryLister} unquotes the request uri before printing it.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo%20bar'))
self.assertIn("<h1>Directory listing for foo bar</h1>", data)
self.assertIn("<title>Directory listing for foo bar</title>", data)
def test_escapeHeader(self):
"""
        L{static.DirectoryLister} escapes "&", "<" and ">" after unquoting the
request uri.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo%26bar'))
self.assertIn("<h1>Directory listing for foo&bar</h1>", data)
self.assertIn("<title>Directory listing for foo&bar</title>", data)
def test_renderFiles(self):
"""
L{static.DirectoryLister} is able to list all the files inside a
directory.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('file1').setContent("content1")
path.child('file2').setContent("content2" * 1000)
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo'))
body = """<tr class="odd">
<td><a href="file1">file1</a></td>
<td>8B</td>
<td>[text/html]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="file2">file2</a></td>
<td>7K</td>
<td>[text/html]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_renderDirectories(self):
"""
L{static.DirectoryLister} is able to list all the directories inside
a directory.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('dir1').makedirs()
path.child('dir2 & 3').makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo'))
body = """<tr class="odd">
<td><a href="dir1/">dir1/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="dir2%20%26%203/">dir2 & 3/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_renderFiltered(self):
"""
        L{static.DirectoryLister} takes an optional C{dirs} argument that
        filters the list of directories and files printed.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('dir1').makedirs()
path.child('dir2').makedirs()
path.child('dir3').makedirs()
lister = static.DirectoryLister(path.path, dirs=["dir1", "dir3"])
data = lister.render(self._request('foo'))
body = """<tr class="odd">
<td><a href="dir1/">dir1/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="dir3/">dir3/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_oddAndEven(self):
"""
        L{static.DirectoryLister} gives alternating classes to the odd and
        even rows in the table.
"""
lister = static.DirectoryLister(None)
elements = [{"href": "", "text": "", "size": "", "type": "",
"encoding": ""} for i in xrange(5)]
content = lister._buildTableContent(elements)
self.assertEqual(len(content), 5)
self.assertTrue(content[0].startswith('<tr class="odd">'))
self.assertTrue(content[1].startswith('<tr class="even">'))
self.assertTrue(content[2].startswith('<tr class="odd">'))
self.assertTrue(content[3].startswith('<tr class="even">'))
self.assertTrue(content[4].startswith('<tr class="odd">'))
def test_contentType(self):
"""
L{static.DirectoryLister} produces a MIME-type that indicates that it is
HTML, and includes its charset (UTF-8).
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
req = self._request('')
lister.render(req)
self.assertEqual(req.outgoingHeaders['content-type'],
"text/html; charset=utf-8")
def test_mimeTypeAndEncodings(self):
"""
L{static.DirectoryLister} is able to detect mimetype and encoding of
listed files.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('file1.txt').setContent("file1")
path.child('file2.py').setContent("python")
path.child('file3.conf.gz').setContent("conf compressed")
path.child('file4.diff.bz2').setContent("diff compressed")
directory = os.listdir(path.path)
directory.sort()
contentTypes = {
".txt": "text/plain",
".py": "text/python",
".conf": "text/configuration",
".diff": "text/diff"
}
lister = static.DirectoryLister(path.path, contentTypes=contentTypes)
dirs, files = lister._getFilesAndDirectories(directory)
self.assertEqual(dirs, [])
self.assertEqual(files, [
{'encoding': '',
'href': 'file1.txt',
'size': '5B',
'text': 'file1.txt',
'type': '[text/plain]'},
{'encoding': '',
'href': 'file2.py',
'size': '6B',
'text': 'file2.py',
'type': '[text/python]'},
{'encoding': '[gzip]',
'href': 'file3.conf.gz',
'size': '15B',
'text': 'file3.conf.gz',
'type': '[text/configuration]'},
{'encoding': '[bzip2]',
'href': 'file4.diff.bz2',
'size': '15B',
'text': 'file4.diff.bz2',
'type': '[text/diff]'}])
def test_brokenSymlink(self):
"""
        If one of the files in the listing points to a broken symlink, it should not
be returned by L{static.DirectoryLister._getFilesAndDirectories}.
"""
path = FilePath(self.mktemp())
path.makedirs()
file1 = path.child('file1')
file1.setContent("file1")
file1.linkTo(path.child("file2"))
file1.remove()
lister = static.DirectoryLister(path.path)
directory = os.listdir(path.path)
directory.sort()
dirs, files = lister._getFilesAndDirectories(directory)
self.assertEqual(dirs, [])
self.assertEqual(files, [])
if getattr(os, "symlink", None) is None:
test_brokenSymlink.skip = "No symlink support"
def test_childrenNotFound(self):
"""
Any child resource of L{static.DirectoryLister} renders an HTTP
I{NOT FOUND} response code.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
request = self._request('')
child = resource.getChildForRequest(lister, request)
result = _render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, http.NOT_FOUND)
result.addCallback(cbRendered)
return result
def test_repr(self):
"""
L{static.DirectoryLister.__repr__} gives the path of the lister.
"""
path = FilePath(self.mktemp())
lister = static.DirectoryLister(path.path)
self.assertEqual(repr(lister),
"<DirectoryLister of %r>" % (path.path,))
self.assertEqual(str(lister),
"<DirectoryLister of %r>" % (path.path,))
def test_formatFileSize(self):
"""
        L{static.formatFileSize} formats an amount of bytes into a more readable
format.
"""
self.assertEqual(static.formatFileSize(0), "0B")
self.assertEqual(static.formatFileSize(123), "123B")
self.assertEqual(static.formatFileSize(4567), "4K")
self.assertEqual(static.formatFileSize(8900000), "8M")
self.assertEqual(static.formatFileSize(1234000000), "1G")
self.assertEqual(static.formatFileSize(1234567890000), "1149G")
class LoadMimeTypesTests(TestCase):
"""
Tests for the MIME type loading routine.
@cvar UNSET: A sentinel to signify that C{self.paths} has not been set by
the mock init.
"""
UNSET = object()
def setUp(self):
self.paths = self.UNSET
def _fakeInit(self, paths):
"""
A mock L{mimetypes.init} that records the value of the passed C{paths}
argument.
@param paths: The paths that will be recorded.
"""
self.paths = paths
def test_defaultArgumentIsNone(self):
"""
By default, C{None} is passed to C{mimetypes.init}.
"""
static.loadMimeTypes(init=self._fakeInit)
self.assertIdentical(self.paths, None)
def test_extraLocationsWork(self):
"""
Passed MIME type files are passed to C{mimetypes.init}.
"""
paths = ["x", "y", "z"]
static.loadMimeTypes(paths, init=self._fakeInit)
self.assertIdentical(self.paths, paths)
def test_usesGlobalInitFunction(self):
"""
By default, C{mimetypes.init} is called.
"""
# Checking mimetypes.inited doesn't always work, because
# something, somewhere, calls mimetypes.init. Yay global
# mutable state :)
args, _, _, defaults = inspect.getargspec(static.loadMimeTypes)
defaultInit = defaults[args.index("init")]
self.assertIdentical(defaultInit, mimetypes.init)
| bsd-3-clause | 3,713,034,365,914,449,400 | 35.553896 | 101 | 0.616684 | false |
liukaijv/XlsxWriter | xlsxwriter/test/worksheet/test_worksheet03.py | 8 | 4143 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
from ...format import Format
class TestAssembleWorksheet(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with column formatting set."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
cell_format = Format({'xf_index': 1})
worksheet.set_column(1, 3, 5)
worksheet.set_column(5, 5, 8, None, {'hidden': True})
worksheet.set_column(7, 7, None, cell_format)
worksheet.set_column(9, 9, 2)
worksheet.set_column(11, 11, None, None, {'hidden': True})
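        # Each set_column() call above corresponds to one <col> element in
        # the expected XML below; widths are written after XlsxWriter applies
        # Excel's cell-padding adjustment, so a user width of 5 appears as
        # 5.7109375 in the expected output.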
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="F1:H1"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<cols>
<col min="2" max="4" width="5.7109375" customWidth="1"/>
<col min="6" max="6" width="8.7109375" hidden="1" customWidth="1"/>
<col min="8" max="8" width="9.140625" style="1"/>
<col min="10" max="10" width="2.7109375" customWidth="1"/>
<col min="12" max="12" width="0" hidden="1" customWidth="1"/>
</cols>
<sheetData/>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
def test_assemble_xml_file_A1(self):
"""
Test writing a worksheet with column formatting set using
A1 Notation.
"""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
cell_format = Format({'xf_index': 1})
worksheet.set_column('B:D', 5)
worksheet.set_column('F:F', 8, None, {'hidden': True})
worksheet.set_column('H:H', None, cell_format)
worksheet.set_column('J:J', 2)
worksheet.set_column('L:L', None, None, {'hidden': True})
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="F1:H1"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<cols>
<col min="2" max="4" width="5.7109375" customWidth="1"/>
<col min="6" max="6" width="8.7109375" hidden="1" customWidth="1"/>
<col min="8" max="8" width="9.140625" style="1"/>
<col min="10" max="10" width="2.7109375" customWidth="1"/>
<col min="12" max="12" width="0" hidden="1" customWidth="1"/>
</cols>
<sheetData/>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
| bsd-2-clause | -57,700,026,657,502,620 | 38.457143 | 171 | 0.522327 | false |
petertodd/bitcoin | contrib/devtools/update-translations.py | 54 | 2334 | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
- remove 'unfinished' translation items
'''
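# Rough usage sketch (assumes the Transifex command line client 'tx' is
# installed and a .tx/config mapping exists at the repository root; this
# script itself only verifies that it is run from the repository root):
#
#   cd /path/to/bitcoin
#   python contrib/devtools/update-translations.py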
from __future__ import division, print_function
import subprocess
import re
import sys
import os
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'bitcoin_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def postprocess_translations():
print('Postprocessing...')
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts') or filename == SOURCE_LANG:
continue
filepath = os.path.join(LOCALE_DIR, filename)
with open(filepath, 'rb') as f:
data = f.read()
# remove non-allowed control characters
data = re.sub('[\x00-\x09\x0b\x0c\x0e-\x1f]', '', data)
data = data.split('\n')
# strip locations from non-origin translation
# location tags are used to guide translators, they are not necessary for compilation
# TODO: actually process XML instead of relying on Transifex's one-tag-per-line output format
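        # For illustration (hypothetical line, not taken from a real .ts
        # file), an entry such as
        #   <location filename="../forms/aboutdialog.ui" line="14"/>
        # is dropped by the filter below.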
data = [line for line in data if not '<location' in line]
with open(filepath, 'wb') as f:
f.write('\n'.join(data))
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
| mit | -5,883,293,023,253,031,000 | 34.363636 | 101 | 0.682519 | false |
traveloka/ansible | lib/ansible/modules/network/eos/eos_config.py | 25 | 12504 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = """
---
module: eos_config
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage Arista EOS configuration sections
description:
- Arista EOS configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with eos configuration sections in
a deterministic way. This module works with either CLI or eAPI
transports.
extends_documentation_fragment: eos
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
required: false
default: null
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
required: false
default: null
src:
description:
- The I(src) argument provides a path to the configuration file
to load into the remote system. The path can either be a full
system path to the configuration file if the value starts with /
or relative to the root of the implemented role or playbook.
This argument is mutually exclusive with the I(lines) and
I(parents) arguments.
required: false
default: null
version_added: "2.2"
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
default: null
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
required: false
default: line
choices: ['line', 'block', 'config']
force:
description:
- The force argument instructs the module to not consider the
        current device's running-config. When set to true, this will
cause the module to push the contents of I(src) into the device
without first checking if already configured.
- Note this argument should be considered deprecated. To achieve
the equivalent, set the C(match=none) which is idempotent. This argument
will be removed in a future release.
required: false
default: false
choices: ['yes', 'no']
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
default: no
choices: ['yes', 'no']
version_added: "2.2"
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
required: false
default: null
defaults:
description:
- The I(defaults) argument will influence how the running-config
is collected from the device. When the value is set to true,
        the command used to collect the running-config is appended with
the all keyword. When the value is set to false, the command
is issued without the all keyword
required: false
default: false
version_added: "2.2"
save:
description:
- The C(save) argument instructs the module to save the
running-config to startup-config. This operation is performed
after any changes are made to the current running config. If
no changes are made, the configuration is still saved to the
startup config. This option will always cause the module to
return changed.
required: false
default: false
version_added: "2.2"
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
- eos_config:
lines: hostname {{ inventory_hostname }}
provider: "{{ cli }}"
- eos_config:
lines:
- 10 permit ip 1.1.1.1/32 any log
- 20 permit ip 2.2.2.2/32 any log
- 30 permit ip 3.3.3.3/32 any log
- 40 permit ip 4.4.4.4/32 any log
- 50 permit ip 5.5.5.5/32 any log
parents: ip access-list test
before: no ip access-list test
match: exact
provider: "{{ cli }}"
- eos_config:
lines:
- 10 permit ip 1.1.1.1/32 any log
- 20 permit ip 2.2.2.2/32 any log
- 30 permit ip 3.3.3.3/32 any log
- 40 permit ip 4.4.4.4/32 any log
parents: ip access-list test
before: no ip access-list test
replace: block
provider: "{{ cli }}"
- name: load configuration from file
eos_config:
src: eos.cfg
provider: "{{ cli }}"
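# additional illustrative example (only uses options documented above):
# keep a backup of the running-config before changing the hostname
- eos_config:
    lines: hostname {{ inventory_hostname }}
    backup: yes
    provider: "{{ cli }}"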
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: Only when lines is specified.
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: path
sample: /playbooks/ansible/backup/eos_config.2016-07-16@22:28:34
"""
import time
from ansible.module_utils.netcfg import NetworkConfig, dumps
from ansible.module_utils.eos import NetworkModule, NetworkError
from ansible.module_utils.basic import get_exception
def check_args(module, warnings):
if module.params['force']:
warnings.append('The force argument is deprecated, please use '
'match=none instead. This argument will be '
'removed in the future')
if not module.connection.supports_sessions():
warnings.append('The current version of EOS on the remote device does '
'not support configuration sessions. The commit '
'argument will be ignored')
def get_candidate(module):
candidate = NetworkConfig(indent=3)
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
candidate.add(module.params['lines'], parents=parents)
return candidate
def get_config(module, defaults=False):
contents = module.params['config']
if not contents:
defaults = module.params['defaults']
contents = module.config.get_config(include_defaults=defaults)
return NetworkConfig(indent=3, contents=contents)
def load_config(module, commands, result):
replace = module.params['replace'] == 'config'
commit = not module.check_mode
diff = module.config.load_config(commands, replace=replace, commit=commit)
if diff and module.connection.supports_sessions():
result['diff'] = dict(prepared=diff)
result['changed'] = True
elif diff:
result['changed'] = True
def run(module, result):
match = module.params['match']
replace = module.params['replace']
candidate = get_candidate(module)
if match != 'none' and replace != 'config':
config = get_config(module)
configobjs = candidate.difference(config, match=match, replace=replace)
else:
configobjs = candidate.items
if configobjs:
commands = dumps(configobjs, 'commands').split('\n')
if module.params['lines']:
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
result['updates'] = commands
module.log('commands: %s' % commands)
load_config(module, commands, result)
if module.params['save']:
if not module.check_mode:
module.config.save_config()
result['changed'] = True
def main():
""" main entry point for module execution
"""
argument_spec = dict(
src=dict(type='path'),
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block', 'config']),
# this argument is deprecated in favor of setting match: none
# it will be removed in a future version
force=dict(default=False, type='bool'),
config=dict(),
defaults=dict(type='bool', default=False),
backup=dict(type='bool', default=False),
save=dict(default=False, type='bool'),
)
mutually_exclusive = [('lines', 'src')]
required_if = [('match', 'strict', ['lines']),
('match', 'exact', ['lines']),
('replace', 'block', ['lines']),
('replace', 'config', ['src'])]
module = NetworkModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
if module.params['force'] is True:
module.params['match'] = 'none'
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
if module.params['backup']:
result['__backup__'] = module.config.get_config()
try:
run(module, result)
except NetworkError:
exc = get_exception()
module.fail_json(msg=str(exc), **exc.kwargs)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 2,057,121,600,524,428,000 | 34.322034 | 81 | 0.651871 | false |
0k/odoo | addons/web_tip/__openerp__.py | 31 | 1291 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Tips',
'category': 'Usability',
'description': """
OpenERP Web tips.
========================
""",
'version': '0.1',
'author': 'OpenERP SA',
'depends': ['web'],
'data': [
'security/ir.model.access.csv',
'views/tip.xml',
'web_tip_view.xml'
],
'auto_install': True
}
| agpl-3.0 | -2,850,283,671,045,597,700 | 33.891892 | 78 | 0.563129 | false |
alex/warehouse | tests/unit/test_sessions.py | 1 | 21090 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import msgpack
import redis
import pretend
import pytest
from pyramid import viewderivers
import warehouse.sessions
from warehouse.sessions import (
InvalidSession, Session, SessionFactory, includeme, session_view,
)
from warehouse.utils import crypto
class TestInvalidSession:
@pytest.mark.parametrize(
"method",
[
# IDict methods
"__contains__",
"__delitem__",
"__getitem__",
"__iter__",
"__len__",
"__setitem__",
"clear",
"copy",
"fromkeys",
"get",
"items",
"keys",
"pop",
"popitem",
"setdefault",
"update",
"values",
# ISession methods
"invalidate",
"flash",
"changed",
"get_csrf_token",
"peek_flash",
"new_csrf_token",
"pop_flash",
# Our custom methods.
"should_save",
],
)
def test_methods_raise(self, method):
session = InvalidSession()
with pytest.raises(RuntimeError):
getattr(session, method)()
@pytest.mark.parametrize("name", ["created", "new", "sid"])
    def test_property_raises(self, name):
session = InvalidSession()
with pytest.raises(RuntimeError):
getattr(session, name)
class TestSession:
@pytest.mark.parametrize(
("data", "expected"),
[
(None, {}),
({}, {}),
({"foo": "bar"}, {"foo": "bar"}),
]
)
def test_create_new(self, monkeypatch, data, expected):
monkeypatch.setattr(time, "time", lambda: 100)
monkeypatch.setattr(crypto, "random_token", lambda: "123456")
session = Session(data)
assert session == expected
assert session.sid == "123456"
assert session.new
assert session.created == 100
assert not session.invalidated
@pytest.mark.parametrize(
("data", "expected", "new"),
[
(None, {}, True),
({}, {}, True),
({"foo": "bar"}, {"foo": "bar"}, True),
(None, {}, False),
({}, {}, False),
({"foo": "bar"}, {"foo": "bar"}, False),
]
)
def test_create_with_session_id(self, monkeypatch, data, expected, new):
monkeypatch.setattr(time, "time", lambda: 100)
session = Session(data, "wat", new)
assert session == expected
assert session.sid == "wat"
assert session.new is new
assert session.created == 100
assert not session.invalidated
def test_changed_marks_as_changed(self):
session = Session()
assert not session._changed
session.changed()
assert session._changed
def test_invalidate(self, monkeypatch):
session_ids = iter(["123456", "7890"])
monkeypatch.setattr(crypto, "random_token", lambda: next(session_ids))
session = Session({"foo": "bar"}, "original id", False)
assert session == {"foo": "bar"}
assert session.sid == "original id"
assert not session.new
assert not session.invalidated
session.invalidate()
assert session == {}
assert session.sid == "123456"
assert session.new
assert session.invalidated == {"original id"}
session.invalidate()
assert session == {}
assert session.sid == "7890"
assert session.new
assert session.invalidated == {"original id", "123456"}
def test_invalidate_empty(self):
session = Session({"foo": "bar"})
session.invalidate()
assert session == {}
assert session.invalidated == set()
def test_should_save(self):
session = Session()
assert not session.should_save()
session.changed()
assert session.should_save()
@pytest.mark.parametrize(
("data", "method", "args"),
[
({"foo": "bar"}, "__delitem__", ["foo"]),
({}, "__setitem__", ["foo", "bar"]),
({}, "clear", []),
({"foo": "bar"}, "pop", ["foo"]),
({"foo": "bar"}, "popitem", []),
({}, "setdefault", ["foo", "bar"]),
({}, "update", [{"foo": "bar"}]),
],
)
def test_methods_call_changed(self, data, method, args):
session = Session(data)
session.changed = pretend.call_recorder(lambda: None)
getattr(session, method)(*args)
assert session.changed.calls == [pretend.call()]
@pytest.mark.parametrize(
("queue", "expected"),
[
(None, "_flash_messages"),
("foobar", "_flash_messages.foobar"),
],
)
def test_generate_flash_key(self, queue, expected):
session = Session()
assert session._get_flash_queue_key(queue) == expected
def test_flash_messages(self):
session = Session()
assert session.peek_flash() == []
assert session.peek_flash(queue="foo") == []
assert session.pop_flash() == []
assert session.pop_flash(queue="foo") == []
session.flash("A Flash Message")
assert session.peek_flash() == ["A Flash Message"]
assert session.peek_flash(queue="foo") == []
session.flash("Another Flash Message", queue="foo")
assert session.peek_flash() == ["A Flash Message"]
assert session.peek_flash(queue="foo") == ["Another Flash Message"]
session.flash("A Flash Message")
assert session.peek_flash() == ["A Flash Message", "A Flash Message"]
assert session.peek_flash(queue="foo") == ["Another Flash Message"]
session.flash("A Flash Message", allow_duplicate=True)
assert session.peek_flash() == [
"A Flash Message",
"A Flash Message",
"A Flash Message",
]
assert session.peek_flash(queue="foo") == ["Another Flash Message"]
session.flash("A Flash Message", allow_duplicate=False)
assert session.peek_flash() == [
"A Flash Message",
"A Flash Message",
"A Flash Message",
]
assert session.peek_flash(queue="foo") == ["Another Flash Message"]
assert session.pop_flash() == [
"A Flash Message",
"A Flash Message",
"A Flash Message",
]
assert session.pop_flash(queue="foo") == ["Another Flash Message"]
assert session.peek_flash() == []
assert session.peek_flash(queue="foo") == []
assert session.pop_flash() == []
assert session.pop_flash(queue="foo") == []
def test_csrf_token(self, monkeypatch):
tokens = iter(["123456", "7890"])
monkeypatch.setattr(crypto, "random_token", lambda: next(tokens))
session = Session()
assert session._csrf_token_key not in session
assert session.new_csrf_token() == "123456"
assert session._csrf_token_key in session
assert session.get_csrf_token() == "123456"
assert session.new_csrf_token() == "7890"
assert session._csrf_token_key in session
assert session.get_csrf_token() == "7890"
def test_get_csrf_token_empty(self):
session = Session()
session.new_csrf_token = pretend.call_recorder(lambda: "123456")
assert session.get_csrf_token() == "123456"
assert session.new_csrf_token.calls == [pretend.call()]
class TestSessionFactory:
def test_initialize(self, monkeypatch):
timestamp_signer_obj = pretend.stub()
timestamp_signer_create = pretend.call_recorder(
lambda secret, salt: timestamp_signer_obj
)
monkeypatch.setattr(crypto, "TimestampSigner", timestamp_signer_create)
strict_redis_obj = pretend.stub()
strict_redis_cls = pretend.stub(
from_url=pretend.call_recorder(lambda url: strict_redis_obj),
)
monkeypatch.setattr(redis, "StrictRedis", strict_redis_cls)
session_factory = SessionFactory("mysecret", "my url")
assert session_factory.signer is timestamp_signer_obj
assert session_factory.redis is strict_redis_obj
assert timestamp_signer_create.calls == [
pretend.call("mysecret", salt="session"),
]
assert strict_redis_cls.from_url.calls == [pretend.call("my url")]
def test_redis_key(self):
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
assert session_factory._redis_key("my_session_id") == \
"warehouse/session/data/my_session_id"
def test_no_current_session(self, pyramid_request):
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert pyramid_request.response_callbacks[0] is \
session_factory._process_response
assert isinstance(session, Session)
assert session._sid is None
assert session.new
def test_invalid_session_id(self, pyramid_request):
pyramid_request.cookies["session_id"] = "invalid!"
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert pyramid_request.response_callbacks[0] is \
session_factory._process_response
assert isinstance(session, Session)
assert session._sid is None
assert session.new
def test_valid_session_id_no_data(self, pyramid_request):
pyramid_request.cookies["session_id"] = "123456"
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.signer.unsign = pretend.call_recorder(
lambda session_id, max_age: b"123456"
)
session_factory.redis = pretend.stub(
get=pretend.call_recorder(lambda key: None),
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert pyramid_request.response_callbacks[0] is \
session_factory._process_response
assert session_factory.signer.unsign.calls == [
pretend.call("123456", max_age=12 * 60 * 60),
]
assert session_factory.redis.get.calls == [
pretend.call("warehouse/session/data/123456"),
]
assert isinstance(session, Session)
assert session._sid is None
assert session.new
def test_valid_session_id_invalid_data(self, pyramid_request):
pyramid_request.cookies["session_id"] = "123456"
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.signer.unsign = pretend.call_recorder(
lambda session_id, max_age: b"123456"
)
session_factory.redis = pretend.stub(
get=pretend.call_recorder(lambda key: b"invalid data"),
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert pyramid_request.response_callbacks[0] is \
session_factory._process_response
assert session_factory.signer.unsign.calls == [
pretend.call("123456", max_age=12 * 60 * 60),
]
assert session_factory.redis.get.calls == [
pretend.call("warehouse/session/data/123456"),
]
assert isinstance(session, Session)
assert session._sid is None
assert session.new
def test_valid_session_id_valid_data(self, monkeypatch, pyramid_request):
msgpack_unpackb = pretend.call_recorder(
lambda bdata, encoding, use_list: {"foo": "bar"}
)
monkeypatch.setattr(msgpack, "unpackb", msgpack_unpackb)
pyramid_request.cookies["session_id"] = "123456"
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.signer.unsign = pretend.call_recorder(
lambda session_id, max_age: b"123456"
)
session_factory.redis = pretend.stub(
get=pretend.call_recorder(lambda key: b"valid data"),
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert pyramid_request.response_callbacks[0] is \
session_factory._process_response
assert session_factory.signer.unsign.calls == [
pretend.call("123456", max_age=12 * 60 * 60),
]
assert session_factory.redis.get.calls == [
pretend.call("warehouse/session/data/123456"),
]
assert msgpack_unpackb.calls == [
pretend.call(b"valid data", encoding="utf8", use_list=True),
]
assert isinstance(session, Session)
assert session == {"foo": "bar"}
assert session.sid == "123456"
assert not session.new
def test_no_save_invalid_session(self, pyramid_request):
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.redis = pretend.stub()
pyramid_request.session = InvalidSession()
response = pretend.stub()
session_factory._process_response(pyramid_request, response)
def test_noop_unused_session(self, pyramid_request):
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.redis = pretend.stub()
pyramid_request.session.invalidated = set()
pyramid_request.session.should_save = pretend.call_recorder(
lambda: False
)
response = pretend.stub()
session_factory._process_response(pyramid_request, response)
assert pyramid_request.session.should_save.calls == [pretend.call()]
def test_invalidated_deletes_no_save(self, pyramid_request):
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.redis = pretend.stub(
delete=pretend.call_recorder(lambda key: None)
)
pyramid_request.session.invalidated = ["1", "2"]
pyramid_request.session.should_save = pretend.call_recorder(
lambda: False
)
response = pretend.stub(
delete_cookie=pretend.call_recorder(lambda cookie: None),
)
session_factory._process_response(pyramid_request, response)
assert session_factory.redis.delete.calls == [
pretend.call("warehouse/session/data/1"),
pretend.call("warehouse/session/data/2"),
]
assert pyramid_request.session.should_save.calls == [
pretend.call(),
pretend.call(),
]
assert response.delete_cookie.calls == [pretend.call("session_id")]
def test_invalidated_deletes_save_non_secure(self, monkeypatch,
pyramid_request):
msgpack_packb = pretend.call_recorder(
lambda data, encoding, use_bin_type: b"msgpack data"
)
monkeypatch.setattr(msgpack, "packb", msgpack_packb)
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.redis = pretend.stub(
delete=pretend.call_recorder(lambda key: None),
setex=pretend.call_recorder(lambda key, age, data: None),
)
session_factory.signer.sign = pretend.call_recorder(
lambda data: "cookie data"
)
pyramid_request.scheme = "http"
pyramid_request.session.sid = "123456"
pyramid_request.session.invalidated = ["1", "2"]
pyramid_request.session.should_save = pretend.call_recorder(
lambda: True
)
response = pretend.stub(
set_cookie=pretend.call_recorder(
lambda cookie, data, max_age, httponly, secure: None
)
)
session_factory._process_response(pyramid_request, response)
assert session_factory.redis.delete.calls == [
pretend.call("warehouse/session/data/1"),
pretend.call("warehouse/session/data/2"),
]
assert msgpack_packb.calls == [
pretend.call(
pyramid_request.session,
encoding="utf8",
use_bin_type=True,
),
]
assert session_factory.redis.setex.calls == [
pretend.call(
"warehouse/session/data/123456",
12 * 60 * 60,
b"msgpack data",
),
]
assert pyramid_request.session.should_save.calls == [
pretend.call(),
pretend.call(),
]
assert session_factory.signer.sign.calls == [pretend.call(b"123456")]
assert response.set_cookie.calls == [
pretend.call(
"session_id",
"cookie data",
max_age=12 * 60 * 60,
httponly=True,
secure=False,
),
]
class TestSessionView:
def test_has_options(self):
assert set(session_view.options) == {"uses_session"}
@pytest.mark.parametrize("uses_session", [False, None])
def test_invalid_session(self, uses_session):
context = pretend.stub()
request = pretend.stub(session=pretend.stub())
response = pretend.stub()
@pretend.call_recorder
def view(context, request):
assert isinstance(request.session, InvalidSession)
return response
info = pretend.stub(options={}, exception_only=False)
if uses_session is not None:
info.options["uses_session"] = uses_session
derived_view = session_view(view, info)
assert derived_view(context, request) is response
assert view.calls == [pretend.call(context, request)]
def test_valid_session(self, monkeypatch):
add_vary_cb = pretend.call_recorder(lambda fn: fn)
add_vary = pretend.call_recorder(lambda vary: add_vary_cb)
monkeypatch.setattr(warehouse.sessions, "add_vary", add_vary)
context = pretend.stub()
request = pretend.stub(session=Session())
response = pretend.stub()
@pretend.call_recorder
def view(context, request):
assert isinstance(request.session, Session)
return response
info = pretend.stub(options={"uses_session": True})
derived_view = session_view(view, info)
assert derived_view(context, request) is response
assert view.calls == [pretend.call(context, request)]
assert add_vary.calls == [pretend.call("Cookie")]
assert add_vary_cb.calls == [pretend.call(view)]
def test_includeme(monkeypatch):
session_factory_obj = pretend.stub()
session_factory_cls = pretend.call_recorder(
lambda secret, url: session_factory_obj
)
monkeypatch.setattr(
warehouse.sessions,
"SessionFactory",
session_factory_cls,
)
config = pretend.stub(
set_session_factory=pretend.call_recorder(lambda factory: None),
registry=pretend.stub(
settings={
"sessions.secret": "my secret",
"sessions.url": "my url",
},
),
add_view_deriver=pretend.call_recorder(lambda *a, **kw: None),
)
includeme(config)
assert config.set_session_factory.calls == [
pretend.call(session_factory_obj),
]
assert session_factory_cls.calls == [pretend.call("my secret", "my url")]
assert config.add_view_deriver.calls == [
pretend.call(
session_view,
over="csrf_view",
under=viewderivers.INGRESS,
),
]
| apache-2.0 | -3,131,396,116,381,256,700 | 33.016129 | 79 | 0.580986 | false |
tensorflow/tensorflow | tensorflow/python/ops/structured/structured_array_ops.py | 6 | 20742 | # Lint as python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""StructuredTensor array ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Sequence
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged.row_partition import RowPartition
from tensorflow.python.ops.structured.structured_tensor import StructuredTensor
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
@dispatch.dispatch_for_types(array_ops.expand_dims, StructuredTensor)
@deprecation.deprecated_args(None, 'Use the `axis` argument instead', 'dim')
def expand_dims(input, axis=None, name=None, dim=None): # pylint: disable=redefined-builtin
"""Creates a StructuredTensor with a length 1 axis inserted at index `axis`.
This is an implementation of tf.expand_dims for StructuredTensor. Note
that the `axis` must be less than or equal to rank.
>>> st = StructuredTensor.from_pyval([[{"x": 1}, {"x": 2}], [{"x": 3}]])
>>> tf.expand_dims(st, 0).to_pyval()
[[[{'x': 1}, {'x': 2}], [{'x': 3}]]]
>>> tf.expand_dims(st, 1).to_pyval()
[[[{'x': 1}, {'x': 2}]], [[{'x': 3}]]]
>>> tf.expand_dims(st, 2).to_pyval()
[[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]]
>>> tf.expand_dims(st, -1).to_pyval() # -1 is the same as 2
[[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]]
Args:
input: the original StructuredTensor.
axis: the axis to insert the dimension: `-(rank + 1) <= axis <= rank`
name: the name of the op.
dim: deprecated: use axis.
Returns:
a new structured tensor with larger rank.
Raises:
an error if `axis < -(rank + 1)` or `rank < axis`.
"""
axis = deprecation.deprecated_argument_lookup('axis', axis, 'dim', dim)
return _expand_dims_impl(input, axis, name=name)
@dispatch.dispatch_for_types(array_ops.expand_dims_v2, StructuredTensor)
def expand_dims_v2(input, axis, name=None): # pylint: disable=redefined-builtin
"""Creates a StructuredTensor with a length 1 axis inserted at index `axis`.
This is an implementation of tf.expand_dims for StructuredTensor. Note
that the `axis` must be less than or equal to rank.
>>> st = StructuredTensor.from_pyval([[{"x": 1}, {"x": 2}], [{"x": 3}]])
>>> tf.expand_dims(st, 0).to_pyval()
[[[{'x': 1}, {'x': 2}], [{'x': 3}]]]
>>> tf.expand_dims(st, 1).to_pyval()
[[[{'x': 1}, {'x': 2}]], [[{'x': 3}]]]
>>> tf.expand_dims(st, 2).to_pyval()
[[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]]
>>> tf.expand_dims(st, -1).to_pyval() # -1 is the same as 2
[[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]]
Args:
input: the original StructuredTensor.
axis: the axis to insert the dimension: `-(rank + 1) <= axis <= rank`
name: the name of the op.
Returns:
a new structured tensor with larger rank.
Raises:
an error if `axis < -(rank + 1)` or `rank < axis`.
"""
return _expand_dims_impl(input, axis, name=name)
@dispatch.dispatch_for_types(array_ops.gather, StructuredTensor)
def gather(params,
indices,
validate_indices=None,
name=None,
axis=None,
batch_dims=0):
"""tf.gather for structured tensors.
Does not support (yet) checks on illegal axis values, et cetera.
Indices must be a ragged or dense tensor.
Args:
params: a structured tensor to be gathered
indices: a ragged tensor or tensor to gather by.
validate_indices: whether to validate the indices
name: the name of the op(s).
axis: the axis in params to gather on.
batch_dims: the number of batch dimensions.
Returns:
the params reorganized according to indices.
"""
if name is None:
name = 'gather'
with ops.name_scope(name):
if axis is None:
axis = batch_dims
axis = array_ops.get_positive_axis(axis, params.shape.rank,
ndims_name='params.shape.rank')
indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(
indices, name='indices')
def leaf_op(p):
return array_ops.gather(
p,
indices,
validate_indices=validate_indices,
axis=axis,
batch_dims=batch_dims,
name=None)
return _extend_op_single(params, leaf_op)
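# A minimal illustrative sketch of `gather` above, assuming eager execution;
# the values are hypothetical and only document the intended semantics.
def _example_gather_usage():  # pragma: no cover
  st = StructuredTensor.from_pyval([{"x": 1}, {"x": 2}, {"x": 3}])
  # Reorders the outermost axis by picking rows 2 and 0;
  # expected pyval: [{'x': 3}, {'x': 1}]
  return gather(st, [2, 0])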
@dispatch.dispatch_for_types(array_ops.concat, StructuredTensor)
def concat(values, axis, name: str = 'concat'):
"""tf.concat for structured tensors.
Does not support (yet) checks on illegal axis values, et cetera.
Args:
values: a sequence of StructuredTensors.
axis: an axis to concatenate upon.
name: the name of the op(s).
Returns:
    the StructuredTensors concatenated along `axis`.
"""
if name is None:
name = 'concat'
_assert_concat_compatible_structured_tensors(values)
def leaf_op(values):
return array_ops.concat(values, axis)
# TODO(martinz): handle axis when it is a tensor.
axis = array_ops.get_positive_axis(axis, values[0].rank)
with ops.name_scope(name, 'StructuredConcat', values):
return _extend_op(values, leaf_op)
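# A minimal illustrative sketch of `concat` above, assuming eager execution and
# hypothetical values; it joins StructuredTensors whose paths and ranks match.
def _example_concat_usage():  # pragma: no cover
  a = StructuredTensor.from_pyval([{"x": 1}])
  b = StructuredTensor.from_pyval([{"x": 2}, {"x": 3}])
  # expected pyval: [{'x': 1}, {'x': 2}, {'x': 3}]
  return concat([a, b], axis=0)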
@dispatch.dispatch_for_types(random_ops.random_shuffle, StructuredTensor)
def random_shuffle(value, seed=None, name=None):
"""Shuffle a structured tensor on the zeroth axis.
Args:
value: a structured tensor of rank at least one.
seed: the seed for shuffling.
name: the name for shuffle.
Returns:
The shuffled structured tensor.
"""
with ops.name_scope(name, 'shuffle', [value, seed]):
if value.rank == 0:
raise ValueError('Cannot shuffle a scalar StructuredTensor')
first_dimension = value.nrows()
index = random_ops.random_shuffle(math_ops.range(first_dimension),
seed=seed)
return gather(value, index, axis=0)
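# A minimal illustrative sketch of `random_shuffle` above (hypothetical values;
# the seed is arbitrary): only the outermost axis of the input is permuted.
def _example_random_shuffle_usage():  # pragma: no cover
  st = StructuredTensor.from_pyval([{"x": 1}, {"x": 2}, {"x": 3}])
  # Returns the same three rows in some shuffled order.
  return random_shuffle(st, seed=42)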
@dispatch.dispatch_for_types(array_ops.size_v2, StructuredTensor)
def size_v2(input, out_type=dtypes.int32, name=None):
# pylint: disable=redefined-builtin
"""Returns the size of a tensor."""
return size(input, name=name, out_type=out_type)
# pylint: disable=protected-access
@dispatch.dispatch_for_types(array_ops.size, StructuredTensor)
def size(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the size of a tensor."""
with ops.name_scope(name, 'size', [input]) as name:
if not input._row_partitions:
if input._nrows is not None:
return math_ops.cast(input._nrows, out_type) # vector.
else:
return math_ops.cast(1, out_type) # scalar.
# 2D and up.
last_row_partition = input._row_partitions[-1]
return last_row_partition.nvals(out_type)
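# A minimal illustrative sketch of `size` above (hypothetical values): it counts
# the innermost structured elements rather than the outer rows.
def _example_size_usage():  # pragma: no cover
  st = StructuredTensor.from_pyval([[{"x": 1}, {"x": 2}], [{"x": 3}]])
  # expected value: 3
  return size(st)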
# pylint: disable=protected-access
@dispatch.dispatch_for_types(array_ops.zeros_like, StructuredTensor)
def zeros_like(tensor, dtype=None, name=None, optimize=True):
"""Implementation of zeros_like for StructuredTensor for TF v1."""
del optimize
return zeros_like_v2(tensor, dtype=dtype, name=name)
# pylint: disable=protected-access
@dispatch.dispatch_for_types(array_ops.zeros_like_v2, StructuredTensor)
def zeros_like_v2(input, dtype=None, name=None): # pylint: disable=redefined-builtin
"""Replace every object with a zero.
Example:
>>> st = StructuredTensor.from_pyval([{"x":[3]}, {"x":[4,5]}])
>>> tf.zeros_like(st)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0., 0.], dtype=float32)>
>>> st = StructuredTensor.from_pyval([[{"x":[3]}], [{"x":[4,5]}, {"x":[]}]])
>>> tf.zeros_like(st, dtype=tf.int32)
<tf.RaggedTensor [[0], [0, 0]]>
Args:
input: a structured tensor.
dtype: the dtype of the resulting zeros. (default is tf.float32)
name: a name for the op.
Returns:
a tensor of zeros of the same shape.
"""
if dtype is None:
dtype = dtypes.float32
with ops.name_scope(name, 'zeros_like', [input]) as name:
if not input._row_partitions:
if input._nrows is not None:
return array_ops.zeros([input._nrows], dtype) # vector.
else:
return array_ops.zeros([], dtype) # scalar.
# 2D and up.
last_row_partition = input._row_partitions[-1]
result = ragged_tensor.RaggedTensor._from_nested_row_partitions(
array_ops.zeros(last_row_partition.nvals(), dtype=dtype),
input._row_partitions)
return result
# pylint: disable=protected-access
@dispatch.dispatch_for_types(array_ops.ones_like, StructuredTensor)
def ones_like(tensor, dtype=None, name=None, optimize=True):
"""Implementation of zeros_like for StructuredTensor for TF v1."""
del optimize
return ones_like_v2(tensor, dtype=dtype, name=name)
# pylint: disable=protected-access
@dispatch.dispatch_for_types(array_ops.ones_like_v2, StructuredTensor)
def ones_like_v2(input, dtype=None, name=None): # pylint: disable=redefined-builtin
"""Replace every object with a zero.
Example:
>>> st = StructuredTensor.from_pyval([{"x":[3]}, {"x":[4,5]}])
>>> tf.ones_like(st)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], dtype=float32)>
>>> st = StructuredTensor.from_pyval([[{"x":[3]}], [{"x":[4,5]}, {"x":[]}]])
>>> tf.ones_like(st, dtype=tf.int32)
<tf.RaggedTensor [[1], [1, 1]]>
Args:
input: a structured tensor.
    dtype: the dtype of the resulting ones. (default is tf.float32)
    name: a name for the op.
  Returns:
    a tensor of ones of the same shape.
"""
if dtype is None:
dtype = dtypes.float32
with ops.name_scope(name, 'ones_like', [input]) as name:
if not input._row_partitions:
if input._nrows is not None:
return array_ops.ones([input._nrows], dtype) # vector.
else:
return array_ops.ones([], dtype) # scalar.
# 2D and up.
last_row_partition = input._row_partitions[-1]
result = ragged_tensor.RaggedTensor._from_nested_row_partitions(
array_ops.ones(last_row_partition.nvals(), dtype=dtype),
input._row_partitions)
return result
@dispatch.dispatch_for_types(array_ops.rank, StructuredTensor)
def rank(input, name=None):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor."""
with ops.name_scope(name, 'rank', [input]) as name:
return constant_op.constant(input.rank, dtype=dtypes.int32)
def _expand_dims_impl(st, axis, name=None): # pylint: disable=redefined-builtin
"""Creates a StructuredTensor with a length 1 axis inserted at index `axis`.
This is an implementation of tf.expand_dims for StructuredTensor. Note
that the `axis` must be less than or equal to rank.
>>> st = StructuredTensor.from_pyval([[{"x": 1}, {"x": 2}], [{"x": 3}]])
>>> tf.expand_dims(st, 0).to_pyval()
[[[{'x': 1}, {'x': 2}], [{'x': 3}]]]
>>> tf.expand_dims(st, 1).to_pyval()
[[[{'x': 1}, {'x': 2}]], [[{'x': 3}]]]
>>> tf.expand_dims(st, 2).to_pyval()
[[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]]
>>> tf.expand_dims(st, -1).to_pyval() # -1 is the same as 2
[[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]]
Args:
st: the original StructuredTensor.
axis: the axis to insert the dimension: `-(rank + 1) <= axis <= rank`
name: the name of the op.
Returns:
a new structured tensor with larger rank.
Raises:
an error if `axis < -(rank + 1)` or `rank < axis`.
"""
axis = array_ops.get_positive_axis(
axis, st.rank + 1, axis_name='axis', ndims_name='rank(st)')
with ops.name_scope(name, 'ExpandDims', [st, axis]):
new_fields = {
k: array_ops.expand_dims(v, axis) for (k, v) in st._fields.items()
}
new_shape = st.shape[:axis] + (1,) + st.shape[axis:]
new_row_partitions = _expand_st_row_partitions(st, axis)
new_nrows = st.nrows() if (axis > 0) else 1
return StructuredTensor.from_fields(
new_fields,
shape=new_shape,
row_partitions=new_row_partitions,
nrows=new_nrows)
def _expand_st_row_partitions(st, axis):
"""Create the row_partitions for expand_dims."""
if axis == 0:
if st.shape.rank == 0:
return ()
nvals = st.nrows()
new_partition = RowPartition.from_uniform_row_length(
nvals, nvals, nrows=1, validate=False)
return (new_partition,) + st.row_partitions
elif axis == st.rank:
nvals = (
st.row_partitions[axis - 2].nvals() if (axis - 2 >= 0) else st.nrows())
return st.row_partitions + (RowPartition.from_uniform_row_length(
1, nvals, nrows=nvals, validate=False),)
else:
nvals = (
st.row_partitions[axis - 1].nrows() if (axis - 1 >= 0) else st.nrows())
return st.row_partitions[:axis - 1] + (RowPartition.from_uniform_row_length(
1, nvals, nrows=nvals, validate=False),) + st.row_partitions[axis - 1:]
# TODO(martinz): consider allowing values to be nested.
def _extend_op(values, leaf_op, empty_st_op=None):
"""Extend an op from RaggedTensor and Tensor to StructuredTensor.
Visits all children of the structured tensor, and children of children,
applying leaf_op whenever it reaches a leaf, and empty_st_op whenever
it reaches an internal node without children.
Args:
values: a list of structured tensors, ragged tensors, or tensors. All must
have the same type. If they are structured tensors, they must have the
same paths.
leaf_op: an op for handling non-structured tensor.
empty_st_op: op to create a structured tensor without fields.
Returns:
the result of the extended op (a StructuredTensor, RaggedTensor, or Tensor)
Raises:
ValueError:
If values is not a Sequence or is empty.
"""
if not isinstance(values, Sequence):
raise ValueError('Expected a list')
if not values:
raise ValueError('List cannot be empty')
if empty_st_op is None:
empty_st_op = empty_st_op_like_zeros(leaf_op)
# Use the structure of the first StructuredTensor. They are all assumed to
# be the same.
value = values[0]
if isinstance(value, StructuredTensor):
# TODO(martinz): Calling empty_st_op may add unnecessary ops. Revisit later.
empty_result = empty_st_op(values)
if not value.field_names():
return empty_result
new_fields = {}
for k in value.field_names():
new_fields[k] = _extend_op([v.field_value(k) for v in values], leaf_op,
empty_st_op)
return StructuredTensor.from_fields(new_fields, shape=empty_result.shape)
else:
return leaf_op(values)
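# A minimal illustrative sketch of `_extend_op` above (hypothetical values): it
# walks the fields of each StructuredTensor and applies the leaf op to leaves.
def _example_extend_op_usage():  # pragma: no cover
  a = StructuredTensor.from_pyval([{"x": [1]}])
  b = StructuredTensor.from_pyval([{"x": [2, 3]}])
  # The leaf op receives the list of "x" field values from both inputs.
  return _extend_op([a, b], lambda leaves: array_ops.concat(leaves, axis=0))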
def _extend_op_single(value, leaf_op, empty_st_op=None):
"""Extend an op to a value instead of a list of values."""
def to_list_op(element_op):
if element_op is None:
return None
def list_op(values):
[value] = values
return element_op(value)
return list_op
return _extend_op([value], to_list_op(leaf_op), to_list_op(empty_st_op))
def empty_st_op_like_zeros(leaf_op):
def empty_st_op(values):
as_zeros = [
zeros_like_v2(value, dtype=dtypes.int32) for value in values
]
result = leaf_op(as_zeros)
return _structured_tensor_like(result)
return empty_st_op
def _structured_tensor_from_dense_tensor(t):
"""Create a structured tensor with the shape of a dense tensor."""
# Note: If a tensor will have rank 0,
# it either has a fully defined shape or has unknown rank.
if t.shape.is_fully_defined():
return StructuredTensor.from_fields({}, shape=t.shape)
elif t.shape.rank is None:
raise ValueError("Can't build StructuredTensor w/ unknown rank")
elif t.shape.rank == 1:
return StructuredTensor.from_fields({}, shape=t.shape,
nrows=array_ops.shape(t)[0])
else:
rt = ragged_tensor.RaggedTensor.from_tensor(t)
return _structured_tensor_from_row_partitions(t.shape,
rt._nested_row_partitions)
def _structured_tensor_from_row_partitions(shape, row_partitions):
return StructuredTensor.from_fields({},
shape=shape,
row_partitions=row_partitions)
# pylint: disable=protected-access
def _all_nested_row_partitions(rt):
"""Returns all nested row partitions in rt, including for dense dimensions."""
if isinstance(rt, ops.Tensor):
if rt.shape.rank <= 1:
return ()
else:
rt2 = ragged_tensor.RaggedTensor.from_tensor(rt)
return rt2._nested_row_partitions
else:
tail_partitions = _all_nested_row_partitions(rt.flat_values)
    head_partitions = rt._nested_row_partitions  # pylint: disable=protected-access
return head_partitions + tail_partitions
def _structured_tensor_like(t):
"""Create a StructuredTensor with the shape of a (composite) tensor."""
if isinstance(t, ops.Tensor):
return _structured_tensor_from_dense_tensor(t)
if ragged_tensor.is_ragged(t):
return StructuredTensor.from_fields(
{}, shape=t.get_shape(), row_partitions=_all_nested_row_partitions(t))
# here, it is a StructuredTensor
return StructuredTensor.from_fields({},
shape=t.shape,
row_partitions=t.row_partitions,
nrows=t.nrows())
def _get_all_paths(st):
"""Get all the paths from a StructuredTensor."""
fields = st.field_names()
all_paths = {()}
for k in fields:
v = st.field_value(k)
if isinstance(v, StructuredTensor):
all_paths = all_paths.union([(k,) + p for p in _get_all_paths(v)])
else:
all_paths.add((k,))
return all_paths
def _get_all_ranks(st):
"""Get ranks of all submessages of a StructuredTensor."""
fields = st.field_names()
all_ranks = {(): st.rank}
for k in fields:
v = st.field_value(k)
if isinstance(v, StructuredTensor):
for (k2, v2) in _get_all_ranks(v).items():
all_ranks[(k,) + k2] = v2
return all_ranks
def _assert_all_paths_match(values):
"""Raises an error if the paths are not identical."""
paths = [_get_all_paths(st) for st in values]
path_diff = set()
for other_paths in paths[1:]:
path_diff = path_diff.union(paths[0].symmetric_difference(other_paths))
if path_diff:
raise ValueError(
'Some paths are present in some, but not all, structured tensors: %r' %
(path_diff,))
def _assert_all_ranks_match(values):
"""Raises an error if the ranks of submessages are not identical."""
ranks = [_get_all_ranks(st) for st in values]
for other_ranks in ranks[1:]:
if other_ranks != ranks[0]:
# TODO(martinz): If this becomes common, we can provide more detail.
# e.g.: which path is inconsistent.
raise ValueError('Ranks of sub-message do not match')
def _assert_concat_compatible_structured_tensors(values):
"""Sometimes raises an error if concat doesn't make sense statically on values.
values must be a sequence, and each element in values must be a structured
tensor, and must have the same paths. Additionally, each path that is a
submessage must have the same rank.
These constraints are sufficient for concat on the fields to be the same
as concat on structured tensors. This is meant to capture scenarios like
paths that are not in the first structured tensor, but are in later
structured tensors, which will just be ignored by the recursive algorithm.
If the rank of a submessage was different for two structured tensors,
then that is also a non-sensical merge.
Note that all of these checks are static, as paths and submessage ranks
are known.
Args:
values: a Sequence of StructuredTensors.
Raises:
ValueError: if there is any inconsistency as described above.
"""
if not isinstance(values, Sequence):
    raise ValueError('values must be a Sequence of StructuredTensors')
if not values:
raise ValueError('values must not be an empty list')
for st in values:
if not isinstance(st, StructuredTensor):
raise ValueError('values must be a list of StructuredTensors')
_assert_all_paths_match(values)
_assert_all_ranks_match(values)
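# A minimal illustrative sketch of the check above (hypothetical values): it
# passes silently for compatible inputs and raises ValueError otherwise.
def _example_concat_compatibility_check():  # pragma: no cover
  a = StructuredTensor.from_pyval([{"x": 1}])
  b = StructuredTensor.from_pyval([{"x": 2}])
  _assert_concat_compatible_structured_tensors([a, b])  # no error raised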
| apache-2.0 | -6,387,066,496,174,278,000 | 34.45641 | 92 | 0.654566 | false |
theheros/kbengine | kbe/res/scripts/common/Lib/test/test_winreg.py | 3 | 18930 | # Test the windows specific win32reg module.
# Only win32reg functions not hit here: FlushKey, LoadKey and SaveKey
import os, sys
import unittest
from test import support
threading = support.import_module("threading")
from platform import machine
# Do this first so test will be skipped if module doesn't exist
support.import_module('winreg')
# Now import everything
from winreg import *
try:
REMOTE_NAME = sys.argv[sys.argv.index("--remote")+1]
except (IndexError, ValueError):
REMOTE_NAME = None
# tuple of (major, minor)
WIN_VER = sys.getwindowsversion()[:2]
# Some tests should only run on 64-bit architectures where WOW64 will be.
WIN64_MACHINE = True if machine() == "AMD64" else False
# Starting with Windows 7 and Windows Server 2008 R2, WOW64 no longer uses
# registry reflection and formerly reflected keys are shared instead.
# Windows 7 and Windows Server 2008 R2 are version 6.1. Due to this, some
# tests are only valid up until 6.1
HAS_REFLECTION = True if WIN_VER < (6, 1) else False
test_key_name = "SOFTWARE\\Python Registry Test Key - Delete Me"
# On OS'es that support reflection we should test with a reflected key
test_reflect_key_name = "SOFTWARE\\Classes\\Python Test Key - Delete Me"
test_data = [
("Int Value", 45, REG_DWORD),
("String Val", "A string value", REG_SZ),
("StringExpand", "The path is %path%", REG_EXPAND_SZ),
("Multi-string", ["Lots", "of", "string", "values"], REG_MULTI_SZ),
("Raw Data", b"binary\x00data", REG_BINARY),
("Big String", "x"*(2**14-1), REG_SZ),
("Big Binary", b"x"*(2**14), REG_BINARY),
    # Two and three kanjis, meaning: "Japan" and "Japanese"
("Japanese 日本", "日本語", REG_SZ),
]
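# A minimal sketch of the write/read round trip these tests exercise, assuming
# a scratch key under HKEY_CURRENT_USER; the default arguments are hypothetical.
def _example_round_trip(value_name="Int Value", value_data=45, value_type=REG_DWORD):
    with CreateKey(HKEY_CURRENT_USER, test_key_name) as key:
        SetValueEx(key, value_name, 0, value_type, value_data)
        read_val, read_type = QueryValueEx(key, value_name)
        assert (read_val, read_type) == (value_data, value_type)
    DeleteKey(HKEY_CURRENT_USER, test_key_name)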
class BaseWinregTests(unittest.TestCase):
def setUp(self):
# Make sure that the test key is absent when the test
# starts.
self.delete_tree(HKEY_CURRENT_USER, test_key_name)
def delete_tree(self, root, subkey):
try:
hkey = OpenKey(root, subkey, KEY_ALL_ACCESS)
except WindowsError:
# subkey does not exist
return
while True:
try:
subsubkey = EnumKey(hkey, 0)
except WindowsError:
# no more subkeys
break
self.delete_tree(hkey, subsubkey)
CloseKey(hkey)
DeleteKey(root, subkey)
def _write_test_data(self, root_key, subkeystr="sub_key",
CreateKey=CreateKey):
# Set the default value for this key.
SetValue(root_key, test_key_name, REG_SZ, "Default value")
key = CreateKey(root_key, test_key_name)
self.assertTrue(key.handle != 0)
# Create a sub-key
sub_key = CreateKey(key, subkeystr)
# Give the sub-key some named values
for value_name, value_data, value_type in test_data:
SetValueEx(sub_key, value_name, 0, value_type, value_data)
# Check we wrote as many items as we thought.
nkeys, nvalues, since_mod = QueryInfoKey(key)
self.assertEqual(nkeys, 1, "Not the correct number of sub keys")
self.assertEqual(nvalues, 1, "Not the correct number of values")
nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
self.assertEqual(nkeys, 0, "Not the correct number of sub keys")
self.assertEqual(nvalues, len(test_data),
"Not the correct number of values")
# Close this key this way...
# (but before we do, copy the key as an integer - this allows
# us to test that the key really gets closed).
int_sub_key = int(sub_key)
CloseKey(sub_key)
try:
QueryInfoKey(int_sub_key)
self.fail("It appears the CloseKey() function does "
"not close the actual key!")
except EnvironmentError:
pass
# ... and close that key that way :-)
int_key = int(key)
key.Close()
try:
QueryInfoKey(int_key)
self.fail("It appears the key.Close() function "
"does not close the actual key!")
except EnvironmentError:
pass
def _read_test_data(self, root_key, subkeystr="sub_key", OpenKey=OpenKey):
# Check we can get default value for this key.
val = QueryValue(root_key, test_key_name)
self.assertEqual(val, "Default value",
"Registry didn't give back the correct value")
key = OpenKey(root_key, test_key_name)
# Read the sub-keys
with OpenKey(key, subkeystr) as sub_key:
# Check I can enumerate over the values.
index = 0
while 1:
try:
data = EnumValue(sub_key, index)
except EnvironmentError:
break
self.assertEqual(data in test_data, True,
"Didn't read back the correct test data")
index = index + 1
self.assertEqual(index, len(test_data),
"Didn't read the correct number of items")
# Check I can directly access each item
for value_name, value_data, value_type in test_data:
read_val, read_typ = QueryValueEx(sub_key, value_name)
self.assertEqual(read_val, value_data,
"Could not directly read the value")
self.assertEqual(read_typ, value_type,
"Could not directly read the value")
sub_key.Close()
# Enumerate our main key.
read_val = EnumKey(key, 0)
self.assertEqual(read_val, subkeystr, "Read subkey value wrong")
try:
EnumKey(key, 1)
self.fail("Was able to get a second key when I only have one!")
except EnvironmentError:
pass
key.Close()
def _delete_test_data(self, root_key, subkeystr="sub_key"):
key = OpenKey(root_key, test_key_name, 0, KEY_ALL_ACCESS)
sub_key = OpenKey(key, subkeystr, 0, KEY_ALL_ACCESS)
# It is not necessary to delete the values before deleting
# the key (although subkeys must not exist). We delete them
# manually just to prove we can :-)
for value_name, value_data, value_type in test_data:
DeleteValue(sub_key, value_name)
nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
self.assertEqual(nkeys, 0, "subkey not empty before delete")
self.assertEqual(nvalues, 0, "subkey not empty before delete")
sub_key.Close()
DeleteKey(key, subkeystr)
try:
            # Shouldn't be able to delete it twice!
DeleteKey(key, subkeystr)
self.fail("Deleting the key twice succeeded")
except EnvironmentError:
pass
key.Close()
DeleteKey(root_key, test_key_name)
# Opening should now fail!
try:
key = OpenKey(root_key, test_key_name)
self.fail("Could open the non-existent key")
except WindowsError: # Use this error name this time
pass
def _test_all(self, root_key, subkeystr="sub_key"):
self._write_test_data(root_key, subkeystr)
self._read_test_data(root_key, subkeystr)
self._delete_test_data(root_key, subkeystr)
def _test_named_args(self, key, sub_key):
with CreateKeyEx(key=key, sub_key=sub_key, reserved=0,
access=KEY_ALL_ACCESS) as ckey:
self.assertTrue(ckey.handle != 0)
with OpenKeyEx(key=key, sub_key=sub_key, reserved=0,
access=KEY_ALL_ACCESS) as okey:
self.assertTrue(okey.handle != 0)
class LocalWinregTests(BaseWinregTests):
def test_registry_works(self):
self._test_all(HKEY_CURRENT_USER)
self._test_all(HKEY_CURRENT_USER, "日本-subkey")
def test_registry_works_extended_functions(self):
# Substitute the regular CreateKey and OpenKey calls with their
# extended counterparts.
# Note: DeleteKeyEx is not used here because it is platform dependent
cke = lambda key, sub_key: CreateKeyEx(key, sub_key, 0, KEY_ALL_ACCESS)
self._write_test_data(HKEY_CURRENT_USER, CreateKey=cke)
oke = lambda key, sub_key: OpenKeyEx(key, sub_key, 0, KEY_READ)
self._read_test_data(HKEY_CURRENT_USER, OpenKey=oke)
self._delete_test_data(HKEY_CURRENT_USER)
def test_named_arguments(self):
self._test_named_args(HKEY_CURRENT_USER, test_key_name)
# Use the regular DeleteKey to clean up
# DeleteKeyEx takes named args and is tested separately
DeleteKey(HKEY_CURRENT_USER, test_key_name)
def test_connect_registry_to_local_machine_works(self):
# perform minimal ConnectRegistry test which just invokes it
h = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
self.assertNotEqual(h.handle, 0)
h.Close()
self.assertEqual(h.handle, 0)
def test_inexistant_remote_registry(self):
connect = lambda: ConnectRegistry("abcdefghijkl", HKEY_CURRENT_USER)
self.assertRaises(WindowsError, connect)
def testExpandEnvironmentStrings(self):
r = ExpandEnvironmentStrings("%windir%\\test")
self.assertEqual(type(r), str)
self.assertEqual(r, os.environ["windir"] + "\\test")
def test_context_manager(self):
# ensure that the handle is closed if an exception occurs
try:
with ConnectRegistry(None, HKEY_LOCAL_MACHINE) as h:
self.assertNotEqual(h.handle, 0)
raise WindowsError
except WindowsError:
self.assertEqual(h.handle, 0)
def test_changing_value(self):
# Issue2810: A race condition in 2.6 and 3.1 may cause
# EnumValue or QueryValue to throw "WindowsError: More data is
# available"
done = False
class VeryActiveThread(threading.Thread):
def run(self):
with CreateKey(HKEY_CURRENT_USER, test_key_name) as key:
use_short = True
long_string = 'x'*2000
while not done:
s = 'x' if use_short else long_string
use_short = not use_short
SetValue(key, 'changing_value', REG_SZ, s)
thread = VeryActiveThread()
thread.start()
try:
with CreateKey(HKEY_CURRENT_USER,
test_key_name+'\\changing_value') as key:
for _ in range(1000):
num_subkeys, num_values, t = QueryInfoKey(key)
for i in range(num_values):
name = EnumValue(key, i)
QueryValue(key, name[0])
finally:
done = True
thread.join()
DeleteKey(HKEY_CURRENT_USER, test_key_name+'\\changing_value')
DeleteKey(HKEY_CURRENT_USER, test_key_name)
def test_long_key(self):
# Issue2810, in 2.6 and 3.1 when the key name was exactly 256
# characters, EnumKey threw "WindowsError: More data is
# available"
name = 'x'*256
try:
with CreateKey(HKEY_CURRENT_USER, test_key_name) as key:
SetValue(key, name, REG_SZ, 'x')
num_subkeys, num_values, t = QueryInfoKey(key)
EnumKey(key, 0)
finally:
DeleteKey(HKEY_CURRENT_USER, '\\'.join((test_key_name, name)))
DeleteKey(HKEY_CURRENT_USER, test_key_name)
def test_dynamic_key(self):
# Issue2810, when the value is dynamically generated, these
# throw "WindowsError: More data is available" in 2.6 and 3.1
EnumValue(HKEY_PERFORMANCE_DATA, 0)
QueryValueEx(HKEY_PERFORMANCE_DATA, "")
# Reflection requires XP x64/Vista at a minimum. XP doesn't have this stuff
# or DeleteKeyEx so make sure their use raises NotImplementedError
@unittest.skipUnless(WIN_VER < (5, 2), "Requires Windows XP")
def test_reflection_unsupported(self):
try:
with CreateKey(HKEY_CURRENT_USER, test_key_name) as ck:
self.assertNotEqual(ck.handle, 0)
key = OpenKey(HKEY_CURRENT_USER, test_key_name)
self.assertNotEqual(key.handle, 0)
with self.assertRaises(NotImplementedError):
DisableReflectionKey(key)
with self.assertRaises(NotImplementedError):
EnableReflectionKey(key)
with self.assertRaises(NotImplementedError):
QueryReflectionKey(key)
with self.assertRaises(NotImplementedError):
DeleteKeyEx(HKEY_CURRENT_USER, test_key_name)
finally:
DeleteKey(HKEY_CURRENT_USER, test_key_name)
@unittest.skipUnless(REMOTE_NAME, "Skipping remote registry tests")
class RemoteWinregTests(BaseWinregTests):
def test_remote_registry_works(self):
remote_key = ConnectRegistry(REMOTE_NAME, HKEY_CURRENT_USER)
self._test_all(remote_key)
@unittest.skipUnless(WIN64_MACHINE, "x64 specific registry tests")
class Win64WinregTests(BaseWinregTests):
def test_named_arguments(self):
self._test_named_args(HKEY_CURRENT_USER, test_key_name)
# Clean up and also exercise the named arguments
DeleteKeyEx(key=HKEY_CURRENT_USER, sub_key=test_key_name,
access=KEY_ALL_ACCESS, reserved=0)
def test_reflection_functions(self):
# Test that we can call the query, enable, and disable functions
# on a key which isn't on the reflection list with no consequences.
with OpenKey(HKEY_LOCAL_MACHINE, "Software") as key:
# HKLM\Software is redirected but not reflected in all OSes
self.assertTrue(QueryReflectionKey(key))
self.assertIsNone(EnableReflectionKey(key))
self.assertIsNone(DisableReflectionKey(key))
self.assertTrue(QueryReflectionKey(key))
@unittest.skipUnless(HAS_REFLECTION, "OS doesn't support reflection")
def test_reflection(self):
# Test that we can create, open, and delete keys in the 32-bit
# area. Because we are doing this in a key which gets reflected,
# test the differences of 32 and 64-bit keys before and after the
# reflection occurs (ie. when the created key is closed).
try:
with CreateKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_ALL_ACCESS | KEY_WOW64_32KEY) as created_key:
self.assertNotEqual(created_key.handle, 0)
# The key should now be available in the 32-bit area
with OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_ALL_ACCESS | KEY_WOW64_32KEY) as key:
self.assertNotEqual(key.handle, 0)
# Write a value to what currently is only in the 32-bit area
SetValueEx(created_key, "", 0, REG_SZ, "32KEY")
# The key is not reflected until created_key is closed.
# The 64-bit version of the key should not be available yet.
open_fail = lambda: OpenKey(HKEY_CURRENT_USER,
test_reflect_key_name, 0,
KEY_READ | KEY_WOW64_64KEY)
self.assertRaises(WindowsError, open_fail)
# Now explicitly open the 64-bit version of the key
with OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_ALL_ACCESS | KEY_WOW64_64KEY) as key:
self.assertNotEqual(key.handle, 0)
# Make sure the original value we set is there
self.assertEqual("32KEY", QueryValue(key, ""))
# Set a new value, which will get reflected to 32-bit
SetValueEx(key, "", 0, REG_SZ, "64KEY")
                # Reflection uses a "last-writer wins" policy, so the value we set
# on the 64-bit key should be the same on 32-bit
with OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_READ | KEY_WOW64_32KEY) as key:
self.assertEqual("64KEY", QueryValue(key, ""))
finally:
DeleteKeyEx(HKEY_CURRENT_USER, test_reflect_key_name,
KEY_WOW64_32KEY, 0)
@unittest.skipUnless(HAS_REFLECTION, "OS doesn't support reflection")
def test_disable_reflection(self):
# Make use of a key which gets redirected and reflected
try:
with CreateKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_ALL_ACCESS | KEY_WOW64_32KEY) as created_key:
# QueryReflectionKey returns whether or not the key is disabled
disabled = QueryReflectionKey(created_key)
self.assertEqual(type(disabled), bool)
# HKCU\Software\Classes is reflected by default
self.assertFalse(disabled)
DisableReflectionKey(created_key)
self.assertTrue(QueryReflectionKey(created_key))
# The key is now closed and would normally be reflected to the
# 64-bit area, but let's make sure that didn't happen.
open_fail = lambda: OpenKeyEx(HKEY_CURRENT_USER,
test_reflect_key_name, 0,
KEY_READ | KEY_WOW64_64KEY)
self.assertRaises(WindowsError, open_fail)
# Make sure the 32-bit key is actually there
with OpenKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_READ | KEY_WOW64_32KEY) as key:
self.assertNotEqual(key.handle, 0)
finally:
DeleteKeyEx(HKEY_CURRENT_USER, test_reflect_key_name,
KEY_WOW64_32KEY, 0)
def test_main():
support.run_unittest(LocalWinregTests, RemoteWinregTests,
Win64WinregTests)
if __name__ == "__main__":
if not REMOTE_NAME:
print("Remote registry calls can be tested using",
"'test_winreg.py --remote \\\\machine_name'")
test_main()
| lgpl-3.0 | 7,014,132,232,162,777,000 | 41.888631 | 79 | 0.576496 | false |
vhaupert/mitmproxy | mitmproxy/proxy/protocol/http2.py | 1 | 30035 | import threading
import time
import functools
from typing import Dict, Callable, Any, List, Optional # noqa
import h2.exceptions
from h2 import connection
from h2 import events
import queue
from mitmproxy import connections # noqa
from mitmproxy import exceptions
from mitmproxy import http
from mitmproxy.proxy.protocol import base
from mitmproxy.proxy.protocol import http as httpbase
import mitmproxy.net.http
from mitmproxy.net import tcp
from mitmproxy.coretypes import basethread
from mitmproxy.net.http import http2, headers, url
from mitmproxy.utils import human
class SafeH2Connection(connection.H2Connection):
def __init__(self, conn, *args, **kwargs):
super().__init__(*args, **kwargs)
self.conn = conn
self.lock = threading.RLock()
def safe_acknowledge_received_data(self, acknowledged_size: int, stream_id: int):
if acknowledged_size == 0:
return
with self.lock:
self.acknowledge_received_data(acknowledged_size, stream_id)
self.conn.send(self.data_to_send())
def safe_reset_stream(self, stream_id: int, error_code: int):
with self.lock:
try:
self.reset_stream(stream_id, error_code)
except h2.exceptions.StreamClosedError: # pragma: no cover
# stream is already closed - good
pass
self.conn.send(self.data_to_send())
def safe_update_settings(self, new_settings: Dict[int, Any]):
with self.lock:
self.update_settings(new_settings)
self.conn.send(self.data_to_send())
def safe_send_headers(self, raise_zombie: Callable, stream_id: int, headers: headers.Headers, **kwargs):
with self.lock:
raise_zombie()
self.send_headers(stream_id, headers.fields, **kwargs)
self.conn.send(self.data_to_send())
def safe_send_body(self, raise_zombie: Callable, stream_id: int, chunks: List[bytes], end_stream=True):
for chunk in chunks:
position = 0
while position < len(chunk):
self.lock.acquire()
raise_zombie(self.lock.release)
max_outbound_frame_size = self.max_outbound_frame_size
frame_chunk = chunk[position:position + max_outbound_frame_size]
if self.local_flow_control_window(stream_id) < len(frame_chunk): # pragma: no cover
self.lock.release()
time.sleep(0.1)
continue
self.send_data(stream_id, frame_chunk)
try:
self.conn.send(self.data_to_send())
except Exception as e: # pragma: no cover
raise e
finally:
self.lock.release()
position += max_outbound_frame_size
if end_stream:
with self.lock:
raise_zombie()
self.end_stream(stream_id)
self.conn.send(self.data_to_send())
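# A minimal illustrative sketch (hypothetical usage, not a definitive API):
# roughly how a stream layer might drive SafeH2Connection when forwarding one
# message. `raise_zombie` would normally raise if the stream has been killed;
# here it is a no-op stand-in.
def _example_safe_h2_usage(h2_conn, stream_id, message):  # pragma: no cover
    def raise_zombie(*args):
        pass
    h2_conn.safe_send_headers(raise_zombie, stream_id, message.headers)
    h2_conn.safe_send_body(raise_zombie, stream_id, [message.content])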
class Http2Layer(base.Layer):
if False:
# mypy type hints
client_conn: connections.ClientConnection = None
class H2ConnLogger:
def __init__(self, name, log):
self.name = name
self.log = log
def debug(self, fmtstr, *args):
msg = "H2Conn {}: {}".format(self.name, fmtstr % args)
self.log(msg, "debug")
def trace(self, fmtstr, *args):
pass
def __init__(self, ctx, mode: str) -> None:
super().__init__(ctx)
self.mode = mode
self.streams: Dict[int, Http2SingleStreamLayer] = dict()
self.server_to_client_stream_ids: Dict[int, int] = dict([(0, 0)])
self.connections: Dict[object, SafeH2Connection] = {}
config = h2.config.H2Configuration(
client_side=False,
header_encoding=False,
validate_outbound_headers=False,
validate_inbound_headers=False,
logger=self.H2ConnLogger("client", self.log))
self.connections[self.client_conn] = SafeH2Connection(self.client_conn, config=config)
def _initiate_server_conn(self):
if self.server_conn.connected():
config = h2.config.H2Configuration(
client_side=True,
header_encoding=False,
validate_outbound_headers=False,
validate_inbound_headers=False,
logger=self.H2ConnLogger("server", self.log))
self.connections[self.server_conn] = SafeH2Connection(self.server_conn, config=config)
self.connections[self.server_conn].initiate_connection()
self.server_conn.send(self.connections[self.server_conn].data_to_send())
def _complete_handshake(self):
preamble = self.client_conn.rfile.read(24)
self.connections[self.client_conn].initiate_connection()
self.connections[self.client_conn].receive_data(preamble)
self.client_conn.send(self.connections[self.client_conn].data_to_send())
def next_layer(self): # pragma: no cover
# WebSocket over HTTP/2?
# CONNECT for proxying?
raise NotImplementedError()
def _handle_event(self, event, source_conn, other_conn, is_server):
self.log(
"HTTP2 Event from {}".format("server" if is_server else "client"),
"debug",
[repr(event)]
)
eid = None
if hasattr(event, 'stream_id'):
if is_server and event.stream_id % 2 == 1:
eid = self.server_to_client_stream_ids[event.stream_id]
else:
eid = event.stream_id
if isinstance(event, events.RequestReceived):
return self._handle_request_received(eid, event)
elif isinstance(event, events.ResponseReceived):
return self._handle_response_received(eid, event)
elif isinstance(event, events.DataReceived):
return self._handle_data_received(eid, event, source_conn)
elif isinstance(event, events.StreamEnded):
return self._handle_stream_ended(eid)
elif isinstance(event, events.StreamReset):
return self._handle_stream_reset(eid, event, is_server, other_conn)
elif isinstance(event, events.RemoteSettingsChanged):
return self._handle_remote_settings_changed(event, other_conn)
elif isinstance(event, events.ConnectionTerminated):
return self._handle_connection_terminated(event, is_server)
elif isinstance(event, events.PushedStreamReceived):
return self._handle_pushed_stream_received(event)
elif isinstance(event, events.PriorityUpdated):
return self._handle_priority_updated(eid, event)
elif isinstance(event, events.TrailersReceived):
return self._handle_trailers(eid, event, is_server, other_conn)
# fail-safe for unhandled events
return True
def _handle_request_received(self, eid, event):
headers = mitmproxy.net.http.Headers([[k, v] for k, v in event.headers])
self.streams[eid] = Http2SingleStreamLayer(self, self.connections[self.client_conn], eid, headers)
self.streams[eid].timestamp_start = time.time()
if event.priority_updated is not None:
self.streams[eid].priority_exclusive = event.priority_updated.exclusive
self.streams[eid].priority_depends_on = event.priority_updated.depends_on
self.streams[eid].priority_weight = event.priority_updated.weight
self.streams[eid].handled_priority_event = event.priority_updated
self.streams[eid].start()
self.streams[eid].request_message.arrived.set()
return True
def _handle_response_received(self, eid, event):
headers = mitmproxy.net.http.Headers([[k, v] for k, v in event.headers])
self.streams[eid].queued_data_length = 0
self.streams[eid].timestamp_start = time.time()
self.streams[eid].response_message.headers = headers
self.streams[eid].response_message.arrived.set()
return True
def _handle_data_received(self, eid, event, source_conn):
bsl = human.parse_size(self.config.options.body_size_limit)
if bsl and self.streams[eid].queued_data_length > bsl:
self.streams[eid].kill()
self.connections[source_conn].safe_reset_stream(
event.stream_id,
h2.errors.ErrorCodes.REFUSED_STREAM
)
self.log("HTTP body too large. Limit is {}.".format(bsl), "info")
else:
self.streams[eid].data_queue.put(event.data)
self.streams[eid].queued_data_length += len(event.data)
            # always acknowledge received data with a WINDOW_UPDATE frame
self.connections[source_conn].safe_acknowledge_received_data(
event.flow_controlled_length,
event.stream_id
)
return True
def _handle_stream_ended(self, eid):
self.streams[eid].timestamp_end = time.time()
self.streams[eid].stream_ended.set()
return True
def _handle_stream_reset(self, eid, event, is_server, other_conn):
if eid in self.streams:
self.streams[eid].kill()
if is_server:
other_stream_id = self.streams[eid].client_stream_id
else:
other_stream_id = self.streams[eid].server_stream_id
if other_stream_id is not None:
self.connections[other_conn].safe_reset_stream(other_stream_id, event.error_code)
return True
def _handle_trailers(self, eid, event, is_server, other_conn):
trailers = mitmproxy.net.http.Headers([[k, v] for k, v in event.headers])
self.streams[eid].trailers = trailers
return True
def _handle_remote_settings_changed(self, event, other_conn):
new_settings = dict([(key, cs.new_value) for (key, cs) in event.changed_settings.items()])
self.connections[other_conn].safe_update_settings(new_settings)
return True
def _handle_connection_terminated(self, event, is_server):
self.log("HTTP/2 connection terminated by {}: error code: {}, last stream id: {}, additional data: {}".format(
"server" if is_server else "client",
event.error_code,
event.last_stream_id,
event.additional_data), "info")
if event.error_code != h2.errors.ErrorCodes.NO_ERROR:
# Something terrible has happened - kill everything!
self.connections[self.client_conn].close_connection(
error_code=event.error_code,
last_stream_id=event.last_stream_id,
additional_data=event.additional_data
)
self.client_conn.send(self.connections[self.client_conn].data_to_send())
self._kill_all_streams()
else:
"""
Do not immediately terminate the other connection.
Some streams might be still sending data to the client.
"""
return False
def _handle_pushed_stream_received(self, event):
# pushed stream ids should be unique and not dependent on race conditions
# only the parent stream id must be looked up first
parent_eid = self.server_to_client_stream_ids[event.parent_stream_id]
with self.connections[self.client_conn].lock:
self.connections[self.client_conn].push_stream(parent_eid, event.pushed_stream_id, event.headers)
self.client_conn.send(self.connections[self.client_conn].data_to_send())
headers = mitmproxy.net.http.Headers([[k, v] for k, v in event.headers])
layer = Http2SingleStreamLayer(self, self.connections[self.client_conn], event.pushed_stream_id, headers)
self.streams[event.pushed_stream_id] = layer
self.streams[event.pushed_stream_id].timestamp_start = time.time()
self.streams[event.pushed_stream_id].pushed = True
self.streams[event.pushed_stream_id].parent_stream_id = parent_eid
self.streams[event.pushed_stream_id].timestamp_end = time.time()
self.streams[event.pushed_stream_id].request_message.arrived.set()
self.streams[event.pushed_stream_id].request_message.stream_ended.set()
self.streams[event.pushed_stream_id].start()
return True
def _handle_priority_updated(self, eid, event):
if not self.config.options.http2_priority:
self.log("HTTP/2 PRIORITY frame suppressed. Use --http2-priority to enable forwarding.", "debug")
return True
if eid in self.streams and self.streams[eid].handled_priority_event is event:
# this event was already handled during stream creation
# HeadersFrame + Priority information as RequestReceived
return True
with self.connections[self.server_conn].lock:
mapped_stream_id = event.stream_id
if mapped_stream_id in self.streams and self.streams[mapped_stream_id].server_stream_id:
# if the stream is already up and running and was sent to the server,
# use the mapped server stream id to update priority information
mapped_stream_id = self.streams[mapped_stream_id].server_stream_id
if eid in self.streams:
self.streams[eid].priority_exclusive = event.exclusive
self.streams[eid].priority_depends_on = event.depends_on
self.streams[eid].priority_weight = event.weight
self.connections[self.server_conn].prioritize(
mapped_stream_id,
weight=event.weight,
depends_on=self._map_depends_on_stream_id(mapped_stream_id, event.depends_on),
exclusive=event.exclusive
)
self.server_conn.send(self.connections[self.server_conn].data_to_send())
return True
def _map_depends_on_stream_id(self, stream_id, depends_on):
mapped_depends_on = depends_on
if mapped_depends_on in self.streams and self.streams[mapped_depends_on].server_stream_id:
# if the depends-on-stream is already up and running and was sent to the server
# use the mapped server stream id to update priority information
mapped_depends_on = self.streams[mapped_depends_on].server_stream_id
if stream_id == mapped_depends_on:
# looks like one of the streams wasn't opened yet
# prevent self-dependent streams which result in ProtocolError
mapped_depends_on += 2
return mapped_depends_on
def _cleanup_streams(self):
death_time = time.time() - 10
zombie_streams = [(stream_id, stream) for stream_id, stream in list(self.streams.items()) if stream.zombie]
outdated_streams = [stream_id for stream_id, stream in zombie_streams if stream.zombie <= death_time]
for stream_id in outdated_streams: # pragma: no cover
self.streams.pop(stream_id, None)
def _kill_all_streams(self):
for stream in self.streams.values():
stream.kill()
def __call__(self):
self._initiate_server_conn()
self._complete_handshake()
conns = [c.connection for c in self.connections.keys()]
try:
while True:
r = tcp.ssl_read_select(conns, 0.1)
for conn in r:
source_conn = self.client_conn if conn == self.client_conn.connection else self.server_conn
other_conn = self.server_conn if conn == self.client_conn.connection else self.client_conn
is_server = (source_conn == self.server_conn)
with self.connections[source_conn].lock:
try:
raw_frame = b''.join(http2.read_raw_frame(source_conn.rfile))
except:
# read frame failed: connection closed
self._kill_all_streams()
return
if self.connections[source_conn].state_machine.state == h2.connection.ConnectionState.CLOSED:
self.log("HTTP/2 connection entered closed state already", "debug")
return
incoming_events = self.connections[source_conn].receive_data(raw_frame)
source_conn.send(self.connections[source_conn].data_to_send())
for event in incoming_events:
if not self._handle_event(event, source_conn, other_conn, is_server):
# connection terminated: GoAway
self._kill_all_streams()
return
self._cleanup_streams()
except Exception as e: # pragma: no cover
self.log(repr(e), "info")
self._kill_all_streams()
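# detect_zombie_stream: decorator for Http2SingleStreamLayer methods below. It
# calls raise_zombie() before and after the wrapped call, so a stream that has
# died (or a connection that has closed) surfaces as Http2ZombieException
# instead of the method silently operating on a defunct stream.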
def detect_zombie_stream(func): # pragma: no cover
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
self.raise_zombie()
result = func(self, *args, **kwargs)
self.raise_zombie()
return result
return wrapper
class Http2SingleStreamLayer(httpbase._HttpTransmissionLayer, basethread.BaseThread):
class Message:
def __init__(self, headers=None):
self.headers: Optional[mitmproxy.net.http.Headers] = headers # headers are the first thing to be received on a new stream
self.data_queue: queue.Queue[bytes] = queue.Queue() # contains raw contents of DATA frames
self.queued_data_length = 0 # used to enforce mitmproxy's config.options.body_size_limit
self.trailers: Optional[mitmproxy.net.http.Headers] = None # trailers are received after stream_ended is set
            self.arrived = threading.Event() # indicates the HEADERS+CONTINUATION frames have been received
            self.stream_ended = threading.Event() # indicates that a frame with the END_STREAM flag has been received
def __init__(self, ctx, h2_connection, stream_id: int, request_headers: mitmproxy.net.http.Headers) -> None:
super().__init__(
ctx, name="Http2SingleStreamLayer-{}".format(stream_id)
)
self.h2_connection = h2_connection
self.zombie: Optional[float] = None
self.client_stream_id: int = stream_id
self.server_stream_id: Optional[int] = None
self.pushed = False
self.timestamp_start: Optional[float] = None
self.timestamp_end: Optional[float] = None
self.request_message = self.Message(request_headers)
self.response_message = self.Message()
self.priority_exclusive: bool
self.priority_depends_on: Optional[int] = None
self.priority_weight: Optional[int] = None
self.handled_priority_event: Any = None
def kill(self):
if not self.zombie:
self.zombie = time.time()
self.request_message.stream_ended.set()
self.request_message.arrived.set()
self.response_message.arrived.set()
self.response_message.stream_ended.set()
def connect(self): # pragma: no cover
raise exceptions.Http2ProtocolException("HTTP2 layer should already have a connection.")
def disconnect(self): # pragma: no cover
raise exceptions.Http2ProtocolException("Cannot dis- or reconnect in HTTP2 connections.")
def set_server(self, address): # pragma: no cover
raise exceptions.SetServerNotAllowedException(repr(address))
def check_close_connection(self, flow):
# This layer only handles a single stream.
# RFC 7540 8.1: An HTTP request/response exchange fully consumes a single stream.
return True
@property
def data_queue(self):
if self.response_message.arrived.is_set():
return self.response_message.data_queue
else:
return self.request_message.data_queue
@property
def queued_data_length(self):
if self.response_message.arrived.is_set():
return self.response_message.queued_data_length
else:
return self.request_message.queued_data_length
@queued_data_length.setter
def queued_data_length(self, v):
self.request_message.queued_data_length = v
@property
def stream_ended(self):
# This indicates that all message headers, the full message body, and all trailers have been received
# https://tools.ietf.org/html/rfc7540#section-8.1
if self.response_message.arrived.is_set():
return self.response_message.stream_ended
else:
return self.request_message.stream_ended
@property
def trailers(self):
if self.response_message.arrived.is_set():
return self.response_message.trailers
else:
return self.request_message.trailers
@trailers.setter
def trailers(self, v):
if self.response_message.arrived.is_set():
self.response_message.trailers = v
else:
self.request_message.trailers = v
def raise_zombie(self, pre_command=None): # pragma: no cover
connection_closed = self.h2_connection.state_machine.state == h2.connection.ConnectionState.CLOSED
if self.zombie is not None or connection_closed:
if pre_command is not None:
pre_command()
raise exceptions.Http2ZombieException("Connection or stream already dead: {}, {}".format(self.zombie, connection_closed))
@detect_zombie_stream
def read_request_headers(self, flow):
self.request_message.arrived.wait()
self.raise_zombie()
if self.pushed:
flow.metadata['h2-pushed-stream'] = True
# pseudo header must be present, see https://http2.github.io/http2-spec/#rfc.section.8.1.2.3
authority = self.request_message.headers.pop(':authority', "")
method = self.request_message.headers.pop(':method')
scheme = self.request_message.headers.pop(':scheme')
path = self.request_message.headers.pop(':path')
host, port = url.parse_authority(authority, check=True)
port = port or url.default_port(scheme) or 0
return http.HTTPRequest(
host,
port,
method.encode(),
scheme.encode(),
authority.encode(),
path.encode(),
b"HTTP/2.0",
self.request_message.headers,
None,
None,
self.timestamp_start,
self.timestamp_end,
)
@detect_zombie_stream
def read_request_body(self, request):
if not request.stream:
self.request_message.stream_ended.wait()
while True:
try:
yield self.request_message.data_queue.get(timeout=0.1)
except queue.Empty: # pragma: no cover
pass
if self.request_message.stream_ended.is_set():
self.raise_zombie()
while self.request_message.data_queue.qsize() > 0:
yield self.request_message.data_queue.get()
break
self.raise_zombie()
@detect_zombie_stream
def read_request_trailers(self, request):
return self.request_message.trailers
@detect_zombie_stream
def send_request_headers(self, request):
if self.pushed:
# nothing to do here
return
while True:
self.raise_zombie()
self.connections[self.server_conn].lock.acquire()
max_streams = self.connections[self.server_conn].remote_settings.max_concurrent_streams
if self.connections[self.server_conn].open_outbound_streams + 1 >= max_streams:
# wait until we get a free slot for a new outgoing stream
self.connections[self.server_conn].lock.release()
time.sleep(0.1)
continue
# keep the lock
break
# We must not assign a stream id if we are already a zombie.
self.raise_zombie()
self.server_stream_id = self.connections[self.server_conn].get_next_available_stream_id()
self.server_to_client_stream_ids[self.server_stream_id] = self.client_stream_id
headers = request.headers.copy()
if request.authority:
headers.insert(0, ":authority", request.authority)
headers.insert(0, ":path", request.path)
headers.insert(0, ":method", request.method)
headers.insert(0, ":scheme", request.scheme)
priority_exclusive = None
priority_depends_on = None
priority_weight = None
if self.handled_priority_event:
# only send priority information if they actually came with the original HeadersFrame
# and not if they got updated before/after with a PriorityFrame
if not self.config.options.http2_priority:
self.log("HTTP/2 PRIORITY information in HEADERS frame suppressed. Use --http2-priority to enable forwarding.", "debug")
else:
priority_exclusive = self.priority_exclusive
priority_depends_on = self._map_depends_on_stream_id(self.server_stream_id, self.priority_depends_on)
priority_weight = self.priority_weight
try:
self.connections[self.server_conn].safe_send_headers(
self.raise_zombie,
self.server_stream_id,
headers,
priority_exclusive=priority_exclusive,
priority_depends_on=priority_depends_on,
priority_weight=priority_weight,
)
except Exception as e: # pragma: no cover
raise e
finally:
self.raise_zombie()
self.connections[self.server_conn].lock.release()
@detect_zombie_stream
def send_request_body(self, request, chunks):
if self.pushed:
# nothing to do here
return
self.connections[self.server_conn].safe_send_body(
self.raise_zombie,
self.server_stream_id,
chunks,
end_stream=(request.trailers is None),
)
@detect_zombie_stream
def send_request_trailers(self, request):
self._send_trailers(self.server_conn, request.trailers)
@detect_zombie_stream
def send_request(self, request):
self.send_request_headers(request)
self.send_request_body(request, [request.content])
self.send_request_trailers(request)
@detect_zombie_stream
def read_response_headers(self):
self.response_message.arrived.wait()
self.raise_zombie()
status_code = int(self.response_message.headers.get(':status', 502))
headers = self.response_message.headers.copy()
headers.pop(":status", None)
return http.HTTPResponse(
http_version=b"HTTP/2.0",
status_code=status_code,
reason=b'',
headers=headers,
content=None,
trailers=None,
timestamp_start=self.timestamp_start,
timestamp_end=self.timestamp_end,
)
@detect_zombie_stream
def read_response_body(self, request, response):
while True:
try:
yield self.response_message.data_queue.get(timeout=0.1)
except queue.Empty: # pragma: no cover
pass
if self.response_message.stream_ended.is_set():
self.raise_zombie()
while self.response_message.data_queue.qsize() > 0:
yield self.response_message.data_queue.get()
break
self.raise_zombie()
@detect_zombie_stream
def read_response_trailers(self, request, response):
return self.response_message.trailers
@detect_zombie_stream
def send_response_headers(self, response):
headers = response.headers.copy()
headers.insert(0, ":status", str(response.status_code))
with self.connections[self.client_conn].lock:
self.connections[self.client_conn].safe_send_headers(
self.raise_zombie,
self.client_stream_id,
headers
)
@detect_zombie_stream
def send_response_body(self, response, chunks):
self.connections[self.client_conn].safe_send_body(
self.raise_zombie,
self.client_stream_id,
chunks,
end_stream=(response.trailers is None),
)
@detect_zombie_stream
def send_response_trailers(self, response):
self._send_trailers(self.client_conn, response.trailers)
def _send_trailers(self, conn, trailers):
if not trailers:
return
with self.connections[conn].lock:
self.connections[conn].safe_send_headers(
self.raise_zombie,
self.client_stream_id,
trailers,
end_stream=True
)
def __call__(self): # pragma: no cover
raise EnvironmentError('Http2SingleStreamLayer must be run as thread')
def run(self):
layer = httpbase.HttpLayer(self, self.mode)
try:
layer()
except exceptions.Http2ZombieException: # pragma: no cover
# zombies can be safely terminated - no need to kill them twice
return
except exceptions.ProtocolException as e: # pragma: no cover
self.log(repr(e), "info")
except exceptions.SetServerNotAllowedException as e: # pragma: no cover
self.log("Changing the Host server for HTTP/2 connections not allowed: {}".format(e), "info")
except exceptions.Kill: # pragma: no cover
self.log("Connection killed", "info")
self.kill()
| mit | 6,689,100,719,969,246,000 | 40.143836 | 136 | 0.610488 | false |
creasyw/IMTAphy | documentation/doctools/tags/0.4.3/sphinx/linkcheck.py | 11 | 3581 | # -*- coding: utf-8 -*-
"""
sphinx.linkcheck
~~~~~~~~~~~~~~~~
The CheckExternalLinksBuilder class.
:copyright: 2008 by Georg Brandl, Thomas Lamb.
:license: BSD.
"""
import socket
from os import path
from urllib2 import build_opener, HTTPError
from docutils import nodes
from sphinx.builder import Builder
from sphinx.util.console import purple, red, darkgreen
# create an opener that will simulate a browser user-agent
opener = build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
class CheckExternalLinksBuilder(Builder):
"""
Checks for broken external links.
"""
name = 'linkcheck'
def init(self):
self.good = set()
self.broken = {}
self.redirected = {}
# set a timeout for non-responding servers
socket.setdefaulttimeout(5.0)
# create output file
open(path.join(self.outdir, 'output.txt'), 'w').close()
def get_target_uri(self, docname, typ=None):
return ''
def get_outdated_docs(self):
return self.env.found_docs
def prepare_writing(self, docnames):
return
def write_doc(self, docname, doctree):
self.info()
for node in doctree.traverse(nodes.reference):
try:
self.check(node, docname)
except KeyError:
continue
def check(self, node, docname):
uri = node['refuri']
if '#' in uri:
uri = uri.split('#')[0]
if uri in self.good:
return
lineno = None
while lineno is None and node:
node = node.parent
lineno = node.line
if uri[0:5] == 'http:' or uri[0:6] == 'https:':
self.info(uri, nonl=1)
if uri in self.broken:
(r, s) = self.broken[uri]
elif uri in self.redirected:
(r, s) = self.redirected[uri]
else:
(r, s) = self.resolve(uri)
if r == 0:
self.info(' - ' + darkgreen('working'))
self.good.add(uri)
elif r == 2:
self.info(' - ' + red('broken: ') + s)
self.broken[uri] = (r, s)
self.write_entry('broken', docname, lineno, uri + ': ' + s)
else:
self.info(' - ' + purple('redirected') + ' to ' + s)
self.redirected[uri] = (r, s)
self.write_entry('redirected', docname, lineno, uri + ' to ' + s)
elif len(uri) == 0 or uri[0:7] == 'mailto:' or uri[0:4] == 'ftp:':
return
else:
self.info(uri + ' - ' + red('malformed!'))
self.write_entry('malformed', docname, lineno, uri)
return
def write_entry(self, what, docname, line, uri):
output = open(path.join(self.outdir, 'output.txt'), 'a')
output.write("%s:%s: [%s] %s\n" % (self.env.doc2path(docname, None),
line, what, uri))
output.close()
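    # resolve() reports a link's status as a (code, info) pair which check()
    # above interprets as follows: (0, 0) means working, (2, error-string)
    # means broken, and anything else (here (1, final-url)) means redirected.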
def resolve(self, uri):
try:
f = opener.open(uri)
f.close()
except HTTPError, err:
#if err.code == 403 and uri.startswith('http://en.wikipedia.org/'):
# # Wikipedia blocks requests from urllib User-Agent
# return (0, 0)
return (2, str(err))
except Exception, err:
return (2, str(err))
if f.url.rstrip('/') == uri.rstrip('/'):
return (0, 0)
else:
return (1, f.url)
def finish(self):
return
| gpl-2.0 | -7,146,254,761,796,531,000 | 27.648 | 81 | 0.51131 | false |
ahmadiga/min_edx | common/test/acceptance/tests/studio/test_studio_asset.py | 37 | 1708 | """
Acceptance tests for Studio related to the asset index page.
"""
from ...pages.studio.asset_index import AssetIndexPage
from .base_studio_test import StudioCourseTest
from ...fixtures.base import StudioApiLoginError
class AssetIndexTest(StudioCourseTest):
"""
Tests for the Asset index page.
"""
def setUp(self, is_staff=False):
super(AssetIndexTest, self).setUp()
self.asset_page = AssetIndexPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def populate_course_fixture(self, course_fixture):
"""
Populate the children of the test course fixture.
"""
self.course_fixture.add_asset(['image.jpg', 'textbook.pdf'])
def test_page_existence(self):
"""
Make sure that the page is accessible.
"""
self.asset_page.visit()
def test_type_filter_exists(self):
"""
Make sure type filter is on the page.
"""
self.asset_page.visit()
assert self.asset_page.type_filter_on_page() is True
def test_filter_results(self):
"""
Make sure type filter actually filters the results.
"""
self.asset_page.visit()
all_results = len(self.asset_page.return_results_set())
if self.asset_page.select_type_filter(1):
filtered_results = len(self.asset_page.return_results_set())
assert self.asset_page.type_filter_header_label_visible()
assert all_results > filtered_results
else:
msg = "Could not open select Type filter"
raise StudioApiLoginError(msg)
| agpl-3.0 | 6,913,080,152,349,685,000 | 29.5 | 72 | 0.608899 | false |
renfredxh/pylletTown | pylletTown.py | 1 | 8123 | import pygame
import tmx
class Player(pygame.sprite.Sprite):
def __init__(self, location, orientation, *groups):
super(Player, self).__init__(*groups)
self.image = pygame.image.load('sprites/player.png')
self.imageDefault = self.image.copy()
self.rect = pygame.Rect(location, (64,64))
self.orient = orientation
self.holdTime = 0
self.walking = False
self.dx = 0
self.step = 'rightFoot'
# Set default orientation
self.setSprite()
def setSprite(self):
# Resets the player sprite sheet to its default position
# and scrolls it to the necessary position for the current orientation
self.image = self.imageDefault.copy()
if self.orient == 'up':
self.image.scroll(0, -64)
elif self.orient == 'down':
self.image.scroll(0, 0)
elif self.orient == 'left':
self.image.scroll(0, -128)
elif self.orient == 'right':
self.image.scroll(0, -192)
def update(self, dt, game):
key = pygame.key.get_pressed()
# Setting orientation and sprite based on key input:
if key[pygame.K_UP]:
if not self.walking:
if self.orient != 'up':
self.orient = 'up'
self.setSprite()
self.holdTime += dt
elif key[pygame.K_DOWN]:
if not self.walking:
if self.orient != 'down':
self.orient = 'down'
self.setSprite()
self.holdTime += dt
elif key[pygame.K_LEFT]:
if not self.walking:
if self.orient != 'left':
self.orient = 'left'
self.setSprite()
self.holdTime += dt
elif key[pygame.K_RIGHT]:
if not self.walking:
if self.orient != 'right':
self.orient = 'right'
self.setSprite()
self.holdTime += dt
else:
self.holdTime = 0
self.step = 'rightFoot'
# Walking mode enabled if a button is held for 0.1 seconds
if self.holdTime >= 100:
self.walking = True
lastRect = self.rect.copy()
# Walking at 8 pixels per frame in the direction the player is facing
if self.walking and self.dx < 64:
if self.orient == 'up':
self.rect.y -= 8
elif self.orient == 'down':
self.rect.y += 8
elif self.orient == 'left':
self.rect.x -= 8
elif self.orient == 'right':
self.rect.x += 8
self.dx += 8
# Collision detection:
# Reset to the previous rectangle if player collides
# with anything in the foreground layer
if len(game.tilemap.layers['triggers'].collide(self.rect,
'solid')) > 0:
self.rect = lastRect
# Area entry detection:
elif len(game.tilemap.layers['triggers'].collide(self.rect,
'entry')) > 0:
entryCell = game.tilemap.layers['triggers'].find('entry')[0]
game.fadeOut()
game.initArea(entryCell['entry'])
return
# Switch to the walking sprite after 32 pixels
if self.dx == 32:
# Self.step keeps track of when to flip the sprite so that
# the character appears to be taking steps with different feet.
if (self.orient == 'up' or
self.orient == 'down') and self.step == 'leftFoot':
self.image = pygame.transform.flip(self.image, True, False)
self.step = 'rightFoot'
else:
self.image.scroll(-64, 0)
self.step = 'leftFoot'
# After traversing 64 pixels, the walking animation is done
if self.dx == 64:
self.walking = False
self.setSprite()
self.dx = 0
game.tilemap.set_focus(self.rect.x, self.rect.y)
class SpriteLoop(pygame.sprite.Sprite):
"""A simple looped animated sprite.
SpriteLoops require certain properties to be defined in the relevant
tmx tile:
src - the source of the image that contains the sprites
width, height - the width and height of each section of the sprite that
will be displayed on-screen during animation
mspf - milliseconds per frame, or how many milliseconds must pass to
advance onto the next frame in the sprite's animation
    frames - the number of individual frames that compose the animation
"""
def __init__(self, location, cell, *groups):
super(SpriteLoop, self).__init__(*groups)
self.image = pygame.image.load(cell['src'])
self.defaultImage = self.image.copy()
self.width = int(cell['width'])
self.height = int(cell['height'])
self.rect = pygame.Rect(location, (self.width,self.height))
self.frames = int(cell['frames'])
self.frameCount = 0
self.mspf = int(cell['mspf']) # milliseconds per frame
self.timeCount = 0
def update(self, dt, game):
self.timeCount += dt
if self.timeCount > self.mspf:
# Advance animation to the appropriate frame
self.image = self.defaultImage.copy()
self.image.scroll(-1*self.width*self.frameCount, 0)
self.timeCount = 0
self.frameCount += 1
if self.frameCount == self.frames:
self.frameCount = 0
class Game(object):
def __init__(self, screen):
self.screen = screen
def fadeOut(self):
"""Animate the screen fading to black for entering a new area"""
clock = pygame.time.Clock()
blackRect = pygame.Surface(self.screen.get_size())
blackRect.set_alpha(100)
blackRect.fill((0,0,0))
# Continuously draw a transparent black rectangle over the screen
# to create a fadeout effect
for i in range(0,5):
clock.tick(15)
self.screen.blit(blackRect, (0,0))
pygame.display.flip()
clock.tick(15)
screen.fill((255,255,255,50))
pygame.display.flip()
def initArea(self, mapFile):
"""Load maps and initialize sprite layers for each new area"""
self.tilemap = tmx.load(mapFile, screen.get_size())
self.players = tmx.SpriteLayer()
self.objects = tmx.SpriteLayer()
# Initializing other animated sprites
try:
for cell in self.tilemap.layers['sprites'].find('src'):
SpriteLoop((cell.px,cell.py), cell, self.objects)
# In case there is no sprite layer for the current map
except KeyError:
pass
else:
self.tilemap.layers.append(self.objects)
# Initializing player sprite
startCell = self.tilemap.layers['triggers'].find('playerStart')[0]
self.player = Player((startCell.px, startCell.py),
startCell['playerStart'], self.players)
self.tilemap.layers.append(self.players)
self.tilemap.set_focus(self.player.rect.x, self.player.rect.y)
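        # Assumption, inferred from the lookups above rather than documented
        # elsewhere: each .tmx map must provide a 'triggers' object layer with
        # 'playerStart', 'entry' and 'solid' properties, and may provide a
        # 'sprites' layer whose cells carry the SpriteLoop properties
        # (src/width/height/mspf/frames).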
def main(self):
clock = pygame.time.Clock()
self.initArea('palletTown.tmx')
while 1:
dt = clock.tick(30)
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
return
self.tilemap.update(dt, self)
screen.fill((0,0,0))
self.tilemap.draw(self.screen)
pygame.display.flip()
if __name__ == '__main__':
pygame.init()
screen = pygame.display.set_mode((640, 480))
pygame.display.set_caption("Pyllet Town")
Game(screen).main() | mit | 4,085,456,416,287,287,000 | 37.870813 | 81 | 0.543395 | false |
carlmw/oscar-wager | django/contrib/gis/gdal/field.py | 264 | 6059 | from ctypes import byref, c_int
from datetime import date, datetime, time
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException
from django.contrib.gis.gdal.prototypes import ds as capi
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_Fld_* routines are relevant here.
class Field(GDALBase):
"A class that wraps an OGR Field, needs to be instantiated from a Feature object."
#### Python 'magic' routines ####
def __init__(self, feat, index):
"""
Initializes on the feature pointer and the integer index of
the field within the feature.
"""
# Setting the feature pointer and index.
self._feat = feat
self._index = index
# Getting the pointer for this field.
fld_ptr = capi.get_feat_field_defn(feat, index)
if not fld_ptr:
raise OGRException('Cannot create OGR Field, invalid pointer given.')
self.ptr = fld_ptr
# Setting the class depending upon the OGR Field Type (OFT)
self.__class__ = OGRFieldTypes[self.type]
# OFTReal with no precision should be an OFTInteger.
if isinstance(self, OFTReal) and self.precision == 0:
self.__class__ = OFTInteger
def __str__(self):
"Returns the string representation of the Field."
return str(self.value).strip()
#### Field Methods ####
def as_double(self):
"Retrieves the Field's value as a double (float)."
return capi.get_field_as_double(self._feat, self._index)
def as_int(self):
"Retrieves the Field's value as an integer."
return capi.get_field_as_integer(self._feat, self._index)
def as_string(self):
"Retrieves the Field's value as a string."
return capi.get_field_as_string(self._feat, self._index)
def as_datetime(self):
"Retrieves the Field's value as a tuple of date & time components."
yy, mm, dd, hh, mn, ss, tz = [c_int() for i in range(7)]
status = capi.get_field_as_datetime(self._feat, self._index, byref(yy), byref(mm), byref(dd),
byref(hh), byref(mn), byref(ss), byref(tz))
if status:
return (yy, mm, dd, hh, mn, ss, tz)
else:
raise OGRException('Unable to retrieve date & time information from the field.')
#### Field Properties ####
@property
def name(self):
"Returns the name of this Field."
return capi.get_field_name(self.ptr)
@property
def precision(self):
"Returns the precision of this Field."
return capi.get_field_precision(self.ptr)
@property
def type(self):
"Returns the OGR type of this Field."
return capi.get_field_type(self.ptr)
@property
def type_name(self):
"Return the OGR field type name for this Field."
return capi.get_field_type_name(self.type)
@property
def value(self):
"Returns the value of this Field."
# Default is to get the field as a string.
return self.as_string()
@property
def width(self):
"Returns the width of this Field."
return capi.get_field_width(self.ptr)
### The Field sub-classes for each OGR Field type. ###
class OFTInteger(Field):
@property
def value(self):
"Returns an integer contained in this field."
return self.as_int()
@property
def type(self):
"""
GDAL uses OFTReals to represent OFTIntegers in created
shapefiles -- forcing the type here since the underlying field
type may actually be OFTReal.
"""
return 0
class OFTReal(Field):
@property
def value(self):
"Returns a float contained in this field."
return self.as_double()
# String & Binary fields, just subclasses
class OFTString(Field): pass
class OFTWideString(Field): pass
class OFTBinary(Field): pass
# OFTDate, OFTTime, OFTDateTime fields.
class OFTDate(Field):
@property
def value(self):
"Returns a Python `date` object for the OFTDate field."
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return date(yy.value, mm.value, dd.value)
except (ValueError, OGRException):
return None
class OFTDateTime(Field):
@property
def value(self):
"Returns a Python `datetime` object for this OFTDateTime field."
# TODO: Adapt timezone information.
# See http://lists.maptools.org/pipermail/gdal-dev/2006-February/007990.html
# The `tz` variable has values of: 0=unknown, 1=localtime (ambiguous),
# 100=GMT, 104=GMT+1, 80=GMT-5, etc.
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return datetime(yy.value, mm.value, dd.value, hh.value, mn.value, ss.value)
except (ValueError, OGRException):
return None
class OFTTime(Field):
@property
def value(self):
"Returns a Python `time` object for this OFTTime field."
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return time(hh.value, mn.value, ss.value)
except (ValueError, OGRException):
return None
# List fields are also just subclasses
class OFTIntegerList(Field): pass
class OFTRealList(Field): pass
class OFTStringList(Field): pass
class OFTWideStringList(Field): pass
# Class mapping dictionary for OFT Types and reverse mapping.
OGRFieldTypes = { 0 : OFTInteger,
1 : OFTIntegerList,
2 : OFTReal,
3 : OFTRealList,
4 : OFTString,
5 : OFTStringList,
6 : OFTWideString,
7 : OFTWideStringList,
8 : OFTBinary,
9 : OFTDate,
10 : OFTTime,
11 : OFTDateTime,
}
ROGRFieldTypes = dict([(cls, num) for num, cls in OGRFieldTypes.items()])
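# Hedged usage sketch (names are illustrative, not part of this module): given
# the C pointer ``feat`` of an OGR Feature, ``fld = Field(feat, 0)`` wraps its
# first field; the instance is re-classed to the matching OFT* subclass above,
# so ``fld.name``, ``fld.type_name`` and ``fld.value`` return the field's name,
# OGR type name and properly typed value.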
| bsd-3-clause | 4,473,317,701,176,517,000 | 33.039326 | 101 | 0.606536 | false |
DJMelonz/basic-blog | django/views/generic/simple.py | 245 | 2319 | from django.template import loader, RequestContext
from django.http import HttpResponse, HttpResponseRedirect, HttpResponsePermanentRedirect, HttpResponseGone
from django.utils.log import getLogger
import warnings
warnings.warn(
'Function-based generic views have been deprecated; use class-based views instead.',
PendingDeprecationWarning
)
logger = getLogger('django.request')
def direct_to_template(request, template, extra_context=None, mimetype=None, **kwargs):
"""
Render a given template with any extra URL parameters in the context as
``{{ params }}``.
"""
if extra_context is None: extra_context = {}
dictionary = {'params': kwargs}
for key, value in extra_context.items():
if callable(value):
dictionary[key] = value()
else:
dictionary[key] = value
c = RequestContext(request, dictionary)
t = loader.get_template(template)
return HttpResponse(t.render(c), mimetype=mimetype)
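# Hedged illustration (URL pattern and template name invented): this view was
# typically wired into a urlconf along the lines of
#   ('^about/$', 'django.views.generic.simple.direct_to_template',
#    {'template': 'about.html', 'extra_context': {'title': 'About us'}}),
# which makes ``{{ params }}`` and ``{{ title }}`` available in the template.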
def redirect_to(request, url, permanent=True, query_string=False, **kwargs):
"""
Redirect to a given URL.
The given url may contain dict-style string formatting, which will be
interpolated against the params in the URL. For example, to redirect from
``/foo/<id>/`` to ``/bar/<id>/``, you could use the following URLconf::
urlpatterns = patterns('',
('^foo/(?P<id>\d+)/$', 'django.views.generic.simple.redirect_to', {'url' : '/bar/%(id)s/'}),
)
If the given url is ``None``, a HttpResponseGone (410) will be issued.
If the ``permanent`` argument is False, then the response will have a 302
HTTP status code. Otherwise, the status code will be 301.
If the ``query_string`` argument is True, then the GET query string
from the request is appended to the URL.
"""
args = request.META["QUERY_STRING"]
if args and query_string and url is not None:
url = "%s?%s" % (url, args)
if url is not None:
klass = permanent and HttpResponsePermanentRedirect or HttpResponseRedirect
return klass(url % kwargs)
else:
logger.warning('Gone: %s' % request.path,
extra={
'status_code': 410,
'request': request
})
return HttpResponseGone()
| bsd-3-clause | -6,451,735,320,082,775,000 | 35.234375 | 107 | 0.644674 | false |
RockRaidersInc/ROS-Main | gps/src/FSM.py | 1 | 1234 | """TODO."""
from enum import Enum
import UBX
def isObj(obj, cls):
"""Test if UBX message obj is of class cls."""
return obj._class == cls._class and obj._id == cls._id
def isACK(obj):
"""Test whether message obj is a ACK."""
return isObj(obj, UBX.ACK.ACK)
def isNAK(obj):
"""Test whether message obj is a NAK."""
return isObj(obj, UBX.ACK.NAK)
def FSM_Get(msgCls):
"""Decorator that makes a getter FSM for use in Manager."""
def decorator(FSMCls):
# 1. class STATE
class STATE(Enum):
START = 0
DONE = 1
setattr(FSMCls, "STATE", STATE)
# 2. function __init__
def __init__(self):
self.state = FSMCls.STATE.START
self.ver = None
setattr(FSMCls, "__init__", __init__)
# 3. function done
def done(self):
return self.state == FSMCls.STATE.DONE
setattr(FSMCls, "done", done)
# 4. function onUBX
def onUBX(self, obj, manager):
if obj._class == msgCls._class and obj._id == msgCls._id:
print(obj)
self.state = FSMCls.STATE.DONE
setattr(FSMCls, "onUBX", onUBX)
return FSMCls
return decorator
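# Hedged usage sketch (UBX.MON.VER is only an example message class, not
# something this module requires): decorating a class with
#
#   @FSM_Get(UBX.MON.VER)
#   class FSM_MON_VER:
#       pass
#
# equips it with STATE, __init__, done() and onUBX(), so a Manager can poll
# fsm.done() and feed each decoded message to fsm.onUBX(obj, manager) until the
# requested message arrives.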
| gpl-3.0 | 5,613,455,981,996,897,000 | 26.422222 | 69 | 0.550243 | false |
incaser/odoo-odoo | doc/_extensions/odoo/translator.py | 207 | 26718 | # -*- coding: utf-8 -*-
import os.path
import posixpath
import re
import urllib
from docutils import nodes
from sphinx import addnodes, util
from sphinx.locale import admonitionlabels
def _parents(node):
while node.parent:
node = node.parent
yield node
class BootstrapTranslator(nodes.NodeVisitor, object):
head_prefix = 'head_prefix'
head = 'head'
stylesheet = 'stylesheet'
body_prefix = 'body_prefix'
body_pre_docinfo = 'body_pre_docinfo'
docinfo = 'docinfo'
body_suffix = 'body_suffix'
subtitle = 'subtitle'
header = 'header'
footer = 'footer'
html_prolog = 'html_prolog'
html_head = 'html_head'
html_title = 'html_title'
html_subtitle = 'html_subtitle'
# <meta> tags
meta = [
'<meta http-equiv="X-UA-Compatible" content="IE=edge">',
'<meta name="viewport" content="width=device-width, initial-scale=1">'
]
def __init__(self, builder, document):
super(BootstrapTranslator, self).__init__(document)
self.builder = builder
self.body = []
self.fragment = self.body
self.html_body = self.body
# document title
self.title = []
self.start_document_title = 0
self.first_title = False
self.context = []
self.section_level = 0
self.highlightlang = self.highlightlang_base = self.builder.config.highlight_language
self.highlightopts = getattr(builder.config, 'highlight_options', {})
self.first_param = 1
self.optional_param_level = 0
self.required_params_left = 0
self.param_separator = ','
def encode(self, text):
return unicode(text).translate({
ord('&'): u'&',
ord('<'): u'<',
ord('"'): u'"',
ord('>'): u'>',
0xa0: u' '
})
def starttag(self, node, tagname, **attributes):
tagname = unicode(tagname).lower()
# extract generic attributes
attrs = {name.lower(): value for name, value in attributes.iteritems()}
attrs.update(
(name, value) for name, value in node.attributes.iteritems()
if name.startswith('data-')
)
prefix = []
postfix = []
# handle possibly multiple ids
assert 'id' not in attrs, "starttag can't be passed a single id attribute, use a list of ids"
ids = node.get('ids', []) + attrs.pop('ids', [])
if ids:
_ids = iter(ids)
attrs['id'] = next(_ids)
postfix.extend(u'<i id="{}"></i>'.format(_id) for _id in _ids)
# set CSS class
classes = set(node.get('classes', []) + attrs.pop('class', '').split())
if classes:
attrs['class'] = u' '.join(classes)
return u'{prefix}<{tag} {attrs}>{postfix}'.format(
prefix=u''.join(prefix),
tag=tagname,
attrs=u' '.join(u'{}="{}"'.format(name, self.attval(value))
for name, value in attrs.iteritems()),
postfix=u''.join(postfix),
)
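    # Hedged illustration (node attributes invented): for a node carrying
    # ids=['intro'] and classes=['chapter'], starttag(node, 'div', CLASS='extra')
    # yields roughly u'<div id="intro" class="chapter extra">'; attribute and
    # class ordering depend on dict/set iteration, and any additional ids become
    # trailing '<i id="..."></i>' anchors after the opening tag.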
# only "space characters" SPACE, CHARACTER TABULATION, LINE FEED,
    # FORM FEED and CARRIAGE RETURN should be collapsed, not all White_Space
def attval(self, value, whitespace=re.compile(u'[ \t\n\f\r]')):
return self.encode(whitespace.sub(u' ', unicode(value)))
def astext(self):
return u''.join(self.body)
def unknown_visit(self, node):
print "unknown node", node.__class__.__name__
self.body.append(u'[UNKNOWN NODE {}]'.format(node.__class__.__name__))
raise nodes.SkipNode
def visit_highlightlang(self, node):
self.highlightlang = node['lang']
def depart_highlightlang(self, node):
pass
def visit_document(self, node):
self.first_title = True
def depart_document(self, node):
pass
def visit_section(self, node):
# close "parent" or preceding section, unless this is the opening of
# the first section
if self.section_level:
self.body.append(u'</section>')
self.section_level += 1
self.body.append(self.starttag(node, 'section'))
def depart_section(self, node):
self.section_level -= 1
# close last section of document
if not self.section_level:
self.body.append(u'</section>')
def is_compact_paragraph(self, node):
parent = node.parent
if isinstance(parent, (nodes.document, nodes.compound,
addnodes.desc_content,
addnodes.versionmodified)):
# Never compact paragraphs in document or compound.
return False
for key, value in node.attlist():
# we can ignore a few specific classes, all other non-default
# attributes require that a <p> node remains
if key != 'classes' or value not in ([], ['first'], ['last'], ['first', 'last']):
return False
first = isinstance(node.parent[0], nodes.label)
for child in parent.children[first:]:
# only first paragraph can be compact
if isinstance(child, nodes.Invisible):
continue
if child is node:
break
return False
parent_length = len([
1 for n in parent
if not isinstance(n, (nodes.Invisible, nodes.label))
])
return parent_length == 1
def visit_paragraph(self, node):
if self.is_compact_paragraph(node):
self.context.append(u'')
return
self.body.append(self.starttag(node, 'p'))
self.context.append(u'</p>')
def depart_paragraph(self, node):
self.body.append(self.context.pop())
def visit_compact_paragraph(self, node):
pass
def depart_compact_paragraph(self, node):
pass
def visit_literal_block(self, node):
if node.rawsource != node.astext():
# most probably a parsed-literal block -- don't highlight
self.body.append(self.starttag(node, 'pre'))
return
lang = self.highlightlang
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True
linenos = node.get('linenos', False)
if lang is self.highlightlang_base:
# only pass highlighter options for original language
opts = self.highlightopts
else:
opts = {}
def warner(msg):
self.builder.warn(msg, (self.builder.current_docname, node.line))
highlighted = self.builder.highlighter.highlight_block(
node.rawsource, lang, opts=opts, warn=warner, linenos=linenos,
**highlight_args)
self.body.append(self.starttag(node, 'div', CLASS='highlight-%s' % lang))
self.body.append(highlighted)
self.body.append(u'</div>\n')
raise nodes.SkipNode
def depart_literal_block(self, node):
self.body.append(u'</pre>')
def visit_bullet_list(self, node):
self.body.append(self.starttag(node, 'ul'))
def depart_bullet_list(self, node):
self.body.append(u'</ul>')
def visit_enumerated_list(self, node):
self.body.append(self.starttag(node, 'ol'))
def depart_enumerated_list(self, node):
self.body.append(u'</ol>')
def visit_list_item(self, node):
self.body.append(self.starttag(node, 'li'))
def depart_list_item(self, node):
self.body.append(u'</li>')
def visit_definition_list(self, node):
self.body.append(self.starttag(node, 'dl'))
def depart_definition_list(self, node):
self.body.append(u'</dl>')
def visit_definition_list_item(self, node):
pass
def depart_definition_list_item(self, node):
pass
def visit_term(self, node):
self.body.append(self.starttag(node, 'dt'))
def depart_term(self, node):
self.body.append(u'</dt>')
def visit_termsep(self, node):
self.body.append(self.starttag(node, 'br'))
raise nodes.SkipNode
def visit_definition(self, node):
self.body.append(self.starttag(node, 'dd'))
def depart_definition(self, node):
self.body.append(u'</dd>')
def visit_admonition(self, node, type=None):
clss = {
# ???: 'alert-success',
'note': 'alert-info',
'hint': 'alert-info',
'tip': 'alert-info',
'seealso': 'alert-info',
'warning': 'alert-warning',
'attention': 'alert-warning',
'caution': 'alert-warning',
'important': 'alert-warning',
'danger': 'alert-danger',
'error': 'alert-danger',
'exercise': 'alert-exercise',
}
self.body.append(self.starttag(node, 'div', role='alert', CLASS='alert {}'.format(
clss.get(type, '')
)))
if 'alert-dismissible' in node.get('classes', []):
self.body.append(
u'<button type="button" class="close" data-dismiss="alert" aria-label="Close">'
u'<span aria-hidden="true">×</span>'
u'</button>')
if type:
node.insert(0, nodes.title(type, admonitionlabels[type]))
def depart_admonition(self, node):
self.body.append(u'</div>')
visit_note = lambda self, node: self.visit_admonition(node, 'note')
visit_warning = lambda self, node: self.visit_admonition(node, 'warning')
visit_attention = lambda self, node: self.visit_admonition(node, 'attention')
visit_caution = lambda self, node: self.visit_admonition(node, 'caution')
visit_danger = lambda self, node: self.visit_admonition(node, 'danger')
visit_error = lambda self, node: self.visit_admonition(node, 'error')
visit_hint = lambda self, node: self.visit_admonition(node, 'hint')
visit_important = lambda self, node: self.visit_admonition(node, 'important')
visit_tip = lambda self, node: self.visit_admonition(node, 'tip')
visit_exercise = lambda self, node: self.visit_admonition(node, 'exercise')
visit_seealso = lambda self, node: self.visit_admonition(node, 'seealso')
depart_note = depart_admonition
depart_warning = depart_admonition
depart_attention = depart_admonition
depart_caution = depart_admonition
depart_danger = depart_admonition
depart_error = depart_admonition
depart_hint = depart_admonition
depart_important = depart_admonition
depart_tip = depart_admonition
depart_exercise = depart_admonition
depart_seealso = depart_admonition
def visit_versionmodified(self, node):
self.body.append(self.starttag(node, 'div', CLASS=node['type']))
def depart_versionmodified(self, node):
self.body.append(u'</div>')
def visit_title(self, node):
parent = node.parent
closing = u'</p>'
if isinstance(parent, nodes.Admonition):
self.body.append(self.starttag(node, 'p', CLASS='alert-title'))
elif isinstance(node.parent, nodes.document):
self.body.append(self.starttag(node, 'h1'))
closing = u'</h1>'
self.start_document_title = len(self.body)
else:
assert isinstance(parent, nodes.section), "expected a section node as parent to the title, found {}".format(parent)
if self.first_title:
self.first_title = False
raise nodes.SkipNode()
nodename = 'h{}'.format(self.section_level)
self.body.append(self.starttag(node, nodename))
closing = u'</{}>'.format(nodename)
self.context.append(closing)
def depart_title(self, node):
self.body.append(self.context.pop())
if self.start_document_title:
self.title = self.body[self.start_document_title:-1]
self.start_document_title = 0
del self.body[:]
# the rubric should be a smaller heading than the current section, up to
# h6... maybe "h7" should be a ``p`` instead?
def visit_rubric(self, node):
self.body.append(self.starttag(node, 'h{}'.format(min(self.section_level + 1, 6))))
def depart_rubric(self, node):
self.body.append(u'</h{}>'.format(min(self.section_level + 1, 6)))
def visit_block_quote(self, node):
self.body.append(self.starttag(node, 'blockquote'))
def depart_block_quote(self, node):
self.body.append(u'</blockquote>')
def visit_attribution(self, node):
self.body.append(self.starttag(node, 'footer'))
def depart_attribution(self, node):
self.body.append(u'</footer>')
def visit_container(self, node):
self.body.append(self.starttag(node, 'div'))
def depart_container(self, node):
self.body.append(u'</div>')
def visit_compound(self, node):
self.body.append(self.starttag(node, 'div'))
def depart_compound(self, node):
self.body.append(u'</div>')
def visit_image(self, node):
uri = node['uri']
if uri in self.builder.images:
uri = posixpath.join(self.builder.imgpath,
self.builder.images[uri])
attrs = {'src': uri, 'class': 'img-responsive'}
if 'alt' in node:
attrs['alt'] = node['alt']
if 'align' in node:
if node['align'] == 'center':
attrs['class'] += ' center-block'
else:
doc = None
if node.source:
doc = node.source
if node.line:
doc += ':%d' % node.line
self.builder.app.warn(
"Unsupported alignment value \"%s\"" % node['align'],
location=doc
)
# todo: explicit width/height/scale?
self.body.append(self.starttag(node, 'img', **attrs))
def depart_image(self, node): pass
def visit_figure(self, node):
self.body.append(self.starttag(node, 'div'))
def depart_figure(self, node):
self.body.append(u'</div>')
def visit_caption(self, node):
# first paragraph of figure content
self.body.append(self.starttag(node, 'h4'))
def depart_caption(self, node):
self.body.append(u'</h4>')
def visit_legend(self, node): pass
def depart_legend(self, node): pass
def visit_line(self, node):
self.body.append(self.starttag(node, 'div', CLASS='line'))
# ensure the line still takes the room it needs
if not len(node): self.body.append(u'<br />')
def depart_line(self, node):
self.body.append(u'</div>')
def visit_line_block(self, node):
self.body.append(self.starttag(node, 'div', CLASS='line-block'))
def depart_line_block(self, node):
self.body.append(u'</div>')
def visit_table(self, node):
self.body.append(self.starttag(node, 'table', CLASS='table'))
def depart_table(self, node):
self.body.append(u'</table>')
def visit_tgroup(self, node): pass
def depart_tgroup(self, node): pass
def visit_colspec(self, node): raise nodes.SkipNode
def visit_thead(self, node):
self.body.append(self.starttag(node, 'thead'))
def depart_thead(self, node):
self.body.append(u'</thead>')
def visit_tbody(self, node):
self.body.append(self.starttag(node, 'tbody'))
def depart_tbody(self, node):
self.body.append(u'</tbody>')
def visit_row(self, node):
self.body.append(self.starttag(node, 'tr'))
def depart_row(self, node):
self.body.append(u'</tr>')
def visit_entry(self, node):
if isinstance(node.parent.parent, nodes.thead):
tagname = 'th'
else:
tagname = 'td'
self.body.append(self.starttag(node, tagname))
self.context.append(tagname)
def depart_entry(self, node):
self.body.append(u'</{}>'.format(self.context.pop()))
def visit_Text(self, node):
self.body.append(self.encode(node.astext()))
def depart_Text(self, node):
pass
def visit_literal(self, node):
self.body.append(self.starttag(node, 'code'))
def depart_literal(self, node):
self.body.append(u'</code>')
visit_literal_emphasis = visit_literal
depart_literal_emphasis = depart_literal
def visit_emphasis(self, node):
self.body.append(self.starttag(node, 'em'))
def depart_emphasis(self, node):
self.body.append(u'</em>')
def visit_strong(self, node):
self.body.append(self.starttag(node, 'strong'))
def depart_strong(self, node):
self.body.append(u'</strong>')
visit_literal_strong = visit_strong
depart_literal_strong = depart_strong
def visit_inline(self, node):
self.body.append(self.starttag(node, 'span'))
def depart_inline(self, node):
self.body.append(u'</span>')
def visit_abbreviation(self, node):
attrs = {}
if 'explanation' in node:
attrs['title'] = node['explanation']
self.body.append(self.starttag(node, 'abbr', **attrs))
def depart_abbreviation(self, node):
self.body.append(u'</abbr>')
def visit_reference(self, node):
attrs = {
'class': 'reference',
'href': node['refuri'] if 'refuri' in node else '#' + node['refid']
}
attrs['class'] += ' internal' if (node.get('internal') or 'refuri' not in node) else ' external'
if any(isinstance(ancestor, nodes.Admonition) for ancestor in _parents(node)):
attrs['class'] += ' alert-link'
if 'reftitle' in node:
attrs['title'] = node['reftitle']
self.body.append(self.starttag(node, 'a', **attrs))
def depart_reference(self, node):
self.body.append(u'</a>')
def visit_target(self, node): pass
def depart_target(self, node): pass
def visit_footnote(self, node):
self.body.append(self.starttag(node, 'div', CLASS='footnote'))
self.footnote_backrefs(node)
def depart_footnote(self, node):
self.body.append(u'</div>')
def visit_footnote_reference(self, node):
self.body.append(self.starttag(
node, 'a', href='#' + node['refid'], CLASS="footnote-ref"))
def depart_footnote_reference(self, node):
self.body.append(u'</a>')
def visit_label(self, node):
self.body.append(self.starttag(node, 'span', CLASS='footnote-label'))
self.body.append(u'%s[' % self.context.pop())
def depart_label(self, node):
# Context added in footnote_backrefs.
self.body.append(u']%s</span> %s' % (self.context.pop(), self.context.pop()))
def footnote_backrefs(self, node):
# should store following data on context stack (in that order since
# they'll be popped so LIFO)
#
# * outside (after) label
# * after label text
# * before label text
backrefs = node['backrefs']
if not backrefs:
self.context.extend(['', '', ''])
elif len(backrefs) == 1:
self.context.extend([
'',
'</a>',
'<a class="footnote-backref" href="#%s">' % backrefs[0]
])
else:
backlinks = (
'<a class="footnote-backref" href="#%s">%s</a>' % (backref, i)
for i, backref in enumerate(backrefs, start=1)
)
self.context.extend([
'<em class="footnote-backrefs">(%s)</em> ' % ', '.join(backlinks),
'',
''
])
def visit_desc(self, node):
self.body.append(self.starttag(node, 'section', CLASS='code-' + node['objtype']))
def depart_desc(self, node):
self.body.append(u'</section>')
def visit_desc_signature(self, node):
self.body.append(self.starttag(node, 'h6'))
self.body.append(u'<code>')
def depart_desc_signature(self, node):
self.body.append(u'</code>')
self.body.append(u'</h6>')
def visit_desc_addname(self, node): pass
def depart_desc_addname(self, node): pass
def visit_desc_type(self, node): pass
def depart_desc_type(self, node): pass
def visit_desc_returns(self, node):
self.body.append(u' → ')
def depart_desc_returns(self, node):
pass
def visit_desc_name(self, node): pass
def depart_desc_name(self, node): pass
def visit_desc_parameterlist(self, node):
self.body.append(u'(')
self.first_param = True
self.optional_param_level = 0
# How many required parameters are left.
self.required_params_left = sum(isinstance(c, addnodes.desc_parameter) for c in node.children)
self.param_separator = node.child_text_separator
def depart_desc_parameterlist(self, node):
self.body.append(u')')
# If required parameters are still to come, then put the comma after
# the parameter. Otherwise, put the comma before. This ensures that
# signatures like the following render correctly (see issue #1001):
#
# foo([a, ]b, c[, d])
#
def visit_desc_parameter(self, node):
if self.first_param:
self.first_param = 0
elif not self.required_params_left:
self.body.append(self.param_separator)
if self.optional_param_level == 0:
self.required_params_left -= 1
if 'noemph' not in node: self.body.append(u'<em>')
def depart_desc_parameter(self, node):
if 'noemph' not in node: self.body.append(u'</em>')
if self.required_params_left:
self.body.append(self.param_separator)
def visit_desc_optional(self, node):
self.optional_param_level += 1
self.body.append(u'[')
def depart_desc_optional(self, node):
self.optional_param_level -= 1
self.body.append(u']')
def visit_desc_annotation(self, node):
self.body.append(self.starttag(node, 'em'))
def depart_desc_annotation(self, node):
self.body.append(u'</em>')
def visit_desc_content(self, node): pass
def depart_desc_content(self, node): pass
def visit_field_list(self, node):
self.body.append(self.starttag(node, 'div', CLASS='code-fields'))
def depart_field_list(self, node):
self.body.append(u'</div>')
def visit_field(self, node):
self.body.append(self.starttag(node, 'div', CLASS='code-field'))
def depart_field(self, node):
self.body.append(u'</div>')
def visit_field_name(self, node):
self.body.append(self.starttag(node, 'div', CLASS='code-field-name'))
def depart_field_name(self, node):
self.body.append(u'</div>')
def visit_field_body(self, node):
self.body.append(self.starttag(node, 'div', CLASS='code-field-body'))
def depart_field_body(self, node):
self.body.append(u'</div>')
def visit_glossary(self, node): pass
def depart_glossary(self, node): pass
def visit_comment(self, node): raise nodes.SkipNode
def visit_toctree(self, node):
# div class=row {{ section_type }}
# h2 class=col-sm-12
# {{ section title }}
# div class=col-sm-6 col-md-3
# figure class=card
# a href=current_link style=background-image: document-image-attribute class=card-img
# figcaption
# {{ card title }}
env = self.builder.env
conf = self.builder.app.config
for title, ref in ((e[0], e[1]) for e in node['entries']):
# external URL, no toc, can't recurse into
if ref not in env.tocs:
continue
toc = env.tocs[ref].traverse(addnodes.toctree)
classes = env.metadata[ref].get('types', 'tutorials')
classes += ' toc-single-entry' if not toc else ' toc-section'
self.body.append(self.starttag(node, 'div', CLASS="row " + classes))
self.body.append(u'<h2 class="col-sm-12">')
self.body.append(title if title else util.nodes.clean_astext(env.titles[ref]))
self.body.append(u'</h2>')
entries = [(title, ref)] if not toc else ((e[0], e[1]) for e in toc[0]['entries'])
for subtitle, subref in entries:
baseuri = self.builder.get_target_uri(node['parent'])
if subref in env.metadata:
cover = env.metadata[subref].get('banner', conf.odoo_cover_default)
elif subref in conf.odoo_cover_external:
cover = conf.odoo_cover_external[subref]
else:
cover = conf.odoo_cover_default_external
if cover:
banner = '_static/' + cover
base, ext = os.path.splitext(banner)
small = "{}.small{}".format(base, ext)
if os.path.isfile(urllib.url2pathname(small)):
banner = small
style = u"background-image: url('{}')".format(
util.relative_uri(baseuri, banner) or '#')
else:
style = u''
self.body.append(u"""
<div class="col-sm-6 col-md-3">
<figure class="card">
<a href="{link}" class="card-img">
<span style="{style}"></span>
<figcaption>{title}</figcaption>
</a>
</figure>
</div>
""".format(
link=subref if util.url_re.match(subref) else util.relative_uri(
baseuri, self.builder.get_target_uri(subref)),
style=style,
title=subtitle if subtitle else util.nodes.clean_astext(env.titles[subref]),
))
self.body.append(u'</div>')
raise nodes.SkipNode
def visit_index(self, node): raise nodes.SkipNode
def visit_raw(self, node):
if 'html' in node.get('format', '').split():
t = 'span' if isinstance(node.parent, nodes.TextElement) else 'div'
if node['classes']:
self.body.append(self.starttag(node, t))
self.body.append(node.astext())
if node['classes']:
self.body.append('</%s>' % t)
# Keep non-HTML raw text out of output:
raise nodes.SkipNode
# internal node
def visit_substitution_definition(self, node): raise nodes.SkipNode
| agpl-3.0 | -5,262,659,755,275,446,000 | 38.346097 | 127 | 0.576246 | false |
brettatoms/cerberus | docs/conf.py | 1 | 8056 | # -*- coding: utf-8 -*-
#
# Cerberus documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 11 15:52:25 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Cerberus'
copyright = u'2012-2015, Nicola Iarocci'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __import__('cerberus').__version__
# The short X.Y version.
version = release.split('-dev')[0]
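# For example (hypothetical version string): a release of '0.9.2-dev' gives a
# short version of '0.9.2'; a plain '0.9.2' release is left unchanged.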
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Cerberusdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Cerberus.tex', u'Cerberus Documentation',
u'Nicola Iarocci', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cerberus', u'Cerberus Documentation',
[u'Nicola Iarocci'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Cerberus', u'Cerberus Documentation',
u'Nicola Iarocci', 'Cerberus', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| isc | 2,494,742,778,687,457,300 | 31.615385 | 102 | 0.705437 | false |
aam-at/tensorflow | tensorflow/python/ops/ragged/ragged_tensor.py | 1 | 117296 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for storing ragged tensors and their values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.client import session
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_ragged_conversion_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_config
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.ops.ragged.row_partition import RowPartition
from tensorflow.python.types import internal as internal_types
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
from tensorflow.tools.docs import doc_controls
# pylint: disable=protected-access
_convert_row_partition = RowPartition._convert_row_partition
# pylint: enable=protected-access
#===============================================================================
# RaggedTensor
#===============================================================================
@tf_export("RaggedTensor")
class RaggedTensor(composite_tensor.CompositeTensor,
internal_types.NativeObject):
"""Represents a ragged tensor.
A `RaggedTensor` is a tensor with one or more *ragged dimensions*, which are
dimensions whose slices may have different lengths. For example, the inner
(column) dimension of `rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged,
since the column slices (`rt[0, :]`, ..., `rt[4, :]`) have different lengths.
Dimensions whose slices all have the same length are called *uniform
dimensions*. The outermost dimension of a `RaggedTensor` is always uniform,
since it consists of a single slice (and so there is no possibility for
differing slice lengths).
The total number of dimensions in a `RaggedTensor` is called its *rank*,
and the number of ragged dimensions in a `RaggedTensor` is called its
*ragged-rank*. A `RaggedTensor`'s ragged-rank is fixed at graph creation
time: it can't depend on the runtime values of `Tensor`s, and can't vary
dynamically for different session runs.
Note that the `__init__` constructor is private. Please use one of the
following methods to construct a `RaggedTensor`:
* `tf.RaggedTensor.from_row_lengths`
* `tf.RaggedTensor.from_value_rowids`
* `tf.RaggedTensor.from_row_splits`
* `tf.RaggedTensor.from_row_starts`
* `tf.RaggedTensor.from_row_limits`
* `tf.RaggedTensor.from_nested_row_splits`
* `tf.RaggedTensor.from_nested_row_lengths`
* `tf.RaggedTensor.from_nested_value_rowids`
### Potentially Ragged Tensors
Many ops support both `Tensor`s and `RaggedTensor`s. The term "potentially
ragged tensor" may be used to refer to a tensor that might be either a
`Tensor` or a `RaggedTensor`. The ragged-rank of a `Tensor` is zero.
### Documenting RaggedTensor Shapes
When documenting the shape of a RaggedTensor, ragged dimensions can be
indicated by enclosing them in parentheses. For example, the shape of
a 3-D `RaggedTensor` that stores the fixed-size word embedding for each
word in a sentence, for each sentence in a batch, could be written as
`[num_sentences, (num_words), embedding_size]`. The parentheses around
`(num_words)` indicate that dimension is ragged, and that the length
of each element list in that dimension may vary for each item.
### Component Tensors
Internally, a `RaggedTensor` consists of a concatenated list of values that
are partitioned into variable-length rows. In particular, each `RaggedTensor`
consists of:
* A `values` tensor, which concatenates the variable-length rows into a
flattened list. For example, the `values` tensor for
`[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is `[3, 1, 4, 1, 5, 9, 2, 6]`.
* A `row_splits` vector, which indicates how those flattened values are
divided into rows. In particular, the values for row `rt[i]` are stored
in the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.
Example:
>>> print(tf.RaggedTensor.from_row_splits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_splits=[0, 4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
### Alternative Row-Partitioning Schemes
In addition to `row_splits`, ragged tensors provide support for five other
row-partitioning schemes:
* `row_lengths`: a vector with shape `[nrows]`, which specifies the length
of each row.
* `value_rowids` and `nrows`: `value_rowids` is a vector with shape
`[nvals]`, corresponding one-to-one with `values`, which specifies
each value's row index. In particular, the row `rt[row]` consists of the
values `rt.values[j]` where `value_rowids[j]==row`. `nrows` is an
integer scalar that specifies the number of rows in the
`RaggedTensor`. (`nrows` is used to indicate trailing empty rows.)
* `row_starts`: a vector with shape `[nrows]`, which specifies the start
offset of each row. Equivalent to `row_splits[:-1]`.
* `row_limits`: a vector with shape `[nrows]`, which specifies the stop
offset of each row. Equivalent to `row_splits[1:]`.
* `uniform_row_length`: A scalar tensor, specifying the length of every
row. This row-partitioning scheme may only be used if all rows have
the same length.
Example: The following ragged tensors are equivalent, and all represent the
nested list `[[3, 1, 4, 1], [], [5, 9, 2], [6], []]`.
>>> values = [3, 1, 4, 1, 5, 9, 2, 6]
>>> rt1 = RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8])
>>> rt2 = RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0])
>>> rt3 = RaggedTensor.from_value_rowids(
... values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5)
>>> rt4 = RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7, 8])
>>> rt5 = RaggedTensor.from_row_limits(values, row_limits=[4, 4, 7, 8, 8])
### Multiple Ragged Dimensions
`RaggedTensor`s with multiple ragged dimensions can be defined by using
a nested `RaggedTensor` for the `values` tensor. Each nested `RaggedTensor`
adds a single ragged dimension.
>>> inner_rt = RaggedTensor.from_row_splits( # =rt1 from above
... values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8])
>>> outer_rt = RaggedTensor.from_row_splits(
... values=inner_rt, row_splits=[0, 3, 3, 5])
>>> print(outer_rt.to_list())
[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]
>>> print(outer_rt.ragged_rank)
2
The factory function `RaggedTensor.from_nested_row_splits` may be used to
construct a `RaggedTensor` with multiple ragged dimensions directly, by
providing a list of `row_splits` tensors:
>>> RaggedTensor.from_nested_row_splits(
... flat_values=[3, 1, 4, 1, 5, 9, 2, 6],
... nested_row_splits=([0, 3, 3, 5], [0, 4, 4, 7, 8, 8])).to_list()
[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]
### Uniform Inner Dimensions
`RaggedTensor`s with uniform inner dimensions can be defined
by using a multidimensional `Tensor` for `values`.
>>> rt = RaggedTensor.from_row_splits(values=tf.ones([5, 3], tf.int32),
... row_splits=[0, 2, 5])
>>> print(rt.to_list())
[[[1, 1, 1], [1, 1, 1]],
[[1, 1, 1], [1, 1, 1], [1, 1, 1]]]
>>> print(rt.shape)
(2, None, 3)
### Uniform Outer Dimensions
`RaggedTensor`s with uniform outer dimensions can be defined by using
one or more `RaggedTensor` with a `uniform_row_length` row-partitioning
tensor. For example, a `RaggedTensor` with shape `[2, 2, None]` can be
  constructed with this method from a `values` `RaggedTensor` with shape
`[4, None]`:
>>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
>>> print(values.shape)
(4, None)
>>> rt6 = tf.RaggedTensor.from_uniform_row_length(values, 2)
>>> print(rt6)
<tf.RaggedTensor [[[1, 2, 3], [4]], [[5, 6], [7, 8, 9, 10]]]>
>>> print(rt6.shape)
(2, 2, None)
Note that `rt6` only contains one ragged dimension (the innermost
dimension). In contrast, if `from_row_splits` is used to construct a similar
`RaggedTensor`, then that `RaggedTensor` will have two ragged dimensions:
>>> rt7 = tf.RaggedTensor.from_row_splits(values, [0, 2, 4])
>>> print(rt7.shape)
(2, None, None)
Uniform and ragged outer dimensions may be interleaved, meaning that a
tensor with any combination of ragged and uniform dimensions may be created.
For example, a RaggedTensor `t4` with shape `[3, None, 4, 8, None, 2]` could
be constructed as follows:
```python
t0 = tf.zeros([1000, 2]) # Shape: [1000, 2]
t1 = RaggedTensor.from_row_lengths(t0, [...]) # [160, None, 2]
t2 = RaggedTensor.from_uniform_row_length(t1, 8) # [20, 8, None, 2]
t3 = RaggedTensor.from_uniform_row_length(t2, 4) # [5, 4, 8, None, 2]
t4 = RaggedTensor.from_row_lengths(t3, [...]) # [3, None, 4, 8, None, 2]
```
"""
#=============================================================================
# Constructor (private)
#=============================================================================
@doc_controls.do_not_generate_docs
def __init__(self, values, row_partition, internal=False):
"""Creates a `RaggedTensor` with a specified partitioning for `values`.
This constructor is private -- please use one of the following ops to
build `RaggedTensor`s:
* `tf.RaggedTensor.from_row_lengths`
* `tf.RaggedTensor.from_value_rowids`
* `tf.RaggedTensor.from_row_splits`
* `tf.RaggedTensor.from_row_starts`
* `tf.RaggedTensor.from_row_limits`
* `tf.RaggedTensor.from_nested_row_splits`
* `tf.RaggedTensor.from_nested_row_lengths`
* `tf.RaggedTensor.from_nested_value_rowids`
Args:
values: A potentially ragged tensor of any dtype and shape `[nvals, ...]`.
row_partition: A `RowPartition` object, representing the arrangement of
the lists at the top level.
internal: True if the constructor is being called by one of the factory
methods. If false, an exception will be raised.
Raises:
ValueError: If internal = False. Note that this method is intended only
for internal use.
TypeError: If values is not a `RaggedTensor` or `Tensor`, or
row_partition is not a `RowPartition`.
"""
if not internal:
raise ValueError("RaggedTensor constructor is private; please use one "
"of the factory methods instead (e.g., "
"RaggedTensor.from_row_lengths())")
_assert_is_supported_ragged_values_type(values)
if not isinstance(row_partition, RowPartition):
raise TypeError("row_partition must be a RowPartition, got %r" %
row_partition)
# Validate shapes.
values.shape.with_rank_at_least(1)
if isinstance(values, RaggedTensor):
# pylint: disable=protected-access
assert row_partition.dtype == values._row_partition.dtype
self._values = values
self._row_partition = row_partition
#=============================================================================
# Factory Methods
#=============================================================================
@classmethod
def _from_row_partition(cls, values, row_partition, validate=True):
"""Creates a `RaggedTensor` with a row partition.
This is used as a way for RaggedTensors to share row partitions.
The outer dimension of values must be equal to `partition.nvals()`.
Args:
values: A potentially ragged tensor.
row_partition: a `RowPartition`: can be shared between tensors.
validate: If true, then use assertions to check that the arguments form a
valid `RaggedTensor`.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
Raises:
ValueError: If partition.nvals() != _nrows(values)
"""
if not isinstance(row_partition, RowPartition):
raise TypeError("row_partition must be a RowPartition")
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
values, row_partition = cls._convert_values_and_partition(
values, row_partition, "partition")
if row_partition.has_precomputed_value_rowids():
value_rowids_shape = row_partition.value_rowids().shape
values.shape[:1].assert_is_compatible_with(value_rowids_shape)
if validate:
msg = "Arguments to _from_row_partition do not form a valid RaggedTensor"
nvals = _nrows(values, row_partition.dtype)
checks = [
check_ops.assert_equal(
row_partition.nvals(out_type=row_partition.dtype),
nvals,
message=msg),
]
if not isinstance(values, RaggedTensor):
checks.append(check_ops.assert_rank_at_least(values, 1))
row_partition = row_partition.with_dependencies(checks)
return cls(
values=values,
internal=True,
row_partition=row_partition)
@classmethod
@dispatch.add_dispatch_support
def from_value_rowids(cls,
values,
value_rowids,
nrows=None,
name=None,
validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `value_rowids`.
The returned `RaggedTensor` corresponds with the python list defined by:
```python
result = [[values[i] for i in range(len(values)) if value_rowids[i] == row]
for row in range(nrows)]
```
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
value_rowids: A 1-D integer tensor with shape `[nvals]`, which corresponds
one-to-one with `values`, and specifies each value's row index. Must be
nonnegative, and must be sorted in ascending order.
nrows: An integer scalar specifying the number of rows. This should be
        specified if the `RaggedTensor` may contain empty trailing rows. Must
be greater than `value_rowids[-1]` (or zero if `value_rowids` is empty).
Defaults to `value_rowids[-1]` (or zero if `value_rowids` is empty).
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
Raises:
ValueError: If `nrows` is incompatible with `value_rowids`.
#### Example:
>>> print(tf.RaggedTensor.from_value_rowids(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... value_rowids=[0, 0, 0, 0, 2, 2, 2, 3],
... nrows=5))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(name, "RaggedFromValueRowIds",
[values, value_rowids, nrows]):
row_partition = RowPartition.from_value_rowids(
value_rowids=value_rowids,
nrows=nrows,
validate=validate,
preferred_dtype=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_row_splits(cls, values, row_splits, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_splits`.
The returned `RaggedTensor` corresponds with the python list defined by:
```python
result = [values[row_splits[i]:row_splits[i + 1]]
for i in range(len(row_splits) - 1)]
```
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_splits: A 1-D integer tensor with shape `[nrows+1]`. Must not be
empty, and must be sorted in ascending order. `row_splits[0]` must be
zero and `row_splits[-1]` must be `nvals`.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
Raises:
ValueError: If `row_splits` is an empty list.
#### Example:
>>> print(tf.RaggedTensor.from_row_splits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_splits=[0, 4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(name, "RaggedFromRowSplits", [values, row_splits]):
row_partition = RowPartition.from_row_splits(
row_splits=row_splits,
validate=validate,
preferred_dtype=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_row_lengths(cls, values, row_lengths, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_lengths`.
The returned `RaggedTensor` corresponds with the python list defined by:
```python
result = [[values.pop(0) for i in range(length)]
for length in row_lengths]
```
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_lengths: A 1-D integer tensor with shape `[nrows]`. Must be
nonnegative. `sum(row_lengths)` must be `nvals`.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
>>> print(tf.RaggedTensor.from_row_lengths(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_lengths=[4, 0, 3, 1, 0]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(name, "RaggedFromRowLengths", [values, row_lengths]):
row_partition = RowPartition.from_row_lengths(
row_lengths=row_lengths,
validate=validate,
preferred_dtype=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_row_starts(cls, values, row_starts, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_starts`.
Equivalent to: `from_row_splits(values, concat([row_starts, nvals]))`.
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_starts: A 1-D integer tensor with shape `[nrows]`. Must be
nonnegative and sorted in ascending order. If `nrows>0`, then
`row_starts[0]` must be zero.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
>>> print(tf.RaggedTensor.from_row_starts(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_starts=[0, 4, 4, 7, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(name, "RaggedFromRowStarts", [values, row_starts]):
values = _convert_to_ragged_tensor_values(values)
row_partition = RowPartition.from_row_starts(
row_starts=row_starts,
nvals=_nrows(values),
validate=validate,
preferred_dtype=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_row_limits(cls, values, row_limits, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_limits`.
Equivalent to: `from_row_splits(values, concat([0, row_limits]))`.
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_limits: A 1-D integer tensor with shape `[nrows]`. Must be sorted in
ascending order. If `nrows>0`, then `row_limits[-1]` must be `nvals`.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
>>> print(tf.RaggedTensor.from_row_limits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_limits=[4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(name, "RaggedFromRowLimits", [values, row_limits]):
row_partition = RowPartition.from_row_limits(
row_limits=row_limits,
validate=validate,
preferred_dtype=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_uniform_row_length(cls,
values,
uniform_row_length,
nrows=None,
validate=True,
name=None):
"""Creates a `RaggedTensor` with rows partitioned by `uniform_row_length`.
This method can be used to create `RaggedTensor`s with multiple uniform
outer dimensions. For example, a `RaggedTensor` with shape `[2, 2, None]`
    can be constructed with this method from a `values` `RaggedTensor` with shape
`[4, None]`:
>>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
>>> print(values.shape)
(4, None)
>>> rt1 = tf.RaggedTensor.from_uniform_row_length(values, 2)
>>> print(rt1)
<tf.RaggedTensor [[[1, 2, 3], [4]], [[5, 6], [7, 8, 9, 10]]]>
>>> print(rt1.shape)
(2, 2, None)
Note that `rt1` only contains one ragged dimension (the innermost
dimension). In contrast, if `from_row_splits` is used to construct a similar
`RaggedTensor`, then that `RaggedTensor` will have two ragged dimensions:
>>> rt2 = tf.RaggedTensor.from_row_splits(values, [0, 2, 4])
>>> print(rt2.shape)
(2, None, None)
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
uniform_row_length: A scalar integer tensor. Must be nonnegative. The
size of the outer axis of `values` must be evenly divisible by
`uniform_row_length`.
nrows: The number of rows in the constructed RaggedTensor. If not
specified, then it defaults to `nvals/uniform_row_length` (or `0` if
`uniform_row_length==0`). `nrows` only needs to be specified if
`uniform_row_length` might be zero. `uniform_row_length*nrows` must
be `nvals`.
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
name: A name prefix for the RaggedTensor (optional).
Returns:
A `RaggedTensor` that corresponds with the python list defined by:
```python
result = [[values.pop(0) for i in range(uniform_row_length)]
for _ in range(nrows)]
```
`result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(name, "RaggedFromUniformRowLength",
[values, uniform_row_length, nrows]):
values = _convert_to_ragged_tensor_values(values)
uniform_row_length = _convert_row_partition(
uniform_row_length, "UniformRowLength",
_get_optional_partition_dtype(values))
nvals = _nvals_uniform_row_length(values, uniform_row_length)
row_partition = RowPartition.from_uniform_row_length(
uniform_row_length=uniform_row_length,
nvals=nvals,
nrows=nrows,
validate=validate,
preferred_dtype=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_nested_value_rowids(cls,
flat_values,
nested_value_rowids,
nested_nrows=None,
name=None,
validate=True):
"""Creates a `RaggedTensor` from a nested list of `value_rowids` tensors.
Equivalent to:
```python
result = flat_values
for (rowids, nrows) in reversed(zip(nested_value_rowids, nested_nrows)):
result = from_value_rowids(result, rowids, nrows)
```
Args:
flat_values: A potentially ragged tensor.
nested_value_rowids: A list of 1-D integer tensors. The `i`th tensor is
used as the `value_rowids` for the `i`th ragged dimension.
nested_nrows: A list of integer scalars. The `i`th scalar is used as the
`nrows` for the `i`th ragged dimension.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor` (or `flat_values` if `nested_value_rowids` is empty).
Raises:
ValueError: If `len(nested_values_rowids) != len(nested_nrows)`.
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
if isinstance(nested_value_rowids, ops.Tensor):
raise TypeError("nested_value_rowids must be a list of Tensors")
if nested_nrows is None:
nested_nrows = [None] * len(nested_value_rowids)
else:
if isinstance(nested_nrows, ops.Tensor):
raise TypeError("nested_nrows must be a list of Tensors")
if len(nested_nrows) != len(nested_value_rowids):
raise ValueError("nested_nrows must have the same length as "
"nested_value_rowids")
with ops.name_scope(name, "RaggedFromNestedValueRowIds", [flat_values] +
list(nested_value_rowids) + list(nested_nrows)):
result = flat_values
for value_rowids, nrows in reversed(
list(zip(nested_value_rowids, nested_nrows))):
result = cls.from_value_rowids(
result, value_rowids, nrows, validate=validate)
return result
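  # Illustrative sketch (editorial addition, not part of the original source):
  # the values below mirror the class-level nested-row-splits example, expressed
  # with `value_rowids`/`nrows` pairs instead. Assumes eager execution and
  # `import tensorflow as tf`.
  #
  #   >>> tf.RaggedTensor.from_nested_value_rowids(
  #   ...     flat_values=[3, 1, 4, 1, 5, 9, 2, 6],
  #   ...     nested_value_rowids=([0, 0, 0, 2, 2], [0, 0, 0, 0, 2, 2, 2, 3]),
  #   ...     nested_nrows=[3, 5]).to_list()
  #   [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]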
@classmethod
@dispatch.add_dispatch_support
def from_nested_row_splits(cls,
flat_values,
nested_row_splits,
name=None,
validate=True):
"""Creates a `RaggedTensor` from a nested list of `row_splits` tensors.
Equivalent to:
```python
result = flat_values
for row_splits in reversed(nested_row_splits):
result = from_row_splits(result, row_splits)
```
Args:
flat_values: A potentially ragged tensor.
nested_row_splits: A list of 1-D integer tensors. The `i`th tensor is
used as the `row_splits` for the `i`th ragged dimension.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor` (or `flat_values` if `nested_row_splits` is empty).
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
if isinstance(nested_row_splits, ops.Tensor):
raise TypeError("nested_row_splits must be a list of Tensors")
with ops.name_scope(name, "RaggedFromNestedRowSplits",
[flat_values] + list(nested_row_splits)):
result = flat_values
for splits in reversed(nested_row_splits):
result = cls.from_row_splits(result, splits, validate=validate)
return result
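  # Illustrative sketch (editorial addition, not part of the original source):
  # each `row_splits` tensor adds one ragged dimension, outermost first. The
  # values repeat the class-level docstring example. Assumes eager execution
  # and `import tensorflow as tf`.
  #
  #   >>> tf.RaggedTensor.from_nested_row_splits(
  #   ...     flat_values=[3, 1, 4, 1, 5, 9, 2, 6],
  #   ...     nested_row_splits=([0, 3, 3, 5], [0, 4, 4, 7, 8, 8])).to_list()
  #   [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]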
@classmethod
@dispatch.add_dispatch_support
def from_nested_row_lengths(cls,
flat_values,
nested_row_lengths,
name=None,
validate=True):
"""Creates a `RaggedTensor` from a nested list of `row_lengths` tensors.
Equivalent to:
```python
result = flat_values
for row_lengths in reversed(nested_row_lengths):
result = from_row_lengths(result, row_lengths)
```
Args:
flat_values: A potentially ragged tensor.
nested_row_lengths: A list of 1-D integer tensors. The `i`th tensor is
used as the `row_lengths` for the `i`th ragged dimension.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor` (or `flat_values` if `nested_row_lengths` is empty).
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
if isinstance(nested_row_lengths, ops.Tensor):
raise TypeError("nested_row_lengths must be a list of Tensors")
with ops.name_scope(name, "RaggedFromNestedRowlengths",
[flat_values] + list(nested_row_lengths)):
result = flat_values
for lengths in reversed(nested_row_lengths):
result = cls.from_row_lengths(result, lengths, validate=validate)
return result
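  # Illustrative sketch (editorial addition, not part of the original source):
  # the same nested list as above, expressed with per-dimension row lengths.
  # The outer lengths [3, 0, 2] partition the 5 inner rows; the inner lengths
  # [4, 0, 3, 1, 0] partition the 8 flat values. Assumes eager execution and
  # `import tensorflow as tf`.
  #
  #   >>> tf.RaggedTensor.from_nested_row_lengths(
  #   ...     flat_values=[3, 1, 4, 1, 5, 9, 2, 6],
  #   ...     nested_row_lengths=([3, 0, 2], [4, 0, 3, 1, 0])).to_list()
  #   [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]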
@classmethod
def _convert_values_and_partition(cls, values, row_partition, name):
"""Converts `values` and `partition` to Tensors.
If `values` is a `RaggedTensor`, then converts `values` and `partition`
to have compatible row-partitioning dtypes. In particular, if any of the
row partitioning tensors are `int64`, then all of the other row
partitioning tensors wil be cast to `int64` (if auto_cast_partition_dtype()
is true) or an error will be raised (if auto_cast_partition_dtype() is
false).
Args:
values: The `values` for the `RaggedTensor` being constructed.
row_partition: A RowPartition object for the `RaggedTensor` being
constructed.
name: The name of the RowPartition object.
Returns:
A tuple (values, partition).
"""
if not isinstance(row_partition, RowPartition):
raise ValueError("partition must be a RowPartition")
if isinstance(values, RaggedTensor):
# pylint: disable=protected-access
if values._row_partition.dtype != row_partition.dtype:
if not ragged_config.auto_cast_partition_dtype():
# pylint: disable=protected-access
raise ValueError(
"dtype mismatch: %s (%s) vs values.partition (%s)" %
(name, row_partition.dtype, values._row_partition.dtype))
values = values.with_row_splits_dtype(row_partition.dtype)
else:
values = _convert_to_ragged_tensor_values(values)
return (values, row_partition)
#=============================================================================
# Accessors
#=============================================================================
@property
def dtype(self):
"""The `DType` of values in this tensor."""
return self._values.dtype
@property
def shape(self):
"""The statically known shape of this ragged tensor.
Returns:
A `TensorShape` containing the statically known shape of this ragged
tensor. Ragged dimensions have a size of `None`.
Examples:
>>> tf.ragged.constant([[0], [1, 2]]).shape
TensorShape([2, None])
>>> tf.ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).shape
TensorShape([2, None, 2])
"""
nrows = self._row_partition.static_nrows
ncols = self._row_partition.static_uniform_row_length
value_shape = self._values.shape[1:]
return tensor_shape.TensorShape([nrows, ncols]).concatenate(value_shape)
def get_shape(self):
"""The statically known shape of this ragged tensor.
Returns:
A `TensorShape` containing the statically known shape of this ragged
tensor. Ragged dimensions have a size of `None`.
Alias for `shape` property.
Examples:
>>> tf.ragged.constant([[0], [1, 2]]).get_shape()
TensorShape([2, None])
>>> tf.ragged.constant(
... [[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).get_shape()
TensorShape([2, None, 2])
"""
return self.shape
@property
def ragged_rank(self):
"""The number of times the RaggedTensor's flat_values is partitioned.
Examples:
>>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
>>> values.ragged_rank
1
>>> rt = tf.RaggedTensor.from_uniform_row_length(values, 2)
>>> rt.ragged_rank
2
Returns:
A Python `int` indicating the number of times the underlying `flat_values`
Tensor has been partitioned to add a new dimension.
I.e., `tf.rank(rt) = tf.rank(rt.flat_values) + rt.ragged_rank`.
"""
values_is_ragged = isinstance(self._values, RaggedTensor)
return self._values.ragged_rank + 1 if values_is_ragged else 1
@property
def values(self):
"""The concatenated rows for this ragged tensor.
`rt.values` is a potentially ragged tensor formed by flattening the two
outermost dimensions of `rt` into a single dimension.
`rt.values.shape = [nvals] + rt.shape[2:]` (where `nvals` is the
number of items in the outer two dimensions of `rt`).
`rt.ragged_rank = self.ragged_rank - 1`
Returns:
A potentially ragged tensor.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
"""
return self._values
@property
def _nested_row_partitions(self):
"""Returns the row partitions for this `RaggedTensor`."""
partitions = [self._row_partition]
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
# pylint: disable=protected-access
partitions.append(rt_values._row_partition)
rt_values = rt_values.values
return tuple(partitions)
@property
def row_splits(self):
"""The row-split indices for this ragged tensor's `values`.
`rt.row_splits` specifies where the values for each row begin and end in
`rt.values`. In particular, the values for row `rt[i]` are stored in
the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.
Returns:
A 1-D integer `Tensor` with shape `[self.nrows+1]`.
The returned tensor is non-empty, and is sorted in ascending order.
`self.row_splits[0]` is zero, and `self.row_splits[-1]` is equal to
`self.values.shape[0]`.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.row_splits) # indices of row splits in rt.values
tf.Tensor([0 4 4 7 8 8], shape=(6,), dtype=int64)
"""
return self._row_partition.row_splits()
@property
def uniform_row_length(self):
"""The length of each row in this ragged tensor, or None if rows are ragged.
>>> rt1 = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
>>> print(rt1.uniform_row_length) # rows are ragged.
None
>>> rt2 = tf.RaggedTensor.from_uniform_row_length(
... values=rt1, uniform_row_length=2)
>>> print(rt2)
<tf.RaggedTensor [[[1, 2, 3], [4]], [[5, 6], [7, 8, 9, 10]]]>
>>> print(rt2.uniform_row_length) # rows are not ragged (all have size 2).
tf.Tensor(2, shape=(), dtype=int64)
A RaggedTensor's rows are only considered to be uniform (i.e. non-ragged)
if it can be determined statically (at graph construction time) that the
rows all have the same length.
Returns:
A scalar integer `Tensor`, specifying the length of every row in this
ragged tensor (for ragged tensors whose rows are uniform); or `None`
(for ragged tensors whose rows are ragged).
"""
return self._row_partition.uniform_row_length()
@property
def flat_values(self):
"""The innermost `values` tensor for this ragged tensor.
Concretely, if `rt.values` is a `Tensor`, then `rt.flat_values` is
`rt.values`; otherwise, `rt.flat_values` is `rt.values.flat_values`.
Conceptually, `flat_values` is the tensor formed by flattening the
outermost dimension and all of the ragged dimensions into a single
dimension.
`rt.flat_values.shape = [nvals] + rt.shape[rt.ragged_rank + 1:]`
(where `nvals` is the number of items in the flattened dimensions).
Returns:
A `Tensor`.
#### Example:
>>> rt = tf.ragged.constant([[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])
>>> print(rt.flat_values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
"""
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
rt_values = rt_values.values
return rt_values
@property
def nested_row_splits(self):
"""A tuple containing the row_splits for all ragged dimensions.
`rt.nested_row_splits` is a tuple containing the `row_splits` tensors for
all ragged dimensions in `rt`, ordered from outermost to innermost. In
particular, `rt.nested_row_splits = (rt.row_splits,) + value_splits` where:
* `value_splits = ()` if `rt.values` is a `Tensor`.
* `value_splits = rt.values.nested_row_splits` otherwise.
Returns:
A `tuple` of 1-D integer `Tensor`s.
#### Example:
>>> rt = tf.ragged.constant(
... [[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]])
>>> for i, splits in enumerate(rt.nested_row_splits):
... print('Splits for dimension %d: %s' % (i+1, splits.numpy()))
Splits for dimension 1: [0 3]
Splits for dimension 2: [0 3 3 5]
Splits for dimension 3: [0 4 4 7 8 8]
"""
rt_nested_splits = [self.row_splits]
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
rt_nested_splits.append(rt_values.row_splits)
rt_values = rt_values.values
return tuple(rt_nested_splits)
def value_rowids(self, name=None):
"""Returns the row indices for the `values` in this ragged tensor.
`rt.value_rowids()` corresponds one-to-one with the outermost dimension of
`rt.values`, and specifies the row containing each value. In particular,
the row `rt[row]` consists of the values `rt.values[j]` where
`rt.value_rowids()[j] == row`.
Args:
name: A name prefix for the returned tensor (optional).
Returns:
A 1-D integer `Tensor` with shape `self.values.shape[:1]`.
The returned tensor is nonnegative, and is sorted in ascending order.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
>>> print(rt.value_rowids()) # corresponds 1:1 with rt.values
tf.Tensor([0 0 0 0 2 2 2 3], shape=(8,), dtype=int64)
"""
with ops.name_scope(name, "RaggedValueRowIds", [self]):
return self._row_partition.value_rowids()
def nested_value_rowids(self, name=None):
"""Returns a tuple containing the value_rowids for all ragged dimensions.
    `rt.nested_value_rowids` is a tuple containing the `value_rowids` tensors
    for all ragged dimensions in `rt`, ordered from outermost to innermost. In
particular, `rt.nested_value_rowids = (rt.value_rowids(),) + value_ids`
where:
* `value_ids = ()` if `rt.values` is a `Tensor`.
* `value_ids = rt.values.nested_value_rowids` otherwise.
Args:
name: A name prefix for the returned tensors (optional).
Returns:
A `tuple` of 1-D integer `Tensor`s.
#### Example:
>>> rt = tf.ragged.constant(
... [[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]])
>>> for i, ids in enumerate(rt.nested_value_rowids()):
... print('row ids for dimension %d: %s' % (i+1, ids.numpy()))
row ids for dimension 1: [0 0 0]
row ids for dimension 2: [0 0 0 2 2]
row ids for dimension 3: [0 0 0 0 2 2 2 3]
"""
with ops.name_scope(name, "RaggedNestedValueRowIds", [self]):
rt_nested_ids = [self.value_rowids()]
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
rt_nested_ids.append(rt_values.value_rowids())
rt_values = rt_values.values
return tuple(rt_nested_ids)
def nrows(self, out_type=None, name=None):
"""Returns the number of rows in this ragged tensor.
I.e., the size of the outermost dimension of the tensor.
Args:
out_type: `dtype` for the returned tensor. Defaults to
`self.row_splits.dtype`.
name: A name prefix for the returned tensor (optional).
Returns:
A scalar `Tensor` with dtype `out_type`.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.nrows()) # rt has 5 rows.
tf.Tensor(5, shape=(), dtype=int64)
"""
with ops.name_scope(name, "RaggedNRows", [self]):
return self._row_partition.nrows(out_type=out_type)
def row_starts(self, name=None):
"""Returns the start indices for rows in this ragged tensor.
These indices specify where the values for each row begin in
`self.values`. `rt.row_starts()` is equal to `rt.row_splits[:-1]`.
Args:
name: A name prefix for the returned tensor (optional).
Returns:
A 1-D integer Tensor with shape `[nrows]`.
The returned tensor is nonnegative, and is sorted in ascending order.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
>>> print(rt.row_starts()) # indices of row starts in rt.values
tf.Tensor([0 4 4 7 8], shape=(5,), dtype=int64)
"""
with ops.name_scope(name, "RaggedRowStarts", [self]):
return self._row_partition.row_starts()
def row_limits(self, name=None):
"""Returns the limit indices for rows in this ragged tensor.
These indices specify where the values for each row end in
    `self.values`. `rt.row_limits()` is equal to `rt.row_splits[1:]`.
Args:
name: A name prefix for the returned tensor (optional).
Returns:
A 1-D integer Tensor with shape `[nrows]`.
The returned tensor is nonnegative, and is sorted in ascending order.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
>>> print(rt.row_limits()) # indices of row limits in rt.values
tf.Tensor([4 4 7 8 8], shape=(5,), dtype=int64)
"""
with ops.name_scope(name, "RaggedRowLimits", [self]):
return self._row_partition.row_limits()
def row_lengths(self, axis=1, name=None):
"""Returns the lengths of the rows in this ragged tensor.
`rt.row_lengths()[i]` indicates the number of values in the
`i`th row of `rt`.
Args:
axis: An integer constant indicating the axis whose row lengths should be
returned.
name: A name prefix for the returned tensor (optional).
Returns:
A potentially ragged integer Tensor with shape `self.shape[:axis]`.
Raises:
ValueError: If `axis` is out of bounds.
#### Example:
>>> rt = tf.ragged.constant(
... [[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []])
>>> print(rt.row_lengths()) # lengths of rows in rt
tf.Tensor([2 0 2 1 0], shape=(5,), dtype=int64)
>>> print(rt.row_lengths(axis=2)) # lengths of axis=2 rows.
<tf.RaggedTensor [[3, 1], [], [2, 1], [1], []]>
"""
if axis == 0:
return self._row_partition.nrows()
if axis == 1:
return self._row_partition.row_lengths()
with ops.name_scope(name, "RaggedRowLengths", [self]):
axis = array_ops.get_positive_axis(
axis, self.shape.rank, ndims_name="rank(self)")
if axis == 0:
return self.nrows()
elif axis == 1:
splits = self.row_splits
return splits[1:] - splits[:-1]
elif isinstance(self.values, RaggedTensor):
return self.with_values(self.values.row_lengths(axis - 1))
else:
shape = array_ops.shape(self.values, out_type=self._row_partition.dtype)
return self.with_values(
array_ops.ones(shape[:axis - 1], self._row_partition.dtype) *
shape[axis - 1])
def nested_row_lengths(self, name=None):
"""Returns a tuple containing the row_lengths for all ragged dimensions.
`rt.nested_row_lengths()` is a tuple containing the `row_lengths` tensors
for all ragged dimensions in `rt`, ordered from outermost to innermost.
Args:
name: A name prefix for the returned tensors (optional).
Returns:
A `tuple` of 1-D integer `Tensors`. The length of the tuple is equal to
`self.ragged_rank`.
"""
with ops.name_scope(name, "RaggedNestedRowLengths", [self]):
rt_nested_row_lengths = []
rt = self
while isinstance(rt, RaggedTensor):
rt_nested_row_lengths.append(rt.row_lengths())
rt = rt.values
return tuple(rt_nested_row_lengths)
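  # Illustrative sketch (editorial addition, not part of the original source):
  # for a rank-3 ragged tensor the result has one lengths vector per ragged
  # dimension, outermost first. Assumes eager execution and
  # `import tensorflow as tf`.
  #
  #   >>> rt = tf.ragged.constant([[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])
  #   >>> [lengths.numpy().tolist() for lengths in rt.nested_row_lengths()]
  #   [[3, 0, 2], [4, 0, 3, 1, 0]]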
def bounding_shape(self, axis=None, name=None, out_type=None):
"""Returns the tight bounding box shape for this `RaggedTensor`.
Args:
axis: An integer scalar or vector indicating which axes to return the
bounding box for. If not specified, then the full bounding box is
returned.
name: A name prefix for the returned tensor (optional).
out_type: `dtype` for the returned tensor. Defaults to
`self.row_splits.dtype`.
Returns:
An integer `Tensor` (`dtype=self.row_splits.dtype`). If `axis` is not
specified, then `output` is a vector with
`output.shape=[self.shape.ndims]`. If `axis` is a scalar, then the
`output` is a scalar. If `axis` is a vector, then `output` is a vector,
where `output[i]` is the bounding size for dimension `axis[i]`.
#### Example:
>>> rt = tf.ragged.constant([[1, 2, 3, 4], [5], [], [6, 7, 8, 9], [10]])
>>> rt.bounding_shape().numpy()
array([5, 4])
"""
if out_type is None:
out_type = self._row_partition.dtype
else:
out_type = dtypes.as_dtype(out_type)
with ops.name_scope(name, "RaggedBoundingBox", [self, axis]):
nested_splits = self.nested_row_splits
rt_flat_values = self.flat_values
# Optimized special cases for when axis=0 or axis=1:
if isinstance(axis, int):
if axis == 0:
return array_ops.shape(nested_splits[0], out_type=out_type)[0] - 1
elif axis == 1:
return math_ops.maximum(math_ops.reduce_max(self.row_lengths()), 0)
splits_shape = array_ops.shape(self.row_splits, out_type=out_type)
flat_values_shape = array_ops.shape(rt_flat_values, out_type=out_type)
ragged_dimensions = [splits_shape[0] - 1] + [
math_ops.maximum(math_ops.reduce_max(splits[1:] - splits[:-1]), 0)
for splits in nested_splits
]
inner_dimensions = flat_values_shape[1:]
if out_type != self._row_partition.dtype:
ragged_dimensions = [
math_ops.cast(d, out_type) for d in ragged_dimensions
]
bbox = array_ops.concat(
[array_ops.stack(ragged_dimensions), inner_dimensions], axis=0)
return bbox if axis is None else array_ops.gather(bbox, axis)
#=============================================================================
# Transformation
#=============================================================================
def with_values(self, new_values):
"""Returns a copy of `self` with `values` replaced by `new_value`.
Preserves cached row-partitioning tensors such as `self.cached_nrows` and
`self.cached_value_rowids` if they have values.
Args:
new_values: Potentially ragged tensor to use as the `values` for the
returned `RaggedTensor`. Must have `rank > 0`, and must have the same
number of rows as `self.values`.
Returns:
A `RaggedTensor`. `result.rank = 1 + new_values.rank`.
`result.ragged_rank = 1 + new_values.ragged_rank`
"""
new_values = _convert_to_ragged_tensor_values(new_values)
new_values.shape.with_rank_at_least(1)
self.values.shape[:1].assert_is_compatible_with(new_values.shape[:1])
if (isinstance(new_values, RaggedTensor) and
self._row_partition.dtype != new_values.row_splits.dtype):
if not ragged_config.auto_cast_partition_dtype():
raise ValueError("self and new_values have mismatched row_splits "
"dtypes; use RaggedTensor.with_row_splits_dtype() to "
"convert them to compatible dtypes.")
new_values = new_values.with_row_splits_dtype(dtypes.int64)
return self.with_row_splits_dtype(dtypes.int64).with_values(new_values)
return RaggedTensor(
values=new_values, row_partition=self._row_partition, internal=True)
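  # Illustrative sketch (editorial addition, not part of the original source):
  # `with_values` swaps in a new `values` tensor while reusing the existing row
  # partition; the replacement must have the same outer size (here 5). Assumes
  # eager execution and `import tensorflow as tf`.
  #
  #   >>> rt = tf.ragged.constant([[1, 2], [3, 4, 5], []])
  #   >>> rt.with_values([10, 20, 30, 40, 50]).to_list()
  #   [[10, 20], [30, 40, 50], []]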
def with_flat_values(self, new_values):
"""Returns a copy of `self` with `flat_values` replaced by `new_value`.
Preserves cached row-partitioning tensors such as `self.cached_nrows` and
`self.cached_value_rowids` if they have values.
Args:
new_values: Potentially ragged tensor that should replace
`self.flat_values`. Must have `rank > 0`, and must have the same number
of rows as `self.flat_values`.
Returns:
A `RaggedTensor`.
`result.rank = self.ragged_rank + new_values.rank`.
`result.ragged_rank = self.ragged_rank + new_values.ragged_rank`.
"""
if isinstance(self._values, RaggedTensor):
return self.with_values(self.values.with_flat_values(new_values))
else:
new_values = _convert_to_ragged_tensor_values(new_values)
return self.with_values(new_values)
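  # Illustrative sketch (editorial addition, not part of the original source):
  # unlike `with_values`, this replaces the innermost `flat_values`, so it also
  # works on tensors with multiple ragged dimensions. Assumes eager execution
  # and `import tensorflow as tf`.
  #
  #   >>> rt = tf.ragged.constant([[[1, 2], [3]], [[4]]])
  #   >>> rt.with_flat_values([10, 20, 30, 40]).to_list()
  #   [[[10, 20], [30]], [[40]]]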
def with_row_splits_dtype(self, dtype):
"""Returns a copy of this RaggedTensor with the given `row_splits` dtype.
For RaggedTensors with multiple ragged dimensions, the `row_splits` for all
nested `RaggedTensor` objects are cast to the given dtype.
Args:
dtype: The dtype for `row_splits`. One of `tf.int32` or `tf.int64`.
Returns:
A copy of this RaggedTensor, with the `row_splits` cast to the given
type.
"""
dtype = dtypes.as_dtype(dtype)
if dtype not in (dtypes.int32, dtypes.int64):
raise ValueError("dtype must be int32 or int64")
if self._row_partition.dtype == dtype:
return self
current_values = self._values
if isinstance(current_values, RaggedTensor):
return RaggedTensor(
values=current_values.with_row_splits_dtype(dtype),
row_partition=self._row_partition.with_row_splits_dtype(dtype),
internal=True)
else:
return RaggedTensor(
values=current_values,
row_partition=self._row_partition.with_row_splits_dtype(dtype),
internal=True)
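  # Illustrative sketch (editorial addition, not part of the original source):
  # casts every nested `row_splits` tensor to the requested dtype (int32 or
  # int64) without changing the values. Assumes eager execution and
  # `import tensorflow as tf`; `tf.ragged.constant` defaults to int64 splits.
  #
  #   >>> rt = tf.ragged.constant([[1, 2], [3]])
  #   >>> rt.row_splits.dtype
  #   tf.int64
  #   >>> rt.with_row_splits_dtype(tf.int32).row_splits.dtype
  #   tf.int32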
def merge_dims(self, outer_axis, inner_axis):
"""Merges outer_axis...inner_axis into a single dimension.
Returns a copy of this RaggedTensor with the specified range of dimensions
flattened into a single dimension, with elements in row-major order.
#### Examples:
>>> rt = tf.ragged.constant([[[1, 2], [3]], [[4, 5, 6]]])
>>> print(rt.merge_dims(0, 1))
<tf.RaggedTensor [[1, 2], [3], [4, 5, 6]]>
>>> print(rt.merge_dims(1, 2))
<tf.RaggedTensor [[1, 2, 3], [4, 5, 6]]>
>>> print(rt.merge_dims(0, 2))
tf.Tensor([1 2 3 4 5 6], shape=(6,), dtype=int32)
To mimic the behavior of `np.flatten` (which flattens all dimensions), use
    `rt.merge_dims(0, -1)`. To mimic the behavior of `tf.layers.Flatten` (which
flattens all dimensions except the outermost batch dimension), use
`rt.merge_dims(1, -1)`.
Args:
outer_axis: `int`: The first dimension in the range of dimensions to
merge. May be negative if `self.shape.rank` is statically known.
inner_axis: `int`: The last dimension in the range of dimensions to merge.
May be negative if `self.shape.rank` is statically known.
Returns:
A copy of this tensor, with the specified dimensions merged into a
single dimension. The shape of the returned tensor will be
`self.shape[:outer_axis] + [N] + self.shape[inner_axis + 1:]`, where `N`
is the total number of slices in the merged dimensions.
"""
outer_axis = array_ops.get_positive_axis(
outer_axis,
self.shape.rank,
axis_name="outer_axis",
ndims_name="rank(self)")
inner_axis = array_ops.get_positive_axis(
inner_axis,
self.shape.rank,
axis_name="inner_axis",
ndims_name="rank(self)")
if not outer_axis < inner_axis:
raise ValueError("Expected outer_axis (%d) to be less than "
"inner_axis (%d)" % (outer_axis, inner_axis))
return merge_dims(self, outer_axis, inner_axis)
def _set_shape(self, shape):
"""Updates the static shape of `self` to be `shape`.
* If a dimension of `shape` has known rank, and is encoded via
partitioning, then this will update the corresponding partition to
define `_uniform_row_length` and `nrows`.
* If a dimension of `shape` has a known rank, and is encoded as one
of the `flat_values` dimensions, then `flat_values.set_shape()` will
be used to update its shape.
Warning: Using this method to assert an incorrect shape for a RaggedTensor
(i.e., one that's not consistent with its actual shape) can cause
segmentation faults and very difficult-to-diagnose behavior. Only use this
method if you are certain that the shape is correct.
Args:
shape: `tf.TensorShape` specifying the shape for this `RaggedTensor`.
"""
# TODO(edloper): Refactor this to not directly access private members
# of RowPartition.
# pylint: disable=protected-access
shape = tensor_shape.as_shape(shape)
if shape.rank is None:
return # Nothing to do.
shape = shape.as_list()
# Outermost dimension
if shape[0] is not None:
self._row_partition._row_splits.set_shape(shape[0] + 1)
# Partitioned dimensions
dtype = self._row_partition.dtype
for i, partition in enumerate(self._nested_row_partitions):
size = shape[i + 1]
if size is not None:
if partition._uniform_row_length is not None:
old_row_length = tensor_util.constant_value(
partition._uniform_row_length)
if old_row_length is not None:
if size == old_row_length:
continue # already have shape info for this axis.
else:
raise ValueError("Inconsistent size for axis %s: %s vs %s" %
((i + 1), old_row_length, size))
partition._uniform_row_length = ops.convert_to_tensor(size, dtype)
if partition._nrows is None:
partition._nrows = array_ops.size(partition._row_splits) - 1
# Inner dimensions
flat_shape = tensor_shape.as_shape([None] + shape[self.ragged_rank + 1:])
self.flat_values.set_shape(flat_shape)
#=============================================================================
# Tensor Type Conversions
#=============================================================================
@classmethod
@dispatch.add_dispatch_support
def from_tensor(cls,
tensor,
lengths=None,
padding=None,
ragged_rank=1,
name=None,
row_splits_dtype=dtypes.int64):
"""Converts a `tf.Tensor` into a `RaggedTensor`.
The set of absent/default values may be specified using a vector of lengths
or a padding value (but not both). If `lengths` is specified, then the
output tensor will satisfy `output[row] = tensor[row][:lengths[row]]`. If
'lengths' is a list of lists or tuple of lists, those lists will be used
as nested row lengths. If `padding` is specified, then any row *suffix*
consisting entirely of `padding` will be excluded from the returned
`RaggedTensor`. If neither `lengths` nor `padding` is specified, then the
returned `RaggedTensor` will have no absent/default values.
Examples:
>>> dt = tf.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]])
>>> tf.RaggedTensor.from_tensor(dt)
<tf.RaggedTensor [[5, 7, 0], [0, 3, 0], [6, 0, 0]]>
>>> tf.RaggedTensor.from_tensor(dt, lengths=[1, 0, 3])
<tf.RaggedTensor [[5], [], [6, 0, 0]]>
>>> tf.RaggedTensor.from_tensor(dt, padding=0)
<tf.RaggedTensor [[5, 7], [0, 3], [6]]>
>>> dt = tf.constant([[[5, 0], [7, 0], [0, 0]],
... [[0, 0], [3, 0], [0, 0]],
... [[6, 0], [0, 0], [0, 0]]])
>>> tf.RaggedTensor.from_tensor(dt, lengths=([2, 0, 3], [1, 1, 2, 0, 1]))
<tf.RaggedTensor [[[5], [7]], [], [[6, 0], [], [0]]]>
Args:
tensor: The `Tensor` to convert. Must have rank `ragged_rank + 1` or
higher.
lengths: An optional set of row lengths, specified using a 1-D integer
`Tensor` whose length is equal to `tensor.shape[0]` (the number of rows
in `tensor`). If specified, then `output[row]` will contain
`tensor[row][:lengths[row]]`. Negative lengths are treated as zero. You
may optionally pass a list or tuple of lengths to this argument, which
will be used as nested row lengths to construct a ragged tensor with
multiple ragged dimensions.
padding: An optional padding value. If specified, then any row suffix
consisting entirely of `padding` will be excluded from the returned
RaggedTensor. `padding` is a `Tensor` with the same dtype as `tensor`
and with `shape=tensor.shape[ragged_rank + 1:]`.
ragged_rank: Integer specifying the ragged rank for the returned
`RaggedTensor`. Must be greater than zero.
name: A name prefix for the returned tensors (optional).
row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits`
tensor. One of `tf.int32` or `tf.int64`.
Returns:
A `RaggedTensor` with the specified `ragged_rank`. The shape of the
returned ragged tensor is compatible with the shape of `tensor`.
Raises:
ValueError: If both `lengths` and `padding` are specified.
"""
row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
if lengths is not None and padding is not None:
raise ValueError("Specify lengths or padding, but not both")
if not isinstance(ragged_rank, int):
raise TypeError("ragged_rank expected int, got %r" % ragged_rank)
if ragged_rank <= 0:
raise ValueError("ragged_rank must be greater than 0; got %s" %
ragged_rank)
with ops.name_scope(name, "RaggedFromTensor", [tensor, lengths, padding]):
tensor = ops.convert_to_tensor(tensor, name="tensor")
tensor.shape.with_rank_at_least(ragged_rank + 1)
input_shape = array_ops.shape(tensor, out_type=row_splits_dtype)
ncols = input_shape[1]
# Handle nested row lengths.
if (lengths is not None and isinstance(lengths, (list, tuple)) and
len(lengths) and not isinstance(lengths[0], (int, float))):
if ragged_rank not in (1, len(lengths)):
# Note: we accept `ragged_rank=1` here because it's the default value;
# i.e., if the user passes in a tuple of lengths, but doesn't specify
# ragged_rank, then we should use that tuple to determine ragged_rank.
# We only want to complain if they pass in an explicit ragged_rank
# that doesn't match len(lengths).
raise ValueError("If lengths is a tuple of row_lengths, then "
"ragged_rank must be len(lengths).")
# Rather than reconstructing the tensor mask directly, we can
# recreate it as a boolean RaggedTensor, then densify that and use
# that as the mask to clear out the unused data in the passed tensor.
tensor.shape.with_rank_at_least(len(lengths) + 1)
num_tokens = math_ops.reduce_sum(lengths[-1])
ones_mask = array_ops.ones([num_tokens], dtype=dtypes.bool)
ragged_mask = cls.from_nested_row_lengths(
ones_mask, lengths, validate=False)
dense_ragged_mask = ragged_mask.to_tensor(default_value=False)
masked_data = array_ops.boolean_mask(tensor, dense_ragged_mask)
return cls.from_nested_row_lengths(masked_data, lengths, validate=False)
# Handle ragged_rank>1 via recursion:
# If the output should have multiple ragged dimensions, then first
# flatten the tensor to eliminate all but the last ragged dimension,
# and recursively convert that flattened tensor. Then add on the splits
# for the dimensions that we flattened out.
if ragged_rank > 1:
if tensor.shape.is_fully_defined():
input_shape = tensor.shape.as_list()
# The total number of elements in each dimension. E.g., if
# input_shape=[3, 4, 5, 6], then dim[2] has 3*4*5 elements in total.
dim_size = np.cumprod(input_shape)
new_shape = [dim_size[ragged_rank - 1]] + input_shape[ragged_rank:]
else:
dim_size = math_ops.cumprod(input_shape)
new_shape = array_ops.concat([[dim_size[ragged_rank - 1]],
input_shape[ragged_rank:]],
axis=0)
flattened = array_ops.reshape(tensor, new_shape)
result = cls.from_tensor(
flattened, lengths, padding, row_splits_dtype=row_splits_dtype)
for axis in range(ragged_rank - 1, 0, -1):
dim_len = tensor_shape.dimension_at_index(tensor.shape, axis).value
if dim_len is None:
dim_len = input_shape[axis]
else:
dim_len = constant_op.constant(dim_len, row_splits_dtype)
result = RaggedTensor.from_uniform_row_length(
values=result,
uniform_row_length=dim_len,
nrows=dim_size[axis - 1],
validate=False)
return result
# If padding was specified, then use it to find row lengths.
if padding is not None:
padding = ops.convert_to_tensor(
padding, name="padding", dtype=tensor.dtype)
padding.shape.assert_is_compatible_with(tensor.shape[2:])
# Find places where the padding is equal to the tensor. (This will
# broadcast `padding` across the outermost 2 dimensions of `tensor`,
# so `has_default_value.shape = tensor.shape`.)
has_default_value = math_ops.equal(padding, tensor)
# If the padding isn't a scalar, then require that all values in the
# padding match each item in the tensor. After this block of code,
# `has_default.shape = tensor.shape[:2]`. (Unfortunately, we can't just
# use reduce_all for both cases, because when you pass an empty `axis`
# list to reduce_all, it reduces all axes; but we want it to reduce no
# axes -- i.e., to be a no-op.)
tensor_rank = array_ops.rank(tensor)
reduce_axis = math_ops.range(2, tensor_rank)
has_default = control_flow_ops.cond(
tensor_rank > 2,
lambda: math_ops.reduce_all(has_default_value, axis=reduce_axis),
lambda: has_default_value)
has_default.set_shape(tensor_shape.TensorShape([None, None]))
has_default.set_shape(tensor.shape[:2])
# Use has_default to find the length of each row: for each
# non-default item in a row, calculate the length that the row needs to
# have to include that item; and then take the max of those values
# (across each row).
has_nondefault = math_ops.logical_not(has_default)
has_nondefault = math_ops.cast(has_nondefault, row_splits_dtype)
length_for_nondefault_value = (
has_nondefault *
array_ops.expand_dims(math_ops.range(1, ncols + 1), 0))
lengths = math_ops.reduce_max(length_for_nondefault_value, axis=1)
if lengths is not None:
# If we have lengths (either directly supplied, or computed from
# paddings), then use those to construct splits; and then use masking
# to get the corresponding values.
lengths = ragged_util.convert_to_int_tensor(lengths, "lengths",
row_splits_dtype)
lengths.shape.assert_has_rank(1)
lengths = math_ops.minimum(lengths, ncols)
lengths = math_ops.maximum(lengths, 0)
limits = math_ops.cumsum(lengths)
splits = array_ops.concat(
[array_ops.zeros([1], row_splits_dtype), limits], axis=0)
mask = array_ops.sequence_mask(lengths, maxlen=ncols)
values = array_ops.boolean_mask(tensor, mask)
return cls.from_row_splits(values, splits, validate=False)
# If neither padding nor lengths were specified, then create a splits
# vector that contains no default values, and reshape the input tensor
# to form the values for the RaggedTensor.
values_shape = array_ops.concat([[input_shape[0] * input_shape[1]],
input_shape[2:]], axis=0)
values = array_ops.reshape(tensor, values_shape)
const_nrows = tensor_shape.dimension_at_index(tensor.shape, 0).value
const_ncols = tensor_shape.dimension_at_index(tensor.shape, 1).value
if const_nrows is not None:
nrows = constant_op.constant(const_nrows, row_splits_dtype)
else:
nrows = input_shape[0]
if const_ncols is not None:
ncols = constant_op.constant(const_ncols, row_splits_dtype)
else:
ncols = input_shape[1]
return RaggedTensor.from_uniform_row_length(
values=values, uniform_row_length=ncols, nrows=nrows, validate=False)
def to_tensor(self, default_value=None, name=None, shape=None):
"""Converts this `RaggedTensor` into a `tf.Tensor`.
If `shape` is specified, then the result is padded and/or truncated to
the specified shape.
Examples:
>>> rt = tf.ragged.constant([[9, 8, 7], [], [6, 5], [4]])
>>> print(rt.to_tensor())
tf.Tensor(
[[9 8 7] [0 0 0] [6 5 0] [4 0 0]], shape=(4, 3), dtype=int32)
>>> print(rt.to_tensor(shape=[5, 2]))
tf.Tensor(
[[9 8] [0 0] [6 5] [4 0] [0 0]], shape=(5, 2), dtype=int32)
Args:
default_value: Value to set for indices not specified in `self`. Defaults
to zero. `default_value` must be broadcastable to
`self.shape[self.ragged_rank + 1:]`.
name: A name prefix for the returned tensors (optional).
shape: The shape of the resulting dense tensor. In particular,
`result.shape[i]` is `shape[i]` (if `shape[i]` is not None), or
`self.bounding_shape(i)` (otherwise). `shape.rank` must be `None` or
equal to `self.rank`.
Returns:
A `Tensor` with shape `ragged.bounding_shape(self)` and the
values specified by the non-empty values in `self`. Empty values are
assigned `default_value`.
"""
with ops.name_scope(name, "RaggedToTensor", [self, default_value, shape]):
if default_value is not None:
default_value = ops.convert_to_tensor(
default_value, name="default_value", dtype=self.dtype)
type_tensor_pairs = _get_row_partition_type_tensor_pairs(self)
row_partition_types = [x[0] for x in type_tensor_pairs]
row_partition_tensors = [x[1] for x in type_tensor_pairs]
if default_value is None:
default_value = array_ops.zeros((), self.dtype)
if (isinstance(shape, (list, tuple)) and
any(isinstance(v, ops.Tensor) for v in shape) and
all(isinstance(v, (int, ops.Tensor)) for v in shape)):
shape = array_ops.stack(shape)
shape_tensor = _shape_as_tensor(shape, row_partition_tensors[0].dtype)
tensor = gen_ragged_conversion_ops.ragged_tensor_to_tensor(
shape=shape_tensor,
values=self.flat_values,
default_value=default_value,
row_partition_types=row_partition_types,
row_partition_tensors=row_partition_tensors)
ragged_shape = self.shape
if ragged_shape.rank is not None and not isinstance(shape, ops.Tensor):
# Merge self.shape and shape, favoring the second one as it takes
# into account potential padding added to the output.
shape = tensor_shape.as_shape(shape)
if shape.rank is None:
output_shape = ragged_shape
else:
# At this point we can assume that shape.rank == ragged_shape.rank
# because otherwise it would have failed earlier.
output_shape = [s1 if s1 is not None else s2 for (s1, s2)
in zip(shape.as_list(), ragged_shape.as_list())]
tensor.set_shape(output_shape)
return tensor
@classmethod
@dispatch.add_dispatch_support
def from_sparse(cls, st_input, name=None, row_splits_dtype=dtypes.int64):
"""Converts a 2D `tf.sparse.SparseTensor` to a `RaggedTensor`.
Each row of the `output` `RaggedTensor` will contain the explicit values
from the same row in `st_input`. `st_input` must be ragged-right. If it
is not ragged-right, then an error will be generated.
Example:
>>> indices = [[0, 0], [0, 1], [0, 2], [1, 0], [3, 0]]
>>> st = tf.sparse.SparseTensor(indices=indices,
... values=[1, 2, 3, 4, 5],
... dense_shape=[4, 3])
>>> tf.RaggedTensor.from_sparse(st).to_list()
[[1, 2, 3], [4], [], [5]]
Currently, only two-dimensional `SparseTensors` are supported.
Args:
st_input: The sparse tensor to convert. Must have rank 2.
name: A name prefix for the returned tensors (optional).
row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits`
tensor. One of `tf.int32` or `tf.int64`.
Returns:
A `RaggedTensor` with the same values as `st_input`.
`output.ragged_rank = rank(st_input) - 1`.
`output.shape = [st_input.dense_shape[0], None]`.
Raises:
ValueError: If the number of dimensions in `st_input` is not known
statically, or is not two.
"""
row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
if not sparse_tensor.is_sparse(st_input):
raise TypeError("Expected SparseTensor, got %s" % type(st_input).__name__)
with ops.name_scope(name, "RaggedFromSparse", [st_input]):
st_input = sparse_tensor.convert_to_tensor_or_sparse_tensor(
st_input, name="st_input")
if st_input.dense_shape.shape.ndims is None:
static_rank_from_dense_shape = None
else:
static_rank_from_dense_shape = st_input.dense_shape.shape.dims[0].value
if st_input.indices.shape.ndims is None:
static_rank_from_indices = None
else:
static_rank_from_indices = st_input.indices.shape.dims[1].value
if static_rank_from_dense_shape != 2 and static_rank_from_indices != 2:
raise ValueError("rank(st_input) must be 2")
with ops.control_dependencies(
_assert_sparse_indices_are_ragged_right(st_input.indices)):
# Treat sparse row indices as segment ids to generate a splits tensor
# that we can pair with the sparse tensor values. (Ignore sparse column
# indices.)
segment_ids = math_ops.cast(st_input.indices[:, 0], row_splits_dtype)
num_segments = math_ops.cast(st_input.dense_shape[0], row_splits_dtype)
return cls.from_value_rowids(
st_input.values, segment_ids, num_segments, validate=False)
def to_sparse(self, name=None):
"""Converts this `RaggedTensor` into a `tf.sparse.SparseTensor`.
Example:
>>> rt = tf.ragged.constant([[1, 2, 3], [4], [], [5, 6]])
>>> print(rt.to_sparse())
SparseTensor(indices=tf.Tensor(
[[0 0] [0 1] [0 2] [1 0] [3 0] [3 1]],
shape=(6, 2), dtype=int64),
values=tf.Tensor([1 2 3 4 5 6], shape=(6,), dtype=int32),
dense_shape=tf.Tensor([4 3], shape=(2,), dtype=int64))
Args:
name: A name prefix for the returned tensors (optional).
Returns:
A SparseTensor with the same values as `self`.
"""
with ops.name_scope(name, "RaggedToSparse", [self]):
result = gen_ragged_conversion_ops.ragged_tensor_to_sparse(
self.nested_row_splits, self.flat_values, name=name)
return sparse_tensor.SparseTensor(result.sparse_indices,
result.sparse_values,
result.sparse_dense_shape)
@classmethod
def _from_variant(cls,
variant,
dtype,
output_ragged_rank,
input_ragged_rank=None,
row_splits_dtype=dtypes.int64,
name=None):
"""Converts a `variant` Tensor into a `RaggedTensor`.
The input `variant` could be a scalar, meaning it encodes a single
`RaggedTensor` with ragged_rank `output_ragged_rank`. Alternatively it could
have an arbitrary rank, in which case each element is decoded into a
`RaggedTensor` with ragged_rank `input_ragged_rank` and these are then
stacked according to the input shape to output a single `RaggedTensor`
with ragged_rank `output_ragged_rank`. If `input_ragged_rank` is not
provided, it is inferred dynamically as `output_ragged_rank` -
`rank(variant)`. If `input_ragged_rank` is provided, the following must be
true: `output_ragged_rank` = `input_ragged_rank` + `rank(variant)`.
Example:
>>> rt = tf.ragged.constant([[0], [1, 2]])
>>> et = rt._to_variant()
>>> stacked_et = tf.stack([et, et])
>>> tf.RaggedTensor._from_variant( # scalar input.
... et, dtype=tf.int32, output_ragged_rank=1).to_list()
[[0], [1, 2]]
>>> tf.RaggedTensor._from_variant( # batched input.
... stacked_et, dtype=tf.int32, output_ragged_rank=2).to_list()
[[[0], [1, 2]], [[0], [1, 2]]]
Args:
variant: A `variant` Tensor representing an encoded (possibly
nested-batched) `RaggedTensor`.
dtype: The dtype of the encoded `RaggedTensor`.
output_ragged_rank: The expected ragged rank of the output `RaggedTensor`.
input_ragged_rank: The ragged rank of each encoded `RaggedTensor`. This is
optional and inferred dynamically if not provided.
row_splits_dtype: `dtype` for the RaggedTensor's `row_splits` tensor. One
of `tf.int32` or `tf.int64`.
name: A name prefix for the returned tensors (optional).
Returns:
A `RaggedTensor` of dtype `dtype` and ragged rank `output_ragged_rank`.
Raises:
ValueError: If the input rank is known, `input_ragged_rank` is provided
and `output_ragged_rank` = `input_ragged_rank` + `rank(variant)` does
not hold.
"""
variant = ops.convert_to_tensor(
variant, name="variant", dtype=dtypes.variant)
if (variant.shape.ndims is not None and input_ragged_rank is not None and
output_ragged_rank != input_ragged_rank + variant.shape.ndims):
raise ValueError(
"output_ragged_rank must be equal to input_ragged_rank +"
"variant.shape.ndims, found variant.shape.ndims: %d, "
"input_ragged_rank: %d, output_ragged_rank: %d" %
(variant.shape.ndims, input_ragged_rank, output_ragged_rank))
input_ragged_rank = -1 if input_ragged_rank is None else input_ragged_rank
with ops.name_scope(
name, "RaggedFromVariant",
[variant, dtype, input_ragged_rank, output_ragged_rank]):
result = gen_ragged_conversion_ops.ragged_tensor_from_variant(
variant, input_ragged_rank, output_ragged_rank, dtype,
row_splits_dtype, name)
return cls.from_nested_row_splits(
result.output_dense_values,
result.output_nested_splits,
validate=False)
def _to_variant(self, batched_input=False, name=None):
"""Converts this `RaggedTensor` into a `variant` Tensor.
If `batched_input` is `True`, then the `RaggedTensor` is unbatched along the
zero-th dimension, each component `RaggedTensor` is encoded into a scalar
`variant` Tensor, and these are stacked to return a 1-D `variant` Tensor.
If `batched_input` is `False`, then the `RaggedTensor` is encoded as is and
a scalar `variant` Tensor is returned.
Example:
>>> rt = tf.ragged.constant([[[0]], [[1]], [[2]]])
>>> rt._to_variant().shape.as_list()
[]
>>> rt._to_variant(batched_input=True).shape.as_list()
[3]
Args:
batched_input: If `True`, the `RaggedTensor` is unbatched and converted to
a `variant` vector. Set to `False` by default.
name: A name prefix for the returned tensors (optional).
Returns:
A `variant` Tensor that encodes this `RaggedTensor`.
"""
with ops.name_scope(name, "RaggedToVariant", [self, batched_input]):
return gen_ragged_conversion_ops.ragged_tensor_to_variant(
self.nested_row_splits, self.flat_values, batched_input, name)
#=============================================================================
# String Encoding
#=============================================================================
def __repr__(self):
if self._is_eager():
return "<tf.RaggedTensor %s>" % self.to_list()
else:
return "tf.RaggedTensor(values=%s, row_splits=%s)" % (
self.values, self.row_splits)
#=============================================================================
# Eager Execution Mode
#=============================================================================
def numpy(self):
"""Returns a numpy `array` with the values for this `RaggedTensor`.
Requires that this `RaggedTensor` was constructed in eager execution mode.
Ragged dimensions are encoded using numpy `arrays` with `dtype=object` and
`rank=1`, where each element is a single row.
#### Examples
In the following example, the value returned by `RaggedTensor.numpy()`
contains three numpy `array` objects: one for each row (with `rank=1` and
`dtype=int64`), and one to combine them (with `rank=1` and `dtype=object`):
>>> tf.ragged.constant([[1, 2, 3], [4, 5]], dtype=tf.int64).numpy()
array([array([1, 2, 3]), array([4, 5])], dtype=object)
Uniform dimensions are encoded using multidimensional numpy `array`s. In
the following example, the value returned by `RaggedTensor.numpy()` contains
a single numpy `array` object, with `rank=2` and `dtype=int64`:
>>> tf.ragged.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.int64).numpy()
array([[1, 2, 3], [4, 5, 6]])
Returns:
A numpy `array`.
"""
if not self._is_eager():
raise ValueError("RaggedTensor.numpy() is only supported in eager mode.")
values = self.values.numpy()
splits = self.row_splits.numpy()
rows = [values[splits[i]:splits[i + 1]] for i in range(len(splits) - 1)]
if not rows:
return np.zeros((0, 0) + values.shape[1:], dtype=values.dtype)
# Note: if `rows` have ragged lengths, then they will be stored in a
# np.ndarray with dtype=object and rank=1. If they have uniform lengths,
# they will be combined into a single np.ndarray with dtype=row.dtype and
# rank=row.rank+1.
return np.array(rows)
def to_list(self):
"""Returns a nested Python `list` with the values for this `RaggedTensor`.
Requires that `rt` was constructed in eager execution mode.
Returns:
A nested Python `list`.
"""
if self._is_eager():
return self._eager_value().to_list()
else:
raise ValueError("RaggedTensor.to_list() is only supported in eager "
"mode; in graph mode, evaluate the RaggedTensor first "
"and then use RaggedTensorValue.to_list().")
def _eager_value(self):
"""Returns a RaggedTensorValue for self. Requires self._is_eager()=true."""
value = self.flat_values.numpy()
for row_splits in reversed(self.nested_row_splits):
value = ragged_tensor_value.RaggedTensorValue(value, row_splits.numpy())
return value
def _is_eager(self):
"""Returns True if values & row_splits Tensors are all `EagerTensor`s."""
rt = self
while isinstance(rt, RaggedTensor):
if not isinstance(rt.row_splits, ops.EagerTensor):
return False
rt = rt.values
return isinstance(rt, ops.EagerTensor)
#=============================================================================
# Operators
#=============================================================================
# To avoid circular dependencies, we define stub methods for operators here,
# and then override them when the ragged_operators module is imported.
def _overloaded_operator(name): # pylint: disable=no-self-argument
def stub(*args, **kwargs):
del args, kwargs
raise ValueError(
"You must import 'tensorflow.python.ops.ragged.ragged_ops' "
"before using RaggedTensor.%s" % name)
return stub
__getitem__ = _overloaded_operator("__getitem__")
__ge__ = _overloaded_operator("__ge__")
__gt__ = _overloaded_operator("__gt__")
__le__ = _overloaded_operator("__le__")
__lt__ = _overloaded_operator("__lt__")
__and__ = _overloaded_operator("__and__")
__rand__ = _overloaded_operator("__rand__")
__invert__ = _overloaded_operator("__invert__")
__ror__ = _overloaded_operator("__ror__")
__or__ = _overloaded_operator("__or__")
__xor__ = _overloaded_operator("__xor__")
__rxor__ = _overloaded_operator("__rxor__")
__abs__ = _overloaded_operator("__abs__")
__add__ = _overloaded_operator("__add__")
__radd__ = _overloaded_operator("__radd__")
__div__ = _overloaded_operator("__div__")
__rdiv__ = _overloaded_operator("__rdiv__")
__floordiv__ = _overloaded_operator("__floordiv__")
__rfloordiv__ = _overloaded_operator("__rfloordiv__")
__mod__ = _overloaded_operator("__mod__")
__rmod__ = _overloaded_operator("__rmod__")
__mul__ = _overloaded_operator("__mul__")
__rmul__ = _overloaded_operator("__rmul__")
__neg__ = _overloaded_operator("__neg__")
__pow__ = _overloaded_operator("__pow__")
__rpow__ = _overloaded_operator("__rpow__")
__sub__ = _overloaded_operator("__sub__")
__rsub__ = _overloaded_operator("__rsub__")
__truediv__ = _overloaded_operator("__truediv__")
__rtruediv__ = _overloaded_operator("__rtruediv__")
del _overloaded_operator
#=============================================================================
# Name Scope
#=============================================================================
# This private function is used by ops.name_scope to ensure that all of the
# input tensors for the scope belong to the same graph. Defining this means
# that you may include `RaggedTensor` objects in the name_scope `values`
# list.
def _as_graph_element(self):
"""Convert `self` to a graph element."""
values = self.values
while isinstance(values, RaggedTensor):
values = values.values
return values
#=============================================================================
# Composite Tensor
#=============================================================================
@property
def _type_spec(self):
return RaggedTensorSpec.from_value(self)
def _shape_invariant_to_type_spec(self, shape):
return RaggedTensorSpec(shape, self.dtype, self.ragged_rank,
self.row_splits.dtype)
def consumers(self):
return self._consumers()
def is_ragged(value):
"""Returns true if `value` is a ragged tensor or ragged tensor value."""
return isinstance(value,
(RaggedTensor, ragged_tensor_value.RaggedTensorValue))
def match_row_splits_dtypes(*tensors, **kwargs):
"""Return a copy of `tensors` with row_splits all having the same dtype.
Args:
*tensors: A list of Tensors or RaggedTensors.
**kwargs: If 'return_dtype=True', then return a tuple (dtype, tensors),
where `dtype` is the data type used by row-splits, and `tensors` is the
converted list of `Tensors` and `RaggedTensors`.
Returns:
The converted list of `Tensors` and `RaggedTensors`.
"""
return_dtype = kwargs.pop("return_dtype", False)
if kwargs:
raise ValueError("Unexpected keyword args %r" % kwargs)
has_int32 = False
has_int64 = False
for tensor in tensors:
if isinstance(tensor, RaggedTensor):
if tensor.row_splits.dtype == dtypes.int32:
has_int32 = True
else:
has_int64 = True
if has_int32 and has_int64:
if not ragged_config.auto_cast_partition_dtype():
raise ValueError("Input RaggedTensors have mismatched row_splits dtypes; "
"use RaggedTensor.with_row_splits_dtype() to convert "
"them to compatible dtypes.")
dtype = dtypes.int64
tensors = tuple(
t.with_row_splits_dtype(dtypes.int64) if isinstance(t, RaggedTensor
) else t
for t in tensors)
elif has_int32:
dtype = dtypes.int32
else:
dtype = dtypes.int64
if return_dtype:
return (dtype, tensors)
else:
return tensors
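# Illustrative sketch of `match_row_splits_dtypes` (the tensors below are
# assumed inputs, not from this module):
#
#   rt_a = tf.ragged.constant([[1, 2], [3]])   # int64 row_splits (the default)
#   rt_b = tf.ragged.constant([[4], [5, 6]])   # int64 row_splits (the default)
#   dtype, (rt_a, rt_b) = match_row_splits_dtypes(rt_a, rt_b, return_dtype=True)
#   # dtype == tf.int64 and both tensors are returned unchanged. Mixed
#   # int32/int64 inputs either raise, or are cast to int64 when
#   # ragged_config.auto_cast_partition_dtype() is enabled.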
#===============================================================================
# RaggedTensorSpec
#===============================================================================
@tf_export("RaggedTensorSpec")
class RaggedTensorSpec(type_spec.BatchableTypeSpec):
"""Type specification for a `tf.RaggedTensor`."""
__slots__ = [
"_shape", "_dtype", "_ragged_rank", "_row_splits_dtype",
"_flat_values_spec"
]
@property
def dtype(self):
"""The `tf.dtypes.DType` specified by this type for the RaggedTensor.
Examples:
>>> rt = tf.ragged.constant([["a"], ["b", "c"]], dtype=tf.string)
>>> tf.type_spec_from_value(rt).dtype
tf.string
Returns:
A `tf.dtypes.DType` of the values in the RaggedTensor.
"""
return self._dtype
@property
def shape(self):
"""The statically known shape of the RaggedTensor.
Examples:
>>> rt = tf.ragged.constant([[0], [1, 2]])
>>> tf.type_spec_from_value(rt).shape
TensorShape([2, None])
>>> rt = tf.ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1)
>>> tf.type_spec_from_value(rt).shape
TensorShape([2, None, 2])
Returns:
A `tf.TensorShape` containing the statically known shape of the
RaggedTensor. Ragged dimensions have a size of `None`.
"""
return self._shape
@property
def ragged_rank(self):
"""The number of times the RaggedTensor's flat_values is partitioned.
Defaults to `shape.ndims - 1`.
Examples:
>>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
>>> tf.type_spec_from_value(values).ragged_rank
1
>>> rt1 = tf.RaggedTensor.from_uniform_row_length(values, 2)
>>> tf.type_spec_from_value(rt1).ragged_rank
2
Returns:
A Python `int` indicating the number of times the underlying `flat_values`
Tensor has been partitioned to add a new dimension.
I.e., `tf.rank(rt) = tf.rank(rt.flat_values) + rt.ragged_rank`.
"""
return self._ragged_rank
@property
def row_splits_dtype(self):
"""The `tf.dtypes.DType` of the RaggedTensor's `row_splits`.
Examples:
>>> rt = tf.ragged.constant([[1, 2, 3], [4]], row_splits_dtype=tf.int64)
>>> tf.type_spec_from_value(rt).row_splits_dtype
tf.int64
Returns:
A `tf.dtypes.DType` for the RaggedTensor's `row_splits` tensor. One
of `tf.int32` or `tf.int64`.
"""
return self._row_splits_dtype
@property
def flat_values_spec(self):
"""The `TypeSpec` of the flat_values of RaggedTensor.
Returns:
- The TypeSpec of flat_values.
- None when the flat_values is a Tensor.
"""
return self._flat_values_spec
@property
def value_type(self):
return RaggedTensor if self._ragged_rank > 0 else ops.Tensor
def __init__(self,
shape=None,
dtype=dtypes.float32,
ragged_rank=None,
row_splits_dtype=dtypes.int64,
flat_values_spec=None):
"""Constructs a type specification for a `tf.RaggedTensor`.
Args:
shape: The shape of the RaggedTensor, or `None` to allow any shape. If a
shape is specified, then all ragged dimensions must have size `None`.
dtype: `tf.DType` of values in the RaggedTensor.
ragged_rank: Python integer, the number of times the RaggedTensor's
flat_values is partitioned. Defaults to `shape.ndims - 1`.
row_splits_dtype: `dtype` for the RaggedTensor's `row_splits` tensor. One
of `tf.int32` or `tf.int64`.
flat_values_spec: TypeSpec for the flat_values of the RaggedTensor. It shall
be provided when the flat_values is a CompositeTensor rather than a Tensor.
If both `dtype` and `flat_values_spec` are provided, `dtype` must
be the same as `flat_values_spec.dtype`. (experimental)
"""
self._shape = tensor_shape.as_shape(shape)
self._row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
if flat_values_spec is not None:
if dtype is None:
dtype = flat_values_spec.dtype
elif dtype != flat_values_spec.dtype:
raise ValueError("dtype must be the same as flat_values_spec.dtype")
elif dtype is None:
raise ValueError(
"At least one of dtype or flat_values_spec must be provided")
self._dtype = dtypes.as_dtype(dtype)
self._flat_values_spec = flat_values_spec
rank = self._shape.ndims
if ragged_rank is None:
if rank is None:
raise ValueError("Must specify ragged_rank or "
"a shape with a known rank.")
ragged_rank = rank - 1
self._ragged_rank = ragged_rank
if not isinstance(self._ragged_rank, int):
raise TypeError("ragged_rank must be an int")
if rank is not None:
if ragged_rank >= rank:
raise ValueError("ragged_rank must be less than rank.")
def is_compatible_with(self, spec_or_value):
# RaggedTensor with ragged_rank 0 can be compatible with raw flat_values.
if self._ragged_rank == 0:
if self._flat_values_spec is None:
if isinstance(spec_or_value, (ops.Tensor, tensor_spec.TensorSpec)):
return tensor_spec.TensorSpec(
self._shape, self._dtype).is_compatible_with(spec_or_value)
elif not isinstance(spec_or_value, (RaggedTensor, RaggedTensorSpec)):
return self._flat_values_spec.is_compatible_with(spec_or_value)
return super(RaggedTensorSpec, self).is_compatible_with(spec_or_value)
def _serialize(self):
if self._flat_values_spec is None:
return (self._shape, self._dtype, self._ragged_rank,
self._row_splits_dtype)
else:
return (self._shape, self._dtype, self._ragged_rank,
self._row_splits_dtype, self._flat_values_spec)
@property
def _component_specs(self):
if self._ragged_rank == 0:
if self._flat_values_spec is not None:
return [self._flat_values_spec]
else:
return [tensor_spec.TensorSpec(self._shape, self._dtype)]
flat_values_spec = self._flat_values_spec
if flat_values_spec is None:
flat_values_shape = tensor_shape.TensorShape([None]).concatenate(
self._shape[self._ragged_rank + 1:])
flat_values_spec = tensor_spec.TensorSpec(flat_values_shape, self._dtype)
outer_dim = tensor_shape.dimension_at_index(self._shape, 0)
outer_splits_shape = [None if outer_dim is None else outer_dim + 1]
inner_splits_spec = tensor_spec.TensorSpec([None], self._row_splits_dtype)
specs = ([
flat_values_spec,
tensor_spec.TensorSpec(outer_splits_shape, self._row_splits_dtype)
] + [inner_splits_spec for _ in range(self._ragged_rank - 1)])
return specs
def _to_components(self, value):
if is_ragged(value):
return [value.flat_values] + list(value.nested_row_splits)
else:
return [value]
def _from_components(self, tensor_list):
result = tensor_list[0]
if (all(isinstance(t, np.ndarray) for t in tensor_list) and
not tf2.enabled()):
for row_splits in reversed(tensor_list[1:]):
result = ragged_tensor_value.RaggedTensorValue(result, row_splits)
else:
if isinstance(tensor_list[0], np.ndarray):
tensor_list = [ops.convert_to_tensor(t) for t in tensor_list]
result = tensor_list[0]
for row_splits in reversed(tensor_list[1:]):
result = RaggedTensor(
result,
RowPartition.from_row_splits(row_splits, validate=False),
internal=True)
return result
# The RaggedTensorSpec tensor_list encoding uses to/from_variant ops
# to (un)box the component tensors in a way that allows for batching &
# unbatching.
@property
def _flat_tensor_specs(self):
# NOTE(mishragaurav): The default flat shape of a boxed `RaggedTensor` is
# `[]` (scalar), but a `RaggedTensorSpec` can also represent a batch of
# boxed `RaggedTensor` objects with shape `(...)` (and batches of batches,
# etc.), so the flat shape must be unknown.
return [tensor_spec.TensorSpec(None, dtypes.variant)]
def _to_tensor_list(self, value):
# TODO(edloper): Update gen_ragged_conversion_ops that convert to and
# from variant to include all of the row-partitioning tensors.
if self._flat_values_spec is not None:
raise ValueError("Customized value_type is not supported")
ragged_rank = value.ragged_rank if isinstance(value, RaggedTensor) else 0
if ragged_rank != self._ragged_rank:
raise ValueError("Ragged rank of value (%d) does not match ragged "
"rank of type (%d)" % (ragged_rank, self._ragged_rank))
if ragged_rank == 0:
return [
gen_ragged_conversion_ops.ragged_tensor_to_variant(
(), value, batched_input=False)
]
# pylint: disable=protected-access
return [value._to_variant(batched_input=False)]
def _to_batched_tensor_list(self, value):
if self._flat_values_spec is not None:
raise ValueError("Customized value_type is not supported")
ragged_rank = value.ragged_rank if isinstance(value, RaggedTensor) else 0
if ragged_rank != self._ragged_rank:
raise ValueError("Ragged rank of value (%d) does not match ragged "
"rank of type (%d)" % (ragged_rank, self._ragged_rank))
if ragged_rank == 0:
# TODO(b/141789000) Update this to handle ragged_rank=0.
raise ValueError(
"_to_batched_tensor_list doesn't support ragged_rank=0 yet")
# pylint: disable=protected-access
return [value._to_variant(batched_input=True)]
def _from_compatible_tensor_list(self, tensor_list):
if self._flat_values_spec is not None:
raise ValueError("Customized value_type is not supported")
if self._ragged_rank < 0:
raise ValueError("ragged_rank must be non-negative; got %s." %
self._ragged_rank)
result = RaggedTensor._from_variant( # pylint: disable=protected-access
tensor_list[0],
dtype=self._dtype,
row_splits_dtype=self._row_splits_dtype,
output_ragged_rank=self._ragged_rank)
if self._shape.ndims is not None:
if isinstance(result, RaggedTensor):
outer_dim = tensor_shape.dimension_value(self._shape[0])
if outer_dim is not None:
result.row_splits.set_shape([outer_dim + 1])
result._set_shape(self._shape) # pylint: disable=protected-access
else:
result.set_shape(self._shape)
return result
def _batch(self, batch_size):
if self._flat_values_spec is not None:
raise ValueError("Customized value_type is not supported")
return RaggedTensorSpec(
tensor_shape.TensorShape([batch_size]).concatenate(self._shape),
self._dtype, self._ragged_rank + 1, self._row_splits_dtype)
def _unbatch(self):
if self._flat_values_spec is not None:
raise ValueError("Customized value_type is not supported")
# Note: Negative ragged_rank is allowed here because the dataset could be
# subsequently batched again. If ragged_rank > 1, assume row_splits_dtype is
# consistent. Errors are handled in
# RaggedTensorSpec._from_compatible_tensor_list()
return RaggedTensorSpec(self._shape[1:], self._dtype, self._ragged_rank - 1,
self._row_splits_dtype)
def _to_legacy_output_types(self):
return self._dtype
def _to_legacy_output_shapes(self):
return self._shape
def _to_legacy_output_classes(self):
return self
@classmethod
def from_value(cls, value):
if (isinstance(value, ragged_tensor_value.RaggedTensorValue) or
isinstance(value.flat_values, ops.Tensor)):
return cls(
shape=value.shape,
dtype=value.values.dtype,
ragged_rank=value.ragged_rank,
row_splits_dtype=value.row_splits.dtype)
else:
return cls(
shape=value.shape,
dtype=value.values.dtype,
ragged_rank=value.ragged_rank,
row_splits_dtype=value.row_splits.dtype,
flat_values_spec=type_spec.type_spec_from_value(value.flat_values))
type_spec.register_type_spec_from_value_converter(
ragged_tensor_value.RaggedTensorValue, RaggedTensorSpec.from_value)
#===============================================================================
# Convert value -> tensor
#===============================================================================
def convert_to_tensor_or_ragged_tensor(value,
dtype=None,
preferred_dtype=None,
name=None):
"""Converts value to a `RaggedTensor` or `Tensor`.
* If `value` is a `RaggedTensor`, then return it as-is.
* If `value` is a `RaggedTensorValue`, return a corresponding constant
`RaggedTensor`.
* Otherwise, use `convert_to_tensor` to convert `value` to a `Tensor`.
Args:
value: A `RaggedTensor`, a `RaggedTensorValue`, or an object whose type has
a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing the type
is inferred from the type of `value`.
preferred_dtype: Optional element type for the returned tensor, used when
dtype is None. This argument has no effect if `value` is already a
tensor, or when conversion is not possible.
name: Optional name to use if a new `Tensor` is created.
Returns:
A `Tensor` or `RaggedTensor`.
"""
if isinstance(value, RaggedTensor):
if dtype and not dtype.is_compatible_with(value.dtype):
raise ValueError("Tensor conversion requested dtype %s for "
"RaggedTensor with dtype %s: %r" %
(dtype.name, value.dtype.name, value))
return value
elif isinstance(value, ragged_tensor_value.RaggedTensorValue):
with ops.name_scope(name, "ConvertToTensorOrRaggedTensor", []):
flat_values = ops.convert_to_tensor(
value=value.flat_values,
dtype=dtype,
preferred_dtype=preferred_dtype,
name="flat_values")
return RaggedTensor.from_nested_row_splits(
flat_values, value.nested_row_splits, validate=False)
else:
return ops.convert_to_tensor_v2_with_dispatch(
value=value, dtype=dtype, dtype_hint=preferred_dtype, name=name)
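# Minimal usage sketch of the conversion dispatch (illustrative values, not
# taken from this module):
#
#   >>> convert_to_tensor_or_ragged_tensor(tf.ragged.constant([[1], [2, 3]]))
#   <tf.RaggedTensor [[1], [2, 3]]>       # RaggedTensors are returned as-is
#   >>> convert_to_tensor_or_ragged_tensor([[1, 2], [3, 4]]).shape
#   TensorShape([2, 2])                   # other inputs become a dense Tensor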
def _convert_to_ragged_tensor_values(value):
"""Converts value to supported RaggedTensor value.
* If `value` is an object of supported value type, then return it as-is.
* Otherwise convert it to Tensor or RaggedTensor.
Args:
value: An object of `Tensor`, `RaggedTensor` or registered RaggedTensor
value types, or an object whose type has a registered `Tensor`
conversion function.
Returns:
An object of `Tensor`, `RaggedTensor` or registered RaggedTensor
value types.
"""
if _is_supported_ragged_values_type(value):
return value
else:
return convert_to_tensor_or_ragged_tensor(value, name="values")
#===============================================================================
# Register RaggedTensor for use with session.run.
#===============================================================================
def _ragged_tensor_value_from_components(components):
components = list(components)
value = components.pop()
while components:
value = ragged_tensor_value.RaggedTensorValue(value, components.pop())
return value
def _ragged_tensor_session_fetch(rt):
components = rt.nested_row_splits + (rt.flat_values,)
return (components, _ragged_tensor_value_from_components)
def _ragged_tensor_session_feed(feed_key, feed_val):
key_components = feed_key.nested_row_splits + (feed_key.flat_values,)
val_components = feed_val.nested_row_splits + (feed_val.flat_values,)
return zip(key_components, val_components)
def _ragged_tensor_session_feed_for_partial_run(feed_key):
return feed_key.nested_row_splits + (feed_key.flat_values,)
session.register_session_run_conversion_functions(
RaggedTensor, _ragged_tensor_session_fetch, _ragged_tensor_session_feed,
_ragged_tensor_session_feed_for_partial_run)
#===============================================================================
# RaggedTensorType
#===============================================================================
class RaggedTensorType(object):
"""Encoding of a static type for a `RaggedTensor`.
Use this type to express/declare that an output must have the type of
`RaggedTensor`.
"""
def __init__(self, dtype, ragged_rank, row_splits_dtype=dtypes.int64):
"""Initializes a RaggedTensorType object.
Args:
dtype: data type of the `RaggedTensor`'s inner values.
ragged_rank: ragged_rank of the declared `RaggedTensor`.
row_splits_dtype: data type for the `RaggedTensor`'s row splits.
One of: `tf.int32` or `tf.int64`.
"""
row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
self._dtype = dtype
self._ragged_rank = ragged_rank
self._row_splits_dtype = row_splits_dtype
dtype = property(lambda self: self._dtype)
ragged_rank = property(lambda self: self._ragged_rank)
row_splits_dtype = property(lambda self: self._row_splits_dtype)
def __repr__(self):
return "RaggedTensorType(%r, %r, %r)" % (
self.dtype, self.ragged_rank, self.row_splits_dtype)
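# Hypothetical usage sketch (not from this module): RaggedTensorType only
# carries a static description of a ragged output, so consumers inspect its
# properties rather than any tensor data:
#
#   out_type = RaggedTensorType(dtype=dtypes.int64, ragged_rank=1)
#   out_type.dtype, out_type.ragged_rank, out_type.row_splits_dtype
#   # (tf.int64, 1, tf.int64)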
#===============================================================================
# Helper Functions
#===============================================================================
def _assert_sparse_indices_are_ragged_right(indices):
"""Checks that the given SparseTensor.indices tensor is ragged-right.
Example: `indices = [[0, 0], [0, 1], [2, 0], [3, 1]]` is not ragged right
because the entry `[3, 1]` skips a cell.
Args:
indices: The SparseTensor indices to check.
Returns:
A list of control dependency op tensors.
"""
index_prefix = indices[:, :-1]
index_suffix = indices[:, -1]
# Check whether each index is starting a new row in the innermost dimension
# (prefix[i] != prefix[i-1]) or continuing a row (prefix[i] == prefix[i-1]).
# (Note: this skips the first index; we will check that separately below.)
index_prefix_changed = math_ops.reduce_any(
math_ops.not_equal(index_prefix[1:], index_prefix[:-1]), axis=1)
# Check two cases:
# * For indices that start a new row: index_suffix[i] must be zero.
# * For indices that continue a row: index_suffix[i] must be equal to
# index_suffix[i-1]+1.
index_ok = array_ops.where(
index_prefix_changed, math_ops.equal(index_suffix[1:], 0),
math_ops.equal(index_suffix[1:], index_suffix[:-1] + 1))
# Also check that the very first index didn't skip any cells. The first
# index starts a new row (by definition), so its suffix should be zero.
sparse_indices_are_ragged_right = math_ops.logical_and(
math_ops.reduce_all(math_ops.equal(index_suffix[:1], 0)),
math_ops.reduce_all(index_ok))
message = [
"SparseTensor is not right-ragged", "SparseTensor.indices =", indices
]
return [control_flow_ops.Assert(sparse_indices_are_ragged_right, message)]
@ops.RegisterGradient("RaggedTensorToSparse")
def _ragged_tensor_to_sparse_gradient(op, unused_sparse_indices_grad,
sparse_values_grad,
unused_sparse_shape_grad):
"""Gradient for RaggedTensorToSparse."""
op_inputs_nested_row_splits = op.inputs[:-1]
op_inputs_flat_values = op.inputs[-1]
# No gradient for the RaggedTensor's nested_row_splits.
nested_row_splits_gradient = [None] * len(op_inputs_nested_row_splits)
# Gradient for the RaggedTensor's flat_values is formed by reshaping
# the gradient for the SparseTensor's values.
flat_values_shape = array_ops.shape(op_inputs_flat_values)
flat_values_gradient = array_ops.reshape(sparse_values_grad,
flat_values_shape)
return nested_row_splits_gradient + [flat_values_gradient]
def _assert_monotonic_increasing(tensor, message=None):
return check_ops.assert_non_negative(
tensor[1:] - tensor[:-1], message=message)
def _assert_zero(tensor, message=None):
return check_ops.assert_equal(
tensor, constant_op.constant(0, dtype=tensor.dtype), message=message)
def _nrows(tensor, out_type=dtypes.int32):
if isinstance(tensor, RaggedTensor):
return tensor.nrows(out_type=out_type)
else:
return array_ops.shape(tensor, out_type=out_type)[0]
def merge_dims(value, outer_axis, inner_axis):
"""Merges value[outer_axis...inner_axis] into a single dimension.
See `RaggedTensor.merge_dims()` for more details. This helper differs from
`RaggedTensor.merge_dims()` in that `value` may be a dense or ragged tensor.
Args:
value: A `RaggedTensor` or `Tensor`
outer_axis: `int`
inner_axis: `int`
Returns:
A flattened `RaggedTensor` or `Tensor`.
"""
if outer_axis == inner_axis:
return value
# Flatten outer dimensions of a RaggedTensor by just taking its values.
while outer_axis == 0 and isinstance(value, RaggedTensor):
value = value.values
inner_axis -= 1
if inner_axis == 0:
return value
# Flatten non-Ragged tensors using tf.reshape().
if not isinstance(value, RaggedTensor):
if value.shape.is_fully_defined():
old_shape = value.shape.as_list()
new_shape = old_shape[:outer_axis] + [-1] + old_shape[inner_axis + 1:]
else:
old_shape = array_ops.shape(value)
new_shape = array_ops.concat(
[old_shape[:outer_axis], [-1], old_shape[inner_axis + 1:]], axis=0)
return array_ops.reshape(value, new_shape)
# Handle outer_axis>1 via recursion.
if outer_axis > 1:
return value.with_values(
merge_dims(value.values, outer_axis - 1, inner_axis - 1))
# At this point, we know outer_axis == 1, and value is a RaggedTensor.
# So we need to flatten the values and build a corresponding splits tensor.
new_values = value.values
new_splits = value.row_splits
for axis in range(outer_axis, inner_axis):
if isinstance(new_values, RaggedTensor):
# Flatten a single ragged dimension.
new_splits = array_ops.gather(new_values.row_splits, new_splits)
new_values = new_values.values
else:
# Flatten all remaining dense dimensions.
shape_split = inner_axis - axis + 1
if new_values.shape.is_fully_defined():
old_shape = new_values.shape.as_list()
new_shape = [-1] + old_shape[shape_split:]
flat_size = _prod(old_shape[1:shape_split])
else:
old_shape = array_ops.shape(new_values)
new_shape = array_ops.concat([[-1], old_shape[shape_split:]], axis=0)
flat_size = math_ops.cast(
math_ops.reduce_prod(old_shape[1:shape_split]), new_splits.dtype)
new_values = array_ops.reshape(new_values, new_shape)
new_splits = new_splits * flat_size
break
return RaggedTensor.from_row_splits(new_values, new_splits)
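# Illustrative sketch of the module-level helper (inputs are assumed); unlike
# the RaggedTensor method, it also accepts dense Tensors:
#
#   >>> merge_dims(tf.ones([2, 3, 4]), 0, 1).shape
#   TensorShape([6, 4])
#   >>> merge_dims(tf.ragged.constant([[[1, 2], [3]], [[4]]]), 0, 1)
#   <tf.RaggedTensor [[1, 2], [3], [4]]>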
def _prod(lst):
"""Returns the product of the numbers in a list."""
return functools.reduce(operator.mul, lst, 1)
def _get_row_partition_type_tensor_pairs_tail(partition):
"""Gets a row partition type tensor pair for the tail.
If value_rowid is defined, then it is used. Otherwise, row_splits
are used.
Args:
partition: a RowPartition.
Returns:
A list of (row_partition_type, row_partition_tensor) pairs.
"""
if partition.has_precomputed_value_rowids():
return ("VALUE_ROWIDS", partition.value_rowids())
else:
return ("ROW_SPLITS", partition.row_splits())
def _get_row_partition_type_tensor_pairs(rt_input):
"""Gets a list of the row partitions for rt_input.
If value_rowids are defined, then they are used. Otherwise, row_splits
are used. If the outermost level has value_rowids defined, then nrows is
also added.
Args:
rt_input: a ragged tensor.
Returns:
A list of (row_partition_type, row_partition_tensor) pairs.
"""
partitions = rt_input._nested_row_partitions # pylint: disable=protected-access
tail = [_get_row_partition_type_tensor_pairs_tail(x) for x in partitions[1:]]
if partitions[0]._value_rowids is not None: # pylint: disable=protected-access
return [("FIRST_DIM_SIZE", partitions[0].nrows()),
("VALUE_ROWIDS", partitions[0].value_rowids())] + tail
else:
return [("ROW_SPLITS", partitions[0].row_splits())] + tail
def _shape_as_tensor(shape, dtype):
"""Takes shape and coerces it to a shape as a tensor.
If the object is already a tensor, simply passes it on (result is guaranteed
to be int64 or int32, but not necessarily dtype).
If not, creates a tensor of type dtype.
The result is a scalar equal to -1 if the shape has unknown rank;
otherwise, it is a vector, where unknown dimensions are represented with a
value of -1.
In C++, see TensorShapeFromTensor for parsing shapes in kernels, and
InferenceContext::MakeShapeFromShapeTensorTreatScalarAsUnknownShape, for
use in the shape inference function.
Args:
shape: input to coerce from TensorShape, Tensor, None, List[Optional[Int]],
Tuple[Optional[Int]].
dtype: tf.int64 or tf.int32
Returns:
a scalar or vector tensor of dtype tf.int32 or tf.int64.
"""
if dtype != dtypes.int64 and dtype != dtypes.int32:
raise ValueError("Expected int64 or int32 for dtype: got {}".format(dtype))
if isinstance(shape, ops.Tensor):
if shape.dtype != dtypes.int64 and shape.dtype != dtypes.int32:
return math_ops.cast(shape, dtype)
return shape
shape = tensor_shape.as_shape(shape)
if not shape:
# Imply rank is unknown using a -1 scalar.
return constant_op.constant(-1, dtype=dtype)
shape = [(-1 if x is None else x) for x in shape.as_list()]
# At this point, shape is List[Int].
return constant_op.constant(shape, dtype=dtype)
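# Behaviour sketch for `_shape_as_tensor` (hypothetical inputs):
#
#   _shape_as_tensor([2, None, 3], dtypes.int64)                     # vector [2, -1, 3]
#   _shape_as_tensor(tensor_shape.TensorShape(None), dtypes.int32)   # scalar -1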
def _nvals_uniform_row_length(values, uniform_row_length):
"""Get the number of values for uniform row length constructor."""
const_nvals = tensor_shape.dimension_at_index(values.shape, 0).value
if const_nvals is not None:
nvals = constant_op.constant(const_nvals, uniform_row_length.dtype)
elif isinstance(values, RaggedTensor):
nvals = values.nrows(out_type=uniform_row_length.dtype)
else:
nvals = array_ops.shape(values, out_type=uniform_row_length.dtype)[0]
return nvals
def _get_optional_partition_dtype(values):
"""Returns the partition dtype, or None if None exists."""
if isinstance(values, RaggedTensor):
# pylint: disable=protected-access
return values._row_partition.dtype
return None
_SUPPORTED_RAGGED_VALUE_TYPES = (ops.Tensor, RaggedTensor)
# TODO(edloper): Consider whether we should change the registry to be on
# TypeSpecs rather than ValueTypes.
def _add_supported_value_type(cls):
"""Register the `cls` as supported value type of RaggedTenosr.
The cls must be a subclass of CompositeTensor, and must support:
- Properties:
- x.shape
- x.dtype
- Methods:
- x.__getitem__(idx) (method: returns a supported value type)
- Ops:
- tf.shape(x) -- tf.shape(x)[0] must be a tf.Tensor.
- tf.tile(x)
- assert_rank_at_least(x)
- tf.ones_like(x)
- tf.gather(params=x, indices=Tensor)
- tf.add(x, y)
- tf.boolean_mask(x, ...)
- @TODO(edloper): Complete this list
Note: the following RaggedTensor, RaggedTensorSpec methods & ops are not
currently supported unless `rt.values` is a RaggedTensor or a tf.Tensor:
- rt.to_tensor()
- rt.to_sparse_tensor()
- rt._to_variant()
- rt._from_variant()
- tf.ragged.cross([rt])
- tf.gather(params=x, indices=rt) # rt used for indices
- RaggedTensorSpec methods:
- _batch
- _unbatch
- _to_tensor_list
- _to_batched_tensor_list
- _from_compatible_tensor_list
Args:
cls: The type to be added to supported value types.
"""
if not issubclass(cls, composite_tensor.CompositeTensor):
raise ValueError("cls(%s) must be a subclass of CompositeTensor" % cls)
if not hasattr(cls, "shape"):
raise ValueError("cls must support the `shape` property")
if not hasattr(cls, "dtype"):
raise ValueError("cls must support the `dtype` property")
global _SUPPORTED_RAGGED_VALUE_TYPES
_SUPPORTED_RAGGED_VALUE_TYPES += (cls,)
def _is_supported_ragged_values_type(value):
return isinstance(value, _SUPPORTED_RAGGED_VALUE_TYPES)
def _assert_is_supported_ragged_values_type(value):
if not _is_supported_ragged_values_type(value):
ok_types = ", ".join(cls.__name__ for cls in
_SUPPORTED_RAGGED_VALUE_TYPES)
raise TypeError("type(values) must be one of: %r, got %r" %
(ok_types, value))
| apache-2.0 | -2,884,416,179,128,553,000 | 38.828862 | 82 | 0.62639 | false |
niphlod/pydal | pydal/connection.py | 1 | 5577 | # -*- coding: utf-8 -*-
import os
from ._compat import itervalues
from ._globals import GLOBAL_LOCKER, THREAD_LOCAL
from ._load import OrderedDict
from .helpers._internals import Cursor
class ConnectionPool(object):
POOLS = {}
check_active_connection = True
def __init__(self):
_iid_ = str(id(self))
self._connection_thname_ = '_pydal_connection_' + _iid_ + '_'
self._cursors_thname_ = '_pydal_cursors_' + _iid_ + '_'
@property
def _pid_(self):
return str(os.getpid())
@property
def _connection_uname_(self):
return self._connection_thname_ + self._pid_
@property
def _cursors_uname_(self):
return self._cursors_thname_ + self._pid_
@staticmethod
def set_folder(folder):
THREAD_LOCAL._pydal_folder_ = folder
@property
def connection(self):
return getattr(THREAD_LOCAL, self._connection_uname_)
@connection.setter
def connection(self, val):
setattr(THREAD_LOCAL, self._connection_uname_, val)
self._clean_cursors()
if val is not None:
self._build_cursor()
def _clean_cursors(self):
setattr(THREAD_LOCAL, self._cursors_uname_, OrderedDict())
@property
def cursors(self):
return getattr(THREAD_LOCAL, self._cursors_uname_)
def _build_cursor(self):
rv = Cursor(self.connection)
self.cursors[id(rv.cursor)] = rv
return rv
def _get_or_build_free_cursor(self):
for handler in itervalues(self.cursors):
if handler.available:
return handler
return self._build_cursor()
@property
def cursor(self):
return self._get_or_build_free_cursor().cursor
def lock_cursor(self, cursor):
self.cursors[id(cursor)].lock()
def release_cursor(self, cursor):
self.cursors[id(cursor)].release()
def close_cursor(self, cursor):
cursor.close()
del self.cursors[id(cursor)]
def close(self, action='commit', really=True):
#: if we have an action (commit, rollback), try to execute it
succeeded = True
if action:
try:
if callable(action):
action(self)
else:
getattr(self, action)()
except:
#: connection had some problems, we want to drop it
succeeded = False
#: if we have pools, we should recycle the connection (but only when
# we succeeded in `action`, if any and `len(pool)` is good)
if self.pool_size and succeeded:
GLOBAL_LOCKER.acquire()
pool = ConnectionPool.POOLS[self.uri]
if len(pool) < self.pool_size:
pool.append(self.connection)
really = False
GLOBAL_LOCKER.release()
#: closing the connection when we `really` want to, in particular:
# - when we had an exception running `action`
# - when we don't have pools
# - when we have pools but they're full
if really:
try:
self.close_connection()
except:
pass
#: always unset `connection` attribute
self.connection = None
@staticmethod
def close_all_instances(action):
""" to close cleanly databases in a multithreaded environment """
dbs = getattr(THREAD_LOCAL, '_pydal_db_instances_', {}).items()
for db_uid, db_group in dbs:
for db in db_group:
if hasattr(db, '_adapter'):
db._adapter.close(action)
getattr(THREAD_LOCAL, '_pydal_db_instances_', {}).clear()
getattr(THREAD_LOCAL, '_pydal_db_instances_zombie_', {}).clear()
if callable(action):
action(None)
return
def _find_work_folder(self):
self.folder = getattr(THREAD_LOCAL, '_pydal_folder_', '')
def after_connection_hook(self):
"""Hook for the after_connection parameter"""
if callable(self._after_connection):
self._after_connection(self)
self.after_connection()
def after_connection(self):
# this is supposed to be overloaded by adapters
pass
def reconnect(self):
"""
Defines `self.connection` and `self.cursor`.
If `self.pool_size > 0` it will try to pull the connection from the pool;
if the connection is not active (closed by the db server) it will loop.
If `self.pool_size` is not set, or no active connection is available in
the pool, it makes a new one.
"""
if getattr(THREAD_LOCAL, self._connection_uname_, None) is not None:
return
if not self.pool_size:
self.connection = self.connector()
self.after_connection_hook()
else:
uri = self.uri
POOLS = ConnectionPool.POOLS
while True:
GLOBAL_LOCKER.acquire()
if uri not in POOLS:
POOLS[uri] = []
if POOLS[uri]:
self.connection = POOLS[uri].pop()
GLOBAL_LOCKER.release()
try:
if self.check_active_connection:
self.test_connection()
break
except:
pass
else:
GLOBAL_LOCKER.release()
self.connection = self.connector()
self.after_connection_hook()
break
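# Illustrative sketch of how an adapter is expected to drive the pool; the
# subclass, `connector` body and uri below are assumptions, not part of pydal:
#
#   class MyAdapter(ConnectionPool):
#       uri = 'mydb://localhost/test'
#       pool_size = 5
#       _after_connection = None
#       def connector(self):
#           return make_dbapi_connection(self.uri)  # hypothetical helper
#       def test_connection(self):
#           self.cursor.execute('SELECT 1;')
#
#   adapter = MyAdapter()
#   adapter.reconnect()      # pulls a pooled connection or opens a new one
#   adapter.close('commit')  # commits, then recycles the connection into POOLS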
| bsd-3-clause | 647,975,468,857,402,200 | 31.805882 | 80 | 0.551013 | false |
pmav99/praktoras | checks.d/windows_service.py | 9 | 3049 | """ Collect status information for Windows services
"""
# project
from checks import AgentCheck
from checks.wmi_check import WinWMICheck
from utils.containers import hash_mutable
from utils.timeout import TimeoutException
class WindowsService(WinWMICheck):
STATE_TO_VALUE = {
'Stopped': AgentCheck.CRITICAL,
'Start Pending': AgentCheck.WARNING,
'Stop Pending': AgentCheck.WARNING,
'Running': AgentCheck.OK,
'Continue Pending': AgentCheck.WARNING,
'Pause Pending': AgentCheck.WARNING,
'Paused': AgentCheck.WARNING,
'Unknown': AgentCheck.UNKNOWN
}
NAMESPACE = "root\\CIMV2"
CLASS = "Win32_Service"
def __init__(self, name, init_config, agentConfig, instances):
WinWMICheck.__init__(self, name, init_config, agentConfig, instances)
def check(self, instance):
# Connect to the WMI provider
host = instance.get('host', "localhost")
user = instance.get('username', "")
password = instance.get('password', "")
services = instance.get('services', [])
instance_hash = hash_mutable(instance)
instance_key = self._get_instance_key(host, self.NAMESPACE, self.CLASS, instance_hash)
tags = [] if (host == "localhost" or host == ".") else [u'host:{0}'.format(host)]
if len(services) == 0:
raise Exception('No services defined in windows_service.yaml')
properties = ["Name", "State"]
filters = map(lambda x: {"Name": tuple(('=', x))}, services)
wmi_sampler = self._get_wmi_sampler(
instance_key,
self.CLASS, properties,
filters=filters,
host=host, namespace=self.NAMESPACE,
username=user, password=password
)
try:
# Sample, extract & submit metrics
wmi_sampler.sample()
except TimeoutException:
self.log.warning(
u"[WinService] WMI query timed out."
u" class={wmi_class} - properties={wmi_properties} -"
u" filters={filters} - tags={tags}".format(
wmi_class=self.CLASS, wmi_properties=properties,
filters=filters, tags=tags
)
)
else:
self._process_services(wmi_sampler, services, tags)
def _process_services(self, wmi_sampler, services, tags):
expected_services = set(services)
for wmi_obj in wmi_sampler:
service = wmi_obj['Name']
if service not in services:
continue
status = self.STATE_TO_VALUE.get(wmi_obj["state"], AgentCheck.UNKNOWN)
self.service_check("windows_service.state", status,
tags=tags + ['service:{0}'.format(service)])
expected_services.remove(service)
for service in expected_services:
self.service_check("windows_service.state", AgentCheck.CRITICAL,
tags=tags + ['service:{0}'.format(service)])
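# Illustrative sketch, not part of the original check: the shape of a
# windows_service.yaml instance as read by check() above. Service names and
# credentials are placeholders.
#
#   instances:
#     - host: localhost        # "localhost" or "." adds no host:<name> tag
#       username: ""
#       password: ""
#       services:
#         - wuauserv
#         - Dnscache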
| bsd-3-clause | 7,398,474,776,349,262,000 | 36.641975 | 94 | 0.58675 | false |
av8ramit/tensorflow | tensorflow/contrib/copy_graph/python/util/copy_test.py | 112 | 3739 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.copy_graph.python.util.copy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.copy_graph.python.util import copy_elements
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
graph1 = ops.Graph()
graph2 = ops.Graph()
class CopyVariablesTest(test.TestCase):
def testVariableCopy(self):
with graph1.as_default():
#Define a Variable in graph1
some_var = variables.Variable(2)
#Initialize session
sess1 = session_lib.Session()
#Initialize the Variable
variables.global_variables_initializer().run(session=sess1)
    #Make a copy of some_var in the default scope in graph2
copy1 = copy_elements.copy_variable_to_graph(some_var, graph2)
#Make another copy with different scope
copy2 = copy_elements.copy_variable_to_graph(some_var, graph2, "test_scope")
#Initialize both the copies
with graph2.as_default():
#Initialize Session
sess2 = session_lib.Session()
#Initialize the Variables
variables.global_variables_initializer().run(session=sess2)
#Ensure values in all three variables are the same
v1 = some_var.eval(session=sess1)
v2 = copy1.eval(session=sess2)
v3 = copy2.eval(session=sess2)
assert isinstance(copy1, variables.Variable)
assert isinstance(copy2, variables.Variable)
assert v1 == v2 == v3 == 2
class CopyOpsTest(test.TestCase):
def testOpsCopy(self):
with graph1.as_default():
#Initialize a basic expression y = ax + b
x = array_ops.placeholder("float")
a = variables.Variable(3.0)
b = constant_op.constant(4.0)
ax = math_ops.multiply(x, a)
y = math_ops.add(ax, b)
#Initialize session
sess1 = session_lib.Session()
#Initialize the Variable
variables.global_variables_initializer().run(session=sess1)
#First, initialize a as a Variable in graph2
a1 = copy_elements.copy_variable_to_graph(a, graph2)
#Initialize a1 in graph2
with graph2.as_default():
#Initialize session
sess2 = session_lib.Session()
#Initialize the Variable
variables.global_variables_initializer().run(session=sess2)
#Initialize a copy of y in graph2
y1 = copy_elements.copy_op_to_graph(y, graph2, [a1])
#Now that y has been copied, x must be copied too.
#Get that instance
x1 = copy_elements.get_copied_op(x, graph2)
#Compare values of y & y1 for a sample input
#and check if they match
v1 = y.eval({x: 5}, session=sess1)
v2 = y1.eval({x1: 5}, session=sess2)
assert v1 == v2
if __name__ == "__main__":
test.main()
| apache-2.0 | -514,260,967,706,795,900 | 32.383929 | 80 | 0.695641 | false |
jparyani/capnproto | doc/_plugins/capnp_lexer.py | 35 | 2103 | #! /usr/bin/env python
from pygments.lexer import RegexLexer
from pygments.token import *
class CapnpLexer(RegexLexer):
name = "Cap'n Proto lexer"
aliases = ['capnp']
filenames = ['*.capnp']
tokens = {
'root': [
(r'#.*?$', Comment.Single),
(r'@[0-9a-zA-Z]*', Name.Decorator),
(r'=', Literal, 'expression'),
(r':', Name.Class, 'type'),
(r'\$', Name.Attribute, 'annotation'),
(r'(struct|enum|interface|union|import|using|const|annotation|extends|in|of|on|as|with|from|fixed)\b',
Token.Keyword),
(r'[a-zA-Z0-9_.]+', Token.Name),
(r'[^#@=:$a-zA-Z0-9_]+', Text),
],
'type': [
(r'[^][=;,(){}$]+', Name.Class),
(r'[[(]', Name.Class, 'parentype'),
(r'', Name.Class, '#pop')
],
'parentype': [
(r'[^][;()]+', Name.Class),
(r'[[(]', Name.Class, '#push'),
(r'[])]', Name.Class, '#pop'),
(r'', Name.Class, '#pop')
],
'expression': [
(r'[^][;,(){}$]+', Literal),
(r'[[(]', Literal, 'parenexp'),
(r'', Literal, '#pop')
],
'parenexp': [
(r'[^][;()]+', Literal),
(r'[[(]', Literal, '#push'),
(r'[])]', Literal, '#pop'),
(r'', Literal, '#pop')
],
'annotation': [
(r'[^][;,(){}=:]+', Name.Attribute),
(r'[[(]', Name.Attribute, 'annexp'),
(r'', Name.Attribute, '#pop')
],
'annexp': [
(r'[^][;()]+', Name.Attribute),
(r'[[(]', Name.Attribute, '#push'),
(r'[])]', Name.Attribute, '#pop'),
(r'', Name.Attribute, '#pop')
],
}
if __name__ == "__main__":
from setuptools import setup, find_packages
setup(name = "CapnpPygmentsLexer",
version = "0.1",
packages = find_packages(),
py_modules = [ 'capnp_lexer' ],
entry_points = {'pygments.lexers': 'capnp = capnp_lexer:CapnpLexer'})
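# Illustrative sketch, not part of the original plugin: the lexer can be used
# directly without installing the entry point. The schema snippet is only an
# example input.
def _highlight_example():
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    schema = "struct Person {\n  name @0 :Text;\n  email @1 :Text;\n}\n"
    # Returns the schema text with ANSI colours applied by CapnpLexer.
    return highlight(schema, CapnpLexer(), TerminalFormatter())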
| mit | -155,919,753,465,851,780 | 31.859375 | 114 | 0.410842 | false |
inveniosoftware/iugw2017 | 3-datamodels/custom-data-module/custom_data_module/providers.py | 1 | 1974 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""PID providers."""
from __future__ import absolute_import, print_function
from invenio_pidstore.providers.base import BaseProvider
from invenio_pidstore.models import PIDStatus
class CustomRecordProvider(BaseProvider):
"""Record identifier provider."""
pid_type = 'custid'
"""Type of persistent identifier."""
pid_provider = None
"""Provider name.
The provider name is not recorded in the PID since the provider does not
provide any additional features besides creation of record ids.
"""
default_status = PIDStatus.REGISTERED
"""Record UUIDs are registered immediately."""
@classmethod
def create(cls, object_type=None, object_uuid=None, **kwargs):
"""Create a new record identifier from the depoist PID value."""
assert 'pid_value' in kwargs
kwargs.setdefault('status', cls.default_status)
return super(CustomRecordProvider, cls).create(
object_type=object_type, object_uuid=object_uuid, **kwargs)
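# Illustrative sketch, not part of the original module: the caller supplies the
# PID value explicitly (here a placeholder), typically taken from the deposit.
#
#   provider = CustomRecordProvider.create(
#       object_type='rec', object_uuid=record_uuid, pid_value='custom-1')
#   provider.pid.pid_type   # 'custid'
#   provider.pid.status     # PIDStatus.REGISTERED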
| gpl-3.0 | -4,090,832,047,793,225,000 | 35.555556 | 76 | 0.727964 | false |
PowerDNS/exabgp | dev/unittest2/connection.py | 6 | 1401 | #!/usr/bin/env python
# encoding: utf-8
"""
connection.py
Created by Thomas Mangin on 2013-07-13.
Copyright (c) 2009-2013 Exa Networks. All rights reserved.
"""
import os
import sys
import unittest
import cProfile
from exabgp.util.od import od
def test ():
OPEN = ''.join([chr(int(_,16)) for _ in "FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF 00 1D 01 04 78 14 00 5A 52 DB 00 45 00".split()])
KEEP = ''.join([chr(int(_,16)) for _ in "FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF 00 00 04".split()])
from exabgp.reactor.network.outgoing import Outgoing
connection = Outgoing(1,'82.219.0.5','82.219.212.34')
writer=connection._writer(OPEN)
while writer() == False:
pass
writer=connection._writer(KEEP)
while writer() == False:
pass
reader=connection.reader()
for size,kind,header,body in reader:
if size: print od(header+body)
else: sys.stdout.write('-')
reader=connection.reader()
for size,kind,header,body in reader:
if size: print od(header+body)
else: sys.stdout.write('+')
connection.close()
class TestData (unittest.TestCase):
def test_1 (self):
if not os.environ.get('profile',False):
result = test()
if result: self.fail(result)
def test_2 (self):
if not not os.environ.get('profile',False):
cProfile.run('test()')
if __name__ == '__main__':
unittest.main()
# import cProfile
# print 'profiling'
# cProfile.run('unittest.main()','profile.info')
| bsd-3-clause | 6,275,132,405,746,301,000 | 22.35 | 139 | 0.673804 | false |
infoxchange/django-localflavor | localflavor/nz/forms.py | 4 | 4267 | # -*- coding: utf-8 -*-
"""
New Zealand specific form helpers
"""
from __future__ import unicode_literals
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
from .nz_councils import NORTH_ISLAND_COUNCIL_CHOICES, SOUTH_ISLAND_COUNCIL_CHOICES
from .nz_provinces import PROVINCE_CHOICES
from .nz_regions import REGION_CHOICES
PHONE_08_RE = re.compile(r'^((0800\d{6})|(0800\w{6,10}))$')
PHONE_IN_RE = re.compile(r'^((0064|064|\+64|\+\+64)((\d{8})|(2\d{7,9})))$')
PHONE_NZ_RE = re.compile(r'^((0\d{8})|(02\d{7,9}))$')
BANK_ACCOUNT_NUMBER_RE = re.compile(r'^(\d{2})(\d{4})(\d{7})(\d{2,3})$')
class NZRegionSelect(Select):
"""
A select widget with list of New Zealand regions as its choices.
"""
def __init__(self, attrs=None):
super(NZRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class NZProvinceSelect(Select):
"""
A select widget with list of New Zealand provinces as its choices.
"""
def __init__(self, attrs=None):
super(NZProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class NZNorthIslandCouncilSelect(Select):
"""
A select widget with list of New Zealand North Island city and district councils as its choices.
"""
def __init__(self, attrs=None):
super(NZNorthIslandCouncilSelect, self).__init__(attrs, choices=NORTH_ISLAND_COUNCIL_CHOICES)
class NZSouthIslandCouncilSelect(Select):
"""
A select widget with list of New Zealand South Island city and district councils as its choices.
"""
def __init__(self, attrs=None):
super(NZSouthIslandCouncilSelect, self).__init__(attrs, choices=SOUTH_ISLAND_COUNCIL_CHOICES)
class NZPostCodeField(RegexField):
"""
A form field that validates its input as New Zealand postal code.
"""
default_error_messages = {
'invalid': _('Invalid post code.'),
}
def __init__(self, *args, **kwargs):
super(NZPostCodeField, self).__init__(r'^\d{4}$',
*args, **kwargs)
class NZPhoneNumberField(Field):
"""
A form field that validates its input as New Zealand phone number.
"""
default_error_messages = {'invalid': _('Invalid phone number.')}
def clean(self, value):
super(NZPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ''
value = re.sub('(\(|\)|\s+|_|-)', '', smart_str(value))
value = re.sub('^(\+\+)', '00', smart_str(value))
value = re.sub('^(\+)', '00', smart_str(value))
phone_08_match = PHONE_08_RE.search(value)
if phone_08_match:
return '%s' % phone_08_match.group(0)
phone_nz_match = PHONE_NZ_RE.search(value)
if phone_nz_match:
return '%s' % phone_nz_match.group(0)
phone_in_match = PHONE_IN_RE.search(value)
if phone_in_match:
return '%s' % phone_in_match.group(0)
raise ValidationError(self.error_messages['invalid'])
class NZBankAccountNumberField(Field):
"""
A form field that validates its input as New Zealand bank account number.
Formats:
XX-XXXX-XXXXXXX-XX
XX-XXXX-XXXXXXX-XXX
Where:
* the first two digits is the bank ID
* the next four digits are the branch number where the account was opened
* the next 7 digits are the account numbers
* the last two or three digits define type of the account.
"""
default_error_messages = {
'invalid': _('Invalid bank account number.'),
}
def clean(self, value):
super(NZBankAccountNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ''
value = re.sub('(\s+|-)', '', smart_str(value))
match = BANK_ACCOUNT_NUMBER_RE.search(value)
if match:
# normalize the last part
last = '0%s' % match.group(4) if len(match.group(4)) == 2 else match.group(4)
return '%s-%s-%s-%s' % (match.group(1),
match.group(2), match.group(3), last)
raise ValidationError(self.error_messages['invalid'])
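# Illustrative sketch, not part of the original module: how the fields above
# normalise their input (the values are made up).
#
#   >>> NZBankAccountNumberField().clean('01 0202 0123456 01')
#   '01-0202-0123456-001'    # two-digit suffixes are zero-padded to three
#   >>> NZPhoneNumberField().clean('+64 21 123 4567')
#   '0064211234567'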
| bsd-3-clause | -3,785,158,740,881,121,300 | 29.478571 | 101 | 0.627842 | false |
huanpc/IoT-1 | gui/controller/.venv/lib/python3.5/site-packages/rest_framework/test.py | 7 | 8635 | # -- coding: utf-8 --
# Note that we import as `DjangoRequestFactory` and `DjangoClient` in order
# to make it harder for the user to import the wrong thing without realizing.
from __future__ import unicode_literals
from django.conf import settings
from django.test import testcases
from django.test.client import Client as DjangoClient
from django.test.client import RequestFactory as DjangoRequestFactory
from django.test.client import ClientHandler
from django.utils import six
from django.utils.encoding import force_bytes
from django.utils.http import urlencode
from rest_framework.settings import api_settings
def force_authenticate(request, user=None, token=None):
request._force_auth_user = user
request._force_auth_token = token
class APIRequestFactory(DjangoRequestFactory):
renderer_classes_list = api_settings.TEST_REQUEST_RENDERER_CLASSES
default_format = api_settings.TEST_REQUEST_DEFAULT_FORMAT
def __init__(self, enforce_csrf_checks=False, **defaults):
self.enforce_csrf_checks = enforce_csrf_checks
self.renderer_classes = {}
for cls in self.renderer_classes_list:
self.renderer_classes[cls.format] = cls
super(APIRequestFactory, self).__init__(**defaults)
def _encode_data(self, data, format=None, content_type=None):
"""
Encode the data returning a two tuple of (bytes, content_type)
"""
if data is None:
return ('', content_type)
assert format is None or content_type is None, (
'You may not set both `format` and `content_type`.'
)
if content_type:
# Content type specified explicitly, treat data as a raw bytestring
ret = force_bytes(data, settings.DEFAULT_CHARSET)
else:
format = format or self.default_format
assert format in self.renderer_classes, (
"Invalid format '{0}'. Available formats are {1}. "
"Set TEST_REQUEST_RENDERER_CLASSES to enable "
"extra request formats.".format(
format,
', '.join(["'" + fmt + "'" for fmt in self.renderer_classes.keys()])
)
)
# Use format and render the data into a bytestring
renderer = self.renderer_classes[format]()
ret = renderer.render(data)
# Determine the content-type header from the renderer
content_type = "{0}; charset={1}".format(
renderer.media_type, renderer.charset
)
# Coerce text to bytes if required.
if isinstance(ret, six.text_type):
ret = bytes(ret.encode(renderer.charset))
return ret, content_type
def get(self, path, data=None, **extra):
r = {
'QUERY_STRING': urlencode(data or {}, doseq=True),
}
# Fix to support old behavior where you have the arguments in the url
# See #1461
if not data and '?' in path:
r['QUERY_STRING'] = path.split('?')[1]
r.update(extra)
return self.generic('GET', path, **r)
def post(self, path, data=None, format=None, content_type=None, **extra):
data, content_type = self._encode_data(data, format, content_type)
return self.generic('POST', path, data, content_type, **extra)
def put(self, path, data=None, format=None, content_type=None, **extra):
data, content_type = self._encode_data(data, format, content_type)
return self.generic('PUT', path, data, content_type, **extra)
def patch(self, path, data=None, format=None, content_type=None, **extra):
data, content_type = self._encode_data(data, format, content_type)
return self.generic('PATCH', path, data, content_type, **extra)
def delete(self, path, data=None, format=None, content_type=None, **extra):
data, content_type = self._encode_data(data, format, content_type)
return self.generic('DELETE', path, data, content_type, **extra)
def options(self, path, data=None, format=None, content_type=None, **extra):
data, content_type = self._encode_data(data, format, content_type)
return self.generic('OPTIONS', path, data, content_type, **extra)
def request(self, **kwargs):
request = super(APIRequestFactory, self).request(**kwargs)
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
return request
class ForceAuthClientHandler(ClientHandler):
"""
A patched version of ClientHandler that can enforce authentication
on the outgoing requests.
"""
def __init__(self, *args, **kwargs):
self._force_user = None
self._force_token = None
super(ForceAuthClientHandler, self).__init__(*args, **kwargs)
def get_response(self, request):
# This is the simplest place we can hook into to patch the
# request object.
force_authenticate(request, self._force_user, self._force_token)
return super(ForceAuthClientHandler, self).get_response(request)
class APIClient(APIRequestFactory, DjangoClient):
def __init__(self, enforce_csrf_checks=False, **defaults):
super(APIClient, self).__init__(**defaults)
self.handler = ForceAuthClientHandler(enforce_csrf_checks)
self._credentials = {}
def credentials(self, **kwargs):
"""
Sets headers that will be used on every outgoing request.
"""
self._credentials = kwargs
def force_authenticate(self, user=None, token=None):
"""
Forcibly authenticates outgoing requests with the given
user and/or token.
"""
self.handler._force_user = user
self.handler._force_token = token
if user is None:
self.logout() # Also clear any possible session info if required
def request(self, **kwargs):
# Ensure that any credentials set get added to every request.
kwargs.update(self._credentials)
return super(APIClient, self).request(**kwargs)
def get(self, path, data=None, follow=False, **extra):
response = super(APIClient, self).get(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def post(self, path, data=None, format=None, content_type=None,
follow=False, **extra):
response = super(APIClient, self).post(
path, data=data, format=format, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def put(self, path, data=None, format=None, content_type=None,
follow=False, **extra):
response = super(APIClient, self).put(
path, data=data, format=format, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def patch(self, path, data=None, format=None, content_type=None,
follow=False, **extra):
response = super(APIClient, self).patch(
path, data=data, format=format, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def delete(self, path, data=None, format=None, content_type=None,
follow=False, **extra):
response = super(APIClient, self).delete(
path, data=data, format=format, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def options(self, path, data=None, format=None, content_type=None,
follow=False, **extra):
response = super(APIClient, self).options(
path, data=data, format=format, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def logout(self):
self._credentials = {}
# Also clear any `force_authenticate`
self.handler._force_user = None
self.handler._force_token = None
if self.session:
super(APIClient, self).logout()
class APITransactionTestCase(testcases.TransactionTestCase):
client_class = APIClient
class APITestCase(testcases.TestCase):
client_class = APIClient
class APISimpleTestCase(testcases.SimpleTestCase):
client_class = APIClient
class APILiveServerTestCase(testcases.LiveServerTestCase):
client_class = APIClient
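# Illustrative sketch, not part of REST framework: typical use inside a test
# case. The URL, token and user object are placeholders.
#
#   class AccountTests(APITestCase):
#       def test_detail(self):
#           self.client.credentials(HTTP_AUTHORIZATION='Token abc123')
#           # or bypass authentication entirely:
#           # self.client.force_authenticate(user=some_user)
#           response = self.client.get('/accounts/1/')
#           self.assertEqual(response.status_code, 200)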
| mit | -7,925,660,871,083,471,000 | 36.543478 | 88 | 0.633353 | false |
shanot/imp | modules/multifit/test/test_connected_components.py | 2 | 1704 | import sys
import os
import IMP
import IMP.em
import IMP.test
import IMP.core
import IMP.atom
import IMP.multifit
class Tests(IMP.test.TestCase):
"""Test connected components """
def setUp(self):
"""Build test model and optimizer"""
IMP.test.TestCase.setUp(self)
IMP.set_log_level(IMP.VERBOSE) # SILENT)
def test_connected_components(self):
"""test connected components"""
for i in range(5):
# sample i populations
mdl = IMP.Model()
ps = []
            # create a map of i + 1 components
for j in range(i + 1):
bb = IMP.algebra.BoundingBox3D(
IMP.algebra.Vector3D(
-1 * (j + 1),
-1 * (j + 1),
-1 * (j + 1)),
IMP.algebra.Vector3D(1 * (j + 1), 1 * (j + 1), 1 * (j + 1)))
for k in range(10):
p = IMP.Particle(mdl)
center = IMP.algebra.get_random_vector_in(bb) \
+ IMP.algebra.Vector3D(j * 20, j * 20, j * 20)
IMP.core.XYZR.setup_particle(p,
IMP.algebra.Sphere3D(center, 2))
IMP.atom.Mass.setup_particle(p, 1)
ps.append(p)
dmap = IMP.em.particles2density(ps, 10, 1)
con_comp = IMP.multifit.get_connected_components(dmap, 0.001, 0.5)
for c in con_comp:
for ind in c:
self.assertLess(ind, dmap.get_number_of_voxels())
self.assertEqual(len(con_comp), i + 1)
if __name__ == '__main__':
IMP.test.main()
| gpl-3.0 | -9,053,860,916,981,820,000 | 32.411765 | 80 | 0.474178 | false |
yunlongliukm/chm1_scripts | AlignmentReaders.py | 2 | 1464 | #!/usr/bin/env python
class Alignment:
def __init__(self):
self.qname = ""
self.tname = ""
        self.qstart = 0
self.qend = 0
self.qstrand = 0
self.qlen = 0
self.tstart = 0
self.tend = 0
self.tstrand = 0
self.tlen = 0
self.score = 0
self.number = 0
self.identity = 0
def ToString(self):
members = ["qname", "tname", "qstart", "qend", "qstrand", "qlen", "tstart", "tend", "tstrand", "tlen", "score", "number", "identity"]
#return str(self.__dict__.values())
return " ".join([str(getattr(self,members[i])) for i in range(len(members))])
class M4Reader:
def __init__(self, filename):
self.fh = open(filename)
self.prev = None
def GetNext(self):
line = self.fh.readline()
if (line == ""):
return None
vals = line.split()
a = Alignment()
a.qname = vals[0]
a.tname = vals[1]
a.tstrand = int(vals[2])
a.qstrand = int(vals[3])
a.score = int(vals[4])
a.identity = float(vals[5])
a.tstart = int(vals[6])
a.tend = int(vals[7])
a.tlen = int(vals[8])
a.qstart = int(vals[9])
a.qend = int(vals[10])
a.qlen = int(vals[11])
if (self.prev is not None and self.prev.qname == a.qname):
a.number = self.prev.number + 1
self.prev = a
return a
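def _example_read_all(path):
    """Illustrative sketch, not part of the original module.

    Iterates an .m4 file whose whitespace-separated columns follow the order
    consumed by GetNext() above:
    qname tname tstrand qstrand score identity tstart tend tlen qstart qend qlen
    """
    reader = M4Reader(path)
    lines = []
    a = reader.GetNext()
    while a is not None:
        lines.append(a.ToString())
        a = reader.GetNext()
    return lines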
| mit | 1,735,863,842,657,650,400 | 26.622642 | 141 | 0.495219 | false |
jriehl/numba | numba/roc/hsadrv/devices.py | 2 | 3256 | """
Expose each GPU device directly
"""
from __future__ import print_function, absolute_import, division
import functools
from numba import servicelib
from .driver import hsa as driver, Context as _Context
class _culist(object):
"""A thread local list of GPU instances
"""
def __init__(self):
self._lst = None
@property
def _gpus(self):
if not self._lst:
self._lst = self._init_gpus()
return self._lst
def _init_gpus(self):
gpus = []
for com in driver.components:
gpus.append(CU(com))
return gpus
def __getitem__(self, item):
return self._gpus[item]
def append(self, item):
return self._gpus.append(item)
def __len__(self):
return len(self._gpus)
def __nonzero__(self):
return bool(self._gpus)
def __iter__(self):
return iter(self._gpus)
__bool__ = __nonzero__
def reset(self):
for gpu in self:
gpu.reset()
@property
def current(self):
"""Get the current GPU object associated with the thread
"""
return _custack.top
cus = _culist()
del _culist
class CU(object):
def __init__(self, cu):
self._cu = cu
self._context = None
def __getattr__(self, key):
"""Redirect to self._gpu
"""
if key.startswith('_'):
raise AttributeError(key)
return getattr(self._cu, key)
def __repr__(self):
return repr(self._cu)
def associate_context(self):
"""Associate the context of this GPU to the running thread
"""
# No context was created for this GPU
if self._context is None:
self._context = self._cu.create_context()
return self._context
def __enter__(self):
self.associate_context()
_custack.push(self)
def __exit__(self, exc_type, exc_val, exc_tb):
assert _get_device() is self
self._context.pop()
_custack.pop()
def reset(self):
if self._context:
self._context.reset()
self._context = None
_cpu_context = None
def get_cpu_context():
global _cpu_context
if _cpu_context is None:
cpu_agent = [a for a in driver.agents if not a.is_component][0]
_cpu_context = _Context(cpu_agent)
return _cpu_context
def get_gpu(i):
return cus[i]
def get_num_gpus():
return len(cus)
_custack = servicelib.TLStack()
def _get_device(devnum=0):
"""Get the current device or use a device by device number.
"""
if not _custack:
_custack.push(get_gpu(devnum))
return _custack.top
def get_context(devnum=0):
"""Get the current device or use a device by device number, and
return the HSA context.
"""
return _get_device(devnum=devnum).associate_context()
def get_all_contexts():
return [get_context(i) for i in range(get_num_gpus())]
def require_context(fn):
"""
A decorator to ensure a context for the HSA subsystem
"""
@functools.wraps(fn)
def _require_cu_context(*args, **kws):
get_context()
return fn(*args, **kws)
return _require_cu_context
def reset():
cus.reset()
_custack.clear()
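# Illustrative sketch, not part of the original module: how the helpers above
# are meant to be combined. The body of the function is a placeholder.
#
#   @require_context
#   def run_on_first_cu():
#       ctx = get_context(0)    # HSA context of component 0
#       ...                     # allocate memory, launch kernels, etc.
#
# reset() tears down any contexts created this way and clears the CU stack.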
| bsd-2-clause | 2,244,699,453,681,832,400 | 19.738854 | 71 | 0.577088 | false |
prestoncarman/vxquery | vxquery-server/src/main/resources/scripts/cluster_cli.py | 11 | 3506 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, getopt, os
# Custom modules.
from cluster_actions import *
def main(argv):
action = ""
cluster_file_name = ""
deploy_path = ""
try:
opts, args = getopt.getopt(argv, "a:c:d:h", ["action=", "deploy_path="])
except getopt.GetoptError:
print 'The file options for cluster_cli.py were not correctly specified.'
print 'To see a full list of options try:'
print ' $ python cluster_cli.py -h'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'Options for pushing a benchmark:'
print ' -a (str) The action to perform (deploy, start, or stop).'
print ' -c The XML cluster configuration file.'
sys.exit()
elif opt in ('-a', "--action"):
# check if file exists.
if arg in ('deploy', 'start', 'stop', 'kill'):
action = arg
else:
print 'Error: Argument must be a string ("deploy", "start", "stop", or "kill") for --action (-a).'
sys.exit()
elif opt in ('-c', "--cluster"):
# check if file exists.
if os.path.exists(arg):
cluster_file_name = arg
else:
print 'Error: Argument must be a file name for --cluster (-c).'
sys.exit()
elif opt in ('-d', "--deploy_folder"):
# check if file exists.
if os.path.exists(arg):
if os.path.basename(arg) == "":
deploy_path = os.path.dirname(arg)
else:
deploy_path = arg
else:
print 'Error: Argument must be a file name for --deploy_folder (-d).'
sys.exit()
# Required fields to run the script.
if cluster_file_name == "" or not os.path.exists(cluster_file_name):
print 'Error: The cluster XML file option must be supplied: --cluster (-c).'
sys.exit()
# The action to take on the cluster.
cluster = ClusterActions(cluster_file_name)
if action == 'start':
cluster.start()
elif action == 'stop':
cluster.stop_cluster()
elif action == 'kill':
cluster.stop()
elif action == 'deploy':
if deploy_path != "":
cluster.deploy(deploy_path)
else:
print 'Error: The cluster cli must have a deploy_folder option when doing the deploy action: --deploy_folder (-d).'
sys.exit()
else:
print 'Error: The cluster cli must have an action option must be supplied: --action (-a).'
sys.exit()
if __name__ == "__main__":
main(sys.argv[1:])
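# Illustrative invocations, not part of the original script; the XML file and
# deploy folder are placeholders:
#
#   $ python cluster_cli.py -c cluster.xml -a deploy -d /opt/vxquery/
#   $ python cluster_cli.py -c cluster.xml -a start
#   $ python cluster_cli.py -c cluster.xml -a stop
#   $ python cluster_cli.py -c cluster.xml -a kill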
| apache-2.0 | 6,922,124,569,572,692,000 | 38.393258 | 127 | 0.577581 | false |
dyyi/moneybook | venv/Lib/site-packages/pip/_vendor/requests/packages/urllib3/fields.py | 200 | 5872 | from __future__ import absolute_import
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except UnicodeEncodeError:
pass
else:
return result
if not six.PY3: # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
        a parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None,
content_location=None):
"""
Makes this request field into a multipart request field.
This method overrides "Content-Disposition", "Content-Type" and
"Content-Location" headers to the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join([
'', self._render_parts(
(('name', self._name), ('filename', self._filename))
)
])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
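# Illustrative sketch, not part of the original module: building a multipart
# field from the tuple shorthand described in from_tuples(). The file name and
# payload are placeholders.
#
#   field = RequestField.from_tuples('report', ('report.txt', 'hello', 'text/plain'))
#   field.render_headers()
#   # 'Content-Disposition: form-data; name="report"; filename="report.txt"\r\n'
#   # 'Content-Type: text/plain\r\n\r\n'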
| apache-2.0 | 3,371,072,089,198,396,000 | 31.988764 | 88 | 0.586683 | false |
thaumos/ansible | lib/ansible/modules/network/fortios/fortios_webfilter_content.py | 24 | 10670 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_webfilter_content
short_description: Configure Web filter banned word table in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure webfilter feature and content category.
      Examples include all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
webfilter_content:
description:
- Configure Web filter banned word table.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
comment:
description:
- Optional comments.
entries:
description:
- Configure banned word entries.
suboptions:
action:
description:
- Block or exempt word when a match is found.
choices:
- block
- exempt
lang:
description:
- Language of banned word.
choices:
- western
- simch
- trach
- japanese
- korean
- french
- thai
- spanish
- cyrillic
name:
description:
- Banned word.
required: true
pattern-type:
description:
- "Banned word pattern type: wildcard pattern or Perl regular expression."
choices:
- wildcard
- regexp
score:
description:
- Score, to be applied every time the word appears on a web page (0 - 4294967295, default = 10).
status:
description:
- Enable/disable banned word.
choices:
- enable
- disable
id:
description:
- ID.
required: true
name:
description:
- Name of table.
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure Web filter banned word table.
fortios_webfilter_content:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
webfilter_content:
state: "present"
comment: "Optional comments."
entries:
-
action: "block"
lang: "western"
name: "default_name_7"
pattern-type: "wildcard"
score: "9"
status: "enable"
id: "11"
name: "default_name_12"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_webfilter_content_data(json):
option_list = ['comment', 'entries', 'id',
'name']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def webfilter_content(data, fos):
vdom = data['vdom']
webfilter_content_data = data['webfilter_content']
filtered_data = filter_webfilter_content_data(webfilter_content_data)
if webfilter_content_data['state'] == "present":
return fos.set('webfilter',
'content',
data=filtered_data,
vdom=vdom)
elif webfilter_content_data['state'] == "absent":
return fos.delete('webfilter',
'content',
mkey=filtered_data['id'],
vdom=vdom)
def fortios_webfilter(data, fos):
login(data)
methodlist = ['webfilter_content']
for method in methodlist:
if data[method]:
resp = eval(method)(data, fos)
break
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": "False"},
"webfilter_content": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"comment": {"required": False, "type": "str"},
"entries": {"required": False, "type": "list",
"options": {
"action": {"required": False, "type": "str",
"choices": ["block", "exempt"]},
"lang": {"required": False, "type": "str",
"choices": ["western", "simch", "trach",
"japanese", "korean", "french",
"thai", "spanish", "cyrillic"]},
"name": {"required": True, "type": "str"},
"pattern-type": {"required": False, "type": "str",
"choices": ["wildcard", "regexp"]},
"score": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"id": {"required": True, "type": "int"},
"name": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_webfilter(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | -2,413,479,616,187,359,000 | 30.568047 | 124 | 0.515558 | false |
kemalakyol48/python-for-android | python-modules/twisted/twisted/names/test/test_names.py | 49 | 31329 | # -*- test-case-name: twisted.names.test.test_names -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.names.
"""
import socket, operator, copy
from twisted.trial import unittest
from twisted.internet import reactor, defer, error
from twisted.internet.defer import succeed
from twisted.names import client, server, common, authority, hosts, dns
from twisted.python import failure
from twisted.names.error import DNSFormatError, DNSServerError, DNSNameError
from twisted.names.error import DNSNotImplementedError, DNSQueryRefusedError
from twisted.names.error import DNSUnknownError
from twisted.names.dns import EFORMAT, ESERVER, ENAME, ENOTIMP, EREFUSED
from twisted.names.dns import Message
from twisted.names.client import Resolver
from twisted.names.test.test_client import StubPort
from twisted.python.compat import reduce
def justPayload(results):
return [r.payload for r in results[0]]
class NoFileAuthority(authority.FileAuthority):
def __init__(self, soa, records):
# Yes, skip FileAuthority
common.ResolverBase.__init__(self)
self.soa, self.records = soa, records
soa_record = dns.Record_SOA(
mname = 'test-domain.com',
rname = 'root.test-domain.com',
serial = 100,
refresh = 1234,
minimum = 7654,
expire = 19283784,
retry = 15,
ttl=1
)
reverse_soa = dns.Record_SOA(
mname = '93.84.28.in-addr.arpa',
rname = '93.84.28.in-addr.arpa',
serial = 120,
refresh = 54321,
minimum = 382,
expire = 11193983,
retry = 30,
ttl=3
)
my_soa = dns.Record_SOA(
mname = 'my-domain.com',
rname = 'postmaster.test-domain.com',
serial = 130,
refresh = 12345,
minimum = 1,
expire = 999999,
retry = 100,
)
test_domain_com = NoFileAuthority(
soa = ('test-domain.com', soa_record),
records = {
'test-domain.com': [
soa_record,
dns.Record_A('127.0.0.1'),
dns.Record_NS('39.28.189.39'),
dns.Record_SPF('v=spf1 mx/30 mx:example.org/30 -all'),
dns.Record_SPF('v=spf1 +mx a:\0colo', '.example.com/28 -all not valid'),
dns.Record_MX(10, 'host.test-domain.com'),
dns.Record_HINFO(os='Linux', cpu='A Fast One, Dontcha know'),
dns.Record_CNAME('canonical.name.com'),
dns.Record_MB('mailbox.test-domain.com'),
dns.Record_MG('mail.group.someplace'),
dns.Record_TXT('A First piece of Text', 'a SecoNd piece'),
dns.Record_A6(0, 'ABCD::4321', ''),
dns.Record_A6(12, '0:0069::0', 'some.network.tld'),
dns.Record_A6(8, '0:5634:1294:AFCB:56AC:48EF:34C3:01FF', 'tra.la.la.net'),
dns.Record_TXT('Some more text, haha! Yes. \0 Still here?'),
dns.Record_MR('mail.redirect.or.whatever'),
dns.Record_MINFO(rmailbx='r mail box', emailbx='e mail box'),
dns.Record_AFSDB(subtype=1, hostname='afsdb.test-domain.com'),
dns.Record_RP(mbox='whatever.i.dunno', txt='some.more.text'),
dns.Record_WKS('12.54.78.12', socket.IPPROTO_TCP,
'\x12\x01\x16\xfe\xc1\x00\x01'),
dns.Record_NAPTR(100, 10, "u", "sip+E2U",
"!^.*$!sip:[email protected]!"),
dns.Record_AAAA('AF43:5634:1294:AFCB:56AC:48EF:34C3:01FF')],
'http.tcp.test-domain.com': [
dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool')
],
'host.test-domain.com': [
dns.Record_A('123.242.1.5'),
dns.Record_A('0.255.0.255'),
],
'host-two.test-domain.com': [
#
# Python bug
# dns.Record_A('255.255.255.255'),
#
dns.Record_A('255.255.255.254'),
dns.Record_A('0.0.0.0')
],
'cname.test-domain.com': [
dns.Record_CNAME('test-domain.com')
],
'anothertest-domain.com': [
dns.Record_A('1.2.3.4')],
}
)
reverse_domain = NoFileAuthority(
soa = ('93.84.28.in-addr.arpa', reverse_soa),
records = {
'123.93.84.28.in-addr.arpa': [
dns.Record_PTR('test.host-reverse.lookup.com'),
reverse_soa
]
}
)
my_domain_com = NoFileAuthority(
soa = ('my-domain.com', my_soa),
records = {
'my-domain.com': [
my_soa,
dns.Record_A('1.2.3.4', ttl='1S'),
dns.Record_NS('ns1.domain', ttl='2M'),
dns.Record_NS('ns2.domain', ttl='3H'),
dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool', ttl='4D')
]
}
)
class ServerDNSTestCase(unittest.TestCase):
"""
Test cases for DNS server and client.
"""
def setUp(self):
self.factory = server.DNSServerFactory([
test_domain_com, reverse_domain, my_domain_com
], verbose=2)
p = dns.DNSDatagramProtocol(self.factory)
while 1:
listenerTCP = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
# It's simpler to do the stop listening with addCleanup,
# even though we might not end up using this TCP port in
# the test (if the listenUDP below fails). Cleaning up
# this TCP port sooner than "cleanup time" would mean
# adding more code to keep track of the Deferred returned
# by stopListening.
self.addCleanup(listenerTCP.stopListening)
port = listenerTCP.getHost().port
try:
listenerUDP = reactor.listenUDP(port, p, interface="127.0.0.1")
except error.CannotListenError:
pass
else:
self.addCleanup(listenerUDP.stopListening)
break
self.listenerTCP = listenerTCP
self.listenerUDP = listenerUDP
self.resolver = client.Resolver(servers=[('127.0.0.1', port)])
def tearDown(self):
"""
Clean up any server connections associated with the
L{DNSServerFactory} created in L{setUp}
"""
# It'd be great if DNSServerFactory had a method that
# encapsulated this task. At least the necessary data is
# available, though.
for conn in self.factory.connections[:]:
conn.transport.loseConnection()
def namesTest(self, d, r):
self.response = None
def setDone(response):
self.response = response
def checkResults(ignored):
if isinstance(self.response, failure.Failure):
raise self.response
results = justPayload(self.response)
assert len(results) == len(r), "%s != %s" % (map(str, results), map(str, r))
for rec in results:
assert rec in r, "%s not in %s" % (rec, map(str, r))
d.addBoth(setDone)
d.addCallback(checkResults)
return d
def testAddressRecord1(self):
"""Test simple DNS 'A' record queries"""
return self.namesTest(
self.resolver.lookupAddress('test-domain.com'),
[dns.Record_A('127.0.0.1', ttl=19283784)]
)
def testAddressRecord2(self):
"""Test DNS 'A' record queries with multiple answers"""
return self.namesTest(
self.resolver.lookupAddress('host.test-domain.com'),
[dns.Record_A('123.242.1.5', ttl=19283784), dns.Record_A('0.255.0.255', ttl=19283784)]
)
def testAddressRecord3(self):
"""Test DNS 'A' record queries with edge cases"""
return self.namesTest(
self.resolver.lookupAddress('host-two.test-domain.com'),
[dns.Record_A('255.255.255.254', ttl=19283784), dns.Record_A('0.0.0.0', ttl=19283784)]
)
def testAuthority(self):
"""Test DNS 'SOA' record queries"""
return self.namesTest(
self.resolver.lookupAuthority('test-domain.com'),
[soa_record]
)
def testMailExchangeRecord(self):
"""Test DNS 'MX' record queries"""
return self.namesTest(
self.resolver.lookupMailExchange('test-domain.com'),
[dns.Record_MX(10, 'host.test-domain.com', ttl=19283784)]
)
def testNameserver(self):
"""Test DNS 'NS' record queries"""
return self.namesTest(
self.resolver.lookupNameservers('test-domain.com'),
[dns.Record_NS('39.28.189.39', ttl=19283784)]
)
def testHINFO(self):
"""Test DNS 'HINFO' record queries"""
return self.namesTest(
self.resolver.lookupHostInfo('test-domain.com'),
[dns.Record_HINFO(os='Linux', cpu='A Fast One, Dontcha know', ttl=19283784)]
)
def testPTR(self):
"""Test DNS 'PTR' record queries"""
return self.namesTest(
self.resolver.lookupPointer('123.93.84.28.in-addr.arpa'),
[dns.Record_PTR('test.host-reverse.lookup.com', ttl=11193983)]
)
def testCNAME(self):
"""Test DNS 'CNAME' record queries"""
return self.namesTest(
self.resolver.lookupCanonicalName('test-domain.com'),
[dns.Record_CNAME('canonical.name.com', ttl=19283784)]
)
def testCNAMEAdditional(self):
"""Test additional processing for CNAME records"""
return self.namesTest(
self.resolver.lookupAddress('cname.test-domain.com'),
[dns.Record_CNAME('test-domain.com', ttl=19283784), dns.Record_A('127.0.0.1', ttl=19283784)]
)
def testMB(self):
"""Test DNS 'MB' record queries"""
return self.namesTest(
self.resolver.lookupMailBox('test-domain.com'),
[dns.Record_MB('mailbox.test-domain.com', ttl=19283784)]
)
def testMG(self):
"""Test DNS 'MG' record queries"""
return self.namesTest(
self.resolver.lookupMailGroup('test-domain.com'),
[dns.Record_MG('mail.group.someplace', ttl=19283784)]
)
def testMR(self):
"""Test DNS 'MR' record queries"""
return self.namesTest(
self.resolver.lookupMailRename('test-domain.com'),
[dns.Record_MR('mail.redirect.or.whatever', ttl=19283784)]
)
def testMINFO(self):
"""Test DNS 'MINFO' record queries"""
return self.namesTest(
self.resolver.lookupMailboxInfo('test-domain.com'),
[dns.Record_MINFO(rmailbx='r mail box', emailbx='e mail box', ttl=19283784)]
)
def testSRV(self):
"""Test DNS 'SRV' record queries"""
return self.namesTest(
self.resolver.lookupService('http.tcp.test-domain.com'),
[dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool', ttl=19283784)]
)
def testAFSDB(self):
"""Test DNS 'AFSDB' record queries"""
return self.namesTest(
self.resolver.lookupAFSDatabase('test-domain.com'),
[dns.Record_AFSDB(subtype=1, hostname='afsdb.test-domain.com', ttl=19283784)]
)
def testRP(self):
"""Test DNS 'RP' record queries"""
return self.namesTest(
self.resolver.lookupResponsibility('test-domain.com'),
[dns.Record_RP(mbox='whatever.i.dunno', txt='some.more.text', ttl=19283784)]
)
def testTXT(self):
"""Test DNS 'TXT' record queries"""
return self.namesTest(
self.resolver.lookupText('test-domain.com'),
[dns.Record_TXT('A First piece of Text', 'a SecoNd piece', ttl=19283784),
dns.Record_TXT('Some more text, haha! Yes. \0 Still here?', ttl=19283784)]
)
def test_spf(self):
"""
L{DNSServerFactory} can serve I{SPF} resource records.
"""
return self.namesTest(
self.resolver.lookupSenderPolicy('test-domain.com'),
[dns.Record_SPF('v=spf1 mx/30 mx:example.org/30 -all', ttl=19283784),
dns.Record_SPF('v=spf1 +mx a:\0colo', '.example.com/28 -all not valid', ttl=19283784)]
)
def testWKS(self):
"""Test DNS 'WKS' record queries"""
return self.namesTest(
self.resolver.lookupWellKnownServices('test-domain.com'),
[dns.Record_WKS('12.54.78.12', socket.IPPROTO_TCP, '\x12\x01\x16\xfe\xc1\x00\x01', ttl=19283784)]
)
def testSomeRecordsWithTTLs(self):
result_soa = copy.copy(my_soa)
result_soa.ttl = my_soa.expire
return self.namesTest(
self.resolver.lookupAllRecords('my-domain.com'),
[result_soa,
dns.Record_A('1.2.3.4', ttl='1S'),
dns.Record_NS('ns1.domain', ttl='2M'),
dns.Record_NS('ns2.domain', ttl='3H'),
dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool', ttl='4D')]
)
def testAAAA(self):
"""Test DNS 'AAAA' record queries (IPv6)"""
return self.namesTest(
self.resolver.lookupIPV6Address('test-domain.com'),
[dns.Record_AAAA('AF43:5634:1294:AFCB:56AC:48EF:34C3:01FF', ttl=19283784)]
)
def testA6(self):
"""Test DNS 'A6' record queries (IPv6)"""
return self.namesTest(
self.resolver.lookupAddress6('test-domain.com'),
[dns.Record_A6(0, 'ABCD::4321', '', ttl=19283784),
dns.Record_A6(12, '0:0069::0', 'some.network.tld', ttl=19283784),
dns.Record_A6(8, '0:5634:1294:AFCB:56AC:48EF:34C3:01FF', 'tra.la.la.net', ttl=19283784)]
)
def test_zoneTransfer(self):
"""
Test DNS 'AXFR' queries (Zone transfer)
"""
default_ttl = soa_record.expire
results = [copy.copy(r) for r in reduce(operator.add, test_domain_com.records.values())]
for r in results:
if r.ttl is None:
r.ttl = default_ttl
return self.namesTest(
self.resolver.lookupZone('test-domain.com').addCallback(lambda r: (r[0][:-1],)),
results
)
def testSimilarZonesDontInterfere(self):
"""Tests that unrelated zones don't mess with each other."""
return self.namesTest(
self.resolver.lookupAddress("anothertest-domain.com"),
[dns.Record_A('1.2.3.4', ttl=19283784)]
)
def test_NAPTR(self):
"""
Test DNS 'NAPTR' record queries.
"""
return self.namesTest(
self.resolver.lookupNamingAuthorityPointer('test-domain.com'),
[dns.Record_NAPTR(100, 10, "u", "sip+E2U",
"!^.*$!sip:[email protected]!",
ttl=19283784)])
class DNSServerFactoryTests(unittest.TestCase):
"""
Tests for L{server.DNSServerFactory}.
"""
def _messageReceivedTest(self, methodName, message):
"""
Assert that the named method is called with the given message when
it is passed to L{DNSServerFactory.messageReceived}.
"""
# Make it appear to have some queries so that
# DNSServerFactory.allowQuery allows it.
message.queries = [None]
receivedMessages = []
def fakeHandler(message, protocol, address):
receivedMessages.append((message, protocol, address))
class FakeProtocol(object):
def writeMessage(self, message):
pass
protocol = FakeProtocol()
factory = server.DNSServerFactory(None)
setattr(factory, methodName, fakeHandler)
factory.messageReceived(message, protocol)
self.assertEqual(receivedMessages, [(message, protocol, None)])
def test_notifyMessageReceived(self):
"""
L{DNSServerFactory.messageReceived} passes messages with an opcode
of C{OP_NOTIFY} on to L{DNSServerFactory.handleNotify}.
"""
# RFC 1996, section 4.5
opCode = 4
self._messageReceivedTest('handleNotify', Message(opCode=opCode))
def test_updateMessageReceived(self):
"""
L{DNSServerFactory.messageReceived} passes messages with an opcode
of C{OP_UPDATE} on to L{DNSServerFactory.handleOther}.
This may change if the implementation ever covers update messages.
"""
# RFC 2136, section 1.3
opCode = 5
self._messageReceivedTest('handleOther', Message(opCode=opCode))
def test_connectionTracking(self):
"""
The C{connectionMade} and C{connectionLost} methods of
L{DNSServerFactory} cooperate to keep track of all
L{DNSProtocol} objects created by a factory which are
connected.
"""
protoA, protoB = object(), object()
factory = server.DNSServerFactory()
factory.connectionMade(protoA)
self.assertEqual(factory.connections, [protoA])
factory.connectionMade(protoB)
self.assertEqual(factory.connections, [protoA, protoB])
factory.connectionLost(protoA)
self.assertEqual(factory.connections, [protoB])
factory.connectionLost(protoB)
self.assertEqual(factory.connections, [])
class HelperTestCase(unittest.TestCase):
def testSerialGenerator(self):
f = self.mktemp()
a = authority.getSerial(f)
for i in range(20):
b = authority.getSerial(f)
self.failUnless(a < b)
a = b
class AXFRTest(unittest.TestCase):
def setUp(self):
self.results = None
self.d = defer.Deferred()
self.d.addCallback(self._gotResults)
self.controller = client.AXFRController('fooby.com', self.d)
self.soa = dns.RRHeader(name='fooby.com', type=dns.SOA, cls=dns.IN, ttl=86400, auth=False,
payload=dns.Record_SOA(mname='fooby.com',
rname='hooj.fooby.com',
serial=100,
refresh=200,
retry=300,
expire=400,
minimum=500,
ttl=600))
self.records = [
self.soa,
dns.RRHeader(name='fooby.com', type=dns.NS, cls=dns.IN, ttl=700, auth=False,
payload=dns.Record_NS(name='ns.twistedmatrix.com', ttl=700)),
dns.RRHeader(name='fooby.com', type=dns.MX, cls=dns.IN, ttl=700, auth=False,
payload=dns.Record_MX(preference=10, exchange='mail.mv3d.com', ttl=700)),
dns.RRHeader(name='fooby.com', type=dns.A, cls=dns.IN, ttl=700, auth=False,
payload=dns.Record_A(address='64.123.27.105', ttl=700)),
self.soa
]
def _makeMessage(self):
# hooray they all have the same message format
return dns.Message(id=999, answer=1, opCode=0, recDes=0, recAv=1, auth=1, rCode=0, trunc=0, maxSize=0)
def testBindAndTNamesStyle(self):
# Bind style = One big single message
m = self._makeMessage()
m.queries = [dns.Query('fooby.com', dns.AXFR, dns.IN)]
m.answers = self.records
self.controller.messageReceived(m, None)
self.assertEquals(self.results, self.records)
def _gotResults(self, result):
self.results = result
def testDJBStyle(self):
# DJB style = message per record
records = self.records[:]
while records:
m = self._makeMessage()
m.queries = [] # DJB *doesn't* specify any queries.. hmm..
m.answers = [records.pop(0)]
self.controller.messageReceived(m, None)
self.assertEquals(self.results, self.records)
class HostsTestCase(unittest.TestCase):
def setUp(self):
f = open('EtcHosts', 'w')
f.write('''
1.1.1.1 EXAMPLE EXAMPLE.EXAMPLETHING
1.1.1.2 HOOJY
::1 ip6thingy
''')
f.close()
self.resolver = hosts.Resolver('EtcHosts')
def testGetHostByName(self):
data = [('EXAMPLE', '1.1.1.1'),
('EXAMPLE.EXAMPLETHING', '1.1.1.1'),
('HOOJY', '1.1.1.2'),
]
ds = [self.resolver.getHostByName(n).addCallback(self.assertEqual, ip)
for n, ip in data]
return defer.gatherResults(ds)
def testLookupAddress(self):
d = self.resolver.lookupAddress('HOOJY')
d.addCallback(lambda x: self.assertEqual(x[0][0].payload.dottedQuad(),
'1.1.1.2'))
return d
def testIPv6(self):
d = self.resolver.lookupIPV6Address('ip6thingy')
d.addCallback(self.assertEqual, '::1')
return d
testIPv6.skip = 'IPv6 support is not in our hosts resolver yet'
def testNotImplemented(self):
return self.assertFailure(self.resolver.lookupMailExchange('EXAMPLE'),
NotImplementedError)
def testQuery(self):
d = self.resolver.query(dns.Query('EXAMPLE'))
d.addCallback(lambda x: self.assertEqual(x[0][0].payload.dottedQuad(),
'1.1.1.1'))
return d
def testNotFound(self):
return self.assertFailure(self.resolver.lookupAddress('foueoa'),
dns.DomainError)
def test_searchFileFor(self):
"""
L{searchFileFor} parses hosts(5) files and returns the address for
the given name, or C{None} if the name is not found.
"""
tmp = self.mktemp()
f = open(tmp, 'w')
f.write('127.0.1.1 helmut.example.org helmut\n')
f.write('# a comment\n')
f.write('::1 localhost ip6-localhost ip6-loopback\n')
f.close()
self.assertEquals(hosts.searchFileFor(tmp, 'helmut'), '127.0.1.1')
self.assertEquals(hosts.searchFileFor(tmp, 'ip6-localhost'), '::1')
self.assertIdentical(hosts.searchFileFor(tmp, 'blah'), None)
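# Stub datagram protocol used by the RetryLogic test below: it records every
# query passed to it and immediately fails each one with DNSQueryTimeoutError.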
class FakeDNSDatagramProtocol(object):
def __init__(self):
self.queries = []
self.transport = StubPort()
def query(self, address, queries, timeout=10, id=None):
self.queries.append((address, queries, timeout, id))
return defer.fail(dns.DNSQueryTimeoutError(queries))
def removeResend(self, id):
# Ignore this for the time being.
pass
class RetryLogic(unittest.TestCase):
testServers = [
'1.2.3.4',
'4.3.2.1',
'a.b.c.d',
'z.y.x.w']
def testRoundRobinBackoff(self):
addrs = [(x, 53) for x in self.testServers]
r = client.Resolver(resolv=None, servers=addrs)
r.protocol = proto = FakeDNSDatagramProtocol()
return r.lookupAddress("foo.example.com"
).addCallback(self._cbRoundRobinBackoff
).addErrback(self._ebRoundRobinBackoff, proto
)
def _cbRoundRobinBackoff(self, result):
raise unittest.FailTest("Lookup address succeeded, should have timed out")
def _ebRoundRobinBackoff(self, failure, fakeProto):
failure.trap(defer.TimeoutError)
# Assert that each server is tried with a particular timeout
# before the timeout is increased and the attempts are repeated.
for t in (1, 3, 11, 45):
tries = fakeProto.queries[:len(self.testServers)]
del fakeProto.queries[:len(self.testServers)]
tries.sort()
expected = list(self.testServers)
expected.sort()
for ((addr, query, timeout, id), expectedAddr) in zip(tries, expected):
self.assertEquals(addr, (expectedAddr, 53))
self.assertEquals(timeout, t)
self.failIf(fakeProto.queries)
class ResolvConfHandling(unittest.TestCase):
def testMissing(self):
resolvConf = self.mktemp()
r = client.Resolver(resolv=resolvConf)
self.assertEquals(r.dynServers, [('127.0.0.1', 53)])
r._parseCall.cancel()
def testEmpty(self):
resolvConf = self.mktemp()
fObj = file(resolvConf, 'w')
fObj.close()
r = client.Resolver(resolv=resolvConf)
self.assertEquals(r.dynServers, [('127.0.0.1', 53)])
r._parseCall.cancel()
class FilterAnswersTests(unittest.TestCase):
"""
Test L{twisted.names.client.Resolver.filterAnswers}'s handling of various
error conditions it might encounter.
"""
def setUp(self):
# Create a resolver pointed at an invalid server - we won't be hitting
# the network in any of these tests.
self.resolver = Resolver(servers=[('0.0.0.0', 0)])
def test_truncatedMessage(self):
"""
Test that a truncated message results in an equivalent request made via
TCP.
"""
m = Message(trunc=True)
m.addQuery('example.com')
def queryTCP(queries):
self.assertEqual(queries, m.queries)
response = Message()
response.answers = ['answer']
response.authority = ['authority']
response.additional = ['additional']
return succeed(response)
self.resolver.queryTCP = queryTCP
d = self.resolver.filterAnswers(m)
d.addCallback(
self.assertEqual, (['answer'], ['authority'], ['additional']))
return d
def _rcodeTest(self, rcode, exc):
m = Message(rCode=rcode)
err = self.resolver.filterAnswers(m)
err.trap(exc)
def test_formatError(self):
"""
Test that a message with a result code of C{EFORMAT} results in a
failure wrapped around L{DNSFormatError}.
"""
return self._rcodeTest(EFORMAT, DNSFormatError)
def test_serverError(self):
"""
Like L{test_formatError} but for C{ESERVER}/L{DNSServerError}.
"""
return self._rcodeTest(ESERVER, DNSServerError)
def test_nameError(self):
"""
Like L{test_formatError} but for C{ENAME}/L{DNSNameError}.
"""
return self._rcodeTest(ENAME, DNSNameError)
def test_notImplementedError(self):
"""
Like L{test_formatError} but for C{ENOTIMP}/L{DNSNotImplementedError}.
"""
return self._rcodeTest(ENOTIMP, DNSNotImplementedError)
def test_refusedError(self):
"""
Like L{test_formatError} but for C{EREFUSED}/L{DNSQueryRefusedError}.
"""
return self._rcodeTest(EREFUSED, DNSQueryRefusedError)
def test_refusedErrorUnknown(self):
"""
Like L{test_formatError} but for an unrecognized error code and
L{DNSUnknownError}.
"""
return self._rcodeTest(EREFUSED + 1, DNSUnknownError)
class AuthorityTests(unittest.TestCase):
"""
Tests for the basic response record selection code in L{FileAuthority}
(independent of its fileness).
"""
def test_recordMissing(self):
"""
If a L{FileAuthority} has a zone which includes an I{NS} record for a
particular name and that authority is asked for another record for the
same name which does not exist, the I{NS} record is not included in the
authority section of the response.
"""
authority = NoFileAuthority(
soa=(str(soa_record.mname), soa_record),
records={
str(soa_record.mname): [
soa_record,
dns.Record_NS('1.2.3.4'),
]})
d = authority.lookupAddress(str(soa_record.mname))
result = []
d.addCallback(result.append)
answer, authority, additional = result[0]
self.assertEquals(answer, [])
self.assertEquals(
authority, [
dns.RRHeader(
str(soa_record.mname), soa_record.TYPE,
ttl=soa_record.expire, payload=soa_record,
auth=True)])
self.assertEquals(additional, [])
def _referralTest(self, method):
"""
Create an authority and make a request against it. Then verify that the
result is a referral, including no records in the answers or additional
sections, but with an I{NS} record in the authority section.
"""
subdomain = 'example.' + str(soa_record.mname)
nameserver = dns.Record_NS('1.2.3.4')
authority = NoFileAuthority(
soa=(str(soa_record.mname), soa_record),
records={
subdomain: [
nameserver,
]})
d = getattr(authority, method)(subdomain)
result = []
d.addCallback(result.append)
answer, authority, additional = result[0]
self.assertEquals(answer, [])
self.assertEquals(
authority, [dns.RRHeader(
subdomain, dns.NS, ttl=soa_record.expire,
payload=nameserver, auth=False)])
self.assertEquals(additional, [])
def test_referral(self):
"""
When an I{NS} record is found for a child zone, it is included in the
authority section of the response. It is marked as non-authoritative if
the authority is not also authoritative for the child zone (RFC 2181,
section 6.1).
"""
self._referralTest('lookupAddress')
def test_allRecordsReferral(self):
"""
A referral is also generated for a request of type C{ALL_RECORDS}.
"""
self._referralTest('lookupAllRecords')
class NoInitialResponseTestCase(unittest.TestCase):
def test_no_answer(self):
"""
If a request returns a L{dns.NS} response, but we can't connect to the
given server, the request fails with the error returned at connection.
"""
def query(self, *args):
# Pop from the message list, so that it blows up if more queries
# are run than expected.
return succeed(messages.pop(0))
def queryProtocol(self, *args, **kwargs):
return defer.fail(socket.gaierror("Couldn't connect"))
resolver = Resolver(servers=[('0.0.0.0', 0)])
resolver._query = query
messages = []
# Let's patch dns.DNSDatagramProtocol.query, as there is no easy way to
# customize it.
self.patch(dns.DNSDatagramProtocol, "query", queryProtocol)
records = [
dns.RRHeader(name='fooba.com', type=dns.NS, cls=dns.IN, ttl=700,
auth=False,
payload=dns.Record_NS(name='ns.twistedmatrix.com',
ttl=700))]
m = dns.Message(id=999, answer=1, opCode=0, recDes=0, recAv=1, auth=1,
rCode=0, trunc=0, maxSize=0)
m.answers = records
messages.append(m)
return self.assertFailure(
resolver.getHostByName("fooby.com"), socket.gaierror)
| apache-2.0 | -5,313,144,541,597,137,000 | 33.926421 | 110 | 0.574388 | false |
openconnectome/m2g | MR-OCP/MROCPdjango/computation/plotting/HBMPlot.py | 2 | 14895 | #!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Disa Mhembere, Johns Hopkins University
# Separated: 10/2/2012
# Plot all .np arrays in a common dir on the same axis & save
# 1 indexed
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import pylab as pl
import numpy as np
import os
import sys
from glob import glob
import argparse
import scipy
from scipy import interpolate
import inspect
import csv
# Issues: Done nothing with MAD
def lineno():
'''
Get current line number
'''
return str(inspect.getframeinfo(inspect.currentframe())[1])
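# csvtodict: read the covariates CSV and map subject id (column 0) to the
# requested characteristic -- column 4 for 'class', column 2 for 'gender'.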
def csvtodict(fn ='/home/disa/code/mrn_covariates_n120-v4.csv', char = 'class'):
if char == 'class':
col = 4
elif char == 'gender':
col = 2
reader = csv.reader(open(fn, 'rb'))
outdict = dict()
for row in reader:
outdict[row[0].strip()] = row[col].strip()
#print row[0] ,'TYPE' ,outdict[row[0]]
#import pdb; pdb.set_trace()
return outdict
def pickprintcolor(charDict, arrfn):
'''
Pick the plot color for one subject's invariant file based on its class:
'0' -> grey, '1' -> blue, '2' -> green.
charDict: dict mapping subject id -> class label ('0', '1' or '2')
arrfn: invariant array filename; the subject id is the prefix of its basename
'''
if (charDict[(arrfn.split('/')[-1]).split('_')[0]] == '0'):
plot_color = 'grey'
elif (charDict[(arrfn.split('/')[-1]).split('_')[0]] == '1'):
plot_color = 'blue'
elif (charDict[(arrfn.split('/')[-1]).split('_')[0]] == '2'):
plot_color = 'green'
else:
print "[ERROR]: %s, no match on subject type" % lineno()
return plot_color
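# plotInvDist: histogram (smoothed with a cubic spline) of each invariant --
# local triangles, clustering coefficient, scan statistic 1, degree, the sorted
# eigenvalue spectrum and the global edge count -- for every graph found under
# invDir, one curve per subject colored by class, saved to <pngName>.pdf.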
def plotInvDist(invDir, pngName, numBins =100):
subj_types = csvtodict(char = 'class') # load up subject types
# ClustCoeff Degree Eigen MAD numEdges.npy ScanStat Triangle
MADdir = "MAD"
ccDir = "ClustCoeff"
DegDir = "Degree"
EigDir = "Eigen/values"
SS1dir = "ScanStat1"
triDir = "Triangle"
invDirs = [triDir, ccDir, SS1dir, DegDir ]
if not os.path.exists(invDir):
print "%s does not exist" % invDir
sys.exit(1)
pl.figure(2)
fig_gl, axes = pl.subplots(nrows=3, ncols=2)
for idx, drcty in enumerate (invDirs):
for arrfn in glob(os.path.join(invDir, drcty,'*.npy')):
try:
arr = np.load(arrfn)
arr = np.log(arr[arr.nonzero()])
print "Processing %s..." % arrfn
except:
print "[ERROR]: Line %s: Invariant file not found %s" % (lineno(),arrfn)
pl.figure(1)
n, bins, patches = pl.hist(arr, bins=numBins , range=None, normed=False, weights=None, cumulative=False, \
bottom=None, histtype='stepfilled', align='mid', orientation='vertical', \
rwidth=None, log=False, color=None, label=None, hold=None)
n = np.append(n,0)
n = n/float(sum(n))
fig = pl.figure(2)
fig.subplots_adjust(hspace=.5)
ax = pl.subplot(3,2,idx+1)
#if idx == 0:
# plt.axis([0, 35, 0, 0.04])
# ax.set_yticks(scipy.arange(0,0.04,0.01))
#if idx == 1 or idx == 2:
# ax.set_yticks(scipy.arange(0,0.03,0.01))
#if idx == 3:
# ax.set_yticks(scipy.arange(0,0.04,0.01))
# Interpolation
f = interpolate.interp1d(bins, n, kind='cubic')
x = np.arange(bins[0],bins[-1],0.03) # vary linspc
interp = f(x)
ltz = interp < 0
interp[ltz] = 0
plot_color = pickprintcolor(subj_types, arrfn)
#pl.plot(x, interp, color = plot_color, linewidth=1)
pl.plot(interp, color = plot_color, linewidth=1)
if idx == 0:
pl.ylabel('Probability')
pl.xlabel('Log Number of Local Triangles')
if idx == 1:
#pl.ylabel('Probability') #**
pl.xlabel('Log Local Clustering Coefficient')
if idx == 2:
pl.ylabel('Probability')
pl.xlabel('Log Scan Statistic 1')
if idx == 3:
#pl.ylabel('Probability') #**
pl.xlabel('Log Degree')
''' Eigenvalues '''
ax = pl.subplot(3,2,5)
ax.set_yticks(scipy.arange(0,16,4))
for eigValInstance in glob(os.path.join(invDir, EigDir,"*.npy")):
try:
eigv = np.load(eigValInstance)
except:
print "Eigenvalue array"
n = len(eigv)
sa = (np.sort(eigv)[::-1])
plot_color = pickprintcolor(subj_types, eigValInstance)
pl.plot(range(1,n+1), sa/10000, color=plot_color)
pl.ylabel('Magnitude ($X 10^4$) ')
pl.xlabel('Eigenvalue Rank')
''' Edges '''
arrfn = os.path.join(invDir, 'Globals/numEdges.npy')
try:
arr = np.load(arrfn)
arr = np.log(arr[arr.nonzero()])
print "Processing %s..." % arrfn
except:
print "[ERROR]: Line %s: Invariant file not found %s" % (lineno(),arrfn)
pl.figure(1)
n, bins, patches = pl.hist(arr, bins=10 , range=None, normed=False, weights=None, cumulative=False, \
bottom=None, histtype='stepfilled', align='mid', orientation='vertical', \
rwidth=None, log=False, color=None, label=None, hold=None)
n = np.append(n,0)
fig = pl.figure(2)
ax = pl.subplot(3,2,6)
ax.set_xticks(scipy.arange(17.2,18.1,0.2))
f = interpolate.interp1d(bins, n, kind='cubic')
x = np.arange(bins[0],bins[-1],0.01) # vary linspc
interp = f(x)
ltz = interp < 0
interp[ltz] = 0
pl.plot(x, interp,color ='grey' ,linewidth=1)
pl.ylabel('Frequency')
pl.xlabel('Log Global Edge Number')
pl.savefig(pngName+'.pdf')
#################################################
##################################################
##################################################
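# plotstdmean: work-in-progress variant intended to plot per-class means and
# standard deviations of each invariant and save them to <pngName>.png; note
# that it still contains an "import pdb; pdb.set_trace()" debugging stop.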
def plotstdmean(invDir, pngName, numBins =100):
subj_types = csvtodict() # load up subject types
# ClustCoeff Degree Eigen MAD numEdges.npy ScanStat Triangle
MADdir = "MAD"
ccDir = "ClustCoeff"
DegDir = "Degree"
EigDir = "Eigen"
SS1dir = "ScanStat1"
triDir = "Triangle"
invDirs = [triDir, ccDir, SS1dir, DegDir ]
if not os.path.exists(invDir):
print "%s does not exist" % invDir
sys.exit(1)
pl.figure(2)
fig_gl, axes = pl.subplots(nrows=3, ncols=2)
fig_gl.tight_layout()
for idx, drcty in enumerate (invDirs):
mean_arr = []
stddev_arr = []
ones_mean = []
twos_mean = []
zeros_mean = []
ones_std = []
twos_std = []
zeros_std = []
for arrfn in glob(os.path.join(invDir, drcty,'*.npy')):
try:
arr = np.load(arrfn)
arr = np.log(arr[arr.nonzero()])
print "Processing %s..." % arrfn
except:
print "[ERROR]: Line %s: Invariant file not found %s" % (lineno(),arrfn)
pl.figure(1)
n, bins, patches = pl.hist(arr, bins=numBins , range=None, normed=False, weights=None, cumulative=False, \
bottom=None, histtype='stepfilled', align='mid', orientation='vertical', \
rwidth=None, log=False, color=None, label=None, hold=None)
n = np.append(n,0)
n = n/float(sum(n))
fig = pl.figure(2)
fig.subplots_adjust(hspace=.5)
nrows=5
ncols=4
ax = pl.subplot(nrows,ncols,idx+1)
if idx == 0:
plt.axis([0, 35, 0, 0.04])
ax.set_yticks(scipy.arange(0,0.04,0.01))
if idx == 1 or idx == 2:
ax.set_yticks(scipy.arange(0,0.03,0.01))
if idx == 3:
ax.set_yticks(scipy.arange(0,0.04,0.01))
# Interpolation
f = interpolate.interp1d(bins, n, kind='cubic')
x = np.arange(bins[0],bins[-1],0.03) # vary linspc
interp = f(x)
ltz = interp < 0
interp[ltz] = 0
import pdb; pdb.set_trace()
'''
pl.plot(x, interp, color = plot_color, linewidth=1)
if ( subj_types[arrfn.split('/')[-1].split('_')[0]] == '0'):
zeros_mean.append(arr.mean())
zeros_std.append(arr.std())
if ( subj_types[arrfn.split('/')[-1].split('_')[0]] == '1'):
ones_mean.append(arr.mean())
ones_std.append(arr.std())
if ( subj_types[arrfn.split('/')[-1].split('_')[0]] == '2'):
twos_mean.append(arr.mean())
twos_std.append(arr.std())
'''
plot_color = pickprintcolor(subj_types, arrfn)
if idx == 0:
pl.ylabel('Probability')
pl.xlabel('Log Number of Local Triangles')
if idx == 1:
#pl.ylabel('Probability') #**
pl.xlabel('Log Local Clustering Coefficient')
if idx == 2:
pl.ylabel('Probability')
pl.xlabel('Log Scan Statistic 1')
if idx == 3:
#pl.ylabel('Probability') #**
pl.xlabel('Log Degree')
''' Eigenvalues '''
ax = pl.subplot(3,2,5)
ax.set_yticks(scipy.arange(0,16,4))
for eigValInstance in glob(os.path.join(invDir, EigDir,"*.npy")):
try:
eigv = np.load(eigValInstance)
except:
print "Eigenvalue array"
n = len(eigv)
sa = (np.sort(eigv)[::-1])
plot_color = pickprintcolor(subj_types, eigValInstance)
pl.plot(range(1,n+1), sa/10000, color=plot_color)
pl.ylabel('Magnitude ($X 10^4$) ')
pl.xlabel('eigenvalue rank')
''' Edges '''
arrfn = os.path.join(invDir, 'Globals/numEdges.npy')
try:
arr = np.load(arrfn)
arr = np.log(arr[arr.nonzero()])
print "Processing %s..." % arrfn
except:
print "[ERROR]: Line %s: Invariant file not found %s" % (lineno(),arrfn)
pl.figure(1)
n, bins, patches = pl.hist(arr, bins=10 , range=None, normed=False, weights=None, cumulative=False, \
bottom=None, histtype='stepfilled', align='mid', orientation='vertical', \
rwidth=None, log=False, color=None, label=None, hold=None)
n = np.append(n,0)
fig = pl.figure(2)
ax = pl.subplot(3,2,6)
ax.set_xticks(scipy.arange(17.2,18.1,0.2))
f = interpolate.interp1d(bins, n, kind='cubic')
x = np.arange(bins[0],bins[-1],0.01) # vary linspc
interp = f(x)
ltz = interp < 0
interp[ltz] = 0
pl.plot(x, interp,color ='grey' ,linewidth=1)
pl.ylabel('Frequency')
pl.xlabel('log global edge number')
pl.savefig(pngName+'.png')
##################################################
##################################################
##################################################
def OLDplotstdmean(invDir, pngName):
subj_types = csvtodict() # load up subject types
# ClustCoeff Degree Eigen MAD numEdges.npy ScanStat Triangle
ccDir = "ClustCoeff"
DegDir = "Degree"
EigDir = "Eigen"
SS1dir = "ScanStat1"
triDir = "Triangle"
invDirs = [triDir, ccDir, SS1dir, DegDir ]
#invDirs = []
if not os.path.exists(invDir):
print "%s does not exist" % invDir
sys.exit(1)
pl.figure(1)
nrows=4
ncols=2
fig_gl, axes = pl.subplots(nrows=nrows, ncols=ncols)
fig_gl.tight_layout()
for idx, drcty in enumerate (invDirs):
mean_arr = []
stddev_arr = []
ones_mean = []
twos_mean = []
zeros_mean = []
ones_std = []
twos_std = []
zeros_std = []
for arrfn in glob(os.path.join(invDir, drcty,'*.npy')):
try:
arr = np.load(arrfn)
mean_arr.append(arr.mean())
stddev_arr.append(arr.std())
if ( subj_types[arrfn.split('/')[-1].split('_')[0]] == '0'):
zeros_mean.append(arr.mean())
zeros_std.append(arr.std())
if ( subj_types[arrfn.split('/')[-1].split('_')[0]] == '1'):
ones_mean.append(arr.mean())
ones_std.append(arr.std())
if ( subj_types[arrfn.split('/')[-1].split('_')[0]] == '2'):
twos_mean.append(arr.mean())
twos_std.append(arr.std())
#mean_arr.append(np.log(arr.mean()))
#stddev_arr.append(np.log(arr.std()))
#arr = np.log(arr[arr.nonzero()])
print "Processing %s..." % arrfn
except:
print "[ERROR]: Line %s: Invariant file not found %s" % (lineno(),arrfn)
mean_arr = np.array(mean_arr)
stddev_arr = np.array(stddev_arr)
ax = pl.subplot(nrows,ncols,(idx*ncols)+1)
ax.set_yticks(scipy.arange(0,1,.25))
pl.gcf().subplots_adjust(bottom=0.07)
'''
if idx == 0:
plt.axis([0, 35, 0, 0.04])
ax.set_yticks(scipy.arange(0,0.04,0.01))
if idx == 1 or idx == 2:
ax.set_yticks(scipy.arange(0,0.03,0.01))
if idx == 3:
ax.set_yticks(scipy.arange(0,0.04,0.01))
'''
# Interpolation
#f = interpolate.interp1d(bins, n, kind='cubic')
#x = np.arange(bins[0],bins[-1],0.03) # vary linspc
#interp = f(x)
#ltz = interp < 0
#interp[ltz] = 0
#plot_color = pickprintcolor(subj_types, arrfn)
#pl.plot(x, interp, color = plot_color, linewidth=1)
#pl.plot(mean_arr/float(mean_arr.max()), color = "black", linewidth=1)
if (idx*ncols)+1 == 1:
pl.ylabel('')
pl.xlabel('Norm. Local Triangle Count Mean')
if (idx*ncols)+1 == 3:
#pl.ylabel('Probability') #**
pl.xlabel('Norm. Local Clustering Coefficient Mean')
if (idx*ncols)+1 == 5:
pl.ylabel('Normalized Magnitude Scale')
pl.xlabel('Norm. Scan Statistic 1 Mean')
if (idx*ncols)+1 == 7:
#pl.ylabel('Probability') #**
pl.xlabel('Norm. Local Degree Mean')
pl.plot(zeros_mean, color = 'grey' , linewidth=1)
pl.plot(ones_mean, color = 'blue', linewidth=1)
pl.plot(twos_mean, color = 'green', linewidth=1)
ax = pl.subplot(nrows,ncols,(idx*ncols)+2)
ax.set_yticks(scipy.arange(0,1,.25))
pl.gcf().subplots_adjust(bottom=0.07)
stddev_arr = np.array(stddev_arr)
#pl.plot(stddev_arr/float(stddev_arr.max()), color = "black", linewidth=1)
if (idx*ncols)+2 == 2:
pl.ylabel('')
pl.xlabel('Norm. Local Triangle Count Std Dev')
if (idx*ncols)+2 == 4:
#pl.ylabel('Probability') #**
pl.xlabel('Norm. Local Clustering Coefficient Std Dev')
if (idx*ncols)+2 == 6:
#pl.ylabel('Probability')
pl.xlabel('Norm. Scan Statistic 1 Std Dev')
if (idx*ncols)+2 == 8:
#pl.ylabel('Probability') #**
pl.xlabel('Norm. Local Degree Std Dev')
pl.plot(zeros_std, color = 'grey' , linewidth=1)
pl.plot(ones_std, color = 'blue', linewidth=1)
pl.plot(twos_std, color = 'green', linewidth=1)
pl.savefig(pngName+'.png')
def main():
parser = argparse.ArgumentParser(description='Plot distribution of invariant arrays of several graphs')
parser.add_argument('invDir', action='store',help='The full path of directory containing .npy invariant arrays')
parser.add_argument('pngName', action='store', help='Full path of directory of resulting png file')
parser.add_argument('numBins', type = int, action='store', help='Number of bins')
result = parser.parse_args()
plotInvDist(result.invDir, result.pngName, result.numBins)
#plotstdmean(result.invDir, result.pngName)
if __name__ == '__main__':
main()
#csvtodict(sys.argv[1]) | apache-2.0 | -4,639,764,769,957,461,000 | 27.373333 | 116 | 0.590198 | false |
arush0311/scrapy | scrapy/mail.py | 12 | 4745 | """
Mail sending helpers
See documentation in docs/topics/email.rst
"""
import logging
from six.moves import cStringIO as StringIO
import six
from email.utils import COMMASPACE, formatdate
from six.moves.email_mime_multipart import MIMEMultipart
from six.moves.email_mime_text import MIMEText
from six.moves.email_mime_base import MIMEBase
if six.PY2:
from email.MIMENonMultipart import MIMENonMultipart
from email import Encoders
else:
from email.mime.nonmultipart import MIMENonMultipart
from email import encoders as Encoders
from twisted.internet import defer, reactor, ssl
from .utils.misc import arg_to_iter
logger = logging.getLogger(__name__)
class MailSender(object):
def __init__(self, smtphost='localhost', mailfrom='scrapy@localhost',
smtpuser=None, smtppass=None, smtpport=25, smtptls=False, smtpssl=False, debug=False):
self.smtphost = smtphost
self.smtpport = smtpport
self.smtpuser = smtpuser
self.smtppass = smtppass
self.smtptls = smtptls
self.smtpssl = smtpssl
self.mailfrom = mailfrom
self.debug = debug
@classmethod
def from_settings(cls, settings):
return cls(settings['MAIL_HOST'], settings['MAIL_FROM'], settings['MAIL_USER'],
settings['MAIL_PASS'], settings.getint('MAIL_PORT'),
settings.getbool('MAIL_TLS'), settings.getbool('MAIL_SSL'))
def send(self, to, subject, body, cc=None, attachs=(), mimetype='text/plain', charset=None, _callback=None):
if attachs:
msg = MIMEMultipart()
else:
msg = MIMENonMultipart(*mimetype.split('/', 1))
to = list(arg_to_iter(to))
cc = list(arg_to_iter(cc))
msg['From'] = self.mailfrom
msg['To'] = COMMASPACE.join(to)
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
rcpts = to[:]
if cc:
rcpts.extend(cc)
msg['Cc'] = COMMASPACE.join(cc)
if charset:
msg.set_charset(charset)
if attachs:
msg.attach(MIMEText(body, 'plain', charset or 'us-ascii'))
for attach_name, mimetype, f in attachs:
part = MIMEBase(*mimetype.split('/'))
part.set_payload(f.read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' \
% attach_name)
msg.attach(part)
else:
msg.set_payload(body)
if _callback:
_callback(to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg)
if self.debug:
logger.debug('Debug mail sent OK: To=%(mailto)s Cc=%(mailcc)s '
'Subject="%(mailsubject)s" Attachs=%(mailattachs)d',
{'mailto': to, 'mailcc': cc, 'mailsubject': subject,
'mailattachs': len(attachs)})
return
dfd = self._sendmail(rcpts, msg.as_string())
dfd.addCallbacks(self._sent_ok, self._sent_failed,
callbackArgs=[to, cc, subject, len(attachs)],
errbackArgs=[to, cc, subject, len(attachs)])
reactor.addSystemEventTrigger('before', 'shutdown', lambda: dfd)
return dfd
def _sent_ok(self, result, to, cc, subject, nattachs):
logger.info('Mail sent OK: To=%(mailto)s Cc=%(mailcc)s '
'Subject="%(mailsubject)s" Attachs=%(mailattachs)d',
{'mailto': to, 'mailcc': cc, 'mailsubject': subject,
'mailattachs': nattachs})
def _sent_failed(self, failure, to, cc, subject, nattachs):
errstr = str(failure.value)
logger.error('Unable to send mail: To=%(mailto)s Cc=%(mailcc)s '
'Subject="%(mailsubject)s" Attachs=%(mailattachs)d'
'- %(mailerr)s',
{'mailto': to, 'mailcc': cc, 'mailsubject': subject,
'mailattachs': nattachs, 'mailerr': errstr})
def _sendmail(self, to_addrs, msg):
# Import twisted.mail here because it is not available in python3
from twisted.mail.smtp import ESMTPSenderFactory
msg = StringIO(msg)
d = defer.Deferred()
factory = ESMTPSenderFactory(self.smtpuser, self.smtppass, self.mailfrom, \
to_addrs, msg, d, heloFallback=True, requireAuthentication=False, \
requireTransportSecurity=self.smtptls)
factory.noisy = False
if self.smtpssl:
reactor.connectSSL(self.smtphost, self.smtpport, factory, ssl.ClientContextFactory())
else:
reactor.connectTCP(self.smtphost, self.smtpport, factory)
return d
| bsd-3-clause | -2,012,089,322,382,089,200 | 36.362205 | 112 | 0.598946 | false |
kaplun/inspire-next | inspirehep/modules/literaturesuggest/__init__.py | 1 | 1056 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""INSPIRE authors."""
from __future__ import absolute_import, division, print_function
from .ext import INSPIRELiteratureSuggestion # noqa: F401
| gpl-3.0 | 939,974,482,425,567,500 | 38.111111 | 77 | 0.756629 | false |
stinos/micropython | tests/extmod/uasyncio_lock_cancel.py | 16 | 1373 | # Test that locks work when cancelling multiple waiters on the lock
try:
import uasyncio as asyncio
except ImportError:
try:
import asyncio
except ImportError:
print("SKIP")
raise SystemExit
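# Each task tries to acquire the shared lock; lock_flag[0] mirrors whether the
# lock is currently held, so the printed value should be False on every
# successful acquisition.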
async def task(i, lock, lock_flag):
print("task", i, "start")
try:
await lock.acquire()
except asyncio.CancelledError:
print("task", i, "cancel")
return
print("task", i, "lock_flag", lock_flag[0])
lock_flag[0] = True
await asyncio.sleep(0)
lock.release()
lock_flag[0] = False
print("task", i, "done")
async def main():
# Create a lock and acquire it so the tasks below must wait
lock = asyncio.Lock()
await lock.acquire()
lock_flag = [True]
# Create 4 tasks and let them all run
t0 = asyncio.create_task(task(0, lock, lock_flag))
t1 = asyncio.create_task(task(1, lock, lock_flag))
t2 = asyncio.create_task(task(2, lock, lock_flag))
t3 = asyncio.create_task(task(3, lock, lock_flag))
await asyncio.sleep(0)
# Cancel 2 of the tasks (which are waiting on the lock) and release the lock
t1.cancel()
t2.cancel()
lock.release()
lock_flag[0] = False
# Let the tasks run to completion
for _ in range(4):
await asyncio.sleep(0)
# The lock should now be unlocked
print(lock.locked())
asyncio.run(main())
| mit | -8,540,278,473,537,875,000 | 23.963636 | 80 | 0.624909 | false |
travisjwarren/train_track | train_track/tests/apps/event/test_event_delete_attendee_views.py | 1 | 2680 | __author__ = 'traviswarren'
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from train_track.apps.profile.models import UserProfileEvent
from train_track.tests.model_factory import UserProfileEventFactory
class EventGetDeleteViewTestCases(TestCase):
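# GET requests to the event-attendee-delete view: anonymous and non-staff users
# receive a 403 "Access Denied" response, while staff users are shown the
# userprofileevent_confirm_delete.html confirmation template.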
def setUp(self):
self.user = User.objects.create_user(
username='user',
email='[email protected]',
password='password')
self.staff = User.objects.create_superuser(
username='staff',
email='[email protected]',
password='password')
def test_get_profile_user_event_delete_is_not_public(self):
user_profile_event = UserProfileEventFactory()
response = self.client.get(reverse('event:event-attendee-delete', kwargs={'pk': user_profile_event.id}))
self.assertContains(response, 'Access Denied', status_code=403)
def test_get_profile_user_event_delete_is_not_user_accessible(self):
user_profile_event = UserProfileEventFactory()
self.assertTrue(self.client.login(username=self.user.username, password='password'))
response = self.client.get(reverse('event:event-attendee-delete', kwargs={'pk': user_profile_event.id}))
self.assertContains(response, 'Access Denied', status_code=403)
def test_get_profile_user_delete_event_attendee_is_staff_only(self):
user_profile_event = UserProfileEventFactory()
self.assertTrue(self.client.login(username=self.staff.username, password='password'))
response = self.client.get(reverse('event:event-attendee-delete', kwargs={'pk': user_profile_event.id}))
self.assertTemplateUsed(response, 'profile/userprofileevent_confirm_delete.html')
class EventPostDeleteViewTestCases(TestCase):
def setUp(self):
self.user = User.objects.create_user(
username='user',
email='[email protected]',
password='password')
self.staff = User.objects.create_superuser(
username='staff',
email='[email protected]',
password='password')
def test_delete_event_attendee_is_staff_only(self):
user_profile_event = UserProfileEventFactory()
self.assertTrue(self.client.login(username=self.staff.username, password='password'))
response = self.client.delete(reverse('event:event-attendee-delete', kwargs={'pk': user_profile_event.id}))
self.failIf(UserProfileEvent.objects.filter(id=user_profile_event.id).exists())
self.assertRedirects(response, 'http://testserver/events/{id}/'.format(id=user_profile_event.event.id)) | gpl-3.0 | -935,778,037,017,512,600 | 39.014925 | 115 | 0.689179 | false |
reinout/django | tests/migrations/test_loader.py | 26 | 19137 | from django.db import connection, connections
from django.db.migrations.exceptions import (
AmbiguityError, InconsistentMigrationHistory, NodeNotFoundError,
)
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.recorder import MigrationRecorder
from django.test import TestCase, modify_settings, override_settings
class RecorderTests(TestCase):
"""
Tests recording migrations as applied or not.
"""
multi_db = True
def test_apply(self):
"""
Tests marking migrations as applied/unapplied.
"""
recorder = MigrationRecorder(connection)
self.assertEqual(
{(x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"},
set(),
)
recorder.record_applied("myapp", "0432_ponies")
self.assertEqual(
{(x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"},
{("myapp", "0432_ponies")},
)
# That should not affect records of another database
recorder_other = MigrationRecorder(connections['other'])
self.assertEqual(
{(x, y) for (x, y) in recorder_other.applied_migrations() if x == "myapp"},
set(),
)
recorder.record_unapplied("myapp", "0432_ponies")
self.assertEqual(
{(x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"},
set(),
)
class LoaderTests(TestCase):
"""
Tests the disk and database loader, and running through migrations
in memory.
"""
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
@modify_settings(INSTALLED_APPS={'append': 'basic'})
def test_load(self):
"""
Makes sure the loader can load the migrations for the test apps,
and then render them out to a new Apps.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0002_second")),
[
("migrations", "0001_initial"),
("migrations", "0002_second"),
],
)
# Now render it out!
project_state = migration_loader.project_state(("migrations", "0002_second"))
self.assertEqual(len(project_state.models), 2)
author_state = project_state.models["migrations", "author"]
self.assertEqual(
[x for x, y in author_state.fields],
["id", "name", "slug", "age", "rating"]
)
book_state = project_state.models["migrations", "book"]
self.assertEqual(
[x for x, y in book_state.fields],
["id", "author"]
)
# Ensure we've included unmigrated apps in there too
self.assertIn("basic", project_state.real_apps)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_unmigdep"})
def test_load_unmigrated_dependency(self):
"""
Makes sure the loader can load migrations with a dependency on an unmigrated app.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0001_initial")),
[
('contenttypes', '0001_initial'),
('auth', '0001_initial'),
("migrations", "0001_initial"),
],
)
# Now render it out!
project_state = migration_loader.project_state(("migrations", "0001_initial"))
self.assertEqual(len([m for a, m in project_state.models if a == "migrations"]), 1)
book_state = project_state.models["migrations", "book"]
self.assertEqual(
[x for x, y in book_state.fields],
["id", "user"]
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_run_before"})
def test_run_before(self):
"""
Makes sure the loader uses Migration.run_before.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0002_second")),
[
("migrations", "0001_initial"),
("migrations", "0003_third"),
("migrations", "0002_second"),
],
)
@override_settings(MIGRATION_MODULES={
"migrations": "migrations.test_migrations_first",
"migrations2": "migrations2.test_migrations_2_first",
})
@modify_settings(INSTALLED_APPS={'append': 'migrations2'})
def test_first(self):
"""
Makes sure the '__first__' migrations build correctly.
"""
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "second")),
[
("migrations", "thefirst"),
("migrations2", "0001_initial"),
("migrations2", "0002_second"),
("migrations", "second"),
],
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_name_match(self):
"Tests prefix name matching"
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.get_migration_by_prefix("migrations", "0001").name,
"0001_initial",
)
with self.assertRaises(AmbiguityError):
migration_loader.get_migration_by_prefix("migrations", "0")
with self.assertRaises(KeyError):
migration_loader.get_migration_by_prefix("migrations", "blarg")
def test_load_import_error(self):
with override_settings(MIGRATION_MODULES={"migrations": "import_error_package"}):
with self.assertRaises(ImportError):
MigrationLoader(connection)
def test_load_module_file(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.file"}):
loader = MigrationLoader(connection)
self.assertIn(
"migrations", loader.unmigrated_apps,
"App with migrations module file not in unmigrated apps."
)
def test_load_empty_dir(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.namespace"}):
loader = MigrationLoader(connection)
self.assertIn(
"migrations", loader.unmigrated_apps,
"App missing __init__.py in migrations module not in unmigrated apps."
)
@override_settings(
INSTALLED_APPS=['migrations.migrations_test_apps.migrated_app'],
)
def test_marked_as_migrated(self):
"""
Undefined MIGRATION_MODULES implies default migration module.
"""
migration_loader = MigrationLoader(connection)
self.assertEqual(migration_loader.migrated_apps, {'migrated_app'})
self.assertEqual(migration_loader.unmigrated_apps, set())
@override_settings(
INSTALLED_APPS=['migrations.migrations_test_apps.migrated_app'],
MIGRATION_MODULES={"migrated_app": None},
)
def test_marked_as_unmigrated(self):
"""
MIGRATION_MODULES allows disabling of migrations for a particular app.
"""
migration_loader = MigrationLoader(connection)
self.assertEqual(migration_loader.migrated_apps, set())
self.assertEqual(migration_loader.unmigrated_apps, {'migrated_app'})
@override_settings(
INSTALLED_APPS=['migrations.migrations_test_apps.migrated_app'],
MIGRATION_MODULES={'migrated_app': 'missing-module'},
)
def test_explicit_missing_module(self):
"""
If a MIGRATION_MODULES override points to a missing module, the error
raised during the importation attempt should be propagated unless
`ignore_no_migrations=True`.
"""
with self.assertRaisesMessage(ImportError, 'missing-module'):
migration_loader = MigrationLoader(connection)
migration_loader = MigrationLoader(connection, ignore_no_migrations=True)
self.assertEqual(migration_loader.migrated_apps, set())
self.assertEqual(migration_loader.unmigrated_apps, {'migrated_app'})
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_loading_squashed(self):
"Tests loading a squashed migration"
migration_loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
self.addCleanup(recorder.flush)
# Loading with nothing applied should just give us the one node
self.assertEqual(
len([x for x in migration_loader.graph.nodes if x[0] == "migrations"]),
1,
)
# However, fake-apply one migration and it should now use the old two
recorder.record_applied("migrations", "0001_initial")
migration_loader.build_graph()
self.assertEqual(
len([x for x in migration_loader.graph.nodes if x[0] == "migrations"]),
2,
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_complex"})
def test_loading_squashed_complex(self):
"Tests loading a complex set of squashed migrations"
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
self.addCleanup(recorder.flush)
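# num_nodes(): how many migrations on the forwards plan to
# ('migrations', '7_auto') are still unapplied.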
def num_nodes():
plan = set(loader.graph.forwards_plan(('migrations', '7_auto')))
return len(plan - loader.applied_migrations)
# Empty database: use squashed migration
loader.build_graph()
self.assertEqual(num_nodes(), 5)
# Starting at 1 or 2 should use the squashed migration too
recorder.record_applied("migrations", "1_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 4)
recorder.record_applied("migrations", "2_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 3)
# However, starting at 3 to 5, the squashed migration cannot be used
recorder.record_applied("migrations", "3_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 4)
recorder.record_applied("migrations", "4_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 3)
# Starting at 5 to 7 we are past the squashed migrations
recorder.record_applied("migrations", "5_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 2)
recorder.record_applied("migrations", "6_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 1)
recorder.record_applied("migrations", "7_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 0)
@override_settings(MIGRATION_MODULES={
"app1": "migrations.test_migrations_squashed_complex_multi_apps.app1",
"app2": "migrations.test_migrations_squashed_complex_multi_apps.app2",
})
@modify_settings(INSTALLED_APPS={'append': [
"migrations.test_migrations_squashed_complex_multi_apps.app1",
"migrations.test_migrations_squashed_complex_multi_apps.app2",
]})
def test_loading_squashed_complex_multi_apps(self):
loader = MigrationLoader(connection)
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
expected_plan = {
('app1', '1_auto'),
('app2', '1_squashed_2'),
('app1', '2_squashed_3'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
@override_settings(MIGRATION_MODULES={
"app1": "migrations.test_migrations_squashed_complex_multi_apps.app1",
"app2": "migrations.test_migrations_squashed_complex_multi_apps.app2",
})
@modify_settings(INSTALLED_APPS={'append': [
"migrations.test_migrations_squashed_complex_multi_apps.app1",
"migrations.test_migrations_squashed_complex_multi_apps.app2",
]})
def test_loading_squashed_complex_multi_apps_partially_applied(self):
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
recorder.record_applied('app1', '1_auto')
recorder.record_applied('app1', '2_auto')
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
plan = plan - loader.applied_migrations
expected_plan = {
('app2', '1_squashed_2'),
('app1', '3_auto'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_erroneous"})
def test_loading_squashed_erroneous(self):
"Tests loading a complex but erroneous set of squashed migrations"
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
self.addCleanup(recorder.flush)
def num_nodes():
plan = set(loader.graph.forwards_plan(('migrations', '7_auto')))
return len(plan - loader.applied_migrations)
# Empty database: use squashed migration
loader.build_graph()
self.assertEqual(num_nodes(), 5)
# Starting at 1 or 2 should use the squashed migration too
recorder.record_applied("migrations", "1_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 4)
recorder.record_applied("migrations", "2_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 3)
# However, starting at 3 or 4, nonexistent migrations would be needed.
msg = ("Migration migrations.6_auto depends on nonexistent node ('migrations', '5_auto'). "
"Django tried to replace migration migrations.5_auto with any of "
"[migrations.3_squashed_5] but wasn't able to because some of the replaced "
"migrations are already applied.")
recorder.record_applied("migrations", "3_auto")
with self.assertRaisesMessage(NodeNotFoundError, msg):
loader.build_graph()
recorder.record_applied("migrations", "4_auto")
with self.assertRaisesMessage(NodeNotFoundError, msg):
loader.build_graph()
# Starting at 5 to 7 we are past the squashed migrations
recorder.record_applied("migrations", "5_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 2)
recorder.record_applied("migrations", "6_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 1)
recorder.record_applied("migrations", "7_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 0)
@override_settings(
MIGRATION_MODULES={'migrations': 'migrations.test_migrations'},
INSTALLED_APPS=['migrations'],
)
def test_check_consistent_history(self):
loader = MigrationLoader(connection=None)
loader.check_consistent_history(connection)
recorder = MigrationRecorder(connection)
recorder.record_applied('migrations', '0002_second')
msg = (
"Migration migrations.0002_second is applied before its dependency "
"migrations.0001_initial on database 'default'."
)
with self.assertRaisesMessage(InconsistentMigrationHistory, msg):
loader.check_consistent_history(connection)
@override_settings(
MIGRATION_MODULES={'migrations': 'migrations.test_migrations_squashed_extra'},
INSTALLED_APPS=['migrations'],
)
def test_check_consistent_history_squashed(self):
"""
MigrationLoader.check_consistent_history() should ignore unapplied
squashed migrations that have all of their `replaces` applied.
"""
loader = MigrationLoader(connection=None)
recorder = MigrationRecorder(connection)
recorder.record_applied('migrations', '0001_initial')
recorder.record_applied('migrations', '0002_second')
loader.check_consistent_history(connection)
recorder.record_applied('migrations', '0003_third')
loader.check_consistent_history(connection)
@override_settings(MIGRATION_MODULES={
"app1": "migrations.test_migrations_squashed_ref_squashed.app1",
"app2": "migrations.test_migrations_squashed_ref_squashed.app2",
})
@modify_settings(INSTALLED_APPS={'append': [
"migrations.test_migrations_squashed_ref_squashed.app1",
"migrations.test_migrations_squashed_ref_squashed.app2",
]})
def test_loading_squashed_ref_squashed(self):
"Tests loading a squashed migration with a new migration referencing it"
r"""
The sample migrations are structured like this:
app_1 1 --> 2 ---------------------*--> 3 *--> 4
\ / /
*-------------------*----/--> 2_sq_3 --*
\ / /
=============== \ ============= / == / ======================
app_2 *--> 1_sq_2 --* /
\ /
*--> 1 --> 2 --*
Where 2_sq_3 is a replacing migration for 2 and 3 in app_1,
as 1_sq_2 is a replacing migration for 1 and 2 in app_2.
"""
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
self.addCleanup(recorder.flush)
# Load with nothing applied: both migrations squashed.
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
plan = plan - loader.applied_migrations
expected_plan = {
('app1', '1_auto'),
('app2', '1_squashed_2'),
('app1', '2_squashed_3'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
# Fake-apply a few from app1: unsquashes migration in app1.
recorder.record_applied('app1', '1_auto')
recorder.record_applied('app1', '2_auto')
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
plan = plan - loader.applied_migrations
expected_plan = {
('app2', '1_squashed_2'),
('app1', '3_auto'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
# Fake-apply one from app2: unsquashes migration in app2 too.
recorder.record_applied('app2', '1_auto')
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
plan = plan - loader.applied_migrations
expected_plan = {
('app2', '2_auto'),
('app1', '3_auto'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
| bsd-3-clause | 297,046,835,465,353,800 | 38.86875 | 107 | 0.603491 | false |
kmoocdev/edx-platform | docs/shared/conf.py | 158 | 10580 | # -*- coding: utf-8 -*-
#
# getting_started documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 16 11:19:12 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# -----------------------------------------------------------------------------
# Common config
#
# This file is imported by the different project conf.py files (in
# course_authors/, data/, and developers/). It includes configuration options
# common to all three.
#
# -----------------------------------------------------------------------------
import os
BASEDIR = os.path.dirname(os.path.abspath(__file__))
def add_base(paths):
"""
Returns a list of paths relative to BASEDIR.
paths: a list of paths
"""
return [os.path.join(BASEDIR, x) for x in paths]
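# e.g. add_base(['_templates']) -> ['<this directory>/_templates'], as used for
# templates_path below.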
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = add_base(['_templates'])
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'edX'
copyright = u'2013, EdX Doc Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<Studio> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = add_base(['_static'])
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'edxdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
'index',
'getting_started.tex',
u'edX Studio Documentation',
u'EdX Doc Team',
'manual',
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'getting_started', u'getting_started Documentation',
[u'EdX Doc Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
'index',
'getting_started',
u'getting_started Documentation',
u'EdX Doc Team',
'getting_started',
'One line description of project.',
'Miscellaneous',
),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'getting_started'
epub_author = u'EdX Doc Team'
epub_publisher = u'EdX Doc Team'
epub_copyright = u'2013, EdX Doc Team'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# If 'no', URL addresses will not be shown.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| agpl-3.0 | 7,472,479,033,266,397,000 | 29.755814 | 173 | 0.684594 | false |
longman694/youtube-dl | youtube_dl/extractor/vube.py | 64 | 6933 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
)
from ..utils import (
int_or_none,
ExtractorError,
)
class VubeIE(InfoExtractor):
IE_NAME = 'vube'
IE_DESC = 'Vube.com'
_VALID_URL = r'https?://vube\.com/(?:[^/]+/)+(?P<id>[\da-zA-Z]{10})\b'
_TESTS = [
{
'url': 'http://vube.com/trending/William+Wei/Y8NUZ69Tf7?t=s',
'md5': 'e7aabe1f8f1aa826b9e4735e1f9cee42',
'info_dict': {
'id': 'Y8NUZ69Tf7',
'ext': 'mp4',
'title': 'Best Drummer Ever [HD]',
'description': 'md5:2d63c4b277b85c2277761c2cf7337d71',
'thumbnail': r're:^https?://.*\.jpg',
'uploader': 'William',
'timestamp': 1406876915,
'upload_date': '20140801',
'duration': 258.051,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'categories': ['amazing', 'hd', 'best drummer ever', 'william wei', 'bucket drumming', 'street drummer', 'epic street drumming'],
},
'skip': 'Not accessible from Travis CI server',
}, {
'url': 'http://vube.com/Chiara+Grispo+Video+Channel/YL2qNPkqon',
'md5': 'db7aba89d4603dadd627e9d1973946fe',
'info_dict': {
'id': 'YL2qNPkqon',
'ext': 'mp4',
'title': 'Chiara Grispo - Price Tag by Jessie J',
'description': 'md5:8ea652a1f36818352428cb5134933313',
'thumbnail': r're:^http://frame\.thestaticvube\.com/snap/[0-9x]+/102e7e63057-5ebc-4f5c-4065-6ce4ebde131f\.jpg$',
'uploader': 'Chiara.Grispo',
'timestamp': 1388743358,
'upload_date': '20140103',
'duration': 170.56,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'categories': ['pop', 'music', 'cover', 'singing', 'jessie j', 'price tag', 'chiara grispo'],
},
'skip': 'Removed due to DMCA',
},
{
'url': 'http://vube.com/SerainaMusic/my-7-year-old-sister-and-i-singing-alive-by-krewella/UeBhTudbfS?t=s&n=1',
'md5': '5d4a52492d76f72712117ce6b0d98d08',
'info_dict': {
'id': 'UeBhTudbfS',
'ext': 'mp4',
'title': 'My 7 year old Sister and I singing "Alive" by Krewella',
'description': 'md5:40bcacb97796339f1690642c21d56f4a',
'thumbnail': r're:^http://frame\.thestaticvube\.com/snap/[0-9x]+/102265d5a9f-0f17-4f6b-5753-adf08484ee1e\.jpg$',
'uploader': 'Seraina',
'timestamp': 1396492438,
'upload_date': '20140403',
'duration': 240.107,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'categories': ['seraina', 'jessica', 'krewella', 'alive'],
},
'skip': 'Removed due to DMCA',
}, {
'url': 'http://vube.com/vote/Siren+Gene/0nmsMY5vEq?n=2&t=s',
'md5': '0584fc13b50f887127d9d1007589d27f',
'info_dict': {
'id': '0nmsMY5vEq',
'ext': 'mp4',
'title': 'Frozen - Let It Go Cover by Siren Gene',
'description': 'My rendition of "Let It Go" originally sung by Idina Menzel.',
'thumbnail': r're:^http://frame\.thestaticvube\.com/snap/[0-9x]+/10283ab622a-86c9-4681-51f2-30d1f65774af\.jpg$',
'uploader': 'Siren',
'timestamp': 1395448018,
'upload_date': '20140322',
'duration': 221.788,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'categories': ['let it go', 'cover', 'idina menzel', 'frozen', 'singing', 'disney', 'siren gene'],
},
'skip': 'Removed due to DMCA',
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video = self._download_json(
'http://vube.com/t-api/v1/video/%s' % video_id, video_id, 'Downloading video JSON')
public_id = video['public_id']
formats = []
for media in video['media'].get('video', []) + video['media'].get('audio', []):
if media['transcoding_status'] != 'processed':
continue
fmt = {
'url': 'http://video.thestaticvube.com/video/%s/%s.mp4' % (media['media_resolution_id'], public_id),
'abr': int(media['audio_bitrate']),
'format_id': compat_str(media['media_resolution_id']),
}
vbr = int(media['video_bitrate'])
if vbr:
fmt.update({
'vbr': vbr,
'height': int(media['height']),
})
formats.append(fmt)
self._sort_formats(formats)
if not formats and video.get('vst') == 'dmca':
raise ExtractorError(
'This video has been removed in response to a complaint received under the US Digital Millennium Copyright Act.',
expected=True)
title = video['title']
description = video.get('description')
thumbnail = self._proto_relative_url(video.get('thumbnail_src'), scheme='http:')
uploader = video.get('user_alias') or video.get('channel')
timestamp = int_or_none(video.get('upload_time'))
duration = video['duration']
view_count = video.get('raw_view_count')
like_count = video.get('total_likes')
dislike_count = video.get('total_hates')
comments = video.get('comments')
comment_count = None
if comments is None:
comment_data = self._download_json(
'http://vube.com/api/video/%s/comment' % video_id,
video_id, 'Downloading video comment JSON', fatal=False)
if comment_data is not None:
comment_count = int_or_none(comment_data.get('total'))
else:
comment_count = len(comments)
categories = [tag['text'] for tag in video['tags']]
return {
'id': video_id,
'formats': formats,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'timestamp': timestamp,
'duration': duration,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'comment_count': comment_count,
'categories': categories,
}
| unlicense | -505,294,144,034,248,100 | 39.30814 | 145 | 0.504399 | false |
kleins11/intdatasci-byte2 | jmankoff-mobile/lib/werkzeug/__init__.py | 55 | 6917 | # -*- coding: utf-8 -*-
"""
werkzeug
~~~~~~~~
Werkzeug is the Swiss Army knife of Python web development.
It provides useful classes and functions for any WSGI application to make
the life of a python web developer much easier. All of the provided
classes are independent from each other so you can mix it with any other
library.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from types import ModuleType
import sys
from werkzeug._compat import iteritems
# the version. Usually set automatically by a script.
__version__ = '0.11'
# This import magic raises concerns quite often, which is why the implementation
# and motivation are explained here in detail now.
#
# The majority of the functions and classes provided by Werkzeug work on the
# HTTP and WSGI layer. There is no useful grouping for those which is why
# they are all importable from "werkzeug" instead of the modules where they are
# implemented. The downside of that is, that now everything would be loaded at
# once, even if unused.
#
# The implementation of a lazy-loading module in this file replaces the
# werkzeug package when imported from within. Attribute access to the werkzeug
# module will then lazily import from the modules that implement the objects.
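#
# A hedged illustration (not part of the original module): with the lazy
# module in place, downstream code can simply write
#
#     from werkzeug import Request, run_simple
#
# and the attribute lookup on the "werkzeug" module object triggers the real
# imports of werkzeug.wrappers and werkzeug.serving only on first use, so a
# bare "import werkzeug" stays cheap.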
# import mapping to objects in other modules
all_by_module = {
'werkzeug.debug': ['DebuggedApplication'],
'werkzeug.local': ['Local', 'LocalManager', 'LocalProxy', 'LocalStack',
'release_local'],
'werkzeug.serving': ['run_simple'],
'werkzeug.test': ['Client', 'EnvironBuilder', 'create_environ',
'run_wsgi_app'],
'werkzeug.testapp': ['test_app'],
'werkzeug.exceptions': ['abort', 'Aborter'],
'werkzeug.urls': ['url_decode', 'url_encode', 'url_quote',
'url_quote_plus', 'url_unquote', 'url_unquote_plus',
'url_fix', 'Href', 'iri_to_uri', 'uri_to_iri'],
'werkzeug.formparser': ['parse_form_data'],
'werkzeug.utils': ['escape', 'environ_property', 'append_slash_redirect',
'redirect', 'cached_property', 'import_string',
'dump_cookie', 'parse_cookie', 'unescape',
'format_string', 'find_modules', 'header_property',
'html', 'xhtml', 'HTMLBuilder', 'validate_arguments',
'ArgumentValidationError', 'bind_arguments',
'secure_filename'],
'werkzeug.wsgi': ['get_current_url', 'get_host', 'pop_path_info',
'peek_path_info', 'SharedDataMiddleware',
'DispatcherMiddleware', 'ClosingIterator', 'FileWrapper',
'make_line_iter', 'LimitedStream', 'responder',
'wrap_file', 'extract_path_info'],
'werkzeug.datastructures': ['MultiDict', 'CombinedMultiDict', 'Headers',
'EnvironHeaders', 'ImmutableList',
'ImmutableDict', 'ImmutableMultiDict',
'TypeConversionDict',
'ImmutableTypeConversionDict', 'Accept',
'MIMEAccept', 'CharsetAccept',
'LanguageAccept', 'RequestCacheControl',
'ResponseCacheControl', 'ETags', 'HeaderSet',
'WWWAuthenticate', 'Authorization',
'FileMultiDict', 'CallbackDict', 'FileStorage',
'OrderedMultiDict', 'ImmutableOrderedMultiDict'
],
'werkzeug.useragents': ['UserAgent'],
'werkzeug.http': ['parse_etags', 'parse_date', 'http_date', 'cookie_date',
'parse_cache_control_header', 'is_resource_modified',
'parse_accept_header', 'parse_set_header', 'quote_etag',
'unquote_etag', 'generate_etag', 'dump_header',
'parse_list_header', 'parse_dict_header',
'parse_authorization_header',
'parse_www_authenticate_header', 'remove_entity_headers',
'is_entity_header', 'remove_hop_by_hop_headers',
'parse_options_header', 'dump_options_header',
'is_hop_by_hop_header', 'unquote_header_value',
'quote_header_value', 'HTTP_STATUS_CODES'],
'werkzeug.wrappers': ['BaseResponse', 'BaseRequest', 'Request', 'Response',
'AcceptMixin', 'ETagRequestMixin',
'ETagResponseMixin', 'ResponseStreamMixin',
'CommonResponseDescriptorsMixin', 'UserAgentMixin',
'AuthorizationMixin', 'WWWAuthenticateMixin',
'CommonRequestDescriptorsMixin'],
'werkzeug.security': ['generate_password_hash', 'check_password_hash'],
# the undocumented easteregg ;-)
'werkzeug._internal': ['_easteregg']
}
# modules that should be imported when accessed as attributes of werkzeug
attribute_modules = frozenset(['exceptions', 'routing', 'script'])
object_origins = {}
for module, items in iteritems(all_by_module):
for item in items:
object_origins[item] = module
class module(ModuleType):
"""Automatically import objects from the modules."""
def __getattr__(self, name):
if name in object_origins:
module = __import__(object_origins[name], None, None, [name])
for extra_name in all_by_module[module.__name__]:
setattr(self, extra_name, getattr(module, extra_name))
return getattr(module, name)
elif name in attribute_modules:
__import__('werkzeug.' + name)
return ModuleType.__getattribute__(self, name)
def __dir__(self):
"""Just show what we want to show."""
result = list(new_module.__all__)
result.extend(('__file__', '__path__', '__doc__', '__all__',
'__docformat__', '__name__', '__path__',
'__package__', '__version__'))
return result
# keep a reference to this module so that it's not garbage collected
old_module = sys.modules['werkzeug']
# setup the new module and patch it into the dict of loaded modules
new_module = sys.modules['werkzeug'] = module('werkzeug')
new_module.__dict__.update({
'__file__': __file__,
'__package__': 'werkzeug',
'__path__': __path__,
'__doc__': __doc__,
'__version__': __version__,
'__all__': tuple(object_origins) + tuple(attribute_modules),
'__docformat__': 'restructuredtext en'
})
# Due to bootstrapping issues we need to import exceptions here.
# Don't ask :-(
__import__('werkzeug.exceptions')
| apache-2.0 | 1,952,082,143,620,249,600 | 44.506579 | 79 | 0.575394 | false |
goliate/sarakha63-persomov | couchpotato/core/media/_base/providers/nzb/binnewz/nzbdownloader.py | 7 | 2796 | import urllib2
from StringIO import StringIO
import gzip
import cookielib
import time
class NZBDownloader(object):
def __init__( self ):
self.cj = cookielib.CookieJar()
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
self.lastRequestTime = None
def waitBeforeNextRequest(self):
if self.lastRequestTime and self.lastRequestTime > ( time.mktime(time.localtime()) - 10):
time.sleep( 10 )
        # store the request time as epoch seconds so the throttle check above compares numbers
        self.lastRequestTime = time.mktime(time.localtime())
def open(self, request):
self.waitBeforeNextRequest()
return self.opener.open(request)
class NZBSearchResult(object):
def __init__(self, downloader, sizeInMegs, refererURL, age, nzbid):
self.downloader = downloader
self.refererURL = refererURL
self.sizeInMegs = sizeInMegs
self.age = age
self.nzbid = nzbid
def readRequest(self, request):
request.add_header('Accept-encoding', 'gzip')
request.add_header('Referer', self.refererURL)
request.add_header('Accept-Encoding', 'gzip')
request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.57 Safari/537.17')
response = self.downloader.open(request)
if response.info().get('Content-Encoding') == 'gzip':
buf = StringIO( response.read())
f = gzip.GzipFile(fileobj=buf)
return f.read()
else:
return response.read()
def getNZB(self):
pass
class NZBGetURLSearchResult( NZBSearchResult ):
def __init__(self, downloader, nzburl, sizeInMegs, refererURL, age, nzbid):
NZBSearchResult.__init__(self, downloader, sizeInMegs, refererURL, age, nzbid)
self.nzburl = nzburl
def getNZB(self):
request = urllib2.Request( self.nzburl )
self.nzbdata = NZBSearchResult.readRequest( self, request )
return self.nzbdata
class NZBPostURLSearchResult( NZBSearchResult ):
def __init__(self, downloader, nzburl, postData, sizeInMegs, refererURL, age, nzbid):
NZBSearchResult.__init__(self, downloader, sizeInMegs, refererURL, age, nzbid)
self.nzburl = nzburl
self.postData = postData
def getNZB(self):
request = urllib2.Request( self.nzburl, self.postData )
self.nzbdata = NZBSearchResult.readRequest( self, request )
return self.nzbdata
class NZBDataSearchResult( NZBSearchResult ):
def __init__(self, nzbdata, sizeInMegs, refererURL, age, nzbid):
        NZBSearchResult.__init__(self, None, sizeInMegs, refererURL, age, nzbid)
self.nzbdata = nzbdata
def getNZB(self):
return self.nzbdata
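# A hedged usage sketch (not part of the original file): a provider would
# typically share one NZBDownloader (for cookies and throttling) across
# results and fetch the payload through getNZB(). The variable names below
# (nzb_url, size_mb, referer_url, age, nzb_id) are illustrative only.
#
#     downloader = NZBDownloader()
#     result = NZBGetURLSearchResult(downloader, nzb_url, size_mb,
#                                    referer_url, age, nzb_id)
#     nzb_data = result.getNZB()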
| gpl-3.0 | 3,298,821,325,182,967,300 | 34.405063 | 152 | 0.635551 | false |
ArthurGarnier/SickRage | lib/imdb/parser/http/searchCompanyParser.py | 76 | 2929 | """
parser.http.searchCompanyParser module (imdb package).
This module provides the HTMLSearchCompanyParser class (and the
search_company_parser instance), used to parse the results of a search
for a given company.
E.g., when searching for the name "Columbia Pictures", the parsed page would be:
http://akas.imdb.com/find?s=co;mx=20;q=Columbia+Pictures
Copyright 2008-2012 Davide Alberani <[email protected]>
2008 H. Turgut Uyar <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from imdb.utils import analyze_company_name, build_company_name
from utils import Extractor, Attribute, analyze_imdbid
from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
class DOMBasicCompanyParser(DOMBasicMovieParser):
"""Simply get the name of a company and the imdbID.
It's used by the DOMHTMLSearchCompanyParser class to return a result
for a direct match (when a search on IMDb results in a single
    company, the web server sends the company page directly).
"""
_titleFunct = lambda self, x: analyze_company_name(x or u'')
class DOMHTMLSearchCompanyParser(DOMHTMLSearchMovieParser):
_BaseParser = DOMBasicCompanyParser
_notDirectHitTitle = '<title>find - imdb'
_titleBuilder = lambda self, x: build_company_name(x)
_linkPrefix = '/company/co'
_attrs = [Attribute(key='data',
multi=True,
path={
'link': "./a[1]/@href",
'name': "./a[1]/text()",
'notes': "./text()[1]"
},
postprocess=lambda x: (
analyze_imdbid(x.get('link')),
analyze_company_name(x.get('name')+(x.get('notes')
or u''), stripNotes=True)
))]
extractors = [Extractor(label='search',
path="//td[@class='result_text']/a[starts-with(@href, " \
"'/company/co')]/..",
attrs=_attrs)]
_OBJECTS = {
'search_company_parser': ((DOMHTMLSearchCompanyParser,),
{'kind': 'company', '_basic_parser': DOMBasicCompanyParser})
}
| gpl-3.0 | -3,595,158,417,871,122,000 | 40.253521 | 85 | 0.629908 | false |
nvoron23/hadoop-fundamentals | tfidf/framework.py | 17 | 1300 | import os
import sys
from itertools import groupby
from operator import itemgetter
SEPARATOR = "\t"
class Streaming(object):
@staticmethod
def get_job_conf(name):
name = name.replace(".", "_").upper()
return os.environ.get(name)
def __init__(self, infile=sys.stdin, separator=SEPARATOR):
self.infile = infile
self.sep = separator
def status(self, message):
sys.stderr.write("reporter:status:%s" % message)
def counter(self, counter, amount=1, group="Python Streaming"):
sys.stderr.write("reporter:counter:%s,%s,%i" % group, counter, amount)
def emit(self, key, value):
sys.stdout.write("%s%s%s\n" % (key, self.sep, value))
def read(self):
for line in self.infile:
yield line.rstrip()
def __iter__(self):
for line in self.read():
yield line
class Mapper(Streaming):
def map(self):
raise NotImplementedError("Mappers must implement a map method")
class Reducer(Streaming):
def reduce(self):
raise NotImplementedError("Reducers must implement a reduce method")
def __iter__(self):
generator = (line.split(self.sep, 1) for line in self.read())
for item in groupby(generator, itemgetter(0)):
yield item
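# --- Hedged usage sketch (not part of the original module) -------------------
# A minimal word-count job built on the framework above; the class names
# WordCountMapper and WordCountReducer are illustrative, not from the source.
class WordCountMapper(Mapper):
    def map(self):
        # each input line is tokenized and every word emitted with a count of 1
        for line in self:
            for word in line.split():
                self.emit(word, 1)
class WordCountReducer(Reducer):
    def reduce(self):
        # each group yields (key, rows) where every row is a [key, value] pair
        # produced by splitting the line on the separator in __iter__ above
        for key, rows in self:
            total = sum(int(value) for _, value in rows)
            self.emit(key, total)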
| mit | -3,567,691,209,392,632,000 | 25 | 78 | 0.623077 | false |
mikeh77/mi-instrument | mi/instrument/seabird/sbe37smb/ooicore/test/sample_data.py | 2 | 2612 |
from mi.instrument.seabird.sbe37smb.ooicore.driver import NEWLINE
SAMPLE_DS = "SBE37-SMP V 2.6 SERIAL NO. 2165 05 Feb 2013 19:11:43" + NEWLINE + \
"not logging: received stop command" + NEWLINE + \
"sample interval = 20208 seconds" + NEWLINE + \
"samplenumber = 0, free = 200000" + NEWLINE + \
"transmit real-time data" + NEWLINE + \
"do not output salinity with each sample" + NEWLINE + \
"do not output sound velocity with each sample" + NEWLINE + \
"do not store time with each sample" + NEWLINE + \
"number of samples to average = 0" + NEWLINE + \
"reference pressure = 0.0 db" + NEWLINE + \
"serial sync mode disabled" + NEWLINE + \
"wait time after serial sync sampling = 0 seconds" + NEWLINE + \
"internal pump is installed" + NEWLINE + \
"temperature = 7.54 deg C" + NEWLINE + \
"WARNING: LOW BATTERY VOLTAGE!!"
SAMPLE_DC = "SBE37-SM V 2.6b 3464" + NEWLINE + \
"temperature: 08-nov-05" + NEWLINE + \
" TA0 = -2.572242e-04" + NEWLINE + \
" TA1 = 3.138936e-04" + NEWLINE + \
" TA2 = -9.717158e-06" + NEWLINE + \
" TA3 = 2.138735e-07" + NEWLINE + \
"conductivity: 08-nov-05" + NEWLINE + \
" G = -9.870930e-01" + NEWLINE + \
" H = 1.417895e-01" + NEWLINE + \
" I = 1.334915e-04" + NEWLINE + \
" J = 3.339261e-05" + NEWLINE + \
" CPCOR = 9.570000e-08" + NEWLINE + \
" CTCOR = 3.250000e-06" + NEWLINE + \
" WBOTC = 1.202400e-05" + NEWLINE + \
"pressure S/N 4955, range = 10847.1964958 psia: 12-aug-05" + NEWLINE + \
" PA0 = 5.916199e+00" + NEWLINE + \
" PA1 = 4.851819e-01" + NEWLINE + \
" PA2 = 4.596432e-07" + NEWLINE + \
" PTCA0 = 2.762492e+02" + NEWLINE + \
" PTCA1 = 6.603433e-01" + NEWLINE + \
" PTCA2 = 5.756490e-03" + NEWLINE + \
" PTCSB0 = 2.461450e+01" + NEWLINE + \
" PTCSB1 = -9.000000e-04" + NEWLINE + \
" PTCSB2 = 0.000000e+00" + NEWLINE + \
" POFFSET = 0.000000e+00" + NEWLINE + \
"rtc: 08-nov-05" + NEWLINE + \
" RTCA0 = 9.999862e-01" + NEWLINE + \
" RTCA1 = 1.686132e-06" + NEWLINE + \
" RTCA2 = -3.022745e-08"
SAMPLE = "#55.9044,41.40609, 572.170, 34.2583, 1505.948, 05 Feb 2013, 19:16:59" + NEWLINE
| bsd-2-clause | -7,487,891,058,974,034,000 | 50.215686 | 91 | 0.487366 | false |
wzmao/mbio | mbio/Application/math.py | 1 | 11942 | # -*- coding: utf-8 -*-
"""This module contains some math and statistics functions.
Planned for the future: eigenvalue decomposition, matrix inversion,
matrix multiplication, SVD, PCA.
"""
__author__ = 'Wenzhi Mao'
__all__ = ['isSquare', 'ANOVA', 'performRegression', 'performPolyRegression']
def isSquare(x):
"""It is a function to determine if the given integer is a square integer."""
try:
xi = int(x)
except:
return None
if xi != x:
from ..IO.output import printError
printError('The number is not integer.')
return None
if x < 0:
from ..IO.output import printError
printError('The number is negative.')
return None
x = xi
sq = x ** .5
if abs(int(round(sq, 0)) ** 2 - x) < 1e-10:
return True
else:
return False
def eigh(x):
"""This is a function to calculate eigenvalues and eigenvectors."""
try:
from scipy.linalg.lapack import dsyevr
return dsyevr(x)[:2]
except:
from numpy.linalg import eigh as n_eigh
return n_eigh(x)
def invsp(x):
"""This is a function to inverse a symetric postive definite matrix."""
try:
from numpy.linalg import inv
return inv(x)
except:
try:
from scipy.linalg.lapack import dgetrf, dgetri
d, e = dgetrf(x)[:2]
return dgetri(d, e)[0]
except:
from ..IO.output import printError
printError("There is no `inv` function found.")
return None
class ANOVA(object):
"""It is a class for ANOVA analysis. Given the analysis data,
output the test result.
1D data supported now. More dimension could be achieved in the future.
`data` should be n*2 numpy array or list. The first column is the value
and the second column is the label."""
def __init__(self, data=None, **kwargs):
"""Calculate the ANOVA for the data."""
from ..IO.output import printInfo
self.data = data
self.result = None
self.pvalue = self.f0 = self.fd = self.sst = self.sstreat = self.mstreat = self.sse = self.mse = self.n = None
if type(data) == type(None):
self._calculated = False
else:
self.performCalculation(**kwargs)
def performCalculation(self, alpha=0.05, outprint=True, **kwargs):
"""Perform the ANOVA calculation for the data."""
from ..IO.output import printInfo, printError
from numpy import array
from scipy.stats import f as F
self._calculated = None
self.pvalue = self.f0 = self.fd = self.sst = self.sstreat = self.mstreat = self.sse = self.mse = self.n = None
try:
self.data = array(self.data, dtype=float)
except:
printError("The data could not be transfered to numpy.array")
if self.data.ndim != 2:
printError("ANOVA class could only support 1D data now.")
return None
if self.data.shape[1] != 2:
printError("The data should be 2 column data.")
return None
labelset = set()
for i in self.data[:, 1]:
if not i in labelset:
labelset.add(i)
labelset = list(labelset)
labelset.sort()
printInfo("{} label(s) found".format(len(labelset)))
muall = self.data[:, 0].mean()
sst = ((self.data[:, 0] - muall) ** 2).sum()
n = self.data.shape[0]
ns = array([(self.data[:, 1] == i).sum()
for i in labelset], dtype=float)
mus = array([self.data[:, 0][
(self.data[:, 1] == i)].mean() - muall for i in labelset], dtype=float)
sstreat = (mus ** 2).dot(ns)
mstreat = sstreat * 1.0 / (len(ns) - 1)
mse = (0.0 + sst - sstreat) * 1.0 / (n - len(ns))
f0 = mstreat / mse
self.pvalue = 1. - F.cdf(f0, len(ns) - 1, n - len(ns))
self.f0 = f0
self.fd = (len(ns) - 1, n - len(ns))
self.sst = sst
self.sstreat = sstreat
self.mstreat = mstreat
self.sse = (0.0 + sst - sstreat)
self.mse = mse
self.n = n
self._calculated = True
if outprint:
printInfo("SS_Total = {0:13.8f} for {1} data".format(sst, n))
printInfo("MS_Treatment = {0:13.8f} with {1:6d} of free degrees".format(
mstreat, self.fd[0]))
printInfo(
"MS_Error = {0:13.8f} with {1:6d} of free degrees".format(mse, self.fd[1]))
printInfo("F0 = MS_Treatment/MS_Error = {0:12.8f}".format(f0))
printInfo(
"p-value = {0:13.8f} = {1:8.6f}%".format(self.pvalue, self.pvalue * 100))
if self.pvalue < alpha:
printInfo(
"Reject the null hypothesis at alpha = {}, each class are different.".format(alpha))
else:
printInfo(
"Accept the null hypothesis at alpha = {}, each class are the same.".format(alpha))
return None
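# A hedged usage sketch (not part of the original source); the numbers below
# are illustrative only:
#
#     data = [[5.1, 0], [4.9, 0], [6.2, 1], [6.4, 1]]   # value, group label
#     test = ANOVA(data)       # computes and prints the ANOVA table
#     print(test.pvalue)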
def performRegression(x, y, const=True, alpha=0.05, label=None, output=True, **kwargs):
"""Make regression analysis of array data. And test each parameter using t-test.
`x` must be a N*a array. `y` must be a N*1 array.
If `x` or `y` just has one dimension, it could be a 1D array and converted automatically.
`const` is `True` default and it will detect the are there constant in `x`.
If no constant in `x`, it will add a new column at the end.
`alpha` is used to test each parameter.
`label` could be used for output."""
from numpy import ndarray, array, hstack, ones
from numpy.linalg.linalg import inv
from ..IO.output import printError, printInfo
from scipy.stats import t
if not isinstance(x, ndarray) or not isinstance(y, ndarray):
try:
x = array(x, dtype=float)
y = array(y, dtype=float)
except:
printError(
"x and y must be numpy array or could be converted to numpy array.")
return None
x = array(x, dtype=float)
y = array(y, dtype=float)
if x.ndim == 2:
pass
elif x.ndim == 1:
x.resize((x.size, 1))
else:
printError("x must be 1D or 2D data.")
return None
if y.ndim == 2:
if y.shape[1] != 1:
printInfo("Just take the first column of y.")
y = y[:, 0:1]
elif y.ndim == 1:
y.resize((y.size, 1))
else:
printError("y must be 1D or 2D data.")
return None
if x.shape[0] != y.shape[0]:
printError("x and y must have same first dimension.")
return None
if label is None:
label = ['x' + str(i + 1) for i in xrange(x.shape[1])]
else:
label = [str(i) for i in label]
if len(label) != x.shape[1]:
printError(
"The length of label does not match data. Dismiss the label.")
label = ['x' + str(i + 1) for i in xrange(x.shape[1])]
addconst = 0
if const:
hasconst = False
for i in xrange(x.shape[1]):
if len(set(x[:, i])) == 1:
hasconst = True
break
if not hasconst:
x = hstack((x, ones((x.shape[0], 1))))
addconst = 1
label.append('c')
if output:
printInfo(
"Add const automatically. If you don't want to add const, use `const = False`")
cov = inv(x.T.dot(x))
beta = cov.dot(x.T).dot(y)
r = y - x.dot(beta)
sigma2 = ((r.T.dot(r)) / (x.shape[0] - x.shape[1]))[0, 0]
if sigma2 == 0:
sigma2 = 5e-324
st = '\ty = '
for i in xrange(x.shape[1] - 1):
st += "{0:+10.6f}*{1:s} ".format(beta[i, 0], label[i])
if addconst:
st += "{0:+10.6f}".format(beta[-1, 0])
else:
st += "{0:+10.6f}*{1:s}".format(beta[-1, 0], label[x.shape[1] - 1])
if output:
printInfo("The result is :")
printInfo(st)
printInfo("Test each parameter.")
printInfo("\t{0:^5s}{1:^15s}{2:^15s}{3:^15s}{4:^5s}{5:^9s}{6:^5s}".format(
"xi", "Para", "Sigma", "t-statistics", 'FD', "p-value", 'Sig'))
p = []
ts = []
sig = []
sigma = []
for i in xrange(x.shape[1]):
sigma.append((sigma2 * cov[i, i]) ** .5)
ts.append(beta[i][0] / sigma[-1])
p.append((1. - t.cdf(abs(ts[-1]), x.shape[0] - x.shape[1])) * 2)
sig.append("Yes" if 2. * (1. - t.cdf(abs(beta[i][0] / (
(sigma2 * cov[i, i]) ** .5)), x.shape[0] - x.shape[1])) < alpha else 'No')
if output:
printInfo("\t{0:^5s}{1:^15.6e}{2:^15.6e}{3:^15.6e}{4:^5d}{5:^9f}"
"{6:^5s}".format(label[i],
beta[i][0],
sigma[-1],
ts[-1],
x.shape[0] - x.shape[1],
p[-1],
sig[-1]))
p = array(p)
ts = array(ts)
sig = array(sig)
sigma = array(sigma)
return {'beta': beta, 'p': p, 't': ts, "label": label, 'sig': sig, 'sigma': sigma}
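# A hedged usage sketch (not part of the original source): fitting
# y = 2*x1 - 3*x2 + 1 on synthetic data; all names below are illustrative.
#
#     import numpy as np
#     x = np.random.randn(100, 2)
#     y = 2 * x[:, 0] - 3 * x[:, 1] + 1 + 0.01 * np.random.randn(100)
#     res = performRegression(x, y)
#     print(res['beta'])   # roughly [[2], [-3], [1]] once the constant is appended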
def performPolyRegression(y, degree=2, **kwargs):
    '''Build a regression on higher-degree polynomial terms.
    The sample index is used to build the polynomial.
    *Orthogonal unit* polynomial scales are used up to degree 4; the polynomial columns
    do not include a constant, but `performRegression` adds one by default.
    You can pass `const=False` to disable the constant term.
'''
from numpy import ndarray, array, arange, zeros
from ..IO.output import printError, printInfo
if not isinstance(y, ndarray):
try:
y = array(y, dtype=float)
except:
printError(
"y must be numpy array or could be converted to numpy array.")
return None
y = array(y, dtype=float)
if y.ndim == 2:
if y.shape[1] != 1:
printInfo("Just take the first column of y.")
y = y[:, 0:1]
elif y.ndim == 1:
y.resize((y.size, 1))
else:
printError("y must be 1D or 2D data.")
return None
if not degree in [1, 2, 3, 4]:
printError("degree must between 1 and 4.")
if degree + 1 >= y.shape[0]:
printError("The degree must be less than the data size.")
return None
k = y.shape[0] * 1.0
poly = zeros((k, degree))
t = arange(k, dtype=float)
t = t - t.mean()
label = []
kwargs.pop('label', None)
for i in xrange(degree):
if i == 0:
label.append('x')
else:
label.append('x^' + str(i + 1))
if i == 0:
poly[:, i] = t
poly[:, i] = poly[:, i] / ((poly[:, i] ** 2).sum()) ** .5
elif i == 1:
poly[:, i] = t ** 2 - (k ** 2. - 1) / 12
poly[:, i] = poly[:, i] / ((poly[:, i] ** 2).sum()) ** .5
elif i == 2:
poly[:, i] = t ** 3 - t * ((3. * k ** 2 - 7) / 20)
poly[:, i] = poly[:, i] / ((poly[:, i] ** 2).sum()) ** .5
elif i == 3:
poly[:, i] = t ** 4 - (t ** 2) * ((3 * k ** 2 - 13) /
14.) + 3. * (k ** 2 - 1) * (k ** 2 - 9) / 560
poly[:, i] = poly[:, i] / ((poly[:, i] ** 2).sum()) ** .5
printInfo("The polynomial is listed.")
for i in xrange(degree):
if k > 6:
st = ""
for j in xrange(3):
st += " {0:>7.4f}".format(poly[j, i])
st += ' ...'
for j in xrange(-3, 0):
st += " {0:>7.4f}".format(poly[j, i])
else:
st = ""
for j in xrange(int(k)):
st += " {0:>7.4f}".format(poly[j, i])
printInfo("\t{0:^5s}:{1}".format(label[i], st))
result = performRegression(poly, y, label=label, **kwargs)
result['poly'] = poly
return result
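# A hedged usage sketch (not part of the original source): fitting a quadratic
# trend over the sample index of a series y; the array below is illustrative.
#
#     import numpy as np
#     y = np.array([1.0, 1.8, 4.2, 8.9, 16.1, 25.2])
#     res = performPolyRegression(y, degree=2)
#     print(res['sig'])    # which orthogonal polynomial terms are significant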
| mit | 2,013,940,946,260,258,600 | 34.227139 | 118 | 0.504103 | false |
sysadminmatmoz/OCB | addons/auth_signup/res_users.py | 25 | 14601 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, timedelta
import random
from urlparse import urljoin
import werkzeug
from openerp.addons.base.ir.ir_mail_server import MailDeliveryException
from openerp.osv import osv, fields
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT, ustr
from ast import literal_eval
from openerp.tools.translate import _
from openerp.exceptions import UserError
class SignupError(Exception):
pass
def random_token():
# the token has an entropy of about 120 bits (6 bits/char * 20 chars)
chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
return ''.join(random.SystemRandom().choice(chars) for i in xrange(20))
def now(**kwargs):
dt = datetime.now() + timedelta(**kwargs)
return dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
class res_partner(osv.Model):
_inherit = 'res.partner'
def _get_signup_valid(self, cr, uid, ids, name, arg, context=None):
dt = now()
res = {}
for partner in self.browse(cr, uid, ids, context):
res[partner.id] = bool(partner.signup_token) and \
(not partner.signup_expiration or dt <= partner.signup_expiration)
return res
def _get_signup_url_for_action(self, cr, uid, ids, action=None, view_type=None, menu_id=None, res_id=None, model=None, context=None):
""" generate a signup url for the given partner ids and action, possibly overriding
the url state components (menu_id, id, view_type) """
if context is None:
context= {}
res = dict.fromkeys(ids, False)
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
for partner in self.browse(cr, uid, ids, context):
# when required, make sure the partner has a valid signup token
if context.get('signup_valid') and not partner.user_ids:
self.signup_prepare(cr, uid, [partner.id], context=context)
route = 'login'
# the parameters to encode for the query
query = dict(db=cr.dbname)
signup_type = context.get('signup_force_type_in_url', partner.signup_type or '')
if signup_type:
route = 'reset_password' if signup_type == 'reset' else signup_type
if partner.signup_token and signup_type:
query['token'] = partner.signup_token
elif partner.user_ids:
query['login'] = partner.user_ids[0].login
else:
continue # no signup token, no user, thus no signup url!
fragment = dict()
base = '/web#'
if action == '/mail/view':
base = '/mail/view?'
elif action:
fragment['action'] = action
if view_type:
fragment['view_type'] = view_type
if menu_id:
fragment['menu_id'] = menu_id
if model:
fragment['model'] = model
if res_id:
fragment['res_id'] = res_id
if fragment:
query['redirect'] = base + werkzeug.url_encode(fragment)
res[partner.id] = urljoin(base_url, "/web/%s?%s" % (route, werkzeug.url_encode(query)))
return res
def _get_signup_url(self, cr, uid, ids, name, arg, context=None):
""" proxy for function field towards actual implementation """
return self._get_signup_url_for_action(cr, uid, ids, context=context)
_columns = {
'signup_token': fields.char('Signup Token', copy=False),
'signup_type': fields.char('Signup Token Type', copy=False),
'signup_expiration': fields.datetime('Signup Expiration', copy=False),
'signup_valid': fields.function(_get_signup_valid, type='boolean', string='Signup Token is Valid'),
'signup_url': fields.function(_get_signup_url, type='char', string='Signup URL'),
}
def action_signup_prepare(self, cr, uid, ids, context=None):
return self.signup_prepare(cr, uid, ids, context=context)
def signup_cancel(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'signup_token': False, 'signup_type': False, 'signup_expiration': False}, context=context)
def signup_prepare(self, cr, uid, ids, signup_type="signup", expiration=False, context=None):
""" generate a new token for the partners with the given validity, if necessary
:param expiration: the expiration datetime of the token (string, optional)
"""
for partner in self.browse(cr, uid, ids, context):
if expiration or not partner.signup_valid:
token = random_token()
while self._signup_retrieve_partner(cr, uid, token, context=context):
token = random_token()
partner.write({'signup_token': token, 'signup_type': signup_type, 'signup_expiration': expiration})
return True
def _signup_retrieve_partner(self, cr, uid, token,
check_validity=False, raise_exception=False, context=None):
""" find the partner corresponding to a token, and possibly check its validity
:param token: the token to resolve
:param check_validity: if True, also check validity
:param raise_exception: if True, raise exception instead of returning False
:return: partner (browse record) or False (if raise_exception is False)
"""
partner_ids = self.search(cr, uid, [('signup_token', '=', token)], context=context)
if not partner_ids:
if raise_exception:
raise SignupError("Signup token '%s' is not valid" % token)
return False
partner = self.browse(cr, uid, partner_ids[0], context)
if check_validity and not partner.signup_valid:
if raise_exception:
raise SignupError("Signup token '%s' is no longer valid" % token)
return False
return partner
def signup_retrieve_info(self, cr, uid, token, context=None):
""" retrieve the user info about the token
:return: a dictionary with the user information:
- 'db': the name of the database
- 'token': the token, if token is valid
- 'name': the name of the partner, if token is valid
- 'login': the user login, if the user already exists
- 'email': the partner email, if the user does not exist
"""
partner = self._signup_retrieve_partner(cr, uid, token, raise_exception=True, context=None)
res = {'db': cr.dbname}
if partner.signup_valid:
res['token'] = token
res['name'] = partner.name
if partner.user_ids:
res['login'] = partner.user_ids[0].login
else:
res['email'] = res['login'] = partner.email or ''
return res
class res_users(osv.Model):
_inherit = 'res.users'
def _get_state(self, cr, uid, ids, name, arg, context=None):
res = {}
for user in self.browse(cr, uid, ids, context):
res[user.id] = ('active' if user.login_date else 'new')
return res
_columns = {
'state': fields.function(_get_state, string='Status', type='selection',
selection=[('new', 'Never Connected'), ('active', 'Connected')]),
}
def signup(self, cr, uid, values, token=None, context=None):
""" signup a user, to either:
- create a new user (no token), or
- create a user for a partner (with token, but no user for partner), or
- change the password of a user (with token, and existing user).
:param values: a dictionary with field values that are written on user
:param token: signup token (optional)
:return: (dbname, login, password) for the signed up user
"""
if token:
# signup with a token: find the corresponding partner id
res_partner = self.pool.get('res.partner')
partner = res_partner._signup_retrieve_partner(
cr, uid, token, check_validity=True, raise_exception=True, context=None)
# invalidate signup token
partner.write({'signup_token': False, 'signup_type': False, 'signup_expiration': False})
partner_user = partner.user_ids and partner.user_ids[0] or False
# avoid overwriting existing (presumably correct) values with geolocation data
if partner.country_id or partner.zip or partner.city:
values.pop('city', None)
values.pop('country_id', None)
if partner.lang:
values.pop('lang', None)
if partner_user:
# user exists, modify it according to values
values.pop('login', None)
values.pop('name', None)
partner_user.write(values)
return (cr.dbname, partner_user.login, values.get('password'))
else:
# user does not exist: sign up invited user
values.update({
'name': partner.name,
'partner_id': partner.id,
'email': values.get('email') or values.get('login'),
})
if partner.company_id:
values['company_id'] = partner.company_id.id
values['company_ids'] = [(6, 0, [partner.company_id.id])]
self._signup_create_user(cr, uid, values, context=context)
else:
# no token, sign up an external user
values['email'] = values.get('email') or values.get('login')
self._signup_create_user(cr, uid, values, context=context)
return (cr.dbname, values.get('login'), values.get('password'))
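    # A hedged illustration (not part of the original file): a signup
    # controller would typically redeem an invitation token roughly like
    #
    #     db, login, password = request.registry['res.users'].signup(
    #         cr, SUPERUSER_ID,
    #         {'login': 'new.user@example.com', 'password': 'secret'},
    #         token=token)
    #
    # where 'request', 'SUPERUSER_ID' and 'token' come from the caller.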
def _signup_create_user(self, cr, uid, values, context=None):
""" create a new user from the template user """
ir_config_parameter = self.pool.get('ir.config_parameter')
template_user_id = literal_eval(ir_config_parameter.get_param(cr, uid, 'auth_signup.template_user_id', 'False'))
assert template_user_id and self.exists(cr, uid, template_user_id, context=context), 'Signup: invalid template user'
# check that uninvited users may sign up
if 'partner_id' not in values:
if not literal_eval(ir_config_parameter.get_param(cr, uid, 'auth_signup.allow_uninvited', 'False')):
raise SignupError('Signup is not allowed for uninvited users')
assert values.get('login'), "Signup: no login given for new user"
assert values.get('partner_id') or values.get('name'), "Signup: no name or partner given for new user"
# create a copy of the template user (attached to a specific partner_id if given)
values['active'] = True
context = dict(context or {}, no_reset_password=True)
try:
with cr.savepoint():
return self.copy(cr, uid, template_user_id, values, context=context)
except Exception, e:
# copy may failed if asked login is not available.
raise SignupError(ustr(e))
def reset_password(self, cr, uid, login, context=None):
""" retrieve the user corresponding to login (login or email),
and reset their password
"""
user_ids = self.search(cr, uid, [('login', '=', login)], context=context)
if not user_ids:
user_ids = self.search(cr, uid, [('email', '=', login)], context=context)
if len(user_ids) != 1:
raise Exception(_('Reset password: invalid username or email'))
return self.action_reset_password(cr, uid, user_ids, context=context)
def action_reset_password(self, cr, uid, ids, context=None):
""" create signup token for each user, and send their signup url by email """
# prepare reset password signup
if not context:
context = {}
create_mode = bool(context.get('create_user'))
res_partner = self.pool.get('res.partner')
partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context)]
# no time limit for initial invitation, only for reset password
expiration = False if create_mode else now(days=+1)
res_partner.signup_prepare(cr, uid, partner_ids, signup_type="reset", expiration=expiration, context=context)
context = dict(context or {})
# send email to users with their signup url
template = False
if create_mode:
try:
# get_object() raises ValueError if record does not exist
template = self.pool.get('ir.model.data').get_object(cr, uid, 'auth_signup', 'set_password_email')
except ValueError:
pass
if not bool(template):
template = self.pool.get('ir.model.data').get_object(cr, uid, 'auth_signup', 'reset_password_email')
assert template._name == 'mail.template'
for user in self.browse(cr, uid, ids, context):
if not user.email:
raise UserError(_("Cannot send email: user %s has no email address.") % user.name)
context['lang'] = user.lang
self.pool.get('mail.template').send_mail(cr, uid, template.id, user.id, force_send=True, raise_exception=True, context=context)
def create(self, cr, uid, values, context=None):
if context is None:
context = {}
# overridden to automatically invite user to sign up
user_id = super(res_users, self).create(cr, uid, values, context=context)
user = self.browse(cr, uid, user_id, context=context)
if user.email and not context.get('no_reset_password'):
context = dict(context, create_user=True)
try:
self.action_reset_password(cr, uid, [user.id], context=context)
except MailDeliveryException:
self.pool.get('res.partner').signup_cancel(cr, uid, [user.partner_id.id], context=context)
return user_id
def copy(self, cr, uid, id, default=None, context=None):
if not default or not default.get('email'):
# avoid sending email to the user we are duplicating
context = dict(context or {}, reset_password=False)
return super(res_users, self).copy(cr, uid, id, default=default, context=context)
| agpl-3.0 | 7,313,346,925,181,784,000 | 46.405844 | 139 | 0.598041 | false |
arsenovic/clifford | clifford/test/test_g3c_tools.py | 1 | 54096 | import random
from functools import reduce
import time
import functools
import numpy as np
import numpy.testing as npt
from numpy import exp
import pytest
import numba
from clifford import Cl
from clifford.g3c import *
from clifford import general_exp
from clifford.tools.g3c import *
from clifford.tools.g3c.rotor_parameterisation import ga_log, ga_exp, general_logarithm, \
interpolate_rotors
from clifford.tools.g3c.rotor_estimation import *
from clifford.tools.g3c.object_clustering import *
from clifford.tools.g3c.scene_simplification import *
from clifford.tools.g3c.object_fitting import *
from clifford.tools.g3c.model_matching import *
from clifford.tools.g3 import random_euc_mv
from clifford.tools.g3c.GAOnline import draw_objects, GAScene, GanjaScene
from clifford._numba_utils import DISABLE_JIT
too_slow_without_jit = pytest.mark.skipif(
DISABLE_JIT, reason="test is too slow without JIT"
)
RTOL_DEFAULT = 1E-4
ATOL_DEFAULT = 1E-6
assert_allclose = functools.partial(npt.assert_allclose, rtol=RTOL_DEFAULT, atol=ATOL_DEFAULT)
@too_slow_without_jit
class TestRotorGeneration:
def test_generate_translation_rotor(self):
for i in range(10000):
euc_vector_a = random_euc_mv()
res = generate_translation_rotor(euc_vector_a)
res2 = (1 + ninf * euc_vector_a / 2)
assert_allclose(res.value, res2.value)
@too_slow_without_jit
class TestFitObjects:
def test_fit_circle(self):
noise = 0.1
trueP = random_circle()
point_list = project_points_to_circle([random_conformal_point() for i in range(100)], trueP)
point_list = [up(down(P) + noise * random_euc_mv()) for P in point_list]
print(trueP)
circle = fit_circle(point_list)
print(circle)
# draw(point_list + [circle], static=False, scale=0.1)
def test_fit_line(self):
noise = 0.1
trueP = random_line()
point_list = project_points_to_line([random_conformal_point() for i in range(100)], trueP)
point_list = [up(down(P) + noise * random_euc_mv()) for P in point_list]
print(trueP)
line = fit_line(point_list)
print(line)
# draw(point_list + [line], static=False, scale=0.1)
def test_fit_sphere(self):
noise = 0.1
trueP = random_sphere()
point_list = project_points_to_sphere([random_conformal_point() for i in range(100)], trueP)
point_list = [up(down(P) + noise * random_euc_mv()) for P in point_list]
print(trueP)
sphere = fit_sphere(point_list)
print(sphere)
# draw([sphere] + point_list, static=False, scale=0.1)
def test_fit_plane(self):
noise = 0.1
trueP = random_plane()
point_list = project_points_to_plane([random_conformal_point() for i in range(100)], trueP)
point_list = [up(down(P) + noise * random_euc_mv()) for P in point_list]
print(trueP)
plane = fit_plane(point_list)
print(plane)
# draw(point_list + [plane], static=False, scale=0.1)
@too_slow_without_jit
class TestGeneralLogarithm:
def test_general_logarithm_rotation(self):
# Check we can reverse rotations
for i in range(50):
R = random_rotation_rotor()
biv_2 = general_logarithm(R)
biv_3 = ga_log(R)
assert_allclose(biv_2.value, biv_3.value)
def test_general_logarithm_translation(self):
# Check we can reverse translation
for i in range(50):
t = random_euc_mv()
biv = ninf * t / 2
R = general_exp(biv).normal()
biv_2 = general_logarithm(R)
assert_allclose(biv.value, biv_2.value)
def test_general_logarithm_scaling(self):
# Check we can reverse scaling
for i in range(50):
scale = 0.5 + np.random.rand()
biv = -np.log(scale) * e45 / 2
R = general_exp(biv).normal()
biv_2 = general_logarithm(R)
assert_allclose(biv.value, biv_2.value)
def test_general_logarithm_RS(self):
for i in range(5):
scale = 0.5 + np.random.rand()
S = generate_dilation_rotor(scale).normal()
R = generate_rotation_rotor(0.5, e1, e2).normal()
V = (R * S).normal()
biv_test = general_logarithm(R) + general_logarithm(S)
biv = general_logarithm(V)
biv_alt = ga_log(R) + general_logarithm(S)
assert_allclose(biv.value, biv_test.value)
assert_allclose(biv.value, biv_alt.value)
def test_general_logarithm_TR(self):
for i in range(5):
# R = generate_rotation_rotor(0.5, e1, e2).normal()
# T = generate_translation_rotor(e3 + 7 * e2 - e1).normal()
# V = (T*R).normal()
biv_true = random_bivector()
V = general_exp(biv_true).normal()
biv = general_logarithm(V)
V_rebuilt = (general_exp(biv)).normal()
C1 = random_point_pair()
C2 = (V * C1 * ~V).normal()
C3 = (V_rebuilt * C1 * ~V_rebuilt).normal()
assert_allclose(C2.value, C3.value)
def test_general_logarithm_TS(self):
for i in range(5):
scale = 0.5 + np.random.rand()
t = random_euc_mv()
S = generate_dilation_rotor(scale)
T = generate_translation_rotor(t)
V = (T * S).normal()
biv = general_logarithm(V)
V_rebuilt = (general_exp(biv)).normal()
C1 = random_point_pair()
C2 = (V * C1 * ~V).normal()
C3 = (V_rebuilt * C1 * ~V_rebuilt).normal()
assert_allclose(C2.value, C3.value)
def test_general_logarithm_TRS(self):
for i in range(5):
scale = 0.5 + np.random.rand()
S = generate_dilation_rotor(scale)
R = generate_rotation_rotor(0.5, e1, e2)
T = generate_translation_rotor(e3 + 7 * e2 - e1)
V = (T * R * S).normal()
biv = general_logarithm(V)
V_rebuilt = general_exp(biv).normal()
biv2 = general_logarithm(V)
C1 = random_point_pair()
C2 = (V * C1 * ~V).normal()
C3 = (V_rebuilt * C1 * ~V_rebuilt).normal()
assert_allclose(C2.value, C3.value)
@pytest.mark.parametrize('obj_gen', [
random_point_pair, random_line, random_circle, random_plane
])
def test_general_logarithm_conformal(self, obj_gen):
for i in range(1000):
X = obj_gen()
Y = obj_gen()
R = rotor_between_objects(X, Y)
biv = general_logarithm(R)
R_recon = general_exp(biv).normal()
assert_allclose(R.value, R_recon.value)
class TestVisualisation:
def test_draw_objects(self):
scene = ConformalMVArray([random_line() for i in range(100)])
sc_a = str(draw_objects(scene))
scene.save('test.ga')
sc_b = str(draw_objects('test.ga'))
assert sc_a == sc_b
def test_ganja_scene(self):
scene = ConformalMVArray([up(0)^up(e1)^einf, up(0)^up(e2)^einf, up(0)^up(e3)^einf]
+ [random_line() for i in range(2)])
sc = GanjaScene()
sc.add_objects(scene)
sc.save_to_file('test.json')
class TestConformalArray:
def test_up_down(self):
mv = []
up_mv = []
for i in range(100):
p = random_euc_mv()
mv.append(p)
up_mv.append(up(p))
test_array = ConformalMVArray(mv)
up_array = test_array.up()
down_array = up_array.down()
for a, b in zip(up_array, up_mv):
assert_allclose(a.value, b.value)
for a, b in zip(down_array, mv):
assert_allclose(a.value, b.value)
@too_slow_without_jit
def test_apply_rotor(self):
mv = []
for i in range(100):
p = random_euc_mv()
mv.append(p)
test_array = ConformalMVArray(mv)
up_array = test_array.up()
# Test apply rotor
for i in range(100):
R = ConformalMVArray([layout.randomRotor()])
rotated_array = up_array.apply_rotor(R)
for i, v in enumerate(rotated_array):
res = apply_rotor(up_array[i], R[0]).value
assert_allclose(v.value, res)
def test_dual(self):
mv = []
for i in range(100):
p = random_euc_mv()
mv.append(p)
test_array = ConformalMVArray(mv)
up_array = test_array.up()
I5 = layout.blades['e12345']
assert_allclose(
(up_array * ConformalMVArray([I5])).value,
ConformalMVArray([i * I5 for i in up_array]).value)
def test_from_value_array(self):
mv = []
for i in range(100):
p = random_euc_mv()
mv.append(p)
test_array = ConformalMVArray(mv)
up_array = test_array.up()
new_mv_array = ConformalMVArray.from_value_array(up_array.value)
npt.assert_almost_equal(new_mv_array.value, up_array.value)
@too_slow_without_jit
class TestG3CTools:
@pytest.fixture(params=[
random_point_pair,
random_line,
random_circle,
random_plane,
random_sphere
])
def obj_gen(self, request):
return request.param
def test_factorise(self, obj_gen):
n_repeats = 50
for i in range(n_repeats):
X1 = obj_gen()
basis, scale = X1.factorise()
for b in basis:
gpres = b.grades(eps=0.0001)
assert gpres == {1}
new_blade = (reduce(lambda a, b: a ^ b, basis) * scale)
try:
assert_allclose(new_blade.value, X1.value)
except AssertionError:
print(X1)
print(new_blade)
raise
def test_is_blade(self):
a = random_bivector() + random_circle()
assert not a.isBlade()
a = random_translation_rotor()
assert not a.isBlade()
def test_is_blade_generated(self, obj_gen):
n_repeats = 5
for i in range(n_repeats):
a = obj_gen()
assert a.isBlade()
def test_average_objects(self, obj_gen):
n_repeats = 1000
for i in range(n_repeats):
X1 = obj_gen()
X2 = obj_gen()
obj_list = [X1, X2]
average_objects(obj_list, weights=[0.5, 0.5])
def test_point_beyond_plane(self):
for i in range(200):
normal = random_euc_mv().normal()
euc_perp_dist = np.random.randn()*3
plane = I5 * (normal + euc_perp_dist * einf)
P1 = up(normal * (euc_perp_dist+1))
assert point_beyond_plane(P1, plane)
P2 = up(normal * (euc_perp_dist-1))
assert not point_beyond_plane(P2, plane)
def test_unsign_sphere(self):
for i in range(100):
S = unsign_sphere(random_sphere())
r = np.random.randn()
assert_allclose(unsign_sphere(S*r).value, S.value)
def test_sphere_line_intersect(self):
for i in range(100):
S = random_sphere()
L = ((S*einf*S)^random_conformal_point()^einf).normal()
assert sphere_line_intersect(S, L)
def test_sphere_beyond_behind_plane(self):
for i in range(100):
normal = random_euc_mv().normal()
euc_perp_dist = np.random.randn() * 3
plane = I5 * (normal + euc_perp_dist * einf)
radius = abs(np.random.randn() * 2)
sphere1 = I5*(up(normal * (euc_perp_dist + radius*1.1)) - 0.5*radius**2*einf)
assert sphere_beyond_plane(sphere1, plane)
assert not sphere_behind_plane(sphere1, plane)
sphere2 = I5*(up(normal * (euc_perp_dist - radius*1.1)) - 0.5*radius**2*einf)
assert not sphere_beyond_plane(sphere2, plane)
assert sphere_behind_plane(sphere2, plane)
sphere3 = I5*(up(normal * (euc_perp_dist - radius*0.5)) - 0.5*radius**2*einf)
assert not sphere_beyond_plane(sphere3, plane)
assert not sphere_behind_plane(sphere3, plane)
def test_join_spheres(self):
for j in range(1000):
s1 = random_sphere()
s2 = random_sphere()
s3 = join_spheres(s1, s2)
assert sphere_in_sphere(s1, s3)
assert sphere_in_sphere(s2, s3)
def test_enclosing_spheres(self):
n_spheres = 10
for j in range(1000):
spheres = [random_sphere() for i in range(n_spheres)]
s4 = normalised(enclosing_sphere(spheres))
for s in spheres:
assert sphere_in_sphere(s, s4)
def test_closest_furthest_circle_points(self):
"""
        This only checks that the function calls do not crash at the moment;
        it does not verify that the results are correct.
"""
for _ in range(100):
C1 = random_circle()
C2 = random_circle()
pclose = iterative_closest_points_on_circles(C1, C2)
pfar = iterative_furthest_points_on_circles(C1, C2)
def test_closest_points_circle_line(self):
"""
        This checks that the functions do not fail.
        It also checks that the points produced do lie on the circle and line.
        It does not yet check that they actually produce the minimum distance.
"""
for i in range(10):
L = random_line()
C = random_circle()
X1, X2 = iterative_closest_points_circle_line(C, L, niterations=50)
X1Andreas = closest_point_on_circle_from_line(C, L)
X2Andreas = closest_point_on_line_from_circle(C, L)
assert_allclose((X1 ^ C).value, 0)
assert_allclose((X1Andreas ^ C).value, 0)
assert_allclose((X2 ^ L).value, 0)
assert_allclose((X2Andreas ^ L).value, 0)
def test_closest_points_circle_line_corner_cases(self):
# Choose explicit cases to ensure test coverage
# The line and plane of the circle are parallel
# line is not in the plane and the projection does meet the circle
L = (up(e3)^up(e1+e3)^einf).normal()
C = (up(e1)^up(e2)^up(-e1)).normal()
X = closest_point_on_circle_from_line(C, L)
assert_allclose((X ^ C).value, 0)
assert (X == up(e1) or X == up(-e1))
# The line and plane of the circle are parallel
# line is not in the plane and the projection does not meet the circle
L = (up(e3 + 5*e2) ^ up(e1 + e3 + 5*e2) ^ einf).normal()
C = (up(e1) ^ up(e2) ^ up(-e1)).normal()
X = closest_point_on_circle_from_line(C, L)
assert_allclose((X ^ C).value, 0)
assert X == up(e2)
# Line passes through the centre of the circle and is
# perpendicular to the circle
C = (up(e1) ^ up(e2) ^ up(-e1)).normal()
L = (up(0) ^ up(e3) ^ einf).normal()
X = closest_point_on_circle_from_line(C, L)
assert_allclose((X ^ C).value, 0)
# Line passes through the circle and is perpendicular to the circle
C = (up(e1) ^ up(e2) ^ up(-e1)).normal()
L = (up(0.5*e2) ^ up(e3 + 0.5*e2) ^ einf).normal()
X = closest_point_on_circle_from_line(C, L)
assert_allclose((X ^ C).value, 0)
assert X == up(e2)
# Line passes through the centre of the circle and is not
# perpendicular to the circle
C = (up(e1) ^ up(e2) ^ up(-e1)).normal()
L = (up(0) ^ up(e3 + 0.1 * e2) ^ einf).normal()
X = closest_point_on_circle_from_line(C, L)
assert_allclose((X ^ C).value, 0)
assert (X == up(e2) or X == up(-e2))
# Line passes through the circle and is not
# perpendicular to the circle
C = (up(e1) ^ up(e2) ^ up(-e1)).normal()
L = (up(0.1 * e2) ^ up(e3 + 0.2 * e2) ^ einf).normal()
X = closest_point_on_circle_from_line(C, L)
assert_allclose((X ^ C).value, 0)
assert (X == up(e2) or X == up(-e2))
# Line passes outside the circle and is not
# perpendicular to the circle
C = (up(e1) ^ up(e2) ^ up(-e1)).normal()
L = (up(5 * e1) ^ up(e3 + 5 * e1 + e2) ^ einf).normal()
X = closest_point_on_circle_from_line(C, L)
assert_allclose((X ^ C).value, 0)
assert X == up(e1)
def test_get_line_reflection_matrix(self):
for i in range(10):
lines = [random_line() for i in range(10)]
point = random_conformal_point()
Lmat = get_line_reflection_matrix(lines, 1)
            res = layout.MultiVector(Lmat @ point.value)
new_point = 0
for l in lines:
new_point += l*point*l
new_point = new_point/len(lines)
assert_allclose(new_point.value, res.value)
def test_get_truncated_line_reflection_matrix(self):
for i in range(10):
lines = [random_line() for i in range(10)]
Lmat = get_line_reflection_matrix(lines, 1)
Lmat_trunc = val_truncated_get_line_reflection_matrix(np.array([l.value for l in lines]), 1)
assert_allclose(Lmat_trunc, Lmat[1:6, 1:6])
def test_get_midpoint_between_lines(self):
for i in range(50):
P = random_conformal_point()
T1 = random_translation_rotor()
T2 = random_translation_rotor()
P1 = apply_rotor(P, T1)
P2 = apply_rotor(P, T2)
L1 = (P ^ P1 ^ einf).normal()
L2 = (P ^ P2 ^ einf).normal()
Pmid = midpoint_between_lines(L1, L2)
assert_allclose(Pmid.value, P.value)
for i in range(50):
L1 = random_line()
L2 = random_line()
Pmid = midpoint_between_lines(L1, L2)
L1point = project_points_to_line([Pmid], L1)[0]
L2point = project_points_to_line([Pmid], L2)[0]
dst = euc_dist(L1point, L2point)
middst1 = euc_dist(Pmid, L1point)
middst2 = euc_dist(Pmid, L2point)
npt.assert_allclose(dst, 2 * middst1)
npt.assert_allclose(dst, 2 * middst2)
def test_get_nearest_plane_point(self):
for i in range(100):
plane = random_plane()
pnt = get_nearest_plane_point(plane)
s2 = eo + normalise_n_minus_1((plane*eo*plane)(1))
pnt2 = normalise_n_minus_1((s2*einf*s2)(1))
assert_allclose(pnt.value, pnt2.value)
def test_general_object_interpolation(self):
R_r = generate_rotation_rotor(np.pi / 16, e2, e3) * generate_rotation_rotor(np.pi / 4, e1, e2)
R_d = generate_dilation_rotor(1.5)
R_t = generate_translation_rotor(e3)
R = (R_t * R_r * R_d).normal()
# C1 = (up(0+3*e1)^up(2*e1+3*e1)).normal()
C1 = (up(0 + 3 * e1) ^ up(2 * e1 + 3 * e1) ^ up(e1 + e3 + 3 * e1)).normal()
C2 = (R * C1 * ~R).normal()(3)
C3 = (R * C2 * ~R).normal()(3)
C4 = (R * C3 * ~R).normal()(3)
C5 = (R * C4 * ~R).normal()(3)
object_list = [C1, C2, C3, C4, C5]
object_alpha_array = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
new_alpha_array = np.linspace(0.0, 1.0)
new_object_list = general_object_interpolation(object_alpha_array, object_list, new_alpha_array,
kind='quadratic')
new_object_list = [o(3) for o in new_object_list]
draw_objects(object_list, 'circle', color='rgb(255,0,0)')
draw_objects(new_object_list, 'circle', color='rgb(0,255,0)')
time.sleep(1)
def test_n_th_root(self):
for i in range(200):
a = random_point_pair()
b = random_point_pair()
R = rotor_between_objects(a, b)
for n in [1, 2, 4, 8, 16, 32]:
R_n = n_th_rotor_root(R, n)
assert_allclose((R_n ** n).value, R.value)
def test_random_point_pair_at_origin(self):
pp_list = [random_point_pair_at_origin() for i in range(10)]
sc = GAScene()
for pp in pp_list:
sc.add_point_pair(pp)
print(sc)
def test_random_line_at_origin(self):
pp_list = [random_line_at_origin() for i in range(10)]
sc = GAScene()
for pp in pp_list:
sc.add_line(pp)
print(sc)
def test_random_circle_at_origin(self):
pp_list = [random_circle_at_origin() for i in range(10)]
sc = GAScene()
for pp in pp_list:
sc.add_circle(pp)
print(sc)
def test_random_sphere_at_origin(self):
pp_list = [random_sphere_at_origin() for i in range(10)]
sc = GAScene()
for pp in pp_list:
sc.add_sphere(pp)
print(sc)
def test_random_plane_at_origin(self):
pp_list = [random_plane_at_origin() for i in range(10)]
sc = GAScene()
for pp in pp_list:
sc.add_plane(pp)
print(sc)
def test_generate_translation_rotor(self):
""" Tests translation rotor generation """
for i in range(100):
rand = random_euc_mv()
starting_point = up(random_euc_mv())
r_trans = generate_translation_rotor(rand)
end_point = r_trans * starting_point * ~r_trans
translation_vec = down(end_point) - down(starting_point)
assert_allclose(translation_vec.value, rand.value)
def test_intersect_line_and_plane_to_point(self):
""" Intersection of a line and a plane """
# First the case that they intersect
line = (up(2*e1) ^ up(2*e1 + e3) ^ ninf).normal()
plane = (up(e3) ^ up(e3 + e1) ^ up(e3 + e2) ^ ninf).normal()
point_result = intersect_line_and_plane_to_point(line, plane)
npt.assert_almost_equal(point_result.value, up(e3 + 2*e1).value)
# Next the case where they do not intersect
line = (up(0) ^ up(e1) ^ ninf).normal()
point_result = intersect_line_and_plane_to_point(line, plane)
assert point_result is None
for i in range(200):
line = random_line()
plane = random_plane()
point_result = intersect_line_and_plane_to_point(line, plane)
# draw_objects([line], mv_type='line')
# draw_objects([plane], mv_type='plane', color='rgb(0,255,0)')
# draw_objects([point_result], mv_type='euc_point', color='rgb(255,0,0)')
def test_normalise_n_minus_1(self):
for i in range(500):
mv = np.random.rand() * random_conformal_point()
mv_normed = normalise_n_minus_1(mv)
npt.assert_almost_equal((mv_normed | ninf)[()], -1.0)
def test_get_properties_of_sphere(self):
for i in range(100):
# Make a sphere
scale_factor = np.random.rand()
sphere = (up(scale_factor * e1) ^ up(-scale_factor * e1) ^ up(scale_factor * e3) ^ up(
scale_factor * e2)).normal()
# Translate it
rand_trans = random_euc_mv()
trans_rot = generate_translation_rotor(rand_trans)
sphere = (trans_rot * sphere * ~trans_rot).normal()
center = get_center_from_sphere(sphere)
radius = get_radius_from_sphere(sphere)
assert_allclose(down(center).value, rand_trans.value)
npt.assert_almost_equal(radius, scale_factor)
def test_point_pair_to_end_points(self):
for i in range(100):
point_a = random_conformal_point()
point_b = random_conformal_point()
pp = (point_a ^ point_b).normal()
p_a, p_b = point_pair_to_end_points(pp)
assert_allclose(p_a.value, point_a.value)
assert_allclose(p_b.value, point_b.value)
def test_euc_distance(self):
for i in range(100):
point_a = random_conformal_point()
point_b = random_conformal_point()
dist = euc_dist(point_a, point_b)
dist_alt = float(abs(down(point_a) - down(point_b)))
assert_allclose(dist, dist_alt)
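# Added commentary: `up()` embeds a Euclidean vector as a conformal point and `down()` is
# its inverse, so the comparison above confirms that euc_dist between conformal points
# matches the ordinary Euclidean distance between the corresponding 3D vectors.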
def test_dilation_rotor(self):
for i in range(100):
scale = 2 * np.random.rand()
r = generate_dilation_rotor(scale)
sphere = random_sphere()
radius = get_radius_from_sphere(sphere)
sphere2 = (r * sphere * ~r).normal()
radius2 = get_radius_from_sphere(sphere2)
npt.assert_almost_equal(scale, radius2 / radius)
def test_calculate_S_over_mu_general(self, obj_gen):
# Repeats for each fuzz test
n_repeats = 100
# Test the general case
for i in range(n_repeats):
X1 = obj_gen()
X2 = obj_gen()
S = calculate_S_over_mu(X1, X2)
X3 = -S*(X1 + X2)
X4 = average_objects([X1, X2], [0.5, 0.5]).normal()
if sum(np.abs((X3 + X4).value)) < 0.000001:
print(' SIGN FLIP')
X4 = -X4
try:
assert_allclose(X3.value, X4.value)
except AssertionError:
print(X3)
print(X4)
raise
def test_general_rotor_between_objects(self, obj_gen):
# Repeats for each fuzz test
n_repeats = 1000
# Test the general case
for i in range(n_repeats):
C1 = obj_gen()
C2 = obj_gen()
R = rotor_between_objects(C1, C2)
C3 = (R * C1 * ~R).normal()
if sum(np.abs((C2 + C3).value)) < 0.0001:
print('SIGN FLIP ', obj_gen.__name__)
C3 = -C3
try:
assert_allclose(C2.value, C3.value)
except AssertionError:
print(R)
print(C2*C1 + C1*C2)
raise
@pytest.mark.parametrize(('obj_gen', 'grd'), [
(random_point_pair, 2),
(random_circle, 3),
pytest.param(random_sphere, 4, marks=[
# gh-104
pytest.mark.xfail(reason="Non-deterministic, sometimes fails", strict=False),
])
])
def test_motor_between_rounds(self, obj_gen, grd):
# Repeats for each fuzz test
n_repeats = 1000
# Test the general case
for i in range(n_repeats):
C1 = obj_gen()
Rt = random_rotation_translation_rotor()
C2 = (Rt * C1 * ~Rt)(grd).normal()
R = motor_between_rounds(C1, C2)
C3 = (R * C1 * ~R)(grd).normal()
if sum(np.abs((C2 + C3).value)) < 0.0001:
print('SIGN FLIP ', obj_gen.__name__)
C3 = -C3
try:
assert_allclose(C2.value, C3.value)
except AssertionError:
print(C2.normal())
print(C3.normal())
raise
# @pytest.mark.skip(reason="unknown") # Skip this because we know that it is a breaking case
def test_general_rotor_between_objects_specific_cases(self):
C1 = -(2.48651^e1234) - (2.48651^e1235) - (1.0^e1245) + (3e-05^e1345) - (0.0^e2345)
C2 = -(25.8135^e1234) - (25.8135^e1235) + (1.0^e1245) - (3e-05^e1345) - (0.0^e2345)
R = rotor_between_objects(C1, C2)
C3 = (R * C1 * ~R).normal()
if sum(np.abs((C2 + C3).value)) < 0.0001:
C3 = -C3
assert_allclose(C2.value, C3.value)
# @pytest.mark.skip(reason="unknown") # Skip this because we know that it is a breaking case
def test_rotor_between_non_overlapping_spheres(self):
C1 = random_sphere()
rad = get_radius_from_sphere(C1)
t_r = generate_translation_rotor(2.5*rad*e1)
C2 = (t_r * C1 * ~t_r)(4).normal()
rad2 = get_radius_from_sphere(C2)
R = rotor_between_objects(C1, C2)
C3 = (R * C1 * ~R).normal()
if sum(np.abs((C2 + C3).value)) < 0.0001:
print('SIGN FLIP ')
C3 = -C3
assert_allclose(C2.value, C3.value)
@too_slow_without_jit
class TestRotorEstimation:
def run_rotor_estimation(self, object_generator, estimation_function,
n_runs=20, n_objects_per_run=10):
error_count = 0
for i in range(n_runs):
query_model = [object_generator().normal() for i in range(n_objects_per_run)]
r = (generate_translation_rotor(random_euc_mv(l_max=0.01)) * generate_rotation_rotor(np.random.randn() / 10,
random_euc_mv().normal(),
random_euc_mv().normal())).normal()
reference_model = [(r * l * ~r).normal() for l in query_model]
r_est = estimation_function(reference_model, query_model)
error_flag = False
for a, b in zip([(r_est * l * ~r_est).normal() for l in query_model], reference_model):
if abs(a + b) < 0.0001:
c = -b
print('SIGN FLIP')
else:
c = b
if np.any(np.abs(a.value - c.value) > 0.01):
error_flag = True
if error_flag:
error_count += 1
print(i, error_count)
print('\n\nESTIMATION SUMMARY')
print('OBJECTS ', n_objects_per_run)
print('RUNS ', n_runs)
print('ERRORS ', error_count)
print('ERROR percentage ', 100 * error_count / float(n_runs), '%')
def test_de_keninck_twist(self):
X = MVArray([random_conformal_point() for i in range(100)])
R = random_rotation_rotor()
noise_std = 0.0
Y = MVArray([normalise_n_minus_1(apply_rotor(x, random_translation_rotor(noise_std) * R)) for x in X])
res = de_keninck_twist(Y, X)
try:
assert_allclose(R.value, res.value)
except AssertionError:
assert_allclose(R.value, -res.value)
def test_direct_TRS_extraction(self):
X = MVArray([random_conformal_point() for i in range(100)])
R = (random_rotation_translation_rotor(maximum_translation=100) * generate_dilation_rotor(
0.5 + 2 * np.random.rand())).normal()
noise_std = 0.0
Y = MVArray([normalise_n_minus_1(apply_rotor(x, random_translation_rotor(noise_std) * R)) for x in X])
res = direct_TRS_extraction(Y, X)
try:
assert_allclose(R.value, res.value)
except AssertionError:
assert_allclose(R.value, -res.value)
def test_dorst_motor_points(self):
X = MVArray([random_conformal_point() for i in range(100)])
R = random_rotation_translation_rotor(maximum_translation=100)
noise_std = 0.0
Y = MVArray([normalise_n_minus_1(apply_rotor(x, random_translation_rotor(noise_std) * R)) for x in X])
res = dorst_motor_estimate(Y, X)
try:
assert_allclose(R.value, res.value)
except AssertionError:
assert_allclose(R.value, -res.value)
@pytest.mark.parametrize('obj_gen', [
random_line,
random_circle,
random_point_pair,
random_plane,
random_sphere,
])
def test_dorst_motor_estimate(self, obj_gen):
self.run_rotor_estimation(obj_gen, dorst_motor_estimate)
def test_estimate_rotor_lines_average_then_opt(self):
def estimation_func(pp_list_a, pp_list_b):
r_start = average_estimator(pp_list_a, pp_list_b)
query_start = [apply_rotor(b, r_start)(3).normal() for b in pp_list_b]
r_est, costs = estimate_rotor_objects(pp_list_a, query_start)
return (r_est*r_start).normal()
self.run_rotor_estimation(random_line, estimation_func)
@pytest.mark.parametrize('obj_gen', [
random_line,
random_circle,
random_point_pair,
random_plane,
pytest.param(random_sphere, marks=pytest.mark.skip(reason="unknown")),
])
def test_estimate_motor_optimisation(self, obj_gen):
def estimation_func(pp_list_a, pp_list_b):
r_est, costs = estimate_rotor_objects(pp_list_a, pp_list_b, motor=True)
return r_est
self.run_rotor_estimation(obj_gen, estimation_func)
@pytest.mark.parametrize('obj_gen', [
random_line,
random_circle,
random_point_pair,
random_plane,
random_sphere,
])
def test_estimate_rotor_optimisation(self, obj_gen):
def estimation_func(pp_list_a, pp_list_b):
r_est, costs = estimate_rotor_objects(pp_list_a, pp_list_b)
return r_est
self.run_rotor_estimation(obj_gen, estimation_func)
@pytest.mark.parametrize('obj_gen', [
random_line,
random_circle,
pytest.param(random_point_pair, marks=pytest.mark.skip(reason="unknown")),
random_plane,
random_sphere
])
def test_estimate_rotor_sequential(self, obj_gen):
def estimation_func(pp_list_a, pp_list_b):
r_est, exit_flag = sequential_object_rotor_estimation(pp_list_a, pp_list_b)
print(exit_flag)
return r_est
self.run_rotor_estimation(obj_gen, estimation_func)
@pytest.mark.skip(reason="unknown")
def test_estimate_rotor_circles_sequential_then_opt(self):
def estimation_func(pp_list_a, pp_list_b):
r_est_1, exit_flag = sequential_object_rotor_estimation(pp_list_a, pp_list_b)
r_est_2 = 1.0
if exit_flag == 1:
object_set_a = [apply_rotor(l, r_est_1).normal() for l in pp_list_a]
r_est_2, costs = estimate_rotor_objects(object_set_a, pp_list_b)
return r_est_2 * r_est_1
self.run_rotor_estimation(random_circle, estimation_func)
@too_slow_without_jit
class TestSceneSimplification:
def test_simplify_recursive(self):
object_generator = random_line
n_clusters = 3
n_objects_per_cluster = 5
threshold = 0.5
all_objects, object_clusters = generate_n_clusters(object_generator,
n_clusters,
n_objects_per_cluster)
all_object_copy = [o for o in all_objects]
all_object_copy = simplify_scene_recursive(all_object_copy, threshold)
print(n_clusters)
# assert len(all_object_copy) == n_clusters
def test_simplify_scene(self):
object_generator = random_line
n_clusters = 3
n_objects_per_cluster = 5
threshold = 2.0
all_objects, object_clusters = generate_n_clusters(object_generator,
n_clusters,
n_objects_per_cluster)
all_object_copy1 = [o for o in all_objects]
all_object_copy1 = simplify_scene(all_object_copy1, threshold)
print(len(all_object_copy1))
# assert len(all_object_copy) == n_clusters
all_object_copy2 = [o for o in all_objects]
all_object_copy2 = simplify_scene(all_object_copy2, threshold)
print(len(all_object_copy2))
draw_objects(all_object_copy1)
draw_objects(all_object_copy2, color='rgb(255,0,0)')
@too_slow_without_jit
class TestObjectClustering:
def run_n_clusters(self, object_generator, n_clusters, n_objects_per_cluster, n_shotgunning):
all_objects, object_clusters = generate_n_clusters(object_generator, n_clusters, n_objects_per_cluster)
[new_labels, centroids, start_labels, start_centroids] = n_clusters_objects(n_clusters, all_objects,
initial_centroids=None,
n_shotgunning=n_shotgunning,
averaging_method='unweighted')
return all_objects, new_labels, centroids
def test_clustering_point_pairs(self):
object_generator = random_point_pair
n_clusters = 3
n_objects_per_cluster = 10
n_shotgunning = 60
all_objects, labels, centroids = self.run_n_clusters(object_generator, n_clusters,
n_objects_per_cluster, n_shotgunning)
sc = visualise_n_clusters(all_objects, centroids, labels, object_type='point_pair',
color_1=np.array([255, 0, 0]), color_2=np.array([0, 255, 0]))
print(sc)
def test_clustering_lines(self):
object_generator = random_line
n_clusters = 3
n_objects_per_cluster = 10
n_shotgunning = 60
all_objects, labels, centroids = self.run_n_clusters(object_generator, n_clusters,
n_objects_per_cluster, n_shotgunning)
sc = visualise_n_clusters(all_objects, centroids, labels, object_type='line',
color_1=np.array([255, 0, 0]), color_2=np.array([0, 255, 0]))
print(sc)
def test_clustering_circles(self):
object_generator = random_circle
n_clusters = 3
n_objects_per_cluster = 10
n_shotgunning = 60
all_objects, labels, centroids = self.run_n_clusters(object_generator, n_clusters,
n_objects_per_cluster, n_shotgunning)
sc = visualise_n_clusters(all_objects, centroids, labels, object_type='circle',
color_1=np.array([255, 0, 0]), color_2=np.array([0, 255, 0]))
print(sc)
def test_clustering_spheres(self):
object_generator = random_sphere
n_clusters = 3
n_objects_per_cluster = 10
n_shotgunning = 60
all_objects, labels, centroids = self.run_n_clusters(object_generator, n_clusters,
n_objects_per_cluster, n_shotgunning)
sc = visualise_n_clusters(all_objects, centroids, labels, object_type='sphere',
color_1=np.array([255, 0, 0]), color_2=np.array([0, 255, 0]))
print(sc)
def test_clustering_planes(self):
object_generator = random_plane
n_clusters = 3
n_objects_per_cluster = 10
n_shotgunning = 60
all_objects, labels, centroids = self.run_n_clusters(object_generator, n_clusters,
n_objects_per_cluster, n_shotgunning)
sc = visualise_n_clusters(all_objects, centroids, labels, object_type='plane',
color_1=np.array([255, 0, 0]), color_2=np.array([0, 255, 0]))
print(sc)
@pytest.mark.parametrize('obj_gen', [
random_point_pair,
random_line,
random_circle,
random_plane,
random_sphere
])
def test_assign_objects_to_objects(self, obj_gen):
n_repeats = 5
for i in range(n_repeats):
object_set_a = [obj_gen() for i in range(20)]
object_set_b = [l for l in object_set_a]
label_a, costs_a = assign_measurements_to_objects_matrix(object_set_a, object_set_b)
npt.assert_equal(label_a, np.array(range(len(label_a))))
n_repeats = 5
for i in range(n_repeats):
r = random_rotation_translation_rotor(0.001, np.pi / 32)
object_set_a = [obj_gen() for i in range(20)]
object_set_b = [l for l in object_set_a]
label_a, costs_a = assign_measurements_to_objects_matrix(object_set_a, object_set_b)
npt.assert_equal(label_a, np.array(range(len(label_a))))
@too_slow_without_jit
class TestModelMatching:
@pytest.mark.veryslow
def test_fingerprint_match(self):
object_generator = random_line
n_objects_per_cluster = 20
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
sum_p = 0
n_runs = 20
for i in range(n_runs):
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=10, maximum_angle=np.pi / 2).normal()
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
labels, min_costs = match_by_fingerprint(target, cluster_objects)
pcorrect = 100*np.sum([l == i for i, l in enumerate(labels)])/n_objects_per_cluster
sum_p += pcorrect
print('Percent correct: ', pcorrect)
print('av_p_correct ', sum_p/n_runs)
print('\n', flush=True)
def test_iterative_model_match_line_optimised(self):
object_generator = random_line
n_objects_per_cluster = 20
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
error_count = 0
n_iterations = 30
n_runs = 5
for i in range(n_runs):
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=2, maximum_angle=np.pi / 8)
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
labels, costs, r_est = iterative_model_match(target, cluster_objects, n_iterations, object_type='lines')
try:
assert np.sum(labels == range(n_objects_per_cluster)) == n_objects_per_cluster
except AssertionError:
print(disturbance_rotor)
print(r_est)
error_count += 1
print('Correct fraction: ', 1.0 - error_count / n_runs)
def test_iterative_model_match(self):
object_generator = random_line
n_objects_per_cluster = 20
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
error_count = 0
n_iterations = 30
n_runs = 5
for i in range(n_runs):
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=2, maximum_angle=np.pi / 8).normal()
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
labels, costs, r_est = iterative_model_match(target, cluster_objects, n_iterations, object_type='generic')
r_est = r_est.normal()
try:
assert np.sum(labels == range(n_objects_per_cluster)) == n_objects_per_cluster
except AssertionError:
print(disturbance_rotor)
print(r_est)
error_count += 1
print('Correct fraction: ', 1.0 - error_count / n_runs)
@pytest.mark.skip(reason="unknown")
def test_iterative_model_match_cuda(self):
object_generator = random_line
n_objects_per_cluster = 20
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
error_count = 0
n_runs = 5
for i in range(n_runs):
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=2, maximum_angle=np.pi / 8)
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
labels, costs, r_est = iterative_model_match(target, cluster_objects, 30,
object_type='generic', cuda=True)
try:
assert np.sum(labels == range(n_objects_per_cluster)) == n_objects_per_cluster
except AssertionError:
print(disturbance_rotor)
print(r_est)
error_count += 1
print('Correct fraction: ', 1.0 - error_count / n_runs)
def test_iterative_model_match_sequential(self):
object_generator = random_line
n_objects_per_cluster = 20
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
error_count = 0
n_runs = 5
for i in range(n_runs):
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=2, maximum_angle=np.pi / 8)
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
labels, costs, r_est = iterative_model_match_sequential(target, cluster_objects, 30, object_type='generic')
try:
assert np.sum(labels == range(n_objects_per_cluster)) == n_objects_per_cluster
except AssertionError:
print(disturbance_rotor)
print(r_est)
error_count += 1
print('Correct fraction: ', 1.0 - error_count / n_runs)
@pytest.mark.skip(reason="unknown")
def test_iterative_model_match_sequential_cuda(self):
object_generator = random_line
n_objects_per_cluster = 20
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
error_count = 0
n_runs = 5
for i in range(n_runs):
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=2, maximum_angle=np.pi / 8)
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
labels, costs, r_est = iterative_model_match_sequential(target, cluster_objects, 30,
object_type='generic', cuda=True)
try:
assert np.sum(labels == range(n_objects_per_cluster)) == n_objects_per_cluster
except AssertionError:
print(disturbance_rotor)
print(r_est)
error_count += 1
print('Correct fraction: ', 1.0 - error_count / n_runs)
@pytest.mark.skip(reason="unknown")
def test_REFORM(self):
object_generator = random_line
n_objects_per_cluster = 20
objects_per_sample = 10
iterations = 30
pool_size = 8
n_samples = 8
error_count = 0
n_runs = 10
for i in range(n_runs):
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=2, maximum_angle=np.pi / 8)
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
labels, costs, r_est = REFORM(target, cluster_objects, n_samples, objects_per_sample,
iterations, covergence_threshold=0.00000001, pool_size=pool_size)
try:
assert np.sum(labels == range(n_objects_per_cluster)) == n_objects_per_cluster
except AssertionError:
print(disturbance_rotor)
print(r_est)
error_count += 1
print('Correct fraction: ', 1.0 - error_count / n_runs)
@pytest.mark.skip(reason="unknown")
def test_REFORM_sequential(self):
object_generator = random_line
n_objects_per_cluster = 20
objects_per_sample = 10
iterations = 30
pool_size = 8
n_samples = 8
error_count = 0
n_runs = 10
for i in range(n_runs):
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=2, maximum_angle=np.pi / 8)
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
labels, costs, r_est = REFORM_sequential(target, cluster_objects, n_samples, objects_per_sample,
iterations, covergence_threshold=0.00000001, pool_size=pool_size)
try:
assert np.sum(labels == range(n_objects_per_cluster)) == n_objects_per_cluster
except AssertionError:
print(disturbance_rotor)
print(r_est)
error_count += 1
print('Correct fraction: ', 1.0 - error_count / n_runs)
@pytest.mark.skip(reason="unknown")
def test_REFORM_line_optimised(self):
object_generator = random_line
n_objects_per_cluster = 20
objects_per_sample = 5
iterations = 30
pool_size = 8
n_samples = pool_size
error_count = 0
n_runs = 10
for i in range(n_runs):
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=2, maximum_angle=np.pi / 8)
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
labels, costs, r_est = REFORM(target, cluster_objects, n_samples, objects_per_sample,
iterations, covergence_threshold=0.00000001, pool_size=pool_size,
object_type='lines')
try:
assert np.sum(labels == range(n_objects_per_cluster)) == n_objects_per_cluster
except AssertionError:
print(disturbance_rotor)
print(r_est)
error_count += 1
print('Correct fraction: ', 1.0 - error_count / n_runs)
@pytest.mark.skip(reason="unknown")
def test_iterative_model_match_incomplete_query(self):
# Set the generator
object_generator = random_line
n_objects_per_cluster = 100
n_keep = 50
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
error_count = 0
n_runs = 10
for i in range(n_runs):
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=2, maximum_angle=np.pi / 8)
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
# Keep only a fixed number of the query model objects
sample_indices = random.sample(range(n_objects_per_cluster), n_keep)
query_model = [cluster_objects[i] for i in sample_indices]
labels, costs, r_est = iterative_model_match(target, query_model, 30, object_type='generic')
try:
assert np.sum(labels == sample_indices) == n_keep
except AssertionError:
print(disturbance_rotor)
print(r_est)
error_count += 1
print('Correct fraction: ', 1.0 - error_count / n_runs)
@pytest.mark.skip(reason="unknown")
def test_REFORM_incomplete_query(self):
object_generator = random_line
n_objects_per_cluster = 100
n_keep = 50
objects_per_sample = 10
iterations = 30
pool_size = 8
n_samples = 8
error_count = 0
n_runs = 10
for i in range(n_runs):
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=2, maximum_angle=np.pi / 8)
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
# Keep only a fixed number of the query model objects
sample_indices = random.sample(range(n_objects_per_cluster), n_keep)
query_model = [cluster_objects[i] for i in sample_indices]
labels, costs, r_est = REFORM_sequential(target, query_model, n_samples, objects_per_sample,
iterations, covergence_threshold=0.00000001, pool_size=pool_size)
try:
assert np.sum(labels == sample_indices) == n_keep
except AssertionError:
print(disturbance_rotor)
print(r_est)
error_count += 1
print('Correct fraction: ', 1.0 - error_count / n_runs)
| bsd-3-clause | 4,710,371,365,385,834,000 | 38.399854 | 132 | 0.551723 | false |
wummel/linkchecker | third_party/dnspython/tests/set.py | 9 | 5255 | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import unittest
import dns.set
# for convenience
S = dns.set.Set
class SimpleSetTestCase(unittest.TestCase):
def testLen1(self):
s1 = S()
self.failUnless(len(s1) == 0)
def testLen2(self):
s1 = S([1, 2, 3])
self.failUnless(len(s1) == 3)
def testLen3(self):
s1 = S([1, 2, 3, 3, 3])
self.failUnless(len(s1) == 3)
def testUnion1(self):
s1 = S([1, 2, 3])
s2 = S([1, 2, 3])
e = S([1, 2, 3])
self.failUnless(s1 | s2 == e)
def testUnion2(self):
s1 = S([1, 2, 3])
s2 = S([])
e = S([1, 2, 3])
self.failUnless(s1 | s2 == e)
def testUnion3(self):
s1 = S([1, 2, 3])
s2 = S([3, 4])
e = S([1, 2, 3, 4])
self.failUnless(s1 | s2 == e)
def testIntersection1(self):
s1 = S([1, 2, 3])
s2 = S([1, 2, 3])
e = S([1, 2, 3])
self.failUnless(s1 & s2 == e)
def testIntersection2(self):
s1 = S([0, 1, 2, 3])
s2 = S([1, 2, 3, 4])
e = S([1, 2, 3])
self.failUnless(s1 & s2 == e)
def testIntersection3(self):
s1 = S([1, 2, 3])
s2 = S([])
e = S([])
self.failUnless(s1 & s2 == e)
def testIntersection4(self):
s1 = S([1, 2, 3])
s2 = S([5, 4])
e = S([])
self.failUnless(s1 & s2 == e)
def testDifference1(self):
s1 = S([1, 2, 3])
s2 = S([5, 4])
e = S([1, 2, 3])
self.failUnless(s1 - s2 == e)
def testDifference2(self):
s1 = S([1, 2, 3])
s2 = S([])
e = S([1, 2, 3])
self.failUnless(s1 - s2 == e)
def testDifference3(self):
s1 = S([1, 2, 3])
s2 = S([3, 2])
e = S([1])
self.failUnless(s1 - s2 == e)
def testDifference4(self):
s1 = S([1, 2, 3])
s2 = S([3, 2, 1])
e = S([])
self.failUnless(s1 - s2 == e)
def testSubset1(self):
s1 = S([1, 2, 3])
s2 = S([3, 2, 1])
self.failUnless(s1.issubset(s2))
def testSubset2(self):
s1 = S([1, 2, 3])
self.failUnless(s1.issubset(s1))
def testSubset3(self):
s1 = S([])
s2 = S([1, 2, 3])
self.failUnless(s1.issubset(s2))
def testSubset4(self):
s1 = S([1])
s2 = S([1, 2, 3])
self.failUnless(s1.issubset(s2))
def testSubset5(self):
s1 = S([])
s2 = S([])
self.failUnless(s1.issubset(s2))
def testSubset6(self):
s1 = S([1, 4])
s2 = S([1, 2, 3])
self.failUnless(not s1.issubset(s2))
def testSuperset1(self):
s1 = S([1, 2, 3])
s2 = S([3, 2, 1])
self.failUnless(s1.issuperset(s2))
def testSuperset2(self):
s1 = S([1, 2, 3])
self.failUnless(s1.issuperset(s1))
def testSuperset3(self):
s1 = S([1, 2, 3])
s2 = S([])
self.failUnless(s1.issuperset(s2))
def testSuperset4(self):
s1 = S([1, 2, 3])
s2 = S([1])
self.failUnless(s1.issuperset(s2))
def testSuperset5(self):
s1 = S([])
s2 = S([])
self.failUnless(s1.issuperset(s2))
def testSuperset6(self):
s1 = S([1, 2, 3])
s2 = S([1, 4])
self.failUnless(not s1.issuperset(s2))
def testUpdate1(self):
s1 = S([1, 2, 3])
u = (4, 5, 6)
e = S([1, 2, 3, 4, 5, 6])
s1.update(u)
self.failUnless(s1 == e)
def testUpdate2(self):
s1 = S([1, 2, 3])
u = []
e = S([1, 2, 3])
s1.update(u)
self.failUnless(s1 == e)
def testGetitem(self):
s1 = S([1, 2, 3])
i0 = s1[0]
i1 = s1[1]
i2 = s1[2]
s2 = S([i0, i1, i2])
self.failUnless(s1 == s2)
def testGetslice(self):
s1 = S([1, 2, 3])
slice = s1[0:2]
self.failUnless(len(slice) == 2)
item = s1[2]
slice.append(item)
s2 = S(slice)
self.failUnless(s1 == s2)
def testDelitem(self):
s1 = S([1, 2, 3])
del s1[0]
i1 = s1[0]
i2 = s1[1]
self.failUnless(i1 != i2)
self.failUnless(i1 == 1 or i1 == 2 or i1 == 3)
self.failUnless(i2 == 1 or i2 == 2 or i2 == 3)
def testDelslice(self):
s1 = S([1, 2, 3])
del s1[0:2]
i1 = s1[0]
self.failUnless(i1 == 1 or i1 == 2 or i1 == 3)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -4,161,545,223,461,990,000 | 24.264423 | 72 | 0.492864 | false |
siddhuwarrier/lockindicator-applet | src/xkb/XkbWrapper.py | 2 | 14017 | import ctypes.util
import types
import logging.handlers
from typeutils.TypeChecker import require
# Copyright (c) 2010 Siddhu Warrier (http://siddhuwarrier.homelinux.org,
# siddhuwarrier AT gmail DOT com).
#
# This file is part of the xkb package.
# The xkb package is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This code has been produced heavily modifying:
# On screen display for learning the keyboard layout Neo2
# Copyright (c) 2009 Martin Zuther (http://www.mzuther.de/)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Thank you for using free software!
__all__ = ["XkbWrapper"]
## @brief Class for providing a simple Xkb Wrapper.
#
# This class provides a simple XKB wrapper, and has been created by extensively
# refactoring the Neo OSD2 XKB wrapper.
# @ingroup xkb
# @author Siddhu Warrier ([email protected])
# @date 31/01/2009.
class XkbWrapper:
##@brief XkbOpenDisplay error codes as a dictionary
# See http://linux.die.net/man/3/xkbopendisplay for more details.
# The values for these are obtained from file /usr/include/X11/XKBlib.h (Ubuntu 9.04):
#these error codes are not visible in a __dict__(). Best we can do for obj abstraction
#in Py, or so methinks.
__errcodes_xkbOpenDisplay = {
"Xkb0D_Success": 0, #success - XkbOpenDisplay worked!
"XkbOD_BadLibraryVersion": 1, #XkbLibraryVersion returned False.
"XkbOD_ConnectionRefused": 2, #the display could not be opened.
"XkbOD_NonXkbServer": 3, #the library and the server have incompatible extension versions.
"XkbOD_BadServerVersion": 4 #the extension is not present in the X server.
}
##@brief XKB constants as a dictionary
constants_xkb = {"XkbUseCoreKbd":0x0100}
## @brief XkbWrapper constructor. Extensively refactored from Neo OSD2.
#
# This constructor maps the C functions to Python equivalents, and thereby
# sets stuff up for future calls.
#
# @date 31/01/2010
def __init__(self):
#set the logger up
self.logger = logging.getLogger("utils")
self.logger.setLevel(logging.FATAL) #by default, only FATAL messages are processed
#add the handler
self.loggerHandler = logging.handlers.RotatingFileHandler("/tmp/logging-utils.log")
#self.loggerHandler = logging.StreamHandler()
#create a formatter
self.loggerFormatter = logging.Formatter("%(asctime)s- %(name)s %(levelname)s: %(message)s")
#set the formatter to the handler
self.loggerHandler.setFormatter(self.loggerFormatter)
#add the handler
self.logger.addHandler(self.loggerHandler)
# dynamically link to "X Keyboard Extension" library
library_xf86misc = ctypes.CDLL(ctypes.util.find_library('Xxf86misc'))
####################################################################################
# Parameter definitions
# define the parameters the function(s) take, and whether they're in, out, or inout.
# 1 => in, 2=> out, 3=>inout
####################################################################################
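# Illustrative aside (added; not part of the original wrapper): with ctypes paramflags,
# a hypothetical prototype taking one input and producing one output would look like
#     example_params = ((1, 'some_input'), (2, 'some_output'))
#     example_func = example_prototype(('SomeCFunction', some_library), example_params)
# so callers pass only 'some_input' and the backfilled output is returned to Python.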
#The prototype of the XkbOpenDisplay function may be found here:
# http://linux.die.net/man/3/xkbopendisplay
xkbOpenDisplay_params = ((1, 'display_name'), (2, 'event_rtrn'),
(2, 'error_rtrn'), (3, 'major_in_out'),
(3, 'minor_in_out'), (2, 'reason_rtrn'))
#The prototype of the XkbGetIndicatorState function may be found here:
# http://linux.die.net/man/3/xkbgetindicatorstate
xkbGetIndicatorState_params = ((1, 'display'), (1, 'device_spec'),(3, 'state_return'))
####################################################################################
# Prototype definitions
#define the prototype; specifying the types of the arguments that should go in and out.
####################################################################################
#define the XkbOpenDisplay prototype
xkbOpenDisplay_prototype = ctypes.CFUNCTYPE(
ctypes.c_uint, #return type
ctypes.c_char_p,#display_name:h/w display name
ctypes.POINTER(ctypes.c_int),#event_rtrn:backfilled with the extension base event code
ctypes.POINTER(ctypes.c_int),#error_rtrn:backfilled with the extension base error code
ctypes.POINTER(ctypes.c_int),#major_in_out:compile time lib major version in, server major version out
ctypes.POINTER(ctypes.c_int),#minor_in_out:compile time lib min version in, server minor version out
ctypes.POINTER(ctypes.c_int))#reason_rtrn:backfilled with a status code
#(see __errcodes_xkbOpenDisplay to see acceptable values)
#define the XkbGetIndicatorState prototype
xkbGetIndicatorState_prototype = ctypes.CFUNCTYPE(
ctypes.c_bool,#return type: Will not work in Python 2.5
ctypes.c_uint,#display: connection to the X server; obtained using xkbOpenDisplay
ctypes.c_uint,#device_spec: device ID, or XkbUseCoreKbd
ctypes.POINTER(ctypes.c_uint))#backfilled with a mask of the indicator state
####################################################################################
# Actual Definitions.
# Define the actual C functions using low-level wrappers.
#This is a hidden method as we want the API to expose
# the high-level python wrapper that performs type checking etc.
####################################################################################
#define XkbOpenDisplay C function
self.__XkbOpenDisplay__ = xkbOpenDisplay_prototype(('XkbOpenDisplay', library_xf86misc),
xkbOpenDisplay_params)
self.__XkbGetIndicatorState__ = xkbGetIndicatorState_prototype(('XkbGetIndicatorState',
library_xf86misc), xkbGetIndicatorState_params)
####################################################################################
# Error Checker methods.
# Add error checkers.
####################################################################################
self.__XkbOpenDisplay__.errcheck = self.errCheck_openDisplayAndInitXkb
## @brief high-level Python function to encapsulate XkbOpenDisplay(...) function.
#
# Opens a connection to an X server, checks for a compatible version of the Xkb extension
# in both the library and the server, and initializes the extension for use.
#
# The equiv C function's prototype may be found here: http://linux.die.net/man/3/xkbopendisplay
# Please note that we are using C-style var names to maintain consistency with the C
# functions it is wrapping. The most important change to this function is using my TypeChecker
# decorator to perform type checking, instead of using boilerplate asserts!
#
# However, the wrapper function name uses CamelCase with the first letter uncapitalised.
#
# @param[in] display_name (NoneType or StringType): The name of the display to connect to.
# @param[in,out] major_in_out (Int): compile time lib major version in, server major version out
# @param[in,out] minor_in_out (Int): compile time lib min version in, server minor version out
# @date 31/01/2010
@require(validKwargs = [], display_name = (types.StringType, types.NoneType), major_in_out = types.IntType, minor_in_out = types.IntType)
def openDisplayAndInitXkb(self, display_name, major_in_out, minor_in_out):
self.logger.info("Opening display...")
# convert function arguments to "ctypes", ...
__display_name__ = ctypes.c_char_p(display_name)
__major_in_out__ = ctypes.c_int(major_in_out)
__minor_in_out__ = ctypes.c_int(minor_in_out)
# ... call low-level function ...
ret = self.__XkbOpenDisplay__(__display_name__, __major_in_out__, \
__minor_in_out__)
# ... and return converted return value and function arguments
self.logger.info("...done")
return {'display_handle': ret[0].value, \
'server_major_version': ret[1][3].value, \
'server_minor_version': ret[1][4].value}
## @brief high-level Python function to encapsulate the XkbGetIndicatorState function.
# Obtains the current state of the keyboard indicators
#
# The equiv C function's prototype may be found here:
# http://linux.die.net/man/3/xkbgetindicatorstate
# Please note that we are using C-style var names to maintain consistency with the C
# functions it is wrapping. The most important change to this function is using my TypeChecker
# decorator to perform type checking, instead of using boilerplate asserts!
#
# However, the wrapper function name uses CamelCase with the first letter uncapitalised.
#
# @param[in] display_handle (LongType): The display handler to connect to
# (get it using openDisplayAndInitXkb).
# @param[in] device_spec (Int): The device spec. By default XkbUseCoreKbd
# (get it using constants_xkb)
# @retval indicatorMask (ctypes.c_ulong): The indicator mask
# (by default on Linux: 1 for Caps Lock on, 2 for Num Lock on)
# @date 31/01/2010
@require(validKwargs = [], display_handle = types.LongType, device_spec = types.IntType)
def getIndicatorStates(self, display_handle, device_spec):
self.logger.info("Getting indicator states...")
# convert function arguments to "ctypes", ...
__display_handle__ = ctypes.c_uint(display_handle)
__device_spec__ = ctypes.c_uint(device_spec)
__state_return = ctypes.c_uint()
# ... call low-level function ...
indicatorMask = self.__XkbGetIndicatorState__(__display_handle__, __device_spec__, __state_return)
#...and return this value
self.logger.info("...done")
return indicatorMask
## @brief Error checker for openDisplayAndInitXkb.
#
# @param[in,out] result
# @param[in] func
# @param[in,out] args
# @date 31/01/2010
def errCheck_openDisplayAndInitXkb(self, result, func, args):
# print debugging information if requested
# function didn't return display handle, so let's see why
# not
self.logger.debug( ' [XkbOpenDisplay]')
self.logger.debug( ' Display: %#010x' % result)
self.logger.debug( ' display_name: %s' % args[0].value)
self.logger.debug( ' event_rtrn: %d' % args[1].value)
self.logger.debug( ' error_rtrn: %d' % args[2].value)
self.logger.debug( ' major_in_out: %d' % args[3].value)
self.logger.debug( ' minor_in_out: %d' % args[4].value)
self.logger.debug( ' reason_rt: %d' % args[5].value)
#resut should normally be the display; 0 indicates epic fail.
if result == 0:
# values were taken from file /usr/include/X11/XKBlib.h (Ubuntu 9.04):
# $XFree86: xc/lib/X11/XKBlib.h,v 3.5 2003/04/17 02:06:31 dawes Exp $ #
errorID = args[5].value
for errorCode in self.__errcodes_xkbOpenDisplay.keys():
if errorID == self.__errcodes_xkbOpenDisplay[errorCode]:
break
self.logger.debug( "Error code" + errorCode)
error_message = '"XkbOpenDisplay" reported an error (%s).'%errorCode
raise OSError(error_message)
# return display handle and all function arguments
return (ctypes.c_uint(result), args)
##@brief Changes logging level and logging handler (optional).
#
#@param logLevel (int): logLevel should be a recognised log level.
#@param handler (logging.handlers): The logging handler.
def changeLoggingPreferences(self, logLevel, handler = None):
self.logger.setLevel(logLevel)
if handler != None:
self.logger.removeHandler(self.loggerHandler)
self.loggerHandler = handler
self.loggerHandler.setFormatter(self.loggerFormatter)
self.logger.addHandler(self.loggerHandler)
self.logger.debug("Changed logger level")
#test exec
if __name__ == "__main__":
xkbWrapper = XkbWrapper()
try:
ret = xkbWrapper.openDisplayAndInitXkb(None, 1, 0)
except OSError as osError:
print osError.args[0]
displayHandle = ret['display_handle']
deviceSpec = xkbWrapper.constants_xkb['XkbUseCoreKbd']
print type(xkbWrapper.getIndicatorStates(displayHandle, deviceSpec))
| gpl-3.0 | 4,055,779,405,065,198,600 | 50.160584 | 142 | 0.624099 | false |
snnn/tensorflow | tensorflow/python/kernel_tests/matrix_solve_op_test.py | 47 | 7968 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_solve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class MatrixSolveOpTest(test.TestCase):
def _verifySolve(self, x, y, batch_dims=None):
for np_type in [np.float32, np.float64, np.complex64, np.complex128]:
if np_type == np.float32 or np_type == np.complex64:
tol = 1e-5
else:
tol = 1e-12
for adjoint in False, True:
if np_type in [np.float32, np.float64]:
a = x.real.astype(np_type)
b = y.real.astype(np_type)
else:
a = x.astype(np_type)
b = y.astype(np_type)
a_np = np.conj(np.transpose(a)) if adjoint else a
if batch_dims is not None:
a = np.tile(a, batch_dims + [1, 1])
a_np = np.tile(a_np, batch_dims + [1, 1])
b = np.tile(b, batch_dims + [1, 1])
np_ans = np.linalg.solve(a_np, b)
for use_placeholder in False, True:
with self.test_session(use_gpu=True) as sess:
if use_placeholder:
a_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
b_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
tf_ans = linalg_ops.matrix_solve(a_ph, b_ph, adjoint=adjoint)
out = sess.run(tf_ans, {a_ph: a, b_ph: b})
else:
tf_ans = linalg_ops.matrix_solve(a, b, adjoint=adjoint)
out = tf_ans.eval()
self.assertEqual(tf_ans.get_shape(), out.shape)
self.assertEqual(np_ans.shape, out.shape)
self.assertAllClose(np_ans, out, atol=tol, rtol=tol)
def _generateMatrix(self, m, n):
matrix = (np.random.normal(-5, 5,
m * n).astype(np.complex128).reshape([m, n]))
matrix.imag = (np.random.normal(-5, 5, m * n).astype(np.complex128).reshape(
[m, n]))
return matrix
def testSolve(self):
for n in 1, 2, 4, 9:
matrix = self._generateMatrix(n, n)
for nrhs in 1, 2, n:
rhs = self._generateMatrix(n, nrhs)
self._verifySolve(matrix, rhs)
def testSolveBatch(self):
for n in 2, 5:
matrix = self._generateMatrix(n, n)
for nrhs in 1, n:
rhs = self._generateMatrix(n, nrhs)
for batch_dims in [[2], [2, 2], [7, 4]]:
self._verifySolve(matrix, rhs, batch_dims=batch_dims)
def testNonSquareMatrix(self):
# When the solve of a non-square matrix is attempted we should return
# an error
with self.test_session(use_gpu=True):
with self.assertRaises(ValueError):
matrix = constant_op.constant([[1., 2., 3.], [3., 4., 5.]])
linalg_ops.matrix_solve(matrix, matrix)
def testWrongDimensions(self):
# The matrix and right-hand sides should have the same number of rows.
with self.test_session(use_gpu=True):
matrix = constant_op.constant([[1., 0.], [0., 1.]])
rhs = constant_op.constant([[1., 0.]])
with self.assertRaises(ValueError):
linalg_ops.matrix_solve(matrix, rhs)
def testNotInvertible(self):
# The input should be invertible.
with self.test_session(use_gpu=True):
with self.assertRaisesOpError("Input matrix is not invertible."):
# All rows of the matrix below add to zero
matrix = constant_op.constant([[1., 0., -1.], [-1., 1., 0.],
[0., -1., 1.]])
linalg_ops.matrix_solve(matrix, matrix).eval()
def testConcurrent(self):
with self.test_session(use_gpu=True) as sess:
all_ops = []
for adjoint_ in False, True:
lhs1 = random_ops.random_normal([3, 3], seed=42)
lhs2 = random_ops.random_normal([3, 3], seed=42)
rhs1 = random_ops.random_normal([3, 3], seed=42)
rhs2 = random_ops.random_normal([3, 3], seed=42)
s1 = linalg_ops.matrix_solve(lhs1, rhs1, adjoint=adjoint_)
s2 = linalg_ops.matrix_solve(lhs2, rhs2, adjoint=adjoint_)
all_ops += [s1, s2]
val = sess.run(all_ops)
self.assertAllEqual(val[0], val[1])
self.assertAllEqual(val[2], val[3])
class MatrixSolveBenchmark(test.Benchmark):
matrix_shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1001, 1001),
(1024, 1024),
(2048, 2048),
(513, 4, 4),
(513, 16, 16),
(513, 256, 256),
]
def _GenerateTestData(self, matrix_shape, num_rhs):
batch_shape = matrix_shape[:-2]
matrix_shape = matrix_shape[-2:]
assert matrix_shape[0] == matrix_shape[1]
n = matrix_shape[0]
matrix = (np.ones(matrix_shape).astype(np.float32) /
(2.0 * n) + np.diag(np.ones(n).astype(np.float32)))
rhs = np.ones([n, num_rhs]).astype(np.float32)
matrix = variables.Variable(
np.tile(matrix, batch_shape + (1, 1)), trainable=False)
rhs = variables.Variable(
np.tile(rhs, batch_shape + (1, 1)), trainable=False)
return matrix, rhs
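# Added note: the matrix generated above is all-ones/(2*n) plus the identity, making it
# strictly diagonally dominant (each diagonal entry 1 + 1/(2n) exceeds the off-diagonal
# row sum (n-1)/(2n)), so the benchmark always solves a well-conditioned, invertible system.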
def benchmarkMatrixSolveOp(self):
run_gpu_test = test.is_gpu_available(True)
for adjoint in False, True:
for matrix_shape in self.matrix_shapes:
for num_rhs in 1, 2, matrix_shape[-1]:
with ops.Graph().as_default(), \
session.Session() as sess, \
ops.device("/cpu:0"):
matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
min_iters=25,
store_memory_usage=False,
name=("matrix_solve_cpu_shape_{matrix_shape}_num_rhs_{num_rhs}_"
"adjoint_{adjoint}").format(
matrix_shape=matrix_shape,
num_rhs=num_rhs,
adjoint=adjoint))
if run_gpu_test:
with ops.Graph().as_default(), \
session.Session() as sess, \
ops.device("/gpu:0"):
matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
min_iters=25,
store_memory_usage=False,
name=("matrix_solve_gpu_shape_{matrix_shape}_num_rhs_"
"{num_rhs}_adjoint_{adjoint}").format(
matrix_shape=matrix_shape, num_rhs=num_rhs,
adjoint=adjoint))
if __name__ == "__main__":
test.main()
| apache-2.0 | 7,698,070,976,769,442,000 | 37.868293 | 80 | 0.584086 | false |
boyxuper/urllib3 | test/test_util.py | 3 | 17347 | import hashlib
import warnings
import logging
import unittest
import ssl
from itertools import chain
from mock import patch, Mock
from urllib3 import add_stderr_logger, disable_warnings
from urllib3.util.request import make_headers
from urllib3.util.timeout import Timeout
from urllib3.util.url import (
get_host,
parse_url,
split_first,
Url,
)
from urllib3.util.ssl_ import (
resolve_cert_reqs,
ssl_wrap_socket,
_const_compare_digest_backport,
)
from urllib3.exceptions import (
LocationParseError,
TimeoutStateError,
InsecureRequestWarning,
SSLError,
SNIMissingWarning,
)
from urllib3.util import is_fp_closed, ssl_
from . import clear_warnings
# This number represents a time in seconds; it doesn't mean anything in
# isolation. Setting it to a high-ish value avoids conflicts with the smaller
# numbers used for timeouts.
TIMEOUT_EPOCH = 1000
class TestUtil(unittest.TestCase):
def test_get_host(self):
url_host_map = {
# Hosts
'http://google.com/mail': ('http', 'google.com', None),
'http://google.com/mail/': ('http', 'google.com', None),
'google.com/mail': ('http', 'google.com', None),
'http://google.com/': ('http', 'google.com', None),
'http://google.com': ('http', 'google.com', None),
'http://www.google.com': ('http', 'www.google.com', None),
'http://mail.google.com': ('http', 'mail.google.com', None),
'http://google.com:8000/mail/': ('http', 'google.com', 8000),
'http://google.com:8000': ('http', 'google.com', 8000),
'https://google.com': ('https', 'google.com', None),
'https://google.com:8000': ('https', 'google.com', 8000),
'http://user:[email protected]:1234': ('http', '127.0.0.1', 1234),
'http://google.com/foo=http://bar:42/baz': ('http', 'google.com', None),
'http://google.com?foo=http://bar:42/baz': ('http', 'google.com', None),
'http://google.com#foo=http://bar:42/baz': ('http', 'google.com', None),
# IPv4
'173.194.35.7': ('http', '173.194.35.7', None),
'http://173.194.35.7': ('http', '173.194.35.7', None),
'http://173.194.35.7/test': ('http', '173.194.35.7', None),
'http://173.194.35.7:80': ('http', '173.194.35.7', 80),
'http://173.194.35.7:80/test': ('http', '173.194.35.7', 80),
# IPv6
'[2a00:1450:4001:c01::67]': ('http', '[2a00:1450:4001:c01::67]', None),
'http://[2a00:1450:4001:c01::67]': ('http', '[2a00:1450:4001:c01::67]', None),
'http://[2a00:1450:4001:c01::67]/test': ('http', '[2a00:1450:4001:c01::67]', None),
'http://[2a00:1450:4001:c01::67]:80': ('http', '[2a00:1450:4001:c01::67]', 80),
'http://[2a00:1450:4001:c01::67]:80/test': ('http', '[2a00:1450:4001:c01::67]', 80),
# More IPv6 from http://www.ietf.org/rfc/rfc2732.txt
'http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:8000/index.html': ('http', '[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]', 8000),
'http://[1080:0:0:0:8:800:200C:417A]/index.html': ('http', '[1080:0:0:0:8:800:200C:417A]', None),
'http://[3ffe:2a00:100:7031::1]': ('http', '[3ffe:2a00:100:7031::1]', None),
'http://[1080::8:800:200C:417A]/foo': ('http', '[1080::8:800:200C:417A]', None),
'http://[::192.9.5.5]/ipng': ('http', '[::192.9.5.5]', None),
'http://[::FFFF:129.144.52.38]:42/index.html': ('http', '[::FFFF:129.144.52.38]', 42),
'http://[2010:836B:4179::836B:4179]': ('http', '[2010:836B:4179::836B:4179]', None),
}
for url, expected_host in url_host_map.items():
returned_host = get_host(url)
self.assertEqual(returned_host, expected_host)
def test_invalid_host(self):
# TODO: Add more tests
invalid_host = [
'http://google.com:foo',
'http://::1/',
'http://::1:80/',
]
for location in invalid_host:
self.assertRaises(LocationParseError, get_host, location)
parse_url_host_map = {
'http://google.com/mail': Url('http', host='google.com', path='/mail'),
'http://google.com/mail/': Url('http', host='google.com', path='/mail/'),
'http://google.com/mail': Url('http', host='google.com', path='mail'),
'google.com/mail': Url(host='google.com', path='/mail'),
'http://google.com/': Url('http', host='google.com', path='/'),
'http://google.com': Url('http', host='google.com'),
'http://google.com?foo': Url('http', host='google.com', path='', query='foo'),
# Path/query/fragment
'': Url(),
'/': Url(path='/'),
'#?/!google.com/?foo#bar': Url(path='', fragment='?/!google.com/?foo#bar'),
'/foo': Url(path='/foo'),
'/foo?bar=baz': Url(path='/foo', query='bar=baz'),
'/foo?bar=baz#banana?apple/orange': Url(path='/foo', query='bar=baz', fragment='banana?apple/orange'),
# Port
'http://google.com/': Url('http', host='google.com', path='/'),
'http://google.com:80/': Url('http', host='google.com', port=80, path='/'),
'http://google.com:80': Url('http', host='google.com', port=80),
# Auth
'http://foo:bar@localhost/': Url('http', auth='foo:bar', host='localhost', path='/'),
'http://foo@localhost/': Url('http', auth='foo', host='localhost', path='/'),
'http://foo:bar@baz@localhost/': Url('http', auth='foo:bar@baz', host='localhost', path='/'),
'http://@': Url('http', host=None, auth='')
}
non_round_tripping_parse_url_host_map = {
# Path/query/fragment
'?': Url(path='', query=''),
'#': Url(path='', fragment=''),
# Empty Port
'http://google.com:': Url('http', host='google.com'),
'http://google.com:/': Url('http', host='google.com', path='/'),
}
def test_parse_url(self):
for url, expected_Url in chain(self.parse_url_host_map.items(), self.non_round_tripping_parse_url_host_map.items()):
returned_Url = parse_url(url)
self.assertEqual(returned_Url, expected_Url)
def test_unparse_url(self):
for url, expected_Url in self.parse_url_host_map.items():
self.assertEqual(url, expected_Url.url)
def test_parse_url_invalid_IPv6(self):
self.assertRaises(ValueError, parse_url, '[::1')
def test_Url_str(self):
U = Url('http', host='google.com')
self.assertEqual(str(U), U.url)
def test_request_uri(self):
url_host_map = {
'http://google.com/mail': '/mail',
'http://google.com/mail/': '/mail/',
'http://google.com/': '/',
'http://google.com': '/',
'': '/',
'/': '/',
'?': '/?',
'#': '/',
'/foo?bar=baz': '/foo?bar=baz',
}
for url, expected_request_uri in url_host_map.items():
returned_url = parse_url(url)
self.assertEqual(returned_url.request_uri, expected_request_uri)
def test_netloc(self):
url_netloc_map = {
'http://google.com/mail': 'google.com',
'http://google.com:80/mail': 'google.com:80',
'google.com/foobar': 'google.com',
'google.com:12345': 'google.com:12345',
}
for url, expected_netloc in url_netloc_map.items():
self.assertEqual(parse_url(url).netloc, expected_netloc)
def test_make_headers(self):
self.assertEqual(
make_headers(accept_encoding=True),
{'accept-encoding': 'gzip,deflate'})
self.assertEqual(
make_headers(accept_encoding='foo,bar'),
{'accept-encoding': 'foo,bar'})
self.assertEqual(
make_headers(accept_encoding=['foo', 'bar']),
{'accept-encoding': 'foo,bar'})
self.assertEqual(
make_headers(accept_encoding=True, user_agent='banana'),
{'accept-encoding': 'gzip,deflate', 'user-agent': 'banana'})
self.assertEqual(
make_headers(user_agent='banana'),
{'user-agent': 'banana'})
self.assertEqual(
make_headers(keep_alive=True),
{'connection': 'keep-alive'})
self.assertEqual(
make_headers(basic_auth='foo:bar'),
{'authorization': 'Basic Zm9vOmJhcg=='})
self.assertEqual(
make_headers(proxy_basic_auth='foo:bar'),
{'proxy-authorization': 'Basic Zm9vOmJhcg=='})
self.assertEqual(
make_headers(disable_cache=True),
{'cache-control': 'no-cache'})
def test_split_first(self):
test_cases = {
('abcd', 'b'): ('a', 'cd', 'b'),
('abcd', 'cb'): ('a', 'cd', 'b'),
('abcd', ''): ('abcd', '', None),
('abcd', 'a'): ('', 'bcd', 'a'),
('abcd', 'ab'): ('', 'bcd', 'a'),
}
for input, expected in test_cases.items():
output = split_first(*input)
self.assertEqual(output, expected)
def test_add_stderr_logger(self):
handler = add_stderr_logger(level=logging.INFO) # Don't actually print debug
logger = logging.getLogger('urllib3')
self.assertTrue(handler in logger.handlers)
logger.debug('Testing add_stderr_logger')
logger.removeHandler(handler)
def test_disable_warnings(self):
with warnings.catch_warnings(record=True) as w:
clear_warnings()
warnings.warn('This is a test.', InsecureRequestWarning)
self.assertEqual(len(w), 1)
disable_warnings()
warnings.warn('This is a test.', InsecureRequestWarning)
self.assertEqual(len(w), 1)
def _make_time_pass(self, seconds, timeout, time_mock):
""" Make some time pass for the timeout object """
time_mock.return_value = TIMEOUT_EPOCH
timeout.start_connect()
time_mock.return_value = TIMEOUT_EPOCH + seconds
return timeout
def test_invalid_timeouts(self):
try:
Timeout(total=-1)
self.fail("negative value should throw exception")
except ValueError as e:
self.assertTrue('less than' in str(e))
try:
Timeout(connect=2, total=-1)
self.fail("negative value should throw exception")
except ValueError as e:
self.assertTrue('less than' in str(e))
try:
Timeout(read=-1)
self.fail("negative value should throw exception")
except ValueError as e:
self.assertTrue('less than' in str(e))
        # Booleans are also allowed by socket.settimeout and are converted to the
# equivalent float (1.0 for True, 0.0 for False)
Timeout(connect=False, read=True)
try:
Timeout(read="foo")
self.fail("string value should not be allowed")
except ValueError as e:
self.assertTrue('int or float' in str(e))
@patch('urllib3.util.timeout.current_time')
def test_timeout(self, current_time):
timeout = Timeout(total=3)
# make 'no time' elapse
timeout = self._make_time_pass(seconds=0, timeout=timeout,
time_mock=current_time)
self.assertEqual(timeout.read_timeout, 3)
self.assertEqual(timeout.connect_timeout, 3)
timeout = Timeout(total=3, connect=2)
self.assertEqual(timeout.connect_timeout, 2)
timeout = Timeout()
self.assertEqual(timeout.connect_timeout, Timeout.DEFAULT_TIMEOUT)
# Connect takes 5 seconds, leaving 5 seconds for read
timeout = Timeout(total=10, read=7)
timeout = self._make_time_pass(seconds=5, timeout=timeout,
time_mock=current_time)
self.assertEqual(timeout.read_timeout, 5)
# Connect takes 2 seconds, read timeout still 7 seconds
timeout = Timeout(total=10, read=7)
timeout = self._make_time_pass(seconds=2, timeout=timeout,
time_mock=current_time)
self.assertEqual(timeout.read_timeout, 7)
timeout = Timeout(total=10, read=7)
self.assertEqual(timeout.read_timeout, 7)
timeout = Timeout(total=None, read=None, connect=None)
self.assertEqual(timeout.connect_timeout, None)
self.assertEqual(timeout.read_timeout, None)
self.assertEqual(timeout.total, None)
timeout = Timeout(5)
self.assertEqual(timeout.total, 5)
def test_timeout_str(self):
timeout = Timeout(connect=1, read=2, total=3)
self.assertEqual(str(timeout), "Timeout(connect=1, read=2, total=3)")
timeout = Timeout(connect=1, read=None, total=3)
self.assertEqual(str(timeout), "Timeout(connect=1, read=None, total=3)")
@patch('urllib3.util.timeout.current_time')
def test_timeout_elapsed(self, current_time):
current_time.return_value = TIMEOUT_EPOCH
timeout = Timeout(total=3)
self.assertRaises(TimeoutStateError, timeout.get_connect_duration)
timeout.start_connect()
self.assertRaises(TimeoutStateError, timeout.start_connect)
current_time.return_value = TIMEOUT_EPOCH + 2
self.assertEqual(timeout.get_connect_duration(), 2)
current_time.return_value = TIMEOUT_EPOCH + 37
self.assertEqual(timeout.get_connect_duration(), 37)
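    # Illustrative sketch (not part of the tests): a Timeout object is normally
    # handed to a pool rather than driven by hand, e.g.
    #   pool = urllib3.HTTPConnectionPool('example.com',
    #                                     timeout=Timeout(connect=2.0, read=7.0))
    # start_connect()/get_connect_duration() above are the internal hooks the
    # pool uses to charge elapsed connect time against a `total` budget.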
def test_resolve_cert_reqs(self):
self.assertEqual(resolve_cert_reqs(None), ssl.CERT_NONE)
self.assertEqual(resolve_cert_reqs(ssl.CERT_NONE), ssl.CERT_NONE)
self.assertEqual(resolve_cert_reqs(ssl.CERT_REQUIRED), ssl.CERT_REQUIRED)
self.assertEqual(resolve_cert_reqs('REQUIRED'), ssl.CERT_REQUIRED)
self.assertEqual(resolve_cert_reqs('CERT_REQUIRED'), ssl.CERT_REQUIRED)
def test_is_fp_closed_object_supports_closed(self):
class ClosedFile(object):
@property
def closed(self):
return True
self.assertTrue(is_fp_closed(ClosedFile()))
def test_is_fp_closed_object_has_none_fp(self):
class NoneFpFile(object):
@property
def fp(self):
return None
self.assertTrue(is_fp_closed(NoneFpFile()))
def test_is_fp_closed_object_has_fp(self):
class FpFile(object):
@property
def fp(self):
return True
self.assertTrue(not is_fp_closed(FpFile()))
def test_is_fp_closed_object_has_neither_fp_nor_closed(self):
class NotReallyAFile(object):
pass
self.assertRaises(ValueError, is_fp_closed, NotReallyAFile())
def test_ssl_wrap_socket_loads_the_cert_chain(self):
socket = object()
mock_context = Mock()
ssl_wrap_socket(ssl_context=mock_context, sock=socket,
certfile='/path/to/certfile')
mock_context.load_cert_chain.assert_called_once_with(
'/path/to/certfile', None)
def test_ssl_wrap_socket_loads_verify_locations(self):
socket = object()
mock_context = Mock()
ssl_wrap_socket(ssl_context=mock_context, ca_certs='/path/to/pem',
sock=socket)
mock_context.load_verify_locations.assert_called_once_with(
'/path/to/pem', None)
def test_ssl_wrap_socket_loads_certificate_directories(self):
socket = object()
mock_context = Mock()
ssl_wrap_socket(ssl_context=mock_context, ca_cert_dir='/path/to/pems',
sock=socket)
mock_context.load_verify_locations.assert_called_once_with(
None, '/path/to/pems')
def test_ssl_wrap_socket_with_no_sni(self):
socket = object()
mock_context = Mock()
# Ugly preservation of original value
HAS_SNI = ssl_.HAS_SNI
ssl_.HAS_SNI = False
ssl_wrap_socket(ssl_context=mock_context, sock=socket)
mock_context.wrap_socket.assert_called_once_with(socket)
ssl_.HAS_SNI = HAS_SNI
def test_ssl_wrap_socket_with_no_sni_warns(self):
socket = object()
mock_context = Mock()
# Ugly preservation of original value
HAS_SNI = ssl_.HAS_SNI
ssl_.HAS_SNI = False
with patch('warnings.warn') as warn:
ssl_wrap_socket(ssl_context=mock_context, sock=socket)
mock_context.wrap_socket.assert_called_once_with(socket)
ssl_.HAS_SNI = HAS_SNI
self.assertTrue(warn.call_count >= 1)
warnings = [call[0][1] for call in warn.call_args_list]
self.assertTrue(SNIMissingWarning in warnings)
def test_const_compare_digest_fallback(self):
target = hashlib.sha256(b'abcdef').digest()
self.assertTrue(_const_compare_digest_backport(target, target))
prefix = target[:-1]
self.assertFalse(_const_compare_digest_backport(target, prefix))
suffix = target + b'0'
self.assertFalse(_const_compare_digest_backport(target, suffix))
incorrect = hashlib.sha256(b'xyz').digest()
self.assertFalse(_const_compare_digest_backport(target, incorrect))
| mit | -5,254,778,066,646,307,000 | 38.06982 | 140 | 0.575777 | false |
nckx/dstat | plugins/dstat_vm_mem.py | 4 | 1136 | ### Author: Bert de Bruijn <bert+dstat$debruijn,be>
### VMware memory stats
### Displays memory stats coming from the hypervisor inside VMware VMs.
### The vmGuestLib API from VMware Tools needs to be installed
class dstat_plugin(dstat):
def __init__(self):
self.name = 'vmware memory'
self.vars = ('active', 'ballooned', 'mapped', 'swapped', 'used')
self.nick = ('active', 'balln', 'mappd', 'swapd', 'used')
self.type = 'd'
self.width = 5
self.scale = 1024
def check(self):
try:
global vmguestlib
import vmguestlib
self.gl = vmguestlib.VMGuestLib()
except:
raise Exception, 'Needs python-vmguestlib module'
def extract(self):
self.gl.UpdateInfo()
self.val['active'] = self.gl.GetMemActiveMB() * 1024 ** 2
self.val['ballooned'] = self.gl.GetMemBalloonedMB() * 1024 ** 2
self.val['mapped'] = self.gl.GetMemMappedMB() * 1024 ** 2
self.val['swapped'] = self.gl.GetMemSwappedMB() * 1024 ** 2
self.val['used'] = self.gl.GetMemUsedMB() * 1024 ** 2
# vim:ts=4:sw=4 | gpl-2.0 | 6,929,635,710,274,059,000 | 33.454545 | 73 | 0.589789 | false |
jbogaardt/chainladder-python | chainladder/development/tests/test_munich.py | 1 | 1554 | import numpy as np
import chainladder as cl
from rpy2.robjects.packages import importr
from rpy2.robjects import r
CL = importr("ChainLadder")
def test_mcl_paid():
df = r("MunichChainLadder(MCLpaid, MCLincurred)").rx("MCLPaid")
p = cl.MunichAdjustment(paid_to_incurred=("paid", "incurred")).fit(
cl.Development(sigma_interpolation="mack").fit_transform(cl.load_sample("mcl"))
)
xp = p.ldf_.get_array_module()
arr = xp.array(df[0])
assert xp.allclose(arr, p.munich_full_triangle_[0, 0, 0, :, :], atol=1e-5)
def test_mcl_incurred():
df = r("MunichChainLadder(MCLpaid, MCLincurred)").rx("MCLIncurred")
p = cl.MunichAdjustment(paid_to_incurred=[("paid", "incurred")]).fit(
cl.Development(sigma_interpolation="mack").fit_transform(cl.load_sample("mcl"))
)
xp = p.ldf_.get_array_module()
arr = xp.array(df[0])
assert xp.allclose(arr, p.munich_full_triangle_[1, 0, 0, :, :], atol=1e-5)
def test_mcl_ult():
mcl = cl.load_sample("mcl")
dev = cl.Development().fit_transform(mcl)
cl_traditional = cl.Chainladder().fit(dev).ultimate_
dev_munich = cl.MunichAdjustment(
paid_to_incurred=[("paid", "incurred")]
).fit_transform(dev)
cl_munich = cl.Chainladder().fit(dev_munich).ultimate_
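    # Note: this test is effectively a smoke test -- it checks that the Munich
    # adjustment composes with the plain chain-ladder estimator end-to-end, but
    # it does not assert anything about the resulting ultimates.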
def test_mcl_rollforward():
mcl = cl.load_sample("mcl")
mcl_prior = mcl[mcl.valuation < mcl.valuation_date]
munich = cl.MunichAdjustment(paid_to_incurred=[("paid", "incurred")]).fit(mcl_prior)
new = munich.transform(mcl)
cl.Chainladder().fit(new).ultimate_
| mit | -1,914,535,040,318,041,600 | 34.318182 | 88 | 0.660232 | false |
meteorcloudy/tensorflow | tensorflow/contrib/image/python/ops/image_ops.py | 6 | 21765 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python layer for image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.image.ops import gen_image_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import resource_loader
_image_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_image_ops.so"))
_IMAGE_DTYPES = set(
[dtypes.uint8, dtypes.int32, dtypes.int64,
dtypes.float16, dtypes.float32, dtypes.float64])
ops.RegisterShape("ImageConnectedComponents")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ImageProjectiveTransform")(common_shapes.call_cpp_shape_fn)
def rotate(images, angles, interpolation="NEAREST", name=None):
"""Rotate image(s) counterclockwise by the passed angle(s) in radians.
Args:
images: A tensor of shape (num_images, num_rows, num_columns, num_channels)
(NHWC), (num_rows, num_columns, num_channels) (HWC), or
(num_rows, num_columns) (HW). The rank must be statically known (the
      shape is not `TensorShape(None)`).
angles: A scalar angle to rotate all images by, or (if images has rank 4)
a vector of length num_images, with an angle for each image in the batch.
interpolation: Interpolation mode. Supported values: "NEAREST", "BILINEAR".
name: The name of the op.
Returns:
Image(s) with the same type and shape as `images`, rotated by the given
angle(s). Empty space due to the rotation will be filled with zeros.
Raises:
TypeError: If `image` is an invalid type.
"""
with ops.name_scope(name, "rotate"):
image_or_images = ops.convert_to_tensor(images)
if image_or_images.dtype.base_dtype not in _IMAGE_DTYPES:
raise TypeError("Invalid dtype %s." % image_or_images.dtype)
elif image_or_images.get_shape().ndims is None:
raise TypeError("image_or_images rank must be statically known")
elif len(image_or_images.get_shape()) == 2:
images = image_or_images[None, :, :, None]
elif len(image_or_images.get_shape()) == 3:
images = image_or_images[None, :, :, :]
elif len(image_or_images.get_shape()) == 4:
images = image_or_images
else:
raise TypeError("Images should have rank between 2 and 4.")
image_height = math_ops.cast(array_ops.shape(images)[1],
dtypes.float32)[None]
image_width = math_ops.cast(array_ops.shape(images)[2],
dtypes.float32)[None]
output = transform(
images,
angles_to_projective_transforms(angles, image_height, image_width),
interpolation=interpolation)
if image_or_images.get_shape().ndims is None:
raise TypeError("image_or_images rank must be statically known")
elif len(image_or_images.get_shape()) == 2:
return output[0, :, :, 0]
elif len(image_or_images.get_shape()) == 3:
return output[0, :, :, :]
else:
return output
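# Illustrative usage sketch (not part of the library), assuming a TF1-style
# graph/session workflow:
#
#   images = tf.placeholder(tf.float32, [2, 128, 128, 3])
#   rotated = tf.contrib.image.rotate(images, angles=[0.0, 3.14159 / 4],
#                                     interpolation="BILINEAR")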
def translate(images, translations, interpolation="NEAREST", name=None):
"""Translate image(s) by the passed vectors(s).
Args:
images: A tensor of shape (num_images, num_rows, num_columns, num_channels)
(NHWC), (num_rows, num_columns, num_channels) (HWC), or
(num_rows, num_columns) (HW). The rank must be statically known (the
      shape is not `TensorShape(None)`).
translations: A vector representing [dx, dy] or (if images has rank 4)
a matrix of length num_images, with a [dx, dy] vector for each image in
the batch.
interpolation: Interpolation mode. Supported values: "NEAREST", "BILINEAR".
name: The name of the op.
Returns:
Image(s) with the same type and shape as `images`, translated by the given
vector(s). Empty space due to the translation will be filled with zeros.
Raises:
TypeError: If `image` is an invalid type.
"""
with ops.name_scope(name, "translate"):
return transform(
images,
translations_to_projective_transforms(translations),
interpolation=interpolation)
def angles_to_projective_transforms(angles,
image_height,
image_width,
name=None):
"""Returns projective transform(s) for the given angle(s).
Args:
angles: A scalar angle to rotate all images by, or (for batches of images)
a vector with an angle to rotate each image in the batch. The rank must
      be statically known (the shape is not `TensorShape(None)`).
image_height: Height of the image(s) to be transformed.
image_width: Width of the image(s) to be transformed.
Returns:
A tensor of shape (num_images, 8). Projective transforms which can be given
to `tf.contrib.image.transform`.
"""
with ops.name_scope(name, "angles_to_projective_transforms"):
angle_or_angles = ops.convert_to_tensor(
angles, name="angles", dtype=dtypes.float32)
if len(angle_or_angles.get_shape()) == 0: # pylint: disable=g-explicit-length-test
angles = angle_or_angles[None]
elif len(angle_or_angles.get_shape()) == 1:
angles = angle_or_angles
else:
raise TypeError("Angles should have rank 0 or 1.")
x_offset = ((image_width - 1) - (math_ops.cos(angles) *
(image_width - 1) - math_ops.sin(angles) *
(image_height - 1))) / 2.0
y_offset = ((image_height - 1) - (math_ops.sin(angles) *
(image_width - 1) + math_ops.cos(angles) *
(image_height - 1))) / 2.0
num_angles = array_ops.shape(angles)[0]
return array_ops.concat(
values=[
math_ops.cos(angles)[:, None],
-math_ops.sin(angles)[:, None],
x_offset[:, None],
math_ops.sin(angles)[:, None],
math_ops.cos(angles)[:, None],
y_offset[:, None],
array_ops.zeros((num_angles, 2), dtypes.float32),
],
axis=1)
def translations_to_projective_transforms(translations, name=None):
"""Returns projective transform(s) for the given translation(s).
Args:
translations: A 2-element list representing [dx, dy] or a matrix of
2-element lists representing [dx, dy] to translate for each image
(for a batch of images). The rank must be statically known (the shape
      is not `TensorShape(None)`).
name: The name of the op.
Returns:
A tensor of shape (num_images, 8) projective transforms which can be given
to `tf.contrib.image.transform`.
"""
with ops.name_scope(name, "translations_to_projective_transforms"):
translation_or_translations = ops.convert_to_tensor(
translations, name="translations", dtype=dtypes.float32)
if translation_or_translations.get_shape().ndims is None:
raise TypeError(
"translation_or_translations rank must be statically known")
elif len(translation_or_translations.get_shape()) == 1:
translations = translation_or_translations[None]
elif len(translation_or_translations.get_shape()) == 2:
translations = translation_or_translations
else:
raise TypeError("Translations should have rank 1 or 2.")
num_translations = array_ops.shape(translations)[0]
# The translation matrix looks like:
# [[1 0 -dx]
# [0 1 -dy]
# [0 0 1]]
# where the last entry is implicit.
# Translation matrices are always float32.
return array_ops.concat(
values=[
array_ops.ones((num_translations, 1), dtypes.float32),
array_ops.zeros((num_translations, 1), dtypes.float32),
-translations[:, 0, None],
array_ops.zeros((num_translations, 1), dtypes.float32),
array_ops.ones((num_translations, 1), dtypes.float32),
-translations[:, 1, None],
array_ops.zeros((num_translations, 2), dtypes.float32),
],
axis=1)
def transform(images, transforms, interpolation="NEAREST", name=None):
"""Applies the given transform(s) to the image(s).
Args:
images: A tensor of shape (num_images, num_rows, num_columns, num_channels)
(NHWC), (num_rows, num_columns, num_channels) (HWC), or
(num_rows, num_columns) (HW). The rank must be statically known (the
      shape is not `TensorShape(None)`).
transforms: Projective transform matrix/matrices. A vector of length 8 or
tensor of size N x 8. If one row of transforms is
[a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point
`(x, y)` to a transformed *input* point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
where `k = c0 x + c1 y + 1`. The transforms are *inverted* compared to
the transform mapping input points to output points. Note that gradients
are not backpropagated into transformation parameters.
interpolation: Interpolation mode. Supported values: "NEAREST", "BILINEAR".
Returns:
Image(s) with the same type and shape as `images`, with the given
transform(s) applied. Transformed coordinates outside of the input image
will be filled with zeros.
Raises:
TypeError: If `image` is an invalid type.
"""
with ops.name_scope(name, "transform"):
image_or_images = ops.convert_to_tensor(images, name="images")
transform_or_transforms = ops.convert_to_tensor(
transforms, name="transforms", dtype=dtypes.float32)
if image_or_images.dtype.base_dtype not in _IMAGE_DTYPES:
raise TypeError("Invalid dtype %s." % image_or_images.dtype)
elif image_or_images.get_shape().ndims is None:
raise TypeError("image_or_images rank must be statically known")
elif len(image_or_images.get_shape()) == 2:
images = image_or_images[None, :, :, None]
elif len(image_or_images.get_shape()) == 3:
images = image_or_images[None, :, :, :]
elif len(image_or_images.get_shape()) == 4:
images = image_or_images
else:
raise TypeError("Images should have rank between 2 and 4.")
if len(transform_or_transforms.get_shape()) == 1:
transforms = transform_or_transforms[None]
elif transform_or_transforms.get_shape().ndims is None:
raise TypeError(
"transform_or_transforms rank must be statically known")
elif len(transform_or_transforms.get_shape()) == 2:
transforms = transform_or_transforms
else:
raise TypeError("Transforms should have rank 1 or 2.")
output = gen_image_ops.image_projective_transform(
images, transforms, interpolation=interpolation.upper())
if len(image_or_images.get_shape()) == 2:
return output[0, :, :, 0]
elif len(image_or_images.get_shape()) == 3:
return output[0, :, :, :]
else:
return output
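# Illustrative usage sketch (not part of the library): a flat transform
# [a0, a1, a2, b0, b1, b2, c0, c1] maps each *output* pixel back to the input
# pixel it samples from, so [1, 0, -10, 0, 1, -5, 0, 0] shifts the image
# content by (+10, +5) pixels:
#
#   shifted = tf.contrib.image.transform(
#       images, [1., 0., -10., 0., 1., -5., 0., 0.])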
def compose_transforms(*transforms):
"""Composes the transforms tensors.
Args:
*transforms: List of image projective transforms to be composed. Each
transform is length 8 (single transform) or shape (N, 8) (batched
transforms). The shapes of all inputs must be equal, and at least one
input must be given.
Returns:
A composed transform tensor. When passed to `tf.contrib.image.transform`,
equivalent to applying each of the given transforms to the image in
order.
"""
assert transforms, "transforms cannot be empty"
with ops.name_scope("compose_transforms"):
composed = flat_transforms_to_matrices(transforms[0])
for tr in transforms[1:]:
# Multiply batches of matrices.
composed = math_ops.matmul(composed, flat_transforms_to_matrices(tr))
return matrices_to_flat_transforms(composed)
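# Illustrative sketch (not part of the library): composing a rotation about the
# image centre with a translation so both are applied in a single resampling
# pass:
#
#   composed = compose_transforms(
#       angles_to_projective_transforms(0.5, 128, 128),
#       translations_to_projective_transforms([10, 5]))
#   warped = transform(images, composed)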
def flat_transforms_to_matrices(transforms):
"""Converts `tf.contrib.image` projective transforms to affine matrices.
Note that the output matrices map output coordinates to input coordinates. For
the forward transformation matrix, call `tf.linalg.inv` on the result.
Args:
transforms: Vector of length 8, or batches of transforms with shape
`(N, 8)`.
Returns:
3D tensor of matrices with shape `(N, 3, 3)`. The output matrices map the
*output coordinates* (in homogeneous coordinates) of each transform to the
corresponding *input coordinates*.
Raises:
ValueError: If `transforms` have an invalid shape.
"""
with ops.name_scope("flat_transforms_to_matrices"):
transforms = ops.convert_to_tensor(transforms, name="transforms")
if transforms.shape.ndims not in (1, 2):
raise ValueError("Transforms should be 1D or 2D, got: %s" % transforms)
# Make the transform(s) 2D in case the input is a single transform.
transforms = array_ops.reshape(transforms, constant_op.constant([-1, 8]))
num_transforms = array_ops.shape(transforms)[0]
# Add a column of ones for the implicit last entry in the matrix.
return array_ops.reshape(
array_ops.concat(
[transforms, array_ops.ones([num_transforms, 1])], axis=1),
constant_op.constant([-1, 3, 3]))
def matrices_to_flat_transforms(transform_matrices):
"""Converts affine matrices to `tf.contrib.image` projective transforms.
Note that we expect matrices that map output coordinates to input coordinates.
To convert forward transformation matrices, call `tf.linalg.inv` on the
matrices and use the result here.
Args:
transform_matrices: One or more affine transformation matrices, for the
reverse transformation in homogeneous coordinates. Shape `(3, 3)` or
`(N, 3, 3)`.
Returns:
2D tensor of flat transforms with shape `(N, 8)`, which may be passed into
`tf.contrib.image.transform`.
Raises:
ValueError: If `transform_matrices` have an invalid shape.
"""
with ops.name_scope("matrices_to_flat_transforms"):
transform_matrices = ops.convert_to_tensor(
transform_matrices, name="transform_matrices")
if transform_matrices.shape.ndims not in (2, 3):
raise ValueError(
"Matrices should be 2D or 3D, got: %s" % transform_matrices)
# Flatten each matrix.
transforms = array_ops.reshape(transform_matrices,
constant_op.constant([-1, 9]))
# Divide each matrix by the last entry (normally 1).
transforms /= transforms[:, 8:9]
return transforms[:, :8]
@ops.RegisterGradient("ImageProjectiveTransform")
def _image_projective_transform_grad(op, grad):
"""Computes the gradient for ImageProjectiveTransform."""
images = op.inputs[0]
transforms = op.inputs[1]
interpolation = op.get_attr("interpolation")
image_or_images = ops.convert_to_tensor(images, name="images")
transform_or_transforms = ops.convert_to_tensor(
transforms, name="transforms", dtype=dtypes.float32)
if image_or_images.dtype.base_dtype not in _IMAGE_DTYPES:
raise TypeError("Invalid dtype %s." % image_or_images.dtype)
if len(image_or_images.get_shape()) == 2:
images = image_or_images[None, :, :, None]
elif len(image_or_images.get_shape()) == 3:
images = image_or_images[None, :, :, :]
elif len(image_or_images.get_shape()) == 4:
images = image_or_images
else:
raise TypeError("Images should have rank between 2 and 4")
if len(transform_or_transforms.get_shape()) == 1:
transforms = transform_or_transforms[None]
elif len(transform_or_transforms.get_shape()) == 2:
transforms = transform_or_transforms
else:
raise TypeError("Transforms should have rank 1 or 2.")
# Invert transformations
transforms = flat_transforms_to_matrices(transforms=transforms)
inverse = linalg_ops.matrix_inverse(transforms)
transforms = matrices_to_flat_transforms(inverse)
output = gen_image_ops.image_projective_transform(
grad, transforms, interpolation=interpolation)
if len(image_or_images.get_shape()) == 2:
return [output[0, :, :, 0], None]
elif len(image_or_images.get_shape()) == 3:
return [output[0, :, :, :], None]
else:
return [output, None]
def bipartite_match(distance_mat,
num_valid_rows,
top_k=-1,
name="bipartite_match"):
"""Find bipartite matching based on a given distance matrix.
A greedy bi-partite matching algorithm is used to obtain the matching with
the (greedy) minimum distance.
Args:
distance_mat: A 2-D float tensor of shape `[num_rows, num_columns]`. It is a
pair-wise distance matrix between the entities represented by each row and
each column. It is an asymmetric matrix. The smaller the distance is, the
more similar the pairs are. The bipartite matching is to minimize the
distances.
num_valid_rows: A scalar or a 1-D tensor with one element describing the
number of valid rows of distance_mat to consider for the bipartite
matching. If set to be negative, then all rows from `distance_mat` are
used.
top_k: A scalar that specifies the number of top-k matches to retrieve.
If set to be negative, then is set according to the maximum number of
matches from `distance_mat`.
name: The name of the op.
Returns:
row_to_col_match_indices: A vector of length num_rows, which is the number
of rows of the input `distance_matrix`. If `row_to_col_match_indices[i]`
is not -1, row i is matched to column `row_to_col_match_indices[i]`.
col_to_row_match_indices: A vector of length num_columns, which is the
number of columns of the input distance matrix.
If `col_to_row_match_indices[j]` is not -1, column j is matched to row
`col_to_row_match_indices[j]`.
"""
result = gen_image_ops.bipartite_match(
distance_mat, num_valid_rows, top_k, name=name)
return result
def connected_components(images):
"""Labels the connected components in a batch of images.
A component is a set of pixels in a single input image, which are all adjacent
  and all have the same non-zero value. Components are computed using a squared
  connectivity of one (all True entries are joined with their neighbors above,
  below, left, and right). Components across all images have consecutive ids 1
through n. Components are labeled according to the first pixel of the
component appearing in row-major order (lexicographic order by
image_index_in_batch, row, col). Zero entries all have an output id of 0.
This op is equivalent with `scipy.ndimage.measurements.label` on a 2D array
with the default structuring element (which is the connectivity used here).
Args:
images: A 2D (H, W) or 3D (N, H, W) Tensor of boolean image(s).
Returns:
Components with the same shape as `images`. False entries in `images` have
value 0, and all True entries map to a component id > 0.
Raises:
TypeError: if `images` is not 2D or 3D.
"""
with ops.name_scope("connected_components"):
image_or_images = ops.convert_to_tensor(images, name="images")
if len(image_or_images.get_shape()) == 2:
images = image_or_images[None, :, :]
elif len(image_or_images.get_shape()) == 3:
images = image_or_images
else:
raise TypeError(
"images should have rank 2 (HW) or 3 (NHW). Static shape is %s" %
image_or_images.get_shape())
components = gen_image_ops.image_connected_components(images)
# TODO(ringwalt): Component id renaming should be done in the op, to avoid
# constructing multiple additional large tensors.
components_flat = array_ops.reshape(components, [-1])
unique_ids, id_index = array_ops.unique(components_flat)
id_is_zero = array_ops.where(math_ops.equal(unique_ids, 0))[:, 0]
# Map each nonzero id to consecutive values.
nonzero_consecutive_ids = math_ops.range(
array_ops.shape(unique_ids)[0] - array_ops.shape(id_is_zero)[0]) + 1
def no_zero():
# No need to insert a zero into the ids.
return nonzero_consecutive_ids
def has_zero():
# Insert a zero in the consecutive ids where zero appears in unique_ids.
# id_is_zero has length 1.
zero_id_ind = math_ops.to_int32(id_is_zero[0])
ids_before = nonzero_consecutive_ids[:zero_id_ind]
ids_after = nonzero_consecutive_ids[zero_id_ind:]
return array_ops.concat([ids_before, [0], ids_after], axis=0)
new_ids = control_flow_ops.cond(
math_ops.equal(array_ops.shape(id_is_zero)[0], 0), no_zero, has_zero)
components = array_ops.reshape(
array_ops.gather(new_ids, id_index), array_ops.shape(components))
if len(image_or_images.get_shape()) == 2:
return components[0, :, :]
else:
return components
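# Illustrative sketch (not part of the library): labelling blobs in a binary
# mask, analogous to scipy.ndimage.measurements.label:
#
#   mask = tf.greater(image, 0.5)      # boolean (H, W) or (N, H, W) tensor
#   labels = tf.contrib.image.connected_components(mask)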
ops.NotDifferentiable("BipartiteMatch")
ops.NotDifferentiable("ImageConnectedComponents")
| apache-2.0 | 9,122,117,178,000,511,000 | 41.344358 | 87 | 0.666713 | false |
simongoffin/my_odoo_tutorial | addons/crm/report/crm_phonecall_report.py | 16 | 4157 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.addons.crm import crm
from openerp.osv import fields, osv
AVAILABLE_STATES = [
('draft', 'Draft'),
('open', 'Todo'),
('cancel', 'Cancelled'),
('done', 'Held'),
('pending', 'Pending')
]
class crm_phonecall_report(osv.osv):
""" Phone calls by user and section """
_name = "crm.phonecall.report"
_description = "Phone calls by user and section"
_auto = False
_columns = {
'user_id':fields.many2one('res.users', 'User', readonly=True),
'section_id':fields.many2one('crm.case.section', 'Section', readonly=True),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority'),
'nbr': fields.integer('# of Cases', readonly=True),
'state': fields.selection(AVAILABLE_STATES, 'Status', readonly=True),
'create_date': fields.datetime('Create Date', readonly=True, select=True),
'delay_close': fields.float('Delay to close', digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to close the case"),
'duration': fields.float('Duration', digits=(16,2),readonly=True, group_operator="avg"),
'delay_open': fields.float('Delay to open',digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to open the case"),
'categ_id': fields.many2one('crm.case.categ', 'Category', \
domain="[('section_id','=',section_id),\
('object_id.model', '=', 'crm.phonecall')]"),
'partner_id': fields.many2one('res.partner', 'Partner' , readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'opening_date': fields.date('Opening Date', readonly=True, select=True),
'creation_date': fields.date('Creation Date', readonly=True, select=True),
'date_closed': fields.date('Close Date', readonly=True, select=True),
}
def init(self, cr):
""" Phone Calls By User And Section
@param cr: the current row, from the database cursor,
"""
tools.drop_view_if_exists(cr, 'crm_phonecall_report')
cr.execute("""
create or replace view crm_phonecall_report as (
select
id,
to_char(c.create_date, 'YYYY-MM-DD') as creation_date,
to_char(c.date_open, 'YYYY-MM-DD') as opening_date,
to_char(c.date_closed, 'YYYY-mm-dd') as date_closed,
c.state,
c.user_id,
c.section_id,
c.categ_id,
c.partner_id,
c.duration,
c.company_id,
c.priority,
1 as nbr,
date_trunc('day',c.create_date) as create_date,
extract('epoch' from (c.date_closed-c.create_date))/(3600*24) as delay_close,
extract('epoch' from (c.date_open-c.create_date))/(3600*24) as delay_open
from
crm_phonecall c
)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 1,779,903,230,843,479,000 | 44.681319 | 145 | 0.565552 | false |
pratikmallya/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/odf/manifest.py | 84 | 1387 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
#
from namespaces import MANIFESTNS
from element import Element
# Autogenerated
def Manifest(**args):
return Element(qname = (MANIFESTNS,'manifest'), **args)
def FileEntry(**args):
return Element(qname = (MANIFESTNS,'file-entry'), **args)
def EncryptionData(**args):
return Element(qname = (MANIFESTNS,'encryption-data'), **args)
def Algorithm(**args):
return Element(qname = (MANIFESTNS,'algorithm'), **args)
def KeyDerivation(**args):
return Element(qname = (MANIFESTNS,'key-derivation'), **args)
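# Illustrative sketch (not part of this autogenerated module), assuming the
# usual odfpy Element API (addElement) and attribute keywords -- the exact
# keyword names are an assumption here:
#
#   m = Manifest()
#   m.addElement(FileEntry(
#       fullpath="/", mediatype="application/vnd.oasis.opendocument.text"))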
| apache-2.0 | 4,005,676,801,716,220,000 | 32.804878 | 80 | 0.733045 | false |
sureleo/leetcode | archive/python/math/MaxPointsOnALine.py | 2 | 1654 | # Definition for a point
class Point:
def __init__(self, a=0, b=0):
self.x = a
self.y = b
class Solution:
# @param points, a list of Points
# @return an integer
def maxPoints(self, points):
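        # Approach: for each anchor point i, hash the slope from i to every
        # other point j; collinear points share a slope key. Coincident points
        # are tallied in `duplicate` and added to every bucket at the end.
        # Caveat: hashing float slopes can misgroup nearly-equal slopes; a
        # reduced (dy, dx) integer pair would be a more robust key.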
result = 0
for i in xrange(len(points)):
d = {}
duplicate = 0
for j in xrange(len(points)):
if i == j:
d["me"] = 1
continue
deltax = points[i].x - points[j].x
deltay = points[i].y - points[j].y
if deltax == 0:
if deltay == 0:
duplicate += 1
else:
if "inf" not in d:
d["inf"] = 1
else:
d["inf"] += 1
else:
deltay = points[i].y - points[j].y
slope = float(deltay) / float(deltax)
if slope not in d:
d[slope] = 1
else:
d[slope] += 1
for key in d:
                # count the anchor point i itself ("me" already includes it)
if key != "me":
d[key] += 1
d[key] += duplicate
result = max(d[key], result)
return result
if __name__ == "__main__":
solution = Solution()
point0 = Point(1, 1)
point1 = Point(1, 1)
point2 = Point(2, 2)
point3 = Point(2, 2)
#points = [point0, point1, point2, point3]
points = [point0]
#points = [point0, point1]
print solution.maxPoints(points)
| mit | -8,209,997,378,053,230,000 | 29.62963 | 57 | 0.384522 | false |
kubeflow/kfp-tekton-backend | sdk/python/kfp/azure.py | 1 | 3009 | # Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def use_azure_secret(secret_name='azcreds'):
"""An operator that configures the container to use Azure user credentials.
The azcreds secret is created as part of the kubeflow deployment that
stores the client ID and secrets for the kubeflow azure service principal.
    With this service principal, the container can access a range of Azure APIs.
"""
def _use_azure_secret(task):
from kubernetes import client as k8s_client
(
task.container
.add_env_variable(
k8s_client.V1EnvVar(
name='AZ_SUBSCRIPTION_ID',
value_from=k8s_client.V1EnvVarSource(
secret_key_ref=k8s_client.V1SecretKeySelector(
name=secret_name,
key='AZ_SUBSCRIPTION_ID'
)
)
)
)
.add_env_variable(
k8s_client.V1EnvVar(
name='AZ_TENANT_ID',
value_from=k8s_client.V1EnvVarSource(
secret_key_ref=k8s_client.V1SecretKeySelector(
name=secret_name,
key='AZ_TENANT_ID'
)
)
)
)
.add_env_variable(
k8s_client.V1EnvVar(
name='AZ_CLIENT_ID',
value_from=k8s_client.V1EnvVarSource(
secret_key_ref=k8s_client.V1SecretKeySelector(
name=secret_name,
key='AZ_CLIENT_ID'
)
)
)
)
.add_env_variable(
k8s_client.V1EnvVar(
name='AZ_CLIENT_SECRET',
value_from=k8s_client.V1EnvVarSource(
secret_key_ref=k8s_client.V1SecretKeySelector(
name=secret_name,
key='AZ_CLIENT_SECRET'
)
)
)
)
)
return task
return _use_azure_secret
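# Illustrative sketch (not part of this module), assuming the usual KFP v1 DSL
# where the modifier is applied to a ContainerOp inside a pipeline:
#
#   op = dsl.ContainerOp(name='train', image='my-image')
#   op.apply(use_azure_secret())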
| apache-2.0 | -4,223,764,411,932,221,000 | 38.592105 | 82 | 0.473579 | false |
rockerbox/kazoo | docs/conf.py | 8 | 8004 | # -*- coding: utf-8 -*-
#
# kazoo documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 11 13:23:01 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
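# Stub out modules that need C extensions (the legacy ``zookeeper`` binding)
# so Sphinx autodoc can import kazoo on a docs build host without them.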
class Mock(object):
def __init__(self, *args):
pass
def __getattr__(self, name):
return Mock
MOCK_MODULES = ['zookeeper']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'kazoo'
copyright = u'2011-2014, Kazoo team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0'
# The full version, including alpha/beta/rc tags.
release = '2.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'kazoodoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'kazoo.tex', u'kazoo Documentation',
u'Various Authors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'kazoo', u'kazoo Documentation',
[u'Various Authors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'kazoo', u'kazoo Documentation', u'Various Authors',
'kazoo', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| apache-2.0 | -2,662,345,440,492,113,000 | 30.143969 | 80 | 0.69965 | false |
kevin-who/socket-chat | node_modules/node-gyp/gyp/pylib/gyp/common.py | 366 | 19638 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
result = self.func(*args)
self.cache[args] = result
return result
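# Illustrative sketch (not part of gyp): the cache key is the positional
# argument tuple, so memoize only suits pure functions of hashable arguments.
#
#   @memoize
#   def _Expensive(path):
#     return os.path.realpath(path)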
class GypError(Exception):
"""Error class representing an error, which is to be presented
to the user. The main entry point will catch and display this.
"""
pass
def ExceptionAppend(e, msg):
"""Append a message to the given exception's message."""
if not e.args:
e.args = (msg,)
elif len(e.args) == 1:
e.args = (str(e.args[0]) + ' ' + msg,)
else:
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
"""
Given a list of qualified targets, return the qualified targets for the
specified |target|.
"""
return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]
def ParseQualifiedTarget(target):
# Splits a qualified target into a build file, target name and toolset.
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
target_split = target.rsplit(':', 1)
if len(target_split) == 2:
[build_file, target] = target_split
else:
build_file = None
target_split = target.rsplit('#', 1)
if len(target_split) == 2:
[target, toolset] = target_split
else:
toolset = None
return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
# This function resolves a target into a canonical form:
# - a fully defined build file, either absolute or relative to the current
# directory
# - a target name
# - a toolset
#
# build_file is the file relative to which 'target' is defined.
# target is the qualified target.
# toolset is the default toolset for that target.
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
if parsed_build_file:
if build_file:
# If a relative path, parsed_build_file is relative to the directory
# containing build_file. If build_file is not in the current directory,
# parsed_build_file is not a usable path as-is. Resolve it by
# interpreting it as relative to build_file. If parsed_build_file is
# absolute, it is usable as a path regardless of the current directory,
# and os.path.join will return it as-is.
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
parsed_build_file))
# Further (to handle cases like ../cwd), make it relative to cwd)
if not os.path.isabs(build_file):
build_file = RelativePath(build_file, '.')
else:
build_file = parsed_build_file
if parsed_toolset:
toolset = parsed_toolset
return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
# Extracts the build file from the fully qualified target.
return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default
def QualifiedTarget(build_file, target, toolset):
# "Qualified" means the file that a target was defined in and the target
# name, separated by a colon, suffixed by a # and the toolset name:
# /path/to/file.gyp:target_name#toolset
fully_qualified = build_file + ':' + target
if toolset:
fully_qualified = fully_qualified + '#' + toolset
return fully_qualified
@memoize
def RelativePath(path, relative_to):
# Assuming both |path| and |relative_to| are relative to the current
# directory, returns a relative path that identifies path relative to
# relative_to.
# Convert to normalized (and therefore absolute paths).
path = os.path.realpath(path)
relative_to = os.path.realpath(relative_to)
# On Windows, we can't create a relative path to a different drive, so just
# use the absolute path.
if sys.platform == 'win32':
if (os.path.splitdrive(path)[0].lower() !=
os.path.splitdrive(relative_to)[0].lower()):
return path
# Split the paths into components.
path_split = path.split(os.path.sep)
relative_to_split = relative_to.split(os.path.sep)
# Determine how much of the prefix the two paths share.
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
# Put enough ".." components to back up out of relative_to to the common
# prefix, and then append the part of path_split after the common prefix.
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
path_split[prefix_len:]
if len(relative_split) == 0:
# The paths were the same.
return ''
# Turn it back into a string and we're done.
return os.path.join(*relative_split)
@memoize
def InvertRelativePath(path, toplevel_dir=None):
"""Given a path like foo/bar that is relative to toplevel_dir, return
the inverse relative path back to the toplevel_dir.
E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
should always produce the empty string, unless the path contains symlinks.
"""
if not path:
return path
toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
# Like RelativePath but returns |path| unchanged if it is absolute.
if os.path.isabs(path):
return path
return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
# Assuming that |relative_to| is relative to the current directory, and |path|
# is a path relative to the dirname of |relative_to|, returns a path that
# identifies |path| relative to the current directory.
rel_dir = os.path.dirname(relative_to)
return os.path.normpath(os.path.join(rel_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
"""Encodes |argument| suitably for consumption by POSIX shells.
argument may be quoted and escaped as necessary to ensure that POSIX shells
treat the returned value as a literal representing the argument passed to
this function. Parameter (variable) expansions beginning with $ are allowed
to remain intact without escaping the $, to allow the argument to contain
references to variables to be expanded by the shell.
"""
if not isinstance(argument, str):
argument = str(argument)
if _quote.search(argument):
quote = '"'
else:
quote = ''
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
return encoded
def EncodePOSIXShellList(list):
"""Encodes |list| suitably for consumption by POSIX shells.
Returns EncodePOSIXShellArgument for each item in list, and joins them
together using the space character as an argument separator.
"""
encoded_arguments = []
for argument in list:
encoded_arguments.append(EncodePOSIXShellArgument(argument))
return ' '.join(encoded_arguments)
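# Illustrative sketch (not part of gyp): words containing shell metacharacters
# are double-quoted (and ", \, ` are backslash-escaped), while plain words pass
# through untouched and $VAR references stay expandable, e.g.
#   EncodePOSIXShellList(['echo', 'a b', '$HOME']) -> 'echo "a b" "$HOME"'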
def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots))
def BuildFileTargets(target_list, build_file):
"""From a target_list, returns the subset from the specified build_file.
"""
return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
"""Returns all targets (direct and dependencies) for the specified build_file.
"""
bftargets = BuildFileTargets(target_list, build_file)
deptargets = DeepDependencyTargets(target_dicts, bftargets)
return bftargets + deptargets
def WriteOnDiff(filename):
"""Write to a file only if the new contents differ.
Arguments:
filename: name of the file to potentially write to.
Returns:
A file like object which will write to temporary file and only overwrite
the target if it differs (on close).
"""
class Writer(object):
"""Wrapper around file which only covers the target if it differs."""
def __init__(self):
# Pick temporary file.
tmp_fd, self.tmp_path = tempfile.mkstemp(
suffix='.tmp',
prefix=os.path.split(filename)[1] + '.gyp.',
dir=os.path.split(filename)[0])
try:
self.tmp_file = os.fdopen(tmp_fd, 'wb')
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
def __getattr__(self, attrname):
# Delegate everything else to self.tmp_file
return getattr(self.tmp_file, attrname)
def close(self):
try:
# Close tmp file.
self.tmp_file.close()
# Determine if different.
same = False
try:
same = filecmp.cmp(self.tmp_path, filename, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(self.tmp_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(self.tmp_path, 0666 & ~umask)
if sys.platform == 'win32' and os.path.exists(filename):
# NOTE: on windows (but not cygwin) rename will not replace an
# existing file, so it must be preceded with a remove. Sadly there
# is no way to make the switch atomic.
os.remove(filename)
os.rename(self.tmp_path, filename)
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
return Writer()
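# Illustrative usage (not from the original source; 'out/example.ninja' is a
# hypothetical path): generators write through WriteOnDiff so that output files
# whose contents have not changed keep their timestamps and do not trigger
# needless rebuilds.
#
#   out = WriteOnDiff('out/example.ninja')
#   out.write('# generated by gyp\n')
#   out.close()   # renames over the target only if the contents differ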
def EnsureDirExists(path):
"""Make sure the directory for |path| exists."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
def GetFlavor(params):
"""Returns |params.flavor| if it's set, the system's default flavor else."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
}
if 'flavor' in params:
return params['flavor']
if sys.platform in flavors:
return flavors[sys.platform]
if sys.platform.startswith('sunos'):
return 'solaris'
if sys.platform.startswith('freebsd'):
return 'freebsd'
if sys.platform.startswith('openbsd'):
return 'openbsd'
if sys.platform.startswith('aix'):
return 'aix'
return 'linux'
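# Illustrative behaviour (not part of the original file):
#   GetFlavor({})                -> 'linux' on a Linux host ('mac' on darwin, etc.)
#   GetFlavor({'flavor': 'win'}) -> 'win'   (an explicit flavor always wins)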
def CopyTool(flavor, out_path):
"""Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
to |out_path|."""
# aix and solaris just need flock emulation. mac and win use more complicated
# support scripts.
prefix = {
'aix': 'flock',
'solaris': 'flock',
'mac': 'mac',
'win': 'win'
}.get(flavor, None)
if not prefix:
return
# Slurp input file.
source_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
with open(source_path) as source_file:
source = source_file.readlines()
# Add header and write it out.
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
with open(tool_path, 'w') as tool_file:
tool_file.write(
''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
# Make file executable.
os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
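# Example (illustrative only): uniquer keeps the first occurrence and preserves
# order, unlike set(); idfun lets callers choose the identity used for matching.
#   uniquer([3, 1, 3, 2, 1])                 -> [3, 1, 2]
#   uniquer(['a.c', 'A.C'], idfun=str.lower) -> ['a.c']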
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev_item, next_item = self.map.pop(key)
prev_item[2] = next_item
next_item[1] = prev_item
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
# The second argument is an addition that causes a pylint warning.
def pop(self, last=True): # pylint: disable=W0221
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
# Extensions to the recipe.
def update(self, iterable):
for i in iterable:
if i not in self:
self.add(i)
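# Illustrative usage (not part of the original recipe): OrderedSet behaves like
# a set but remembers insertion order, which keeps generated build files stable.
#   s = OrderedSet(['b', 'a', 'b'])   # -> OrderedSet(['b', 'a'])
#   s.add('c'); s.discard('a')
#   list(s)  -> ['b', 'c']
#   s.pop()  -> 'c'                   # pops from the end by default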
class CycleError(Exception):
"""An exception raised when an unexpected cycle is detected."""
def __init__(self, nodes):
self.nodes = nodes
def __str__(self):
return 'CycleError: cycle involving: ' + str(self.nodes)
def TopologicallySorted(graph, get_edges):
r"""Topologically sort based on a user provided edge definition.
Args:
graph: A list of node names.
get_edges: A function mapping from node name to a hashable collection
of node names which this node has outgoing edges to.
Returns:
    A list containing all of the nodes in graph in topological order.
It is assumed that calling get_edges once for each node and caching is
cheaper than repeatedly calling get_edges.
Raises:
CycleError in the event of a cycle.
Example:
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
def GetEdges(node):
      return re.findall(r'\$\(([^)]*)\)', graph[node])
print TopologicallySorted(graph.keys(), GetEdges)
==>
    ['a', 'c', 'b']
"""
get_edges = memoize(get_edges)
visited = set()
visiting = set()
ordered_nodes = []
def Visit(node):
if node in visiting:
raise CycleError(visiting)
if node in visited:
return
visited.add(node)
visiting.add(node)
for neighbor in get_edges(node):
Visit(neighbor)
visiting.remove(node)
ordered_nodes.insert(0, node)
for node in sorted(graph):
Visit(node)
return ordered_nodes
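# Illustrative, runnable variant of the docstring example (not in the original
# source); the lambda plays the role of GetEdges:
#   graph = {'a': ['b', 'c'], 'b': [], 'c': ['b']}
#   TopologicallySorted(sorted(graph), lambda node: graph[node])  -> ['a', 'c', 'b']
# A graph in which two nodes depend on each other raises CycleError instead.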
def CrossCompileRequested():
# TODO: figure out how to not build extra host objects in the
# non-cross-compile case when this is enabled, and enable unconditionally.
return (os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
| mit | -7,770,871,766,627,708,000 | 31.784641 | 83 | 0.662898 | false |
rokups/Urho3D | Source/Tools/BindTool/util.py | 1 | 2213 | import os
import subprocess
def cpp_demangle(name):
return subprocess.check_output(['c++filt', name]).decode('utf-8').strip()
def split_identifier(identifier):
"""Splits string at _ or between lower case and uppercase letters."""
prev_split = 0
parts = []
if '_' in identifier:
parts = [s.lower() for s in identifier.split('_')]
else:
for i in range(len(identifier) - 1):
if identifier[i + 1].isupper():
parts.append(identifier[prev_split:i + 1].lower())
prev_split = i + 1
last = identifier[prev_split:]
if last:
parts.append(last.lower())
return parts
def camel_case(identifier):
identifier = identifier.strip('_')
return_string = False
if isinstance(identifier, str):
if identifier.isupper() and '_' not in identifier:
identifier = identifier.lower()
name_parts = split_identifier(identifier)
return_string = True
elif isinstance(identifier, (list, tuple)):
name_parts = identifier
else:
raise ValueError('identifier must be a list, tuple or string.')
for i in range(len(name_parts)):
name_parts[i] = name_parts[i][0].upper() + name_parts[i][1:]
if return_string:
return ''.join(name_parts)
return name_parts
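# Illustrative behaviour (not part of the original tool); these helpers turn
# C++/snake_case identifiers into CamelCase names for the generated bindings:
#   split_identifier('max_retries')  -> ['max', 'retries']
#   split_identifier('maxRetries')   -> ['max', 'retries']
#   camel_case('max_retries')        -> 'MaxRetries'
#   camel_case('URHO3D')             -> 'Urho3d'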
def get_subsystem_name(src_path):
cwd = os.path.abspath('.')
rel_path = os.path.relpath(src_path, cwd)
subsystem = rel_path[:rel_path.index('/')]
if subsystem == '..':
subsystem = 'global'
return subsystem
def has_base_class(node, base_class_name):
for base in node.bases:
if base.cls is None or isinstance(base.cls, str):
continue
if base.cls.infer_fqn() == base_class_name:
return True
elif has_base_class(base.cls, base_class_name):
return True
return False
def find_value(n):
inner = n.get('inner', ())
if len(inner) == 1:
n = inner[0]
if n['kind'] in ('FloatingLiteral', 'IntegerLiteral', 'ImplicitCastExpr', 'ConstantExpr'):
value = n.get('value')
if value:
return value
return find_value(n)
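# Illustrative call (not part of the original tool; the dict mimics a fragment
# of clang's JSON AST dump, which is an assumption about the expected input):
#   find_value({'inner': [{'kind': 'IntegerLiteral', 'value': '42'}]})  -> '42'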
| mit | 3,886,588,096,399,914,500 | 28.118421 | 98 | 0.586082 | false |
mhnatiuk/phd_sociology_of_religion | scrapper/build/Twisted/twisted/trial/_dist/workerreporter.py | 43 | 3922 | # -*- test-case-name: twisted.trial._dist.test.test_workerreporter -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test reporter forwarding test results over trial distributed AMP commands.
@since: 12.3
"""
from twisted.python.failure import Failure
from twisted.python.reflect import qual
from twisted.trial.reporter import TestResult
from twisted.trial._dist import managercommands
class WorkerReporter(TestResult):
"""
Reporter for trial's distributed workers. We send things not through a
stream, but through an C{AMP} protocol's C{callRemote} method.
"""
def __init__(self, ampProtocol):
"""
@param ampProtocol: The communication channel with the trial
distributed manager which collects all test results.
@type ampProtocol: C{AMP}
"""
super(WorkerReporter, self).__init__()
self.ampProtocol = ampProtocol
def _getFailure(self, error):
"""
Convert a C{sys.exc_info()}-style tuple to a L{Failure}, if necessary.
"""
if isinstance(error, tuple):
return Failure(error[1], error[0], error[2])
return error
def _getFrames(self, failure):
"""
Extract frames from a C{Failure} instance.
"""
frames = []
for frame in failure.frames:
frames.extend([frame[0], frame[1], str(frame[2])])
return frames
def addSuccess(self, test):
"""
Send a success over.
"""
super(WorkerReporter, self).addSuccess(test)
self.ampProtocol.callRemote(managercommands.AddSuccess,
testName=test.id())
def addError(self, test, error):
"""
Send an error over.
"""
super(WorkerReporter, self).addError(test, error)
failure = self._getFailure(error)
frames = self._getFrames(failure)
self.ampProtocol.callRemote(managercommands.AddError,
testName=test.id(),
error=failure.getErrorMessage(),
errorClass=qual(failure.type),
frames=frames)
def addFailure(self, test, fail):
"""
Send a Failure over.
"""
super(WorkerReporter, self).addFailure(test, fail)
failure = self._getFailure(fail)
frames = self._getFrames(failure)
self.ampProtocol.callRemote(managercommands.AddFailure,
testName=test.id(),
fail=failure.getErrorMessage(),
failClass=qual(failure.type),
frames=frames)
def addSkip(self, test, reason):
"""
Send a skip over.
"""
super(WorkerReporter, self).addSkip(test, reason)
self.ampProtocol.callRemote(managercommands.AddSkip,
testName=test.id(), reason=str(reason))
def addExpectedFailure(self, test, error, todo):
"""
Send an expected failure over.
"""
super(WorkerReporter, self).addExpectedFailure(test, error, todo)
self.ampProtocol.callRemote(managercommands.AddExpectedFailure,
testName=test.id(),
error=error.getErrorMessage(),
todo=todo.reason)
def addUnexpectedSuccess(self, test, todo):
"""
Send an unexpected success over.
"""
super(WorkerReporter, self).addUnexpectedSuccess(test, todo)
self.ampProtocol.callRemote(managercommands.AddUnexpectedSuccess,
testName=test.id(), todo=todo.reason)
def printSummary(self):
"""
I{Don't} print a summary
"""
| gpl-2.0 | 2,528,751,842,829,035,500 | 30.886179 | 78 | 0.559663 | false |
arowla/commando | commando/tests/test_conf.py | 5 | 1988 | from commando.conf import AutoProp, ConfigDict
class TestClass(AutoProp):
@AutoProp.default
def source(self):
return 'source'
def test_auto():
t = TestClass()
assert t.source == 'source'
def test_override():
t = TestClass()
t.source = 'source1'
assert t.source == 'source1'
t.source = 'source2'
assert t.source == 'source2'
t.source = None
assert t.source == 'source'
def test_init():
c = ConfigDict({"a": 1})
assert c.a == 1
assert c["a"] == 1
def test_change():
c = ConfigDict({"a": 1})
assert c.a == 1
c.a = 2
assert c["a"] == 2
def test_two_levels():
c = ConfigDict({"a": 1, "b": {"c": 3}})
assert c.b.c == 3
def test_two_levels_assignment():
c = ConfigDict({"a": 1, "b": {"c": 3}})
d = {"d": 5}
c.b = d
assert c.b.d == 5
assert c.b == d
def test_two_levels_patch():
c = ConfigDict({"a": 1, "b": {"c": 3}})
d = {"d": 5}
c.b.d = d
assert c.b.c == 3
assert c.b.d == d
def test_copy():
c = ConfigDict({"a": 1, "b": {"c": 3}})
d = c.copy()
assert c == d
c.b.c = 4
assert c != d
def test_list():
c = ConfigDict({"a": 1, "b": {"c": 3}})
c.d = [dict(e=1), dict(f=2)]
assert c.d[0].e == 1
assert c.d[1].f == 2
def test_operator():
c = ConfigDict({"a": 1, "b": {"c": 3}})
from operator import attrgetter
assert attrgetter('b.c')(c) == 3
def test_patch_simple():
c = ConfigDict({"a": 1, "b": {"c": 3, "e": 4}})
d = {"b": {"e": 5}}
c.patch(d)
assert c.b.c == 3
assert c.b.e == 5
def test_patch_complex():
c = ConfigDict({
"a": 1,
"b": {"x": 3, "y": 4},
"c": {"x": 5, "y": 6},
"d": {"x": 7, "y": 8}
})
d = {"a": 2, "b": {"z": 5}, "c": [1, 2], "d": {"y": 9}}
c.patch(d)
assert c.a == 2
assert c.b.x == 3
assert c.b.y == 4
assert c.b.z == 5
assert c.c == [1, 2]
assert c.d.x == 7
assert c.d.y == 9
| mit | 3,387,926,526,823,716,000 | 17.933333 | 59 | 0.464789 | false |
jss-emr/openerp-7-src | openerp/addons/l10n_in_hr_payroll/wizard/hr_salary_employee_bymonth.py | 51 | 2829 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class hr_salary_employee_bymonth(osv.osv_memory):
_name = 'hr.salary.employee.month'
_description = 'Hr Salary Employee By Month Report'
_columns = {
'start_date': fields.date('Start Date', required=True),
'end_date': fields.date('End Date', required=True),
'employee_ids': fields.many2many('hr.employee', 'payroll_year_rel', 'payroll_year_id', 'employee_id', 'Employees', required=True),
'category_id': fields.many2one('hr.salary.rule.category', 'Category', required=True),
}
def _get_default_category(self, cr, uid, context=None):
category_ids = self.pool.get('hr.salary.rule.category').search(cr, uid, [('code', '=', 'NET')], context=context)
return category_ids and category_ids[0] or False
_defaults = {
'start_date': lambda *a: time.strftime('%Y-01-01'),
'end_date': lambda *a: time.strftime('%Y-%m-%d'),
'category_id': _get_default_category
}
def print_report(self, cr, uid, ids, context=None):
"""
To get the date and print the report
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return: return report
"""
if context is None:
context = {}
datas = {'ids': context.get('active_ids', [])}
res = self.read(cr, uid, ids, context=context)
res = res and res[0] or {}
datas.update({'form': res})
return {
'type': 'ir.actions.report.xml',
'report_name': 'salary.employee.bymonth',
'datas': datas,
}
hr_salary_employee_bymonth()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 | -7,732,602,998,527,396,000 | 38.859155 | 138 | 0.600566 | false |
VaibhavAgarwalVA/sympy | sympy/physics/optics/medium.py | 93 | 4875 | """
**Contains**
* Medium
"""
from __future__ import division
__all__ = ['Medium']
from sympy import Symbol, sympify, sqrt
from sympy.physics.units import c, u0, e0
class Medium(Symbol):
"""
This class represents an optical medium. The prime reason to implement this is
    to facilitate refraction, Fermat's principle, etc.
An optical medium is a material through which electromagnetic waves propagate.
The permittivity and permeability of the medium define how electromagnetic
waves propagate in it.
Parameters
==========
name: string
The display name of the Medium.
permittivity: Sympifyable
Electric permittivity of the space.
permeability: Sympifyable
Magnetic permeability of the space.
n: Sympifyable
Index of refraction of the medium.
Examples
========
>>> from sympy.abc import epsilon, mu
>>> from sympy.physics.optics import Medium
>>> m1 = Medium('m1')
>>> m2 = Medium('m2', epsilon, mu)
>>> m1.intrinsic_impedance
149896229*pi*kg*m**2/(1250000*A**2*s**3)
>>> m2.refractive_index
299792458*m*sqrt(epsilon*mu)/s
References
==========
.. [1] http://en.wikipedia.org/wiki/Optical_medium
"""
def __new__(cls, name, permittivity=None, permeability=None, n=None):
obj = super(Medium, cls).__new__(cls, name)
obj._permittivity = sympify(permittivity)
obj._permeability = sympify(permeability)
obj._n = sympify(n)
if n is not None:
if permittivity != None and permeability == None:
obj._permeability = n**2/(c**2*obj._permittivity)
if permeability != None and permittivity == None:
obj._permittivity = n**2/(c**2*obj._permeability)
            if permittivity != None and permeability != None:
if abs(n - c*sqrt(obj._permittivity*obj._permeability)) > 1e-6:
raise ValueError("Values are not consistent.")
elif permittivity is not None and permeability is not None:
obj._n = c*sqrt(permittivity*permeability)
elif permittivity is None and permeability is None:
obj._permittivity = e0
obj._permeability = u0
return obj
@property
def intrinsic_impedance(self):
"""
Returns intrinsic impedance of the medium.
The intrinsic impedance of a medium is the ratio of the
transverse components of the electric and magnetic fields
of the electromagnetic wave travelling in the medium.
In a region with no electrical conductivity it simplifies
to the square root of ratio of magnetic permeability to
electric permittivity.
Examples
========
>>> from sympy.physics.optics import Medium
>>> m = Medium('m')
>>> m.intrinsic_impedance
149896229*pi*kg*m**2/(1250000*A**2*s**3)
"""
return sqrt(self._permeability/self._permittivity)
@property
def speed(self):
"""
Returns speed of the electromagnetic wave travelling in the medium.
Examples
========
>>> from sympy.physics.optics import Medium
>>> m = Medium('m')
>>> m.speed
299792458*m/s
"""
return 1/sqrt(self._permittivity*self._permeability)
@property
def refractive_index(self):
"""
Returns refractive index of the medium.
Examples
========
>>> from sympy.physics.optics import Medium
>>> m = Medium('m')
>>> m.refractive_index
1
"""
return c/self.speed
@property
def permittivity(self):
"""
Returns electric permittivity of the medium.
Examples
========
>>> from sympy.physics.optics import Medium
>>> m = Medium('m')
>>> m.permittivity
625000*A**2*s**4/(22468879468420441*pi*kg*m**3)
"""
return self._permittivity
@property
def permeability(self):
"""
Returns magnetic permeability of the medium.
Examples
========
>>> from sympy.physics.optics import Medium
>>> m = Medium('m')
>>> m.permeability
pi*kg*m/(2500000*A**2*s**2)
"""
return self._permeability
def __str__(self):
from sympy.printing import sstr
return type(self).__name__ + sstr(self.args)
def __lt__(self, other):
"""
Compares based on refractive index of the medium.
"""
return self.refractive_index < other.refractive_index
def __gt__(self, other):
return not self.__lt__(other)
def __eq__(self, other):
return self.refractive_index == other.refractive_index
def __ne__(self, other):
return not self.__eq__(other)
| bsd-3-clause | -4,062,103,619,393,661,000 | 25.351351 | 82 | 0.582359 | false |
robinro/ansible | lib/ansible/playbook/loop_control.py | 66 | 1328 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
class LoopControl(Base):
_loop_var = FieldAttribute(isa='str')
_label = FieldAttribute(isa='str')
_pause = FieldAttribute(isa='int')
def __init__(self):
super(LoopControl, self).__init__()
@staticmethod
def load(data, variable_manager=None, loader=None):
t = LoopControl()
return t.load_data(data, variable_manager=variable_manager, loader=loader)
| gpl-3.0 | 6,573,447,607,688,028,000 | 33.947368 | 82 | 0.72741 | false |
soldag/home-assistant | homeassistant/components/nexia/sensor.py | 10 | 7063 | """Support for Nexia / Trane XL Thermostats."""
from nexia.const import UNIT_CELSIUS
from homeassistant.const import (
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from .const import DOMAIN, NEXIA_DEVICE, UPDATE_COORDINATOR
from .entity import NexiaThermostatEntity, NexiaThermostatZoneEntity
from .util import percent_conv
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up sensors for a Nexia device."""
nexia_data = hass.data[DOMAIN][config_entry.entry_id]
nexia_home = nexia_data[NEXIA_DEVICE]
coordinator = nexia_data[UPDATE_COORDINATOR]
entities = []
# Thermostat / System Sensors
for thermostat_id in nexia_home.get_thermostat_ids():
thermostat = nexia_home.get_thermostat_by_id(thermostat_id)
entities.append(
NexiaThermostatSensor(
coordinator,
thermostat,
"get_system_status",
"System Status",
None,
None,
)
)
# Air cleaner
entities.append(
NexiaThermostatSensor(
coordinator,
thermostat,
"get_air_cleaner_mode",
"Air Cleaner Mode",
None,
None,
)
)
# Compressor Speed
if thermostat.has_variable_speed_compressor():
entities.append(
NexiaThermostatSensor(
coordinator,
thermostat,
"get_current_compressor_speed",
"Current Compressor Speed",
None,
PERCENTAGE,
percent_conv,
)
)
entities.append(
NexiaThermostatSensor(
coordinator,
thermostat,
"get_requested_compressor_speed",
"Requested Compressor Speed",
None,
PERCENTAGE,
percent_conv,
)
)
# Outdoor Temperature
if thermostat.has_outdoor_temperature():
unit = (
TEMP_CELSIUS
if thermostat.get_unit() == UNIT_CELSIUS
else TEMP_FAHRENHEIT
)
entities.append(
NexiaThermostatSensor(
coordinator,
thermostat,
"get_outdoor_temperature",
"Outdoor Temperature",
DEVICE_CLASS_TEMPERATURE,
unit,
)
)
# Relative Humidity
if thermostat.has_relative_humidity():
entities.append(
NexiaThermostatSensor(
coordinator,
thermostat,
"get_relative_humidity",
"Relative Humidity",
DEVICE_CLASS_HUMIDITY,
PERCENTAGE,
percent_conv,
)
)
# Zone Sensors
for zone_id in thermostat.get_zone_ids():
zone = thermostat.get_zone_by_id(zone_id)
unit = (
TEMP_CELSIUS
if thermostat.get_unit() == UNIT_CELSIUS
else TEMP_FAHRENHEIT
)
# Temperature
entities.append(
NexiaThermostatZoneSensor(
coordinator,
zone,
"get_temperature",
"Temperature",
DEVICE_CLASS_TEMPERATURE,
unit,
None,
)
)
# Zone Status
entities.append(
NexiaThermostatZoneSensor(
coordinator,
zone,
"get_status",
"Zone Status",
None,
None,
)
)
# Setpoint Status
entities.append(
NexiaThermostatZoneSensor(
coordinator,
zone,
"get_setpoint_status",
"Zone Setpoint Status",
None,
None,
)
)
async_add_entities(entities, True)
class NexiaThermostatSensor(NexiaThermostatEntity):
"""Provides Nexia thermostat sensor support."""
def __init__(
self,
coordinator,
thermostat,
sensor_call,
sensor_name,
sensor_class,
sensor_unit,
modifier=None,
):
"""Initialize the sensor."""
super().__init__(
coordinator,
thermostat,
name=f"{thermostat.get_name()} {sensor_name}",
unique_id=f"{thermostat.thermostat_id}_{sensor_call}",
)
self._call = sensor_call
self._class = sensor_class
self._state = None
self._unit_of_measurement = sensor_unit
self._modifier = modifier
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._class
@property
def state(self):
"""Return the state of the sensor."""
val = getattr(self._thermostat, self._call)()
if self._modifier:
val = self._modifier(val)
if isinstance(val, float):
val = round(val, 1)
return val
@property
def unit_of_measurement(self):
"""Return the unit of measurement this sensor expresses itself in."""
return self._unit_of_measurement
class NexiaThermostatZoneSensor(NexiaThermostatZoneEntity):
"""Nexia Zone Sensor Support."""
def __init__(
self,
coordinator,
zone,
sensor_call,
sensor_name,
sensor_class,
sensor_unit,
modifier=None,
):
"""Create a zone sensor."""
super().__init__(
coordinator,
zone,
name=f"{zone.get_name()} {sensor_name}",
unique_id=f"{zone.zone_id}_{sensor_call}",
)
self._call = sensor_call
self._class = sensor_class
self._state = None
self._unit_of_measurement = sensor_unit
self._modifier = modifier
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._class
@property
def state(self):
"""Return the state of the sensor."""
val = getattr(self._zone, self._call)()
if self._modifier:
val = self._modifier(val)
if isinstance(val, float):
val = round(val, 1)
return val
@property
def unit_of_measurement(self):
"""Return the unit of measurement this sensor expresses itself in."""
return self._unit_of_measurement
| apache-2.0 | 1,420,966,559,329,984,000 | 27.946721 | 77 | 0.485488 | false |
samsu/neutron | tests/unit/services/vpn/test_vpnaas_driver_plugin.py | 12 | 7430 | # Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from neutron.common import constants
from neutron import context
from neutron.db.vpn import vpn_validator
from neutron import manager
from neutron.plugins.common import constants as p_constants
from neutron.services.vpn.service_drivers import ipsec as ipsec_driver
from neutron.tests.unit.db.vpn import test_db_vpnaas
from neutron.tests.unit.openvswitch import test_agent_scheduler
from neutron.tests.unit import test_agent_ext_plugin
FAKE_HOST = test_agent_ext_plugin.L3_HOSTA
VPN_DRIVER_CLASS = 'neutron.services.vpn.plugin.VPNDriverPlugin'
class TestVPNDriverPlugin(test_db_vpnaas.TestVpnaas,
test_agent_scheduler.AgentSchedulerTestMixIn,
test_agent_ext_plugin.AgentDBTestMixIn):
def setUp(self):
self.adminContext = context.get_admin_context()
driver_cls_p = mock.patch(
'neutron.services.vpn.'
'service_drivers.ipsec.IPsecVPNDriver')
driver_cls = driver_cls_p.start()
self.driver = mock.Mock()
self.driver.service_type = ipsec_driver.IPSEC
self.driver.validator = vpn_validator.VpnReferenceValidator()
driver_cls.return_value = self.driver
super(TestVPNDriverPlugin, self).setUp(
vpnaas_plugin=VPN_DRIVER_CLASS)
def test_create_ipsec_site_connection(self, **extras):
super(TestVPNDriverPlugin, self).test_create_ipsec_site_connection()
self.driver.create_ipsec_site_connection.assert_called_once_with(
mock.ANY, mock.ANY)
self.driver.delete_ipsec_site_connection.assert_called_once_with(
mock.ANY, mock.ANY)
def test_delete_vpnservice(self, **extras):
super(TestVPNDriverPlugin, self).test_delete_vpnservice()
self.driver.delete_vpnservice.assert_called_once_with(
mock.ANY, mock.ANY)
def test_update_vpnservice(self, **extras):
super(TestVPNDriverPlugin, self).test_update_vpnservice()
self.driver.update_vpnservice.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY)
@contextlib.contextmanager
def vpnservice_set(self):
"""Test case to create a ipsec_site_connection."""
vpnservice_name = "vpn1"
ipsec_site_connection_name = "ipsec_site_connection"
ikename = "ikepolicy1"
ipsecname = "ipsecpolicy1"
description = "my-vpn-connection"
keys = {'name': vpnservice_name,
'description': "my-vpn-connection",
'peer_address': '192.168.1.10',
'peer_id': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
'initiator': 'bi-directional',
'mtu': 1500,
'dpd_action': 'hold',
'dpd_interval': 40,
'dpd_timeout': 120,
'tenant_id': self._tenant_id,
'psk': 'abcd',
'status': 'PENDING_CREATE',
'admin_state_up': True}
with self.ikepolicy(name=ikename) as ikepolicy:
with self.ipsecpolicy(name=ipsecname) as ipsecpolicy:
with self.subnet() as subnet:
with self.router() as router:
plugin = manager.NeutronManager.get_plugin()
agent = {'host': FAKE_HOST,
'agent_type': constants.AGENT_TYPE_L3,
'binary': 'fake-binary',
'topic': 'fake-topic'}
plugin.create_or_update_agent(self.adminContext, agent)
plugin.schedule_router(
self.adminContext, router['router']['id'])
with self.vpnservice(name=vpnservice_name,
subnet=subnet,
router=router) as vpnservice1:
keys['ikepolicy_id'] = ikepolicy['ikepolicy']['id']
keys['ipsecpolicy_id'] = (
ipsecpolicy['ipsecpolicy']['id']
)
keys['vpnservice_id'] = (
vpnservice1['vpnservice']['id']
)
with self.ipsec_site_connection(
self.fmt,
ipsec_site_connection_name,
keys['peer_address'],
keys['peer_id'],
keys['peer_cidrs'],
keys['mtu'],
keys['psk'],
keys['initiator'],
keys['dpd_action'],
keys['dpd_interval'],
keys['dpd_timeout'],
vpnservice1,
ikepolicy,
ipsecpolicy,
keys['admin_state_up'],
description=description,
):
yield vpnservice1['vpnservice']
def test_get_agent_hosting_vpn_services(self):
with self.vpnservice_set():
service_plugin = manager.NeutronManager.get_service_plugins()[
p_constants.VPN]
vpnservices = service_plugin._get_agent_hosting_vpn_services(
self.adminContext, FAKE_HOST)
vpnservices = vpnservices.all()
self.assertEqual(1, len(vpnservices))
vpnservice_db = vpnservices[0]
self.assertEqual(1, len(vpnservice_db.ipsec_site_connections))
ipsec_site_connection = vpnservice_db.ipsec_site_connections[0]
self.assertIsNotNone(
ipsec_site_connection['ikepolicy'])
self.assertIsNotNone(
ipsec_site_connection['ipsecpolicy'])
def test_update_status(self):
with self.vpnservice_set() as vpnservice:
self._register_agent_states()
service_plugin = manager.NeutronManager.get_service_plugins()[
p_constants.VPN]
service_plugin.update_status_by_agent(
self.adminContext,
[{'status': 'ACTIVE',
'ipsec_site_connections': {},
'updated_pending_status': True,
'id': vpnservice['id']}])
vpnservices = service_plugin._get_agent_hosting_vpn_services(
self.adminContext, FAKE_HOST)
vpnservice_db = vpnservices[0]
self.assertEqual(p_constants.ACTIVE, vpnservice_db['status'])
| apache-2.0 | -1,457,303,419,768,473,600 | 45.149068 | 79 | 0.543742 | false |
Openlights/firemix | plugins/fixture_step.py | 1 | 2195 | # This file is part of Firemix.
#
# Copyright 2013-2016 Jonathan Evans <[email protected]>
#
# Firemix is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Firemix is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Firemix. If not, see <http://www.gnu.org/licenses/>.
from builtins import range
import numpy as np
from lib.transition import Transition
from lib.buffer_utils import BufferUtils, struct_flat
class FixtureStep(Transition):
"""
"""
def __init__(self, app):
Transition.__init__(self, app)
def __str__(self):
return "Fixture Step"
def reset(self):
self.fixtures = self._app.scene.fixtures()
buffer_size = BufferUtils.get_buffer_size()
self.mask = np.tile(False, buffer_size)
np.random.seed()
self.rand_index = np.arange(len(self.fixtures))
np.random.shuffle(self.rand_index)
self.last_idx = 0
def render(self, start, end, progress, out):
start[self.mask] = (0.0, 0.0, 0.0)
end[np.invert(self.mask)] = (0.0, 0.0, 0.0)
idx = int(progress * len(self.rand_index))
if idx >= self.last_idx:
for i in range(self.last_idx, idx):
fix = self.fixtures[self.rand_index[i]]
pix_start, pix_end = BufferUtils.get_fixture_extents(fix.strand, fix.address)
self.mask[pix_start:pix_end] = True
else:
for i in range(idx, self.last_idx):
fix = self.fixtures[self.rand_index[i]]
pix_start, pix_end = BufferUtils.get_fixture_extents(fix.strand, fix.address)
                self.mask[pix_start:pix_end] = False
self.last_idx = idx
np.add(struct_flat(start), struct_flat(end), struct_flat(out))
| gpl-3.0 | 3,343,524,579,706,074,000 | 32.769231 | 93 | 0.642369 | false |
sgallagher/anaconda | pyanaconda/modules/storage/storage.py | 1 | 14418 | #
# Kickstart module for the storage.
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from pyanaconda.core.signal import Signal
from pyanaconda.core.dbus import DBus
from pyanaconda.core.storage import blivet_version
from pyanaconda.modules.common.base import KickstartService
from pyanaconda.modules.common.constants.services import STORAGE
from pyanaconda.modules.common.containers import TaskContainer
from pyanaconda.modules.common.errors.storage import InvalidStorageError
from pyanaconda.modules.common.structures.requirement import Requirement
from pyanaconda.modules.storage.bootloader import BootloaderModule
from pyanaconda.modules.storage.checker import StorageCheckerModule
from pyanaconda.modules.storage.dasd import DASDModule
from pyanaconda.modules.storage.devicetree import DeviceTreeModule, create_storage
from pyanaconda.modules.storage.disk_initialization import DiskInitializationModule
from pyanaconda.modules.storage.disk_selection import DiskSelectionModule
from pyanaconda.modules.storage.fcoe import FCOEModule
from pyanaconda.modules.storage.installation import MountFilesystemsTask, CreateStorageLayoutTask, \
WriteConfigurationTask
from pyanaconda.modules.storage.iscsi import ISCSIModule
from pyanaconda.modules.storage.kickstart import StorageKickstartSpecification
from pyanaconda.modules.storage.nvdimm import NVDIMMModule
from pyanaconda.modules.storage.partitioning.constants import PartitioningMethod
from pyanaconda.modules.storage.partitioning.factory import PartitioningFactory
from pyanaconda.modules.storage.partitioning.validate import StorageValidateTask
from pyanaconda.modules.storage.reset import ScanDevicesTask
from pyanaconda.modules.storage.snapshot import SnapshotModule
from pyanaconda.modules.storage.storage_interface import StorageInterface
from pyanaconda.modules.storage.teardown import UnmountFilesystemsTask, TeardownDiskImagesTask
from pyanaconda.modules.storage.zfcp import ZFCPModule
from pyanaconda.anaconda_loggers import get_module_logger
log = get_module_logger(__name__)
class StorageService(KickstartService):
"""The Storage service."""
def __init__(self):
super().__init__()
# The storage model.
self._current_storage = None
self._storage_playground = None
self.storage_changed = Signal()
# The created partitioning modules.
self._created_partitioning = []
self.created_partitioning_changed = Signal()
# The applied partitioning module.
self._applied_partitioning = None
self.applied_partitioning_changed = Signal()
self.partitioning_reset = Signal()
# Initialize modules.
self._modules = []
self._storage_checker_module = StorageCheckerModule()
self._add_module(self._storage_checker_module)
self._device_tree_module = DeviceTreeModule()
self._add_module(self._device_tree_module)
self._disk_init_module = DiskInitializationModule()
self._add_module(self._disk_init_module)
self._disk_selection_module = DiskSelectionModule()
self._add_module(self._disk_selection_module)
self._snapshot_module = SnapshotModule()
self._add_module(self._snapshot_module)
self._bootloader_module = BootloaderModule()
self._add_module(self._bootloader_module)
self._fcoe_module = FCOEModule()
self._add_module(self._fcoe_module)
self._iscsi_module = ISCSIModule()
self._add_module(self._iscsi_module)
self._nvdimm_module = NVDIMMModule()
self._add_module(self._nvdimm_module)
self._dasd_module = DASDModule()
self._add_module(self._dasd_module)
self._zfcp_module = ZFCPModule()
self._add_module(self._zfcp_module)
# Connect modules to signals.
self.storage_changed.connect(
self._device_tree_module.on_storage_changed
)
self.storage_changed.connect(
self._disk_init_module.on_storage_changed
)
self.storage_changed.connect(
self._disk_selection_module.on_storage_changed
)
self.storage_changed.connect(
self._snapshot_module.on_storage_changed
)
self.storage_changed.connect(
self._bootloader_module.on_storage_changed
)
self.storage_changed.connect(
self._dasd_module.on_storage_changed
)
self._disk_init_module.format_unrecognized_enabled_changed.connect(
self._dasd_module.on_format_unrecognized_enabled_changed
)
self._disk_init_module.format_ldl_enabled_changed.connect(
self._dasd_module.on_format_ldl_enabled_changed
)
self._disk_selection_module.protected_devices_changed.connect(
self.on_protected_devices_changed
)
# After connecting modules to signals, create the initial
# storage model. It will be propagated to all modules.
self._set_storage(create_storage())
def _add_module(self, storage_module):
"""Add a base kickstart module."""
self._modules.append(storage_module)
def publish(self):
"""Publish the module."""
TaskContainer.set_namespace(STORAGE.namespace)
for kickstart_module in self._modules:
kickstart_module.publish()
DBus.publish_object(STORAGE.object_path, StorageInterface(self))
DBus.register_service(STORAGE.service_name)
@property
def kickstart_specification(self):
"""Return the kickstart specification."""
return StorageKickstartSpecification
def process_kickstart(self, data):
"""Process the kickstart data."""
# Process the kickstart data in modules.
for kickstart_module in self._modules:
kickstart_module.process_kickstart(data)
# Set the default filesystem type.
if data.autopart.autopart and data.autopart.fstype:
self.storage.set_default_fstype(data.autopart.fstype)
# Create a new partitioning module.
partitioning_method = PartitioningFactory.get_method_for_kickstart(data)
if partitioning_method:
partitioning_module = self.create_partitioning(partitioning_method)
partitioning_module.process_kickstart(data)
def setup_kickstart(self, data):
"""Set up the kickstart data."""
for kickstart_module in self._modules:
kickstart_module.setup_kickstart(data)
if self.applied_partitioning:
self.applied_partitioning.setup_kickstart(data)
def generate_kickstart(self):
"""Generate kickstart string representation of this module's data
Adds Blivet version to the output because most of the strings come from Blivet anyway.
"""
return "# Generated using Blivet version {}\n{}".format(
blivet_version,
super().generate_kickstart()
)
@property
def storage(self):
"""The storage model.
:return: an instance of Blivet
"""
if self._storage_playground:
return self._storage_playground
if not self._current_storage:
self._set_storage(create_storage())
return self._current_storage
def _set_storage(self, storage):
"""Set the current storage model.
The current storage is the latest model of
the system’s storage configuration created
by scanning all devices.
:param storage: a storage
"""
self._current_storage = storage
if self._storage_playground:
return
self.storage_changed.emit(storage)
log.debug("The storage model has changed.")
def _set_storage_playground(self, storage):
"""Set the storage playground.
The storage playground is a model of a valid
partitioned storage configuration, that can be
used for an installation.
:param storage: a storage or None
"""
self._storage_playground = storage
if storage is None:
storage = self.storage
self.storage_changed.emit(storage)
log.debug("The storage model has changed.")
def on_protected_devices_changed(self, protected_devices):
"""Update the protected devices in the storage model."""
if not self._current_storage:
return
self.storage.protect_devices(protected_devices)
def scan_devices_with_task(self):
"""Scan all devices with a task.
We will reset a copy of the current storage model
and switch the models if the reset is successful.
:return: a task
"""
# Copy the storage.
storage = self.storage.copy()
# Set up the storage.
storage.ignored_disks = self._disk_selection_module.ignored_disks
storage.exclusive_disks = self._disk_selection_module.exclusive_disks
storage.protected_devices = self._disk_selection_module.protected_devices
storage.disk_images = self._disk_selection_module.disk_images
# Create the task.
task = ScanDevicesTask(storage)
task.succeeded_signal.connect(lambda: self._set_storage(storage))
return task
def create_partitioning(self, method: PartitioningMethod):
"""Create a new partitioning.
Allowed values:
AUTOMATIC
CUSTOM
MANUAL
INTERACTIVE
BLIVET
:param PartitioningMethod method: a partitioning method
:return: a partitioning module
"""
module = PartitioningFactory.create_partitioning(method)
# Update the module.
module.on_storage_changed(
self._current_storage
)
module.on_selected_disks_changed(
self._disk_selection_module.selected_disks
)
# Connect the callbacks to signals.
self.storage_changed.connect(
module.on_storage_changed
)
self.partitioning_reset.connect(
module.on_partitioning_reset
)
self._disk_selection_module.selected_disks_changed.connect(
module.on_selected_disks_changed
)
# Update the list of modules.
self._add_created_partitioning(module)
return module
@property
def created_partitioning(self):
"""List of all created partitioning modules."""
return self._created_partitioning
def _add_created_partitioning(self, module):
"""Add a created partitioning module."""
self._created_partitioning.append(module)
self.created_partitioning_changed.emit(module)
log.debug("Created the partitioning %s.", module)
def apply_partitioning(self, module):
"""Apply a partitioning.
:param module: a partitioning module
:raise: InvalidStorageError of the partitioning is not valid
"""
# Validate the partitioning.
storage = module.storage.copy()
task = StorageValidateTask(storage)
report = task.run()
if not report.is_valid():
raise InvalidStorageError(" ".join(report.error_messages))
# Apply the partitioning.
self._set_storage_playground(storage)
self._set_applied_partitioning(module)
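    # Illustrative flow (not part of the original module; `storage_service` is a
    # hypothetical in-process reference to this service): callers create a
    # partitioning module, configure it, then apply it, resetting on failure.
    #
    #   module = storage_service.create_partitioning(PartitioningMethod.AUTOMATIC)
    #   ...configure the module's requests...
    #   try:
    #       storage_service.apply_partitioning(module)
    #   except InvalidStorageError:
    #       storage_service.reset_partitioning()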
@property
def applied_partitioning(self):
"""The applied partitioning."""
return self._applied_partitioning
def _set_applied_partitioning(self, module):
"""Set the applied partitioning.
:param module: a partitioning module or None
"""
self._applied_partitioning = module
self.applied_partitioning_changed.emit()
if module is None:
module = "NONE"
log.debug("The partitioning %s is applied.", module)
def reset_partitioning(self):
"""Reset the partitioning."""
self._set_storage_playground(None)
self._set_applied_partitioning(None)
self.partitioning_reset.emit()
def collect_requirements(self):
"""Return installation requirements for this module.
:return: a list of requirements
"""
requirements = []
# Add the storage requirements.
for name in self.storage.packages:
requirements.append(Requirement.for_package(
name, reason="Required to manage storage devices."
))
# Add other requirements, for example for bootloader.
for kickstart_module in self._modules:
requirements.extend(kickstart_module.collect_requirements())
return requirements
def install_with_tasks(self):
"""Returns installation tasks of this module.
:returns: list of installation tasks
"""
storage = self.storage
return [
CreateStorageLayoutTask(storage),
MountFilesystemsTask(storage)
]
def write_configuration_with_task(self):
"""Write the storage configuration with a task.
FIXME: This is a temporary workaround.
:return: an installation task
"""
return WriteConfigurationTask(self.storage)
def teardown_with_tasks(self):
"""Returns teardown tasks for this module.
:return: a list installation tasks
"""
storage = self.storage
return [
UnmountFilesystemsTask(storage),
TeardownDiskImagesTask(storage)
]
| gpl-2.0 | -1,372,997,665,625,709,300 | 33.905569 | 100 | 0.669742 | false |
LLNL/spack | var/spack/repos/builtin/packages/muparser/package.py | 5 | 1625 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Muparser(Package):
"""C++ math expression parser library."""
homepage = "http://muparser.beltoforion.de/"
url = "https://github.com/beltoforion/muparser/archive/v2.2.5.tar.gz"
version('2.2.6.1', sha256='d2562853d972b6ddb07af47ce8a1cdeeb8bb3fa9e8da308746de391db67897b3')
version('2.2.5', sha256='0666ef55da72c3e356ca85b6a0084d56b05dd740c3c21d26d372085aa2c6e708')
# Replace std::auto_ptr by std::unique_ptr
# https://github.com/beltoforion/muparser/pull/46
patch('auto_ptr.patch',
when='@2.2.5')
depends_on('[email protected]:', when='@2.2.6:', type='build')
# Cmake build since 2.2.6
@when('@2.2.6:')
def install(self, spec, prefix):
cmake_args = [
'-DENABLE_SAMPLES=OFF',
'-DENABLE_OPENMP=OFF',
'-DBUILD_SHARED_LIBS=ON'
]
cmake_args.extend(std_cmake_args)
with working_dir('spack-build', create=True):
cmake('..', *cmake_args)
make()
make('install')
@when('@2.2.5')
def install(self, spec, prefix):
options = ['--disable-debug',
'--disable-samples',
'--disable-dependency-tracking',
'CXXFLAGS={0}'.format(self.compiler.cxx11_flag),
'--prefix=%s' % prefix]
configure(*options)
make(parallel=False)
make("install")
| lgpl-2.1 | -7,941,525,877,107,056,000 | 30.862745 | 97 | 0.597538 | false |
pkuyym/Paddle | paddle/contrib/float16/float16_transpiler.py | 3 | 11063 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.framework import Program
from paddle.fluid.executor import global_scope
class Float16Transpiler:
def transpile(self, program, place, scope=None):
'''
Transpile the program desc and cast the weights to float16 data type to
enable float16 inference.
Since the operator in a program desc will automatically choose the
right compute kernel to run based on the data type of the input tensor.
We actually don't need to change the program desc to run in float16 mode.
However, in this way, users who are used to feeding and fetching tensors
of float32 data type when running typical inference may find it confusing
and difficult to run inference in float16 mode as they need to convert
input data to float16 dtype and then convert the results back to float32
dtype to match the rest of code.
So this function appends cast ops to the program desc where necessary so
that users are able to run inference in float16 mode while providing input
tensor (feed_holder) of float data type and obtaining output tensor
(fetch_holder) of float data type.
Moreover, it is desired that when we have the scope and program desc to run
inference in float32 mode, we can use a single API to do the necessary
modification and then user can run float16 inference on the fly. To make
this happen, this function also create new parameters in the scope to have the
converted float16 weights and change the operators in program desc to use
these new parameters.
:param program: program to transpile
:type program: Program
:param place: inference place
:type place: Place
:param scope: inference scope
:type scope: Scope
'''
if not isinstance(program, Program):
raise TypeError("program should be as Program type")
if not isinstance(place, core.CPUPlace) and not isinstance(
place, core.CUDAPlace):
raise TypeError("place should be as CPUPlace/CUDAPlace type")
if scope is None:
scope = global_scope()
if not isinstance(scope, core.Scope):
raise TypeError("scope should be as Scope type or None")
self.scope = scope
self.place = place
self.block = program.block(0)
        self.input_map = {}  # store the input names that should be adjusted
self._modify_feed_fetch()
self._convert_param_to_float16()
self._adjust_input(skip=True)
self._remove_unused_var()
# TODO(luotao): use clone() method to flush the program.desc in force,
# since some large program.desc will not be flushed immediately.
# And a better solution will be considered later.
program = program.clone()
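    # Illustrative usage (not part of the original file; model_dir and the feed
    # data are hypothetical): a float32 inference program is transpiled in place
    # and can then be fed float32 inputs while most kernels execute in float16.
    #
    #   place = core.CUDAPlace(0)
    #   exe = fluid.Executor(place)
    #   [program, feed_names, fetch_targets] = fluid.io.load_inference_model(model_dir, exe)
    #   Float16Transpiler().transpile(program, place)
    #   results = exe.run(program, feed={feed_names[0]: image_data}, fetch_list=fetch_targets)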
# ====================== private transpiler functions =====================
def _adjust_input(self, skip=False):
'''
Change the input variable name in operators.
When we are in the process of modifying a program desc, we usually
replace some variables with some other variables, where we create
a dictionary input_map to record the one-to-one correspondence
between each old variable and the new one.
After that, this function will search all the operators that use the
old variables and change the info in op to use the new variables. There
maybe some exceptions to this rule when we are using the float16 transpiler
and insert cast ops to cast float32 variable to float16 one. After we
insert the cast op to cast var_1 to var_1_fp16, we don't want to change
the input of cast op to var_1_fp16 after using this function.
'''
skip_ops = {"cast"}
for i in range(len(self.block.ops)):
current_op = self.block.ops[i]
if skip and current_op.type in skip_ops:
continue
for input_arg in current_op.input_arg_names:
if input_arg in self.input_map:
current_op.rename_input(input_arg,
self.input_map[input_arg])
def _remove_unused_var(self):
'''
        remove unused variables in program
'''
args = []
for i in range(len(self.block.ops)):
current_op = self.block.ops[i]
args += current_op.input_arg_names
args += current_op.output_arg_names
args = list(set(args)) # unique the input and output arguments
for var in self.block.vars.keys():
if var not in args:
self.block.remove_var(var)
def _modify_feed_fetch(self):
'''
Modify feed fetch op/vars for float16 inference.
For each feed op:
feed_op->feed_target_var
Change it to:
feed_op->feed_target_var->cast_op(from other dtype to float16)->tmp_var
For each fetch op:
fetch_target_var->fetch_op
Change it to:
tmp_var->cast_op(from float16 to other dtype)->fetch_target_var->fetch_op
:return: None
'''
def find_op(var):
# It is possible that var.op is not up to date after some
# modifications to program desc. Here we force to make it up to date.
var.op = None
for op in self.block.ops:
if var.name in op.output_arg_names:
var.op = op
break
if var.op is None:
raise ValueError("The target variable must have an "
"associated operator that generates it.")
i = 0
while i < len(self.block.ops):
cur_op = self.block.ops[i]
if cur_op.type == "feed":
var_name = cur_op.output("Out")[0]
tmp_var_name = var_name + ".fp16"
var = self.block.vars[var_name]
tmp_var = self.block.create_var(
name=tmp_var_name.encode('ascii'),
type=var.type,
dtype=core.VarDesc.VarType.FP16,
shape=var.shape,
persistable=var.persistable)
self.block.insert_op(
i + 1,
type="cast",
inputs={"X": var},
outputs={"Out": tmp_var},
attrs={
'in_dtype': int(var.dtype),
'out_dtype': int(tmp_var.dtype)
})
self.input_map[var_name] = tmp_var_name
i = i + 1
elif cur_op.type == "fetch":
var_name = cur_op.input("X")[0]
tmp_var_name = var_name + ".fp16"
var = self.block.vars[var_name]
tmp_var = self.block.create_var(
name=tmp_var_name.encode('ascii'),
type=var.type,
dtype=core.VarDesc.VarType.FP16,
shape=var.shape,
persistable=var.persistable)
find_op(var)
var.op.rename_output(var_name, tmp_var_name)
self.block.insert_op(
i,
type="cast",
inputs={"X": tmp_var},
outputs={"Out": var},
attrs={
'in_dtype': int(tmp_var.dtype),
'out_dtype': int(var.dtype)
})
i = i + 1
i = i + 1
def _convert_param_to_float16(self):
def _get_no_fp16_conversion_var_names():
'''
Get the set of input variable names that shouldn't be converted to float16.
When we want to run inference in float16 mode, most parameters need to be
firstly converted to float16. However, there are some parameters that
shouldn't be converted to float16 because the corresponding operator
requires float32 parameters even in float16 mode (when the input data is
of float16 data type). Currently, the only operator that has this exclusion
is the batch norm op.
:return: set of input variable names
:type var_names: set
'''
op_names = {'batch_norm'}
var_names = []
for op in self.block.ops:
if op.type in op_names:
var_names += op.input_arg_names
return set(var_names)
def _should_be_converted(var):
return var.persistable and \
var.name not in self.no_conversion_vars and \
var.type != core.VarDesc.VarType.FEED_MINIBATCH and \
var.type != core.VarDesc.VarType.FETCH_LIST
self.no_conversion_vars = _get_no_fp16_conversion_var_names()
conversion_var_list = filter(_should_be_converted,
self.block.vars.values())
for var in conversion_var_list:
fp16_var_name = var.name + ".fp16"
fp16_var = self.block.create_parameter(
name=fp16_var_name.encode('ascii'),
type=var.type,
dtype=core.VarDesc.VarType.FP16,
shape=var.shape)
# cast the data in the tensor of the original var to float16
# data type and store it in the tensor of the new float16 var
self.scope.var(fp16_var_name)
fp16_tensor = self.scope.find_var(fp16_var_name).get_tensor()
tensor = np.array(self.scope.find_var(var.name).get_tensor())
# After the old tensor data is converted to np.float16, view(np.uint16)
# is used so that the internal memory of the numpy array will be
# reinterpreted to be of np.uint16 data type, which is binded to fluid
# float16 data type via the help of pybind in tensor_py.h.
fp16_tensor.set(
tensor.astype(np.float16).view(np.uint16), self.place)
# old var will be replaced by the fp16 var in program desc
self.input_map[var.name] = fp16_var_name
self.block.remove_var(var.name)
| apache-2.0 | 1,695,814,833,258,386,400 | 42.214844 | 88 | 0.573443 | false |
lshain-android-source/external-chromium_org | media/tools/constrained_network_server/traffic_control_test.py | 187 | 5943 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""End-to-end tests for traffic control library."""
import os
import re
import sys
import unittest
import traffic_control
class TrafficControlTests(unittest.TestCase):
"""System tests for traffic_control functions.
These tests require root access.
"""
# A dummy interface name to use instead of real interface.
_INTERFACE = 'myeth'
def setUp(self):
"""Setup a dummy interface."""
# If we update to python version 2.7 or newer we can use setUpClass() or
# unittest.skipIf().
if os.getuid() != 0:
sys.exit('You need root access to run these tests.')
command = ['ip', 'link', 'add', 'name', self._INTERFACE, 'type', 'dummy']
traffic_control._Exec(command, 'Error creating dummy interface %s.' %
self._INTERFACE)
def tearDown(self):
"""Teardown the dummy interface and any network constraints on it."""
# Deleting the dummy interface deletes all associated constraints.
command = ['ip', 'link', 'del', self._INTERFACE]
traffic_control._Exec(command)
def testExecOutput(self):
output = traffic_control._Exec(['echo', ' Test '])
self.assertEqual(output, 'Test')
def testExecException(self):
self.assertRaises(traffic_control.TrafficControlError,
traffic_control._Exec, command=['ls', '!doesntExist!'])
def testExecErrorCustomMsg(self):
try:
traffic_control._Exec(['ls', '!doesntExist!'], msg='test_msg')
self.fail('No exception raised for invalid command.')
except traffic_control.TrafficControlError as e:
self.assertEqual(e.msg, 'test_msg')
def testAddRootQdisc(self):
"""Checks adding a root qdisc is successful."""
config = {'interface': self._INTERFACE}
root_detail = 'qdisc htb 1: root'
# Assert no htb root at startup.
command = ['tc', 'qdisc', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
self.assertFalse(root_detail in output)
traffic_control._AddRootQdisc(config['interface'])
output = traffic_control._Exec(command)
# Assert htb root is added.
self.assertTrue(root_detail in output)
def testConfigureClassAdd(self):
"""Checks adding and deleting a class to the root qdisc."""
config = {
'interface': self._INTERFACE,
'port': 12345,
'server_port': 33333,
'bandwidth': 2000
}
class_detail = ('class htb 1:%x root prio 0 rate %dKbit ceil %dKbit' %
(config['port'], config['bandwidth'], config['bandwidth']))
# Add root qdisc.
traffic_control._AddRootQdisc(config['interface'])
# Assert class does not exist prior to adding it.
command = ['tc', 'class', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
self.assertFalse(class_detail in output)
# Add class to root.
traffic_control._ConfigureClass('add', config)
# Assert class is added.
command = ['tc', 'class', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
self.assertTrue(class_detail in output)
# Delete class.
traffic_control._ConfigureClass('del', config)
# Assert class is deleted.
command = ['tc', 'class', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
self.assertFalse(class_detail in output)
def testAddSubQdisc(self):
"""Checks adding a sub qdisc to existing class."""
config = {
'interface': self._INTERFACE,
'port': 12345,
'server_port': 33333,
'bandwidth': 2000,
'latency': 250,
'loss': 5
}
qdisc_re_detail = ('qdisc netem %x: parent 1:%x .* delay %d.0ms loss %d%%' %
(config['port'], config['port'], config['latency'],
config['loss']))
# Add root qdisc.
traffic_control._AddRootQdisc(config['interface'])
# Add class to root.
traffic_control._ConfigureClass('add', config)
# Assert qdisc does not exist prior to adding it.
command = ['tc', 'qdisc', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
handle_id_re = re.search(qdisc_re_detail, output)
self.assertEqual(handle_id_re, None)
# Add qdisc to class.
traffic_control._AddSubQdisc(config)
# Assert qdisc is added.
command = ['tc', 'qdisc', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
handle_id_re = re.search(qdisc_re_detail, output)
self.assertNotEqual(handle_id_re, None)
def testAddDeleteFilter(self):
config = {
'interface': self._INTERFACE,
'port': 12345,
'bandwidth': 2000
}
# Assert no filter exists.
command = ['tc', 'filter', 'list', 'dev', config['interface'], 'parent',
'1:0']
output = traffic_control._Exec(command)
self.assertEqual(output, '')
# Create the root and class to which the filter will be attached.
# Add root qdisc.
traffic_control._AddRootQdisc(config['interface'])
# Add class to root.
traffic_control._ConfigureClass('add', config)
# Add the filter.
traffic_control._AddFilter(config['interface'], config['port'])
handle_id = traffic_control._GetFilterHandleId(config['interface'],
config['port'])
self.assertNotEqual(handle_id, None)
# Delete the filter.
# The output of tc filter list is not None because tc adds default filters.
traffic_control._DeleteFilter(config['interface'], config['port'])
self.assertRaises(traffic_control.TrafficControlError,
traffic_control._GetFilterHandleId, config['interface'],
config['port'])
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 7,303,329,471,316,451,000 | 33.352601 | 80 | 0.633182 | false |
nishad-jobsglobal/odoo-marriot | addons/account/project/project.py | 273 | 2423 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_analytic_journal(osv.osv):
_name = 'account.analytic.journal'
_description = 'Analytic Journal'
_columns = {
'name': fields.char('Journal Name', required=True),
'code': fields.char('Journal Code', size=8),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the analytic journal without removing it."),
'type': fields.selection([('sale','Sale'), ('purchase','Purchase'), ('cash','Cash'), ('general','General'), ('situation','Situation')], 'Type', required=True, help="Gives the type of the analytic journal. When it needs for a document (eg: an invoice) to create analytic entries, Odoo will look for a matching journal of the same type."),
'line_ids': fields.one2many('account.analytic.line', 'journal_id', 'Lines', copy=False),
'company_id': fields.many2one('res.company', 'Company', required=True),
}
_defaults = {
'active': True,
'type': 'general',
'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
}
class account_journal(osv.osv):
_inherit="account.journal"
_columns = {
'analytic_journal_id':fields.many2one('account.analytic.journal','Analytic Journal', help="Journal for analytic entries"),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -6,416,558,254,399,800,000 | 47.46 | 345 | 0.635163 | false |
blaze/dask | dask/array/reductions.py | 1 | 52217 | import builtins
from collections.abc import Iterable
import operator
from functools import partial
from itertools import product, repeat
from math import factorial, log, ceil, log2
import numpy as np
from numbers import Integral, Number
from tlz import compose, partition_all, get, accumulate, pluck, drop
from . import chunk
from .core import _concatenate2, Array, handle_out, implements
from .blockwise import blockwise
from ..blockwise import lol_tuples
from .creation import arange, diagonal
from .utils import full_like_safe, validate_axis, compute_meta, is_arraylike
from .wrap import zeros, ones
from .numpy_compat import ma_divide, divide as np_divide
from ..base import tokenize
from ..highlevelgraph import HighLevelGraph
from ..utils import (
ignoring,
funcname,
Dispatch,
deepmap,
getargspec,
derived_from,
is_series_like,
)
from .. import config
# Generic functions to support chunks of different types
empty_lookup = Dispatch("empty")
empty_lookup.register((object, np.ndarray), np.empty)
empty_lookup.register(np.ma.masked_array, np.ma.empty)
divide_lookup = Dispatch("divide")
divide_lookup.register((object, np.ndarray), np_divide)
divide_lookup.register(np.ma.masked_array, ma_divide)
def divide(a, b, dtype=None):
key = lambda x: getattr(x, "__array_priority__", float("-inf"))
f = divide_lookup.dispatch(type(builtins.max(a, b, key=key)))
return f(a, b, dtype=dtype)
def reduction(
x,
chunk,
aggregate,
axis=None,
keepdims=False,
dtype=None,
split_every=None,
combine=None,
name=None,
out=None,
concatenate=True,
output_size=1,
meta=None,
):
"""General version of reductions
Parameters
----------
x: Array
Data being reduced along one or more axes
chunk: callable(x_chunk, axis, keepdims)
First function to be executed when resolving the dask graph.
This function is applied in parallel to all original chunks of x.
See below for function parameters.
combine: callable(x_chunk, axis, keepdims), optional
Function used for intermediate recursive aggregation (see
split_every below). If omitted, it defaults to aggregate.
If the reduction can be performed in less than 3 steps, it will not
be invoked at all.
aggregate: callable(x_chunk, axis, keepdims)
Last function to be executed when resolving the dask graph,
producing the final output. It is always invoked, even when the reduced
Array has only a single chunk along the reduced axes.
axis: int or sequence of ints, optional
Axis or axes to aggregate upon. If omitted, aggregate along all axes.
keepdims: boolean, optional
Whether the reduction function should preserve the reduced axes,
leaving them at size ``output_size``, or remove them.
dtype: np.dtype
data type of output. This argument was previously optional, but
leaving as ``None`` will now raise an exception.
split_every: int >= 2 or dict(axis: int), optional
Determines the depth of the recursive aggregation. If set to or more
than the number of input chunks, the aggregation will be performed in
two steps, one ``chunk`` function per input chunk and a single
``aggregate`` function at the end. If set to less than that, an
intermediate ``combine`` function will be used, so that any one
``combine`` or ``aggregate`` function has no more than ``split_every``
inputs. The depth of the aggregation graph will be
:math:`log_{split_every}(input chunks along reduced axes)`. Setting to
a low value can reduce cache size and network transfers, at the cost of
more CPU and a larger dask graph.
Omit to let dask heuristically decide a good default. A default can
also be set globally with the ``split_every`` key in
:mod:`dask.config`.
name: str, optional
Prefix of the keys of the intermediate and output nodes. If omitted it
defaults to the function names.
out: Array, optional
Another dask array whose contents will be replaced. Omit to create a
new one. Note that, unlike in numpy, this setting gives no performance
benefits whatsoever, but can still be useful if one needs to preserve
the references to a previously existing Array.
concatenate: bool, optional
If True (the default), the outputs of the ``chunk``/``combine``
functions are concatenated into a single np.array before being passed
to the ``combine``/``aggregate`` functions. If False, the input of
``combine`` and ``aggregate`` will be either a list of the raw outputs
of the previous step or a single output, and the function will have to
concatenate it itself. It can be useful to set this to False if the
chunk and/or combine steps do not produce np.arrays.
output_size: int >= 1, optional
Size of the output of the ``aggregate`` function along the reduced
axes. Ignored if keepdims is False.
Returns
-------
dask array
**Function Parameters**
x_chunk: numpy.ndarray
Individual input chunk. For ``chunk`` functions, it is one of the
original chunks of x. For ``combine`` and ``aggregate`` functions, it's
the concatenation of the outputs produced by the previous ``chunk`` or
``combine`` functions. If concatenate=False, it's a list of the raw
outputs from the previous functions.
axis: tuple
Normalized list of axes to reduce upon, e.g. ``(0, )``
Scalar, negative, and None axes have been normalized away.
Note that some numpy reduction functions cannot reduce along multiple
axes at once and strictly require an int in input. Such functions have
to be wrapped to cope.
keepdims: bool
Whether the reduction function should preserve the reduced axes or
remove them.
"""
if axis is None:
axis = tuple(range(x.ndim))
if isinstance(axis, Integral):
axis = (axis,)
axis = validate_axis(axis, x.ndim)
if dtype is None:
raise ValueError("Must specify dtype")
if "dtype" in getargspec(chunk).args:
chunk = partial(chunk, dtype=dtype)
if "dtype" in getargspec(aggregate).args:
aggregate = partial(aggregate, dtype=dtype)
if is_series_like(x):
x = x.values
# Map chunk across all blocks
inds = tuple(range(x.ndim))
# The dtype of `tmp` doesn't actually matter, and may be incorrect.
tmp = blockwise(
chunk, inds, x, inds, axis=axis, keepdims=True, token=name, dtype=dtype or float
)
tmp._chunks = tuple(
(output_size,) * len(c) if i in axis else c for i, c in enumerate(tmp.chunks)
)
if meta is None and hasattr(x, "_meta"):
try:
reduced_meta = compute_meta(
chunk, x.dtype, x._meta, axis=axis, keepdims=True, computing_meta=True
)
except TypeError:
reduced_meta = compute_meta(
chunk, x.dtype, x._meta, axis=axis, keepdims=True
)
except ValueError:
pass
else:
reduced_meta = None
result = _tree_reduce(
tmp,
aggregate,
axis,
keepdims,
dtype,
split_every,
combine,
name=name,
concatenate=concatenate,
reduced_meta=reduced_meta,
)
if keepdims and output_size != 1:
result._chunks = tuple(
(output_size,) if i in axis else c for i, c in enumerate(tmp.chunks)
)
if meta is not None:
result._meta = meta
return handle_out(out, result)
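# --- Editorial sketch, not part of the original dask source -----------------
# The chunk/combine/aggregate protocol documented above can be used directly
# to build custom reductions.  A minimal sum-of-squares reduction; the usage
# lines assume ``import dask.array as da`` for creating the input array.
def _example_sumsq_chunk(x, axis, keepdims):
    # applied independently to every input block
    return (x ** 2).sum(axis=axis, keepdims=keepdims)
def _example_sumsq_aggregate(x_chunk, axis, keepdims):
    # receives the concatenated per-block results and finishes the job;
    # plain summation is associative, so it also works as the implicit combine
    return x_chunk.sum(axis=axis, keepdims=keepdims)
# usage sketch:
#   x = da.random.random((1000, 1000), chunks=(100, 100))
#   sumsq = reduction(x, _example_sumsq_chunk, _example_sumsq_aggregate,
#                     axis=0, dtype="f8", split_every=4)
# -----------------------------------------------------------------------------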
def _tree_reduce(
x,
aggregate,
axis,
keepdims,
dtype,
split_every=None,
combine=None,
name=None,
concatenate=True,
reduced_meta=None,
):
"""Perform the tree reduction step of a reduction.
Lower-level helper; users should use ``reduction`` or ``arg_reduction`` directly.
"""
# Normalize split_every
split_every = split_every or config.get("split_every", 4)
if isinstance(split_every, dict):
split_every = dict((k, split_every.get(k, 2)) for k in axis)
elif isinstance(split_every, Integral):
n = builtins.max(int(split_every ** (1 / (len(axis) or 1))), 2)
split_every = dict.fromkeys(axis, n)
else:
raise ValueError("split_every must be a int or a dict")
# Reduce across intermediates
depth = 1
for i, n in enumerate(x.numblocks):
if i in split_every and split_every[i] != 1:
depth = int(builtins.max(depth, ceil(log(n, split_every[i]))))
func = partial(combine or aggregate, axis=axis, keepdims=True)
if concatenate:
func = compose(func, partial(_concatenate2, axes=axis))
for i in range(depth - 1):
x = partial_reduce(
func,
x,
split_every,
True,
dtype=dtype,
name=(name or funcname(combine or aggregate)) + "-partial",
reduced_meta=reduced_meta,
)
func = partial(aggregate, axis=axis, keepdims=keepdims)
if concatenate:
func = compose(func, partial(_concatenate2, axes=axis))
return partial_reduce(
func,
x,
split_every,
keepdims=keepdims,
dtype=dtype,
name=(name or funcname(aggregate)) + "-aggregate",
reduced_meta=reduced_meta,
)
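# Editorial note, not part of the original dask source: when ``split_every``
# arrives as a plain int it is spread over the reduced axes by the
# ``builtins.max(int(split_every ** (1 / (len(axis) or 1))), 2)`` line above.
# For example, split_every=16 with two reduced axes merges int(16 ** 0.5) == 4
# blocks per step on each axis, and 64 blocks along one such axis need
# ceil(log(64, 4)) == 3 reduction levels: two ``combine`` passes plus the
# final ``aggregate``.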
def partial_reduce(
func, x, split_every, keepdims=False, dtype=None, name=None, reduced_meta=None
):
"""Partial reduction across multiple axes.
Parameters
----------
func : function
x : Array
split_every : dict
Maximum reduction block sizes in each dimension.
Examples
--------
Reduce across axis 0 and 2, merging a maximum of 1 block in the 0th
dimension, and 3 blocks in the 2nd dimension:
>>> partial_reduce(np.min, x, {0: 1, 2: 3}) # doctest: +SKIP
"""
name = (
(name or funcname(func)) + "-" + tokenize(func, x, split_every, keepdims, dtype)
)
parts = [
list(partition_all(split_every.get(i, 1), range(n)))
for (i, n) in enumerate(x.numblocks)
]
keys = product(*map(range, map(len, parts)))
out_chunks = [
tuple(1 for p in partition_all(split_every[i], c)) if i in split_every else c
for (i, c) in enumerate(x.chunks)
]
if not keepdims:
out_axis = [i for i in range(x.ndim) if i not in split_every]
getter = lambda k: get(out_axis, k)
keys = map(getter, keys)
out_chunks = list(getter(out_chunks))
dsk = {}
for k, p in zip(keys, product(*parts)):
decided = dict((i, j[0]) for (i, j) in enumerate(p) if len(j) == 1)
dummy = dict(i for i in enumerate(p) if i[0] not in decided)
g = lol_tuples((x.name,), range(x.ndim), decided, dummy)
dsk[(name,) + k] = (func, g)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])
meta = x._meta
if reduced_meta is not None:
try:
meta = func(reduced_meta, computing_meta=True)
# no meta keyword argument exists for func, and it isn't required
except TypeError:
try:
meta = func(reduced_meta)
except ValueError as e:
# min/max functions have no identity, don't apply function to meta
if "zero-size array to reduction operation" in str(e):
meta = reduced_meta
# when no work can be computed on the empty array (e.g., func is a ufunc)
except ValueError:
pass
# some functions can't compute empty arrays (those for which reduced_meta
# fall into the ValueError exception) and we have to rely on reshaping
# the array according to len(out_chunks)
if is_arraylike(meta) and meta.ndim != len(out_chunks):
if len(out_chunks) == 0:
meta = meta.sum()
else:
meta = meta.reshape((0,) * len(out_chunks))
if np.isscalar(meta):
return Array(graph, name, out_chunks, dtype=dtype)
else:
with ignoring(AttributeError):
meta = meta.astype(dtype)
return Array(graph, name, out_chunks, meta=meta)
@derived_from(np)
def sum(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
if dtype is None:
dtype = getattr(np.zeros(1, dtype=a.dtype).sum(), "dtype", object)
result = reduction(
a,
chunk.sum,
chunk.sum,
axis=axis,
keepdims=keepdims,
dtype=dtype,
split_every=split_every,
out=out,
)
return result
@derived_from(np)
def prod(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
if dtype is not None:
dt = dtype
else:
dt = getattr(np.empty((1,), dtype=a.dtype).prod(), "dtype", object)
return reduction(
a,
chunk.prod,
chunk.prod,
axis=axis,
keepdims=keepdims,
dtype=dt,
split_every=split_every,
out=out,
)
@implements(np.min, np.amin)
@derived_from(np)
def min(a, axis=None, keepdims=False, split_every=None, out=None):
return reduction(
a,
chunk.min,
chunk.min,
axis=axis,
keepdims=keepdims,
dtype=a.dtype,
split_every=split_every,
out=out,
)
@implements(np.max, np.amax)
@derived_from(np)
def max(a, axis=None, keepdims=False, split_every=None, out=None):
return reduction(
a,
chunk.max,
chunk.max,
axis=axis,
keepdims=keepdims,
dtype=a.dtype,
split_every=split_every,
out=out,
)
@derived_from(np)
def any(a, axis=None, keepdims=False, split_every=None, out=None):
return reduction(
a,
chunk.any,
chunk.any,
axis=axis,
keepdims=keepdims,
dtype="bool",
split_every=split_every,
out=out,
)
@derived_from(np)
def all(a, axis=None, keepdims=False, split_every=None, out=None):
return reduction(
a,
chunk.all,
chunk.all,
axis=axis,
keepdims=keepdims,
dtype="bool",
split_every=split_every,
out=out,
)
@derived_from(np)
def nansum(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
if dtype is not None:
dt = dtype
else:
dt = getattr(chunk.nansum(np.empty((1,), dtype=a.dtype)), "dtype", object)
return reduction(
a,
chunk.nansum,
chunk.sum,
axis=axis,
keepdims=keepdims,
dtype=dt,
split_every=split_every,
out=out,
)
with ignoring(AttributeError):
@derived_from(np)
def nanprod(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
if dtype is not None:
dt = dtype
else:
dt = getattr(chunk.nansum(np.empty((1,), dtype=a.dtype)), "dtype", object)
return reduction(
a,
chunk.nanprod,
chunk.prod,
axis=axis,
keepdims=keepdims,
dtype=dt,
split_every=split_every,
out=out,
)
@derived_from(np)
def nancumsum(x, axis, dtype=None, out=None, *, method="sequential"):
"""Dask added an additional keyword-only argument ``method``.
method : {'sequential', 'blelloch'}, optional
Choose which method to use to perform the cumsum. Default is 'sequential'.
* 'sequential' performs the cumsum of each prior block before the current block.
* 'blelloch' is a work-efficient parallel cumsum. It exposes parallelism by
first taking the sum of each block and combines the sums via a binary tree.
This method may be faster or more memory efficient depending on workload,
scheduler, and hardware. More benchmarking is necessary.
"""
return cumreduction(
chunk.nancumsum,
operator.add,
0,
x,
axis,
dtype,
out=out,
method=method,
preop=np.nansum,
)
@derived_from(np)
def nancumprod(x, axis, dtype=None, out=None, *, method="sequential"):
"""Dask added an additional keyword-only argument ``method``.
method : {'sequential', 'blelloch'}, optional
Choose which method to use to perform the cumprod. Default is 'sequential'.
* 'sequential' performs the cumprod of each prior block before the current block.
* 'blelloch' is a work-efficient parallel cumprod. It exposes parallelism by first
taking the product of each block and combines the products via a binary tree.
This method may be faster or more memory efficient depending on workload,
scheduler, and hardware. More benchmarking is necessary.
"""
return cumreduction(
chunk.nancumprod,
operator.mul,
1,
x,
axis,
dtype,
out=out,
method=method,
preop=np.nanprod,
)
@derived_from(np)
def nanmin(a, axis=None, keepdims=False, split_every=None, out=None):
return reduction(
a,
chunk.nanmin,
chunk.nanmin,
axis=axis,
keepdims=keepdims,
dtype=a.dtype,
split_every=split_every,
out=out,
)
@derived_from(np)
def nanmax(a, axis=None, keepdims=False, split_every=None, out=None):
return reduction(
a,
chunk.nanmax,
chunk.nanmax,
axis=axis,
keepdims=keepdims,
dtype=a.dtype,
split_every=split_every,
out=out,
)
def numel(x, **kwargs):
""" A reduction to count the number of elements """
if hasattr(x, "mask"):
return chunk.sum(np.ones_like(x), **kwargs)
shape = x.shape
keepdims = kwargs.get("keepdims", False)
axis = kwargs.get("axis", None)
dtype = kwargs.get("dtype", np.float64)
if axis is None:
prod = np.prod(shape, dtype=dtype)
return (
full_like_safe(x, prod, shape=(1,) * len(shape), dtype=dtype)
if keepdims is True
else prod
)
    if not isinstance(axis, (tuple, list)):
axis = [axis]
prod = np.prod([shape[dim] for dim in axis])
if keepdims is True:
new_shape = tuple(
shape[dim] if dim not in axis else 1 for dim in range(len(shape))
)
else:
new_shape = tuple(shape[dim] for dim in range(len(shape)) if dim not in axis)
return full_like_safe(x, prod, shape=new_shape, dtype=dtype)
def nannumel(x, **kwargs):
""" A reduction to count the number of elements """
return chunk.sum(~(np.isnan(x)), **kwargs)
def mean_chunk(
x, sum=chunk.sum, numel=numel, dtype="f8", computing_meta=False, **kwargs
):
if computing_meta:
return x
n = numel(x, dtype=dtype, **kwargs)
total = sum(x, dtype=dtype, **kwargs)
return {"n": n, "total": total}
def mean_combine(
pairs,
sum=chunk.sum,
numel=numel,
dtype="f8",
axis=None,
computing_meta=False,
**kwargs,
):
if not isinstance(pairs, list):
pairs = [pairs]
ns = deepmap(lambda pair: pair["n"], pairs) if not computing_meta else pairs
n = _concatenate2(ns, axes=axis).sum(axis=axis, **kwargs)
if computing_meta:
return n
totals = deepmap(lambda pair: pair["total"], pairs)
total = _concatenate2(totals, axes=axis).sum(axis=axis, **kwargs)
return {"n": n, "total": total}
def mean_agg(pairs, dtype="f8", axis=None, computing_meta=False, **kwargs):
ns = deepmap(lambda pair: pair["n"], pairs) if not computing_meta else pairs
n = _concatenate2(ns, axes=axis)
n = np.sum(n, axis=axis, dtype=dtype, **kwargs)
if computing_meta:
return n
totals = deepmap(lambda pair: pair["total"], pairs)
total = _concatenate2(totals, axes=axis).sum(axis=axis, dtype=dtype, **kwargs)
return divide(total, n, dtype=dtype)
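# Editorial illustration, not part of the original dask source: the mean
# reduction passes dicts between stages because the final step needs both the
# running totals and the element counts.  For two blocks holding [1, 2, 3] and
# [4, 5] the flow is roughly
#   mean_chunk -> {"n": 3, "total": 6} and {"n": 2, "total": 9}
#   mean_agg   -> divide(6 + 9, 3 + 2) == 3.0
# which is why ``reduction`` is called with ``concatenate=False`` below: the
# aggregate and combine steps receive the raw dicts and stitch the pieces
# together themselves via ``_concatenate2``.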
@derived_from(np)
def mean(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
if dtype is not None:
dt = dtype
elif a.dtype == object:
dt = object
else:
dt = getattr(np.mean(np.zeros(shape=(1,), dtype=a.dtype)), "dtype", object)
return reduction(
a,
mean_chunk,
mean_agg,
axis=axis,
keepdims=keepdims,
dtype=dt,
split_every=split_every,
combine=mean_combine,
out=out,
concatenate=False,
)
@derived_from(np)
def nanmean(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
if dtype is not None:
dt = dtype
else:
dt = getattr(np.mean(np.empty(shape=(1,), dtype=a.dtype)), "dtype", object)
return reduction(
a,
partial(mean_chunk, sum=chunk.nansum, numel=nannumel),
mean_agg,
axis=axis,
keepdims=keepdims,
dtype=dt,
split_every=split_every,
out=out,
concatenate=False,
combine=partial(mean_combine, sum=chunk.nansum, numel=nannumel),
)
with ignoring(AttributeError):
nanmean = derived_from(np)(nanmean)
def moment_chunk(
A, order=2, sum=chunk.sum, numel=numel, dtype="f8", computing_meta=False, **kwargs
):
if computing_meta:
return A
n = numel(A, **kwargs)
n = n.astype(np.int64)
total = sum(A, dtype=dtype, **kwargs)
with np.errstate(divide="ignore", invalid="ignore"):
u = total / n
xs = [sum((A - u) ** i, dtype=dtype, **kwargs) for i in range(2, order + 1)]
M = np.stack(xs, axis=-1)
return {"total": total, "n": n, "M": M}
def _moment_helper(Ms, ns, inner_term, order, sum, axis, kwargs):
M = Ms[..., order - 2].sum(axis=axis, **kwargs) + sum(
ns * inner_term ** order, axis=axis, **kwargs
)
for k in range(1, order - 1):
coeff = factorial(order) / (factorial(k) * factorial(order - k))
M += coeff * sum(Ms[..., order - k - 2] * inner_term ** k, axis=axis, **kwargs)
return M
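# Editorial note, not part of the original dask source: for order == 2 the
# helper above is the familiar pairwise merge of sums of squared deviations.
# With per-chunk values M_i, counts n_i and chunk means mu_i = total_i / n_i,
#   M = sum_i M_i + sum_i n_i * (mu_i - mu)**2
# where mu is the grand mean; moment_agg then divides by (sum_i n_i - ddof) to
# get the variance.  Higher orders add the binomial cross terms accumulated by
# the ``for k in range(1, order - 1)`` loop.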
def moment_combine(
pairs,
order=2,
ddof=0,
dtype="f8",
sum=np.sum,
axis=None,
computing_meta=False,
**kwargs,
):
if not isinstance(pairs, list):
pairs = [pairs]
kwargs["dtype"] = dtype
kwargs["keepdims"] = True
ns = deepmap(lambda pair: pair["n"], pairs) if not computing_meta else pairs
ns = _concatenate2(ns, axes=axis)
n = ns.sum(axis=axis, **kwargs)
if computing_meta:
return n
totals = _concatenate2(deepmap(lambda pair: pair["total"], pairs), axes=axis)
Ms = _concatenate2(deepmap(lambda pair: pair["M"], pairs), axes=axis)
total = totals.sum(axis=axis, **kwargs)
with np.errstate(divide="ignore", invalid="ignore"):
mu = divide(total, n, dtype=dtype)
inner_term = divide(totals, ns, dtype=dtype) - mu
xs = [
_moment_helper(Ms, ns, inner_term, o, sum, axis, kwargs)
for o in range(2, order + 1)
]
M = np.stack(xs, axis=-1)
return {"total": total, "n": n, "M": M}
def moment_agg(
pairs,
order=2,
ddof=0,
dtype="f8",
sum=np.sum,
axis=None,
computing_meta=False,
**kwargs,
):
if not isinstance(pairs, list):
pairs = [pairs]
kwargs["dtype"] = dtype
# To properly handle ndarrays, the original dimensions need to be kept for
# part of the calculation.
keepdim_kw = kwargs.copy()
keepdim_kw["keepdims"] = True
ns = deepmap(lambda pair: pair["n"], pairs) if not computing_meta else pairs
ns = _concatenate2(ns, axes=axis)
n = ns.sum(axis=axis, **keepdim_kw)
if computing_meta:
return n
totals = _concatenate2(deepmap(lambda pair: pair["total"], pairs), axes=axis)
Ms = _concatenate2(deepmap(lambda pair: pair["M"], pairs), axes=axis)
mu = divide(totals.sum(axis=axis, **keepdim_kw), n, dtype=dtype)
with np.errstate(divide="ignore", invalid="ignore"):
inner_term = divide(totals, ns, dtype=dtype) - mu
M = _moment_helper(Ms, ns, inner_term, order, sum, axis, kwargs)
denominator = n.sum(axis=axis, **kwargs) - ddof
# taking care of the edge case with empty or all-nans array with ddof > 0
if isinstance(denominator, Number):
if denominator < 0:
denominator = np.nan
elif denominator is not np.ma.masked:
denominator[denominator < 0] = np.nan
return divide(M, denominator, dtype=dtype)
def moment(
a, order, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None
):
if not isinstance(order, Integral) or order < 0:
raise ValueError("Order must be an integer >= 0")
if order < 2:
reduced = a.sum(axis=axis) # get reduced shape and chunks
if order == 0:
# When order equals 0, the result is 1, by definition.
return ones(
reduced.shape, chunks=reduced.chunks, dtype="f8", meta=reduced._meta
)
# By definition the first order about the mean is 0.
return zeros(
reduced.shape, chunks=reduced.chunks, dtype="f8", meta=reduced._meta
)
if dtype is not None:
dt = dtype
else:
dt = getattr(np.var(np.ones(shape=(1,), dtype=a.dtype)), "dtype", object)
return reduction(
a,
partial(moment_chunk, order=order),
partial(moment_agg, order=order, ddof=ddof),
axis=axis,
keepdims=keepdims,
dtype=dt,
split_every=split_every,
out=out,
concatenate=False,
combine=partial(moment_combine, order=order),
)
@derived_from(np)
def var(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None):
if dtype is not None:
dt = dtype
else:
dt = getattr(np.var(np.ones(shape=(1,), dtype=a.dtype)), "dtype", object)
return reduction(
a,
moment_chunk,
partial(moment_agg, ddof=ddof),
axis=axis,
keepdims=keepdims,
dtype=dt,
split_every=split_every,
combine=moment_combine,
name="var",
out=out,
concatenate=False,
)
@derived_from(np)
def nanvar(
a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None
):
if dtype is not None:
dt = dtype
else:
dt = getattr(np.var(np.ones(shape=(1,), dtype=a.dtype)), "dtype", object)
return reduction(
a,
partial(moment_chunk, sum=chunk.nansum, numel=nannumel),
partial(moment_agg, sum=np.nansum, ddof=ddof),
axis=axis,
keepdims=keepdims,
dtype=dt,
split_every=split_every,
combine=partial(moment_combine, sum=np.nansum),
out=out,
concatenate=False,
)
with ignoring(AttributeError):
nanvar = derived_from(np)(nanvar)
def _sqrt(a):
o = np.sqrt(a)
if isinstance(o, np.ma.masked_array) and not o.shape and o.mask.all():
return np.ma.masked
return o
def safe_sqrt(a):
"""A version of sqrt that properly handles scalar masked arrays.
To mimic ``np.ma`` reductions, we need to convert scalar masked arrays that
have an active mask to the ``np.ma.masked`` singleton. This is properly
handled automatically for reduction code, but not for ufuncs. We implement
a simple version here, since calling `np.ma.sqrt` everywhere is
significantly more expensive.
"""
if hasattr(a, "_elemwise"):
return a._elemwise(_sqrt, a)
return _sqrt(a)
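# Editorial note, not part of the original dask source: the case _sqrt guards
# against is a fully masked 0-d result, which it collapses back to the
# ``np.ma.masked`` singleton so std()/nanstd() of an all-masked slice matches
# the behaviour of the np.ma reductions:
#   m = np.ma.masked_array(4.0, mask=True)
#   assert _sqrt(m) is np.ma.masked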
@derived_from(np)
def std(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None):
result = safe_sqrt(
var(
a,
axis=axis,
dtype=dtype,
keepdims=keepdims,
ddof=ddof,
split_every=split_every,
out=out,
)
)
if dtype and dtype != result.dtype:
result = result.astype(dtype)
return result
@derived_from(np)
def nanstd(
a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None
):
result = safe_sqrt(
nanvar(
a,
axis=axis,
dtype=dtype,
keepdims=keepdims,
ddof=ddof,
split_every=split_every,
out=out,
)
)
if dtype and dtype != result.dtype:
result = result.astype(dtype)
return result
with ignoring(AttributeError):
nanstd = derived_from(np)(nanstd)
def _arg_combine(data, axis, argfunc, keepdims=False):
""" Merge intermediate results from ``arg_*`` functions"""
axis = None if len(axis) == data.ndim or data.ndim == 1 else axis[0]
vals = data["vals"]
arg = data["arg"]
if axis is None:
local_args = argfunc(vals, axis=axis, keepdims=keepdims)
vals = vals.ravel()[local_args]
arg = arg.ravel()[local_args]
else:
local_args = argfunc(vals, axis=axis)
inds = np.ogrid[tuple(map(slice, local_args.shape))]
inds.insert(axis, local_args)
inds = tuple(inds)
vals = vals[inds]
arg = arg[inds]
if keepdims:
vals = np.expand_dims(vals, axis)
arg = np.expand_dims(arg, axis)
return arg, vals
def arg_chunk(func, argfunc, x, axis, offset_info):
arg_axis = None if len(axis) == x.ndim or x.ndim == 1 else axis[0]
vals = func(x, axis=arg_axis, keepdims=True)
arg = argfunc(x, axis=arg_axis, keepdims=True)
if arg_axis is None:
offset, total_shape = offset_info
ind = np.unravel_index(arg.ravel()[0], x.shape)
total_ind = tuple(o + i for (o, i) in zip(offset, ind))
arg[:] = np.ravel_multi_index(total_ind, total_shape)
else:
arg += offset_info
if isinstance(vals, np.ma.masked_array):
if "min" in argfunc.__name__:
fill_value = np.ma.minimum_fill_value(vals)
else:
fill_value = np.ma.maximum_fill_value(vals)
vals = np.ma.filled(vals, fill_value)
result = np.empty(
shape=vals.shape, dtype=[("vals", vals.dtype), ("arg", arg.dtype)]
)
result["vals"] = vals
result["arg"] = arg
return result
def arg_combine(func, argfunc, data, axis=None, **kwargs):
arg, vals = _arg_combine(data, axis, argfunc, keepdims=True)
result = np.empty(
shape=vals.shape, dtype=[("vals", vals.dtype), ("arg", arg.dtype)]
)
result["vals"] = vals
result["arg"] = arg
return result
def arg_agg(func, argfunc, data, axis=None, **kwargs):
return _arg_combine(data, axis, argfunc, keepdims=False)[0]
def nanarg_agg(func, argfunc, data, axis=None, **kwargs):
arg, vals = _arg_combine(data, axis, argfunc, keepdims=False)
if np.any(np.isnan(vals)):
raise ValueError("All NaN slice encountered")
return arg
def arg_reduction(x, chunk, combine, agg, axis=None, split_every=None, out=None):
"""Generic function for argreduction.
Parameters
----------
x : Array
chunk : callable
Partialed ``arg_chunk``.
combine : callable
Partialed ``arg_combine``.
agg : callable
Partialed ``arg_agg``.
axis : int, optional
split_every : int or dict, optional
"""
if axis is None:
axis = tuple(range(x.ndim))
ravel = True
elif isinstance(axis, Integral):
axis = validate_axis(axis, x.ndim)
axis = (axis,)
ravel = x.ndim == 1
else:
raise TypeError("axis must be either `None` or int, got '{0}'".format(axis))
for ax in axis:
chunks = x.chunks[ax]
if len(chunks) > 1 and np.isnan(chunks).any():
raise ValueError(
"Arg-reductions do not work with arrays that have "
"unknown chunksizes. At some point in your computation "
"this array lost chunking information.\n\n"
"A possible solution is with \n"
" x.compute_chunk_sizes()"
)
# Map chunk across all blocks
name = "arg-reduce-{0}".format(tokenize(axis, x, chunk, combine, split_every))
old = x.name
keys = list(product(*map(range, x.numblocks)))
offsets = list(product(*(accumulate(operator.add, bd[:-1], 0) for bd in x.chunks)))
if ravel:
offset_info = zip(offsets, repeat(x.shape))
else:
offset_info = pluck(axis[0], offsets)
chunks = tuple((1,) * len(c) if i in axis else c for (i, c) in enumerate(x.chunks))
dsk = dict(
((name,) + k, (chunk, (old,) + k, axis, off))
for (k, off) in zip(keys, offset_info)
)
# The dtype of `tmp` doesn't actually matter, just need to provide something
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])
tmp = Array(graph, name, chunks, dtype=x.dtype)
dtype = np.argmin([1]).dtype
result = _tree_reduce(tmp, agg, axis, False, dtype, split_every, combine)
return handle_out(out, result)
def make_arg_reduction(func, argfunc, is_nan_func=False):
"""Create an argreduction callable
Parameters
----------
func : callable
The reduction (e.g. ``min``)
argfunc : callable
The argreduction (e.g. ``argmin``)
"""
chunk = partial(arg_chunk, func, argfunc)
combine = partial(arg_combine, func, argfunc)
if is_nan_func:
agg = partial(nanarg_agg, func, argfunc)
else:
agg = partial(arg_agg, func, argfunc)
def wrapped(x, axis=None, split_every=None, out=None):
return arg_reduction(
x, chunk, combine, agg, axis, split_every=split_every, out=out
)
wrapped.__name__ = func.__name__
return derived_from(np)(wrapped)
def _nanargmin(x, axis, **kwargs):
try:
return chunk.nanargmin(x, axis, **kwargs)
except ValueError:
return chunk.nanargmin(np.where(np.isnan(x), np.inf, x), axis, **kwargs)
def _nanargmax(x, axis, **kwargs):
try:
return chunk.nanargmax(x, axis, **kwargs)
except ValueError:
return chunk.nanargmax(np.where(np.isnan(x), -np.inf, x), axis, **kwargs)
argmin = make_arg_reduction(chunk.min, chunk.argmin)
argmax = make_arg_reduction(chunk.max, chunk.argmax)
nanargmin = make_arg_reduction(chunk.nanmin, _nanargmin, True)
nanargmax = make_arg_reduction(chunk.nanmax, _nanargmax, True)
def _prefixscan_combine(func, binop, pre, x, axis, dtype):
"""Combine results of a parallel prefix scan such as cumsum
Parameters
----------
func : callable
Cumulative function (e.g. ``np.cumsum``)
binop : callable
Associative function (e.g. ``add``)
pre : np.array
The value calculated in parallel from ``preop``.
For example, the sum of all the previous blocks.
x : np.array
Current block
axis : int
dtype : dtype
Returns
-------
np.array
"""
# We could compute this in two tasks.
# This would allow us to do useful work (i.e., func), while waiting on `pre`.
# Using one task may guide the scheduler to do better and reduce scheduling overhead.
return binop(pre, func(x, axis=axis, dtype=dtype))
def _prefixscan_first(func, x, axis, dtype):
"""Compute the prefix scan (e.g., cumsum) on the first block
Parameters
----------
func : callable
Cumulative function (e.g. ``np.cumsum``)
x : np.array
Current block
axis : int
dtype : dtype
Returns
-------
np.array
"""
return func(x, axis=axis, dtype=dtype)
def prefixscan_blelloch(func, preop, binop, x, axis=None, dtype=None, out=None):
"""Generic function to perform parallel cumulative scan (a.k.a prefix scan)
The Blelloch prefix scan is work-efficient and exposes parallelism.
    A parallel cumsum works by first taking the sum of each block, then doing a binary tree
merge followed by a fan-out (i.e., the Brent-Kung pattern). We then take the cumsum
of each block and add the sum of the previous blocks.
When performing a cumsum across N chunks, this method has 2 * lg(N) levels of dependencies.
In contrast, the sequential method has N levels of dependencies.
Floating point operations should be more accurate with this method compared to sequential.
Parameters
----------
func : callable
Cumulative function (e.g. ``np.cumsum``)
preop : callable
Function to get the final value of a cumulative function (e.g., ``np.sum``)
binop : callable
Associative function (e.g. ``add``)
x : dask array
axis : int
dtype : dtype
Returns
-------
dask array
"""
if axis is None:
x = x.flatten()
axis = 0
if dtype is None:
dtype = getattr(func(np.empty((0,), dtype=x.dtype)), "dtype", object)
assert isinstance(axis, Integral)
axis = validate_axis(axis, x.ndim)
name = "{0}-{1}".format(func.__name__, tokenize(func, axis, preop, binop, x, dtype))
base_key = (name,)
# Right now, the metadata for batches is incorrect, but this should be okay
batches = x.map_blocks(preop, axis=axis, keepdims=True, dtype=dtype)
# We don't need the last index until the end
*indices, last_index = full_indices = [
list(
product(
*[range(nb) if j != axis else [i] for j, nb in enumerate(x.numblocks)]
)
)
for i in range(x.numblocks[axis])
]
prefix_vals = [[(batches.name,) + index for index in vals] for vals in indices]
dsk = {}
n_vals = len(prefix_vals)
level = 0
if n_vals >= 2:
# Upsweep
stride = 1
stride2 = 2
while stride2 <= n_vals:
for i in range(stride2 - 1, n_vals, stride2):
new_vals = []
for index, left_val, right_val in zip(
indices[i], prefix_vals[i - stride], prefix_vals[i]
):
key = base_key + index + (level, i)
dsk[key] = (binop, left_val, right_val)
new_vals.append(key)
prefix_vals[i] = new_vals
stride = stride2
stride2 *= 2
level += 1
# Downsweep
# With `n_vals == 3`, we would have `stride = 1` and `stride = 0`, but we need
# to do a downsweep iteration, so make sure stride2 is at least 2.
stride2 = builtins.max(2, 2 ** ceil(log2(n_vals // 2)))
stride = stride2 // 2
while stride > 0:
for i in range(stride2 + stride - 1, n_vals, stride2):
new_vals = []
for index, left_val, right_val in zip(
indices[i], prefix_vals[i - stride], prefix_vals[i]
):
key = base_key + index + (level, i)
dsk[key] = (binop, left_val, right_val)
new_vals.append(key)
prefix_vals[i] = new_vals
stride2 = stride
stride //= 2
level += 1
if full_indices:
for index in full_indices[0]:
dsk[base_key + index] = (
_prefixscan_first,
func,
(x.name,) + index,
axis,
dtype,
)
for indexes, vals in zip(drop(1, full_indices), prefix_vals):
for index, val in zip(indexes, vals):
dsk[base_key + index] = (
_prefixscan_combine,
func,
binop,
val,
(x.name,) + index,
axis,
dtype,
)
if len(full_indices) < 2:
deps = [x]
else:
deps = [x, batches]
graph = HighLevelGraph.from_collections(name, dsk, dependencies=deps)
result = Array(graph, name, x.chunks, batches.dtype)
return handle_out(out, result)
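# Editorial note, not part of the original dask source: both scan methods
# return the same values, so 'blelloch' is purely a graph-shape trade-off.
# For a 1-D array split into 100 blocks the sequential method chains roughly
# 100 levels of dependencies, while the upsweep/downsweep above needs about
# 2 * log2(100), i.e. ~13 levels.  Usage sketch with the ``cumsum`` defined
# later in this module (``da`` standing for an imported ``dask.array``):
#   x = da.random.random(1_000_000, chunks=10_000)
#   y = cumsum(x, method="blelloch")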
def cumreduction(
func,
binop,
ident,
x,
axis=None,
dtype=None,
out=None,
method="sequential",
preop=None,
):
"""Generic function for cumulative reduction
Parameters
----------
func: callable
Cumulative function like np.cumsum or np.cumprod
binop: callable
Associated binary operator like ``np.cumsum->add`` or ``np.cumprod->mul``
ident: Number
Associated identity like ``np.cumsum->0`` or ``np.cumprod->1``
x: dask Array
axis: int
dtype: dtype
method : {'sequential', 'blelloch'}, optional
Choose which method to use to perform the cumsum. Default is 'sequential'.
* 'sequential' performs the scan of each prior block before the current block.
* 'blelloch' is a work-efficient parallel scan. It exposes parallelism by first
calling ``preop`` on each block and combines the values via a binary tree.
This method may be faster or more memory efficient depending on workload,
scheduler, and hardware. More benchmarking is necessary.
preop: callable, optional
        Function used by the 'blelloch' method, like ``np.cumsum->np.sum`` or ``np.cumprod->np.prod``
Returns
-------
dask array
See also
--------
cumsum
cumprod
"""
if method == "blelloch":
if preop is None:
raise TypeError(
'cumreduction with "blelloch" method required `preop=` argument'
)
return prefixscan_blelloch(func, preop, binop, x, axis, dtype, out=out)
elif method != "sequential":
raise ValueError(
f'Invalid method for cumreduction. Expected "sequential" or "blelloch". Got: {method!r}'
)
if axis is None:
x = x.flatten()
axis = 0
if dtype is None:
dtype = getattr(func(np.empty((0,), dtype=x.dtype)), "dtype", object)
assert isinstance(axis, Integral)
axis = validate_axis(axis, x.ndim)
m = x.map_blocks(func, axis=axis, dtype=dtype)
name = "{0}-{1}".format(func.__name__, tokenize(func, axis, binop, ident, x, dtype))
n = x.numblocks[axis]
full = slice(None, None, None)
slc = (full,) * axis + (slice(-1, None),) + (full,) * (x.ndim - axis - 1)
indices = list(
product(*[range(nb) if i != axis else [0] for i, nb in enumerate(x.numblocks)])
)
dsk = dict()
for ind in indices:
shape = tuple(x.chunks[i][ii] if i != axis else 1 for i, ii in enumerate(ind))
dsk[(name, "extra") + ind] = (np.full, shape, ident, m.dtype)
dsk[(name,) + ind] = (m.name,) + ind
for i in range(1, n):
last_indices = indices
indices = list(
product(
*[range(nb) if ii != axis else [i] for ii, nb in enumerate(x.numblocks)]
)
)
for old, ind in zip(last_indices, indices):
this_slice = (name, "extra") + ind
dsk[this_slice] = (
binop,
(name, "extra") + old,
(operator.getitem, (m.name,) + old, slc),
)
dsk[(name,) + ind] = (binop, this_slice, (m.name,) + ind)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[m])
result = Array(graph, name, x.chunks, m.dtype)
return handle_out(out, result)
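# --- Editorial sketch, not part of the original dask source -----------------
# ``cumreduction`` is generic enough to assemble other scans the same way
# cumsum/cumprod are defined below.  A cumulative maximum for floating-point
# data (the -np.inf identity assumes a float dtype):
def _example_cummax(x, axis=None, dtype=None, out=None, method="sequential"):
    return cumreduction(
        np.maximum.accumulate,  # blockwise cumulative function
        np.maximum,  # associative binop chaining one block to the next
        -np.inf,  # identity of np.maximum (float inputs only)
        x,
        axis,
        dtype,
        out=out,
        method=method,
        preop=np.max,  # per-block summary used by the 'blelloch' method
    )
# -----------------------------------------------------------------------------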
def _cumsum_merge(a, b):
if isinstance(a, np.ma.masked_array) or isinstance(b, np.ma.masked_array):
values = np.ma.getdata(a) + np.ma.getdata(b)
return np.ma.masked_array(values, mask=np.ma.getmaskarray(b))
return a + b
def _cumprod_merge(a, b):
if isinstance(a, np.ma.masked_array) or isinstance(b, np.ma.masked_array):
values = np.ma.getdata(a) * np.ma.getdata(b)
return np.ma.masked_array(values, mask=np.ma.getmaskarray(b))
return a * b
@derived_from(np)
def cumsum(x, axis=None, dtype=None, out=None, method="sequential"):
"""Dask added an additional keyword-only argument ``method``.
method : {'sequential', 'blelloch'}, optional
Choose which method to use to perform the cumsum. Default is 'sequential'.
* 'sequential' performs the cumsum of each prior block before the current block.
* 'blelloch' is a work-efficient parallel cumsum. It exposes parallelism by
first taking the sum of each block and combines the sums via a binary tree.
This method may be faster or more memory efficient depending on workload,
scheduler, and hardware. More benchmarking is necessary.
"""
return cumreduction(
np.cumsum,
_cumsum_merge,
0,
x,
axis,
dtype,
out=out,
method=method,
preop=np.sum,
)
@derived_from(np)
def cumprod(x, axis=None, dtype=None, out=None, method="sequential"):
"""Dask added an additional keyword-only argument ``method``.
method : {'sequential', 'blelloch'}, optional
Choose which method to use to perform the cumprod. Default is 'sequential'.
* 'sequential' performs the cumprod of each prior block before the current block.
* 'blelloch' is a work-efficient parallel cumprod. It exposes parallelism by first
taking the product of each block and combines the products via a binary tree.
This method may be faster or more memory efficient depending on workload,
scheduler, and hardware. More benchmarking is necessary.
"""
return cumreduction(
np.cumprod,
_cumprod_merge,
1,
x,
axis,
dtype,
out=out,
method=method,
preop=np.prod,
)
def topk(a, k, axis=-1, split_every=None):
"""Extract the k largest elements from a on the given axis,
and return them sorted from largest to smallest.
If k is negative, extract the -k smallest elements instead,
and return them sorted from smallest to largest.
This performs best when ``k`` is much smaller than the chunk size. All
results will be returned in a single chunk along the given axis.
Parameters
----------
x: Array
Data being sorted
k: int
axis: int, optional
split_every: int >=2, optional
        See :func:`reduction`. This parameter becomes very important when k is
on the same order of magnitude of the chunk size or more, as it
prevents getting the whole or a significant portion of the input array
in memory all at once, with a negative impact on network transfer
too when running on distributed.
Returns
-------
Selection of x with size abs(k) along the given axis.
Examples
--------
>>> import dask.array as da
>>> x = np.array([5, 1, 3, 6])
>>> d = da.from_array(x, chunks=2)
>>> d.topk(2).compute()
array([6, 5])
>>> d.topk(-2).compute()
array([1, 3])
"""
axis = validate_axis(axis, a.ndim)
# chunk and combine steps of the reduction, which recursively invoke
# np.partition to pick the top/bottom k elements from the previous step.
# The selection is not sorted internally.
chunk_combine = partial(chunk.topk, k=k)
# aggregate step of the reduction. Internally invokes the chunk/combine
# function, then sorts the results internally.
aggregate = partial(chunk.topk_aggregate, k=k)
return reduction(
a,
chunk=chunk_combine,
combine=chunk_combine,
aggregate=aggregate,
axis=axis,
keepdims=True,
dtype=a.dtype,
split_every=split_every,
output_size=abs(k),
)
def argtopk(a, k, axis=-1, split_every=None):
"""Extract the indices of the k largest elements from a on the given axis,
and return them sorted from largest to smallest. If k is negative, extract
the indices of the -k smallest elements instead, and return them sorted
from smallest to largest.
This performs best when ``k`` is much smaller than the chunk size. All
results will be returned in a single chunk along the given axis.
Parameters
----------
x: Array
Data being sorted
k: int
axis: int, optional
split_every: int >=2, optional
See :func:`topk`. The performance considerations for topk also apply
here.
Returns
-------
Selection of np.intp indices of x with size abs(k) along the given axis.
Examples
--------
>>> import dask.array as da
>>> x = np.array([5, 1, 3, 6])
>>> d = da.from_array(x, chunks=2)
>>> d.argtopk(2).compute()
array([3, 0])
>>> d.argtopk(-2).compute()
array([1, 2])
"""
axis = validate_axis(axis, a.ndim)
# Generate nodes where every chunk is a tuple of (a, original index of a)
idx = arange(a.shape[axis], chunks=(a.chunks[axis],), dtype=np.intp)
idx = idx[tuple(slice(None) if i == axis else np.newaxis for i in range(a.ndim))]
a_plus_idx = a.map_blocks(chunk.argtopk_preprocess, idx, dtype=object)
# chunk and combine steps of the reduction. They acquire in input a tuple
# of (a, original indices of a) and return another tuple containing the top
# k elements of a and the matching original indices. The selection is not
# sorted internally, as in np.argpartition.
chunk_combine = partial(chunk.argtopk, k=k)
# aggregate step of the reduction. Internally invokes the chunk/combine
# function, then sorts the results internally, drops a and returns the
# index only.
aggregate = partial(chunk.argtopk_aggregate, k=k)
if isinstance(axis, Number):
naxis = 1
else:
naxis = len(axis)
meta = a._meta.astype(np.intp).reshape((0,) * (a.ndim - naxis + 1))
return reduction(
a_plus_idx,
chunk=chunk_combine,
combine=chunk_combine,
aggregate=aggregate,
axis=axis,
keepdims=True,
dtype=np.intp,
split_every=split_every,
concatenate=False,
output_size=abs(k),
meta=meta,
)
@derived_from(np)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None):
return diagonal(a, offset=offset, axis1=axis1, axis2=axis2).sum(-1, dtype=dtype)
@derived_from(np)
def median(a, axis=None, keepdims=False, out=None):
"""
This works by automatically chunking the reduced axes to a single chunk
    and then calling the ``numpy.median`` function across the remaining dimensions
"""
if axis is None:
raise NotImplementedError(
"The da.median function only works along an axis. "
"The full algorithm is difficult to do in parallel"
)
if not isinstance(axis, Iterable):
axis = (axis,)
axis = [ax + a.ndim if ax < 0 else ax for ax in axis]
a = a.rechunk({ax: -1 if ax in axis else "auto" for ax in range(a.ndim)})
result = a.map_blocks(
np.median,
axis=axis,
keepdims=keepdims,
drop_axis=axis if not keepdims else None,
chunks=[1 if ax in axis else c for ax, c in enumerate(a.chunks)]
if keepdims
else None,
)
result = handle_out(out, result)
return result
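# Editorial note, not part of the original dask source: unlike numpy, this
# median requires an explicit axis because only the reduced axes are forced
# into a single chunk.  Assuming ``x`` is a 2-d dask array:
#   med0 = median(x, axis=0)   # fine: axis 0 is rechunked to one block
#   median(x)                  # raises NotImplementedError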
@derived_from(np)
def nanmedian(a, axis=None, keepdims=False, out=None):
"""
This works by automatically chunking the reduced axes to a single chunk
    and then calling the ``numpy.nanmedian`` function across the remaining dimensions
"""
if axis is None:
raise NotImplementedError(
"The da.nanmedian function only works along an axis or a subset of axes. "
"The full algorithm is difficult to do in parallel"
)
if not isinstance(axis, Iterable):
axis = (axis,)
axis = [ax + a.ndim if ax < 0 else ax for ax in axis]
a = a.rechunk({ax: -1 if ax in axis else "auto" for ax in range(a.ndim)})
result = a.map_blocks(
np.nanmedian,
axis=axis,
keepdims=keepdims,
drop_axis=axis if not keepdims else None,
chunks=[1 if ax in axis else c for ax, c in enumerate(a.chunks)]
if keepdims
else None,
)
result = handle_out(out, result)
return result
| bsd-3-clause | -6,353,376,076,036,699,000 | 30.608354 | 102 | 0.599747 | false |
1suming/readthedocs.org | readthedocs/projects/migrations/0035_make_null.py | 13 | 12994 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Project.num_point'
db.alter_column('projects_project', 'num_point', self.gf('django.db.models.fields.IntegerField')(max_length=3, null=True))
# Changing field 'Project.num_minor'
db.alter_column('projects_project', 'num_minor', self.gf('django.db.models.fields.IntegerField')(max_length=3, null=True))
# Changing field 'Project.num_major'
db.alter_column('projects_project', 'num_major', self.gf('django.db.models.fields.IntegerField')(max_length=3, null=True))
def backwards(self, orm):
# Changing field 'Project.num_point'
db.alter_column('projects_project', 'num_point', self.gf('django.db.models.fields.IntegerField')(max_length=3))
# Changing field 'Project.num_minor'
db.alter_column('projects_project', 'num_minor', self.gf('django.db.models.fields.IntegerField')(max_length=3))
# Changing field 'Project.num_major'
db.alter_column('projects_project', 'num_major', self.gf('django.db.models.fields.IntegerField')(max_length=3))
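    # Editorial note, not part of the original migration: the alter_column
    # calls above correspond to model fields declared roughly as
    #   num_major = models.IntegerField(max_length=3, null=True,
    #                                   blank=True, default=None)
    # (and likewise for num_minor / num_point), matching the frozen
    # 'projects.project' entry in the ``models`` dict below.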
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'builds.version': {
'Meta': {'ordering': "['-verbose_name']", 'unique_together': "[('project', 'slug')]", 'object_name': 'Version'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'built': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'privacy_level': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '20'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uploaded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'projects.emailhook': {
'Meta': {'object_name': 'EmailHook'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'emailhook_notifications'", 'to': "orm['projects.Project']"})
},
'projects.importedfile': {
'Meta': {'object_name': 'ImportedFile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'md5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'imported_files'", 'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'version': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'imported_filed'", 'null': 'True', 'to': "orm['builds.Version']"})
},
'projects.project': {
'Meta': {'ordering': "('slug',)", 'object_name': 'Project'},
'analytics_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'conf_py_file': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'copyright': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'crate_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'default_branch': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_version': ('django.db.models.fields.CharField', [], {'default': "'latest'", 'max_length': '255'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'django_packages_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'documentation_type': ('django.db.models.fields.CharField', [], {'default': "'sphinx'", 'max_length': '20'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '20'}),
'main_language_project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'translations'", 'null': 'True', 'to': "orm['projects.Project']"}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_major': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'num_minor': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'num_point': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'privacy_level': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '20'}),
'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'python_interpreter': ('django.db.models.fields.CharField', [], {'default': "'python'", 'max_length': '20'}),
'related_projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['projects.Project']", 'null': 'True', 'through': "orm['projects.ProjectRelationship']", 'blank': 'True'}),
'repo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'repo_type': ('django.db.models.fields.CharField', [], {'default': "'git'", 'max_length': '10'}),
'requirements_file': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'skip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'suffix': ('django.db.models.fields.CharField', [], {'default': "'.rst'", 'max_length': '10'}),
'theme': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '20'}),
'use_system_packages': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'use_virtualenv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'projects'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'version_privacy_level': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '20'})
},
'projects.projectrelationship': {
'Meta': {'object_name': 'ProjectRelationship'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'superprojects'", 'to': "orm['projects.Project']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subprojects'", 'to': "orm['projects.Project']"})
},
'projects.webhook': {
'Meta': {'object_name': 'WebHook'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'webhook_notifications'", 'to': "orm['projects.Project']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['projects'] | mit | -7,147,823,058,791,610,000 | 78.723926 | 230 | 0.561105 | false |
wilx/autoconf-archive | macro.py | 9 | 4436 | #! /usr/bin/env python
from contextlib import closing
import os, sys, subprocess, re, textwrap
def loadFile(path):
with closing( open(path) ) as fd:
return fd.read()
def writeFile(path, buffer):
with closing( open(path, "w") ) as fd:
fd.write(buffer)
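# splitSections() consumes a macro header that has already been split into
# lines and stripped of its leading "# " prefixes, yielding (name, body) pairs.
# A section starts with a heading line such as "SYNOPSIS" or "DESCRIPTION"
# (lower-cased here), followed by an empty line and a body whose lines carry a
# two-space indent; that indent is removed while the body is collected.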
def splitSections(buffer):
while buffer:
assert len(buffer) >= 3
name = buffer.pop(0).lower()
assert buffer.pop(0) == ''
body = []
while buffer:
line = buffer.pop(0)
if line == '' or line[0].isspace():
body.append(line[2:])
else:
buffer.insert(0, line)
yield (name, body)
body = []
break
if body:
yield (name, body)
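# collapseText() re-wraps free-form section text to `width` columns: runs of
# consecutive unindented lines are joined into one paragraph and wrapped,
# indented "quote" blocks (e.g. code samples) are kept verbatim, and blank
# lines separate paragraphs.  The result is a list of line-lists, one per
# paragraph or quote block.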
def collapseText(lines, width = 72):
wrapper = textwrap.TextWrapper( width = width
, expand_tabs = False
, break_on_hyphens = False
, break_long_words = False
)
body = []
prev = None
for line in lines:
if line == '':
prev = None
elif line[0].isspace():
if prev == "quote":
body[-1].append(line)
else:
body.append([line])
prev = "quote"
else:
if prev == "text":
newtext = ' '.join(body[-1]) + ' ' + line
body[-1] = wrapper.wrap(newtext)
else:
body.append(wrapper.wrap(line))
prev = "text"
return body
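# Macro represents a single archive macro file.  The constructor splits the
# file into its "# "-prefixed documentation header and the m4 body, parses the
# header's synopsis/description/license/obsolete sections into attributes of
# the instance, collects the copyright holders into self.authors, and finally
# determines the macro's serial number either from the file's git history or
# from its "#serial" line.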
class Macro:
def __init__(self, filePath, computeSerialNumber=False):
self.name = os.path.splitext(os.path.basename(filePath))[0]
# header and body are separated by an empty line.
(header,body) = loadFile(filePath).split("\n\n", 1)
self.body = body.split('\n')
# headers may not contain tab characters
assert not ('\t' in header)
# drop initial header (if present)
header = re.sub(r"^\n*# =+\n#[^\n]*\n# =+\n(#\n)+", '', header, 1)
# split buffer into lines and drop initial "# " prefix in the process
header = [l[2:] for l in header.split('\n')]
# set defaults
self.authors = []
# parse each section in the remaining list
for (key, body) in splitSections(header):
# drop empty lines at beginning and end of body
while body[0] == '': body.pop(0)
while body[-1] == '': body.pop(-1)
# each section has its own parser
if key == "synopsis":
if '' in body:
raise Exception("%s: malformed synopsis section" % filePath)
elif key == "description":
body = collapseText(body)
elif key == "license":
while True:
match = re.match(r"Copyright \([cC]\) ([0-9.,-]+) (.*)", body[0])
if not match: break
(year,name) = (match.group(1), match.group(2))
match = re.match(r"(.*) <(.*)>", name)
if match:
(name,email) = (match.group(1), match.group(2))
self.authors.append(dict(year = year, name = name, email = email))
else:
self.authors.append(dict(year = year, name = name))
body.pop(0)
assert self.authors
if body.pop(0) != '':
raise Exception("%s: malformed license section" % filePath)
body = collapseText(body)
elif key == "obsolete macro":
key = "obsolete"
body = collapseText(body)
else:
raise Exception("%s: unknown section %r in macro" % (filePath, key))
self.__dict__[key] = body
# determine the macro's serial number
if computeSerialNumber: # compute the number from git
logMessages = subprocess.check_output(["git", "log", "--oneline", "054e8ad8c766afa7059d8cd4a81bbfa99133ef5e..HEAD", "--", filePath], bufsize=1)
logLines = logMessages.rstrip(b'\n').split(b"\n")
self.serial = len(logLines)
modified = subprocess.call(["git", "diff", "--quiet", "--exit-code", "HEAD", "--", filePath])
if modified:
self.serial += 1
else: # trust the m4 file
assert self.body[0].startswith("#serial")
self.serial = int(self.body[0].split()[1])
# drop the original serial number from the body
self.body = [ l for l in self.body if not l.startswith("#serial") ]
        # drop whitespace from beginning and end of body
while self.body[0] == "":
self.body.pop(0)
while self.body[-1] == "":
self.body.pop(-1)
def __repr__(self):
return repr(self.__dict__)
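# A minimal usage sketch (added for illustration; not part of the upstream
# script).  It assumes it is run from the top of an autoconf-archive checkout
# with the macro sources under "m4/"; the glob pattern and the report format
# are illustrative assumptions only.
if __name__ == "__main__":
    import glob
    for m4file in sorted(glob.glob("m4/*.m4")):
        macro = Macro(m4file)  # serial number comes from the "#serial" line
        print("%s (serial %d) by %s" % (macro.name, macro.serial,
                                        ", ".join(a["name"] for a in macro.authors)))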
| gpl-3.0 | 7,881,672,914,899,835,000 | 33.929134 | 149 | 0.556357 | false |