repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (string, 15 classes) | var_hash (int64) | doc_hash (int64) | line_mean (float64, 3.5-99.8) | line_max (int64, 13-999) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|
ujjvala-addsol/addsol_hr
|
openerp/addons/gamification/tests/test_challenge.py
|
386
|
5133
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class test_challenge(common.TransactionCase):
def setUp(self):
super(test_challenge, self).setUp()
cr, uid = self.cr, self.uid
self.data_obj = self.registry('ir.model.data')
self.user_obj = self.registry('res.users')
self.challenge_obj = self.registry('gamification.challenge')
self.line_obj = self.registry('gamification.challenge.line')
self.goal_obj = self.registry('gamification.goal')
self.badge_obj = self.registry('gamification.badge')
self.badge_user_obj = self.registry('gamification.badge.user')
self.demo_user_id = self.data_obj.get_object_reference(cr, uid, 'base', 'user_demo')[1]
self.group_user_id = self.data_obj.get_object_reference(cr, uid, 'base', 'group_user')[1]
self.challenge_base_id = self.data_obj.get_object_reference(cr, uid, 'gamification', 'challenge_base_discover')[1]
self.definition_timezone_id = self.data_obj.get_object_reference(cr, uid, 'gamification', 'definition_base_timezone')[1]
self.badge_id = self.data_obj.get_object_reference(cr, uid, 'gamification', 'badge_good_job')[1]
def test_00_join_challenge(self):
cr, uid, context = self.cr, self.uid, {}
user_ids = self.user_obj.search(cr, uid, [('groups_id', '=', self.group_user_id)])
challenge = self.challenge_obj.browse(cr, uid, self.challenge_base_id, context=context)
self.assertGreaterEqual(len(challenge.user_ids), len(user_ids), "Not enough users in base challenge")
self.user_obj.create(cr, uid, {
'name': 'R2D2',
'login': '[email protected]',
'email': '[email protected]',
'groups_id': [(6, 0, [self.group_user_id])]
}, {'no_reset_password': True})
self.challenge_obj._update_all(cr, uid, [self.challenge_base_id], context=context)
challenge = self.challenge_obj.browse(cr, uid, self.challenge_base_id, context=context)
self.assertGreaterEqual(len(challenge.user_ids), len(user_ids)+1, "These are not droids you are looking for")
def test_10_reach_challenge(self):
cr, uid, context = self.cr, self.uid, {}
self.challenge_obj.write(cr, uid, [self.challenge_base_id], {'state': 'inprogress'}, context=context)
challenge = self.challenge_obj.browse(cr, uid, self.challenge_base_id, context=context)
challenge_user_ids = [user.id for user in challenge.user_ids]
self.assertEqual(challenge.state, 'inprogress', "Challenge failed the change of state")
line_ids = self.line_obj.search(cr, uid, [('challenge_id', '=', self.challenge_base_id)], context=context)
goal_ids = self.goal_obj.search(cr, uid, [('challenge_id', '=', self.challenge_base_id), ('state', '!=', 'draft')], context=context)
self.assertEqual(len(goal_ids), len(line_ids)*len(challenge_user_ids), "Incorrect number of goals generated, should be 1 goal per user, per challenge line")
# demo user will set a timezone
self.user_obj.write(cr, uid, self.demo_user_id, {'tz': "Europe/Brussels"}, context=context)
goal_ids = self.goal_obj.search(cr, uid, [('user_id', '=', self.demo_user_id), ('definition_id', '=', self.definition_timezone_id)], context=context)
self.goal_obj.update(cr, uid, goal_ids, context=context)
reached_goal_ids = self.goal_obj.search(cr, uid, [('id', 'in', goal_ids), ('state', '=', 'reached')], context=context)
self.assertEqual(set(goal_ids), set(reached_goal_ids), "Not every goal was reached after changing timezone")
# reward for two firsts as admin may have timezone
self.challenge_obj.write(cr, uid, self.challenge_base_id, {'reward_first_id': self.badge_id, 'reward_second_id': self.badge_id}, context=context)
self.challenge_obj.write(cr, uid, self.challenge_base_id, {'state': 'done'}, context=context)
badge_ids = self.badge_user_obj.search(cr, uid, [('badge_id', '=', self.badge_id), ('user_id', '=', self.demo_user_id)])
self.assertGreater(len(badge_ids), 0, "Demo user has not received the badge")
|
agpl-3.0
| 991,387,984,481,785,900 | -2,676,164,345,255,969,000 | 56.044444 | 164 | 0.639782 | false |
tudorvio/nova
|
nova/tests/unit/keymgr/test_single_key_mgr.py
|
78
|
2448
|
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test cases for the single key manager.
"""
import array
from nova import exception
from nova.keymgr import key
from nova.keymgr import single_key_mgr
from nova.tests.unit.keymgr import test_mock_key_mgr
class SingleKeyManagerTestCase(test_mock_key_mgr.MockKeyManagerTestCase):
def _create_key_manager(self):
return single_key_mgr.SingleKeyManager()
def setUp(self):
super(SingleKeyManagerTestCase, self).setUp()
self.key_id = '00000000-0000-0000-0000-000000000000'
encoded = array.array('B', ('0' * 64).decode('hex')).tolist()
self.key = key.SymmetricKey('AES', encoded)
def test___init__(self):
self.assertEqual(self.key,
self.key_mgr.get_key(self.ctxt, self.key_id))
def test_create_key(self):
key_id_1 = self.key_mgr.create_key(self.ctxt)
key_id_2 = self.key_mgr.create_key(self.ctxt)
# ensure that the UUIDs are the same
self.assertEqual(key_id_1, key_id_2)
def test_create_key_with_length(self):
pass
def test_store_null_context(self):
self.assertRaises(exception.Forbidden,
self.key_mgr.store_key, None, self.key)
def test_copy_key(self):
key_id = self.key_mgr.create_key(self.ctxt)
key = self.key_mgr.get_key(self.ctxt, key_id)
copied_key_id = self.key_mgr.copy_key(self.ctxt, key_id)
copied_key = self.key_mgr.get_key(self.ctxt, copied_key_id)
self.assertEqual(key_id, copied_key_id)
self.assertEqual(key, copied_key)
def test_delete_key(self):
pass
def test_delete_unknown_key(self):
self.assertRaises(exception.KeyManagerError,
self.key_mgr.delete_key, self.ctxt, None)
|
apache-2.0
| -943,067,510,437,789,400 | 7,078,059,991,933,968,000 | 33 | 78 | 0.662582 | false |
tjsavage/sfcsdatabase
|
django/conf/locale/en_GB/formats.py
|
80
|
1770
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
DATE_FORMAT = 'N j, Y' # 'Oct. 25, 2006'
TIME_FORMAT = 'P' # '2:30 pm'
DATETIME_FORMAT = 'N j, Y, P' # 'Oct. 25, 2006, 2:30 pm'
YEAR_MONTH_FORMAT = 'F Y' # 'October 2006'
MONTH_DAY_FORMAT = 'F j' # 'October 25'
SHORT_DATE_FORMAT = 'd/m/Y' # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P' # '25/10/2006 2:30 pm'
FIRST_DAY_OF_WEEK = 0 # Sunday
DATE_INPUT_FORMATS = (
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
'%Y-%m-%d', # '2006-10-25'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
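# Hedged illustration (not part of the locale file): Django's forms walk
# DATE_INPUT_FORMATS in order, so with this locale active both spellings below
# are expected to parse to the same date (exact behaviour depends on USE_L10N
# and the Django version shipping this file):
#
#   from django import forms
#   forms.DateField().clean('25/10/2006')   # -> datetime.date(2006, 10, 25)
#   forms.DateField().clean('2006-10-25')   # -> datetime.date(2006, 10, 25)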
|
bsd-3-clause
| -2,279,711,852,236,648,400 | 454,872,417,644,523,600 | 44.384615 | 79 | 0.379661 | false |
yg257/Pangea
|
templates/root/ec2/lib/boto-2.34.0/boto/ec2/autoscale/activity.py
|
152
|
3058
|
# Copyright (c) 2009-2011 Reza Lotun http://reza.lotun.name/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from datetime import datetime
class Activity(object):
def __init__(self, connection=None):
self.connection = connection
self.start_time = None
self.end_time = None
self.activity_id = None
self.progress = None
self.status_code = None
self.cause = None
self.description = None
self.status_message = None
self.group_name = None
def __repr__(self):
return 'Activity<%s>: For group:%s, progress:%s, cause:%s' % (self.activity_id,
self.group_name,
self.status_message,
self.cause)
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'ActivityId':
self.activity_id = value
elif name == 'AutoScalingGroupName':
self.group_name = value
elif name == 'StartTime':
try:
self.start_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
self.start_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
elif name == 'EndTime':
try:
self.end_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
self.end_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
elif name == 'Progress':
self.progress = value
elif name == 'Cause':
self.cause = value
elif name == 'Description':
self.description = value
elif name == 'StatusMessage':
self.status_message = value
elif name == 'StatusCode':
self.status_code = value
else:
setattr(self, name, value)
|
apache-2.0
| 5,237,708,080,555,276,000 | 3,812,608,670,962,065,400 | 40.890411 | 90 | 0.590582 | false |
kspviswa/personfinder
|
tools/admin.py
|
19
|
5570
|
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for administration in the interactive console."""
from model import *
from utils import *
import logging
import pickle
class Mapper(object):
# Subclasses should replace this with a model class (eg, model.Person).
KIND = None
# Subclasses can replace this with a list of (property, value) tuples
# to filter by.
FILTERS = []
def map(self, entity):
"""Updates a single entity.
Implementers should return a tuple containing two iterables
(to_update, to_delete)."""
return ([], [])
def get_query(self):
"""Returns a query over the specified kind, with any appropriate
filters applied."""
q = self.KIND.all()
for prop, value in self.FILTERS:
q.filter("%s =" % prop, value)
q.order("__key__")
return q
def run(self, batch_size=100):
"""Executes the map procedure over all matching entities."""
q = self.get_query()
entities = q.fetch(batch_size)
while entities:
to_put = []
to_delete = []
for entity in entities:
map_updates, map_deletes = self.map(entity)
to_put.extend(map_updates)
to_delete.extend(map_deletes)
if to_put:
db.put(to_put)
logging.info('entities written: %d' % len(to_put))
if to_delete:
db.delete(to_delete)
logging.info('entities deleted: %d' % len(to_delete))
q = self.get_query()
q.filter("__key__ >", entities[-1].key())
entities = q.fetch(batch_size)
class Reindexer(Mapper):
KIND = Person
def map(self, entity):
# This updates both old and new index and we need it for now,
# as first stage of deployment.
entity.update_index(['old','new'])
# Use the next line to index only with new index
#indexing.update_index_properties(entity)
return [entity], []
def Person_repr(person):
return '<Person %s %r>' % (
person.record_id, person.primary_full_name)
def Note_repr(note):
return '<Note %s for %s by %r at %s>' % (
note.record_id, note.person_record_id,
note.author_name, note.entry_date)
Person.__repr__ = Person_repr
Note.__repr__ = Note_repr
def expand_id(repo, id):
id = str(id)
if '/' not in id:
id = repo + '.' + HOME_DOMAIN + '/person.' + id
return id
def clear_found(id):
person = get_person(id)
person.found = False
db.put(person)
def get_person(repo, id):
return Person.get(repo, expand_id(repo, id))
def get_notes(repo, id):
return list(Note.all_in_repo(repo).filter(
'person_record_id =', expand_id(repo, id)))
def delete_person(person):
"""Deletes a Person, possibly leaving behind an empty placeholder."""
if person.is_original():
person.expiry_date = get_utcnow()
person.put_expiry_flags()
person.wipe_contents()
else:
person.delete_related_entities(delete_self=True)
def delete_repo(repo):
"""Deletes a Repo and associated Person, Note, Authorization, Subscription
(but not Counter, ApiActionLog, or UserAgentLog) entities."""
for person in Person.all_in_repo(repo, filter_expired=False):
delete_person(person)
entities = [Repo.get_by_key_name(repo)]
for cls in [Person, Note, Authorization, Subscription]:
entities += list(cls.all().filter('repo =', repo))
min_key = db.Key.from_path('ConfigEntry', repo + ':')
max_key = db.Key.from_path('ConfigEntry', repo + ';')
entities += list(config.ConfigEntry.all().filter('__key__ >', min_key
).filter('__key__ <', max_key))
db.delete(entities)
def get_all_resources():
"""Gets all the Resource entities and returns a dictionary of the contents.
The resulting dictionary has the structure: {
<bundle_name>: {
'created': <bundle_created_datetime>,
'resources': {
<resource_name>: {
'cache_seconds': <cache_seconds>
'content': <content_string>
'last_modified': <last_modified_datetime>
}
}
}
"""
import resources
bundle_dicts = {}
for b in resources.ResourceBundle.all():
resource_dicts = {}
for r in resources.Resource.all().ancestor(b):
resource_dicts[r.key().name()] = {
'cache_seconds': r.cache_seconds,
'content': r.content,
'last_modified': r.last_modified
}
bundle_dicts[b.key().name()] = {
'created': b.created,
'resources': resource_dicts
}
return bundle_dicts
def download_resources(filename):
"""Downloads all the Resource data into a backup file in pickle format."""
file = open(filename, 'w')
pickle.dump(get_all_resources(), file)
file.close()
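# Hedged example (hypothetical, not part of the original tools): the Mapper
# plumbing above can drive one-off batch edits from the interactive console.
# The property names used here ('repo', 'reviewed') are assumptions about the
# Note model, not guaranteed by this file.
class MarkNotesReviewed(Mapper):
    KIND = Note
    FILTERS = [('repo', 'haiti')]

    def map(self, entity):
        # Return (to_update, to_delete) as documented in Mapper.map().
        if not entity.reviewed:
            entity.reviewed = True
            return [entity], []
        return [], []

# Typical console usage (sketch): MarkNotesReviewed().run(batch_size=500)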
|
apache-2.0
| 4,516,740,963,426,520,600 | -1,185,494,256,518,347,800 | 32.154762 | 79 | 0.597307 | false |
newville/scikit-image
|
doc/examples/plot_rank_mean.py
|
17
|
1499
|
"""
============
Mean filters
============
This example compares the following mean filters of the rank filter package:
* **local mean**: all pixels belonging to the structuring element to compute
average gray level.
* **percentile mean**: only use values between percentiles p0 and p1
(here 10% and 90%).
* **bilateral mean**: only use pixels of the structuring element having a gray
level situated inside g-s0 and g+s1 (here g-500 and g+500)
Percentile and usual mean give similar results here; these filters smooth the
complete image (background and details). The bilateral mean exhibits a high
filtering rate for continuous areas (i.e. background) while leaving higher
image frequencies untouched.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.morphology import disk
from skimage.filters import rank
image = (data.coins()).astype(np.uint16) * 16
selem = disk(20)
percentile_result = rank.mean_percentile(image, selem=selem, p0=.1, p1=.9)
bilateral_result = rank.mean_bilateral(image, selem=selem, s0=500, s1=500)
normal_result = rank.mean(image, selem=selem)
fig, axes = plt.subplots(nrows=3, figsize=(8, 10))
ax0, ax1, ax2 = axes
ax0.imshow(np.hstack((image, percentile_result)))
ax0.set_title('Percentile mean')
ax0.axis('off')
ax1.imshow(np.hstack((image, bilateral_result)))
ax1.set_title('Bilateral mean')
ax1.axis('off')
ax2.imshow(np.hstack((image, normal_result)))
ax2.set_title('Local mean')
ax2.axis('off')
plt.show()
|
bsd-3-clause
| -5,808,338,262,284,796,000 | 1,641,428,406,291,062,800 | 27.826923 | 79 | 0.731154 | false |
Arcanemagus/SickRage
|
lib/hachoir_parser/image/tga.py
|
95
|
2911
|
"""
Truevision Targa Graphic (TGA) picture parser.
Author: Victor Stinner
Creation: 18 december 2006
"""
from hachoir_parser import Parser
from hachoir_core.field import FieldSet, UInt8, UInt16, Enum, RawBytes
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_parser.image.common import PaletteRGB
class Line(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = self["/width"].value * self["/bpp"].value
def createFields(self):
for x in xrange(self["/width"].value):
yield UInt8(self, "pixel[]")
class Pixels(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = self["/width"].value * self["/height"].value * self["/bpp"].value
def createFields(self):
if self["/options"].value == 0:
RANGE = xrange(self["/height"].value-1,-1,-1)
else:
RANGE = xrange(self["/height"].value)
for y in RANGE:
yield Line(self, "line[%u]" % y)
class TargaFile(Parser):
PARSER_TAGS = {
"id": "targa",
"category": "image",
"file_ext": ("tga",),
"mime": (u"image/targa", u"image/tga", u"image/x-tga"),
"min_size": 18*8,
"description": u"Truevision Targa Graphic (TGA)"
}
CODEC_NAME = {
1: u"8-bit uncompressed",
2: u"24-bit uncompressed",
9: u"8-bit RLE",
10: u"24-bit RLE",
}
endian = LITTLE_ENDIAN
def validate(self):
if self["version"].value != 1:
return "Unknown version"
if self["codec"].value not in self.CODEC_NAME:
return "Unknown codec"
if self["x_min"].value != 0 or self["y_min"].value != 0:
return "(x_min, y_min) is not (0,0)"
if self["bpp"].value not in (8, 24):
return "Unknown bits/pixel value"
return True
def createFields(self):
yield UInt8(self, "hdr_size", "Header size in bytes")
yield UInt8(self, "version", "Targa version (always one)")
yield Enum(UInt8(self, "codec", "Pixels encoding"), self.CODEC_NAME)
yield UInt16(self, "palette_ofs", "Palette absolute file offset")
yield UInt16(self, "nb_color", "Number of color")
yield UInt8(self, "color_map_size", "Color map entry size")
yield UInt16(self, "x_min")
yield UInt16(self, "y_min")
yield UInt16(self, "width")
yield UInt16(self, "height")
yield UInt8(self, "bpp", "Bits per pixel")
yield UInt8(self, "options", "Options (0: vertical mirror)")
if self["bpp"].value == 8:
yield PaletteRGB(self, "palette", 256)
if self["codec"].value == 1:
yield Pixels(self, "pixels")
else:
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "raw_pixels", size)
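# Hedged usage sketch (assumptions: this hachoir release exposes createParser()
# under hachoir_parser, and "image.tga" is a hypothetical input file):
#   from hachoir_parser import createParser
#   parser = createParser(u"image.tga")
#   if isinstance(parser, TargaFile):
#       print "%ux%u, %u bpp" % (parser["width"].value,
#                                parser["height"].value,
#                                parser["bpp"].value)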
|
gpl-3.0
| -2,713,574,452,629,040,600 | 8,333,039,999,783,041,000 | 33.247059 | 86 | 0.566472 | false |
lsaffre/voga
|
lino_voga/lib/voga/fixtures/demo.py
|
2
|
1358
|
# -*- coding: UTF-8 -*-
# Copyright 2013-2017 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from __future__ import unicode_literals
from django.conf import settings
from lino.api import dd, rt
def objects():
Person = rt.models.contacts.Person
Teacher = rt.models.courses.Teacher
User = rt.models.users.User
from lino.modlib.users.choicelists import UserTypes
Place = rt.models.countries.Place
eupen = Place.objects.get(name__exact='Eupen')
person = Person(first_name="Marianne", last_name="Martin",
email=settings.SITE.demo_email,
city=eupen, gender=dd.Genders.female)
yield person
yield User(username=person.first_name.lower(),
partner=person, user_type='100')
person = Person(first_name="Monique", last_name="Mommer",
email=settings.SITE.demo_email,
city=eupen, gender=dd.Genders.female)
yield person
yield User(username=person.first_name.lower(),
partner=person, user_type='200')
person = Teacher(first_name="Tom", last_name="Thess",
email=settings.SITE.demo_email,
city=eupen, gender=dd.Genders.male)
yield person
yield User(username=person.first_name.lower(),
partner=person, user_type=UserTypes.teacher)
|
agpl-3.0
| -3,340,462,530,861,455,400 | 4,832,766,373,673,378,000 | 33.820513 | 62 | 0.635493 | false |
ldieselUT/Kruus-robtech
|
install/_setup_util.py
|
1
|
12413
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'CPATH': 'include',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
'PATH': 'bin',
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
'''
Generate shell code to reset environment variables
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
'''
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
value = _rollback_env_variable(unmodified_environ, key, subfolder)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolder):
'''
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolder: str '' or subfoldername that may start with '/'
:returns: the updated value of the environment variable.
'''
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
'''
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
:param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
'''
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
'''
Generate shell code to prepend environment variables
for all workspaces.
'''
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
'''
Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
'''
Generate shell code with found environment hooks
for all workspaces.
'''
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
# environment at generation time
CMAKE_PREFIX_PATH = '/home/tudeng/Kruus-robtech/install;/opt/ros/indigo'.split(';')
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
# and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
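# Illustrative sketch only (not executed by catkin): the helpers above emit
# plain shell snippets, e.g. on a non-Windows host:
#   assignment('CMAKE_PREFIX_PATH', '/opt/ros/indigo')
#       -> 'export CMAKE_PREFIX_PATH="/opt/ros/indigo"'
#   prepend(environ, 'PATH', '/opt/ros/indigo/bin' + os.pathsep)
#       -> 'export PATH="/opt/ros/indigo/bin:$PATH"'   (when PATH is already set)
#   comment('found environment hooks in workspaces')
#       -> '# found environment hooks in workspaces'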
|
mit
| 8,538,908,958,146,463,000 | -2,586,468,605,157,412,000 | 41.656357 | 213 | 0.655764 | false |
signal18/replication-manager
|
share/opensvc/compliance/com.replication-manager/sysctl.py
|
2
|
8354
|
#!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_SYSCTL_",
"example_value": """
{
"key": "vm.lowmem_reserve_ratio",
"index": 1,
"op": ">",
"value": 256
}
""",
"description": """* Verify a linux kernel parameter value is on target
* Live parameter value (sysctl executable)
* Persistent parameter value (/etc/sysctl.conf)
""",
"form_definition": """
Desc: |
A rule defining a list of Linux kernel parameters to be set in /etc/sysctl.conf. Current values can be checked as strictly equal, or superior/inferior to their target value. Each field in a vectored value can be tuned independently using the index key.
Css: comp48
Outputs:
-
Dest: compliance variable
Type: json
Format: list of dict
Class: sysctl
Inputs:
-
Id: key
Label: Key
DisplayModeLabel: key
LabelCss: action16
Mandatory: Yes
Type: string
Help: The /etc/sysctl.conf parameter to check.
-
Id: index
Label: Index
DisplayModeLabel: idx
LabelCss: action16
Mandatory: Yes
Default: 0
Type: integer
Help: The /etc/sysctl.conf parameter to check.
-
Id: op
Label: Comparison operator
DisplayModeLabel: op
LabelCss: action16
Mandatory: Yes
Type: string
Default: "="
Candidates:
- "="
- ">"
- ">="
- "<"
- "<="
Help: The comparison operator to use to check the parameter current value.
-
Id: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string or integer
Help: The /etc/sysctl.conf parameter target value.
""",
}
import os
import sys
import json
import pwd
from subprocess import *
sys.path.append(os.path.dirname(__file__))
from comp import *
class Sysctl(CompObject):
def __init__(self, prefix=None):
CompObject.__init__(self, prefix=prefix, data=data)
def init(self):
if os.uname()[0] != "Linux":
raise NotApplicable()
self.need_reload = False
self.cf = os.path.join(os.sep, "etc", "sysctl.conf")
if not os.path.exists(self.cf):
perror(self.cf, 'does not exist')
raise NotApplicable()
self.keys = []
self.cache = None
self.keys = self.get_rules()
if len(self.keys) == 0:
raise NotApplicable()
self.convert_keys()
def fixable(self):
return RET_OK
def parse_val(self, val):
val = list(map(lambda x: x.strip(), val.strip().split()))
for i, e in enumerate(val):
try:
val[i] = int(e)
except:
pass
return val
def get_keys(self):
with open(self.cf, 'r') as f:
buff = f.read()
if self.cache is None:
self.cache = {}
for line in buff.splitlines():
line = line.strip()
if line.startswith('#'):
continue
l = line.split('=')
if len(l) != 2:
continue
key = l[0].strip()
val = self.parse_val(l[1])
self.cache[key] = val
def get_live_key(self, key):
p = Popen(['sysctl', key], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
return None
l = bdecode(out).split('=')
if len(l) != 2:
return None
val = self.parse_val(l[1])
return val
def get_key(self, key):
if self.cache is None:
self.get_keys()
if key not in self.cache:
return None
return self.cache[key]
def fix_key(self, key):
done = False
target = key['value']
index = key['index']
with open(self.cf, 'r') as f:
buff = f.read()
lines = buff.split('\n')
for i, line in enumerate(lines):
line = line.strip()
if line.startswith('#'):
continue
l = line.split('=')
if len(l) != 2:
continue
keyname = l[0].strip()
if key['key'] != keyname:
continue
if done:
pinfo("sysctl: remove redundant key %s"%keyname)
del lines[i]
continue
val = self.parse_val(l[1])
if target == val[index]:
done = True
continue
pinfo("sysctl: set %s[%d] = %s"%(keyname, index, str(target)))
val[index] = target
lines[i] = "%s = %s"%(keyname, " ".join(map(str, val)))
done = True
if not done:
# if key is not in sysctl.conf, get the value from kernel
val = self.get_live_key(key['key'])
if val is None:
perror("key '%s' not found in live kernel parameters" % key['key'])
return RET_ERR
if target != val[index]:
val[index] = target
pinfo("sysctl: set %s = %s"%(key['key'], " ".join(map(str, val))))
lines += ["%s = %s"%(key['key'], " ".join(map(str, val)))]
try:
with open(self.cf, 'w') as f:
f.write('\n'.join(lines))
except:
perror("failed to write sysctl.conf")
return RET_ERR
return RET_OK
def convert_keys(self):
keys = []
for key in self.keys:
keyname = key['key']
value = key['value']
if type(value) == list:
if len(value) > 0 and type(value[0]) != list:
value = [value]
for i, v in enumerate(value):
keys.append({
"key": keyname,
"index": i,
"op": v[0],
"value": v[1],
})
elif 'key' in key and 'index' in key and 'op' in key and 'value' in key:
keys.append(key)
self.keys = keys
def check_key(self, key, verbose=False):
r = RET_OK
keyname = key['key']
target = key['value']
op = key['op']
i = key['index']
current_value = self.get_key(keyname)
current_live_value = self.get_live_key(keyname)
if current_value is None:
if verbose:
perror("key '%s' not found in sysctl.conf"%keyname)
return RET_ERR
if op == "=" and str(current_value[i]) != str(target):
if verbose:
perror("sysctl err: %s[%d] = %s, target: %s"%(keyname, i, str(current_value[i]), str(target)))
r |= RET_ERR
elif op == ">=" and type(target) == int and current_value[i] < target:
if verbose:
perror("sysctl err: %s[%d] = %s, target: >= %s"%(keyname, i, str(current_value[i]), str(target)))
r |= RET_ERR
elif op == "<=" and type(target) == int and current_value[i] > target:
if verbose:
perror("sysctl err: %s[%d] = %s, target: <= %s"%(keyname, i, str(current_value[i]), str(target)))
r |= RET_ERR
else:
if verbose:
pinfo("sysctl ok: %s[%d] = %s, on target"%(keyname, i, str(current_value[i])))
if r == RET_OK and current_live_value is not None and current_value != current_live_value:
if verbose:
perror("sysctl err: %s on target in sysctl.conf but kernel value is different"%(keyname))
self.need_reload = True
r |= RET_ERR
return r
def check(self):
r = 0
for key in self.keys:
r |= self.check_key(key, verbose=True)
return r
def reload_sysctl(self):
cmd = ['sysctl', '-e', '-p']
pinfo("sysctl:", " ".join(cmd))
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
p.communicate()
if p.returncode != 0:
perror("reload failed")
return RET_ERR
return RET_OK
def fix(self):
r = 0
for key in self.keys:
if self.check_key(key, verbose=False) == RET_ERR:
self.need_reload = True
r |= self.fix_key(key)
if self.need_reload:
r |= self.reload_sysctl()
return r
if __name__ == "__main__":
main(Sysctl)
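# Hedged usage sketch: in the OpenSVC compliance framework this module is
# normally driven through main(Sysctl) with rules delivered via environment
# variables carrying the default_prefix declared above (the exact CLI wiring
# lives in the comp module and is an assumption here). Programmatically the
# class can be exercised as:
#   o = Sysctl(prefix="OSVC_COMP_SYSCTL_")
#   o.check()   # RET_OK when sysctl.conf and the live kernel match every rule
#   o.fix()     # rewrites /etc/sysctl.conf and runs `sysctl -e -p` if needed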
|
gpl-3.0
| 3,347,495,590,040,284,000 | -7,945,472,440,350,878,000 | 27.318644 | 252 | 0.500958 | false |
nikolas/lettuce
|
tests/integration/lib/Django-1.3/tests/regressiontests/model_inheritance_regress/models.py
|
75
|
4389
|
import datetime
from django.db import models
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
class Meta:
ordering = ('name',)
def __unicode__(self):
return u"%s the place" % self.name
class Restaurant(Place):
serves_hot_dogs = models.BooleanField()
serves_pizza = models.BooleanField()
def __unicode__(self):
return u"%s the restaurant" % self.name
class ItalianRestaurant(Restaurant):
serves_gnocchi = models.BooleanField()
def __unicode__(self):
return u"%s the italian restaurant" % self.name
class ParkingLot(Place):
# An explicit link to the parent (we can control the attribute name).
parent = models.OneToOneField(Place, primary_key=True, parent_link=True)
capacity = models.IntegerField()
def __unicode__(self):
return u"%s the parking lot" % self.name
class ParkingLot2(Place):
# In lieu of any other connector, an existing OneToOneField will be
# promoted to the primary key.
parent = models.OneToOneField(Place)
class ParkingLot3(Place):
# The parent_link connector need not be the pk on the model.
primary_key = models.AutoField(primary_key=True)
parent = models.OneToOneField(Place, parent_link=True)
class Supplier(models.Model):
restaurant = models.ForeignKey(Restaurant)
class Wholesaler(Supplier):
retailer = models.ForeignKey(Supplier,related_name='wholesale_supplier')
class Parent(models.Model):
created = models.DateTimeField(default=datetime.datetime.now)
class Child(Parent):
name = models.CharField(max_length=10)
class SelfRefParent(models.Model):
parent_data = models.IntegerField()
self_data = models.ForeignKey('self', null=True)
class SelfRefChild(SelfRefParent):
child_data = models.IntegerField()
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
class Meta:
ordering = ('-pub_date', 'headline')
def __unicode__(self):
return self.headline
class ArticleWithAuthor(Article):
author = models.CharField(max_length=100)
class M2MBase(models.Model):
articles = models.ManyToManyField(Article)
class M2MChild(M2MBase):
name = models.CharField(max_length=50)
class Evaluation(Article):
quality = models.IntegerField()
class Meta:
abstract = True
class QualityControl(Evaluation):
assignee = models.CharField(max_length=50)
class BaseM(models.Model):
base_name = models.CharField(max_length=100)
def __unicode__(self):
return self.base_name
class DerivedM(BaseM):
customPK = models.IntegerField(primary_key=True)
derived_name = models.CharField(max_length=100)
def __unicode__(self):
return "PK = %d, base_name = %s, derived_name = %s" \
% (self.customPK, self.base_name, self.derived_name)
class AuditBase(models.Model):
planned_date = models.DateField()
class Meta:
abstract = True
verbose_name_plural = u'Audits'
class CertificationAudit(AuditBase):
class Meta(AuditBase.Meta):
abstract = True
class InternalCertificationAudit(CertificationAudit):
auditing_dept = models.CharField(max_length=20)
# Check that abstract classes don't get m2m tables autocreated.
class Person(models.Model):
name = models.CharField(max_length=100)
class Meta:
ordering = ('name',)
def __unicode__(self):
return self.name
class AbstractEvent(models.Model):
name = models.CharField(max_length=100)
attendees = models.ManyToManyField(Person, related_name="%(class)s_set")
class Meta:
abstract = True
ordering = ('name',)
def __unicode__(self):
return self.name
class BirthdayParty(AbstractEvent):
pass
class BachelorParty(AbstractEvent):
pass
class MessyBachelorParty(BachelorParty):
pass
# Check concrete -> abstract -> concrete inheritance
class SearchableLocation(models.Model):
keywords = models.CharField(max_length=256)
class Station(SearchableLocation):
name = models.CharField(max_length=128)
class Meta:
abstract = True
class BusStation(Station):
bus_routes = models.CommaSeparatedIntegerField(max_length=128)
inbound = models.BooleanField()
class TrainStation(Station):
zone = models.IntegerField()
|
gpl-3.0
| -5,828,065,444,987,914,000 | 7,329,729,132,413,699,000 | 25.6 | 76 | 0.695147 | false |
hkawasaki/kawasaki-aio8-0
|
lms/djangoapps/class_dashboard/dashboard_data.py
|
10
|
20700
|
"""
Computes the data to display on the Instructor Dashboard
"""
from util.json_request import JsonResponse
from courseware import models
from django.db.models import Count
from django.utils.translation import ugettext as _
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.inheritance import own_metadata
from analytics.csvs import create_csv_response
# Used to limit the length of list displayed to the screen.
MAX_SCREEN_LIST_LENGTH = 250
def get_problem_grade_distribution(course_id):
"""
Returns the grade distribution per problem for the course
`course_id` the course ID for the course interested in
Output is a dict, where the key is the problem 'module_id' and the value is a dict with:
'max_grade' - max grade for this problem
'grade_distrib' - array of tuples (`grade`,`count`).
"""
# Aggregate query on studentmodule table for grade data for all problems in course
db_query = models.StudentModule.objects.filter(
course_id__exact=course_id,
grade__isnull=False,
module_type__exact="problem",
).values('module_state_key', 'grade', 'max_grade').annotate(count_grade=Count('grade'))
prob_grade_distrib = {}
# Loop through resultset building data for each problem
for row in db_query:
curr_problem = row['module_state_key']
# Build set of grade distributions for each problem that has student responses
if curr_problem in prob_grade_distrib:
prob_grade_distrib[curr_problem]['grade_distrib'].append((row['grade'], row['count_grade']))
if (prob_grade_distrib[curr_problem]['max_grade'] != row['max_grade']) and \
(prob_grade_distrib[curr_problem]['max_grade'] < row['max_grade']):
prob_grade_distrib[curr_problem]['max_grade'] = row['max_grade']
else:
prob_grade_distrib[curr_problem] = {
'max_grade': row['max_grade'],
'grade_distrib': [(row['grade'], row['count_grade'])]
}
return prob_grade_distrib
def get_sequential_open_distrib(course_id):
"""
Returns the number of students that opened each subsection/sequential of the course
`course_id` the course ID for the course interested in
Outputs a dict mapping the 'module_id' to the number of students that have opened that subsection/sequential.
"""
# Aggregate query on studentmodule table for "opening a subsection" data
db_query = models.StudentModule.objects.filter(
course_id__exact=course_id,
module_type__exact="sequential",
).values('module_state_key').annotate(count_sequential=Count('module_state_key'))
# Build set of "opened" data for each subsection that has "opened" data
sequential_open_distrib = {}
for row in db_query:
sequential_open_distrib[row['module_state_key']] = row['count_sequential']
return sequential_open_distrib
def get_problem_set_grade_distrib(course_id, problem_set):
"""
Returns the grade distribution for the problems specified in `problem_set`.
`course_id` the course ID for the course interested in
`problem_set` an array of strings representing problem module_id's.
Requests from the database a count of each grade for each problem in the `problem_set`.
Returns a dict, where the key is the problem 'module_id' and the value is a dict with two parts:
'max_grade' - the maximum grade possible for the course
'grade_distrib' - array of tuples (`grade`,`count`) ordered by `grade`
"""
# Aggregate query on studentmodule table for grade data for set of problems in course
db_query = models.StudentModule.objects.filter(
course_id__exact=course_id,
grade__isnull=False,
module_type__exact="problem",
module_state_key__in=problem_set,
).values(
'module_state_key',
'grade',
'max_grade',
).annotate(count_grade=Count('grade')).order_by('module_state_key', 'grade')
prob_grade_distrib = {}
# Loop through resultset building data for each problem
for row in db_query:
if row['module_state_key'] not in prob_grade_distrib:
prob_grade_distrib[row['module_state_key']] = {
'max_grade': 0,
'grade_distrib': [],
}
curr_grade_distrib = prob_grade_distrib[row['module_state_key']]
curr_grade_distrib['grade_distrib'].append((row['grade'], row['count_grade']))
if curr_grade_distrib['max_grade'] < row['max_grade']:
curr_grade_distrib['max_grade'] = row['max_grade']
return prob_grade_distrib
def get_d3_problem_grade_distrib(course_id):
"""
Returns problem grade distribution information for each section, with the data already in the format expected by the d3 function.
`course_id` the course ID for the course interested in
Returns an array of dicts in the order of the sections. Each dict has:
'display_name' - display name for the section
'data' - data for the d3_stacked_bar_graph function of the grade distribution for that problem
"""
prob_grade_distrib = get_problem_grade_distribution(course_id)
d3_data = []
# Retrieve course object down to problems
course = modulestore().get_instance(course_id, CourseDescriptor.id_to_location(course_id), depth=4)
# Iterate through sections, subsections, units, problems
for section in course.get_children():
curr_section = {}
curr_section['display_name'] = own_metadata(section).get('display_name', '')
data = []
c_subsection = 0
for subsection in section.get_children():
c_subsection += 1
c_unit = 0
for unit in subsection.get_children():
c_unit += 1
c_problem = 0
for child in unit.get_children():
# Student data is at the problem level
if child.location.category == 'problem':
c_problem += 1
stack_data = []
# Construct label to display for this problem
label = "P{0}.{1}.{2}".format(c_subsection, c_unit, c_problem)
# Only problems in prob_grade_distrib have had a student submission.
if child.location.url() in prob_grade_distrib:
# Get max_grade, grade_distribution for this problem
problem_info = prob_grade_distrib[child.location.url()]
# Get problem_name for tooltip
problem_name = own_metadata(child).get('display_name', '')
# Compute percent of this grade over max_grade
max_grade = float(problem_info['max_grade'])
for (grade, count_grade) in problem_info['grade_distrib']:
percent = 0.0
if max_grade > 0:
percent = (grade * 100.0) / max_grade
# Construct tooltip for problem in grade distribution view
tooltip = _("{label} {problem_name} - {count_grade} {students} ({percent:.0f}%: {grade:.0f}/{max_grade:.0f} {questions})").format(
label=label,
problem_name=problem_name,
count_grade=count_grade,
students=_("students"),
percent=percent,
grade=grade,
max_grade=max_grade,
questions=_("questions"),
)
# Construct data to be sent to d3
stack_data.append({
'color': percent,
'value': count_grade,
'tooltip': tooltip,
'module_url': child.location.url(),
})
problem = {
'xValue': label,
'stackData': stack_data,
}
data.append(problem)
curr_section['data'] = data
d3_data.append(curr_section)
return d3_data
def get_d3_sequential_open_distrib(course_id):
"""
Returns how many students opened a sequential/subsection for each section, with the data already in the format expected by the d3 function.
`course_id` the course ID for the course interested in
Returns an array in the order of the sections and each dict has:
'display_name' - display name for the section
'data' - data for the d3_stacked_bar_graph function of how many students opened each sequential/subsection
"""
sequential_open_distrib = get_sequential_open_distrib(course_id)
d3_data = []
# Retrieve course object down to subsection
course = modulestore().get_instance(course_id, CourseDescriptor.id_to_location(course_id), depth=2)
# Iterate through sections, subsections
for section in course.get_children():
curr_section = {}
curr_section['display_name'] = own_metadata(section).get('display_name', '')
data = []
c_subsection = 0
# Construct data for each subsection to be sent to d3
for subsection in section.get_children():
c_subsection += 1
subsection_name = own_metadata(subsection).get('display_name', '')
num_students = 0
if subsection.location.url() in sequential_open_distrib:
num_students = sequential_open_distrib[subsection.location.url()]
stack_data = []
tooltip = _("{num_students} student(s) opened Subsection {subsection_num}: {subsection_name}").format(
num_students=num_students,
subsection_num=c_subsection,
subsection_name=subsection_name,
)
stack_data.append({
'color': 0,
'value': num_students,
'tooltip': tooltip,
'module_url': subsection.location.url(),
})
subsection = {
'xValue': "SS {0}".format(c_subsection),
'stackData': stack_data,
}
data.append(subsection)
curr_section['data'] = data
d3_data.append(curr_section)
return d3_data
def get_d3_section_grade_distrib(course_id, section):
"""
Returns the grade distribution for the problems in the `section` section in a format for the d3 code.
`course_id` a string that is the course's ID.
`section` an int that is a zero-based index into the course's list of sections.
Navigates to the section specified to find all the problems associated with that section and then finds the grade
distribution for those problems. Finally returns an object formatted the way d3_stacked_bar_graph.js expects its
data object to be.
If this is requested multiple times quickly for the same course, it is better to call
get_d3_problem_grade_distrib and pick out the sections of interest.
Returns an array of dicts with the following keys (taken from d3_stacked_bar_graph.js's documentation)
'xValue' - Corresponding value for the x-axis
'stackData' - Array of objects with key, value pairs that represent a bar:
'color' - Defines what "color" the bar will map to
'value' - Maps to the height of the bar, along the y-axis
'tooltip' - (Optional) Text to display on mouse hover
"""
# Retrieve course object down to problems
course = modulestore().get_instance(course_id, CourseDescriptor.id_to_location(course_id), depth=4)
problem_set = []
problem_info = {}
c_subsection = 0
for subsection in course.get_children()[section].get_children():
c_subsection += 1
c_unit = 0
for unit in subsection.get_children():
c_unit += 1
c_problem = 0
for child in unit.get_children():
if (child.location.category == 'problem'):
c_problem += 1
problem_set.append(child.location.url())
problem_info[child.location.url()] = {
'id': child.location.url(),
'x_value': "P{0}.{1}.{2}".format(c_subsection, c_unit, c_problem),
'display_name': own_metadata(child).get('display_name', ''),
}
# Retrieve grade distribution for these problems
grade_distrib = get_problem_set_grade_distrib(course_id, problem_set)
d3_data = []
# Construct data for each problem to be sent to d3
for problem in problem_set:
stack_data = []
if problem in grade_distrib: # Some problems have no data because students have not tried them yet.
max_grade = float(grade_distrib[problem]['max_grade'])
for (grade, count_grade) in grade_distrib[problem]['grade_distrib']:
percent = 0.0
if max_grade > 0:
percent = (grade * 100.0) / max_grade
# Construct tooltip for problem in grade distribution view
tooltip = _("{problem_info_x} {problem_info_n} - {count_grade} {students} ({percent:.0f}%: {grade:.0f}/{max_grade:.0f} {questions})").format(
problem_info_x=problem_info[problem]['x_value'],
count_grade=count_grade,
students=_("students"),
percent=percent,
problem_info_n=problem_info[problem]['display_name'],
grade=grade,
max_grade=max_grade,
questions=_("questions"),
)
stack_data.append({
'color': percent,
'value': count_grade,
'tooltip': tooltip,
})
d3_data.append({
'xValue': problem_info[problem]['x_value'],
'stackData': stack_data,
})
return d3_data
def get_section_display_name(course_id):
"""
Returns an array of the display names for each section in the course.
`course_id` the course ID for the course interested in
The ith string in the array is the display name of the ith section in the course.
"""
course = modulestore().get_instance(course_id, CourseDescriptor.id_to_location(course_id), depth=4)
section_display_name = [""] * len(course.get_children())
i = 0
for section in course.get_children():
section_display_name[i] = own_metadata(section).get('display_name', '')
i += 1
return section_display_name
def get_array_section_has_problem(course_id):
"""
Returns an array of true/false whether each section has problems.
`course_id` the course ID for the course interested in
The ith value in the array is true if the ith section in the course contains problems and false otherwise.
"""
course = modulestore().get_instance(course_id, CourseDescriptor.id_to_location(course_id), depth=4)
b_section_has_problem = [False] * len(course.get_children())
i = 0
for section in course.get_children():
for subsection in section.get_children():
for unit in subsection.get_children():
for child in unit.get_children():
if child.location.category == 'problem':
b_section_has_problem[i] = True
break # out of child loop
if b_section_has_problem[i]:
break # out of unit loop
if b_section_has_problem[i]:
break # out of subsection loop
i += 1
return b_section_has_problem
def get_students_opened_subsection(request, csv=False):
"""
Get a list of students that opened a particular subsection.
If 'csv' is False, returns a dict of student's name: username.
If 'csv' is True, returns a header array, and an array of arrays in the format:
student names, usernames for CSV download.
"""
module_id = request.GET.get('module_id')
csv = request.GET.get('csv')
# Query for "opened a subsection" students
students = models.StudentModule.objects.select_related('student').filter(
module_state_key__exact=module_id,
module_type__exact='sequential',
).values('student__username', 'student__profile__name').order_by('student__profile__name')
results = []
if not csv:
# Restrict screen list length
# Adding 1 so we can tell whether the list is larger than MAX_SCREEN_LIST_LENGTH
# without doing another select.
for student in students[0:MAX_SCREEN_LIST_LENGTH + 1]:
results.append({
'name': student['student__profile__name'],
'username': student['student__username'],
})
max_exceeded = False
if len(results) > MAX_SCREEN_LIST_LENGTH:
# Remove the last item so list length is exactly MAX_SCREEN_LIST_LENGTH
del results[-1]
max_exceeded = True
response_payload = {
'results': results,
'max_exceeded': max_exceeded,
}
return JsonResponse(response_payload)
else:
tooltip = request.GET.get('tooltip')
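# Derive the CSV filename from the tooltip text, keeping everything from the first 'S' onward
# (presumably the start of the subsection label)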
filename = sanitize_filename(tooltip[tooltip.index('S'):])
header = ['Name', 'Username']
for student in students:
results.append([student['student__profile__name'], student['student__username']])
response = create_csv_response(filename, header, results)
return response
def get_students_problem_grades(request, csv=False):
"""
Get a list of students and grades for a particular problem.
If 'csv' is False, returns for each student a dict with their name, username, grade and percent.
If 'csv' is True, returns a header array and an array of per-student arrays
(name, username, grade, percent) for CSV download.
"""
module_id = request.GET.get('module_id')
csv = request.GET.get('csv')
# Query for "problem grades" students
students = models.StudentModule.objects.select_related('student').filter(
module_state_key__exact=module_id,
module_type__exact='problem',
grade__isnull=False,
).values('student__username', 'student__profile__name', 'grade', 'max_grade').order_by('student__profile__name')
results = []
if not csv:
# Restrict screen list length
# Adding 1 so we can tell whether the list is larger than MAX_SCREEN_LIST_LENGTH
# without doing another select.
for student in students[0:MAX_SCREEN_LIST_LENGTH + 1]:
student_dict = {
'name': student['student__profile__name'],
'username': student['student__username'],
'grade': student['grade'],
}
student_dict['percent'] = 0
if student['max_grade'] > 0:
student_dict['percent'] = round(student['grade'] * 100 / student['max_grade'])
results.append(student_dict)
max_exceeded = False
if len(results) > MAX_SCREEN_LIST_LENGTH:
# Remove the last item so list length is exactly MAX_SCREEN_LIST_LENGTH
del results[-1]
max_exceeded = True
response_payload = {
'results': results,
'max_exceeded': max_exceeded,
}
return JsonResponse(response_payload)
else:
tooltip = request.GET.get('tooltip')
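# Derive the CSV filename from the tooltip text, dropping everything after the last ' - '
# separator (presumably the trailing student-count suffix)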
filename = sanitize_filename(tooltip[:tooltip.rfind(' - ')])
header = ['Name', 'Username', 'Grade', 'Percent']
for student in students:
percent = 0
if student['max_grade'] > 0:
percent = round(student['grade'] * 100 / student['max_grade'])
results.append([student['student__profile__name'], student['student__username'], student['grade'], percent])
response = create_csv_response(filename, header, results)
return response
def sanitize_filename(filename):
"""
Utility function: turn a display string into a safe CSV filename (spaces replaced with underscores, ASCII-encoded, truncated, '.csv' appended).
"""
filename = filename.replace(" ", "_")
filename = filename.encode('ascii')
filename = filename[0:25] + '.csv'
return filename
|
agpl-3.0
| 6,444,292,151,137,930,000 | -7,583,036,639,094,742,000 | 38.130435 | 162 | 0.58715 | false |
AnishShah/tensorflow
|
tensorflow/contrib/rnn/python/tools/checkpoint_convert_test.py
|
46
|
4250
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for checkpoint converter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import tempfile
from tensorflow.contrib.rnn.python.tools import checkpoint_convert
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
class CheckpointConvertTest(test.TestCase):
def setUp(self):
self._old_ckpt_path = tempfile.mktemp()
self._new_ckpt_path = tempfile.mktemp()
ops.reset_default_graph()
def tearDown(self):
for file_name in glob.glob(self._old_ckpt_path + "*"):
os.remove(file_name)
for file_name in glob.glob(self._new_ckpt_path + "*"):
os.remove(file_name)
def testReplacementDictsContainUniqueAndNonEmptyVariableNames(self):
for old_name in checkpoint_convert.RNN_NAME_REPLACEMENTS:
new_name = checkpoint_convert.RNN_NAME_REPLACEMENTS[old_name]
self.assertTrue(old_name)
self.assertTrue(new_name)
self.assertNotEqual(old_name, new_name)
for old_name in checkpoint_convert._RNN_SHARDED_NAME_REPLACEMENTS:
new_name = checkpoint_convert._RNN_SHARDED_NAME_REPLACEMENTS[old_name]
self.assertTrue(old_name)
self.assertTrue(new_name)
self.assertNotEqual(old_name, new_name)
def testConversionFromV2WithConvertedVariableNamesSucceeds(self):
variables.Variable(10.0, name="a")
for old_name in checkpoint_convert.RNN_NAME_REPLACEMENTS:
variables.Variable(20.0, name=old_name)
with session.Session() as sess:
saver = saver_lib.Saver()
sess.run(variables.global_variables_initializer())
saver.save(sess, self._old_ckpt_path)
new_var_map, conversion_map = checkpoint_convert.convert_names(
self._old_ckpt_path, self._new_ckpt_path)
self.assertTrue(glob.glob(self._new_ckpt_path + "*"))
self.assertItemsEqual(
set(checkpoint_convert.RNN_NAME_REPLACEMENTS.values()).union(["a"]),
new_var_map.keys())
self.assertEqual(checkpoint_convert.RNN_NAME_REPLACEMENTS, conversion_map)
def testConversionFromV2WithoutConvertedVariableNamesSucceeds(self):
variables.Variable(10.0, name="a")
with session.Session() as sess:
saver = saver_lib.Saver()
sess.run(variables.global_variables_initializer())
saver.save(sess, self._old_ckpt_path)
new_var_map, conversion_map = checkpoint_convert.convert_names(
self._old_ckpt_path, self._new_ckpt_path)
self.assertItemsEqual(["a"], new_var_map.keys())
self.assertFalse(conversion_map)
def testConversionToV1Succeeds(self):
variables.Variable(10.0, name="a")
variables.Variable(
20.0, name=list(checkpoint_convert.RNN_NAME_REPLACEMENTS.keys())[-1])
with session.Session() as sess:
saver = saver_lib.Saver()
sess.run(variables.global_variables_initializer())
saver.save(sess, self._old_ckpt_path)
new_var_map, conversion_map = checkpoint_convert.convert_names(
self._old_ckpt_path, self._new_ckpt_path, write_v1_checkpoint=True)
self.assertItemsEqual(
["a", list(checkpoint_convert.RNN_NAME_REPLACEMENTS.values())[-1]],
new_var_map.keys())
self.assertEqual(
{list(checkpoint_convert.RNN_NAME_REPLACEMENTS.keys())[-1]:
list(checkpoint_convert.RNN_NAME_REPLACEMENTS.values())[-1]},
conversion_map)
if __name__ == "__main__":
test.main()
|
apache-2.0
| -4,022,139,342,345,995,000 | 2,847,994,937,898,020,400 | 38.351852 | 80 | 0.703059 | false |
praveenmax/OctoPrint-redd
|
src/octoprint/server/__init__.py
|
3
|
47109
|
# coding=utf-8
from __future__ import absolute_import
__author__ = "Gina Häußge <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import uuid
from sockjs.tornado import SockJSRouter
from flask import Flask, g, request, session, Blueprint, Request, Response
from flask.ext.login import LoginManager, current_user
from flask.ext.principal import Principal, Permission, RoleNeed, identity_loaded, UserNeed
from flask.ext.babel import Babel, gettext, ngettext
from flask.ext.assets import Environment, Bundle
from babel import Locale
from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver
from collections import defaultdict
import os
import logging
import logging.config
import atexit
import signal
import base64
SUCCESS = {}
NO_CONTENT = ("", 204)
NOT_MODIFIED = ("Not Modified", 304)
app = Flask("octoprint")
assets = None
babel = None
debug = False
printer = None
printerProfileManager = None
fileManager = None
slicingManager = None
analysisQueue = None
userManager = None
eventManager = None
loginManager = None
pluginManager = None
appSessionManager = None
pluginLifecycleManager = None
preemptiveCache = None
principals = Principal(app)
admin_permission = Permission(RoleNeed("admin"))
user_permission = Permission(RoleNeed("user"))
# only import the octoprint stuff down here, as it might depend on things defined above to be initialized already
from octoprint.printer import get_connection_options
from octoprint.printer.profile import PrinterProfileManager
from octoprint.printer.standard import Printer
from octoprint.settings import settings
import octoprint.users as users
import octoprint.events as events
import octoprint.plugin
import octoprint.timelapse
import octoprint._version
import octoprint.util
import octoprint.filemanager.storage
import octoprint.filemanager.analysis
import octoprint.slicing
from octoprint.server.util.flask import PreemptiveCache
from . import util
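# Random per-instance API key for the UI: hex-encode the 16 bytes of a freshly generated UUID4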
UI_API_KEY = ''.join('%02X' % ord(z) for z in uuid.uuid4().bytes)
versions = octoprint._version.get_versions()
VERSION = versions['version']
BRANCH = versions.get('branch', None)
DISPLAY_VERSION = "%s (%s branch)" % (VERSION, BRANCH) if BRANCH else VERSION
REVISION = versions.get('full-revision-id', versions.get('full', None))
del versions
LOCALES = []
LANGUAGES = set()
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
user = load_user(identity.id)
if user is None:
return
identity.provides.add(UserNeed(user.get_id()))
if user.is_user():
identity.provides.add(RoleNeed("user"))
if user.is_admin():
identity.provides.add(RoleNeed("admin"))
def load_user(id):
if id == "_api":
return users.ApiUser()
if session and "usersession.id" in session:
sessionid = session["usersession.id"]
else:
sessionid = None
if userManager.enabled:
if sessionid:
return userManager.findUser(userid=id, session=sessionid)
else:
return userManager.findUser(userid=id)
return users.DummyUser()
#~~ startup code
class Server(object):
def __init__(self, configfile=None, basedir=None, host="0.0.0.0", port=5000, debug=False, allowRoot=False, logConf=None, octoprint_daemon=None):
self._configfile = configfile
self._basedir = basedir
self._host = host
self._port = port
self._debug = debug
self._allowRoot = allowRoot
self._logConf = logConf
self._server = None
self._octoprint_daemon = octoprint_daemon
self._logger = None
self._lifecycle_callbacks = defaultdict(list)
self._template_searchpaths = []
self._intermediary_server = None
def run(self):
if not self._allowRoot:
self._check_for_root()
global app
global babel
global printer
global printerProfileManager
global fileManager
global slicingManager
global analysisQueue
global userManager
global eventManager
global loginManager
global pluginManager
global appSessionManager
global pluginLifecycleManager
global preemptiveCache
global debug
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler
import sys
debug = self._debug
# first initialize the settings singleton and make sure it uses given configfile and basedir if available
s = settings(init=True, basedir=self._basedir, configfile=self._configfile)
# then monkey patch a bunch of stuff
util.tornado.fix_ioloop_scheduling()
util.flask.enable_additional_translations(additional_folders=[s.getBaseFolder("translations")])
# setup app
self._setup_app(app)
# setup i18n
self._setup_i18n(app)
# then initialize logging
self._setup_logging(self._debug, self._logConf)
self._logger = logging.getLogger(__name__)
def exception_logger(exc_type, exc_value, exc_tb):
self._logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_tb))
sys.excepthook = exception_logger
self._logger.info("Starting OctoPrint %s" % DISPLAY_VERSION)
# start the intermediary server
self._start_intermediary_server(s)
# then initialize the plugin manager
pluginManager = octoprint.plugin.plugin_manager(init=True)
printerProfileManager = PrinterProfileManager()
eventManager = events.eventManager()
analysisQueue = octoprint.filemanager.analysis.AnalysisQueue()
slicingManager = octoprint.slicing.SlicingManager(s.getBaseFolder("slicingProfiles"), printerProfileManager)
storage_managers = dict()
storage_managers[octoprint.filemanager.FileDestinations.LOCAL] = octoprint.filemanager.storage.LocalFileStorage(s.getBaseFolder("uploads"))
fileManager = octoprint.filemanager.FileManager(analysisQueue, slicingManager, printerProfileManager, initial_storage_managers=storage_managers)
printer = Printer(fileManager, analysisQueue, printerProfileManager)
appSessionManager = util.flask.AppSessionManager()
pluginLifecycleManager = LifecycleManager(pluginManager)
preemptiveCache = PreemptiveCache(os.path.join(s.getBaseFolder("data"), "preemptive_cache_config.yaml"))
# ... and initialize all plugins
def octoprint_plugin_inject_factory(name, implementation):
"""Factory for injections for all OctoPrintPlugins"""
if not isinstance(implementation, octoprint.plugin.OctoPrintPlugin):
# we only care about OctoPrintPlugins
return None
return dict(
plugin_manager=pluginManager,
printer_profile_manager=printerProfileManager,
event_bus=eventManager,
analysis_queue=analysisQueue,
slicing_manager=slicingManager,
file_manager=fileManager,
printer=printer,
app_session_manager=appSessionManager,
plugin_lifecycle_manager=pluginLifecycleManager,
data_folder=os.path.join(settings().getBaseFolder("data"), name),
preemptive_cache=preemptiveCache
)
def settings_plugin_inject_factory(name, implementation):
"""Factory for additional injections depending on plugin type"""
if not isinstance(implementation, octoprint.plugin.SettingsPlugin):
# we only care about SettingsPlugins
return None
# SettingsPlugin instances get a PluginSettings instance injected
default_settings = implementation.get_settings_defaults()
get_preprocessors, set_preprocessors = implementation.get_settings_preprocessors()
plugin_settings = octoprint.plugin.plugin_settings(name,
defaults=default_settings,
get_preprocessors=get_preprocessors,
set_preprocessors=set_preprocessors)
return dict(settings=plugin_settings)
def settings_plugin_config_migration_and_cleanup(name, implementation):
"""Take care of migrating and cleaning up any old settings"""
if not isinstance(implementation, octoprint.plugin.SettingsPlugin):
return
settings_version = implementation.get_settings_version()
settings_migrator = implementation.on_settings_migrate
if settings_version is not None and settings_migrator is not None:
stored_version = implementation._settings.get_int([octoprint.plugin.SettingsPlugin.config_version_key])
if stored_version is None or stored_version < settings_version:
settings_migrator(settings_version, stored_version)
implementation._settings.set_int([octoprint.plugin.SettingsPlugin.config_version_key], settings_version)
implementation.on_settings_cleanup()
implementation._settings.save()
implementation.on_settings_initialized()
pluginManager.implementation_inject_factories=[octoprint_plugin_inject_factory, settings_plugin_inject_factory]
pluginManager.initialize_implementations()
settingsPlugins = pluginManager.get_implementations(octoprint.plugin.SettingsPlugin)
for implementation in settingsPlugins:
try:
settings_plugin_config_migration_and_cleanup(implementation._identifier, implementation)
except:
self._logger.exception("Error while trying to migrate settings for plugin {}, ignoring it".format(implementation._identifier))
pluginManager.implementation_post_inits=[settings_plugin_config_migration_and_cleanup]
pluginManager.log_all_plugins()
# initialize file manager and register it for changes in the registered plugins
fileManager.initialize()
pluginLifecycleManager.add_callback(["enabled", "disabled"], lambda name, plugin: fileManager.reload_plugins())
# initialize slicing manager and register it for changes in the registered plugins
slicingManager.initialize()
pluginLifecycleManager.add_callback(["enabled", "disabled"], lambda name, plugin: slicingManager.reload_slicers())
# setup jinja2
self._setup_jinja2()
# make sure plugin lifecycle events relevant for jinja2 are taken care of
def template_enabled(name, plugin):
if plugin.implementation is None or not isinstance(plugin.implementation, octoprint.plugin.TemplatePlugin):
return
self._register_additional_template_plugin(plugin.implementation)
def template_disabled(name, plugin):
if plugin.implementation is None or not isinstance(plugin.implementation, octoprint.plugin.TemplatePlugin):
return
self._unregister_additional_template_plugin(plugin.implementation)
pluginLifecycleManager.add_callback("enabled", template_enabled)
pluginLifecycleManager.add_callback("disabled", template_disabled)
# setup assets
self._setup_assets()
# configure timelapse
octoprint.timelapse.configure_timelapse()
# setup command triggers
events.CommandTrigger(printer)
if self._debug:
events.DebugEventListener()
# setup access control
userManagerName = s.get(["accessControl", "userManager"])
try:
clazz = octoprint.util.get_class(userManagerName)
userManager = clazz()
except AttributeError as e:
self._logger.exception("Could not instantiate user manager {}, falling back to FilebasedUserManager!".format(userManagerName))
userManager = octoprint.users.FilebasedUserManager()
finally:
userManager.enabled = s.getBoolean(["accessControl", "enabled"])
loginManager = LoginManager()
loginManager.session_protection = "strong"
loginManager.user_callback = load_user
if not userManager.enabled:
loginManager.anonymous_user = users.DummyUser
principals.identity_loaders.appendleft(users.dummy_identity_loader)
loginManager.init_app(app)
# register API blueprint
self._setup_blueprints()
## Tornado initialization starts here
if self._host is None:
self._host = s.get(["server", "host"])
if self._port is None:
self._port = s.getInt(["server", "port"])
ioloop = IOLoop()
ioloop.install()
self._router = SockJSRouter(self._create_socket_connection, "/sockjs")
upload_suffixes = dict(name=s.get(["server", "uploads", "nameSuffix"]), path=s.get(["server", "uploads", "pathSuffix"]))
def mime_type_guesser(path):
from octoprint.filemanager import get_mime_type
return get_mime_type(path)
download_handler_kwargs = dict(
as_attachment=True,
allow_client_caching=False
)
additional_mime_types=dict(mime_type_guesser=mime_type_guesser)
admin_validator = dict(access_validation=util.tornado.access_validation_factory(app, loginManager, util.flask.admin_validator))
no_hidden_files_validator = dict(path_validation=util.tornado.path_validation_factory(lambda path: not octoprint.util.is_hidden_path(path), status_code=404))
def joined_dict(*dicts):
if not len(dicts):
return dict()
joined = dict()
for d in dicts:
joined.update(d)
return joined
server_routes = self._router.urls + [
# various downloads
(r"/downloads/timelapse/([^/]*\.mp[g4])", util.tornado.LargeResponseHandler, joined_dict(dict(path=s.getBaseFolder("timelapse")), download_handler_kwargs, no_hidden_files_validator)),
(r"/downloads/files/local/(.*)", util.tornado.LargeResponseHandler, joined_dict(dict(path=s.getBaseFolder("uploads")), download_handler_kwargs, no_hidden_files_validator, additional_mime_types)),
(r"/downloads/logs/([^/]*)", util.tornado.LargeResponseHandler, joined_dict(dict(path=s.getBaseFolder("logs")), download_handler_kwargs, admin_validator)),
# camera snapshot
(r"/downloads/camera/current", util.tornado.UrlProxyHandler, dict(url=s.get(["webcam", "snapshot"]), as_attachment=True, access_validation=util.tornado.access_validation_factory(app, loginManager, util.flask.user_validator))),
# generated webassets
(r"/static/webassets/(.*)", util.tornado.LargeResponseHandler, dict(path=os.path.join(s.getBaseFolder("generated"), "webassets"))),
# online indicators - text file with "online" as content and a transparent gif
(r"/online.txt", util.tornado.StaticDataHandler, dict(data="online\n")),
(r"/online.gif", util.tornado.StaticDataHandler, dict(data=bytes(base64.b64decode("R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7")), content_type="image/gif"))
]
# fetch additional routes from plugins
for name, hook in pluginManager.get_hooks("octoprint.server.http.routes").items():
try:
result = hook(list(server_routes))
except:
self._logger.exception("There was an error while retrieving additional server routes from plugin hook {name}".format(**locals()))
else:
if isinstance(result, (list, tuple)):
for entry in result:
if not isinstance(entry, tuple) or not len(entry) == 3:
continue
if not isinstance(entry[0], basestring):
continue
if not isinstance(entry[2], dict):
continue
route, handler, kwargs = entry
route = r"/plugin/{name}/{route}".format(name=name, route=route if not route.startswith("/") else route[1:])
self._logger.debug("Adding additional route {route} handled by handler {handler} and with additional arguments {kwargs!r}".format(**locals()))
server_routes.append((route, handler, kwargs))
server_routes.append((r".*", util.tornado.UploadStorageFallbackHandler, dict(fallback=util.tornado.WsgiInputContainer(app.wsgi_app), file_prefix="octoprint-file-upload-", file_suffix=".tmp", suffixes=upload_suffixes)))
self._tornado_app = Application(server_routes)
max_body_sizes = [
("POST", r"/api/files/([^/]*)", s.getInt(["server", "uploads", "maxSize"])),
("POST", r"/api/languages", 5 * 1024 * 1024)
]
# allow plugins to extend allowed maximum body sizes
for name, hook in pluginManager.get_hooks("octoprint.server.http.bodysize").items():
try:
result = hook(list(max_body_sizes))
except:
self._logger.exception("There was an error while retrieving additional upload sizes from plugin hook {name}".format(**locals()))
else:
if isinstance(result, (list, tuple)):
for entry in result:
if not isinstance(entry, tuple) or not len(entry) == 3:
continue
if not entry[0] in util.tornado.UploadStorageFallbackHandler.BODY_METHODS:
continue
if not isinstance(entry[2], int):
continue
method, route, size = entry
route = r"/plugin/{name}/{route}".format(name=name, route=route if not route.startswith("/") else route[1:])
self._logger.debug("Adding maximum body size of {size}B for {method} requests to {route})".format(**locals()))
max_body_sizes.append((method, route, size))
self._stop_intermediary_server()
# initialize and bind the server
self._server = util.tornado.CustomHTTPServer(self._tornado_app, max_body_sizes=max_body_sizes, default_max_body_size=s.getInt(["server", "maxSize"]))
self._server.listen(self._port, address=self._host)
eventManager.fire(events.Events.STARTUP)
# auto connect
if s.getBoolean(["serial", "autoconnect"]):
(port, baudrate) = s.get(["serial", "port"]), s.getInt(["serial", "baudrate"])
printer_profile = printerProfileManager.get_default()
connectionOptions = get_connection_options()
if port in connectionOptions["ports"]:
printer.connect(port=port, baudrate=baudrate, profile=printer_profile["id"] if "id" in printer_profile else "_default")
# start up watchdogs
if s.getBoolean(["feature", "pollWatched"]):
# use the less performant polling observer if explicitly configured
observer = PollingObserver()
else:
# use os default
observer = Observer()
observer.schedule(util.watchdog.GcodeWatchdogHandler(fileManager, printer), s.getBaseFolder("watched"))
observer.start()
# run our startup plugins
octoprint.plugin.call_plugin(octoprint.plugin.StartupPlugin,
"on_startup",
args=(self._host, self._port))
def call_on_startup(name, plugin):
implementation = plugin.get_implementation(octoprint.plugin.StartupPlugin)
if implementation is None:
return
implementation.on_startup(self._host, self._port)
pluginLifecycleManager.add_callback("enabled", call_on_startup)
# prepare our after startup function
def on_after_startup():
self._logger.info("Listening on http://%s:%d" % (self._host, self._port))
# now this is somewhat ugly, but the issue is the following: startup plugins might want to do things for
# which they need the server to be already alive (e.g. for being able to resolve urls, such as favicons
# or service xmls or the like). While they are doing that work, though, the ioloop would block. Therefore we'll
# create a single use thread in which to perform our after-startup-tasks, start that and hand back
# control to the ioloop
def work():
octoprint.plugin.call_plugin(octoprint.plugin.StartupPlugin,
"on_after_startup")
def call_on_after_startup(name, plugin):
implementation = plugin.get_implementation(octoprint.plugin.StartupPlugin)
if implementation is None:
return
implementation.on_after_startup()
pluginLifecycleManager.add_callback("enabled", call_on_after_startup)
# when we are through with that we also run our preemptive cache
if settings().getBoolean(["devel", "cache", "preemptive"]):
self._execute_preemptive_flask_caching(preemptiveCache)
import threading
threading.Thread(target=work).start()
ioloop.add_callback(on_after_startup)
# prepare our shutdown function
def on_shutdown():
# will be called on clean system exit and shutdown the watchdog observer and call the on_shutdown methods
# on all registered ShutdownPlugins
self._logger.info("Shutting down...")
observer.stop()
observer.join()
octoprint.plugin.call_plugin(octoprint.plugin.ShutdownPlugin,
"on_shutdown")
if self._octoprint_daemon is not None:
self._logger.info("Cleaning up daemon pidfile")
self._octoprint_daemon.terminated()
self._logger.info("Goodbye!")
atexit.register(on_shutdown)
def sigterm_handler(*args, **kwargs):
# will stop tornado on SIGTERM, making the program exit cleanly
def shutdown_tornado():
ioloop.stop()
ioloop.add_callback_from_signal(shutdown_tornado)
signal.signal(signal.SIGTERM, sigterm_handler)
try:
# this is the main loop - as long as tornado is running, OctoPrint is running
ioloop.start()
except (KeyboardInterrupt, SystemExit):
pass
except:
self._logger.fatal("Now that is embarrassing... Something really really went wrong here. Please report this including the stacktrace below in OctoPrint's bugtracker. Thanks!")
self._logger.exception("Stacktrace follows:")
def _create_socket_connection(self, session):
global printer, fileManager, analysisQueue, userManager, eventManager
return util.sockjs.PrinterStateConnection(printer, fileManager, analysisQueue, userManager,
eventManager, pluginManager, session)
def _check_for_root(self):
if "geteuid" in dir(os) and os.geteuid() == 0:
exit("You should not run OctoPrint as root!")
def _get_locale(self):
global LANGUAGES
if "l10n" in request.values:
return Locale.negotiate([request.values["l10n"]], LANGUAGES)
if hasattr(g, "identity") and g.identity and userManager.enabled:
userid = g.identity.id
try:
user_language = userManager.getUserSetting(userid, ("interface", "language"))
if user_language is not None and not user_language == "_default":
return Locale.negotiate([user_language], LANGUAGES)
except octoprint.users.UnknownUser:
pass
default_language = settings().get(["appearance", "defaultLanguage"])
if default_language is not None and not default_language == "_default" and default_language in LANGUAGES:
return Locale.negotiate([default_language], LANGUAGES)
return Locale.parse(request.accept_languages.best_match(LANGUAGES))
def _setup_logging(self, debug, logConf=None):
defaultConfig = {
"version": 1,
"formatters": {
"simple": {
"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
},
"serial": {
"format": "%(asctime)s - %(message)s"
}
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "simple",
"stream": "ext://sys.stdout"
},
"file": {
"class": "octoprint.logging.handlers.CleaningTimedRotatingFileHandler",
"level": "DEBUG",
"formatter": "simple",
"when": "D",
"backupCount": 6,
"filename": os.path.join(settings().getBaseFolder("logs"), "octoprint.log")
},
"serialFile": {
"class": "octoprint.logging.handlers.SerialLogHandler",
"level": "DEBUG",
"formatter": "serial",
"backupCount": 3,
"filename": os.path.join(settings().getBaseFolder("logs"), "serial.log")
}
},
"loggers": {
"SERIAL": {
"level": "CRITICAL",
"handlers": ["serialFile"],
"propagate": False
},
"tornado.application": {
"level": "INFO"
},
"tornado.general": {
"level": "INFO"
},
"octoprint.server.util.flask": {
"level": "WARN"
}
},
"root": {
"level": "INFO",
"handlers": ["console", "file"]
}
}
if debug:
defaultConfig["root"]["level"] = "DEBUG"
if logConf is None:
logConf = os.path.join(settings().getBaseFolder("base"), "logging.yaml")
configFromFile = {}
if os.path.exists(logConf) and os.path.isfile(logConf):
import yaml
with open(logConf, "r") as f:
configFromFile = yaml.safe_load(f)
config = octoprint.util.dict_merge(defaultConfig, configFromFile)
logging.config.dictConfig(config)
logging.captureWarnings(True)
import warnings
warnings.simplefilter("always")
if settings().getBoolean(["serial", "log"]):
# enable debug logging to serial.log
logging.getLogger("SERIAL").setLevel(logging.DEBUG)
def _setup_app(self, app):
from octoprint.server.util.flask import ReverseProxiedEnvironment, OctoPrintFlaskRequest, OctoPrintFlaskResponse
s = settings()
app.debug = self._debug
secret_key = s.get(["server", "secretKey"])
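# Generate a random 32-character secret key on first run and persist it in the settings so the
# same key is reused across restarts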
if not secret_key:
import string
from random import choice
chars = string.ascii_lowercase + string.ascii_uppercase + string.digits
secret_key = "".join(choice(chars) for _ in range(32))
s.set(["server", "secretKey"], secret_key)
s.save()
app.secret_key = secret_key
reverse_proxied = ReverseProxiedEnvironment(
header_prefix=s.get(["server", "reverseProxy", "prefixHeader"]),
header_scheme=s.get(["server", "reverseProxy", "schemeHeader"]),
header_host=s.get(["server", "reverseProxy", "hostHeader"]),
header_server=s.get(["server", "reverseProxy", "serverHeader"]),
header_port=s.get(["server", "reverseProxy", "portHeader"]),
prefix=s.get(["server", "reverseProxy", "prefixFallback"]),
scheme=s.get(["server", "reverseProxy", "schemeFallback"]),
host=s.get(["server", "reverseProxy", "hostFallback"]),
server=s.get(["server", "reverseProxy", "serverFallback"]),
port=s.get(["server", "reverseProxy", "portFallback"])
)
OctoPrintFlaskRequest.environment_wrapper = reverse_proxied
app.request_class = OctoPrintFlaskRequest
app.response_class = OctoPrintFlaskResponse
@app.before_request
def before_request():
g.locale = self._get_locale()
@app.after_request
def after_request(response):
# send no-cache headers with all POST responses
if request.method == "POST":
response.cache_control.no_cache = True
response.headers.add("X-Clacks-Overhead", "GNU Terry Pratchett")
return response
from octoprint.util.jinja import MarkdownFilter
MarkdownFilter(app)
def _setup_i18n(self, app):
global babel
global LOCALES
global LANGUAGES
babel = Babel(app)
def get_available_locale_identifiers(locales):
result = set()
# add available translations
for locale in locales:
result.add(locale.language)
if locale.territory:
# if a territory is specified, add that too
result.add("%s_%s" % (locale.language, locale.territory))
return result
LOCALES = babel.list_translations()
LANGUAGES = get_available_locale_identifiers(LOCALES)
@babel.localeselector
def get_locale():
return self._get_locale()
def _setup_jinja2(self):
import re
app.jinja_env.add_extension("jinja2.ext.do")
app.jinja_env.add_extension("octoprint.util.jinja.trycatch")
def regex_replace(s, find, replace):
return re.sub(find, replace, s)
html_header_regex = re.compile("<h(?P<number>[1-6])>(?P<content>.*?)</h(?P=number)>")
def offset_html_headers(s, offset):
def repl(match):
number = int(match.group("number"))
number += offset
if number > 6:
number = 6
elif number < 1:
number = 1
return "<h{number}>{content}</h{number}>".format(number=number, content=match.group("content"))
return html_header_regex.sub(repl, s)
markdown_header_regex = re.compile("^(?P<hashs>#+)\s+(?P<content>.*)$", flags=re.MULTILINE)
def offset_markdown_headers(s, offset):
def repl(match):
number = len(match.group("hashs"))
number += offset
if number > 6:
number = 6
elif number < 1:
number = 1
return "{hashs} {content}".format(hashs="#" * number, content=match.group("content"))
return markdown_header_regex.sub(repl, s)
html_link_regex = re.compile("<(?P<tag>a.*?)>(?P<content>.*?)</a>")
def externalize_links(text):
def repl(match):
tag = match.group("tag")
if not u"href" in tag:
return match.group(0)
if not u"target=" in tag and not u"rel=" in tag:
tag += u" target=\"_blank\" rel=\"noreferrer noopener\""
content = match.group("content")
return u"<{tag}>{content}</a>".format(tag=tag, content=content)
return html_link_regex.sub(repl, text)
app.jinja_env.filters["regex_replace"] = regex_replace
app.jinja_env.filters["offset_html_headers"] = offset_html_headers
app.jinja_env.filters["offset_markdown_headers"] = offset_markdown_headers
app.jinja_env.filters["externalize_links"] = externalize_links
# configure additional template folders for jinja2
import jinja2
import octoprint.util.jinja
filesystem_loader = octoprint.util.jinja.FilteredFileSystemLoader([],
path_filter=lambda x: not octoprint.util.is_hidden_path(x))
filesystem_loader.searchpath = self._template_searchpaths
loaders = [app.jinja_loader, filesystem_loader]
if octoprint.util.is_running_from_source():
from octoprint.util.jinja import SelectedFileSystemLoader
root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
allowed = ["AUTHORS.md", "CHANGELOG.md", "SUPPORTERS.md", "THIRDPARTYLICENSES.md"]
loaders.append(SelectedFileSystemLoader(root, allowed, prefix="_data/"))
jinja_loader = jinja2.ChoiceLoader(loaders)
app.jinja_loader = jinja_loader
self._register_template_plugins()
def _execute_preemptive_flask_caching(self, preemptive_cache):
from werkzeug.test import EnvironBuilder
import time
# we clean up entries from our preemptive cache settings that haven't been
# accessed for longer than server.preemptiveCache.until days
preemptive_cache_timeout = settings().getInt(["server", "preemptiveCache", "until"])
cutoff_timestamp = time.time() - preemptive_cache_timeout * 24 * 60 * 60
def filter_current_entries(entry):
"""Returns True for entries younger than the cutoff date"""
return "_timestamp" in entry and entry["_timestamp"] > cutoff_timestamp
def filter_http_entries(entry):
"""Returns True for entries targeting http or https."""
return "base_url" in entry \
and entry["base_url"] \
and (entry["base_url"].startswith("http://")
or entry["base_url"].startswith("https://"))
def filter_entries(entry):
"""Combined filter."""
filters = (filter_current_entries,
filter_http_entries)
return all([f(entry) for f in filters])
# filter out all old and non-http entries
cache_data = preemptive_cache.clean_all_data(lambda root, entries: filter(filter_entries, entries))
if not cache_data:
return
def execute_caching():
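# Warm routes with fewer path segments first; within each route the most frequently recorded
# entries (highest _count) are cached first via the reversed sort below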
for route in sorted(cache_data.keys(), key=lambda x: (x.count("/"), x)):
entries = reversed(sorted(cache_data[route], key=lambda x: x.get("_count", 0)))
for kwargs in entries:
plugin = kwargs.get("plugin", None)
additional_request_data = kwargs.get("_additional_request_data", dict())
kwargs = dict((k, v) for k, v in kwargs.items() if not k.startswith("_") and not k == "plugin")
kwargs.update(additional_request_data)
try:
if plugin:
self._logger.info("Preemptively caching {} (plugin {}) for {!r}".format(route, plugin, kwargs))
else:
self._logger.info("Preemptively caching {} for {!r}".format(route, kwargs))
headers = kwargs.get("headers", dict())
headers["X-Preemptive-Record"] = "no"
kwargs["headers"] = headers
builder = EnvironBuilder(**kwargs)
app(builder.get_environ(), lambda *a, **kw: None)
except:
self._logger.exception("Error while trying to preemptively cache {} for {!r}".format(route, kwargs))
# asynchronous caching
import threading
cache_thread = threading.Thread(target=execute_caching, name="Preemptive Cache Worker")
cache_thread.daemon = True
cache_thread.start()
def _register_template_plugins(self):
template_plugins = pluginManager.get_implementations(octoprint.plugin.TemplatePlugin)
for plugin in template_plugins:
try:
self._register_additional_template_plugin(plugin)
except:
self._logger.exception("Error while trying to register templates of plugin {}, ignoring it".format(plugin._identifier))
def _register_additional_template_plugin(self, plugin):
folder = plugin.get_template_folder()
if folder is not None and not folder in self._template_searchpaths:
self._template_searchpaths.append(folder)
def _unregister_additional_template_plugin(self, plugin):
folder = plugin.get_template_folder()
if folder is not None and folder in self._template_searchpaths:
self._template_searchpaths.remove(folder)
def _setup_blueprints(self):
from octoprint.server.api import api
from octoprint.server.apps import apps, clear_registered_app
import octoprint.server.views
app.register_blueprint(api, url_prefix="/api")
app.register_blueprint(apps, url_prefix="/apps")
# also register any blueprints defined in BlueprintPlugins
self._register_blueprint_plugins()
# and register a blueprint for serving the static files of asset plugins which are not blueprint plugins themselves
self._register_asset_plugins()
global pluginLifecycleManager
def clear_apps(name, plugin):
clear_registered_app()
pluginLifecycleManager.add_callback("enabled", clear_apps)
pluginLifecycleManager.add_callback("disabled", clear_apps)
def _register_blueprint_plugins(self):
blueprint_plugins = octoprint.plugin.plugin_manager().get_implementations(octoprint.plugin.BlueprintPlugin)
for plugin in blueprint_plugins:
try:
self._register_blueprint_plugin(plugin)
except:
self._logger.exception("Error while registering blueprint of plugin {}, ignoring it".format(plugin._identifier))
continue
def _register_asset_plugins(self):
asset_plugins = octoprint.plugin.plugin_manager().get_implementations(octoprint.plugin.AssetPlugin)
for plugin in asset_plugins:
if isinstance(plugin, octoprint.plugin.BlueprintPlugin):
continue
try:
self._register_asset_plugin(plugin)
except:
self._logger.exception("Error while registering assets of plugin {}, ignoring it".format(plugin._identifier))
continue
def _register_blueprint_plugin(self, plugin):
name = plugin._identifier
blueprint = plugin.get_blueprint()
if blueprint is None:
return
if plugin.is_blueprint_protected():
from octoprint.server.util import apiKeyRequestHandler, corsResponseHandler
blueprint.before_request(apiKeyRequestHandler)
blueprint.after_request(corsResponseHandler)
url_prefix = "/plugin/{name}".format(name=name)
app.register_blueprint(blueprint, url_prefix=url_prefix)
if self._logger:
self._logger.debug("Registered API of plugin {name} under URL prefix {url_prefix}".format(name=name, url_prefix=url_prefix))
def _register_asset_plugin(self, plugin):
name = plugin._identifier
url_prefix = "/plugin/{name}".format(name=name)
blueprint = Blueprint("plugin." + name, name, static_folder=plugin.get_asset_folder())
app.register_blueprint(blueprint, url_prefix=url_prefix)
if self._logger:
self._logger.debug("Registered assets of plugin {name} under URL prefix {url_prefix}".format(name=name, url_prefix=url_prefix))
def _setup_assets(self):
global app
global assets
global pluginManager
util.flask.fix_webassets_cache()
util.flask.fix_webassets_filtertool()
base_folder = settings().getBaseFolder("generated")
# clean the folder
if settings().getBoolean(["devel", "webassets", "clean_on_startup"]):
import shutil
import errno
import sys
for entry in ("webassets", ".webassets-cache"):
path = os.path.join(base_folder, entry)
# delete path if it exists
if os.path.isdir(path):
try:
self._logger.debug("Deleting {path}...".format(**locals()))
shutil.rmtree(path)
except:
self._logger.exception("Error while trying to delete {path}, leaving it alone".format(**locals()))
continue
# re-create path
self._logger.debug("Creating {path}...".format(**locals()))
error_text = "Error while trying to re-create {path}, that might cause errors with the webassets cache".format(**locals())
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EACCES:
# that might be caused by the user still having the folder open somewhere, let's try again after
# waiting a bit
import time
for n in range(3):
time.sleep(0.5)
self._logger.debug("Creating {path}: Retry #{retry} after {time}s".format(path=path, retry=n+1, time=(n + 1)*0.5))
try:
os.makedirs(path)
break
except:
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.exception("Ignored error while creating directory {path}".format(**locals()))
pass
else:
# this will only get executed if we never did
# successfully execute makedirs above
self._logger.exception(error_text)
continue
else:
# not an access error, so something we don't understand
# went wrong -> log an error and stop
self._logger.exception(error_text)
continue
except:
# not an OSError, so something we don't understand
# went wrong -> log an error and stop
self._logger.exception(error_text)
continue
self._logger.info("Reset webasset folder {path}...".format(**locals()))
AdjustedEnvironment = type(Environment)(Environment.__name__, (Environment,), dict(
resolver_class=util.flask.PluginAssetResolver
))
class CustomDirectoryEnvironment(AdjustedEnvironment):
@property
def directory(self):
return base_folder
assets = CustomDirectoryEnvironment(app)
assets.debug = not settings().getBoolean(["devel", "webassets", "bundle"])
UpdaterType = type(util.flask.SettingsCheckUpdater)(util.flask.SettingsCheckUpdater.__name__, (util.flask.SettingsCheckUpdater,), dict(
updater=assets.updater
))
assets.updater = UpdaterType
enable_gcodeviewer = settings().getBoolean(["gcodeViewer", "enabled"])
preferred_stylesheet = settings().get(["devel", "stylesheet"])
dynamic_core_assets = util.flask.collect_core_assets(enable_gcodeviewer=enable_gcodeviewer)
dynamic_plugin_assets = util.flask.collect_plugin_assets(
enable_gcodeviewer=enable_gcodeviewer,
preferred_stylesheet=preferred_stylesheet
)
js_libs = [
"js/lib/jquery/jquery.min.js",
"js/lib/modernizr.custom.js",
"js/lib/lodash.min.js",
"js/lib/sprintf.min.js",
"js/lib/knockout-3.4.0.js",
"js/lib/knockout.mapping-latest.js",
"js/lib/babel.js",
"js/lib/avltree.js",
"js/lib/bootstrap/bootstrap.js",
"js/lib/bootstrap/bootstrap-modalmanager.js",
"js/lib/bootstrap/bootstrap-modal.js",
"js/lib/bootstrap/bootstrap-slider.js",
"js/lib/bootstrap/bootstrap-tabdrop.js",
"js/lib/jquery/jquery.ui.core.js",
"js/lib/jquery/jquery.ui.widget.js",
"js/lib/jquery/jquery.ui.mouse.js",
"js/lib/jquery/jquery.flot.js",
"js/lib/jquery/jquery.iframe-transport.js",
"js/lib/jquery/jquery.fileupload.js",
"js/lib/jquery/jquery.slimscroll.min.js",
"js/lib/jquery/jquery.qrcode.min.js",
"js/lib/moment-with-locales.min.js",
"js/lib/pusher.color.min.js",
"js/lib/detectmobilebrowser.js",
"js/lib/md5.min.js",
"js/lib/pnotify.min.js",
"js/lib/bootstrap-slider-knockout-binding.js",
"js/lib/loglevel.min.js",
"js/lib/sockjs-0.3.4.min.js"
]
js_core = dynamic_core_assets["js"] + \
dynamic_plugin_assets["bundled"]["js"] + \
["js/app/dataupdater.js",
"js/app/helpers.js",
"js/app/main.js"]
js_plugins = dynamic_plugin_assets["external"]["js"]
if len(js_plugins) == 0:
js_plugins = ["empty"]
js_app = js_plugins + js_core
css_libs = [
"css/bootstrap.min.css",
"css/bootstrap-modal.css",
"css/bootstrap-slider.css",
"css/bootstrap-tabdrop.css",
"css/font-awesome.min.css",
"css/jquery.fileupload-ui.css",
"css/pnotify.min.css"
]
css_core = list(dynamic_core_assets["css"]) + list(dynamic_plugin_assets["bundled"]["css"])
if len(css_core) == 0:
css_core = ["empty"]
css_plugins = list(dynamic_plugin_assets["external"]["css"])
if len(css_plugins) == 0:
css_plugins = ["empty"]
css_app = css_core + css_plugins
less_core = list(dynamic_core_assets["less"]) + list(dynamic_plugin_assets["bundled"]["less"])
if len(less_core) == 0:
less_core = ["empty"]
less_plugins = list(dynamic_plugin_assets["external"]["less"])
if len(less_plugins) == 0:
less_plugins = ["empty"]
less_app = less_core + less_plugins
from webassets.filter import register_filter, Filter
from webassets.filter.cssrewrite.base import PatternRewriter
import re
class LessImportRewrite(PatternRewriter):
name = "less_importrewrite"
patterns = {
"import_rewrite": re.compile("(@import(\s+\(.*\))?\s+)\"(.*)\";")
}
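# Prefix relative @import urls with ../less/ so they keep resolving once the file is bundled
# under the generated webassets folder (presumably pointing back at the source less folder)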
def import_rewrite(self, m):
import_with_options = m.group(1)
import_url = m.group(3)
if not import_url.startswith("http:") and not import_url.startswith("https:") and not import_url.startswith("/"):
import_url = "../less/" + import_url
return "{import_with_options}\"{import_url}\";".format(**locals())
class JsDelimiterBundler(Filter):
name = "js_delimiter_bundler"
options = {}
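# Emit each source file followed by a newline and a semicolon so concatenated scripts cannot
# run into each other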
def input(self, _in, out, **kwargs):
out.write(_in.read())
out.write("\n;\n")
register_filter(LessImportRewrite)
register_filter(JsDelimiterBundler)
# JS
js_libs_bundle = Bundle(*js_libs, output="webassets/packed_libs.js", filters="js_delimiter_bundler")
if settings().getBoolean(["devel", "webassets", "minify"]):
js_core_bundle = Bundle(*js_core, output="webassets/packed_core.js", filters="rjsmin, js_delimiter_bundler")
js_plugins_bundle = Bundle(*js_plugins, output="webassets/packed_plugins.js", filters="rjsmin, js_delimiter_bundler")
js_app_bundle = Bundle(*js_app, output="webassets/packed_app.js", filters="rjsmin, js_delimiter_bundler")
else:
js_core_bundle = Bundle(*js_core, output="webassets/packed_core.js", filters="js_delimiter_bundler")
js_plugins_bundle = Bundle(*js_plugins, output="webassets/packed_plugins.js", filters="js_delimiter_bundler")
js_app_bundle = Bundle(*js_app, output="webassets/packed_app.js", filters="js_delimiter_bundler")
# CSS
css_libs_bundle = Bundle(*css_libs, output="webassets/packed_libs.css")
css_core_bundle = Bundle(*css_core, output="webassets/packed_core.css", filters="cssrewrite")
css_plugins_bundle = Bundle(*css_plugins, output="webassets/packed_plugins.css", filters="cssrewrite")
css_app_bundle = Bundle(*css_app, output="webassets/packed_app.css", filters="cssrewrite")
# LESS
less_core_bundle = Bundle(*less_core, output="webassets/packed_core.less", filters="cssrewrite, less_importrewrite")
less_plugins_bundle = Bundle(*less_plugins, output="webassets/packed_plugins.less", filters="cssrewrite, less_importrewrite")
less_app_bundle = Bundle(*less_app, output="webassets/packed_app.less", filters="cssrewrite, less_importrewrite")
# asset registration
assets.register("js_libs", js_libs_bundle)
assets.register("js_core", js_core_bundle)
assets.register("js_plugins", js_plugins_bundle)
assets.register("js_app", js_app_bundle)
assets.register("css_libs", css_libs_bundle)
assets.register("css_core", css_core_bundle)
assets.register("css_plugins", css_plugins_bundle)
assets.register("css_app", css_app_bundle)
assets.register("less_core", less_core_bundle)
assets.register("less_plugins", less_plugins_bundle)
assets.register("less_app", less_app_bundle)
def _start_intermediary_server(self, s):
import BaseHTTPServer
import SimpleHTTPServer
import threading
host = self._host
port = self._port
if host is None:
host = s.get(["server", "host"])
if port is None:
port = s.getInt(["server", "port"])
self._logger.debug("Starting intermediary server on {}:{}".format(host, port))
class IntermediaryServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def __init__(self, rules=None, *args, **kwargs):
if rules is None:
rules = []
self.rules = rules
SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(self, *args, **kwargs)
def do_GET(self):
request_path = self.path
if "?" in request_path:
request_path = request_path[0:request_path.find("?")]
for rule in self.rules:
path, data, content_type = rule
if request_path == path:
self.send_response(200)
if content_type:
self.send_header("Content-Type", content_type)
self.end_headers()
self.wfile.write(data)
break
else:
self.send_response(404)
self.wfile.write("Not found")
base_path = os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "static"))
rules = [
("/", ["intermediary.html",], "text/html"),
("/favicon.ico", ["img", "tentacle-20x20.png"], "image/png"),
("/intermediary.gif", bytes(base64.b64decode("R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7")), "image/gif")
]
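# A rule is either (path, data) or (path, data, content_type); data given as a list/tuple of
# path parts is read from the static folder via contents() below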
def contents(args):
path = os.path.join(base_path, *args)
if not os.path.isfile(path):
return ""
with open(path, "rb") as f:
data = f.read()
return data
def process(rule):
if len(rule) == 2:
path, data = rule
content_type = None
else:
path, data, content_type = rule
if isinstance(data, (list, tuple)):
data = contents(data)
return path, data, content_type
rules = map(process, filter(lambda rule: len(rule) == 2 or len(rule) == 3, rules))
self._intermediary_server = BaseHTTPServer.HTTPServer((host, port), lambda *args, **kwargs: IntermediaryServerHandler(rules, *args, **kwargs))
thread = threading.Thread(target=self._intermediary_server.serve_forever)
thread.daemon = True
thread.start()
self._logger.debug("Intermediary server started")
def _stop_intermediary_server(self):
if self._intermediary_server is None:
return
self._logger.debug("Shutting down intermediary server...")
self._intermediary_server.shutdown()
self._intermediary_server.server_close()
self._logger.debug("Intermediary server shut down")
class LifecycleManager(object):
def __init__(self, plugin_manager):
self._plugin_manager = plugin_manager
self._plugin_lifecycle_callbacks = defaultdict(list)
self._logger = logging.getLogger(__name__)
def on_plugin_event_factory(lifecycle_event):
def on_plugin_event(name, plugin):
self.on_plugin_event(lifecycle_event, name, plugin)
return on_plugin_event
self._plugin_manager.on_plugin_loaded = on_plugin_event_factory("loaded")
self._plugin_manager.on_plugin_unloaded = on_plugin_event_factory("unloaded")
self._plugin_manager.on_plugin_activated = on_plugin_event_factory("activated")
self._plugin_manager.on_plugin_deactivated = on_plugin_event_factory("deactivated")
self._plugin_manager.on_plugin_enabled = on_plugin_event_factory("enabled")
self._plugin_manager.on_plugin_disabled = on_plugin_event_factory("disabled")
def on_plugin_event(self, event, name, plugin):
for lifecycle_callback in self._plugin_lifecycle_callbacks[event]:
lifecycle_callback(name, plugin)
def add_callback(self, events, callback):
if isinstance(events, (str, unicode)):
events = [events]
for event in events:
self._plugin_lifecycle_callbacks[event].append(callback)
def remove_callback(self, callback, events=None):
if events is None:
for event in self._plugin_lifecycle_callbacks:
if callback in self._plugin_lifecycle_callbacks[event]:
self._plugin_lifecycle_callbacks[event].remove(callback)
else:
if isinstance(events, (str, unicode)):
events = [events]
for event in events:
if callback in self._plugin_lifecycle_callbacks[event]:
self._plugin_lifecycle_callbacks[event].remove(callback)
if __name__ == "__main__":
server = Server()
server.run()
|
agpl-3.0
| 2,597,432,022,406,943,000 | 6,338,243,315,555,719,000 | 35.687695 | 229 | 0.707729 | false |
NateBrune/bitcoin-fio
|
qa/rpc-tests/rest.py
|
10
|
12151
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test REST interface
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from struct import *
import binascii
import json
import StringIO
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
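# Deserialize a 256-bit integer stored as eight 32-bit little-endian words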
def deser_uint256(f):
r = 0
for i in range(8):
t = unpack(b"<I", f.read(4))[0]
r += t << (i * 32)
return r
#allows simple http get calls with a request body
def http_get_call(host, port, path, requestdata = '', response_object = 0):
conn = httplib.HTTPConnection(host, port)
conn.request('GET', path, requestdata)
if response_object:
return conn.getresponse()
return conn.getresponse().read()
class RESTTest (BitcoinTestFramework):
FORMAT_SEPARATOR = "."
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test(self):
url = urlparse.urlparse(self.nodes[0].url)
print "Mining blocks..."
self.nodes[0].generate(1)
self.sync_all()
self.nodes[2].generate(100)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 50)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.nodes[2].generate(1)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1")) #balance now should be 0.1 on node 1
# load the latest 0.1 tx over the REST API
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
#######################################
# GETUTXOS: query an unspent outpoint #
#######################################
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
#make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], 0.1)
#################################################
# GETUTXOS: now query an already spent outpoint #
#################################################
json_request = '/checkmempool/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
#make sure there is no utxo in the response because this outpoint has been spent
assert_equal(len(json_obj['utxos']), 0)
#check bitmap
assert_equal(json_obj['bitmap'], "0")
##################################################
# GETUTXOS: now check both with the same request #
##################################################
json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
#test binary response
bb_hash = self.nodes[0].getbestblockhash()
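# Build the raw getutxos request; assumed layout: a one-byte check-mempool flag and a one-byte
# outpoint count, followed by each 32-byte txid and a 4-byte little-endian vout index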
binaryRequest = b'\x01\x02'
binaryRequest += binascii.unhexlify(txid)
binaryRequest += pack("i", n);
binaryRequest += binascii.unhexlify(vintx);
binaryRequest += pack("i", 0);
bin_response = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', binaryRequest)
output = StringIO.StringIO()
output.write(bin_response)
output.seek(0)
chainHeight = unpack("i", output.read(4))[0]
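# On Python 2 the deserialized value is a long, so hex() yields '0x...L'; strip the prefix,
# zero-pad and drop the trailing 'L' to recover the 64-character block hash string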
hashFromBinResponse = hex(deser_uint256(output))[2:].zfill(65).rstrip("L")
assert_equal(bb_hash, hashFromBinResponse) #check that the chain tip reported by getutxos matches the hash recorded above
assert_equal(chainHeight, 102) #chain height must be 102
############################
# GETUTXOS: mempool checks #
############################
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
json_request = '/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 0) #no utxo should be returned because the tx is only in the mempool and we did not use /checkmempool
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 1) #one utxo should be returned because the tx was just added to the mempool and we used /checkmempool
#do some invalid requests
json_request = '{"checkmempool'
response = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
assert_equal(response.status, 500) #must be a 500 because we send an invalid json request
json_request = '{"checkmempool'
response = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True)
assert_equal(response.status, 500) #must be a 500 because we send a invalid bin request
response = http_get_call(url.hostname, url.port, '/rest/getutxos/checkmempool'+self.FORMAT_SEPARATOR+'bin', '', True)
assert_equal(response.status, 500) #must be a 500 because we send a invalid bin request
#test limits
json_request = '/checkmempool/'
for x in range(0, 20):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
assert_equal(response.status, 500) #must be a 500 because we are exceeding the outpoint limit
json_request = '/checkmempool/'
for x in range(0, 15):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/");
response = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
assert_equal(response.status, 200) #must be a 200 because we are within the outpoint limit
self.nodes[0].generate(1) #generate block to not affect upcoming tests
self.sync_all()
################
# /rest/block/ #
################
# check binary format
response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", "", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 80)
response_str = response.read()
# compare with block header
response_header = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"bin", "", True)
assert_equal(response_header.status, 200)
assert_equal(int(response_header.getheader('content-length')), 80)
response_header_str = response_header.read()
assert_equal(response_str[0:80], response_header_str)
# check block hex format
response_hex = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"hex", "", True)
assert_equal(response_hex.status, 200)
assert_greater_than(int(response_hex.getheader('content-length')), 160)
response_hex_str = response_hex.read()
assert_equal(response_str.encode("hex")[0:160], response_hex_str[0:160])
# compare with hex block header
response_header_hex = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"hex", "", True)
assert_equal(response_header_hex.status, 200)
assert_greater_than(int(response_header_hex.getheader('content-length')), 160)
response_header_hex_str = response_header_hex.read()
assert_equal(response_hex_str[0:160], response_header_hex_str[0:160])
assert_equal(response_header_str.encode("hex")[0:160], response_header_hex_str[0:160])
# check json format
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(json_obj['hash'], bb_hash)
# do tx test
tx_hash = json_obj['tx'][0]['txid'];
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
assert_equal(json_obj['txid'], tx_hash)
# check hex format response
hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", "", True)
assert_equal(hex_string.status, 200)
assert_greater_than(int(response.getheader('content-length')), 10)
# check block tx details
# let's make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
self.sync_all()
# now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
#check if the 3 tx show up in the new block
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in json_obj['tx']:
if not 'coinbase' in tx['vin'][0]: #exclude coinbase
assert_equal(tx['txid'] in txs, True)
#check the same but without tx details
json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj['tx'], True)
#test rest bestblock
bb_hash = self.nodes[0].getbestblockhash()
json_string = http_get_call(url.hostname, url.port, '/rest/chaininfo.json')
json_obj = json.loads(json_string)
assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
RESTTest ().main ()
|
mit
| -7,580,196,575,297,582,000 | -8,183,960,430,772,935,000 | 41.190972 | 133 | 0.609662 | false |
dipanjanS/text-analytics-with-python
|
Old-First-Edition/Ch06_Text_Similarity_and_Clustering/utils.py
|
1
|
1097
|
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 11 23:06:06 2016
@author: DIP
"""
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
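# Build a document-term matrix from raw text using binary, term-frequency, or tf-idf weighting.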
def build_feature_matrix(documents, feature_type='frequency',
ngram_range=(1, 1), min_df=0.0, max_df=1.0):
feature_type = feature_type.lower().strip()
if feature_type == 'binary':
vectorizer = CountVectorizer(binary=True, min_df=min_df,
max_df=max_df, ngram_range=ngram_range)
elif feature_type == 'frequency':
vectorizer = CountVectorizer(binary=False, min_df=min_df,
max_df=max_df, ngram_range=ngram_range)
elif feature_type == 'tfidf':
vectorizer = TfidfVectorizer(min_df=min_df, max_df=max_df,
ngram_range=ngram_range)
else:
raise Exception("Wrong feature type entered. Possible values: 'binary', 'frequency', 'tfidf'")
feature_matrix = vectorizer.fit_transform(documents).astype(float)
return vectorizer, feature_matrix
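# Example usage (illustrative):
#   vectorizer, features = build_feature_matrix(['some text', 'other text'],
#                                               feature_type='tfidf', ngram_range=(1, 2))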
|
apache-2.0
| -7,794,889,610,154,016,000 | -2,347,031,151,640,498,700 | 36.862069 | 102 | 0.601641 | false |
tzewangdorje/SIPserv
|
Twisted-13.1.0/twisted/conch/test/test_insults.py
|
15
|
17669
|
# -*- test-case-name: twisted.conch.test.test_insults -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.test.proto_helpers import StringTransport
from twisted.conch.insults.insults import ServerProtocol, ClientProtocol
from twisted.conch.insults.insults import CS_UK, CS_US, CS_DRAWING, CS_ALTERNATE, CS_ALTERNATE_SPECIAL
from twisted.conch.insults.insults import G0, G1
from twisted.conch.insults.insults import modes
def _getattr(mock, name):
return super(Mock, mock).__getattribute__(name)
def occurrences(mock):
return _getattr(mock, 'occurrences')
def methods(mock):
return _getattr(mock, 'methods')
def _append(mock, obj):
occurrences(mock).append(obj)
default = object()
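# Mock records every attribute access and call in its 'occurrences' list so tests can assert on the exact call sequence.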
class Mock(object):
callReturnValue = default
def __init__(self, methods=None, callReturnValue=default):
"""
@param methods: Mapping of names to return values
@param callReturnValue: object __call__ should return
"""
self.occurrences = []
if methods is None:
methods = {}
self.methods = methods
if callReturnValue is not default:
self.callReturnValue = callReturnValue
def __call__(self, *a, **kw):
returnValue = _getattr(self, 'callReturnValue')
if returnValue is default:
returnValue = Mock()
# _getattr(self, 'occurrences').append(('__call__', returnValue, a, kw))
_append(self, ('__call__', returnValue, a, kw))
return returnValue
def __getattribute__(self, name):
methods = _getattr(self, 'methods')
if name in methods:
attrValue = Mock(callReturnValue=methods[name])
else:
attrValue = Mock()
# _getattr(self, 'occurrences').append((name, attrValue))
_append(self, (name, attrValue))
return attrValue
class MockMixin:
def assertCall(self, occurrence, methodName, expectedPositionalArgs=(),
expectedKeywordArgs={}):
attr, mock = occurrence
self.assertEqual(attr, methodName)
self.assertEqual(len(occurrences(mock)), 1)
[(call, result, args, kw)] = occurrences(mock)
self.assertEqual(call, "__call__")
self.assertEqual(args, expectedPositionalArgs)
self.assertEqual(kw, expectedKeywordArgs)
return result
_byteGroupingTestTemplate = """\
def testByte%(groupName)s(self):
transport = StringTransport()
proto = Mock()
parser = self.protocolFactory(lambda: proto)
parser.factory = self
parser.makeConnection(transport)
bytes = self.TEST_BYTES
while bytes:
chunk = bytes[:%(bytesPer)d]
bytes = bytes[%(bytesPer)d:]
parser.dataReceived(chunk)
self.verifyResults(transport, proto, parser)
"""
class ByteGroupingsMixin(MockMixin):
protocolFactory = None
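    # Dynamically generate testBytePairs/Triples/Quads/Quints/Sexes, each feeding TEST_BYTES to the parser in fixed-size chunks.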
for word, n in [('Pairs', 2), ('Triples', 3), ('Quads', 4), ('Quints', 5), ('Sexes', 6)]:
exec _byteGroupingTestTemplate % {'groupName': word, 'bytesPer': n}
del word, n
def verifyResults(self, transport, proto, parser):
result = self.assertCall(occurrences(proto).pop(0), "makeConnection", (parser,))
self.assertEqual(occurrences(result), [])
del _byteGroupingTestTemplate
class ServerArrowKeys(ByteGroupingsMixin, unittest.TestCase):
protocolFactory = ServerProtocol
# All the arrow keys once
TEST_BYTES = '\x1b[A\x1b[B\x1b[C\x1b[D'
def verifyResults(self, transport, proto, parser):
ByteGroupingsMixin.verifyResults(self, transport, proto, parser)
for arrow in (parser.UP_ARROW, parser.DOWN_ARROW,
parser.RIGHT_ARROW, parser.LEFT_ARROW):
result = self.assertCall(occurrences(proto).pop(0), "keystrokeReceived", (arrow, None))
self.assertEqual(occurrences(result), [])
self.failIf(occurrences(proto))
class PrintableCharacters(ByteGroupingsMixin, unittest.TestCase):
protocolFactory = ServerProtocol
# Some letters and digits, first on their own, then capitalized,
# then modified with alt
TEST_BYTES = 'abc123ABC!@#\x1ba\x1bb\x1bc\x1b1\x1b2\x1b3'
def verifyResults(self, transport, proto, parser):
ByteGroupingsMixin.verifyResults(self, transport, proto, parser)
for char in 'abc123ABC!@#':
result = self.assertCall(occurrences(proto).pop(0), "keystrokeReceived", (char, None))
self.assertEqual(occurrences(result), [])
for char in 'abc123':
result = self.assertCall(occurrences(proto).pop(0), "keystrokeReceived", (char, parser.ALT))
self.assertEqual(occurrences(result), [])
occs = occurrences(proto)
self.failIf(occs, "%r should have been []" % (occs,))
class ServerFunctionKeys(ByteGroupingsMixin, unittest.TestCase):
"""Test for parsing and dispatching function keys (F1 - F12)
"""
protocolFactory = ServerProtocol
byteList = []
for bytes in ('OP', 'OQ', 'OR', 'OS', # F1 - F4
'15~', '17~', '18~', '19~', # F5 - F8
'20~', '21~', '23~', '24~'): # F9 - F12
byteList.append('\x1b[' + bytes)
TEST_BYTES = ''.join(byteList)
del byteList, bytes
def verifyResults(self, transport, proto, parser):
ByteGroupingsMixin.verifyResults(self, transport, proto, parser)
for funcNum in range(1, 13):
funcArg = getattr(parser, 'F%d' % (funcNum,))
result = self.assertCall(occurrences(proto).pop(0), "keystrokeReceived", (funcArg, None))
self.assertEqual(occurrences(result), [])
self.failIf(occurrences(proto))
class ClientCursorMovement(ByteGroupingsMixin, unittest.TestCase):
protocolFactory = ClientProtocol
d2 = "\x1b[2B"
r4 = "\x1b[4C"
u1 = "\x1b[A"
l2 = "\x1b[2D"
# Move the cursor down two, right four, up one, left two, up one, left two
TEST_BYTES = d2 + r4 + u1 + l2 + u1 + l2
del d2, r4, u1, l2
def verifyResults(self, transport, proto, parser):
ByteGroupingsMixin.verifyResults(self, transport, proto, parser)
for (method, count) in [('Down', 2), ('Forward', 4), ('Up', 1),
('Backward', 2), ('Up', 1), ('Backward', 2)]:
result = self.assertCall(occurrences(proto).pop(0), "cursor" + method, (count,))
self.assertEqual(occurrences(result), [])
self.failIf(occurrences(proto))
class ClientControlSequences(unittest.TestCase, MockMixin):
def setUp(self):
self.transport = StringTransport()
self.proto = Mock()
self.parser = ClientProtocol(lambda: self.proto)
self.parser.factory = self
self.parser.makeConnection(self.transport)
result = self.assertCall(occurrences(self.proto).pop(0), "makeConnection", (self.parser,))
self.failIf(occurrences(result))
def testSimpleCardinals(self):
self.parser.dataReceived(
''.join([''.join(['\x1b[' + str(n) + ch for n in ('', 2, 20, 200)]) for ch in 'BACD']))
occs = occurrences(self.proto)
for meth in ("Down", "Up", "Forward", "Backward"):
for count in (1, 2, 20, 200):
result = self.assertCall(occs.pop(0), "cursor" + meth, (count,))
self.failIf(occurrences(result))
self.failIf(occs)
def testScrollRegion(self):
self.parser.dataReceived('\x1b[5;22r\x1b[r')
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "setScrollRegion", (5, 22))
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "setScrollRegion", (None, None))
self.failIf(occurrences(result))
self.failIf(occs)
def testHeightAndWidth(self):
self.parser.dataReceived("\x1b#3\x1b#4\x1b#5\x1b#6")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "doubleHeightLine", (True,))
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "doubleHeightLine", (False,))
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "singleWidthLine")
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "doubleWidthLine")
self.failIf(occurrences(result))
self.failIf(occs)
def testCharacterSet(self):
self.parser.dataReceived(
''.join([''.join(['\x1b' + g + n for n in 'AB012']) for g in '()']))
occs = occurrences(self.proto)
for which in (G0, G1):
for charset in (CS_UK, CS_US, CS_DRAWING, CS_ALTERNATE, CS_ALTERNATE_SPECIAL):
result = self.assertCall(occs.pop(0), "selectCharacterSet", (charset, which))
self.failIf(occurrences(result))
self.failIf(occs)
def testShifting(self):
self.parser.dataReceived("\x15\x14")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "shiftIn")
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "shiftOut")
self.failIf(occurrences(result))
self.failIf(occs)
def testSingleShifts(self):
self.parser.dataReceived("\x1bN\x1bO")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "singleShift2")
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "singleShift3")
self.failIf(occurrences(result))
self.failIf(occs)
def testKeypadMode(self):
self.parser.dataReceived("\x1b=\x1b>")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "applicationKeypadMode")
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "numericKeypadMode")
self.failIf(occurrences(result))
self.failIf(occs)
def testCursor(self):
self.parser.dataReceived("\x1b7\x1b8")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "saveCursor")
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "restoreCursor")
self.failIf(occurrences(result))
self.failIf(occs)
def testReset(self):
self.parser.dataReceived("\x1bc")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "reset")
self.failIf(occurrences(result))
self.failIf(occs)
def testIndex(self):
self.parser.dataReceived("\x1bD\x1bM\x1bE")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "index")
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "reverseIndex")
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "nextLine")
self.failIf(occurrences(result))
self.failIf(occs)
def testModes(self):
self.parser.dataReceived(
"\x1b[" + ';'.join(map(str, [modes.KAM, modes.IRM, modes.LNM])) + "h")
self.parser.dataReceived(
"\x1b[" + ';'.join(map(str, [modes.KAM, modes.IRM, modes.LNM])) + "l")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "setModes", ([modes.KAM, modes.IRM, modes.LNM],))
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "resetModes", ([modes.KAM, modes.IRM, modes.LNM],))
self.failIf(occurrences(result))
self.failIf(occs)
def testErasure(self):
self.parser.dataReceived(
"\x1b[K\x1b[1K\x1b[2K\x1b[J\x1b[1J\x1b[2J\x1b[3P")
occs = occurrences(self.proto)
for meth in ("eraseToLineEnd", "eraseToLineBeginning", "eraseLine",
"eraseToDisplayEnd", "eraseToDisplayBeginning",
"eraseDisplay"):
result = self.assertCall(occs.pop(0), meth)
self.failIf(occurrences(result))
result = self.assertCall(occs.pop(0), "deleteCharacter", (3,))
self.failIf(occurrences(result))
self.failIf(occs)
def testLineDeletion(self):
self.parser.dataReceived("\x1b[M\x1b[3M")
occs = occurrences(self.proto)
for arg in (1, 3):
result = self.assertCall(occs.pop(0), "deleteLine", (arg,))
self.failIf(occurrences(result))
self.failIf(occs)
def testLineInsertion(self):
self.parser.dataReceived("\x1b[L\x1b[3L")
occs = occurrences(self.proto)
for arg in (1, 3):
result = self.assertCall(occs.pop(0), "insertLine", (arg,))
self.failIf(occurrences(result))
self.failIf(occs)
def testCursorPosition(self):
methods(self.proto)['reportCursorPosition'] = (6, 7)
self.parser.dataReceived("\x1b[6n")
self.assertEqual(self.transport.value(), "\x1b[7;8R")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "reportCursorPosition")
# This isn't really an interesting assert, since it only tests that
# our mock setup is working right, but I'll include it anyway.
self.assertEqual(result, (6, 7))
def test_applicationDataBytes(self):
"""
Contiguous non-control bytes are passed to a single call to the
C{write} method of the terminal to which the L{ClientProtocol} is
connected.
"""
occs = occurrences(self.proto)
self.parser.dataReceived('a')
self.assertCall(occs.pop(0), "write", ("a",))
self.parser.dataReceived('bc')
self.assertCall(occs.pop(0), "write", ("bc",))
def _applicationDataTest(self, data, calls):
occs = occurrences(self.proto)
self.parser.dataReceived(data)
while calls:
self.assertCall(occs.pop(0), *calls.pop(0))
self.assertFalse(occs, "No other calls should happen: %r" % (occs,))
def test_shiftInAfterApplicationData(self):
"""
Application data bytes followed by a shift-in command are passed to a
call to C{write} before the terminal's C{shiftIn} method is called.
"""
self._applicationDataTest(
'ab\x15', [
("write", ("ab",)),
("shiftIn",)])
def test_shiftOutAfterApplicationData(self):
"""
Application data bytes followed by a shift-out command are passed to a
call to C{write} before the terminal's C{shiftOut} method is called.
"""
self._applicationDataTest(
'ab\x14', [
("write", ("ab",)),
("shiftOut",)])
def test_cursorBackwardAfterApplicationData(self):
"""
Application data bytes followed by a cursor-backward command are passed
to a call to C{write} before the terminal's C{cursorBackward} method is
called.
"""
self._applicationDataTest(
'ab\x08', [
("write", ("ab",)),
("cursorBackward",)])
def test_escapeAfterApplicationData(self):
"""
Application data bytes followed by an escape character are passed to a
call to C{write} before the terminal's handler method for the escape is
called.
"""
# Test a short escape
self._applicationDataTest(
'ab\x1bD', [
("write", ("ab",)),
("index",)])
# And a long escape
self._applicationDataTest(
'ab\x1b[4h', [
("write", ("ab",)),
("setModes", ([4],))])
# There's some other cases too, but they're all handled by the same
# codepaths as above.
class ServerProtocolOutputTests(unittest.TestCase):
"""
Tests for the bytes L{ServerProtocol} writes to its transport when its
methods are called.
"""
def test_nextLine(self):
"""
L{ServerProtocol.nextLine} writes C{"\r\n"} to its transport.
"""
# Why doesn't it write ESC E? Because ESC E is poorly supported. For
# example, gnome-terminal (many different versions) fails to scroll if
# it receives ESC E and the cursor is already on the last row.
protocol = ServerProtocol()
transport = StringTransport()
protocol.makeConnection(transport)
protocol.nextLine()
self.assertEqual(transport.value(), "\r\n")
class Deprecations(unittest.TestCase):
"""
Tests to ensure deprecation of L{insults.colors} and L{insults.client}
"""
def ensureDeprecated(self, message):
"""
Ensures that the correct deprecation warning was issued.
"""
warnings = self.flushWarnings()
self.assertIdentical(warnings[0]['category'], DeprecationWarning)
self.assertEqual(warnings[0]['message'], message)
self.assertEqual(len(warnings), 1)
def test_colors(self):
"""
The L{insults.colors} module is deprecated
"""
from twisted.conch.insults import colors
self.ensureDeprecated("twisted.conch.insults.colors was deprecated "
"in Twisted 10.1.0: Please use "
"twisted.conch.insults.helper instead.")
def test_client(self):
"""
The L{insults.client} module is deprecated
"""
from twisted.conch.insults import client
self.ensureDeprecated("twisted.conch.insults.client was deprecated "
"in Twisted 10.1.0: Please use "
"twisted.conch.insults.insults instead.")
|
gpl-3.0
| 6,763,629,555,660,839,000 | -5,075,340,867,973,381,000 | 34.622984 | 104 | 0.611806 | false |
DakRomo/2017Challenges
|
challenge_3/python/mindm/src/majority.py
|
3
|
1522
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from collections import defaultdict
def main():
if len(sys.argv) == 1: # Arguments provided by piping in shell
args = sys.stdin.read()
elif len(sys.argv) == 2: # Arguments provided as command line args
args = sys.argv[1]
else:
print("Error: too many arguments")
exit(1)
inlist = args_to_list(args)
test_digit(inlist)
# Map elements to a dictionary where the key is the element and increment
# the value (default value is 0 for each key initially)
sum_dict = defaultdict(int)
for elem in inlist:
sum_dict[elem] += 1
result = []
majority_threshold = len(inlist) / 2
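    # An element is the majority only if it occurs in more than half of the input positions.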
for key, value in sum_dict.items():
if value > majority_threshold:
result.append(key)
if result:
print(result[0])
else:
print("No majority value found")
def args_to_list(arg_string):
""" Parses argument-string to a list
"""
# Strip whitespace -> strip brackets -> split to substrings ->
# -> strip whitespace
arg_list = [x.strip() for x in arg_string.strip().strip("[]").split(',')]
return arg_list
def test_digit(arr):
""" Exits if list contains non-numeric strings
"""
for element in arr:
if not element.isdigit():
print("Error: '{}' is not numeric.".format(element))
exit(1)
if __name__ == "__main__":
main()
|
mit
| 7,273,910,176,473,000,000 | 1,108,627,928,331,782,100 | 25.178571 | 77 | 0.568331 | false |
sudheesh001/oh-mainline
|
vendor/packages/docutils/test/test_parsers/test_rst/test_targets.py
|
16
|
13765
|
#! /usr/bin/env python
# $Id: test_targets.py 7062 2011-06-30 22:14:29Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Tests for states.py.
"""
from __init__ import DocutilsTestSupport
def suite():
s = DocutilsTestSupport.ParserTestSuite()
s.generateTests(totest)
return s
totest = {}
totest['targets'] = [
["""\
.. _target:
(Internal hyperlink target.)
""",
"""\
<document source="test data">
<target ids="target" names="target">
<paragraph>
(Internal hyperlink target.)
"""],
["""\
.. _optional space before colon :
""",
"""\
<document source="test data">
<target ids="optional-space-before-colon" names="optional\ space\ before\ colon">
"""],
["""\
External hyperlink targets:
.. _one-liner: http://structuredtext.sourceforge.net
.. _starts-on-this-line: http://
structuredtext.
sourceforge.net
.. _entirely-below:
http://structuredtext.
sourceforge.net
.. _not-indirect: uri\\_
""",
"""\
<document source="test data">
<paragraph>
External hyperlink targets:
<target ids="one-liner" names="one-liner" refuri="http://structuredtext.sourceforge.net">
<target ids="starts-on-this-line" names="starts-on-this-line" refuri="http://structuredtext.sourceforge.net">
<target ids="entirely-below" names="entirely-below" refuri="http://structuredtext.sourceforge.net">
<target ids="not-indirect" names="not-indirect" refuri="uri_">
"""],
["""\
Indirect hyperlink targets:
.. _target1: reference_
.. _target2: `phrase-link reference`_
""",
"""\
<document source="test data">
<paragraph>
Indirect hyperlink targets:
<target ids="target1" names="target1" refname="reference">
<target ids="target2" names="target2" refname="phrase-link reference">
"""],
["""\
.. _a long target name:
.. _`a target name: including a colon (quoted)`:
.. _a target name\: including a colon (escaped):
""",
"""\
<document source="test data">
<target ids="a-long-target-name" names="a\ long\ target\ name">
<target ids="a-target-name-including-a-colon-quoted" names="a\ target\ name:\ including\ a\ colon\ (quoted)">
<target ids="a-target-name-including-a-colon-escaped" names="a\ target\ name:\ including\ a\ colon\ (escaped)">
"""],
["""\
.. _`target: No matching backquote.
.. _`: No matching backquote either.
""",
"""\
<document source="test data">
<comment xml:space="preserve">
_`target: No matching backquote.
<system_message level="2" line="1" source="test data" type="WARNING">
<paragraph>
malformed hyperlink target.
<comment xml:space="preserve">
_`: No matching backquote either.
<system_message level="2" line="2" source="test data" type="WARNING">
<paragraph>
malformed hyperlink target.
"""],
["""\
.. _a very long target name,
split across lines:
.. _`and another,
with backquotes`:
""",
"""\
<document source="test data">
<target ids="a-very-long-target-name-split-across-lines" names="a\ very\ long\ target\ name,\ split\ across\ lines">
<target ids="and-another-with-backquotes" names="and\ another,\ with\ backquotes">
"""],
["""\
External hyperlink:
.. _target: http://www.python.org/
""",
"""\
<document source="test data">
<paragraph>
External hyperlink:
<target ids="target" names="target" refuri="http://www.python.org/">
"""],
["""\
.. _email: [email protected]
.. _multi-line email: jdoe
@example.com
""",
"""\
<document source="test data">
<target ids="email" names="email" refuri="mailto:[email protected]">
<target ids="multi-line-email" names="multi-line\ email" refuri="mailto:[email protected]">
"""],
["""\
Malformed target:
.. __malformed: no good
Target beginning with an underscore:
.. _`_target`: OK
""",
"""\
<document source="test data">
<paragraph>
Malformed target:
<comment xml:space="preserve">
__malformed: no good
<system_message level="2" line="3" source="test data" type="WARNING">
<paragraph>
malformed hyperlink target.
<paragraph>
Target beginning with an underscore:
<target ids="target" names="_target" refuri="OK">
"""],
["""\
Duplicate external targets (different URIs):
.. _target: first
.. _target: second
""",
"""\
<document source="test data">
<paragraph>
Duplicate external targets (different URIs):
<target dupnames="target" ids="target" refuri="first">
<system_message backrefs="id1" level="2" line="5" source="test data" type="WARNING">
<paragraph>
Duplicate explicit target name: "target".
<target dupnames="target" ids="id1" refuri="second">
"""],
["""\
Duplicate external targets (same URIs):
.. _target: first
.. _target: first
""",
"""\
<document source="test data">
<paragraph>
Duplicate external targets (same URIs):
<target ids="target" names="target" refuri="first">
<system_message backrefs="id1" level="1" line="5" source="test data" type="INFO">
<paragraph>
Duplicate explicit target name: "target".
<target dupnames="target" ids="id1" refuri="first">
"""],
["""\
Duplicate implicit targets.
Title
=====
Paragraph.
Title
=====
Paragraph.
""",
"""\
<document source="test data">
<paragraph>
Duplicate implicit targets.
<section dupnames="title" ids="title">
<title>
Title
<paragraph>
Paragraph.
<section dupnames="title" ids="id1">
<title>
Title
<system_message backrefs="id1" level="1" line="9" source="test data" type="INFO">
<paragraph>
Duplicate implicit target name: "title".
<paragraph>
Paragraph.
"""],
["""\
Duplicate implicit/explicit targets.
Title
=====
.. _title:
Paragraph.
""",
"""\
<document source="test data">
<paragraph>
Duplicate implicit/explicit targets.
<section dupnames="title" ids="title">
<title>
Title
<system_message backrefs="id1" level="1" line="6" source="test data" type="INFO">
<paragraph>
Duplicate implicit target name: "title".
<target ids="id1" names="title">
<paragraph>
Paragraph.
"""],
["""\
Duplicate implicit/directive targets.
Title
=====
.. target-notes::
:name: title
""",
"""\
<document source="test data">
<paragraph>
Duplicate implicit/directive targets.
<section dupnames="title" ids="title">
<title>
Title
<pending ids="id1" names="title">
<system_message backrefs="id1" level="1" line="4" source="test data" type="INFO">
<paragraph>
Duplicate implicit target name: "title".
.. internal attributes:
.transform: docutils.transforms.references.TargetNotes
.details:
"""],
["""\
Duplicate explicit targets.
.. _title:
First.
.. _title:
Second.
.. _title:
Third.
""",
"""\
<document source="test data">
<paragraph>
Duplicate explicit targets.
<target dupnames="title" ids="title">
<paragraph>
First.
<system_message backrefs="id1" level="2" line="7" source="test data" type="WARNING">
<paragraph>
Duplicate explicit target name: "title".
<target dupnames="title" ids="id1">
<paragraph>
Second.
<system_message backrefs="id2" level="2" line="11" source="test data" type="WARNING">
<paragraph>
Duplicate explicit target name: "title".
<target dupnames="title" ids="id2">
<paragraph>
Third.
"""],
["""\
Duplicate explicit/directive targets.
.. _title:
First.
.. rubric:: this is a title too
:name: title
""",
"""\
<document source="test data">
<paragraph>
Duplicate explicit/directive targets.
<target dupnames="title" ids="title">
<paragraph>
First.
<rubric dupnames="title" ids="id1">
this is a title too
<system_message backrefs="id1" level="2" line="9" source="test data" type="WARNING">
<paragraph>
Duplicate explicit target name: "title".
"""],
["""\
Duplicate targets:
Target
======
Implicit section header target.
.. [TARGET] Citation target.
.. [#target] Autonumber-labeled footnote target.
.. _target:
Explicit internal target.
.. _target: Explicit_external_target
.. rubric:: directive with target
:name: Target
""",
"""\
<document source="test data">
<paragraph>
Duplicate targets:
<section dupnames="target" ids="target">
<title>
Target
<paragraph>
Implicit section header target.
<citation dupnames="target" ids="id1">
<label>
TARGET
<system_message backrefs="id1" level="1" line="8" source="test data" type="INFO">
<paragraph>
Duplicate implicit target name: "target".
<paragraph>
Citation target.
<footnote auto="1" dupnames="target" ids="id2">
<system_message backrefs="id2" level="2" line="10" source="test data" type="WARNING">
<paragraph>
Duplicate explicit target name: "target".
<paragraph>
Autonumber-labeled footnote target.
<system_message backrefs="id3" level="2" line="12" source="test data" type="WARNING">
<paragraph>
Duplicate explicit target name: "target".
<target dupnames="target" ids="id3">
<paragraph>
Explicit internal target.
<system_message backrefs="id4" level="2" line="16" source="test data" type="WARNING">
<paragraph>
Duplicate explicit target name: "target".
<target dupnames="target" ids="id4" refuri="Explicit_external_target">
<rubric dupnames="target" ids="id5">
directive with target
<system_message backrefs="id5" level="2" line="4" source="test data" type="WARNING">
<paragraph>
Duplicate explicit target name: "target".
"""],
["""\
.. _unescaped colon at end:: no good
.. _:: no good either
.. _escaped colon\:: OK
.. _`unescaped colon, quoted:`: OK
""",
"""\
<document source="test data">
<comment xml:space="preserve">
_unescaped colon at end:: no good
<system_message level="2" line="1" source="test data" type="WARNING">
<paragraph>
malformed hyperlink target.
<comment xml:space="preserve">
_:: no good either
<system_message level="2" line="3" source="test data" type="WARNING">
<paragraph>
malformed hyperlink target.
<target ids="escaped-colon" names="escaped\ colon:" refuri="OK">
<target ids="unescaped-colon-quoted" names="unescaped\ colon,\ quoted:" refuri="OK">
"""],
]
totest['anonymous_targets'] = [
["""\
Anonymous external hyperlink target:
.. __: http://w3c.org/
""",
"""\
<document source="test data">
<paragraph>
Anonymous external hyperlink target:
<target anonymous="1" ids="id1" refuri="http://w3c.org/">
"""],
["""\
Anonymous external hyperlink target:
__ http://w3c.org/
""",
"""\
<document source="test data">
<paragraph>
Anonymous external hyperlink target:
<target anonymous="1" ids="id1" refuri="http://w3c.org/">
"""],
["""\
Anonymous indirect hyperlink target:
.. __: reference_
""",
"""\
<document source="test data">
<paragraph>
Anonymous indirect hyperlink target:
<target anonymous="1" ids="id1" refname="reference">
"""],
["""\
Anonymous external hyperlink target, not indirect:
__ uri\\_
__ this URI ends with an underscore_
""",
"""\
<document source="test data">
<paragraph>
Anonymous external hyperlink target, not indirect:
<target anonymous="1" ids="id1" refuri="uri_">
<target anonymous="1" ids="id2" refuri="thisURIendswithanunderscore_">
"""],
["""\
Anonymous indirect hyperlink targets:
__ reference_
__ `a very long
reference`_
""",
"""\
<document source="test data">
<paragraph>
Anonymous indirect hyperlink targets:
<target anonymous="1" ids="id1" refname="reference">
<target anonymous="1" ids="id2" refname="a very long reference">
"""],
["""\
Mixed anonymous & named indirect hyperlink targets:
__ reference_
.. __: reference_
__ reference_
.. _target1: reference_
no blank line
.. _target2: reference_
__ reference_
.. __: reference_
__ reference_
no blank line
""",
"""\
<document source="test data">
<paragraph>
Mixed anonymous & named indirect hyperlink targets:
<target anonymous="1" ids="id1" refname="reference">
<target anonymous="1" ids="id2" refname="reference">
<target anonymous="1" ids="id3" refname="reference">
<target ids="target1" names="target1" refname="reference">
<system_message level="2" line="7" source="test data" type="WARNING">
<paragraph>
Explicit markup ends without a blank line; unexpected unindent.
<paragraph>
no blank line
<target ids="target2" names="target2" refname="reference">
<target anonymous="1" ids="id4" refname="reference">
<target anonymous="1" ids="id5" refname="reference">
<target anonymous="1" ids="id6" refname="reference">
<system_message level="2" line="13" source="test data" type="WARNING">
<paragraph>
Explicit markup ends without a blank line; unexpected unindent.
<paragraph>
no blank line
"""],
["""\
.. _
""",
"""\
<document source="test data">
<comment xml:space="preserve">
_
"""],
]
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
|
agpl-3.0
| -310,854,255,594,943,500 | -8,330,373,382,181,838,000 | 24.825516 | 120 | 0.60879 | false |
fuhongliang/erpnext
|
erpnext/patches/v6_0/set_default_title.py
|
19
|
1193
|
import frappe
def execute():
frappe.reload_doctype("Quotation")
frappe.db.sql("""update tabQuotation set title = customer_name""")
frappe.reload_doctype("Sales Order")
frappe.db.sql("""update `tabSales Order` set title = customer_name""")
frappe.reload_doctype("Delivery Note")
frappe.db.sql("""update `tabDelivery Note` set title = customer_name""")
frappe.reload_doctype("Material Request")
frappe.db.sql("""update `tabMaterial Request` set title = material_request_type""")
frappe.reload_doctype("Supplier Quotation")
frappe.db.sql("""update `tabSupplier Quotation` set title = supplier_name""")
frappe.reload_doctype("Purchase Order")
frappe.db.sql("""update `tabPurchase Order` set title = supplier_name""")
frappe.reload_doctype("Purchase Receipt")
frappe.db.sql("""update `tabPurchase Receipt` set title = supplier_name""")
frappe.reload_doctype("Purchase Invoice")
frappe.db.sql("""update `tabPurchase Invoice` set title = supplier_name""")
frappe.reload_doctype("Stock Entry")
frappe.db.sql("""update `tabStock Entry` set title = purpose""")
frappe.reload_doctype("Sales Invoice")
frappe.db.sql("""update `tabSales Invoice` set title = customer_name""")
|
agpl-3.0
| 1,250,049,343,234,260,500 | -7,828,873,536,800,085,000 | 36.28125 | 84 | 0.729254 | false |
Dhivyap/ansible
|
lib/ansible/modules/network/aci/aci_epg_to_contract.py
|
27
|
9836
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_epg_to_contract
short_description: Bind EPGs to Contracts (fv:RsCons, fv:RsProv)
description:
- Bind EPGs to Contracts on Cisco ACI fabrics.
notes:
- The C(tenant), C(app_profile), C(EPG), and C(Contract) used must exist before using this module in your playbook.
The M(aci_tenant), M(aci_ap), M(aci_epg), and M(aci_contract) modules can be used for this.
version_added: '2.4'
options:
ap:
description:
- Name of an existing application network profile, that will contain the EPGs.
type: str
aliases: [ app_profile, app_profile_name ]
contract:
description:
- The name of the contract.
type: str
aliases: [ contract_name ]
contract_type:
description:
- Determines if the EPG should Provide or Consume the Contract.
type: str
required: yes
choices: [ consumer, provider ]
epg:
description:
- The name of the end point group.
type: str
aliases: [ epg_name ]
priority:
description:
- QoS class.
- The APIC defaults to C(unspecified) when unset during creation.
type: str
choices: [ level1, level2, level3, unspecified ]
provider_match:
description:
- The matching algorithm for Provided Contracts.
- The APIC defaults to C(at_least_one) when unset during creation.
type: str
choices: [ all, at_least_one, at_most_one, none ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
tenant:
description:
- Name of an existing tenant.
type: str
aliases: [ tenant_name ]
extends_documentation_fragment: aci
seealso:
- module: aci_ap
- module: aci_epg
- module: aci_contract
- name: APIC Management Information Model reference
description: More information about the internal APIC classes B(fv:RsCons) and B(fv:RsProv).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Jacob McGill (@jmcgill298)
'''
EXAMPLES = r'''
- name: Add a new contract to EPG binding
aci_epg_to_contract:
host: apic
username: admin
password: SomeSecretPassword
tenant: anstest
ap: anstest
epg: anstest
contract: anstest_http
contract_type: provider
state: present
delegate_to: localhost
- name: Remove an existing contract to EPG binding
aci_epg_to_contract:
host: apic
username: admin
password: SomeSecretPassword
tenant: anstest
ap: anstest
epg: anstest
contract: anstest_http
contract_type: provider
state: absent
delegate_to: localhost
- name: Query a specific contract to EPG binding
aci_epg_to_contract:
host: apic
username: admin
password: SomeSecretPassword
tenant: anstest
ap: anstest
epg: anstest
contract: anstest_http
contract_type: provider
state: query
delegate_to: localhost
register: query_result
- name: Query all provider contract to EPG bindings
aci_epg_to_contract:
host: apic
username: admin
password: SomeSecretPassword
contract_type: provider
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
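# Map the consumer/provider choice to the corresponding APIC class and relative-name prefix.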
ACI_CLASS_MAPPING = dict(
consumer={
'class': 'fvRsCons',
'rn': 'rscons-',
},
provider={
'class': 'fvRsProv',
'rn': 'rsprov-',
},
)
PROVIDER_MATCH_MAPPING = dict(
all='All',
at_least_one='AtleastOne',
    at_most_one='AtmostOne',
none='None',
)
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
contract_type=dict(type='str', required=True, choices=['consumer', 'provider']),
ap=dict(type='str', aliases=['app_profile', 'app_profile_name']), # Not required for querying all objects
epg=dict(type='str', aliases=['epg_name']), # Not required for querying all objects
contract=dict(type='str', aliases=['contract_name']), # Not required for querying all objects
priority=dict(type='str', choices=['level1', 'level2', 'level3', 'unspecified']),
provider_match=dict(type='str', choices=['all', 'at_least_one', 'at_most_one', 'none']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
tenant=dict(type='str', aliases=['tenant_name']), # Not required for querying all objects
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['ap', 'contract', 'epg', 'tenant']],
['state', 'present', ['ap', 'contract', 'epg', 'tenant']],
],
)
ap = module.params['ap']
contract = module.params['contract']
contract_type = module.params['contract_type']
epg = module.params['epg']
priority = module.params['priority']
provider_match = module.params['provider_match']
if provider_match is not None:
provider_match = PROVIDER_MATCH_MAPPING[provider_match]
state = module.params['state']
tenant = module.params['tenant']
aci_class = ACI_CLASS_MAPPING[contract_type]["class"]
aci_rn = ACI_CLASS_MAPPING[contract_type]["rn"]
if contract_type == "consumer" and provider_match is not None:
module.fail_json(msg="the 'provider_match' is only configurable for Provided Contracts")
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
module_object=tenant,
target_filter={'name': tenant},
),
subclass_1=dict(
aci_class='fvAp',
aci_rn='ap-{0}'.format(ap),
module_object=ap,
target_filter={'name': ap},
),
subclass_2=dict(
aci_class='fvAEPg',
aci_rn='epg-{0}'.format(epg),
module_object=epg,
target_filter={'name': epg},
),
subclass_3=dict(
aci_class=aci_class,
aci_rn='{0}{1}'.format(aci_rn, contract),
module_object=contract,
target_filter={'tnVzBrCPName': contract},
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class=aci_class,
class_config=dict(
matchT=provider_match,
prio=priority,
tnVzBrCPName=contract,
),
)
aci.get_diff(aci_class=aci_class)
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
|
gpl-3.0
| -6,192,907,102,501,803,000 | -8,081,332,617,478,608,000 | 27.102857 | 141 | 0.604107 | false |
billy-inn/scikit-learn
|
examples/decomposition/plot_ica_vs_pca.py
|
306
|
3329
|
"""
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually, in the feature space, a comparison of
the results obtained with two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
|
bsd-3-clause
| 3,255,604,642,832,979,500 | 6,425,276,476,299,721,000 | 30.704762 | 79 | 0.644037 | false |
rcarrillocruz/ansible
|
lib/ansible/module_utils/facts/other/facter.py
|
232
|
2985
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.module_utils.facts.namespace import PrefixFactNamespace
from ansible.module_utils.facts.collector import BaseFactCollector
class FacterFactCollector(BaseFactCollector):
name = 'facter'
_fact_ids = set(['facter'])
def __init__(self, collectors=None, namespace=None):
namespace = PrefixFactNamespace(namespace_name='facter',
prefix='facter_')
super(FacterFactCollector, self).__init__(collectors=collectors,
namespace=namespace)
def find_facter(self, module):
facter_path = module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin'])
cfacter_path = module.get_bin_path('cfacter', opt_dirs=['/opt/puppetlabs/bin'])
# Prefer to use cfacter if available
if cfacter_path is not None:
facter_path = cfacter_path
return facter_path
def run_facter(self, module, facter_path):
# if facter is installed, and we can use --json because
# ruby-json is ALSO installed, include facter data in the JSON
rc, out, err = module.run_command(facter_path + " --puppet --json")
return rc, out, err
def get_facter_output(self, module):
facter_path = self.find_facter(module)
if not facter_path:
return None
rc, out, err = self.run_facter(module, facter_path)
if rc != 0:
return None
return out
def collect(self, module=None, collected_facts=None):
        # Note that this mirrors previous facter behavior, where there isn't
# a 'ansible_facter' key in the main fact dict, but instead, 'facter_whatever'
# items are added to the main dict.
facter_dict = {}
if not module:
return facter_dict
facter_output = self.get_facter_output(module)
        # TODO: if we fail, should we add an empty facter key or nothing?
if facter_output is None:
return facter_dict
try:
facter_dict = json.loads(facter_output)
except Exception:
# FIXME: maybe raise a FactCollectorError with some info attrs?
pass
return facter_dict
|
gpl-3.0
| -7,460,448,580,947,151,000 | -3,660,895,951,895,419,400 | 34.117647 | 87 | 0.646566 | false |
endlessm/chromium-browser
|
tools/site_compare/scrapers/chrome/chromebase.py
|
189
|
5358
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Does scraping for all currently-known versions of Chrome"""
import pywintypes
import types
from drivers import keyboard
from drivers import mouse
from drivers import windowing
# TODO: this has moved, use some logic to find it. For now,
# expects a subst k:.
DEFAULT_PATH = r"k:\chrome.exe"
def InvokeBrowser(path):
"""Invoke the Chrome browser.
Args:
path: full path to browser
Returns:
A tuple of (main window, process handle, address bar, render pane)
"""
# Reuse an existing instance of the browser if we can find one. This
# may not work correctly, especially if the window is behind other windows.
# TODO(jhaas): make this work with Vista
wnds = windowing.FindChildWindows(0, "Chrome_XPFrame")
if len(wnds):
wnd = wnds[0]
proc = None
else:
# Invoke Chrome
(proc, wnd) = windowing.InvokeAndWait(path)
# Get windows we'll need
address_bar = windowing.FindChildWindow(wnd, "Chrome_AutocompleteEdit")
render_pane = GetChromeRenderPane(wnd)
return (wnd, proc, address_bar, render_pane)
def Scrape(urls, outdir, size, pos, timeout, kwargs):
"""Invoke a browser, send it to a series of URLs, and save its output.
Args:
urls: list of URLs to scrape
outdir: directory to place output
size: size of browser window to use
pos: position of browser window
timeout: amount of time to wait for page to load
kwargs: miscellaneous keyword args
Returns:
None if success, else an error string
"""
if "path" in kwargs and kwargs["path"]: path = kwargs["path"]
else: path = DEFAULT_PATH
(wnd, proc, address_bar, render_pane) = InvokeBrowser(path)
# Resize and reposition the frame
windowing.MoveAndSizeWindow(wnd, pos, size, render_pane)
# Visit each URL we're given
if type(urls) in types.StringTypes: urls = [urls]
timedout = False
for url in urls:
# Double-click in the address bar, type the name, and press Enter
mouse.ClickInWindow(address_bar)
keyboard.TypeString(url, 0.1)
keyboard.TypeString("\n")
# Wait for the page to finish loading
load_time = windowing.WaitForThrobber(wnd, (20, 16, 36, 32), timeout)
timedout = load_time < 0
if timedout:
break
# Scrape the page
image = windowing.ScrapeWindow(render_pane)
# Save to disk
if "filename" in kwargs:
if callable(kwargs["filename"]):
filename = kwargs["filename"](url)
else:
filename = kwargs["filename"]
else:
filename = windowing.URLtoFilename(url, outdir, ".bmp")
image.save(filename)
if proc:
windowing.SetForegroundWindow(wnd)
# Send Alt-F4, then wait for process to end
keyboard.TypeString(r"{\4}", use_modifiers=True)
if not windowing.WaitForProcessExit(proc, timeout):
windowing.EndProcess(proc)
return "crashed"
if timedout:
return "timeout"
return None
def Time(urls, size, timeout, kwargs):
"""Measure how long it takes to load each of a series of URLs
Args:
urls: list of URLs to time
size: size of browser window to use
timeout: amount of time to wait for page to load
kwargs: miscellaneous keyword args
Returns:
A list of tuples (url, time). "time" can be "crashed" or "timeout"
"""
if "path" in kwargs and kwargs["path"]: path = kwargs["path"]
else: path = DEFAULT_PATH
proc = None
# Visit each URL we're given
if type(urls) in types.StringTypes: urls = [urls]
ret = []
for url in urls:
try:
# Invoke the browser if necessary
if not proc:
(wnd, proc, address_bar, render_pane) = InvokeBrowser(path)
# Resize and reposition the frame
windowing.MoveAndSizeWindow(wnd, (0,0), size, render_pane)
# Double-click in the address bar, type the name, and press Enter
mouse.ClickInWindow(address_bar)
keyboard.TypeString(url, 0.1)
keyboard.TypeString("\n")
# Wait for the page to finish loading
load_time = windowing.WaitForThrobber(wnd, (20, 16, 36, 32), timeout)
timedout = load_time < 0
if timedout:
load_time = "timeout"
# Send an alt-F4 to make the browser close; if this times out,
# we've probably got a crash
windowing.SetForegroundWindow(wnd)
keyboard.TypeString(r"{\4}", use_modifiers=True)
if not windowing.WaitForProcessExit(proc, timeout):
windowing.EndProcess(proc)
load_time = "crashed"
proc = None
except pywintypes.error:
proc = None
load_time = "crashed"
ret.append( (url, load_time) )
if proc:
windowing.SetForegroundWindow(wnd)
keyboard.TypeString(r"{\4}", use_modifiers=True)
if not windowing.WaitForProcessExit(proc, timeout):
windowing.EndProcess(proc)
return ret
def main():
# We're being invoked rather than imported, so run some tests
path = r"c:\sitecompare\scrapes\chrome\0.1.97.0"
windowing.PreparePath(path)
# Scrape three sites and save the results
Scrape([
"http://www.microsoft.com",
"http://www.google.com",
"http://www.sun.com"],
path, (1024, 768), (0, 0))
return 0
if __name__ == "__main__":
sys.exit(main())
|
bsd-3-clause
| 7,359,626,323,380,886,000 | 6,991,424,106,857,465,000 | 25.924623 | 77 | 0.667786 | false |
jmesmon/trifles
|
2011s_lab2/gen_spice2.py
|
1
|
4333
|
#! /usr/bin/env python
act2_spice = """\
* Lab 1, Act 2, BJT {bjt}, Part {part}, Plot Num {qn}
Vs Vi1 0 {wave}
Vcc Vi2 0 DC 5
RC1 Vi2 Vo1 {rc}
RB1 Vi1 Vb {rb}
Q1 Vo1 Vb 0 {bjt}
{extra}
* Model for 2N3904 NPN BJT (from Eval library in Pspice)
.model 2N3904 NPN(Is=6.734f Xti=3 Eg=1.11 Vaf=74.03 Bf=416.4 Ne=1.259
+ Ise=6.734f Ikf=66.78m Xtb=1.5 Br=.7371 Nc=2 Isc=0 Ikr=0 Rc=1
+ Cjc=3.638p Mjc=.3085 Vjc=.75 Fc=.5 Cje=4.493p Mje=.2593 Vje=.75
+ Tr=239.5n Tf=301.2p Itf=.4 Vtf=4 Xtf=2 Rb=10)
.MODEL tip31 npn
+IS=1e-09 BF=3656.16 NF=1.23899 VAF=10
+IKF=0.0333653 ISE=1e-08 NE=2.29374 BR=0.1
+NR=1.5 VAR=100 IKR=0.333653 ISC=1e-08
+NC=1.75728 RB=6.15083 IRB=100 RBM=0.00113049
+RE=0.0001 RC=0.0491489 XTB=50 XTI=1
+EG=1.05 CJE=3.26475e-10 VJE=0.446174 MJE=0.464221
+TF=2.06218e-09 XTF=15.0842 VTF=25.7317 ITF=0.001
+CJC=3.07593e-10 VJC=0.775484 MJC=0.476498 XCJC=0.750493
+FC=0.796407 CJS=0 VJS=0.75 MJS=0.5
+TR=9.57121e-06 PTF=0 KF=0 AF=1
.control
{action}
hardcopy {fname}.eps {plot}
.endc
.end
"""
def set_freq(defs):
freq = defs['freq']
period = 1.0 / freq
defs['period'] = period
defs['pw'] = period / 2
def a2():
defs = {
'qn': 0,
}
models = [ {'bjt': 'tip31'},
{'bjt': '2n3904'} ]
parts = [ {
'part': 1,
'action': 'dc Vs 0 5 0.2',
'extra': '',
'plot': 'V(Vo1) V(Vi1)',
'rc': 470,
'rb': 2000,
'wave': 'DC 0'
}, {
'part':2,
'action': 'tran {ts} {all_time}',
'extra': '',
'plot': 'V(Vo1) V(Vi1)',
'wave': 'PULSE( {Vil}, {Vih}, 0, {ts}, {ts}, {pw}, {period} )',
'ts': '10NS',
'freq': 10e3,
'_over' : [
{ 'rc': 1e3, 'rb': 10e3, 'Vil':0, 'Vih':5 },
{ 'rc': .1e3,'rb': 1e3, 'Vil':0, 'Vih':5 },
{ 'rc': .1e3,'rb': 1e3, 'Vil':-5, 'Vih':5}
]
}, {
'part':3,
'rb':2000,
'rc':470,
'extra': """\
* attach shotkey diode between B and C
D1 Vb Vo1 SR102
""",
'_over': [
# p1
{ 'freq': 10e3,
'wave': 'PULSE( 0, 5, 0, {ts}, {ts}, {pw}, {period} )',
'ts':'10NS',
'action': 'tran {ts} {all_time}',
'plot': 'V(Vo1) V(Vi1)',
}, { # p2
'freq': 10e3,
'wave': 'PULSE( -5, 5, 0, {ts}, {ts}, {pw}, {period} )',
'ts':'2NS',
'action': 'tran {ts} {all_time}',
'plot': 'V(Vo1) V(Vi1)'
}, { # p3
            # OH god, I need current measurements.
'wave': 'DC 5',
'plot': 'I(Vs)',
'action': 'tran 2NS 4NS'
}
]
}, {
'part': 4,
'rb': 2000,
'rc': 470,
'extra': """\
* attach a cap across Vi1 and Vb
C1 Vi1 Vb 1000pF
""",
'wave': 'PULSE( -5 , 5, 0, {ts}, {ts}, {pw}, {period} )',
'action': 'tran {ts} {all_time}',
'plot': 'V(Vo1) V(Vi1)',
'ts': '10NS',
'freq': 10e3
} ]
for model in models:
m_defs = dict(defs.items() + model.items())
for part in parts:
p_defs = dict(m_defs.items() + part.items())
defs['qn'] = p_defs['qn'] = m_defs['qn'] = proc(p_defs)
def proc(defs):
if '_over' in defs:
cdefs = dict(defs.items())
del cdefs['_over']
qn = defs['qn']
for inner in defs['_over']:
n_defs = dict(cdefs.items() + inner.items())
qn = cdefs['qn'] = proc(n_defs)
return qn
else:
defs['qn'] = defs['qn'] + 1
fname = '{bjt}_{part}_{qn:02}'.format(**defs)
defs['fname'] = fname
try:
set_freq(defs)
defs['all_time'] = defs['period'] * 2
except:
pass
defs['action'] = defs['action'].format(**defs)
defs['wave'] = defs['wave'].format(**defs)
f = open(fname + '.spice.gen', 'w')
f.write(act2_spice.format(**defs))
return defs['qn']
if __name__ == "__main__":
a2()
|
gpl-3.0
| -7,587,171,676,884,989,000 | -5,630,463,420,309,496,000 | 26.251572 | 76 | 0.426956 | false |
ayushin78/coala
|
coalib/bears/GlobalBear.py
|
10
|
1175
|
from coalib.bears.Bear import Bear
from coalib.bears.BEAR_KIND import BEAR_KIND
class GlobalBear(Bear):
"""
    A GlobalBear is able to analyze semantic facts across several files.
    The results of a GlobalBear will be presented grouped by the origin Bear.
    Therefore, results spanning multiple files are allowed and will be
    handled correctly.
    If you only look at one file at a time anyway, a LocalBear is a better
    fit for your needs. (It is also better for performance and usability,
    for both user and developer.)
"""
def __init__(self,
file_dict, # filename : file contents
section,
message_queue,
timeout=0):
Bear.__init__(self, section, message_queue, timeout)
self.file_dict = file_dict
@staticmethod
def kind():
return BEAR_KIND.GLOBAL
def run(self,
*args,
dependency_results=None,
**kwargs):
"""
Handles all files in file_dict.
        :return: A list of Result objects.
"""
raise NotImplementedError(
'This function has to be implemented for a runnable bear.')
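# A minimal sketch of a concrete global bear (illustrative only; the class
# name and behaviour are made up and are not part of coala itself):
#
#   class FileCountBear(GlobalBear):
#       def run(self):
#           # A global bear sees every file at once through self.file_dict,
#           # so cross-file checks can be performed here.
#           return []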
|
agpl-3.0
| -6,944,658,783,304,338,000 | 9,199,917,550,005,921,000 | 28.375 | 78 | 0.611064 | false |
ajose01/rethinkdb
|
external/v8_3.30.33.16/build/gyp/test/win/gyptest-cl-optimizations.py
|
247
|
3416
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure optimization settings are extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['ninja'])
CHDIR = 'compiler-flags'
test.run_gyp('optimizations.gyp', chdir=CHDIR)
# It's hard to map flags to output contents in a non-fragile way (especially
# handling both 2008/2010), so just verify the correct ninja command line
# contents.
ninja_file = test.built_file_path('obj/test_opt_off.ninja', chdir=CHDIR)
test.must_contain(ninja_file, 'cflags = /Od')
ninja_file = test.built_file_path('obj/test_opt_lev_size.ninja', chdir=CHDIR)
test.must_contain(ninja_file, 'cflags = /O1')
ninja_file = test.built_file_path('obj/test_opt_lev_speed.ninja', chdir=CHDIR)
test.must_contain(ninja_file, 'cflags = /O2')
ninja_file = test.built_file_path('obj/test_opt_lev_max.ninja', chdir=CHDIR)
test.must_contain(ninja_file, 'cflags = /Ox')
ninja_file = test.built_file_path('obj/test_opt_unset.ninja', chdir=CHDIR)
test.must_not_contain(ninja_file, '/Od')
test.must_not_contain(ninja_file, '/O1')
test.must_not_contain(ninja_file, '/Ox')
# Set by default if none specified.
test.must_contain(ninja_file, '/O2')
ninja_file = test.built_file_path('obj/test_opt_fpo.ninja', chdir=CHDIR)
test.must_contain(ninja_file, '/Oy')
test.must_not_contain(ninja_file, '/Oy-')
ninja_file = test.built_file_path('obj/test_opt_fpo_off.ninja', chdir=CHDIR)
test.must_contain(ninja_file, '/Oy-')
ninja_file = test.built_file_path('obj/test_opt_intrinsic.ninja',
chdir=CHDIR)
test.must_contain(ninja_file, '/Oi')
test.must_not_contain(ninja_file, '/Oi-')
ninja_file = test.built_file_path('obj/test_opt_intrinsic_off.ninja',
chdir=CHDIR)
test.must_contain(ninja_file, '/Oi-')
ninja_file = test.built_file_path('obj/test_opt_inline_off.ninja',
chdir=CHDIR)
test.must_contain(ninja_file, '/Ob0')
ninja_file = test.built_file_path('obj/test_opt_inline_manual.ninja',
chdir=CHDIR)
test.must_contain(ninja_file, '/Ob1')
ninja_file = test.built_file_path('obj/test_opt_inline_auto.ninja',
chdir=CHDIR)
test.must_contain(ninja_file, '/Ob2')
ninja_file = test.built_file_path('obj/test_opt_neither.ninja',
chdir=CHDIR)
test.must_not_contain(ninja_file, '/Os')
test.must_not_contain(ninja_file, '/Ot')
ninja_file = test.built_file_path('obj/test_opt_size.ninja',
chdir=CHDIR)
test.must_contain(ninja_file, '/Os')
ninja_file = test.built_file_path('obj/test_opt_speed.ninja',
chdir=CHDIR)
test.must_contain(ninja_file, '/Ot')
ninja_file = test.built_file_path('obj/test_opt_wpo.ninja',
chdir=CHDIR)
test.must_contain(ninja_file, '/GL')
ninja_file = test.built_file_path('obj/test_opt_sp.ninja',
chdir=CHDIR)
test.must_contain(ninja_file, '/GF')
ninja_file = test.built_file_path('obj/test_opt_sp_off.ninja',
chdir=CHDIR)
test.must_not_contain(ninja_file, '/GF')
ninja_file = test.built_file_path('obj/test_opt_fso.ninja',
chdir=CHDIR)
test.must_contain(ninja_file, '/GT')
ninja_file = test.built_file_path('obj/test_opt_fso_off.ninja',
chdir=CHDIR)
test.must_not_contain(ninja_file, '/GT')
test.pass_test()
|
agpl-3.0
| -746,973,071,959,939,500 | -1,939,859,312,892,679,000 | 31.533333 | 80 | 0.68589 | false |
ibinti/intellij-community
|
plugins/hg4idea/testData/bin/mercurial/osutil.py
|
90
|
5363
|
# osutil.py - pure Python version of osutil.c
#
# Copyright 2009 Matt Mackall <[email protected]> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import os
import stat as statmod
def _mode_to_kind(mode):
if statmod.S_ISREG(mode):
return statmod.S_IFREG
if statmod.S_ISDIR(mode):
return statmod.S_IFDIR
if statmod.S_ISLNK(mode):
return statmod.S_IFLNK
if statmod.S_ISBLK(mode):
return statmod.S_IFBLK
if statmod.S_ISCHR(mode):
return statmod.S_IFCHR
if statmod.S_ISFIFO(mode):
return statmod.S_IFIFO
if statmod.S_ISSOCK(mode):
return statmod.S_IFSOCK
return mode
def listdir(path, stat=False, skip=None):
'''listdir(path, stat=False) -> list_of_tuples
Return a sorted list containing information about the entries
in the directory.
If stat is True, each element is a 3-tuple:
(name, type, stat object)
Otherwise, each element is a 2-tuple:
(name, type)
'''
result = []
prefix = path
if not prefix.endswith(os.sep):
prefix += os.sep
names = os.listdir(path)
names.sort()
for fn in names:
st = os.lstat(prefix + fn)
if fn == skip and statmod.S_ISDIR(st.st_mode):
return []
if stat:
result.append((fn, _mode_to_kind(st.st_mode), st))
else:
result.append((fn, _mode_to_kind(st.st_mode)))
return result
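# Illustrative usage (not part of Mercurial itself): each entry is a
# (name, kind) pair, or (name, kind, stat) when stat=True, with kind being
# one of the stat.S_IF* constants produced by _mode_to_kind() above.
#
#   for name, kind in listdir('.'):
#       if kind == statmod.S_IFDIR:
#           pass  # descend into the sub-directory, for example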
if os.name != 'nt':
posixfile = open
else:
import ctypes, msvcrt
_kernel32 = ctypes.windll.kernel32
_DWORD = ctypes.c_ulong
_LPCSTR = _LPSTR = ctypes.c_char_p
_HANDLE = ctypes.c_void_p
_INVALID_HANDLE_VALUE = _HANDLE(-1).value
# CreateFile
_FILE_SHARE_READ = 0x00000001
_FILE_SHARE_WRITE = 0x00000002
_FILE_SHARE_DELETE = 0x00000004
_CREATE_ALWAYS = 2
_OPEN_EXISTING = 3
_OPEN_ALWAYS = 4
_GENERIC_READ = 0x80000000
_GENERIC_WRITE = 0x40000000
_FILE_ATTRIBUTE_NORMAL = 0x80
# open_osfhandle flags
_O_RDONLY = 0x0000
_O_RDWR = 0x0002
_O_APPEND = 0x0008
_O_TEXT = 0x4000
_O_BINARY = 0x8000
# types of parameters of C functions used (required by pypy)
_kernel32.CreateFileA.argtypes = [_LPCSTR, _DWORD, _DWORD, ctypes.c_void_p,
_DWORD, _DWORD, _HANDLE]
_kernel32.CreateFileA.restype = _HANDLE
def _raiseioerror(name):
err = ctypes.WinError()
raise IOError(err.errno, '%s: %s' % (name, err.strerror))
class posixfile(object):
'''a file object aiming for POSIX-like semantics
CPython's open() returns a file that was opened *without* setting the
_FILE_SHARE_DELETE flag, which causes rename and unlink to abort.
This even happens if any hardlinked copy of the file is in open state.
We set _FILE_SHARE_DELETE here, so files opened with posixfile can be
renamed and deleted while they are held open.
Note that if a file opened with posixfile is unlinked, the file
remains but cannot be opened again or be recreated under the same name,
until all reading processes have closed the file.'''
def __init__(self, name, mode='r', bufsize=-1):
if 'b' in mode:
flags = _O_BINARY
else:
flags = _O_TEXT
m0 = mode[0]
if m0 == 'r' and '+' not in mode:
flags |= _O_RDONLY
access = _GENERIC_READ
else:
# work around http://support.microsoft.com/kb/899149 and
# set _O_RDWR for 'w' and 'a', even if mode has no '+'
flags |= _O_RDWR
access = _GENERIC_READ | _GENERIC_WRITE
if m0 == 'r':
creation = _OPEN_EXISTING
elif m0 == 'w':
creation = _CREATE_ALWAYS
elif m0 == 'a':
creation = _OPEN_ALWAYS
flags |= _O_APPEND
else:
raise ValueError("invalid mode: %s" % mode)
fh = _kernel32.CreateFileA(name, access,
_FILE_SHARE_READ | _FILE_SHARE_WRITE | _FILE_SHARE_DELETE,
None, creation, _FILE_ATTRIBUTE_NORMAL, None)
if fh == _INVALID_HANDLE_VALUE:
_raiseioerror(name)
fd = msvcrt.open_osfhandle(fh, flags)
if fd == -1:
_kernel32.CloseHandle(fh)
_raiseioerror(name)
f = os.fdopen(fd, mode, bufsize)
# unfortunately, f.name is '<fdopen>' at this point -- so we store
# the name on this wrapper. We cannot just assign to f.name,
# because that attribute is read-only.
object.__setattr__(self, 'name', name)
object.__setattr__(self, '_file', f)
def __iter__(self):
return self._file
def __getattr__(self, name):
return getattr(self._file, name)
def __setattr__(self, name, value):
'''mimics the read-only attributes of Python file objects
by raising 'TypeError: readonly attribute' if someone tries:
f = posixfile('foo.txt')
f.name = 'bla' '''
return self._file.__setattr__(name, value)
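# Illustrative usage (not part of Mercurial itself): because the handle is
# opened with _FILE_SHARE_DELETE, the file below could be renamed or unlinked
# on Windows while still open, which the built-in open() would refuse with a
# sharing violation.
#
#   f = posixfile('example.txt', 'w')
#   f.write('data')
#   os.rename('example.txt', 'example-renamed.txt')
#   f.close()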
|
apache-2.0
| -2,737,856,423,673,754,000 | 1,523,718,707,919,300,000 | 30.547059 | 79 | 0.573187 | false |
mcepl/rope
|
rope/contrib/generate.py
|
3
|
14402
|
import rope.base.evaluate
from rope.base import libutils
from rope.base import (change, pyobjects, exceptions, pynames, worder,
codeanalyze)
from rope.refactor import sourceutils, importutils, functionutils, suites
def create_generate(kind, project, resource, offset, goal_resource=None):
"""A factory for creating `Generate` objects
`kind` can be 'variable', 'function', 'class', 'module' or
'package'.
"""
generate = eval('Generate' + kind.title())
return generate(project, resource, offset, goal_resource=goal_resource)
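# Illustrative call (variable names are placeholders, not fixed rope API):
# build a generator for a missing function at the given offset and apply the
# resulting changeset to the project.
#
#   generator = create_generate('function', project, resource, offset)
#   project.do(generator.get_changes())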
def create_module(project, name, sourcefolder=None):
"""Creates a module and returns a `rope.base.resources.File`"""
if sourcefolder is None:
sourcefolder = project.root
packages = name.split('.')
parent = sourcefolder
for package in packages[:-1]:
parent = parent.get_child(package)
return parent.create_file(packages[-1] + '.py')
def create_package(project, name, sourcefolder=None):
"""Creates a package and returns a `rope.base.resources.Folder`"""
if sourcefolder is None:
sourcefolder = project.root
packages = name.split('.')
parent = sourcefolder
for package in packages[:-1]:
parent = parent.get_child(package)
made_packages = parent.create_folder(packages[-1])
made_packages.create_file('__init__.py')
return made_packages
class _Generate(object):
def __init__(self, project, resource, offset, goal_resource=None):
self.project = project
self.resource = resource
self.goal_resource = goal_resource
self.info = self._generate_info(project, resource, offset)
self.name = self.info.get_name()
self._check_exceptional_conditions()
def _generate_info(self, project, resource, offset):
return _GenerationInfo(project.pycore, resource, offset, self.goal_resource)
def _check_exceptional_conditions(self):
if self.info.element_already_exists():
raise exceptions.RefactoringError(
'Element <%s> already exists.' % self.name)
if not self.info.primary_is_found():
raise exceptions.RefactoringError(
'Cannot determine the scope <%s> should be defined in.' %
self.name)
def get_changes(self):
changes = change.ChangeSet('Generate %s <%s>' %
(self._get_element_kind(), self.name))
indents = self.info.get_scope_indents()
blanks = self.info.get_blank_lines()
base_definition = sourceutils.fix_indentation(self._get_element(),
indents)
definition = '\n' * blanks[0] + base_definition + '\n' * blanks[1]
resource = self.info.get_insertion_resource()
start, end = self.info.get_insertion_offsets()
collector = codeanalyze.ChangeCollector(resource.read())
collector.add_change(start, end, definition)
changes.add_change(change.ChangeContents(
resource, collector.get_changed()))
if self.goal_resource:
relative_import = _add_relative_import_to_module(self.project, self.resource, self.goal_resource, self.name)
changes.add_change(relative_import)
return changes
def get_location(self):
return (self.info.get_insertion_resource(),
self.info.get_insertion_lineno())
def _get_element_kind(self):
raise NotImplementedError()
def _get_element(self):
raise NotImplementedError()
class GenerateFunction(_Generate):
def _generate_info(self, project, resource, offset):
return _FunctionGenerationInfo(project.pycore, resource, offset)
def _get_element(self):
decorator = ''
args = []
if self.info.is_static_method():
decorator = '@staticmethod\n'
if self.info.is_method() or self.info.is_constructor() or \
self.info.is_instance():
args.append('self')
args.extend(self.info.get_passed_args())
definition = '%sdef %s(%s):\n pass\n' % (decorator, self.name,
', '.join(args))
return definition
def _get_element_kind(self):
return 'Function'
class GenerateVariable(_Generate):
def _get_element(self):
return '%s = None\n' % self.name
def _get_element_kind(self):
return 'Variable'
class GenerateClass(_Generate):
def _get_element(self):
return 'class %s(object):\n pass\n' % self.name
def _get_element_kind(self):
return 'Class'
class GenerateModule(_Generate):
def get_changes(self):
package = self.info.get_package()
changes = change.ChangeSet('Generate Module <%s>' % self.name)
new_resource = self.project.get_file('%s/%s.py' %
(package.path, self.name))
if new_resource.exists():
raise exceptions.RefactoringError(
'Module <%s> already exists' % new_resource.path)
changes.add_change(change.CreateResource(new_resource))
changes.add_change(_add_import_to_module(
self.project, self.resource, new_resource))
return changes
def get_location(self):
package = self.info.get_package()
return (package.get_child('%s.py' % self.name), 1)
class GeneratePackage(_Generate):
def get_changes(self):
package = self.info.get_package()
changes = change.ChangeSet('Generate Package <%s>' % self.name)
new_resource = self.project.get_folder('%s/%s' %
(package.path, self.name))
if new_resource.exists():
raise exceptions.RefactoringError(
'Package <%s> already exists' % new_resource.path)
changes.add_change(change.CreateResource(new_resource))
changes.add_change(_add_import_to_module(
self.project, self.resource, new_resource))
child = self.project.get_folder(package.path + '/' + self.name)
changes.add_change(change.CreateFile(child, '__init__.py'))
return changes
def get_location(self):
package = self.info.get_package()
child = package.get_child(self.name)
return (child.get_child('__init__.py'), 1)
def _add_import_to_module(project, resource, imported):
pymodule = project.get_pymodule(resource)
import_tools = importutils.ImportTools(project)
module_imports = import_tools.module_imports(pymodule)
module_name = libutils.modname(imported)
new_import = importutils.NormalImport(((module_name, None), ))
module_imports.add_import(new_import)
return change.ChangeContents(resource, module_imports.get_changed_source())
def _add_relative_import_to_module(project, resource, imported, name):
pymodule = project.get_pymodule(resource)
import_tools = importutils.ImportTools(project)
module_imports = import_tools.module_imports(pymodule)
new_import = import_tools.get_from_import(imported, name)
module_imports.add_import(new_import)
return change.ChangeContents(resource, module_imports.get_changed_source())
class _GenerationInfo(object):
def __init__(self, pycore, resource, offset, goal_resource=None):
self.pycore = pycore
self.resource = resource
self.offset = offset
self.goal_resource = goal_resource
self.source_pymodule = self.pycore.project.get_pymodule(resource)
finder = rope.base.evaluate.ScopeNameFinder(self.source_pymodule)
self.primary, self.pyname = finder.get_primary_and_pyname_at(offset)
self._init_fields()
def _init_fields(self):
self.source_scope = self._get_source_scope()
self.goal_scope = self._get_goal_scope()
self.goal_pymodule = self._get_goal_module(self.goal_scope)
def _get_goal_scope(self):
if self.primary is None:
if self.goal_resource:
return self.pycore.project.get_pymodule(self.goal_resource).get_scope()
else:
return self._get_source_scope()
pyobject = self.primary.get_object()
if isinstance(pyobject, pyobjects.PyDefinedObject):
return pyobject.get_scope()
elif isinstance(pyobject.get_type(), pyobjects.PyClass):
return pyobject.get_type().get_scope()
def _get_goal_module(self, scope):
if scope is None:
return
while scope.parent is not None:
scope = scope.parent
return scope.pyobject
def _get_source_scope(self):
module_scope = self.source_pymodule.get_scope()
lineno = self.source_pymodule.lines.get_line_number(self.offset)
return module_scope.get_inner_scope_for_line(lineno)
def get_insertion_lineno(self):
lines = self.goal_pymodule.lines
if self.goal_scope == self.source_scope:
line_finder = self.goal_pymodule.logical_lines
lineno = lines.get_line_number(self.offset)
lineno = line_finder.logical_line_in(lineno)[0]
root = suites.ast_suite_tree(self.goal_scope.pyobject.get_ast())
suite = root.find_suite(lineno)
indents = sourceutils.get_indents(lines, lineno)
while self.get_scope_indents() < indents:
lineno = suite.get_start()
indents = sourceutils.get_indents(lines, lineno)
suite = suite.parent
return lineno
else:
return min(self.goal_scope.get_end() + 1, lines.length())
def get_insertion_resource(self):
return self.goal_pymodule.get_resource()
def get_insertion_offsets(self):
if self.goal_scope.get_kind() == 'Class':
start, end = sourceutils.get_body_region(self.goal_scope.pyobject)
if self.goal_pymodule.source_code[start:end].strip() == 'pass':
return start, end
lines = self.goal_pymodule.lines
start = lines.get_line_start(self.get_insertion_lineno())
return (start, start)
def get_scope_indents(self):
if self.goal_scope.get_kind() == 'Module':
return 0
return sourceutils.get_indents(self.goal_pymodule.lines,
self.goal_scope.get_start()) + 4
def get_blank_lines(self):
if self.goal_scope.get_kind() == 'Module':
base_blanks = 2
if self.goal_pymodule.source_code.strip() == '':
base_blanks = 0
if self.goal_scope.get_kind() == 'Class':
base_blanks = 1
if self.goal_scope.get_kind() == 'Function':
base_blanks = 0
if self.goal_scope == self.source_scope:
return (0, base_blanks)
return (base_blanks, 0)
def get_package(self):
primary = self.primary
if self.primary is None:
return self.pycore.project.get_source_folders()[0]
if isinstance(primary.get_object(), pyobjects.PyPackage):
return primary.get_object().get_resource()
raise exceptions.RefactoringError(
'A module/package can be only created in a package.')
def primary_is_found(self):
return self.goal_scope is not None
def element_already_exists(self):
if self.pyname is None or isinstance(self.pyname, pynames.UnboundName):
return False
return self.get_name() in self.goal_scope.get_defined_names()
def get_name(self):
return worder.get_name_at(self.resource, self.offset)
class _FunctionGenerationInfo(_GenerationInfo):
def _get_goal_scope(self):
if self.is_constructor():
return self.pyname.get_object().get_scope()
if self.is_instance():
return self.pyname.get_object().get_type().get_scope()
if self.primary is None:
return self._get_source_scope()
pyobject = self.primary.get_object()
if isinstance(pyobject, pyobjects.PyDefinedObject):
return pyobject.get_scope()
elif isinstance(pyobject.get_type(), pyobjects.PyClass):
return pyobject.get_type().get_scope()
def element_already_exists(self):
if self.pyname is None or isinstance(self.pyname, pynames.UnboundName):
return False
return self.get_name() in self.goal_scope.get_defined_names()
def is_static_method(self):
return self.primary is not None and \
isinstance(self.primary.get_object(), pyobjects.PyClass)
def is_method(self):
return self.primary is not None and \
isinstance(self.primary.get_object().get_type(), pyobjects.PyClass)
def is_constructor(self):
return self.pyname is not None and \
isinstance(self.pyname.get_object(), pyobjects.PyClass)
def is_instance(self):
if self.pyname is None:
return False
pyobject = self.pyname.get_object()
return isinstance(pyobject.get_type(), pyobjects.PyClass)
def get_name(self):
if self.is_constructor():
return '__init__'
if self.is_instance():
return '__call__'
return worder.get_name_at(self.resource, self.offset)
def get_passed_args(self):
result = []
source = self.source_pymodule.source_code
finder = worder.Worder(source)
if finder.is_a_function_being_called(self.offset):
start, end = finder.get_primary_range(self.offset)
parens_start, parens_end = finder.get_word_parens_range(end - 1)
call = source[start:parens_end]
parser = functionutils._FunctionParser(call, False)
args, keywords = parser.get_parameters()
for arg in args:
if self._is_id(arg):
result.append(arg)
else:
result.append('arg%d' % len(result))
for name, value in keywords:
result.append(name)
return result
def _is_id(self, arg):
def id_or_underline(c):
return c.isalpha() or c == '_'
for c in arg:
if not id_or_underline(c) and not c.isdigit():
return False
return id_or_underline(arg[0])
|
lgpl-3.0
| 9,086,445,505,280,848,000 | 7,520,959,147,018,621,000 | 37 | 120 | 0.611512 | false |
sigma-random/androguard
|
androguard/gui/treewindow.py
|
8
|
4341
|
from PySide import QtCore, QtGui
from androguard.core import androconf
from androguard.gui.xrefwindow import XrefDialog
from androguard.gui.sourcewindow import SourceWindow
from androguard.gui.helpers import classdot2class, Signature
class TreeWindow(QtGui.QTreeWidget):
    '''Tree widget listing the classes of the analyzed APK/DEX,
    grouped by their package path.
    '''
def __init__(self, parent=None, win=None):
super(TreeWindow, self).__init__(parent)
self.itemDoubleClicked.connect(self.itemDoubleClickedHandler)
self.mainwin = win
self.createActions()
self.header().close()
def fill(self, classes):
'''Parse all the paths (['Lcom/sogeti/example/myclass/MyActivity$1;', ...])
and build a tree using the QTreeWidgetItem insertion method.'''
root_path_node = ({}, self)
for c in sorted(classes, key=lambda c: c.name):
sig = Signature(c)
path_node = root_path_node
# Namespaces
for path in sig.class_path:
if path not in path_node[0]:
path_node[0][path] = ({},
QtGui.QTreeWidgetItem(path_node[1]))
path_node[0][path][1].setText(0, path)
path_node = path_node[0][path]
# Class
path_node[0][path] = ({},
QtGui.QTreeWidgetItem(path_node[1]))
path_node[0][path][1].setText(0, sig.class_name)
def item2path(self, item, column=0):
'''Browse all parents from QTreeWidgetItem item
in order to rebuild the complete path
Return both complete path (ex: "Landroid/support/AccessibilityServiceInfoCompat$1;")
and path_elts (ex: [u'Landroid', u'support', u'AccessibilityServiceInfoCompat$1;'])
'''
path_elts = []
while item is not None:
# print item.text(column)
path_elts.append(item.text(column))
item = item.parent()
path_elts.reverse()
path = ".".join(path_elts)
path = classdot2class(path)
return path, path_elts
def itemDoubleClickedHandler(self, item, column):
        '''Slot called by PySide when a tree element is double-clicked'''
# print "item %s has been double clicked at column %s" % (str(item), str(column))
path, path_elts = self.item2path(item)
if item.childCount() != 0:
self.mainwin.showStatus("Sources not available. %s is not a class" % path)
return
self.mainwin.openSourceWindow(path)
def createActions(self):
self.xrefAct = QtGui.QAction("Xref from...", self,
# shortcut=QtGui.QKeySequence("CTRL+B"),
statusTip="List the references where this element is used",
triggered=self.actionXref)
self.expandAct = QtGui.QAction("Expand...", self,
statusTip="Expand all the subtrees",
triggered=self.actionExpand)
self.collapseAct = QtGui.QAction("Collapse...", self,
statusTip="Collapse all the subtrees",
triggered=self.actionCollapse)
def actionXref(self):
item = self.currentItem()
path, path_elts = self.item2path(item)
if item.childCount() != 0:
self.mainwin.showStatus("Xref not available. %s is not a class" % path)
return
xrefs_list = XrefDialog.get_xrefs_list(self.mainwin.d, path=path)
if not xrefs_list:
self.mainwin.showStatus("No xref returned.")
return
xwin = XrefDialog(parent=self.mainwin, win=self.mainwin, xrefs_list=xrefs_list, path=path)
xwin.show()
def expand_children(self, item):
self.expandItem(item)
for i in range(item.childCount()):
self.expand_children(item.child(i))
def actionExpand(self):
self.expand_children(self.currentItem())
def collapse_children(self, item):
for i in range(item.childCount()):
self.collapse_children(item.child(i))
self.collapseItem(item)
def actionCollapse(self):
self.collapse_children(self.currentItem())
def contextMenuEvent(self, event):
menu = QtGui.QMenu(self)
menu.addAction(self.xrefAct)
menu.addAction(self.expandAct)
menu.addAction(self.collapseAct)
menu.exec_(event.globalPos())
|
apache-2.0
| -6,763,936,228,554,617,000 | 6,385,670,124,560,820,000 | 36.422414 | 98 | 0.600323 | false |
bardes/sonitus
|
tools/tone_gen.py
|
1
|
1271
|
#!/usr/bin/env python
from sys import argv, stderr
usage = \
"""
Usage: {program} <sample rate> <A4 freq.> [octaves=8]
e.g.: {program} 64000 442.0 5
""".format(program=argv[0])
if len(argv) < 3 or len(argv) > 4 :
print(usage, file = stderr)
exit(1)
A4 = 0
sample_rate = 0
octaves = 8
try:
A4 = float(argv[2])
except:
print("Error, invalid argument: Freq. must be a number!", file = stderr)
print(usage, file = stderr)
exit(1)
try:
sample_rate = int(argv[1])
except:
print("Error, invalid argument: Sample rate must be an integer!", \
file = stderr)
print(usage, file = stderr)
exit(1)
if len(argv) == 4 :
try:
octaves = int(argv[3])
except:
print("Error, invalid argument: Octaves must be an integer!", \
file = stderr)
print(usage, file = stderr)
exit(1)
freq_ratio = 2**(1/12)
base_freq = A4/(freq_ratio**57)
periods = [round(sample_rate/(2*base_freq*freq_ratio**t)) \
for t in range(0, 12*octaves)]
print("uint16_t tone_periods[{ntones}] = {{".format(ntones=12*octaves))
for o in range(0, octaves):
print('\t', end='')
for i in range(0, 12):
print("{period}, ".format(period=periods[12*o+i]), end='')
print('')
print("};")
|
mit
| -7,184,574,555,356,638,000 | 6,575,666,410,553,076,000 | 22.981132 | 76 | 0.585366 | false |
yewang15215/django
|
tests/template_tests/filter_tests/test_phone2numeric.py
|
176
|
1500
|
from django.template.defaultfilters import phone2numeric_filter
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class Phone2numericTests(SimpleTestCase):
@setup({'phone2numeric01': '{{ a|phone2numeric }} {{ b|phone2numeric }}'})
def test_phone2numeric01(self):
output = self.engine.render_to_string(
'phone2numeric01',
{'a': '<1-800-call-me>', 'b': mark_safe('<1-800-call-me>')},
)
self.assertEqual(output, '<1-800-2255-63> <1-800-2255-63>')
@setup({'phone2numeric02': '{% autoescape off %}{{ a|phone2numeric }} {{ b|phone2numeric }}{% endautoescape %}'})
def test_phone2numeric02(self):
output = self.engine.render_to_string(
'phone2numeric02',
{'a': '<1-800-call-me>', 'b': mark_safe('<1-800-call-me>')},
)
self.assertEqual(output, '<1-800-2255-63> <1-800-2255-63>')
@setup({'phone2numeric03': '{{ a|phone2numeric }}'})
def test_phone2numeric03(self):
output = self.engine.render_to_string(
'phone2numeric03',
{'a': 'How razorback-jumping frogs can level six piqued gymnasts!'},
)
self.assertEqual(
output,
'469 729672225-5867464 37647 226 53835 749 747833 49662787!'
)
class FunctionTests(SimpleTestCase):
def test_phone2numeric(self):
self.assertEqual(phone2numeric_filter('0800 flowers'), '0800 3569377')
|
bsd-3-clause
| 5,683,672,324,663,246,000 | -1,798,925,157,511,918,600 | 35.585366 | 117 | 0.622667 | false |
ellipsys/discover
|
misc/python/ex2.py
|
6
|
1050
|
#!/usr/bin/env python
import os
from xml.dom.minidom import parse
import xml.dom.minidom
os.system('clear')
# Open XML document using minidom parser
DOMTree = xml.dom.minidom.parse('movies.xml')
collection = DOMTree.documentElement
if collection.hasAttribute('shelf'):
print '\n\nRoot element: %s\n' % collection.getAttribute('shelf')
# Get all the movies in the collection
movies = collection.getElementsByTagName('movie')
# Print details of each movie.
for movie in movies:
print '***** Movie *****'
if movie.hasAttribute('title'):
print 'Title: %s' % movie.getAttribute('title')
type = movie.getElementsByTagName('type')[0]
print 'Type: %s' % type.childNodes[0].data
format = movie.getElementsByTagName('format')[0]
print 'Format: %s' % format.childNodes[0].data
rating = movie.getElementsByTagName('rating')[0]
print 'Rating: %s' % rating.childNodes[0].data
description = movie.getElementsByTagName('description')[0]
print 'Description: %s' % description.childNodes[0].data
|
bsd-3-clause
| -7,238,730,998,782,013,000 | -1,075,356,323,995,715,200 | 31.8125 | 70 | 0.69619 | false |
cihai/cihai
|
cihai/exc.py
|
1
|
2127
|
"""Exceptions raised from the Cihai library."""
class CihaiException(Exception):
"""Base Cihai Exception class."""
class ImportStringError(ImportError, CihaiException):
"""
Provides information about a failed :func:`import_string` attempt.
Notes
-----
This is from werkzeug.utils c769200 on May 23, LICENSE BSD.
https://github.com/pallets/werkzeug
Changes:
- Deferred load import import_string from cihai.util
- Format with black
"""
#: String in dotted notation that failed to be imported.
import_name = None
#: Wrapped exception.
exception = None
def __init__(self, import_name, exception):
from .utils import import_string
self.import_name = import_name
self.exception = exception
msg = (
'import_string() failed for %r. Possible reasons are:\n\n'
'- missing __init__.py in a package;\n'
'- package or module path not included in sys.path;\n'
'- duplicated package or module name taking precedence in '
'sys.path;\n'
'- missing module, class, function or variable;\n\n'
'Debugged import:\n\n%s\n\n'
'Original exception:\n\n%s: %s'
)
name = ''
tracked = []
for part in import_name.replace(':', '.').split('.'):
name += (name and '.') + part
imported = import_string(name, silent=True)
if imported:
tracked.append((name, getattr(imported, '__file__', None)))
else:
track = ['- %r found in %r.' % (n, i) for n, i in tracked]
track.append('- %r not found.' % name)
msg = msg % (
import_name,
'\n'.join(track),
exception.__class__.__name__,
str(exception),
)
break
ImportError.__init__(self, msg)
def __repr__(self):
return '<%s(%r, %r)>' % (
self.__class__.__name__,
self.import_name,
self.exception,
)
|
mit
| 8,468,972,772,098,142,000 | 5,710,198,840,043,477,000 | 29.826087 | 75 | 0.520451 | false |
FireWRT/OpenWrt-Firefly-Libraries
|
staging_dir/host/lib/python3.4/test/test_imghdr.py
|
87
|
4413
|
import imghdr
import io
import os
import unittest
import warnings
from test.support import findfile, TESTFN, unlink
TEST_FILES = (
('python.png', 'png'),
('python.gif', 'gif'),
('python.bmp', 'bmp'),
('python.ppm', 'ppm'),
('python.pgm', 'pgm'),
('python.pbm', 'pbm'),
('python.jpg', 'jpeg'),
('python.ras', 'rast'),
('python.sgi', 'rgb'),
('python.tiff', 'tiff'),
('python.xbm', 'xbm')
)
class UnseekableIO(io.FileIO):
def tell(self):
raise io.UnsupportedOperation
def seek(self, *args, **kwargs):
raise io.UnsupportedOperation
class TestImghdr(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.testfile = findfile('python.png', subdir='imghdrdata')
with open(cls.testfile, 'rb') as stream:
cls.testdata = stream.read()
def tearDown(self):
unlink(TESTFN)
def test_data(self):
for filename, expected in TEST_FILES:
filename = findfile(filename, subdir='imghdrdata')
self.assertEqual(imghdr.what(filename), expected)
with open(filename, 'rb') as stream:
self.assertEqual(imghdr.what(stream), expected)
with open(filename, 'rb') as stream:
data = stream.read()
self.assertEqual(imghdr.what(None, data), expected)
self.assertEqual(imghdr.what(None, bytearray(data)), expected)
def test_register_test(self):
def test_jumbo(h, file):
if h.startswith(b'eggs'):
return 'ham'
imghdr.tests.append(test_jumbo)
self.addCleanup(imghdr.tests.pop)
self.assertEqual(imghdr.what(None, b'eggs'), 'ham')
def test_file_pos(self):
with open(TESTFN, 'wb') as stream:
stream.write(b'ababagalamaga')
pos = stream.tell()
stream.write(self.testdata)
with open(TESTFN, 'rb') as stream:
stream.seek(pos)
self.assertEqual(imghdr.what(stream), 'png')
self.assertEqual(stream.tell(), pos)
def test_bad_args(self):
with self.assertRaises(TypeError):
imghdr.what()
with self.assertRaises(AttributeError):
imghdr.what(None)
with self.assertRaises(TypeError):
imghdr.what(self.testfile, 1)
with self.assertRaises(AttributeError):
imghdr.what(os.fsencode(self.testfile))
with open(self.testfile, 'rb') as f:
with self.assertRaises(AttributeError):
imghdr.what(f.fileno())
def test_invalid_headers(self):
for header in (b'\211PN\r\n',
b'\001\331',
b'\x59\xA6',
b'cutecat',
b'000000JFI',
b'GIF80'):
self.assertIsNone(imghdr.what(None, header))
def test_string_data(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", BytesWarning)
for filename, _ in TEST_FILES:
filename = findfile(filename, subdir='imghdrdata')
with open(filename, 'rb') as stream:
data = stream.read().decode('latin1')
with self.assertRaises(TypeError):
imghdr.what(io.StringIO(data))
with self.assertRaises(TypeError):
imghdr.what(None, data)
def test_missing_file(self):
with self.assertRaises(FileNotFoundError):
imghdr.what('missing')
def test_closed_file(self):
stream = open(self.testfile, 'rb')
stream.close()
with self.assertRaises(ValueError) as cm:
imghdr.what(stream)
stream = io.BytesIO(self.testdata)
stream.close()
with self.assertRaises(ValueError) as cm:
imghdr.what(stream)
def test_unseekable(self):
with open(TESTFN, 'wb') as stream:
stream.write(self.testdata)
with UnseekableIO(TESTFN, 'rb') as stream:
with self.assertRaises(io.UnsupportedOperation):
imghdr.what(stream)
def test_output_stream(self):
with open(TESTFN, 'wb') as stream:
stream.write(self.testdata)
stream.seek(0)
with self.assertRaises(OSError) as cm:
imghdr.what(stream)
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
| -9,043,006,850,835,653,000 | 7,075,678,250,423,120,000 | 32.687023 | 74 | 0.568094 | false |
sam-tsai/django
|
tests/gis_tests/gis_migrations/migrations/0001_initial.py
|
269
|
2465
|
from django.db import connection, migrations, models
from ...models import models as gis_models
ops = [
migrations.CreateModel(
name='Neighborhood',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, unique=True)),
('geom', gis_models.MultiPolygonField(srid=4326)),
],
options={
'required_db_features': ['gis_enabled'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Household',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('neighborhood', models.ForeignKey(
'gis_migrations.Neighborhood',
models.SET_NULL,
to_field='id',
null=True,
)),
('address', models.CharField(max_length=100)),
('zip_code', models.IntegerField(null=True, blank=True)),
('geom', gis_models.PointField(srid=4326, geography=True)),
],
options={
'required_db_features': ['gis_enabled'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Family',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, unique=True)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='household',
name='family',
field=models.ForeignKey('gis_migrations.Family', models.SET_NULL, blank=True, null=True),
preserve_default=True,
)
]
if connection.features.gis_enabled and connection.features.supports_raster:
ops += [
migrations.CreateModel(
name='Heatmap',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, unique=True)),
('rast', gis_models.fields.RasterField(srid=4326)),
],
options={
},
bases=(models.Model,),
),
]
class Migration(migrations.Migration):
"""
Used for gis-specific migration tests.
"""
operations = ops
|
bsd-3-clause
| -8,762,440,869,285,799,000 | 3,557,220,512,622,486,500 | 31.866667 | 114 | 0.55213 | false |
centrofermi/e3pipe
|
tracking/E3FittingTool2dUnweighted.py
|
1
|
2330
|
#!/usr/bin/env python
# *********************************************************************
# * Copyright (C) 2015 Luca Baldini ([email protected]) *
# * *
# * For the license terms see the file LICENSE, distributed *
# * along with this software. *
# *********************************************************************
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from e3pipe.tracking.E3FittingTool2d import E3FittingTool2d
from e3pipe.tracking.E3Point import E3Point
class E3FittingTool2dUnweighted(E3FittingTool2d):
""" Simple two-dimensional track-fitting tool.
"""
def __init__(self):
""" Constructor.
"""
E3FittingTool2d.__init__(self, weighted = False)
def test():
"""
"""
fitTool = E3FittingTool2dUnweighted()
# Real event from FRAS-02-2014-10-30-00018_dst.root (11878)
# Processed with e3pipe 2.1.0 gives:
# root [4] Events.Scan("XDir:YDir:ZDir:ChiSquare", "EventNumber==11878")
# ************************************************************
# * Row * XDir * YDir * ZDir * ChiSquare *
# ************************************************************
# * 11878 * -0.050563 * 0.1976770 * 0.9789620 * 1.6044100 *
# ************************************************************
hits = [E3Point(79.229, 38.400, 0.000),
E3Point(82.742, 32.000, 40.000),
E3Point(83.922, 22.400, 80.000)
]
fitTool.run(hits)
print fitTool.track()
if __name__ == '__main__':
test()
|
gpl-3.0
| -7,152,233,829,042,086,000 | 5,982,372,838,723,814,000 | 36.580645 | 76 | 0.530472 | false |
v-iam/azure-sdk-for-python
|
azure-mgmt-cognitiveservices/azure/mgmt/cognitiveservices/models/__init__.py
|
4
|
2416
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sku import Sku
from .cognitive_services_account_create_parameters import CognitiveServicesAccountCreateParameters
from .cognitive_services_account_update_parameters import CognitiveServicesAccountUpdateParameters
from .cognitive_services_account import CognitiveServicesAccount
from .cognitive_services_account_keys import CognitiveServicesAccountKeys
from .regenerate_key_parameters import RegenerateKeyParameters
from .cognitive_services_resource_and_sku import CognitiveServicesResourceAndSku
from .cognitive_services_account_enumerate_skus_result import CognitiveServicesAccountEnumerateSkusResult
from .error_body import ErrorBody
from .error import Error, ErrorException
from .operation_display_info import OperationDisplayInfo
from .operation_entity import OperationEntity
from .check_sku_availability_parameter import CheckSkuAvailabilityParameter
from .check_sku_availability_result import CheckSkuAvailabilityResult
from .check_sku_availability_result_list import CheckSkuAvailabilityResultList
from .cognitive_services_account_paged import CognitiveServicesAccountPaged
from .operation_entity_paged import OperationEntityPaged
from .cognitive_services_management_client_enums import (
SkuName,
SkuTier,
Kind,
ProvisioningState,
KeyName,
)
__all__ = [
'Sku',
'CognitiveServicesAccountCreateParameters',
'CognitiveServicesAccountUpdateParameters',
'CognitiveServicesAccount',
'CognitiveServicesAccountKeys',
'RegenerateKeyParameters',
'CognitiveServicesResourceAndSku',
'CognitiveServicesAccountEnumerateSkusResult',
'ErrorBody',
'Error', 'ErrorException',
'OperationDisplayInfo',
'OperationEntity',
'CheckSkuAvailabilityParameter',
'CheckSkuAvailabilityResult',
'CheckSkuAvailabilityResultList',
'CognitiveServicesAccountPaged',
'OperationEntityPaged',
'SkuName',
'SkuTier',
'Kind',
'ProvisioningState',
'KeyName',
]
|
mit
| -7,882,903,869,621,196,000 | -3,551,408,065,979,264,000 | 39.266667 | 105 | 0.754967 | false |
khalibartan/Antidote-DM
|
Antidotes DM/youtube_dl/extractor/ro220.py
|
176
|
1451
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
class Ro220IE(InfoExtractor):
IE_NAME = '220.ro'
_VALID_URL = r'(?x)(?:https?://)?(?:www\.)?220\.ro/(?P<category>[^/]+)/(?P<shorttitle>[^/]+)/(?P<id>[^/]+)'
_TEST = {
'url': 'http://www.220.ro/sport/Luati-Le-Banii-Sez-4-Ep-1/LYV6doKo7f/',
'md5': '03af18b73a07b4088753930db7a34add',
'info_dict': {
'id': 'LYV6doKo7f',
'ext': 'mp4',
'title': 'Luati-le Banii sez 4 ep 1',
'description': 're:^Iata-ne reveniti dupa o binemeritata vacanta\. +Va astept si pe Facebook cu pareri si comentarii.$',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
url = compat_urllib_parse_unquote(self._search_regex(
r'(?s)clip\s*:\s*{.*?url\s*:\s*\'([^\']+)\'', webpage, 'url'))
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
formats = [{
'format_id': 'sd',
'url': url,
'ext': 'mp4',
}]
return {
'id': video_id,
'formats': formats,
'title': title,
'description': description,
'thumbnail': thumbnail,
}
|
gpl-2.0
| -9,174,996,382,507,130,000 | 4,237,017,690,069,613,600 | 32.744186 | 132 | 0.529979 | false |
olasitarska/django
|
django/db/models/sql/subqueries.py
|
15
|
10496
|
"""
Query subclasses which provide extra functionality beyond simple data retrieval.
"""
from django.conf import settings
from django.core.exceptions import FieldError
from django.db import connections
from django.db.models.query_utils import Q
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import DateField, DateTimeField, FieldDoesNotExist
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE, NO_RESULTS, SelectInfo
from django.db.models.sql.datastructures import Date, DateTime
from django.db.models.sql.query import Query
from django.utils import six
from django.utils import timezone
__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'DateQuery',
'DateTimeQuery', 'AggregateQuery']
class DeleteQuery(Query):
"""
Delete queries are done through this class, since they are more constrained
than general queries.
"""
compiler = 'SQLDeleteCompiler'
def do_query(self, table, where, using):
self.tables = [table]
self.where = where
self.get_compiler(using).execute_sql(NO_RESULTS)
def delete_batch(self, pk_list, using, field=None):
"""
Set up and execute delete queries for all the objects in pk_list.
More than one physical query may be executed if there are a
lot of values in pk_list.
"""
if not field:
field = self.get_meta().pk
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.where = self.where_class()
self.add_q(Q(
**{field.attname + '__in': pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]}))
self.do_query(self.get_meta().db_table, self.where, using=using)
def delete_qs(self, query, using):
"""
Delete the queryset in one SQL query (if possible). For simple queries
        this is done by copying query.query.where to self.query; for
        complex queries it is done by using a subquery.
"""
innerq = query.query
# Make sure the inner query has at least one table in use.
innerq.get_initial_alias()
# The same for our new query.
self.get_initial_alias()
innerq_used_tables = [t for t in innerq.tables
if innerq.alias_refcount[t]]
if ((not innerq_used_tables or innerq_used_tables == self.tables)
and not len(innerq.having)):
# There is only the base table in use in the query, and there is
# no aggregate filtering going on.
self.where = innerq.where
else:
pk = query.model._meta.pk
if not connections[using].features.update_can_self_select:
# We can't do the delete using subquery.
values = list(query.values_list('pk', flat=True))
if not values:
return
self.delete_batch(values, using)
return
else:
innerq.clear_select_clause()
innerq.select = [
SelectInfo((self.get_initial_alias(), pk.column), None)
]
values = innerq
self.where = self.where_class()
self.add_q(Q(pk__in=values))
self.get_compiler(using).execute_sql(NO_RESULTS)
class UpdateQuery(Query):
"""
Represents an "update" SQL query.
"""
compiler = 'SQLUpdateCompiler'
def __init__(self, *args, **kwargs):
super(UpdateQuery, self).__init__(*args, **kwargs)
self._setup_query()
def _setup_query(self):
"""
Runs on initialization and after cloning. Any attributes that would
normally be set in __init__ should go in here, instead, so that they
are also set up after a clone() call.
"""
self.values = []
self.related_ids = None
if not hasattr(self, 'related_updates'):
self.related_updates = {}
def clone(self, klass=None, **kwargs):
return super(UpdateQuery, self).clone(klass,
related_updates=self.related_updates.copy(), **kwargs)
def update_batch(self, pk_list, values, using):
self.add_update_values(values)
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.where = self.where_class()
self.add_q(Q(pk__in=pk_list[offset: offset + GET_ITERATOR_CHUNK_SIZE]))
self.get_compiler(using).execute_sql(NO_RESULTS)
def add_update_values(self, values):
"""
Convert a dictionary of field name to value mappings into an update
query. This is the entry point for the public update() method on
querysets.
"""
values_seq = []
for name, val in six.iteritems(values):
field, model, direct, m2m = self.get_meta().get_field_by_name(name)
if not direct or m2m:
raise FieldError(
'Cannot update model field %r (only non-relations and '
'foreign keys permitted).' % field
)
if model:
self.add_related_update(model, field, val)
continue
values_seq.append((field, model, val))
return self.add_update_fields(values_seq)
def add_update_fields(self, values_seq):
"""
Turn a sequence of (field, model, value) triples into an update query.
Used by add_update_values() as well as the "fast" update path when
saving models.
"""
self.values.extend(values_seq)
def add_related_update(self, model, field, value):
"""
Adds (name, value) to an update query for an ancestor model.
Updates are coalesced so that we only run one update query per ancestor.
"""
self.related_updates.setdefault(model, []).append((field, None, value))
def get_related_updates(self):
"""
Returns a list of query objects: one for each update required to an
ancestor model. Each query will have the same filtering conditions as
the current query but will only update a single table.
"""
if not self.related_updates:
return []
result = []
for model, values in six.iteritems(self.related_updates):
query = UpdateQuery(model)
query.values = values
if self.related_ids is not None:
query.add_filter(('pk__in', self.related_ids))
result.append(query)
return result
class InsertQuery(Query):
compiler = 'SQLInsertCompiler'
def __init__(self, *args, **kwargs):
super(InsertQuery, self).__init__(*args, **kwargs)
self.fields = []
self.objs = []
def clone(self, klass=None, **kwargs):
extras = {
'fields': self.fields[:],
'objs': self.objs[:],
'raw': self.raw,
}
extras.update(kwargs)
return super(InsertQuery, self).clone(klass, **extras)
def insert_values(self, fields, objs, raw=False):
"""
Set up the insert query from the 'insert_values' dictionary. The
dictionary gives the model field names and their target values.
If 'raw_values' is True, the values in the 'insert_values' dictionary
are inserted directly into the query, rather than passed as SQL
parameters. This provides a way to insert NULL and DEFAULT keywords
into the query, for example.
"""
self.fields = fields
self.objs = objs
self.raw = raw
class DateQuery(Query):
"""
A DateQuery is a normal query, except that it specifically selects a single
date field. This requires some special handling when converting the results
back to Python objects, so we put it in a separate class.
"""
compiler = 'SQLDateCompiler'
def add_select(self, field_name, lookup_type, order='ASC'):
"""
Converts the query into an extraction query.
"""
try:
field, _, _, joins, _ = self.setup_joins(
field_name.split(LOOKUP_SEP),
self.get_meta(),
self.get_initial_alias(),
)
except FieldError:
raise FieldDoesNotExist("%s has no field named '%s'" % (
self.get_meta().object_name, field_name
))
self._check_field(field) # overridden in DateTimeQuery
alias = joins[-1]
select = self._get_select((alias, field.column), lookup_type)
self.clear_select_clause()
self.select = [SelectInfo(select, None)]
self.distinct = True
self.order_by = [1] if order == 'ASC' else [-1]
if field.null:
self.add_filter(("%s__isnull" % field_name, False))
def _check_field(self, field):
assert isinstance(field, DateField), \
"%r isn't a DateField." % field.name
if settings.USE_TZ:
assert not isinstance(field, DateTimeField), \
"%r is a DateTimeField, not a DateField." % field.name
def _get_select(self, col, lookup_type):
return Date(col, lookup_type)
class DateTimeQuery(DateQuery):
"""
A DateTimeQuery is like a DateQuery but for a datetime field. If time zone
support is active, the tzinfo attribute contains the time zone to use for
converting the values before truncating them. Otherwise it's set to None.
"""
compiler = 'SQLDateTimeCompiler'
def clone(self, klass=None, memo=None, **kwargs):
if 'tzinfo' not in kwargs and hasattr(self, 'tzinfo'):
kwargs['tzinfo'] = self.tzinfo
return super(DateTimeQuery, self).clone(klass, memo, **kwargs)
def _check_field(self, field):
assert isinstance(field, DateTimeField), \
"%r isn't a DateTimeField." % field.name
def _get_select(self, col, lookup_type):
if self.tzinfo is None:
tzname = None
else:
tzname = timezone._get_timezone_name(self.tzinfo)
return DateTime(col, lookup_type, tzname)
class AggregateQuery(Query):
"""
An AggregateQuery takes another query as a parameter to the FROM
clause and only selects the elements in the provided list.
"""
compiler = 'SQLAggregateCompiler'
def add_subquery(self, query, using):
self.subquery, self.sub_params = query.get_compiler(using).as_sql(with_col_aliases=True)
|
bsd-3-clause
| -5,816,039,509,415,508,000 | 3,873,059,823,380,761,600 | 35.571429 | 96 | 0.600896 | false |
Edraak/edraak-platform
|
cms/djangoapps/contentstore/tests/test_course_create_rerun.py
|
14
|
7173
|
"""
Test view handler for rerun (and eventually create)
"""
import datetime
import ddt
from django.urls import reverse
from django.test.client import RequestFactory
from mock import patch
from opaque_keys.edx.keys import CourseKey
from contentstore.tests.utils import AjaxEnabledTestClient, parse_json
from student.roles import CourseInstructorRole, CourseStaffRole
from student.tests.factories import UserFactory
from util.organizations_helpers import add_organization, get_course_organizations
from xmodule.course_module import CourseFields
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@ddt.ddt
class TestCourseListing(ModuleStoreTestCase):
"""
    Unit tests for creating and rerunning courses as a logged-in user
"""
def setUp(self):
"""
Add a user and a course
"""
super(TestCourseListing, self).setUp()
        # create and log in a non-staff user
self.user = UserFactory()
self.factory = RequestFactory()
self.client = AjaxEnabledTestClient()
self.client.login(username=self.user.username, password='test')
self.course_create_rerun_url = reverse('course_handler')
self.course_start = datetime.datetime.utcnow()
self.course_end = self.course_start + datetime.timedelta(days=30)
self.enrollment_start = self.course_start - datetime.timedelta(days=7)
self.enrollment_end = self.course_end - datetime.timedelta(days=14)
source_course = CourseFactory.create(
org='origin',
number='the_beginning',
run='first',
display_name='the one and only',
start=self.course_start,
end=self.course_end,
enrollment_start=self.enrollment_start,
enrollment_end=self.enrollment_end
)
self.source_course_key = source_course.id
for role in [CourseInstructorRole, CourseStaffRole]:
role(self.source_course_key).add_users(self.user)
def tearDown(self):
"""
Reverse the setup
"""
self.client.logout()
ModuleStoreTestCase.tearDown(self)
def test_rerun(self):
"""
Just testing the functionality the view handler adds over the tasks tested in test_clone_course
"""
response = self.client.ajax_post(self.course_create_rerun_url, {
'source_course_key': unicode(self.source_course_key),
'org': self.source_course_key.org, 'course': self.source_course_key.course, 'run': 'copy',
'display_name': 'not the same old name',
})
self.assertEqual(response.status_code, 200)
data = parse_json(response)
dest_course_key = CourseKey.from_string(data['destination_course_key'])
self.assertEqual(dest_course_key.run, 'copy')
source_course = self.store.get_course(self.source_course_key)
dest_course = self.store.get_course(dest_course_key)
self.assertEqual(dest_course.start, CourseFields.start.default)
self.assertEqual(dest_course.end, source_course.end)
self.assertEqual(dest_course.enrollment_start, None)
self.assertEqual(dest_course.enrollment_end, None)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_newly_created_course_has_web_certs_enabled(self, store):
"""
Tests newly created course has web certs enabled by default.
"""
with modulestore().default_store(store):
response = self.client.ajax_post(self.course_create_rerun_url, {
'org': 'orgX',
'number': 'CS101',
'display_name': 'Course with web certs enabled',
'run': '2015_T2'
})
self.assertEqual(response.status_code, 200)
data = parse_json(response)
new_course_key = CourseKey.from_string(data['course_key'])
course = self.store.get_course(new_course_key)
self.assertTrue(course.cert_html_view_enabled)
@patch.dict('django.conf.settings.FEATURES', {'ORGANIZATIONS_APP': False})
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_creation_without_org_app_enabled(self, store):
"""
Tests course creation workflow should not create course to org
link if organizations_app is not enabled.
"""
with modulestore().default_store(store):
response = self.client.ajax_post(self.course_create_rerun_url, {
'org': 'orgX',
'number': 'CS101',
'display_name': 'Course with web certs enabled',
'run': '2015_T2'
})
self.assertEqual(response.status_code, 200)
data = parse_json(response)
new_course_key = CourseKey.from_string(data['course_key'])
course_orgs = get_course_organizations(new_course_key)
self.assertEqual(course_orgs, [])
@patch.dict('django.conf.settings.FEATURES', {'ORGANIZATIONS_APP': True})
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_creation_with_org_not_in_system(self, store):
"""
Tests course creation workflow when course organization does not exist
in system.
"""
with modulestore().default_store(store):
response = self.client.ajax_post(self.course_create_rerun_url, {
'org': 'orgX',
'number': 'CS101',
'display_name': 'Course with web certs enabled',
'run': '2015_T2'
})
self.assertEqual(response.status_code, 400)
data = parse_json(response)
self.assertIn(u'Organization you selected does not exist in the system', data['error'])
@patch.dict('django.conf.settings.FEATURES', {'ORGANIZATIONS_APP': True})
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_creation_with_org_in_system(self, store):
"""
Tests course creation workflow when course organization exist in system.
"""
add_organization({
'name': 'Test Organization',
'short_name': 'orgX',
'description': 'Testing Organization Description',
})
with modulestore().default_store(store):
response = self.client.ajax_post(self.course_create_rerun_url, {
'org': 'orgX',
'number': 'CS101',
'display_name': 'Course with web certs enabled',
'run': '2015_T2'
})
self.assertEqual(response.status_code, 200)
data = parse_json(response)
new_course_key = CourseKey.from_string(data['course_key'])
course_orgs = get_course_organizations(new_course_key)
self.assertEqual(len(course_orgs), 1)
self.assertEqual(course_orgs[0]['short_name'], 'orgX')
|
agpl-3.0
| 5,961,938,846,482,138,000 | 8,318,157,821,162,634,000 | 42.210843 | 103 | 0.631117 | false |
jaggu303619/asylum-v2.0
|
openerp/addons/hr/__openerp__.py
|
54
|
2515
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Employee Directory',
'version': '1.1',
'author': 'OpenERP SA',
'category': 'Human Resources',
'sequence': 21,
'website': 'http://www.openerp.com',
'summary': 'Jobs, Departments, Employees Details',
'description': """
Human Resources Management
==========================
This application enables you to manage important aspects of your company's staff and other details such as their skills, contacts, working time...
You can manage:
---------------
* Employees and hierarchies: you can link employees to users and display hierarchies
* HR Departments
* HR Jobs
""",
'images': [
'images/hr_department.jpeg',
'images/hr_employee.jpeg',
'images/hr_job_position.jpeg',
'static/src/img/default_image.png',
],
'depends': ['base_setup','mail', 'resource', 'board'],
'data': [
'security/hr_security.xml',
'security/ir.model.access.csv',
'board_hr_view.xml',
'hr_view.xml',
'hr_department_view.xml',
'process/hr_process.xml',
'hr_installer.xml',
'hr_data.xml',
'res_config_view.xml',
],
'demo': ['hr_demo.xml'],
'test': [
'test/open2recruit2close_job.yml',
'test/hr_demo.yml',
],
'installable': True,
'application': True,
'auto_install': False,
'css': [ 'static/src/css/hr.css' ],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| -1,611,870,110,903,747,000 | 8,644,598,158,087,779,000 | 33.452055 | 146 | 0.589264 | false |
wangyum/tensorflow
|
tensorflow/contrib/distributions/python/kernel_tests/onehot_categorical_test.py
|
89
|
10643
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for OneHotCategorical distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import onehot_categorical
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.platform import test
def make_onehot_categorical(batch_shape, num_classes, dtype=dtypes.int32):
logits = random_ops.random_uniform(
list(batch_shape) + [num_classes], -10, 10, dtype=dtypes.float32) - 50.
return onehot_categorical.OneHotCategorical(logits, dtype=dtype)
class OneHotCategoricalTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def testP(self):
p = [0.2, 0.8]
dist = onehot_categorical.OneHotCategorical(probs=p)
with self.test_session():
self.assertAllClose(p, dist.probs.eval())
self.assertAllEqual([2], dist.logits.get_shape())
def testLogits(self):
p = np.array([0.2, 0.8], dtype=np.float32)
logits = np.log(p) - 50.
dist = onehot_categorical.OneHotCategorical(logits=logits)
with self.test_session():
self.assertAllEqual([2], dist.probs.get_shape())
self.assertAllEqual([2], dist.logits.get_shape())
self.assertAllClose(dist.probs.eval(), p)
self.assertAllClose(dist.logits.eval(), logits)
def testShapes(self):
with self.test_session():
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_onehot_categorical(batch_shape, 10)
self.assertAllEqual(batch_shape, dist.batch_shape.as_list())
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([10], dist.event_shape.as_list())
self.assertAllEqual([10], dist.event_shape_tensor().eval())
# event_shape is available as a constant because the shape is
# known at graph build time.
self.assertEqual(10,
tensor_util.constant_value(dist.event_shape_tensor()))
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_onehot_categorical(
batch_shape, constant_op.constant(10, dtype=dtypes.int32))
self.assertAllEqual(len(batch_shape), dist.batch_shape.ndims)
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([10], dist.event_shape.as_list())
self.assertEqual(10, dist.event_shape_tensor().eval())
def testDtype(self):
dist = make_onehot_categorical([], 5, dtype=dtypes.int32)
self.assertEqual(dist.dtype, dtypes.int32)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
dist = make_onehot_categorical([], 5, dtype=dtypes.int64)
self.assertEqual(dist.dtype, dtypes.int64)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
self.assertEqual(dist.probs.dtype, dtypes.float32)
self.assertEqual(dist.logits.dtype, dtypes.float32)
self.assertEqual(dist.logits.dtype, dist.entropy().dtype)
self.assertEqual(dist.logits.dtype, dist.prob(
np.array([1]+[0]*4, dtype=np.int64)).dtype)
self.assertEqual(dist.logits.dtype, dist.log_prob(
np.array([1]+[0]*4, dtype=np.int64)).dtype)
def testUnknownShape(self):
with self.test_session():
logits = array_ops.placeholder(dtype=dtypes.float32)
dist = onehot_categorical.OneHotCategorical(logits)
sample = dist.sample()
# Will sample class 1.
sample_value = sample.eval(feed_dict={logits: [-1000.0, 1000.0]})
self.assertAllEqual([0, 1], sample_value)
# Batch entry 0 will sample class 1, batch entry 1 will sample class 0.
sample_value_batch = sample.eval(
feed_dict={logits: [[-1000.0, 1000.0], [1000.0, -1000.0]]})
self.assertAllEqual([[0, 1], [1, 0]], sample_value_batch)
def testEntropyNoBatch(self):
logits = np.log([0.2, 0.8]) - 50.
dist = onehot_categorical.OneHotCategorical(logits)
with self.test_session():
self.assertAllClose(
dist.entropy().eval(),
-(0.2 * np.log(0.2) + 0.8 * np.log(0.8)))
def testEntropyWithBatch(self):
logits = np.log([[0.2, 0.8], [0.6, 0.4]]) - 50.
dist = onehot_categorical.OneHotCategorical(logits)
with self.test_session():
self.assertAllClose(dist.entropy().eval(), [
-(0.2 * np.log(0.2) + 0.8 * np.log(0.8)),
-(0.6 * np.log(0.6) + 0.4 * np.log(0.4))
])
def testPmf(self):
    # Check that the probabilities of samples correspond to their class probabilities
with self.test_session():
logits = self._rng.random_sample(size=(8, 2, 10))
prob = np.exp(logits)/np.sum(np.exp(logits), axis=-1, keepdims=True)
dist = onehot_categorical.OneHotCategorical(logits=logits)
np_sample = dist.sample().eval()
np_prob = dist.prob(np_sample).eval()
expected_prob = prob[np_sample.astype(np.bool)]
self.assertAllClose(expected_prob, np_prob.flatten())
def testSample(self):
with self.test_session():
probs = [[[0.2, 0.8], [0.4, 0.6]]]
dist = onehot_categorical.OneHotCategorical(math_ops.log(probs) - 50.)
n = 100
samples = dist.sample(n, seed=123)
self.assertEqual(samples.dtype, dtypes.int32)
sample_values = samples.eval()
self.assertAllEqual([n, 1, 2, 2], sample_values.shape)
self.assertFalse(np.any(sample_values < 0))
self.assertFalse(np.any(sample_values > 1))
def testSampleWithSampleShape(self):
with self.test_session():
probs = [[[0.2, 0.8], [0.4, 0.6]]]
dist = onehot_categorical.OneHotCategorical(math_ops.log(probs) - 50.)
samples = dist.sample((100, 100), seed=123)
prob = dist.prob(samples)
prob_val = prob.eval()
self.assertAllClose([0.2**2 + 0.8**2], [prob_val[:, :, :, 0].mean()],
atol=1e-2)
self.assertAllClose([0.4**2 + 0.6**2], [prob_val[:, :, :, 1].mean()],
atol=1e-2)
def testCategoricalCategoricalKL(self):
def np_softmax(logits):
exp_logits = np.exp(logits)
return exp_logits / exp_logits.sum(axis=-1, keepdims=True)
with self.test_session() as sess:
for categories in [2, 10]:
for batch_size in [1, 2]:
p_logits = self._rng.random_sample((batch_size, categories))
q_logits = self._rng.random_sample((batch_size, categories))
p = onehot_categorical.OneHotCategorical(logits=p_logits)
q = onehot_categorical.OneHotCategorical(logits=q_logits)
prob_p = np_softmax(p_logits)
prob_q = np_softmax(q_logits)
kl_expected = np.sum(
prob_p * (np.log(prob_p) - np.log(prob_q)), axis=-1)
kl_actual = kullback_leibler.kl_divergence(p, q)
kl_same = kullback_leibler.kl_divergence(p, p)
x = p.sample(int(2e4), seed=0)
x = math_ops.cast(x, dtype=dtypes.float32)
# Compute empirical KL(p||q).
kl_sample = math_ops.reduce_mean(p.log_prob(x) - q.log_prob(x), 0)
[kl_sample_, kl_actual_, kl_same_] = sess.run([kl_sample, kl_actual,
kl_same])
self.assertEqual(kl_actual.get_shape(), (batch_size,))
self.assertAllClose(kl_same_, np.zeros_like(kl_expected))
self.assertAllClose(kl_actual_, kl_expected, atol=0., rtol=1e-6)
self.assertAllClose(kl_sample_, kl_expected, atol=1e-2, rtol=0.)
def testSampleUnbiasedNonScalarBatch(self):
with self.test_session() as sess:
logits = self._rng.rand(4, 3, 2).astype(np.float32)
dist = onehot_categorical.OneHotCategorical(logits=logits)
n = int(3e3)
x = dist.sample(n, seed=0)
x = math_ops.cast(x, dtype=dtypes.float32)
sample_mean = math_ops.reduce_mean(x, 0)
x_centered = array_ops.transpose(x - sample_mean, [1, 2, 3, 0])
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_b=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.probs,
dist.covariance(),
])
self.assertAllEqual([4, 3, 2], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.07)
self.assertAllEqual([4, 3, 2, 2], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.10)
def testSampleUnbiasedScalarBatch(self):
with self.test_session() as sess:
logits = self._rng.rand(3).astype(np.float32)
dist = onehot_categorical.OneHotCategorical(logits=logits)
n = int(1e4)
x = dist.sample(n, seed=0)
x = math_ops.cast(x, dtype=dtypes.float32)
sample_mean = math_ops.reduce_mean(x, 0) # elementwise mean
x_centered = x - sample_mean
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_a=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.probs,
dist.covariance(),
])
self.assertAllEqual([3], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.1)
self.assertAllEqual([3, 3], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.1)
if __name__ == "__main__":
test.main()
|
apache-2.0
| -3,325,629,517,104,220,700 | -2,435,447,313,179,538,000 | 41.234127 | 80 | 0.634502 | false |
decvalts/iris
|
docs/iris/src/sphinxext/generate_package_rst.py
|
3
|
9320
|
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import os
import sys
import re
import inspect
document_dict = {
# Use autoclass for classes.
'class': '''
{object_docstring}
..
.. autoclass:: {object_name}
:members:
:undoc-members:
:inherited-members:
''',
'function': '''
.. autofunction:: {object_name}
''',
# For everything else, let automodule do some magic...
None: '''
.. autodata:: {object_name}
'''}
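# Illustrative sketch (editor's note, not part of the original module): each
# entry above is a plain str.format template.  Assuming a hypothetical object
# name, filling in the 'function' template
#
#     >>> snippet = document_dict['function'].format(object_name='iris.load')
#
# yields a snippet containing the directive line ".. autofunction:: iris.load".
# The 'class' template additionally expects object_docstring; auto_doc_module
# below also passes object_name_header_line, which str.format simply ignores
# for templates that do not reference it.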
horizontal_sep = '''
.. raw:: html
<p class="hr_p"><a href="#">↑   top   ↑</a></p>
<!--
-----------
.. raw:: html
-->
'''
def lookup_object_type(obj):
if inspect.isclass(obj):
return 'class'
elif inspect.isfunction(obj):
return 'function'
else:
return None
def auto_doc_module(file_path, import_name, root_package,
package_toc=None, title=None):
mod = __import__(import_name)
mod = sys.modules[import_name]
elems = dir(mod)
if '__all__' in elems:
document_these = [(attr_name, getattr(mod, attr_name))
for attr_name in mod.__all__]
else:
document_these = [(attr_name, getattr(mod, attr_name))
for attr_name in elems
if (not attr_name.startswith('_') and
not inspect.ismodule(getattr(mod, attr_name)))]
def is_from_this_module(arg):
name = arg[0]
obj = arg[1]
return (hasattr(obj, '__module__') and
obj.__module__ == mod.__name__)
sort_order = {'class': 2, 'function': 1}
# Sort them according to sort_order dict.
def sort_key(arg):
name = arg[0]
obj = arg[1]
return sort_order.get(lookup_object_type(obj), 0)
document_these = filter(is_from_this_module, document_these)
document_these = sorted(document_these, key=sort_key)
lines = []
for element, obj in document_these:
object_name = import_name + '.' + element
obj_content = document_dict[lookup_object_type(obj)].format(
object_name=object_name,
object_name_header_line='+' * len(object_name),
object_docstring=inspect.getdoc(obj))
lines.append(obj_content)
lines = horizontal_sep.join(lines)
module_elements = '\n'.join(' * :py:obj:`{}`'.format(element)
for element, obj in document_these)
lines = r'''.. _{import_name}:
{title_underline}
{title}
{title_underline}
{sidebar}
.. currentmodule:: {root_package}
.. automodule:: {import_name}
In this module:
{module_elements}
''' + lines
if package_toc:
sidebar = '''
.. sidebar:: Modules in this package
{package_toc_tree}
'''.format(package_toc_tree=package_toc)
else:
sidebar = ''
return lines.format(title=title or import_name,
title_underline='=' * len(title or import_name),
import_name=import_name, root_package=root_package,
sidebar=sidebar, module_elements=module_elements)
def auto_doc_package(file_path, import_name, root_package, sub_packages):
max_depth = 1 if import_name == 'iris' else 2
package_toc = '\n '.join(sub_packages)
package_toc = '''
.. toctree::
:maxdepth: {:d}
:titlesonly:
{}
'''.format(max_depth, package_toc)
if '.' in import_name:
title = None
else:
title = import_name.capitalize() + ' reference documentation'
return auto_doc_module(file_path, import_name, root_package,
package_toc=package_toc, title=title)
def auto_package_build(app):
root_package = app.config.autopackage_name
if root_package is None:
raise ValueError('set the autopackage_name variable in the '
'conf.py file')
if not isinstance(root_package, list):
raise ValueError('autopackage was expecting a list of packages to '
'document e.g. ["itertools"]')
for package in root_package:
do_package(package)
def do_package(package_name):
out_dir = package_name + os.path.sep
# Import the root package. If this fails then an import error will be
# raised.
module = __import__(package_name)
root_package = package_name
rootdir = os.path.dirname(module.__file__)
package_folder = []
module_folders = {}
for root, subFolders, files in os.walk(rootdir):
for fname in files:
name, ext = os.path.splitext(fname)
# Skip some non-relevant files.
if (fname.startswith('.') or fname.startswith('#') or
re.search('^_[^_]', fname) or fname.find('.svn') >= 0 or
not (ext in ['.py', '.so'])):
continue
rel_path = root_package + \
os.path.join(root, fname).split(rootdir)[-1]
mod_folder = root_package + \
os.path.join(root).split(rootdir)[-1].replace('/', '.')
# Only add this package to folder list if it contains an __init__
# script.
if name == '__init__':
package_folder.append([mod_folder, rel_path])
else:
import_name = mod_folder + '.' + name
mf_list = module_folders.setdefault(mod_folder, [])
mf_list.append((import_name, rel_path))
if not os.path.exists(out_dir):
os.makedirs(out_dir)
for package, package_path in package_folder:
if '._' in package or 'test' in package:
continue
paths = []
for spackage, spackage_path in package_folder:
            # Ignore this package, packages that are not children of this
# one, test packages, private packages, and packages that are
# subpackages of subpackages (they'll be part of the subpackage).
if spackage == package:
continue
if not spackage.startswith(package):
continue
if spackage.count('.') > package.count('.') + 1:
continue
if 'test' in spackage:
continue
split_path = spackage.rsplit('.', 2)[-2:]
if any(part[0] == '_' for part in split_path):
continue
paths.append(os.path.join(*split_path) + '.rst')
paths.extend(os.path.join(os.path.basename(os.path.dirname(path)),
os.path.splitext(os.path.basename(path))[0])
for imp_name, path in module_folders.get(package, []))
paths.sort()
doc = auto_doc_package(package_path, package, root_package, paths)
package_dir = out_dir + package.replace('.', os.path.sep)
if not os.path.exists(package_dir):
os.makedirs(out_dir + package.replace('.', os.path.sep))
out_path = package_dir + '.rst'
if not os.path.exists(out_path):
print('Creating non-existent document {} ...'.format(out_path))
with open(out_path, 'w') as fh:
fh.write(doc)
else:
with open(out_path, 'r') as fh:
existing_content = ''.join(fh.readlines())
if doc != existing_content:
print('Creating out of date document {} ...'.format(
out_path))
with open(out_path, 'w') as fh:
fh.write(doc)
for import_name, module_path in module_folders.get(package, []):
doc = auto_doc_module(module_path, import_name, root_package)
out_path = out_dir + import_name.replace('.', os.path.sep) + '.rst'
if not os.path.exists(out_path):
print('Creating non-existent document {} ...'.format(
out_path))
with open(out_path, 'w') as fh:
fh.write(doc)
else:
with open(out_path, 'r') as fh:
existing_content = ''.join(fh.readlines())
if doc != existing_content:
print('Creating out of date document {} ...'.format(
out_path))
with open(out_path, 'w') as fh:
fh.write(doc)
def setup(app):
app.connect('builder-inited', auto_package_build)
app.add_config_value('autopackage_name', None, 'env')
|
gpl-3.0
| -6,598,868,696,977,033,000 | 5,345,338,270,410,826,000 | 30.275168 | 79 | 0.556009 | false |
lmregus/Portfolio
|
python/design_patterns/env/lib/python3.7/site-packages/sphinx/util/i18n.py
|
1
|
10576
|
"""
sphinx.util.i18n
~~~~~~~~~~~~~~~~
    Utility functions for internationalization (message catalogs, localized date formatting).
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import gettext
import os
import re
import warnings
from collections import namedtuple
from datetime import datetime
from os import path
import babel.dates
from babel.messages.mofile import write_mo
from babel.messages.pofile import read_po
from sphinx.deprecation import RemovedInSphinx30Warning
from sphinx.errors import SphinxError
from sphinx.locale import __
from sphinx.util import logging
from sphinx.util.matching import Matcher
from sphinx.util.osutil import SEP, relpath
logger = logging.getLogger(__name__)
if False:
# For type annotation
from typing import Callable, List, Set # NOQA
from sphinx.environment import BuildEnvironment # NOQA
LocaleFileInfoBase = namedtuple('CatalogInfo', 'base_dir,domain,charset')
class CatalogInfo(LocaleFileInfoBase):
@property
def po_file(self):
# type: () -> str
return self.domain + '.po'
@property
def mo_file(self):
# type: () -> str
return self.domain + '.mo'
@property
def po_path(self):
# type: () -> str
return path.join(self.base_dir, self.po_file)
@property
def mo_path(self):
# type: () -> str
return path.join(self.base_dir, self.mo_file)
def is_outdated(self):
# type: () -> bool
return (
not path.exists(self.mo_path) or
path.getmtime(self.mo_path) < path.getmtime(self.po_path))
def write_mo(self, locale):
# type: (str) -> None
with open(self.po_path, encoding=self.charset) as file_po:
try:
po = read_po(file_po, locale)
except Exception as exc:
logger.warning(__('reading error: %s, %s'), self.po_path, exc)
return
with open(self.mo_path, 'wb') as file_mo:
try:
write_mo(file_mo, po)
except Exception as exc:
logger.warning(__('writing error: %s, %s'), self.mo_path, exc)
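# Illustrative sketch (editor's note, not part of the original module; the
# directory below is hypothetical):
#
#     >>> cat = CatalogInfo('/project/locale/de/LC_MESSAGES', 'sphinx', 'utf-8')
#     >>> cat.po_file, cat.mo_file
#     ('sphinx.po', 'sphinx.mo')
#     >>> cat.po_path   # base_dir joined with po_file (POSIX paths assumed)
#     '/project/locale/de/LC_MESSAGES/sphinx.po'
#
# write_mo() recompiles the .po catalog into the sibling .mo file; callers
# typically guard it with is_outdated().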
def find_catalog(docname, compaction):
# type: (str, bool) -> str
if compaction:
ret = docname.split(SEP, 1)[0]
else:
ret = docname
return ret
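# Illustrative sketch (editor's note, not part of the original module): with
# compaction enabled every document under a top-level directory shares one
# catalog domain, otherwise the docname itself is the domain:
#
#     >>> find_catalog('usage/quickstart', compaction=True)
#     'usage'
#     >>> find_catalog('usage/quickstart', compaction=False)
#     'usage/quickstart'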
def find_catalog_files(docname, srcdir, locale_dirs, lang, compaction):
# type: (str, str, List[str], str, bool) -> List[str]
if not(lang and locale_dirs):
return []
domain = find_catalog(docname, compaction)
files = [gettext.find(domain, path.join(srcdir, dir_), [lang])
for dir_ in locale_dirs]
files = [relpath(f, srcdir) for f in files if f]
return files
def find_catalog_source_files(locale_dirs, locale, domains=None, gettext_compact=None,
charset='utf-8', force_all=False,
excluded=Matcher([])):
# type: (List[str], str, List[str], bool, str, bool, Matcher) -> Set[CatalogInfo]
"""
:param list locale_dirs:
list of path as `['locale_dir1', 'locale_dir2', ...]` to find
translation catalogs. Each path contains a structure such as
`<locale>/LC_MESSAGES/domain.po`.
:param str locale: a language as `'en'`
:param list domains: list of domain names to get. If empty list or None
is specified, get all domain names. default is None.
:param boolean force_all:
Set True if you want to get all catalogs rather than updated catalogs.
default is False.
:return: [CatalogInfo(), ...]
"""
if gettext_compact is not None:
warnings.warn('gettext_compact argument for find_catalog_source_files() '
'is deprecated.', RemovedInSphinx30Warning, stacklevel=2)
catalogs = set() # type: Set[CatalogInfo]
if not locale:
return catalogs # locale is not specified
for locale_dir in locale_dirs:
if not locale_dir:
continue # skip system locale directory
base_dir = path.join(locale_dir, locale, 'LC_MESSAGES')
if not path.exists(base_dir):
continue # locale path is not found
for dirpath, dirnames, filenames in os.walk(base_dir, followlinks=True):
filenames = [f for f in filenames if f.endswith('.po')]
for filename in filenames:
if excluded(path.join(relpath(dirpath, base_dir), filename)):
continue
base = path.splitext(filename)[0]
domain = relpath(path.join(dirpath, base), base_dir).replace(path.sep, SEP)
if domains and domain not in domains:
continue
cat = CatalogInfo(base_dir, domain, charset)
if force_all or cat.is_outdated():
catalogs.add(cat)
return catalogs
# date_format mappings: ustrftime() to babel.dates.format_datetime()
date_format_mappings = {
'%a': 'EEE', # Weekday as locale’s abbreviated name.
'%A': 'EEEE', # Weekday as locale’s full name.
'%b': 'MMM', # Month as locale’s abbreviated name.
'%B': 'MMMM', # Month as locale’s full name.
'%c': 'medium', # Locale’s appropriate date and time representation.
'%-d': 'd', # Day of the month as a decimal number.
'%d': 'dd', # Day of the month as a zero-padded decimal number.
'%-H': 'H', # Hour (24-hour clock) as a decimal number [0,23].
'%H': 'HH', # Hour (24-hour clock) as a zero-padded decimal number [00,23].
'%-I': 'h', # Hour (12-hour clock) as a decimal number [1,12].
'%I': 'hh', # Hour (12-hour clock) as a zero-padded decimal number [01,12].
'%-j': 'D', # Day of the year as a decimal number.
'%j': 'DDD', # Day of the year as a zero-padded decimal number.
'%-m': 'M', # Month as a decimal number.
'%m': 'MM', # Month as a zero-padded decimal number.
'%-M': 'm', # Minute as a decimal number [0,59].
'%M': 'mm', # Minute as a zero-padded decimal number [00,59].
'%p': 'a', # Locale’s equivalent of either AM or PM.
'%-S': 's', # Second as a decimal number.
'%S': 'ss', # Second as a zero-padded decimal number.
'%U': 'WW', # Week number of the year (Sunday as the first day of the week)
# as a zero padded decimal number. All days in a new year preceding
# the first Sunday are considered to be in week 0.
'%w': 'e', # Weekday as a decimal number, where 0 is Sunday and 6 is Saturday.
'%-W': 'W', # Week number of the year (Monday as the first day of the week)
# as a decimal number. All days in a new year preceding the first
# Monday are considered to be in week 0.
'%W': 'WW', # Week number of the year (Monday as the first day of the week)
# as a zero-padded decimal number.
'%x': 'medium', # Locale’s appropriate date representation.
'%X': 'medium', # Locale’s appropriate time representation.
'%y': 'YY', # Year without century as a zero-padded decimal number.
'%Y': 'YYYY', # Year with century as a decimal number.
'%Z': 'zzzz', # Time zone name (no characters if no time zone exists).
'%%': '%',
}
date_format_re = re.compile('(%s)' % '|'.join(date_format_mappings))
def babel_format_date(date, format, locale, formatter=babel.dates.format_date):
# type: (datetime, str, str, Callable) -> str
if locale is None:
locale = 'en'
    # Check if we have the tzinfo attribute. If not, we cannot do any time
# related formats.
if not hasattr(date, 'tzinfo'):
formatter = babel.dates.format_date
try:
return formatter(date, format, locale=locale)
except (ValueError, babel.core.UnknownLocaleError):
# fallback to English
return formatter(date, format, locale='en')
except AttributeError:
logger.warning(__('Invalid date format. Quote the string by single quote '
'if you want to output it directly: %s'), format)
return format
def format_date(format, date=None, language=None):
# type: (str, datetime, str) -> str
if date is None:
# If time is not specified, try to use $SOURCE_DATE_EPOCH variable
# See https://wiki.debian.org/ReproducibleBuilds/TimestampsProposal
source_date_epoch = os.getenv('SOURCE_DATE_EPOCH')
if source_date_epoch is not None:
date = datetime.utcfromtimestamp(float(source_date_epoch))
else:
date = datetime.now()
result = []
tokens = date_format_re.split(format)
for token in tokens:
if token in date_format_mappings:
babel_format = date_format_mappings.get(token, '')
            # Check if we have to use a different babel formatter than
# format_datetime, because we only want to format a date
# or a time.
if token == '%x':
function = babel.dates.format_date
elif token == '%X':
function = babel.dates.format_time
else:
function = babel.dates.format_datetime
result.append(babel_format_date(date, babel_format, locale=language,
formatter=function))
else:
result.append(token)
return "".join(result)
def get_image_filename_for_language(filename, env):
# type: (str, BuildEnvironment) -> str
if not env.config.language:
return filename
filename_format = env.config.figure_language_filename
d = dict()
d['root'], d['ext'] = path.splitext(filename)
dirname = path.dirname(d['root'])
if dirname and not dirname.endswith(path.sep):
dirname += path.sep
d['path'] = dirname
d['basename'] = path.basename(d['root'])
d['language'] = env.config.language
try:
return filename_format.format(**d)
except KeyError as exc:
raise SphinxError('Invalid figure_language_filename: %r' % exc)
def search_image_for_language(filename, env):
# type: (str, BuildEnvironment) -> str
if not env.config.language:
return filename
translated = get_image_filename_for_language(filename, env)
dirname = path.dirname(env.docname)
if path.exists(path.join(env.srcdir, dirname, translated)):
return translated
else:
return filename
|
mit
| 7,869,881,178,573,197,000 | 6,685,799,515,575,873,000 | 36.052632 | 91 | 0.59697 | false |
ojengwa/sympy
|
sympy/logic/boolalg.py
|
2
|
41626
|
"""
Boolean algebra module for SymPy
"""
from __future__ import print_function, division
from collections import defaultdict
from itertools import product, islice
from sympy.core.basic import Basic
from sympy.core.cache import cacheit
from sympy.core.numbers import Number
from sympy.core.decorators import deprecated
from sympy.core.operations import LatticeOp, AssocOp
from sympy.core.function import Application
from sympy.core.compatibility import ordered, xrange, with_metaclass
from sympy.core.sympify import converter, _sympify, sympify
from sympy.core.singleton import Singleton, S
from sympy.utilities.iterables import multiset
class Boolean(Basic):
"""A boolean object is an object for which logic operations make sense."""
__slots__ = []
def __and__(self, other):
"""Overloading for & operator"""
return And(self, other)
__rand__ = __and__
def __or__(self, other):
"""Overloading for |"""
return Or(self, other)
__ror__ = __or__
def __invert__(self):
"""Overloading for ~"""
return Not(self)
def __rshift__(self, other):
"""Overloading for >>"""
return Implies(self, other)
def __lshift__(self, other):
"""Overloading for <<"""
return Implies(other, self)
__rrshift__ = __lshift__
__rlshift__ = __rshift__
def __xor__(self, other):
return Xor(self, other)
__rxor__ = __xor__
# Developer note: There is liable to be some confusion as to when True should
# be used and when S.true should be used in various contexts throughout SymPy.
# An important thing to remember is that sympify(True) returns S.true. This
# means that for the most part, you can just use True and it will
# automatically be converted to S.true when necessary, similar to how you can
# generally use 1 instead of S.One.
# The rule of thumb is:
# "If the boolean in question can be replaced by an arbitrary symbolic
# Boolean, like Or(x, y) or x > 1, use S.true. Otherwise, use True"
# In other words, use S.true only on those contexts where the boolean is being
# used as a symbolic representation of truth. For example, if the object ends
# up in the .args of any expression, then it must necessarily be S.true
# instead of True, as elements of .args must be Basic. On the other hand, ==
# is not a symbolic operation in SymPy, since it always returns True or False,
# and does so in terms of structural equality rather than mathematical, so it
# should return True. The assumptions system should use True and False. Aside
# from not satisfying the above rule of thumb, the assumptions system uses a
# three-valued logic (True, False, None), whereas S.true and S.false represent
# a two-valued logic. When in doubt, use True.
# 2. "S.true == True" is True.
# While "S.true is True" is False, "S.true == True" is True, so if there is
# any doubt over whether a function or expression will return S.true or True,
# just use "==" instead of "is" to do the comparison, and it will work in
# either case. Finally, for boolean flags, it's better to just use "if x"
# instead of "if x is True". To quote PEP 8:
# Don't compare boolean values to True or False using ==.
# Yes: if greeting:
# No: if greeting == True:
# Worse: if greeting is True:
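# Illustrative sketch (editor's note, not part of the original module),
# restating the points above as code:
#
#     >>> from sympy import S, sympify
#     >>> sympify(True) is S.true    # bool is converted to the singleton
#     True
#     >>> S.true == True             # structural comparison still holds
#     True
#     >>> ~S.true                    # symbolic negation
#     False
#     >>> ~True                      # Python bitwise not on int(True)
#     -2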
class BooleanAtom(Boolean):
"""
Base class of BooleanTrue and BooleanFalse.
"""
class BooleanTrue(with_metaclass(Singleton, BooleanAtom)):
"""
SymPy version of True.
The instances of this class are singletonized and can be accessed via
S.true.
This is the SymPy version of True, for use in the logic module. The
primary advantage of using true instead of True is that shorthand boolean
operations like ~ and >> will work as expected on this class, whereas with
True they act bitwise on 1. Functions in the logic module will return this
class when they evaluate to true.
Examples
========
>>> from sympy import sympify, true, Or
>>> sympify(True)
True
>>> ~true
False
>>> ~True
-2
>>> Or(True, False)
True
See Also
========
sympy.logic.boolalg.BooleanFalse
"""
def __nonzero__(self):
return True
__bool__ = __nonzero__
def __hash__(self):
return hash(True)
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> from sympy import true
>>> true.as_set()
UniversalSet()
"""
return S.UniversalSet
class BooleanFalse(with_metaclass(Singleton, BooleanAtom)):
"""
SymPy version of False.
The instances of this class are singletonized and can be accessed via
S.false.
This is the SymPy version of False, for use in the logic module. The
primary advantage of using false instead of False is that shorthand boolean
operations like ~ and >> will work as expected on this class, whereas with
False they act bitwise on 0. Functions in the logic module will return this
class when they evaluate to false.
Examples
========
>>> from sympy import sympify, false, Or, true
>>> sympify(False)
False
>>> false >> false
True
>>> False >> False
0
>>> Or(True, False)
True
See Also
========
sympy.logic.boolalg.BooleanTrue
"""
def __nonzero__(self):
return False
__bool__ = __nonzero__
def __hash__(self):
return hash(False)
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> from sympy import false
>>> false.as_set()
EmptySet()
"""
from sympy.core.sets import EmptySet
return EmptySet()
true = BooleanTrue()
false = BooleanFalse()
# We want S.true and S.false to work, rather than S.BooleanTrue and
# S.BooleanFalse, but making the class and instance names the same causes some
# major issues (like the inability to import the class directly from this
# file).
S.true = true
S.false = false
converter[bool] = lambda x: S.true if x else S.false
class BooleanFunction(Application, Boolean):
"""Boolean function is a function that lives in a boolean space
It is used as base class for And, Or, Not, etc.
"""
is_Boolean = True
def __call__(self, *args):
return self.func(*[arg(*args) for arg in self.args])
def _eval_simplify(self, ratio, measure):
return simplify_logic(self)
class And(LatticeOp, BooleanFunction):
"""
Logical AND function.
It evaluates its arguments in order, giving False immediately
if any of them are False, and True if they are all True.
Examples
========
>>> from sympy.core import symbols
>>> from sympy.abc import x, y
>>> from sympy.logic.boolalg import And
>>> x & y
And(x, y)
Notes
=====
The ``&`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise
and. Hence, ``And(a, b)`` and ``a & b`` will return different things if
``a`` and ``b`` are integers.
>>> And(x, y).subs(x, 1)
y
"""
zero = false
identity = true
nargs = None
@classmethod
def _new_args_filter(cls, args):
newargs = []
for x in args:
if isinstance(x, Number) or x in (0, 1):
newargs.append(True if x else False)
else:
newargs.append(x)
return LatticeOp._new_args_filter(newargs, And)
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> from sympy import And, Symbol
>>> x = Symbol('x', real=True)
>>> And(x<2, x>-2).as_set()
(-2, 2)
"""
from sympy.core.sets import Intersection
if len(self.free_symbols) == 1:
return Intersection(*[arg.as_set() for arg in self.args])
else:
raise NotImplementedError("Sorry, And.as_set has not yet been"
" implemented for multivariate"
" expressions")
class Or(LatticeOp, BooleanFunction):
"""
Logical OR function
It evaluates its arguments in order, giving True immediately
if any of them are True, and False if they are all False.
Examples
========
>>> from sympy.core import symbols
>>> from sympy.abc import x, y
>>> from sympy.logic.boolalg import Or
>>> x | y
Or(x, y)
Notes
=====
The ``|`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise
or. Hence, ``Or(a, b)`` and ``a | b`` will return different things if
``a`` and ``b`` are integers.
>>> Or(x, y).subs(x, 0)
y
"""
zero = true
identity = false
@classmethod
def _new_args_filter(cls, args):
newargs = []
for x in args:
if isinstance(x, Number) or x in (0, 1):
newargs.append(True if x else False)
else:
newargs.append(x)
return LatticeOp._new_args_filter(newargs, Or)
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> from sympy import Or, Symbol
>>> x = Symbol('x', real=True)
>>> Or(x>2, x<-2).as_set()
(-oo, -2) U (2, oo)
"""
from sympy.core.sets import Union
if len(self.free_symbols) == 1:
return Union(*[arg.as_set() for arg in self.args])
else:
raise NotImplementedError("Sorry, Or.as_set has not yet been"
" implemented for multivariate"
" expressions")
class Not(BooleanFunction):
"""
Logical Not function (negation)
Returns True if the statement is False
Returns False if the statement is True
Examples
========
>>> from sympy.logic.boolalg import Not, And, Or
>>> from sympy.abc import x
>>> Not(True)
False
>>> Not(False)
True
>>> Not(And(True, False))
True
>>> Not(Or(True, False))
False
>>> Not(And(And(True, x), Or(x, False)))
Not(x)
>>> ~x
Not(x)
Notes
=====
- De Morgan rules are applied automatically.
- The ``~`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise
not. In particular, ``~a`` and ``Not(a)`` will be different if ``a`` is
an integer. Furthermore, since bools in Python subclass from ``int``,
``~True`` is the same as ``~1`` which is ``-2``, which has a boolean
value of True. To avoid this issue, use the SymPy boolean types
``true`` and ``false``.
>>> from sympy import true
>>> ~True
-2
>>> ~true
False
"""
is_Not = True
@classmethod
def eval(cls, arg):
if isinstance(arg, Number) or arg in (True, False):
return false if arg else true
# apply De Morgan Rules
if arg.func is And:
return Or(*[Not(a) for a in arg.args])
if arg.func is Or:
return And(*[Not(a) for a in arg.args])
if arg.func is Not:
return arg.args[0]
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> from sympy import Not, Symbol
>>> x = Symbol('x', real=True)
>>> Not(x>0).as_set()
(-oo, 0]
"""
if len(self.free_symbols) == 1:
return self.args[0].as_set().complement
else:
raise NotImplementedError("Sorry, Not.as_set has not yet been"
" implemented for mutivariate"
" expressions")
class Xor(BooleanFunction):
"""
Logical XOR (exclusive OR) function.
Returns True if an odd number of the arguments are True and the rest are
False.
Returns False if an even number of the arguments are True and the rest are
False.
Examples
========
>>> from sympy.logic.boolalg import Xor
>>> from sympy import symbols
>>> x, y = symbols('x y')
>>> Xor(True, False)
True
>>> Xor(True, True)
False
>>> Xor(True, False, True, True, False)
True
>>> Xor(True, False, True, False)
False
>>> x ^ y
Or(And(Not(x), y), And(Not(y), x))
Notes
=====
The ``^`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise xor. In
particular, ``a ^ b`` and ``Xor(a, b)`` will be different if ``a`` and
``b`` are integers.
>>> Xor(x, y).subs(y, 0)
x
"""
def __new__(cls, *args, **options):
args = [_sympify(arg) for arg in args]
argset = multiset(args) # dictionary
args_final=[]
        # Xor is commutative; an even number of occurrences of x cancels to
        # false, an odd number reduces to x. Here x can be True, False or any symbol.
for x, freq in argset.items():
if freq % 2 == 0:
argset[x] = false
else:
argset[x] = x
for _, z in argset.items():
args_final.append(z)
argset = set(args_final)
truecount = 0
for x in args:
if isinstance(x, Number) or x in [True, False]: # Includes 0, 1
argset.discard(x)
if x:
truecount += 1
if len(argset) < 1:
return true if truecount % 2 != 0 else false
if truecount % 2 != 0:
return Not(Xor(*argset))
_args = frozenset(argset)
obj = super(Xor, cls).__new__(cls, *_args, **options)
if isinstance(obj, Xor):
obj._argset = _args
return obj
@property
@cacheit
def args(self):
return tuple(ordered(self._argset))
@classmethod
def eval(cls, *args):
if not args:
return false
args = list(args)
A = args.pop()
while args:
B = args.pop()
A = Or(And(A, Not(B)), And(Not(A), B))
return A
class Nand(BooleanFunction):
"""
Logical NAND function.
It evaluates its arguments in order, giving True immediately if any
of them are False, and False if they are all True.
Returns True if any of the arguments are False
Returns False if all arguments are True
Examples
========
>>> from sympy.logic.boolalg import Nand
>>> from sympy import symbols
>>> x, y = symbols('x y')
>>> Nand(False, True)
True
>>> Nand(True, True)
False
>>> Nand(x, y)
Or(Not(x), Not(y))
"""
@classmethod
def eval(cls, *args):
return Not(And(*args))
class Nor(BooleanFunction):
"""
Logical NOR function.
It evaluates its arguments in order, giving False immediately if any
of them are True, and True if they are all False.
Returns False if any argument is True
Returns True if all arguments are False
Examples
========
>>> from sympy.logic.boolalg import Nor
>>> from sympy import symbols
>>> x, y = symbols('x y')
>>> Nor(True, False)
False
>>> Nor(True, True)
False
>>> Nor(False, True)
False
>>> Nor(False, False)
True
>>> Nor(x, y)
And(Not(x), Not(y))
"""
@classmethod
def eval(cls, *args):
return Not(Or(*args))
class Implies(BooleanFunction):
"""
Logical implication.
A implies B is equivalent to !A v B
Accepts two Boolean arguments; A and B.
Returns False if A is True and B is False
Returns True otherwise.
Examples
========
>>> from sympy.logic.boolalg import Implies
>>> from sympy import symbols
>>> x, y = symbols('x y')
>>> Implies(True, False)
False
>>> Implies(False, False)
True
>>> Implies(True, True)
True
>>> Implies(False, True)
True
>>> x >> y
Implies(x, y)
>>> y << x
Implies(x, y)
Notes
=====
The ``>>`` and ``<<`` operators are provided as a convenience, but note
that their use here is different from their normal use in Python, which is
bit shifts. Hence, ``Implies(a, b)`` and ``a >> b`` will return different
things if ``a`` and ``b`` are integers. In particular, since Python
considers ``True`` and ``False`` to be integers, ``True >> True`` will be
the same as ``1 >> 1``, i.e., 0, which has a truth value of False. To
avoid this issue, use the SymPy objects ``true`` and ``false``.
>>> from sympy import true, false
>>> True >> False
1
>>> true >> false
False
"""
@classmethod
def eval(cls, *args):
try:
newargs = []
for x in args:
if isinstance(x, Number) or x in (0, 1):
newargs.append(True if x else False)
else:
newargs.append(x)
A, B = newargs
except ValueError:
raise ValueError(
"%d operand(s) used for an Implies "
"(pairs are required): %s" % (len(args), str(args)))
if A == True or A == False or B == True or B == False:
return Or(Not(A), B)
else:
return Basic.__new__(cls, *args)
class Equivalent(BooleanFunction):
"""
Equivalence relation.
Equivalent(A, B) is True iff A and B are both True or both False
Returns True if all of the arguments are logically equivalent.
Returns False otherwise.
Examples
========
>>> from sympy.logic.boolalg import Equivalent, And
>>> from sympy.abc import x, y
>>> Equivalent(False, False, False)
True
>>> Equivalent(True, False, False)
False
>>> Equivalent(x, And(x, True))
True
"""
def __new__(cls, *args, **options):
args = [_sympify(arg) for arg in args]
argset = set(args)
for x in args:
if isinstance(x, Number) or x in [True, False]: # Includes 0, 1
argset.discard(x)
argset.add(True if x else False)
if len(argset) <= 1:
return true
if True in argset:
argset.discard(True)
return And(*argset)
if False in argset:
argset.discard(False)
return Nor(*argset)
_args = frozenset(argset)
obj = super(Equivalent, cls).__new__(cls, _args)
obj._argset = _args
return obj
@property
@cacheit
def args(self):
return tuple(ordered(self._argset))
class ITE(BooleanFunction):
"""
If then else clause.
ITE(A, B, C) evaluates and returns the result of B if A is true
else it returns the result of C
Examples
========
>>> from sympy.logic.boolalg import ITE, And, Xor, Or
>>> from sympy.abc import x, y, z
>>> ITE(True, False, True)
False
>>> ITE(Or(True, False), And(True, True), Xor(True, True))
True
>>> ITE(x, y, z)
Or(And(Not(x), z), And(x, y))
"""
@classmethod
def eval(cls, *args):
args = list(args)
if len(args) == 3:
return Or(And(args[0], args[1]), And(Not(args[0]), args[2]))
raise ValueError("ITE expects 3 arguments, but got %d: %s" %
(len(args), str(args)))
### end class definitions. Some useful methods
def conjuncts(expr):
"""Return a list of the conjuncts in the expr s.
Examples
========
>>> from sympy.logic.boolalg import conjuncts
>>> from sympy.abc import A, B
>>> conjuncts(A & B)
frozenset([A, B])
>>> conjuncts(A | B)
frozenset([Or(A, B)])
"""
return And.make_args(expr)
def disjuncts(expr):
"""Return a list of the disjuncts in the sentence s.
Examples
========
>>> from sympy.logic.boolalg import disjuncts
>>> from sympy.abc import A, B
>>> disjuncts(A | B)
frozenset([A, B])
>>> disjuncts(A & B)
frozenset([And(A, B)])
"""
return Or.make_args(expr)
def distribute_and_over_or(expr):
"""
Given a sentence s consisting of conjunctions and disjunctions
of literals, return an equivalent sentence in CNF.
Examples
========
>>> from sympy.logic.boolalg import distribute_and_over_or, And, Or, Not
>>> from sympy.abc import A, B, C
>>> distribute_and_over_or(Or(A, And(Not(B), Not(C))))
And(Or(A, Not(B)), Or(A, Not(C)))
"""
return _distribute((expr, And, Or))
def distribute_or_over_and(expr):
"""
Given a sentence s consisting of conjunctions and disjunctions
of literals, return an equivalent sentence in DNF.
Note that the output is NOT simplified.
Examples
========
>>> from sympy.logic.boolalg import distribute_or_over_and, And, Or, Not
>>> from sympy.abc import A, B, C
>>> distribute_or_over_and(And(Or(Not(A), B), C))
Or(And(B, C), And(C, Not(A)))
"""
return _distribute((expr, Or, And))
def _distribute(info):
"""
Distributes info[1] over info[2] with respect to info[0].
"""
if info[0].func is info[2]:
for arg in info[0].args:
if arg.func is info[1]:
conj = arg
break
else:
return info[0]
rest = info[2](*[a for a in info[0].args if a is not conj])
return info[1](*list(map(_distribute,
[(info[2](c, rest), info[1], info[2]) for c in conj.args])))
elif info[0].func is info[1]:
return info[1](*list(map(_distribute,
[(x, info[1], info[2]) for x in info[0].args])))
else:
return info[0]
def to_cnf(expr, simplify=False):
"""
Convert a propositional logical sentence s to conjunctive normal form.
That is, of the form ((A | ~B | ...) & (B | C | ...) & ...)
If simplify is True, the expr is evaluated to its simplest CNF form.
Examples
========
>>> from sympy.logic.boolalg import to_cnf
>>> from sympy.abc import A, B, D
>>> to_cnf(~(A | B) | D)
And(Or(D, Not(A)), Or(D, Not(B)))
>>> to_cnf((A | B) & (A | ~A), True)
Or(A, B)
"""
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
if simplify:
return simplify_logic(expr, 'cnf', True)
# Don't convert unless we have to
if is_cnf(expr):
return expr
expr = eliminate_implications(expr)
return distribute_and_over_or(expr)
def to_dnf(expr, simplify=False):
"""
Convert a propositional logical sentence s to disjunctive normal form.
That is, of the form ((A & ~B & ...) | (B & C & ...) | ...)
If simplify is True, the expr is evaluated to its simplest DNF form.
Examples
========
>>> from sympy.logic.boolalg import to_dnf
>>> from sympy.abc import A, B, C
>>> to_dnf(B & (A | C))
Or(And(A, B), And(B, C))
>>> to_dnf((A & B) | (A & ~B) | (B & C) | (~B & C), True)
Or(A, C)
"""
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
if simplify:
return simplify_logic(expr, 'dnf', True)
# Don't convert unless we have to
if is_dnf(expr):
return expr
expr = eliminate_implications(expr)
return distribute_or_over_and(expr)
def is_cnf(expr):
"""
Test whether or not an expression is in conjunctive normal form.
Examples
========
>>> from sympy.logic.boolalg import is_cnf
>>> from sympy.abc import A, B, C
>>> is_cnf(A | B | C)
True
>>> is_cnf(A & B & C)
True
>>> is_cnf((A & B) | C)
False
"""
return _is_form(expr, And, Or)
def is_dnf(expr):
"""
Test whether or not an expression is in disjunctive normal form.
Examples
========
>>> from sympy.logic.boolalg import is_dnf
>>> from sympy.abc import A, B, C
>>> is_dnf(A | B | C)
True
>>> is_dnf(A & B & C)
True
>>> is_dnf((A & B) | C)
True
>>> is_dnf(A & (B | C))
False
"""
return _is_form(expr, Or, And)
def _is_form(expr, function1, function2):
"""
Test whether or not an expression is of the required form.
"""
expr = sympify(expr)
# Special case of an Atom
if expr.is_Atom:
return True
# Special case of a single expression of function2
if expr.func is function2:
for lit in expr.args:
if lit.func is Not:
if not lit.args[0].is_Atom:
return False
else:
if not lit.is_Atom:
return False
return True
# Special case of a single negation
if expr.func is Not:
if not expr.args[0].is_Atom:
return False
if expr.func is not function1:
return False
for cls in expr.args:
if cls.is_Atom:
continue
if cls.func is Not:
if not cls.args[0].is_Atom:
return False
elif cls.func is not function2:
return False
for lit in cls.args:
if lit.func is Not:
if not lit.args[0].is_Atom:
return False
else:
if not lit.is_Atom:
return False
return True
def eliminate_implications(expr):
"""
Change >>, <<, and Equivalent into &, |, and ~. That is, return an
expression that is equivalent to s, but has only &, |, and ~ as logical
operators.
Examples
========
>>> from sympy.logic.boolalg import Implies, Equivalent, \
eliminate_implications
>>> from sympy.abc import A, B, C
>>> eliminate_implications(Implies(A, B))
Or(B, Not(A))
>>> eliminate_implications(Equivalent(A, B))
And(Or(A, Not(B)), Or(B, Not(A)))
>>> eliminate_implications(Equivalent(A, B, C))
And(Or(A, Not(C)), Or(B, Not(A)), Or(C, Not(B)))
"""
expr = sympify(expr)
if expr.is_Atom:
return expr # (Atoms are unchanged.)
args = list(map(eliminate_implications, expr.args))
if expr.func is Implies:
a, b = args[0], args[-1]
return (~a) | b
elif expr.func is Equivalent:
clauses = []
for a, b in zip(islice(args, None), islice(args, 1, None)):
clauses.append(Or(Not(a), b))
a, b = args[-1], args[0]
clauses.append(Or(Not(a), b))
return And(*clauses)
else:
return expr.func(*args)
@deprecated(
useinstead="sympify", issue=6550, deprecated_since_version="0.7.3")
def compile_rule(s):
"""
Transforms a rule into a SymPy expression
A rule is a string of the form "symbol1 & symbol2 | ..."
Note: This function is deprecated. Use sympify() instead.
"""
import re
return sympify(re.sub(r'([a-zA-Z_][a-zA-Z0-9_]*)', r'Symbol("\1")', s))
def to_int_repr(clauses, symbols):
"""
Takes clauses in CNF format and puts them into an integer representation.
Examples
========
>>> from sympy.logic.boolalg import to_int_repr
>>> from sympy.abc import x, y
>>> to_int_repr([x | y, y], [x, y]) == [set([1, 2]), set([2])]
True
"""
# Convert the symbol list into a dict
symbols = dict(list(zip(symbols, list(xrange(1, len(symbols) + 1)))))
def append_symbol(arg, symbols):
if arg.func is Not:
return -symbols[arg.args[0]]
else:
return symbols[arg]
return [set(append_symbol(arg, symbols) for arg in Or.make_args(c))
for c in clauses]
def _check_pair(minterm1, minterm2):
"""
Checks if a pair of minterms differs by only one bit. If yes, returns
index, else returns -1.
"""
index = -1
for x, (i, j) in enumerate(zip(minterm1, minterm2)):
if i != j:
if index == -1:
index = x
else:
return -1
return index
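# Illustrative sketch (editor's note, not part of the original module):
#
#     >>> _check_pair([1, 0, 0], [1, 1, 0])   # the terms differ only at index 1
#     1
#     >>> _check_pair([1, 0, 0], [0, 1, 0])   # two positions differ
#     -1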
def _convert_to_varsSOP(minterm, variables):
"""
    Converts a term in the expansion of a function from binary to its
variable form (for SOP).
"""
temp = []
for i, m in enumerate(minterm):
if m == 0:
temp.append(Not(variables[i]))
elif m == 1:
temp.append(variables[i])
else:
pass # ignore the 3s
return And(*temp)
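# Illustrative sketch (editor's note, not part of the original module): a
# minterm such as [1, 0, 3] over variables [x, y, z] becomes x & ~y; the
# value 3 marks an eliminated variable and is skipped.
#
#     >>> from sympy.abc import x, y, z
#     >>> _convert_to_varsSOP([1, 0, 3], [x, y, z])   # -> And of x and Not(y)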
def _convert_to_varsPOS(maxterm, variables):
"""
    Converts a term in the expansion of a function from binary to its
variable form (for POS).
"""
temp = []
for i, m in enumerate(maxterm):
if m == 1:
temp.append(Not(variables[i]))
elif m == 0:
temp.append(variables[i])
else:
pass # ignore the 3s
return Or(*temp)
def _simplified_pairs(terms):
"""
Reduces a set of minterms, if possible, to a simplified set of minterms
with one less variable in the terms using QM method.
"""
simplified_terms = []
todo = list(range(len(terms)))
for i, ti in enumerate(terms[:-1]):
for j_i, tj in enumerate(terms[(i + 1):]):
index = _check_pair(ti, tj)
if index != -1:
todo[i] = todo[j_i + i + 1] = None
newterm = ti[:]
newterm[index] = 3
if newterm not in simplified_terms:
simplified_terms.append(newterm)
simplified_terms.extend(
[terms[i] for i in [_ for _ in todo if _ is not None]])
return simplified_terms
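# Illustrative sketch (editor's note, not part of the original module): two
# minterms differing in exactly one position merge into a single term with a
# 3 ("don't care") in that position:
#
#     >>> _simplified_pairs([[0, 0, 1], [0, 1, 1]])
#     [[0, 3, 1]]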
def _compare_term(minterm, term):
"""
Return True if a binary term is satisfied by the given term. Used
for recognizing prime implicants.
"""
for i, x in enumerate(term):
if x != 3 and x != minterm[i]:
return False
return True
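# Illustrative sketch (editor's note, not part of the original module): a term
# covers a minterm when every non-3 position agrees:
#
#     >>> _compare_term([1, 0, 1], [1, 3, 3])
#     True
#     >>> _compare_term([0, 0, 1], [1, 3, 3])   # position 0 conflicts
#     False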
def _rem_redundancy(l1, terms):
"""
After the truth table has been sufficiently simplified, use the prime
implicant table method to recognize and eliminate redundant pairs,
and return the essential arguments.
"""
essential = []
for x in terms:
temporary = []
for y in l1:
if _compare_term(x, y):
temporary.append(y)
if len(temporary) == 1:
if temporary[0] not in essential:
essential.append(temporary[0])
for x in terms:
for y in essential:
if _compare_term(x, y):
break
else:
for z in l1:
if _compare_term(x, z):
if z not in essential:
essential.append(z)
break
return essential
def SOPform(variables, minterms, dontcares=None):
"""
The SOPform function uses simplified_pairs and a redundant group-
eliminating algorithm to convert the list of all input combos that
generate '1' (the minterms) into the smallest Sum of Products form.
The variables must be given as the first argument.
Return a logical Or function (i.e., the "sum of products" or "SOP"
form) that gives the desired outcome. If there are inputs that can
be ignored, pass them as a list, too.
The result will be one of the (perhaps many) functions that satisfy
the conditions.
Examples
========
>>> from sympy.logic import SOPform
>>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1],
... [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1]]
>>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
>>> SOPform(['w','x','y','z'], minterms, dontcares)
Or(And(Not(w), z), And(y, z))
References
==========
.. [1] en.wikipedia.org/wiki/Quine-McCluskey_algorithm
"""
from sympy.core.symbol import Symbol
variables = [sympify(v) for v in variables]
if minterms == []:
return false
minterms = [list(i) for i in minterms]
dontcares = [list(i) for i in (dontcares or [])]
for d in dontcares:
if d in minterms:
raise ValueError('%s in minterms is also in dontcares' % d)
old = None
new = minterms + dontcares
while new != old:
old = new
new = _simplified_pairs(old)
essential = _rem_redundancy(new, minterms)
return Or(*[_convert_to_varsSOP(x, variables) for x in essential])
def POSform(variables, minterms, dontcares=None):
"""
The POSform function uses simplified_pairs and a redundant-group
eliminating algorithm to convert the list of all input combinations
that generate '1' (the minterms) into the smallest Product of Sums form.
The variables must be given as the first argument.
Return a logical And function (i.e., the "product of sums" or "POS"
form) that gives the desired outcome. If there are inputs that can
be ignored, pass them as a list, too.
The result will be one of the (perhaps many) functions that satisfy
the conditions.
Examples
========
>>> from sympy.logic import POSform
>>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1],
... [1, 0, 1, 1], [1, 1, 1, 1]]
>>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
>>> POSform(['w','x','y','z'], minterms, dontcares)
And(Or(Not(w), y), z)
References
==========
.. [1] en.wikipedia.org/wiki/Quine-McCluskey_algorithm
"""
from sympy.core.symbol import Symbol
variables = [sympify(v) for v in variables]
if minterms == []:
return false
minterms = [list(i) for i in minterms]
dontcares = [list(i) for i in (dontcares or [])]
for d in dontcares:
if d in minterms:
raise ValueError('%s in minterms is also in dontcares' % d)
maxterms = []
for t in product([0, 1], repeat=len(variables)):
t = list(t)
if (t not in minterms) and (t not in dontcares):
maxterms.append(t)
old = None
new = maxterms + dontcares
while new != old:
old = new
new = _simplified_pairs(old)
essential = _rem_redundancy(new, maxterms)
return And(*[_convert_to_varsPOS(x, variables) for x in essential])
def _find_predicates(expr):
"""Helper to find logical predicates in BooleanFunctions.
A logical predicate is defined here as anything within a BooleanFunction
that is not a BooleanFunction itself.
"""
if not isinstance(expr, BooleanFunction):
return set([expr])
return set.union(*(_find_predicates(i) for i in expr.args))
def simplify_logic(expr, form=None, deep=True):
"""
This function simplifies a boolean function to its simplified version
in SOP or POS form. The return type is an Or or And object in SymPy.
Parameters
==========
expr : string or boolean expression
form : string ('cnf' or 'dnf') or None (default).
If 'cnf' or 'dnf', the simplest expression in the corresponding
normal form is returned; if None, the answer is returned
according to the form with fewest args (in CNF by default).
deep : boolean (default True)
indicates whether to recursively simplify any
non-boolean functions contained within the input.
Examples
========
>>> from sympy.logic import simplify_logic
>>> from sympy.abc import x, y, z
>>> from sympy import S
>>> b = '(~x & ~y & ~z) | ( ~x & ~y & z)'
>>> simplify_logic(b)
And(Not(x), Not(y))
>>> S(b)
Or(And(Not(x), Not(y), Not(z)), And(Not(x), Not(y), z))
>>> simplify_logic(_)
And(Not(x), Not(y))
"""
if form == 'cnf' or form == 'dnf' or form is None:
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
variables = _find_predicates(expr)
truthtable = []
for t in product([0, 1], repeat=len(variables)):
t = list(t)
if expr.xreplace(dict(zip(variables, t))) == True:
truthtable.append(t)
if deep:
from sympy.simplify.simplify import simplify
variables = [simplify(v) for v in variables]
if form == 'dnf' or \
(form is None and len(truthtable) >= (2 ** (len(variables) - 1))):
return SOPform(variables, truthtable)
elif form == 'cnf' or form is None:
return POSform(variables, truthtable)
else:
raise ValueError("form can be cnf or dnf only")
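# Illustrative usage sketch for the ``form`` argument; the expressions below
# are arbitrary examples chosen for this note, not taken from the docstring:
#
#     >>> from sympy.abc import x, y
#     >>> simplify_logic('x | (x & y)', form='dnf')
#     x
#     >>> simplify_logic('(x | y) & (x | ~y)', form='cnf')
#     x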
def _finger(eq):
"""
Assign a 5-item fingerprint to each symbol in the equation:
[
# of times it appeared as a Symbol,
# of times it appeared as a Not(symbol),
# of times it appeared as a Symbol in an And or Or,
# of times it appeared as a Not(Symbol) in an And or Or,
sum of the number of arguments with which it appeared,
counting Symbol as 1 and Not(Symbol) as 2
]
>>> from sympy.logic.boolalg import _finger as finger
>>> from sympy import And, Or, Not
>>> from sympy.abc import a, b, x, y
>>> eq = Or(And(Not(y), a), And(Not(y), b), And(x, y))
>>> dict(finger(eq))
{(0, 0, 1, 0, 2): [x], (0, 0, 1, 0, 3): [a, b], (0, 0, 1, 2, 8): [y]}
So y and x have unique fingerprints, but a and b do not.
"""
f = eq.free_symbols
d = dict(list(zip(f, [[0] * 5 for fi in f])))
for a in eq.args:
if a.is_Symbol:
d[a][0] += 1
elif a.is_Not:
d[a.args[0]][1] += 1
else:
o = len(a.args) + sum(ai.func is Not for ai in a.args)
for ai in a.args:
if ai.is_Symbol:
d[ai][2] += 1
d[ai][-1] += o
else:
d[ai.args[0]][3] += 1
d[ai.args[0]][-1] += o
inv = defaultdict(list)
for k, v in ordered(iter(d.items())):
inv[tuple(v)].append(k)
return inv
def bool_map(bool1, bool2):
"""
Return the simplified version of bool1, and the mapping of variables
that makes the two expressions bool1 and bool2 represent the same
logical behaviour for some correspondence between the variables
of each.
    If more than one mapping of this sort exists, one of them
    is returned.
For example, And(x, y) is logically equivalent to And(a, b) for
the mapping {x: a, y:b} or {x: b, y:a}.
If no such mapping exists, return False.
Examples
========
>>> from sympy import SOPform, bool_map, Or, And, Not, Xor
>>> from sympy.abc import w, x, y, z, a, b, c, d
>>> function1 = SOPform(['x','z','y'],[[1, 0, 1], [0, 0, 1]])
>>> function2 = SOPform(['a','b','c'],[[1, 0, 1], [1, 0, 0]])
>>> bool_map(function1, function2)
(And(Not(z), y), {y: a, z: b})
The results are not necessarily unique, but they are canonical. Here,
``(w, z)`` could be ``(a, d)`` or ``(d, a)``:
>>> eq = Or(And(Not(y), w), And(Not(y), z), And(x, y))
>>> eq2 = Or(And(Not(c), a), And(Not(c), d), And(b, c))
>>> bool_map(eq, eq2)
(Or(And(Not(y), w), And(Not(y), z), And(x, y)), {w: a, x: b, y: c, z: d})
>>> eq = And(Xor(a, b), c, And(c,d))
>>> bool_map(eq, eq.subs(c, x))
(And(Or(Not(a), Not(b)), Or(a, b), c, d), {a: a, b: b, c: d, d: x})
"""
def match(function1, function2):
"""Return the mapping that equates variables between two
simplified boolean expressions if possible.
By "simplified" we mean that a function has been denested
and is either an And (or an Or) whose arguments are either
symbols (x), negated symbols (Not(x)), or Or (or an And) whose
arguments are only symbols or negated symbols. For example,
And(x, Not(y), Or(w, Not(z))).
Basic.match is not robust enough (see issue 4835) so this is
a workaround that is valid for simplified boolean expressions
"""
# do some quick checks
if function1.__class__ != function2.__class__:
return None
if len(function1.args) != len(function2.args):
return None
if function1.is_Symbol:
return {function1: function2}
# get the fingerprint dictionaries
f1 = _finger(function1)
f2 = _finger(function2)
# more quick checks
if len(f1) != len(f2):
return False
# assemble the match dictionary if possible
matchdict = {}
for k in f1.keys():
if k not in f2:
return False
if len(f1[k]) != len(f2[k]):
return False
for i, x in enumerate(f1[k]):
matchdict[x] = f2[k][i]
return matchdict
a = simplify_logic(bool1)
b = simplify_logic(bool2)
m = match(a, b)
if m:
return a, m
return m is not None
@deprecated(
useinstead="bool_map", issue=7197, deprecated_since_version="0.7.4")
def bool_equal(bool1, bool2, info=False):
"""Return True if the two expressions represent the same logical
behaviour for some correspondence between the variables of each
(which may be different). For example, And(x, y) is logically
equivalent to And(a, b) for {x: a, y: b} (or vice versa). If the
mapping is desired, then set ``info`` to True and the simplified
form of the functions and mapping of variables will be returned.
"""
mapping = bool_map(bool1, bool2)
if not mapping:
return False
if info:
return mapping
return True
|
bsd-3-clause
| 8,806,993,994,031,341,000 | -3,940,520,083,924,236,300 | 27.012113 | 79 | 0.568707 | false |
dvhbru/dvhb-hybrid
|
dvhb_hybrid/mailer/django.py
|
1
|
2561
|
import base64
from concurrent.futures import ThreadPoolExecutor
from django.core import mail
from . import base
class DjangoConnection(base.BaseConnection):
def __init__(self, loop, conf, **kwargs):
super().__init__(**kwargs)
self.loop = loop
self.executor = ThreadPoolExecutor(max_workers=1)
self._conn = None
self.conf = conf
async def send_message(self, message):
if not self._conn:
raise ConnectionError()
kwargs = dict(
subject=message.subject,
body=message.body,
from_email=self.conf['from_email'],
to=message.mail_to,
connection=self._conn,
)
if message.html:
msg = mail.EmailMultiAlternatives(**kwargs)
await self.loop.run_in_executor(
self.executor, msg.attach_alternative,
message.html, "text/html")
else:
msg = mail.EmailMessage(**kwargs)
def attach_files(message, attachments):
if attachments:
for attachment in attachments:
path = attachment.get('path')
filename = attachment.get('filename')
mimetype = attachment.get('mimetype')
if path:
message.attach_file(path, mimetype=mimetype)
elif filename:
content = attachment.get('content')
if content:
message.attach(filename,
base64.decodebytes(
content.encode()),
mimetype)
await self.loop.run_in_executor(self.executor, attach_files,
msg, message.attachments)
return await self.loop.run_in_executor(self.executor, msg.send)
async def close(self):
if self._conn:
await self.loop.run_in_executor(
self.executor, self._conn.close)
async def open(self):
await self.close()
if not self._conn:
params = {
'backend': self.conf.get('django_email_backend'),
**self.conf.get('django_email_backend_params', {}),
}
self._conn = mail.get_connection(**params)
await self.loop.run_in_executor(self.executor, self._conn.open)
return self
class Mailer(base.BaseMailer):
connection_class = DjangoConnection
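# Illustrative sketch of a configuration dict for DjangoConnection. The keys
# shown are the only ones read in this module ('from_email',
# 'django_email_backend', 'django_email_backend_params'); the values are
# hypothetical:
#
#     conf = {
#         'from_email': 'noreply@example.com',
#         'django_email_backend': 'django.core.mail.backends.smtp.EmailBackend',
#         'django_email_backend_params': {'host': 'localhost', 'port': 25},
#     }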
|
mit
| 6,970,640,901,862,332,000 | 4,892,234,302,113,888,000 | 34.569444 | 71 | 0.522843 | false |
felipenaselva/felipe.repository
|
script.module.resolveurl/lib/resolveurl/plugins/grifthost.py
|
3
|
1209
|
"""
grifthost resolveurl plugin
Copyright (C) 2015 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from lib import helpers
from resolveurl.resolver import ResolveUrl, ResolverError
class GrifthostResolver(ResolveUrl):
name = "grifthost"
domains = ["grifthost.com"]
pattern = '(?://|\.)(grifthost\.com)/(?:embed-)?([0-9a-zA-Z/]+)'
def get_media_url(self, host, media_id):
return helpers.get_media_url(self.get_url(host, media_id), patterns=['''file:\s*['"](?P<url>[^'"]+)''']).replace(' ', '%20')
def get_url(self, host, media_id):
return self._default_get_url(host, media_id)
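# Illustrative sketch of what the ``pattern`` attribute above captures
# (hypothetical URL):
#
#     >>> import re
#     >>> re.search(GrifthostResolver.pattern, 'http://grifthost.com/embed-abc123').groups()
#     ('grifthost.com', 'abc123')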
|
gpl-2.0
| 7,654,480,134,709,085,000 | -6,522,141,710,900,055,000 | 36.78125 | 132 | 0.708023 | false |
aidan-/ansible-modules-extras
|
cloud/vmware/vmware_dvs_portgroup.py
|
31
|
6867
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vmware_dvs_portgroup
short_description: Create or remove a Distributed vSwitch portgroup
description:
- Create or remove a Distributed vSwitch portgroup
version_added: 2.0
author: "Joseph Callen (@jcpowermac)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
portgroup_name:
description:
- The name of the portgroup that is to be created or deleted
required: True
switch_name:
description:
- The name of the distributed vSwitch the port group should be created on.
required: True
vlan_id:
description:
- The VLAN ID that should be configured with the portgroup
required: True
num_ports:
description:
- The number of ports the portgroup should contain
required: True
portgroup_type:
description:
- See VMware KB 1022312 regarding portgroup types
required: True
choices:
- 'earlyBinding'
- 'lateBinding'
- 'ephemeral'
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Create Management portgroup
local_action:
module: vmware_dvs_portgroup
hostname: vcenter_ip_or_hostname
username: vcenter_username
password: vcenter_password
portgroup_name: Management
switch_name: dvSwitch
vlan_id: 123
num_ports: 120
portgroup_type: earlyBinding
state: present
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
class VMwareDvsPortgroup(object):
def __init__(self, module):
self.module = module
self.dvs_portgroup = None
self.switch_name = self.module.params['switch_name']
self.portgroup_name = self.module.params['portgroup_name']
self.vlan_id = self.module.params['vlan_id']
self.num_ports = self.module.params['num_ports']
self.portgroup_type = self.module.params['portgroup_type']
self.dv_switch = None
self.state = self.module.params['state']
self.content = connect_to_api(module)
def process_state(self):
try:
dvspg_states = {
'absent': {
'present': self.state_destroy_dvspg,
'absent': self.state_exit_unchanged,
},
'present': {
'update': self.state_update_dvspg,
'present': self.state_exit_unchanged,
'absent': self.state_create_dvspg,
}
}
dvspg_states[self.state][self.check_dvspg_state()]()
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
except Exception as e:
self.module.fail_json(msg=str(e))
def create_port_group(self):
config = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
config.name = self.portgroup_name
config.numPorts = self.num_ports
# vim.VMwareDVSPortSetting() does not exist in the pyvmomi documentation
# but this is the correct managed object type.
config.defaultPortConfig = vim.VMwareDVSPortSetting()
# vim.VmwareDistributedVirtualSwitchVlanIdSpec() does not exist in the
# pyvmomi documentation but this is the correct managed object type
config.defaultPortConfig.vlan = vim.VmwareDistributedVirtualSwitchVlanIdSpec()
config.defaultPortConfig.vlan.inherited = False
config.defaultPortConfig.vlan.vlanId = self.vlan_id
config.type = self.portgroup_type
spec = [config]
task = self.dv_switch.AddDVPortgroup_Task(spec)
changed, result = wait_for_task(task)
return changed, result
def state_destroy_dvspg(self):
changed = True
result = None
if not self.module.check_mode:
task = self.dvs_portgroup.Destroy_Task()
changed, result = wait_for_task(task)
self.module.exit_json(changed=changed, result=str(result))
def state_exit_unchanged(self):
self.module.exit_json(changed=False)
def state_update_dvspg(self):
self.module.exit_json(changed=False, msg="Currently not implemented.")
def state_create_dvspg(self):
changed = True
result = None
if not self.module.check_mode:
changed, result = self.create_port_group()
self.module.exit_json(changed=changed, result=str(result))
def check_dvspg_state(self):
self.dv_switch = find_dvs_by_name(self.content, self.switch_name)
if self.dv_switch is None:
raise Exception("A distributed virtual switch with name %s does not exist" % self.switch_name)
self.dvs_portgroup = find_dvspg_by_name(self.dv_switch, self.portgroup_name)
if self.dvs_portgroup is None:
return 'absent'
else:
return 'present'
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(portgroup_name=dict(required=True, type='str'),
switch_name=dict(required=True, type='str'),
vlan_id=dict(required=True, type='int'),
num_ports=dict(required=True, type='int'),
portgroup_type=dict(required=True, choices=['earlyBinding', 'lateBinding', 'ephemeral'], type='str'),
state=dict(default='present', choices=['present', 'absent'], type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
vmware_dvs_portgroup = VMwareDvsPortgroup(module)
vmware_dvs_portgroup.process_state()
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
gpl-3.0
| 4,333,951,175,903,200,000 | 4,304,959,708,830,619,000 | 33.681818 | 126 | 0.633173 | false |
crossbario/autobahn-python
|
examples/twisted/wamp/pubsub/basic/backend.py
|
3
|
2100
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from os import environ
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
"""
An application component that publishes an event every second.
"""
@inlineCallbacks
def onJoin(self, details):
print("session attached")
counter = 0
while True:
print('backend publishing com.myapp.topic1', counter)
self.publish('com.myapp.topic1', counter)
counter += 1
yield sleep(1)
if __name__ == '__main__':
url = environ.get("AUTOBAHN_DEMO_ROUTER", "ws://127.0.0.1:8080/ws")
realm = "crossbardemo"
runner = ApplicationRunner(url, realm)
runner.run(Component)
|
mit
| 6,118,172,626,218,522,000 | 1,389,524,820,717,582,000 | 37.181818 | 79 | 0.672381 | false |
jayceyxc/hue
|
desktop/core/ext-py/Django-1.6.10/django/db/models/aggregates.py
|
114
|
2601
|
"""
Classes to represent the definitions of aggregate functions.
"""
from django.db.models.constants import LOOKUP_SEP
def refs_aggregate(lookup_parts, aggregates):
"""
A little helper method to check if the lookup_parts contains references
to the given aggregates set. Because the LOOKUP_SEP is contained in the
    default annotation names, we must check each prefix of the lookup_parts
    for a match.
"""
for i in range(len(lookup_parts) + 1):
if LOOKUP_SEP.join(lookup_parts[0:i]) in aggregates:
return True
return False
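# Illustrative sketch: lookups are split on LOOKUP_SEP ('__') before this
# helper is called, and the aggregates set holds default aliases such as
# 'price__avg' (see Aggregate._default_alias below). Values are hypothetical:
#
#     >>> refs_aggregate(['price', 'avg', 'gt'], {'price__avg'})
#     True
#     >>> refs_aggregate(['price', 'gt'], {'price__avg'})
#     False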
class Aggregate(object):
"""
Default Aggregate definition.
"""
def __init__(self, lookup, **extra):
"""Instantiate a new aggregate.
* lookup is the field on which the aggregate operates.
* extra is a dictionary of additional data to provide for the
aggregate definition
Also utilizes the class variables:
* name, the identifier for this aggregate function.
"""
self.lookup = lookup
self.extra = extra
def _default_alias(self):
return '%s__%s' % (self.lookup, self.name.lower())
default_alias = property(_default_alias)
def add_to_query(self, query, alias, col, source, is_summary):
"""Add the aggregate to the nominated query.
This method is used to convert the generic Aggregate definition into a
backend-specific definition.
* query is the backend-specific query instance to which the aggregate
is to be added.
* col is a column reference describing the subject field
of the aggregate. It can be an alias, or a tuple describing
a table and column name.
* source is the underlying field or aggregate definition for
the column reference. If the aggregate is not an ordinal or
computed type, this reference is used to determine the coerced
output type of the aggregate.
* is_summary is a boolean that is set True if the aggregate is a
summary value rather than an annotation.
"""
klass = getattr(query.aggregates_module, self.name)
aggregate = klass(col, source=source, is_summary=is_summary, **self.extra)
query.aggregates[alias] = aggregate
class Avg(Aggregate):
name = 'Avg'
class Count(Aggregate):
name = 'Count'
class Max(Aggregate):
name = 'Max'
class Min(Aggregate):
name = 'Min'
class StdDev(Aggregate):
name = 'StdDev'
class Sum(Aggregate):
name = 'Sum'
class Variance(Aggregate):
name = 'Variance'
|
apache-2.0
| 5,300,736,851,139,021,000 | -7,233,190,525,705,872,000 | 31.5125 | 82 | 0.653979 | false |
CDrummond/cantata
|
icons/yaru/render-bitmaps.py
|
2
|
6913
|
#!/usr/bin/python3
#
# This file has been taken from Suru, and modified to just generate cantata icons
#
# ------------8<----------
# Legal Stuff:
#
# This file is part of the Suru Icon Theme and is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free Software
# Foundation; version 3.
#
# This file is part of the Suru Icon Theme and is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <https://www.gnu.org/licenses/lgpl-3.0.txt>
#
#
# Thanks to the GNOME icon developers for the original version of this script
# ------------8<----------
import os
import sys
import xml.sax
import subprocess
import argparse
INKSCAPE = '/usr/bin/inkscape'
OPTIPNG = '/usr/bin/optipng'
# DPI multipliers to render at
DPIS = [1, 2]
inkscape_process = None
def main(SRC):
def optimize_png(png_file):
if os.path.exists(OPTIPNG):
process = subprocess.Popen([OPTIPNG, '-quiet', '-o7', png_file])
process.wait()
def wait_for_prompt(process, command=None):
if command is not None:
process.stdin.write((command+'\n').encode('utf-8'))
# This is kinda ugly ...
        # Wait for just a '>', or '\n>' if some other char appeared first
output = process.stdout.read(1)
if output == b'>':
return
output += process.stdout.read(1)
while output != b'\n>':
output += process.stdout.read(1)
output = output[1:]
def start_inkscape():
process = subprocess.Popen([INKSCAPE, '--shell'], bufsize=0, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
wait_for_prompt(process)
return process
def inkscape_render_rect(icon_file, rect, dpi, output_file):
global inkscape_process
if inkscape_process is None:
inkscape_process = start_inkscape()
cmd = [icon_file,
'--export-dpi', str(dpi),
'-i', rect,
'-e', output_file]
wait_for_prompt(inkscape_process, ' '.join(cmd))
optimize_png(output_file)
class ContentHandler(xml.sax.ContentHandler):
ROOT = 0
SVG = 1
LAYER = 2
OTHER = 3
TEXT = 4
def __init__(self, path, force=False, filter=None):
self.stack = [self.ROOT]
self.inside = [self.ROOT]
self.path = path
self.rects = []
self.state = self.ROOT
self.chars = ""
self.force = force
self.filter = filter
def endDocument(self):
pass
def startElement(self, name, attrs):
if self.inside[-1] == self.ROOT:
if name == "svg":
self.stack.append(self.SVG)
self.inside.append(self.SVG)
return
elif self.inside[-1] == self.SVG:
if (name == "g" and ('inkscape:groupmode' in attrs) and ('inkscape:label' in attrs)
and attrs['inkscape:groupmode'] == 'layer' and attrs['inkscape:label'].startswith('Baseplate')):
self.stack.append(self.LAYER)
self.inside.append(self.LAYER)
self.context = None
self.icon_name = None
self.rects = []
return
elif self.inside[-1] == self.LAYER:
if name == "text" and ('inkscape:label' in attrs) and attrs['inkscape:label'] == 'context':
self.stack.append(self.TEXT)
self.inside.append(self.TEXT)
self.text='context'
self.chars = ""
return
elif name == "text" and ('inkscape:label' in attrs) and attrs['inkscape:label'] == 'icon-name':
self.stack.append(self.TEXT)
self.inside.append(self.TEXT)
self.text='icon-name'
self.chars = ""
return
elif name == "rect":
self.rects.append(attrs)
self.stack.append(self.OTHER)
def endElement(self, name):
stacked = self.stack.pop()
if self.inside[-1] == stacked:
self.inside.pop()
if stacked == self.TEXT and self.text is not None:
assert self.text in ['context', 'icon-name']
if self.text == 'context':
self.context = self.chars
elif self.text == 'icon-name':
self.icon_name = self.chars
self.text = None
elif stacked == self.LAYER:
assert self.icon_name
assert self.context
if self.filter is not None and not self.icon_name in self.filter:
return
print (self.context, self.icon_name)
for rect in self.rects:
for dpi_factor in DPIS:
width = rect['width']
height = rect['height']
id = rect['id']
dpi = 96 * dpi_factor
size_str = "%sx%s" % (width, height)
if dpi_factor != 1:
size_str += "@%sx" % dpi_factor
outfile = self.icon_name+'-'+size_str+'.png'
# Do a time based check!
if self.force or not os.path.exists(outfile):
inkscape_render_rect(self.path, id, dpi, outfile)
sys.stdout.write('.')
else:
stat_in = os.stat(self.path)
stat_out = os.stat(outfile)
if stat_in.st_mtime > stat_out.st_mtime:
inkscape_render_rect(self.path, id, dpi, outfile)
sys.stdout.write('.')
else:
sys.stdout.write('-')
sys.stdout.flush()
sys.stdout.write('\n')
sys.stdout.flush()
def characters(self, chars):
self.chars += chars.strip()
print ('')
print ('Rendering from SVGs in', SRC)
print ('')
for file in os.listdir(SRC):
if file[-4:] == '.svg':
file = os.path.join(SRC, file)
handler = ContentHandler(file)
xml.sax.parse(open(file), handler)
print ('')
main('.')
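# Illustrative sketch of the SVG structure ContentHandler looks for: a layer
# whose label starts with 'Baseplate', <text> elements labelled 'context' and
# 'icon-name', and one <rect> per size to render. The ids, names and sizes
# below are hypothetical:
#
#     <g inkscape:groupmode="layer" inkscape:label="Baseplate">
#       <text inkscape:label="context">apps</text>
#       <text inkscape:label="icon-name">cantata</text>
#       <rect id="rect16" width="16" height="16"/>
#       <rect id="rect48" width="48" height="48"/>
#     </g>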
|
gpl-3.0
| 8,275,368,725,027,607,000 | -7,219,756,153,367,619,000 | 35.005208 | 115 | 0.504701 | false |
klmitch/nova
|
nova/policies/extensions.py
|
3
|
1318
|
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:extensions'
extensions_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
check_str=base.RULE_ANY,
description="List available extensions and show information "
"for an extension by alias",
operations=[
{
'method': 'GET',
'path': '/extensions'
},
{
'method': 'GET',
'path': '/extensions/{alias}'
}
],
scope_types=['system', 'project']),
]
def list_rules():
return extensions_policies
|
apache-2.0
| 2,849,385,513,655,705,000 | -3,298,709,689,084,098,600 | 28.288889 | 78 | 0.629742 | false |
andrejb/cloudant_bigcouch
|
couchjs/scons/scons-local-2.0.1/SCons/Options/__init__.py
|
61
|
2667
|
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Options/__init__.py 5134 2010/08/16 23:02:40 bdeegan"
__doc__ = """Place-holder for the old SCons.Options module hierarchy
This is for backwards compatibility. The new equivalent is the Variables/
class hierarchy. These will have deprecation warnings added (some day),
and will then be removed entirely (some day).
"""
import SCons.Variables
import SCons.Warnings
from BoolOption import BoolOption # okay
from EnumOption import EnumOption # okay
from ListOption import ListOption # naja
from PackageOption import PackageOption # naja
from PathOption import PathOption # okay
warned = False
class Options(SCons.Variables.Variables):
def __init__(self, *args, **kw):
global warned
if not warned:
msg = "The Options class is deprecated; use the Variables class instead."
SCons.Warnings.warn(SCons.Warnings.DeprecatedOptionsWarning, msg)
warned = True
SCons.Variables.Variables.__init__(self, *args, **kw)
def AddOptions(self, *args, **kw):
return SCons.Variables.Variables.AddVariables(self, *args, **kw)
def UnknownOptions(self, *args, **kw):
return SCons.Variables.Variables.UnknownVariables(self, *args, **kw)
def FormatOptionHelpText(self, *args, **kw):
return SCons.Variables.Variables.FormatVariableHelpText(self, *args,
**kw)
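# Illustrative sketch of the legacy usage this shim keeps working; the option
# name, file name and construction environment call are hypothetical:
#
#     opts = Options('custom.py')
#     opts.AddOptions(
#         BoolOption('debug', 'Build with debug symbols', 0),
#     )
#     env = Environment(options=opts)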
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
apache-2.0
| 3,756,459,453,544,298,000 | -3,783,316,097,527,614,000 | 38.80597 | 95 | 0.730034 | false |
rtruxal/metagoofil
|
pdfminer/pdffont.py
|
32
|
26471
|
#!/usr/bin/env python2
import sys
import struct
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from cmapdb import CMapDB, CMapParser, FileUnicodeMap, CMap
from encodingdb import EncodingDB, name2unicode
from psparser import PSStackParser
from psparser import PSSyntaxError, PSEOF
from psparser import LIT, KWD, STRICT
from psparser import PSLiteral, literal_name
from pdftypes import PDFException, resolve1
from pdftypes import int_value, float_value, num_value
from pdftypes import str_value, list_value, dict_value, stream_value
from fontmetrics import FONT_METRICS
from utils import apply_matrix_norm, nunpack, choplist
def get_widths(seq):
widths = {}
r = []
for v in seq:
if isinstance(v, list):
if r:
char1 = r[-1]
for (i,w) in enumerate(v):
widths[char1+i] = w
r = []
elif isinstance(v, int):
r.append(v)
if len(r) == 3:
(char1,char2,w) = r
for i in xrange(char1, char2+1):
widths[i] = w
r = []
return widths
#assert get_widths([1]) == {}
#assert get_widths([1,2,3]) == {1:3, 2:3}
#assert get_widths([1,[2,3],6,[7,8]]) == {1:2,2:3, 6:7,7:8}
def get_widths2(seq):
widths = {}
r = []
for v in seq:
if isinstance(v, list):
if r:
char1 = r[-1]
for (i,(w,vx,vy)) in enumerate(choplist(3,v)):
widths[char1+i] = (w,(vx,vy))
r = []
elif isinstance(v, int):
r.append(v)
if len(r) == 5:
(char1,char2,w,vx,vy) = r
for i in xrange(char1, char2+1):
widths[i] = (w,(vx,vy))
r = []
return widths
#assert get_widths2([1]) == {}
#assert get_widths2([1,2,3,4,5]) == {1:(3,(4,5)), 2:(3,(4,5))}
#assert get_widths2([1,[2,3,4,5],6,[7,8,9]]) == {1:(2,(3,4)), 6:(7,(8,9))}
## FontMetricsDB
##
class FontMetricsDB(object):
@classmethod
def get_metrics(klass, fontname):
return FONT_METRICS[fontname]
## Type1FontHeaderParser
##
class Type1FontHeaderParser(PSStackParser):
KEYWORD_BEGIN = KWD('begin')
KEYWORD_END = KWD('end')
KEYWORD_DEF = KWD('def')
KEYWORD_PUT = KWD('put')
KEYWORD_DICT = KWD('dict')
KEYWORD_ARRAY = KWD('array')
KEYWORD_READONLY = KWD('readonly')
    KEYWORD_FOR = KWD('for')
def __init__(self, data):
PSStackParser.__init__(self, data)
self._cid2unicode = {}
return
def get_encoding(self):
while 1:
try:
(cid,name) = self.nextobject()
except PSEOF:
break
try:
self._cid2unicode[cid] = name2unicode(name)
except KeyError:
pass
return self._cid2unicode
def do_keyword(self, pos, token):
if token is self.KEYWORD_PUT:
((_,key),(_,value)) = self.pop(2)
if (isinstance(key, int) and
isinstance(value, PSLiteral)):
self.add_results((key, literal_name(value)))
return
## CFFFont
## (Format specified in Adobe Technical Note: #5176
## "The Compact Font Format Specification")
##
NIBBLES = ('0','1','2','3','4','5','6','7','8','9','.','e','e-',None,'-')
def getdict(data):
d = {}
fp = StringIO(data)
stack = []
while 1:
c = fp.read(1)
if not c: break
b0 = ord(c)
if b0 <= 21:
d[b0] = stack
stack = []
continue
if b0 == 30:
s = ''
loop = True
while loop:
b = ord(fp.read(1))
for n in (b >> 4, b & 15):
if n == 15:
loop = False
else:
s += NIBBLES[n]
value = float(s)
elif 32 <= b0 and b0 <= 246:
value = b0-139
else:
b1 = ord(fp.read(1))
if 247 <= b0 and b0 <= 250:
value = ((b0-247)<<8)+b1+108
elif 251 <= b0 and b0 <= 254:
value = -((b0-251)<<8)-b1-108
else:
b2 = ord(fp.read(1))
if 128 <= b1: b1 -= 256
if b0 == 28:
value = b1<<8 | b2
else:
value = b1<<24 | b2<<16 | struct.unpack('>H', fp.read(2))[0]
stack.append(value)
return d
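# Illustrative sketch of a minimal CFF DICT blob decoded by getdict: the byte
# 0xb5 encodes the small integer 42 (b0 - 139), and the following 0x00 is the
# operator key it is stored under.
#
#     >>> getdict('\xb5\x00')
#     {0: [42]}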
class CFFFont(object):
STANDARD_STRINGS = (
'.notdef', 'space', 'exclam', 'quotedbl', 'numbersign',
'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft',
'parenright', 'asterisk', 'plus', 'comma', 'hyphen', 'period',
'slash', 'zero', 'one', 'two', 'three', 'four', 'five', 'six',
'seven', 'eight', 'nine', 'colon', 'semicolon', 'less', 'equal',
'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G',
'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash',
'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a',
'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'braceleft', 'bar', 'braceright', 'asciitilde', 'exclamdown',
'cent', 'sterling', 'fraction', 'yen', 'florin', 'section',
'currency', 'quotesingle', 'quotedblleft', 'guillemotleft',
'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash',
'dagger', 'daggerdbl', 'periodcentered', 'paragraph', 'bullet',
'quotesinglbase', 'quotedblbase', 'quotedblright',
'guillemotright', 'ellipsis', 'perthousand', 'questiondown',
'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve',
'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut',
'ogonek', 'caron', 'emdash', 'AE', 'ordfeminine', 'Lslash',
'Oslash', 'OE', 'ordmasculine', 'ae', 'dotlessi', 'lslash',
'oslash', 'oe', 'germandbls', 'onesuperior', 'logicalnot', 'mu',
'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn',
'onequarter', 'divide', 'brokenbar', 'degree', 'thorn',
'threequarters', 'twosuperior', 'registered', 'minus', 'eth',
'multiply', 'threesuperior', 'copyright', 'Aacute',
'Acircumflex', 'Adieresis', 'Agrave', 'Aring', 'Atilde',
'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave',
'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde',
'Oacute', 'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde',
'Scaron', 'Uacute', 'Ucircumflex', 'Udieresis', 'Ugrave',
'Yacute', 'Ydieresis', 'Zcaron', 'aacute', 'acircumflex',
'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla', 'eacute',
'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex',
'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex',
'odieresis', 'ograve', 'otilde', 'scaron', 'uacute',
'ucircumflex', 'udieresis', 'ugrave', 'yacute', 'ydieresis',
'zcaron', 'exclamsmall', 'Hungarumlautsmall', 'dollaroldstyle',
'dollarsuperior', 'ampersandsmall', 'Acutesmall',
'parenleftsuperior', 'parenrightsuperior', 'twodotenleader',
'onedotenleader', 'zerooldstyle', 'oneoldstyle', 'twooldstyle',
'threeoldstyle', 'fouroldstyle', 'fiveoldstyle', 'sixoldstyle',
'sevenoldstyle', 'eightoldstyle', 'nineoldstyle',
'commasuperior', 'threequartersemdash', 'periodsuperior',
'questionsmall', 'asuperior', 'bsuperior', 'centsuperior',
'dsuperior', 'esuperior', 'isuperior', 'lsuperior', 'msuperior',
'nsuperior', 'osuperior', 'rsuperior', 'ssuperior', 'tsuperior',
'ff', 'ffi', 'ffl', 'parenleftinferior', 'parenrightinferior',
'Circumflexsmall', 'hyphensuperior', 'Gravesmall', 'Asmall',
'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall',
'Hsmall', 'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall',
'Nsmall', 'Osmall', 'Psmall', 'Qsmall', 'Rsmall', 'Ssmall',
'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall', 'Ysmall',
'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall',
'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall',
'Zcaronsmall', 'Dieresissmall', 'Brevesmall', 'Caronsmall',
'Dotaccentsmall', 'Macronsmall', 'figuredash', 'hypheninferior',
'Ogoneksmall', 'Ringsmall', 'Cedillasmall', 'questiondownsmall',
'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths',
'onethird', 'twothirds', 'zerosuperior', 'foursuperior',
'fivesuperior', 'sixsuperior', 'sevensuperior', 'eightsuperior',
'ninesuperior', 'zeroinferior', 'oneinferior', 'twoinferior',
'threeinferior', 'fourinferior', 'fiveinferior', 'sixinferior',
'seveninferior', 'eightinferior', 'nineinferior',
'centinferior', 'dollarinferior', 'periodinferior',
'commainferior', 'Agravesmall', 'Aacutesmall',
'Acircumflexsmall', 'Atildesmall', 'Adieresissmall',
'Aringsmall', 'AEsmall', 'Ccedillasmall', 'Egravesmall',
'Eacutesmall', 'Ecircumflexsmall', 'Edieresissmall',
'Igravesmall', 'Iacutesmall', 'Icircumflexsmall',
'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall',
'Oacutesmall', 'Ocircumflexsmall', 'Otildesmall',
'Odieresissmall', 'OEsmall', 'Oslashsmall', 'Ugravesmall',
'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall',
'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000',
'001.001', '001.002', '001.003', 'Black', 'Bold', 'Book',
'Light', 'Medium', 'Regular', 'Roman', 'Semibold',
)
class INDEX(object):
def __init__(self, fp):
self.fp = fp
self.offsets = []
(count, offsize) = struct.unpack('>HB', self.fp.read(3))
for i in xrange(count+1):
self.offsets.append(nunpack(self.fp.read(offsize)))
self.base = self.fp.tell()-1
self.fp.seek(self.base+self.offsets[-1])
return
def __repr__(self):
return '<INDEX: size=%d>' % len(self)
def __len__(self):
return len(self.offsets)-1
def __getitem__(self, i):
self.fp.seek(self.base+self.offsets[i])
return self.fp.read(self.offsets[i+1]-self.offsets[i])
def __iter__(self):
return iter( self[i] for i in xrange(len(self)) )
def __init__(self, name, fp):
self.name = name
self.fp = fp
# Header
(_major,_minor,hdrsize,offsize) = struct.unpack('BBBB', self.fp.read(4))
self.fp.read(hdrsize-4)
# Name INDEX
self.name_index = self.INDEX(self.fp)
# Top DICT INDEX
self.dict_index = self.INDEX(self.fp)
# String INDEX
self.string_index = self.INDEX(self.fp)
# Global Subr INDEX
self.subr_index = self.INDEX(self.fp)
# Top DICT DATA
self.top_dict = getdict(self.dict_index[0])
(charset_pos,) = self.top_dict.get(15, [0])
(encoding_pos,) = self.top_dict.get(16, [0])
(charstring_pos,) = self.top_dict.get(17, [0])
# CharStrings
self.fp.seek(charstring_pos)
self.charstring = self.INDEX(self.fp)
self.nglyphs = len(self.charstring)
# Encodings
self.code2gid = {}
self.gid2code = {}
self.fp.seek(encoding_pos)
format = self.fp.read(1)
if format == '\x00':
# Format 0
(n,) = struct.unpack('B', self.fp.read(1))
for (code,gid) in enumerate(struct.unpack('B'*n, self.fp.read(n))):
self.code2gid[code] = gid
self.gid2code[gid] = code
elif format == '\x01':
# Format 1
(n,) = struct.unpack('B', self.fp.read(1))
code = 0
for i in xrange(n):
(first,nleft) = struct.unpack('BB', self.fp.read(2))
for gid in xrange(first,first+nleft+1):
self.code2gid[code] = gid
self.gid2code[gid] = code
code += 1
else:
raise ValueError('unsupported encoding format: %r' % format)
# Charsets
self.name2gid = {}
self.gid2name = {}
self.fp.seek(charset_pos)
format = self.fp.read(1)
if format == '\x00':
# Format 0
n = self.nglyphs-1
for (gid,sid) in enumerate(struct.unpack('>'+'H'*n, self.fp.read(2*n))):
gid += 1
name = self.getstr(sid)
self.name2gid[name] = gid
self.gid2name[gid] = name
elif format == '\x01':
# Format 1
(n,) = struct.unpack('B', self.fp.read(1))
sid = 0
for i in xrange(n):
(first,nleft) = struct.unpack('BB', self.fp.read(2))
for gid in xrange(first,first+nleft+1):
name = self.getstr(sid)
self.name2gid[name] = gid
self.gid2name[gid] = name
sid += 1
elif format == '\x02':
# Format 2
assert 0
else:
raise ValueError('unsupported charset format: %r' % format)
#print self.code2gid
#print self.name2gid
#assert 0
return
def getstr(self, sid):
if sid < len(self.STANDARD_STRINGS):
return self.STANDARD_STRINGS[sid]
return self.string_index[sid-len(self.STANDARD_STRINGS)]
## TrueTypeFont
##
class TrueTypeFont(object):
class CMapNotFound(Exception): pass
def __init__(self, name, fp):
self.name = name
self.fp = fp
self.tables = {}
self.fonttype = fp.read(4)
(ntables, _1, _2, _3) = struct.unpack('>HHHH', fp.read(8))
for _ in xrange(ntables):
(name, tsum, offset, length) = struct.unpack('>4sLLL', fp.read(16))
self.tables[name] = (offset, length)
return
def create_unicode_map(self):
if 'cmap' not in self.tables:
raise TrueTypeFont.CMapNotFound
(base_offset, length) = self.tables['cmap']
fp = self.fp
fp.seek(base_offset)
(version, nsubtables) = struct.unpack('>HH', fp.read(4))
subtables = []
for i in xrange(nsubtables):
subtables.append(struct.unpack('>HHL', fp.read(8)))
char2gid = {}
# Only supports subtable type 0, 2 and 4.
for (_1, _2, st_offset) in subtables:
fp.seek(base_offset+st_offset)
(fmttype, fmtlen, fmtlang) = struct.unpack('>HHH', fp.read(6))
if fmttype == 0:
char2gid.update(enumerate(struct.unpack('>256B', fp.read(256))))
elif fmttype == 2:
subheaderkeys = struct.unpack('>256H', fp.read(512))
firstbytes = [0]*8192
for (i,k) in enumerate(subheaderkeys):
firstbytes[k/8] = i
nhdrs = max(subheaderkeys)/8 + 1
hdrs = []
for i in xrange(nhdrs):
(firstcode,entcount,delta,offset) = struct.unpack('>HHhH', fp.read(8))
hdrs.append((i,firstcode,entcount,delta,fp.tell()-2+offset))
for (i,firstcode,entcount,delta,pos) in hdrs:
if not entcount: continue
first = firstcode + (firstbytes[i] << 8)
fp.seek(pos)
for c in xrange(entcount):
gid = struct.unpack('>H', fp.read(2))
if gid:
gid += delta
char2gid[first+c] = gid
elif fmttype == 4:
(segcount, _1, _2, _3) = struct.unpack('>HHHH', fp.read(8))
segcount /= 2
ecs = struct.unpack('>%dH' % segcount, fp.read(2*segcount))
fp.read(2)
scs = struct.unpack('>%dH' % segcount, fp.read(2*segcount))
idds = struct.unpack('>%dh' % segcount, fp.read(2*segcount))
pos = fp.tell()
idrs = struct.unpack('>%dH' % segcount, fp.read(2*segcount))
for (ec,sc,idd,idr) in zip(ecs, scs, idds, idrs):
if idr:
fp.seek(pos+idr)
for c in xrange(sc, ec+1):
char2gid[c] = (struct.unpack('>H', fp.read(2))[0] + idd) & 0xffff
else:
for c in xrange(sc, ec+1):
char2gid[c] = (c + idd) & 0xffff
else:
assert 0
# create unicode map
unicode_map = FileUnicodeMap()
for (char,gid) in char2gid.iteritems():
unicode_map.add_cid2unichr(gid, char)
return unicode_map
## Fonts
##
class PDFFontError(PDFException): pass
class PDFUnicodeNotDefined(PDFFontError): pass
LITERAL_STANDARD_ENCODING = LIT('StandardEncoding')
LITERAL_TYPE1C = LIT('Type1C')
# PDFFont
class PDFFont(object):
def __init__(self, descriptor, widths, default_width=None):
self.descriptor = descriptor
self.widths = widths
self.fontname = resolve1(descriptor.get('FontName', 'unknown'))
if isinstance(self.fontname, PSLiteral):
self.fontname = literal_name(self.fontname)
self.flags = int_value(descriptor.get('Flags', 0))
self.ascent = num_value(descriptor.get('Ascent', 0))
self.descent = num_value(descriptor.get('Descent', 0))
self.italic_angle = num_value(descriptor.get('ItalicAngle', 0))
self.default_width = default_width or num_value(descriptor.get('MissingWidth', 0))
self.leading = num_value(descriptor.get('Leading', 0))
self.bbox = list_value(descriptor.get('FontBBox', (0,0,0,0)))
self.hscale = self.vscale = .001
return
def __repr__(self):
return '<PDFFont>'
def is_vertical(self):
return False
def is_multibyte(self):
return False
def decode(self, bytes):
return map(ord, bytes)
def get_ascent(self):
return self.ascent * self.vscale
def get_descent(self):
return self.descent * self.vscale
def get_width(self):
w = self.bbox[2]-self.bbox[0]
if w == 0:
w = -self.default_width
return w * self.hscale
def get_height(self):
h = self.bbox[3]-self.bbox[1]
if h == 0:
h = self.ascent - self.descent
return h * self.vscale
def char_width(self, cid):
return self.widths.get(cid, self.default_width) * self.hscale
def char_disp(self, cid):
return 0
def string_width(self, s):
return sum( self.char_width(cid) for cid in self.decode(s) )
# PDFSimpleFont
class PDFSimpleFont(PDFFont):
def __init__(self, descriptor, widths, spec):
# Font encoding is specified either by a name of
# built-in encoding or a dictionary that describes
# the differences.
if 'Encoding' in spec:
encoding = resolve1(spec['Encoding'])
else:
encoding = LITERAL_STANDARD_ENCODING
if isinstance(encoding, dict):
name = literal_name(encoding.get('BaseEncoding', LITERAL_STANDARD_ENCODING))
diff = list_value(encoding.get('Differences', None))
self.cid2unicode = EncodingDB.get_encoding(name, diff)
else:
self.cid2unicode = EncodingDB.get_encoding(literal_name(encoding))
self.unicode_map = None
if 'ToUnicode' in spec:
strm = stream_value(spec['ToUnicode'])
self.unicode_map = FileUnicodeMap()
CMapParser(self.unicode_map, StringIO(strm.get_data())).run()
PDFFont.__init__(self, descriptor, widths)
return
def to_unichr(self, cid):
if self.unicode_map:
try:
return self.unicode_map.get_unichr(cid)
except KeyError:
pass
try:
return self.cid2unicode[cid]
except KeyError:
raise PDFUnicodeNotDefined(None, cid)
# PDFType1Font
class PDFType1Font(PDFSimpleFont):
def __init__(self, rsrcmgr, spec):
try:
self.basefont = literal_name(spec['BaseFont'])
except KeyError:
if STRICT:
raise PDFFontError('BaseFont is missing')
self.basefont = 'unknown'
try:
(descriptor, widths) = FontMetricsDB.get_metrics(self.basefont)
except KeyError:
descriptor = dict_value(spec.get('FontDescriptor', {}))
firstchar = int_value(spec.get('FirstChar', 0))
lastchar = int_value(spec.get('LastChar', 255))
widths = list_value(spec.get('Widths', [0]*256))
widths = dict( (i+firstchar,w) for (i,w) in enumerate(widths) )
PDFSimpleFont.__init__(self, descriptor, widths, spec)
if 'Encoding' not in spec and 'FontFile' in descriptor:
# try to recover the missing encoding info from the font file.
self.fontfile = stream_value(descriptor.get('FontFile'))
length1 = int_value(self.fontfile['Length1'])
data = self.fontfile.get_data()[:length1]
parser = Type1FontHeaderParser(StringIO(data))
self.cid2unicode = parser.get_encoding()
return
def __repr__(self):
return '<PDFType1Font: basefont=%r>' % self.basefont
# PDFTrueTypeFont
class PDFTrueTypeFont(PDFType1Font):
def __repr__(self):
return '<PDFTrueTypeFont: basefont=%r>' % self.basefont
# PDFType3Font
class PDFType3Font(PDFSimpleFont):
def __init__(self, rsrcmgr, spec):
firstchar = int_value(spec.get('FirstChar', 0))
lastchar = int_value(spec.get('LastChar', 0))
widths = list_value(spec.get('Widths', [0]*256))
widths = dict( (i+firstchar,w) for (i,w) in enumerate(widths))
if 'FontDescriptor' in spec:
descriptor = dict_value(spec['FontDescriptor'])
else:
descriptor = {'Ascent':0, 'Descent':0,
'FontBBox':spec['FontBBox']}
PDFSimpleFont.__init__(self, descriptor, widths, spec)
self.matrix = tuple(list_value(spec.get('FontMatrix')))
(_,self.descent,_,self.ascent) = self.bbox
(self.hscale,self.vscale) = apply_matrix_norm(self.matrix, (1,1))
return
def __repr__(self):
return '<PDFType3Font>'
# PDFCIDFont
class PDFCIDFont(PDFFont):
def __init__(self, rsrcmgr, spec):
try:
self.basefont = literal_name(spec['BaseFont'])
except KeyError:
if STRICT:
raise PDFFontError('BaseFont is missing')
self.basefont = 'unknown'
self.cidsysteminfo = dict_value(spec.get('CIDSystemInfo', {}))
self.cidcoding = '%s-%s' % (self.cidsysteminfo.get('Registry', 'unknown'),
self.cidsysteminfo.get('Ordering', 'unknown'))
try:
name = literal_name(spec['Encoding'])
except KeyError:
if STRICT:
raise PDFFontError('Encoding is unspecified')
name = 'unknown'
try:
self.cmap = CMapDB.get_cmap(name)
except CMapDB.CMapNotFound, e:
if STRICT:
raise PDFFontError(e)
self.cmap = CMap()
try:
descriptor = dict_value(spec['FontDescriptor'])
except KeyError:
if STRICT:
raise PDFFontError('FontDescriptor is missing')
descriptor = {}
ttf = None
if 'FontFile2' in descriptor:
self.fontfile = stream_value(descriptor.get('FontFile2'))
ttf = TrueTypeFont(self.basefont,
StringIO(self.fontfile.get_data()))
self.unicode_map = None
if 'ToUnicode' in spec:
strm = stream_value(spec['ToUnicode'])
self.unicode_map = FileUnicodeMap()
CMapParser(self.unicode_map, StringIO(strm.get_data())).run()
elif self.cidcoding == 'Adobe-Identity':
if ttf:
try:
self.unicode_map = ttf.create_unicode_map()
except TrueTypeFont.CMapNotFound:
pass
else:
try:
self.unicode_map = CMapDB.get_unicode_map(self.cidcoding, self.cmap.is_vertical())
except CMapDB.CMapNotFound, e:
pass
self.vertical = self.cmap.is_vertical()
if self.vertical:
# writing mode: vertical
widths = get_widths2(list_value(spec.get('W2', [])))
self.disps = dict( (cid,(vx,vy)) for (cid,(_,(vx,vy))) in widths.iteritems() )
(vy,w) = spec.get('DW2', [880, -1000])
self.default_disp = (None,vy)
widths = dict( (cid,w) for (cid,(w,_)) in widths.iteritems() )
default_width = w
else:
# writing mode: horizontal
self.disps = {}
self.default_disp = 0
widths = get_widths(list_value(spec.get('W', [])))
default_width = spec.get('DW', 1000)
PDFFont.__init__(self, descriptor, widths, default_width=default_width)
return
def __repr__(self):
return '<PDFCIDFont: basefont=%r, cidcoding=%r>' % (self.basefont, self.cidcoding)
def is_vertical(self):
return self.vertical
def is_multibyte(self):
return True
def decode(self, bytes):
return self.cmap.decode(bytes)
def char_disp(self, cid):
"Returns an integer for horizontal fonts, a tuple for vertical fonts."
return self.disps.get(cid, self.default_disp)
def to_unichr(self, cid):
try:
if not self.unicode_map: raise KeyError(cid)
return self.unicode_map.get_unichr(cid)
except KeyError:
raise PDFUnicodeNotDefined(self.cidcoding, cid)
# main
def main(argv):
for fname in argv[1:]:
fp = file(fname, 'rb')
#font = TrueTypeFont(fname, fp)
font = CFFFont(fname, fp)
print font
fp.close()
return
if __name__ == '__main__': sys.exit(main(sys.argv))
|
gpl-2.0
| -5,386,326,879,320,784,000 | 5,910,715,622,449,237,000 | 36.707977 | 98 | 0.540478 | false |
xkfz007/binutils-gdb
|
gdb/copyright.py
|
19
|
11540
|
#! /usr/bin/env python
# Copyright (C) 2011-2015 Free Software Foundation, Inc.
#
# This file is part of GDB.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""copyright.py
This script updates the list of years in the copyright notices in
most files maintained by the GDB project.
Usage: cd src/gdb && python copyright.py
Always review the output of this script before committing it!
A useful command to review the output is:
% filterdiff -x \*.c -x \*.cc -x \*.h -x \*.exp updates.diff
This removes the bulk of the changes which are most likely to be correct.
"""
import datetime
import os
import os.path
import subprocess
def get_update_list():
"""Return the list of files to update.
Assumes that the current working directory when called is the root
of the GDB source tree (NOT the gdb/ subdirectory!). The names of
the files are relative to that root directory.
"""
result = []
for gdb_dir in ('gdb', 'sim', 'include/gdb'):
for root, dirs, files in os.walk(gdb_dir, topdown=True):
for dirname in dirs:
reldirname = "%s/%s" % (root, dirname)
if (dirname in EXCLUDE_ALL_LIST
or reldirname in EXCLUDE_LIST
or reldirname in NOT_FSF_LIST
or reldirname in BY_HAND):
# Prune this directory from our search list.
dirs.remove(dirname)
for filename in files:
relpath = "%s/%s" % (root, filename)
if (filename in EXCLUDE_ALL_LIST
or relpath in EXCLUDE_LIST
or relpath in NOT_FSF_LIST
or relpath in BY_HAND):
# Ignore this file.
pass
else:
result.append(relpath)
return result
def update_files(update_list):
"""Update the copyright header of the files in the given list.
We use gnulib's update-copyright script for that.
"""
# We want to use year intervals in the copyright notices, and
# all years should be collapsed to one single year interval,
# even if there are "holes" in the list of years found in the
# original copyright notice (OK'ed by the FSF, case [gnu.org #719834]).
os.environ['UPDATE_COPYRIGHT_USE_INTERVALS'] = '2'
# Perform the update, and save the output in a string.
update_cmd = ['bash', 'gdb/gnulib/import/extra/update-copyright']
update_cmd += update_list
p = subprocess.Popen(update_cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
update_out = p.communicate()[0]
# Process the output. Typically, a lot of files do not have
# a copyright notice :-(. The update-copyright script prints
# a well defined warning when it did not find the copyright notice.
# For each of those, do a sanity check and see if they may in fact
# have one. For the files that are found not to have one, we filter
# the line out from the output, since there is nothing more to do,
# short of looking at each file and seeing which notice is appropriate.
# Too much work! (~4,000 files listed as of 2012-01-03).
update_out = update_out.splitlines()
warning_string = ': warning: copyright statement not found'
warning_len = len(warning_string)
for line in update_out:
if line.endswith('\n'):
line = line[:-1]
if line.endswith(warning_string):
filename = line[:-warning_len]
if may_have_copyright_notice(filename):
print line
else:
# Unrecognized file format. !?!
print "*** " + line
def may_have_copyright_notice(filename):
"""Check that the given file does not seem to have a copyright notice.
The filename is relative to the root directory.
This function assumes that the current working directory is that root
directory.
    The algorithm is fairly crude, meaning that it might return
some false positives. I do not think it will return any false
negatives... We might improve this function to handle more
complex cases later...
"""
# For now, it may have a copyright notice if we find the word
# "Copyright" at the (reasonable) start of the given file, say
# 50 lines...
MAX_LINES = 50
fd = open(filename)
lineno = 1
for line in fd:
if 'Copyright' in line:
return True
lineno += 1
        if lineno > MAX_LINES:
return False
return False
def main ():
"""The main subprogram."""
if not os.path.isfile("gnulib/import/extra/update-copyright"):
print "Error: This script must be called from the gdb directory."
root_dir = os.path.dirname(os.getcwd())
os.chdir(root_dir)
update_list = get_update_list()
update_files (update_list)
# Remind the user that some files need to be updated by HAND...
if BY_HAND:
print
print "\033[31mREMINDER: The following files must be updated by hand." \
"\033[0m"
for filename in BY_HAND + MULTIPLE_COPYRIGHT_HEADERS:
print " ", filename
############################################################################
#
# Some constants, placed at the end because they take up a lot of room.
# The actual value of these constants is not significant to the understanding
# of the script.
#
############################################################################
# Files which should not be modified, either because they are
# generated, non-FSF, or otherwise special (e.g. license text,
# or test cases which must be sensitive to line numbering).
#
# Filenames are relative to the root directory.
EXCLUDE_LIST = (
'gdb/nat/glibc_thread_db.h',
'gdb/CONTRIBUTE',
'gdb/gnulib/import'
)
# Files which should not be modified, either because they are
# generated, non-FSF, or otherwise special (e.g. license text,
# or test cases which must be sensitive to line numbering).
#
# Matches any file or directory name anywhere. Use with caution.
# This is mostly for files that can be found in multiple directories.
# Eg: We want all files named COPYING to be left untouched.
EXCLUDE_ALL_LIST = (
"COPYING", "COPYING.LIB", "CVS", "configure", "copying.c",
"fdl.texi", "gpl.texi", "aclocal.m4",
)
# The list of files to update by hand.
BY_HAND = (
# These files are sensitive to line numbering.
"gdb/testsuite/gdb.base/step-line.inp",
"gdb/testsuite/gdb.base/step-line.c",
)
# Files containing multiple copyright headers. This script is only
# fixing the first one it finds, so we need to finish the update
# by hand.
MULTIPLE_COPYRIGHT_HEADERS = (
"gdb/doc/gdb.texinfo",
"gdb/doc/refcard.tex",
"gdb/gdbarch.sh",
)
# The list of file which have a copyright, but not head by the FSF.
# Filenames are relative to the root directory.
NOT_FSF_LIST = (
"gdb/exc_request.defs",
"gdb/gdbtk",
"gdb/testsuite/gdb.gdbtk/",
"sim/arm/armemu.h", "sim/arm/armos.c", "sim/arm/gdbhost.c",
"sim/arm/dbg_hif.h", "sim/arm/dbg_conf.h", "sim/arm/communicate.h",
"sim/arm/armos.h", "sim/arm/armcopro.c", "sim/arm/armemu.c",
"sim/arm/kid.c", "sim/arm/thumbemu.c", "sim/arm/armdefs.h",
"sim/arm/armopts.h", "sim/arm/dbg_cp.h", "sim/arm/dbg_rdi.h",
"sim/arm/parent.c", "sim/arm/armsupp.c", "sim/arm/armrdi.c",
"sim/arm/bag.c", "sim/arm/armvirt.c", "sim/arm/main.c", "sim/arm/bag.h",
"sim/arm/communicate.c", "sim/arm/gdbhost.h", "sim/arm/armfpe.h",
"sim/arm/arminit.c",
"sim/common/cgen-fpu.c", "sim/common/cgen-fpu.h",
"sim/common/cgen-accfp.c",
"sim/erc32/sis.h", "sim/erc32/erc32.c", "sim/erc32/func.c",
"sim/erc32/float.c", "sim/erc32/interf.c", "sim/erc32/sis.c",
"sim/erc32/exec.c",
"sim/mips/m16run.c", "sim/mips/sim-main.c",
"sim/moxie/moxie-gdb.dts",
# Not a single file in sim/ppc/ appears to be copyright FSF :-(.
"sim/ppc/filter.h", "sim/ppc/gen-support.h", "sim/ppc/ld-insn.h",
"sim/ppc/hw_sem.c", "sim/ppc/hw_disk.c", "sim/ppc/idecode_branch.h",
"sim/ppc/sim-endian.h", "sim/ppc/table.c", "sim/ppc/hw_core.c",
"sim/ppc/gen-support.c", "sim/ppc/gen-semantics.h", "sim/ppc/cpu.h",
"sim/ppc/sim_callbacks.h", "sim/ppc/RUN", "sim/ppc/Makefile.in",
"sim/ppc/emul_chirp.c", "sim/ppc/hw_nvram.c", "sim/ppc/dc-test.01",
"sim/ppc/hw_phb.c", "sim/ppc/hw_eeprom.c", "sim/ppc/bits.h",
"sim/ppc/hw_vm.c", "sim/ppc/cap.h", "sim/ppc/os_emul.h",
"sim/ppc/options.h", "sim/ppc/gen-idecode.c", "sim/ppc/filter.c",
"sim/ppc/corefile-n.h", "sim/ppc/std-config.h", "sim/ppc/ld-decode.h",
"sim/ppc/filter_filename.h", "sim/ppc/hw_shm.c",
"sim/ppc/pk_disklabel.c", "sim/ppc/dc-simple", "sim/ppc/misc.h",
"sim/ppc/device_table.h", "sim/ppc/ld-insn.c", "sim/ppc/inline.c",
"sim/ppc/emul_bugapi.h", "sim/ppc/hw_cpu.h", "sim/ppc/debug.h",
"sim/ppc/hw_ide.c", "sim/ppc/debug.c", "sim/ppc/gen-itable.h",
"sim/ppc/interrupts.c", "sim/ppc/hw_glue.c", "sim/ppc/emul_unix.c",
"sim/ppc/sim_calls.c", "sim/ppc/dc-complex", "sim/ppc/ld-cache.c",
"sim/ppc/registers.h", "sim/ppc/dc-test.02", "sim/ppc/options.c",
"sim/ppc/igen.h", "sim/ppc/registers.c", "sim/ppc/device.h",
"sim/ppc/emul_chirp.h", "sim/ppc/hw_register.c", "sim/ppc/hw_init.c",
"sim/ppc/sim-endian-n.h", "sim/ppc/filter_filename.c",
"sim/ppc/bits.c", "sim/ppc/idecode_fields.h", "sim/ppc/hw_memory.c",
"sim/ppc/misc.c", "sim/ppc/double.c", "sim/ppc/psim.h",
"sim/ppc/hw_trace.c", "sim/ppc/emul_netbsd.h", "sim/ppc/psim.c",
"sim/ppc/ppc-instructions", "sim/ppc/tree.h", "sim/ppc/README",
"sim/ppc/gen-icache.h", "sim/ppc/gen-model.h", "sim/ppc/ld-cache.h",
"sim/ppc/mon.c", "sim/ppc/corefile.h", "sim/ppc/vm.c",
"sim/ppc/INSTALL", "sim/ppc/gen-model.c", "sim/ppc/hw_cpu.c",
"sim/ppc/corefile.c", "sim/ppc/hw_opic.c", "sim/ppc/gen-icache.c",
"sim/ppc/events.h", "sim/ppc/os_emul.c", "sim/ppc/emul_generic.c",
"sim/ppc/main.c", "sim/ppc/hw_com.c", "sim/ppc/gen-semantics.c",
"sim/ppc/emul_bugapi.c", "sim/ppc/device.c", "sim/ppc/emul_generic.h",
"sim/ppc/tree.c", "sim/ppc/mon.h", "sim/ppc/interrupts.h",
"sim/ppc/cap.c", "sim/ppc/cpu.c", "sim/ppc/hw_phb.h",
"sim/ppc/device_table.c", "sim/ppc/lf.c", "sim/ppc/lf.c",
"sim/ppc/dc-stupid", "sim/ppc/hw_pal.c", "sim/ppc/ppc-spr-table",
"sim/ppc/emul_unix.h", "sim/ppc/words.h", "sim/ppc/basics.h",
"sim/ppc/hw_htab.c", "sim/ppc/lf.h", "sim/ppc/ld-decode.c",
"sim/ppc/sim-endian.c", "sim/ppc/gen-itable.c",
"sim/ppc/idecode_expression.h", "sim/ppc/table.h", "sim/ppc/dgen.c",
"sim/ppc/events.c", "sim/ppc/gen-idecode.h", "sim/ppc/emul_netbsd.c",
"sim/ppc/igen.c", "sim/ppc/vm_n.h", "sim/ppc/vm.h",
"sim/ppc/hw_iobus.c", "sim/ppc/inline.h",
"sim/testsuite/sim/bfin/s21.s", "sim/testsuite/sim/mips/mips32-dsp2.s",
)
if __name__ == "__main__":
main()
|
gpl-2.0
| -3,126,046,760,536,517,600 | 7,929,296,357,762,321,000 | 40.362007 | 80 | 0.630503 | false |
lmiccini/sos
|
sos/plugins/lvm2.py
|
5
|
3024
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class Lvm2(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
"""LVM2 volume manager
"""
plugin_name = 'lvm2'
profiles = ('storage',)
option_list = [("lvmdump", 'collect an lvmdump tarball', 'fast', False),
("lvmdump-am", 'attempt to collect an lvmdump with '
'advanced options and raw metadata collection', 'slow',
False)]
def do_lvmdump(self, metadata=False):
"""Collects an lvmdump in standard format with optional metadata
archives for each physical volume present.
"""
lvmdump_cmd = "lvmdump %s -d '%s'"
lvmdump_opts = ""
if metadata:
lvmdump_opts = "-a -m"
cmd = lvmdump_cmd % (lvmdump_opts,
self.get_cmd_output_path(name="lvmdump"))
self.add_cmd_output(cmd)
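# Illustrative note (added comment, not part of the original plugin): with
# metadata=True the command string built above expands to roughly
#   lvmdump -a -m -d '<output-path>/lvmdump'
# where the output path is whatever self.get_cmd_output_path() returns.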
def setup(self):
# use locking_type 0 (no locks) when running LVM2 commands,
# from lvm.conf:
# Turn locking off by setting to 0 (dangerous: risks metadata
# corruption if LVM2 commands get run concurrently).
# None of the commands issued by sos ever modify metadata and this
# avoids the possibility of hanging lvm commands when another process
# or node holds a conflicting lock.
lvm_opts = '--config="global{locking_type=0}"'
self.add_cmd_output(
"vgdisplay -vv %s" % lvm_opts,
root_symlink="vgdisplay"
)
pvs_cols = 'pv_mda_free,pv_mda_size,pv_mda_count,pv_mda_used_count'
pvs_cols = pvs_cols + ',' + 'pe_start'
vgs_cols = 'vg_mda_count,vg_mda_free,vg_mda_size,vg_mda_used_count'
vgs_cols = vgs_cols + ',' + 'vg_tags'
lvs_cols = 'lv_tags,devices'
self.add_cmd_output([
"vgscan -vvv %s" % lvm_opts,
"pvscan -v %s" % lvm_opts,
"pvs -a -v -o +%s %s" % (pvs_cols, lvm_opts),
"vgs -v -o +%s %s" % (vgs_cols, lvm_opts),
"lvs -a -o +%s %s" % (lvs_cols, lvm_opts)
])
self.add_copy_spec("/etc/lvm")
if self.get_option('lvmdump'):
self.do_lvmdump()
elif self.get_option('lvmdump-am'):
self.do_lvmdump(metadata=True)
# vim: et ts=4 sw=4
|
gpl-2.0
| 9,021,493,368,768,127,000 | 1,110,703,567,965,509,500 | 38.272727 | 77 | 0.607474 | false |
vthorsteinsson/tensor2tensor
|
tensor2tensor/models/research/transformer_aux.py
|
1
|
5657
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer with auxiliary losses from https://arxiv.org/abs/1803.00144."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.layers import common_layers
from tensor2tensor.models import transformer
from tensor2tensor.utils import registry
import tensorflow as tf
def shift_and_pad(tensor, shift, axis=0):
"""Shifts and pads with zero along an axis.
Example:
shift_and_pad([1, 2, 3, 4], 2) --> [0, 0, 1, 2]
shift_and_pad([1, 2, 3, 4], -2) --> [3, 4, 0, 0]
Args:
tensor: Tensor; to be shifted and padded.
shift: int; number of positions to shift by.
axis: int; along which axis to shift and pad.
Returns:
A Tensor with the same shape as the input tensor.
"""
shape = tensor.shape
rank = len(shape)
assert 0 <= abs(axis) < rank
length = int(shape[axis])
assert 0 <= abs(shift) < length
paddings = [(0, 0)] * rank
begin = [0] * rank
size = [-1] * rank
if shift > 0:
paddings[axis] = (shift, 0)
size[axis] = length - shift
elif shift < 0:
paddings[axis] = (0, -shift)
begin[axis] = -shift
ret = tf.pad(tf.slice(tensor, begin, size), paddings)
return ret
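# Worked example (added comment, not part of the original module): for
# tensor=[1, 2, 3, 4], shift=2, axis=0 the code above computes
# paddings=[(2, 0)], begin=[0], size=[2], so tf.slice keeps [1, 2] and tf.pad
# prepends two zeros, reproducing the docstring result [0, 0, 1, 2].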
@registry.register_model
class TransformerAux(transformer.Transformer):
"""Attention net. See file docstring."""
def _extract_shift_values(self):
"""Parses the shift string.
The hparams should contain the key shift_values, which maps to a
comma-separated string of integers. These integers specify the number of
timesteps to predict/reconstruct to compute auxiliary losses.
For instance, "-4,2,6" means to reconstruct the target 4 steps before and
predict the targets 2 steps and 6 steps ahead.
Returns:
List of int != 0 shift values to compute the auxiliary losses.
"""
shift_values_str = self._hparams.get("shift_values", "")
shift_values = [int(x) for x in shift_values_str.split(",")]
tf.logging.info(
"Computing auxiliary losses for the following shifts: %s",
shift_values)
return shift_values
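# Example (added comment): with hparams.shift_values = "-4,2,6" the method
# above returns [-4, 2, 6], i.e. reconstruct the target 4 steps back and
# predict the targets 2 and 6 steps ahead, matching the docstring.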
def auxiliary_loss(self, body_output, features, shift):
"""Auxiliary predict loss.
Args:
body_output: Tensor with shape [batch_size, decoder_length, hidden_dim].
features: Map of features to the model. Must contain the following:
"targets": Target decoder outputs.
[batch_size, decoder_length, 1, hidden_dim]
shift: int != 0, amount to shift/pad the target sequence.
If shift > 0, it represents the number of previous timesteps to
reconstruct; if shift < 0, it represents the number of future timesteps
to predict.
Returns:
A 2-tuple of the numerator and denominator of the cross-entropy loss.
Raises:
ValueError: if features does not contain a targets_raw tensor.
"""
assert isinstance(shift, int) and shift != 0
name = "reconst_%d" % shift if shift > 0 else "predict_%d" % abs(shift)
if features and "targets_raw" in features:
targets = features["targets_raw"]
targets = common_layers.flatten4d3d(targets)
else:
raise ValueError(
"Feature map must contain a targets_raw tensor.")
with tf.variable_scope(name):
logits = self.top(body_output, features)
labels = shift_and_pad(targets, shift, axis=1)
return common_layers.padded_cross_entropy(
logits,
labels,
self._hparams.label_smoothing)
def body(self, features):
"""Transformer main model_fn.
Args:
features: Map of features to the model. Should contain the following:
"inputs": Transformer inputs.
[batch_size, input_length, 1, hidden_dim].
"targets": Target decoder outputs.
[batch_size, target_length, 1, hidden_dim]
"target_space_id": A scalar int from data_generators.problem.SpaceID.
Returns:
A 2-tuple containing:
Logit tensor. [batch_size, decoder_length, vocab_size]
Map of keys to loss tensors. Should contain the following:
"training": Training loss (shift == 0).
"auxiliary": Auxiliary loss (shift != 0).
"""
output = super(TransformerAux, self).body(features)
output, losses = self._normalize_body_output(output)
aux = 0.0
for shift in self._extract_shift_values():
loss_num, loss_den = self.auxiliary_loss(output, features, shift)
aux += loss_num / loss_den
losses["auxiliary"] = aux
return output, losses
@registry.register_hparams
def transformer_aux_base():
"""Set of hyperparameters."""
hparams = transformer.transformer_base()
hparams.shared_embedding_and_softmax_weights = False
hparams.add_hparam("shift_values", "1,2,3,4")
return hparams
@registry.register_hparams
def transformer_aux_tiny():
"""Set of hyperparameters."""
hparams = transformer.transformer_tiny()
hparams.shared_embedding_and_softmax_weights = False
hparams.add_hparam("shift_values", "1,2")
return hparams
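# Usage sketch (added comment; the flag names are an assumption and depend on
# the tensor2tensor version in use, not something stated in this file): with
# the registrations above, the model and hparams can typically be selected by
# name, e.g.
#   t2t-trainer --model=transformer_aux --hparams_set=transformer_aux_base ...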
|
apache-2.0
| 6,529,369,463,695,924,000 | -2,187,120,470,643,247,000 | 31.511494 | 79 | 0.671027 | false |
mitchelljkotler/django
|
django/contrib/staticfiles/views.py
|
581
|
1329
|
"""
Views and functions for serving static files. These are only to be used during
development, and SHOULD NOT be used in a production setting.
"""
import os
import posixpath
from django.conf import settings
from django.contrib.staticfiles import finders
from django.http import Http404
from django.utils.six.moves.urllib.parse import unquote
from django.views import static
def serve(request, path, insecure=False, **kwargs):
"""
Serve static files below a given point in the directory structure or
from locations inferred from the staticfiles finders.
To use, put a URL pattern such as::
from django.contrib.staticfiles import views
url(r'^(?P<path>.*)$', views.serve)
in your URLconf.
It uses the django.views.static.serve() view to serve the found files.
"""
if not settings.DEBUG and not insecure:
raise Http404
normalized_path = posixpath.normpath(unquote(path)).lstrip('/')
absolute_path = finders.find(normalized_path)
if not absolute_path:
if path.endswith('/') or path == '':
raise Http404("Directory indexes are not allowed here.")
raise Http404("'%s' could not be found" % path)
document_root, path = os.path.split(absolute_path)
return static.serve(request, path, document_root=document_root, **kwargs)
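# Usage sketch (added comment, assumed project layout, not part of the original
# file): during development this view is typically wired up in a project
# urls.py, for example
#   from django.contrib.staticfiles import views
#   url(r'^static/(?P<path>.*)$', views.serve)
# Passing insecure=True serves files even when settings.DEBUG is False, which
# the module docstring warns against for production use.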
|
bsd-3-clause
| -8,605,514,895,929,157,000 | 6,273,753,636,298,279,000 | 32.225 | 78 | 0.705794 | false |
bluemini/kuma
|
vendor/packages/pygments/lexers/__init__.py
|
73
|
8735
|
# -*- coding: utf-8 -*-
"""
pygments.lexers
~~~~~~~~~~~~~~~
Pygments lexers.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import types
import fnmatch
from os.path import basename
from pygments.lexers._mapping import LEXERS
from pygments.modeline import get_filetype_from_buffer
from pygments.plugin import find_plugin_lexers
from pygments.util import ClassNotFound, itervalues, guess_decode
__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
'guess_lexer'] + list(LEXERS)
_lexer_cache = {}
_pattern_cache = {}
def _fn_matches(fn, glob):
"""Return whether the supplied file name fn matches pattern filename."""
if glob not in _pattern_cache:
pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
return pattern.match(fn)
return _pattern_cache[glob].match(fn)
def _load_lexers(module_name):
"""Load a lexer (and all others in the module too)."""
mod = __import__(module_name, None, None, ['__all__'])
for lexer_name in mod.__all__:
cls = getattr(mod, lexer_name)
_lexer_cache[cls.name] = cls
def get_all_lexers():
"""Return a generator of tuples in the form ``(name, aliases,
filenames, mimetypes)`` of all known lexers.
"""
for item in itervalues(LEXERS):
yield item[1:]
for lexer in find_plugin_lexers():
yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
def find_lexer_class(name):
"""Lookup a lexer class by name.
Return None if not found.
"""
if name in _lexer_cache:
return _lexer_cache[name]
# lookup builtin lexers
for module_name, lname, aliases, _, _ in itervalues(LEXERS):
if name == lname:
_load_lexers(module_name)
return _lexer_cache[name]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if cls.name == name:
return cls
def get_lexer_by_name(_alias, **options):
"""Get a lexer by an alias.
Raises ClassNotFound if not found.
"""
if not _alias:
raise ClassNotFound('no lexer for alias %r found' % _alias)
# lookup builtin lexers
for module_name, name, aliases, _, _ in itervalues(LEXERS):
if _alias.lower() in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
return _lexer_cache[name](**options)
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if _alias in cls.aliases:
return cls(**options)
raise ClassNotFound('no lexer for alias %r found' % _alias)
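# Illustrative usage (added comment, not part of the original module):
#   get_lexer_by_name('python', stripall=True) returns a PythonLexer instance,
#   while an unknown alias such as 'no-such-alias' raises ClassNotFound.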
def find_lexer_class_for_filename(_fn, code=None):
"""Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Returns None if not found.
"""
matches = []
fn = basename(_fn)
for modname, name, _, filenames, _ in itervalues(LEXERS):
for filename in filenames:
if _fn_matches(fn, filename):
if name not in _lexer_cache:
_load_lexers(modname)
matches.append((_lexer_cache[name], filename))
for cls in find_plugin_lexers():
for filename in cls.filenames:
if _fn_matches(fn, filename):
matches.append((cls, filename))
if sys.version_info > (3,) and isinstance(code, bytes):
# decode it, since all analyse_text functions expect unicode
code = guess_decode(code)
def get_rating(info):
cls, filename = info
# explicit patterns get a bonus
bonus = '*' not in filename and 0.5 or 0
# The class _always_ defines analyse_text because it's included in
# the Lexer class. The default implementation returns None which
# gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
# to find lexers which need it overridden.
if code:
return cls.analyse_text(code) + bonus
return cls.priority + bonus
if matches:
matches.sort(key=get_rating)
# print "Possible lexers, after sort:", matches
return matches[-1][0]
def get_lexer_for_filename(_fn, code=None, **options):
"""Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Raises ClassNotFound if not found.
"""
res = find_lexer_class_for_filename(_fn, code)
if not res:
raise ClassNotFound('no lexer for filename %r found' % _fn)
return res(**options)
def get_lexer_for_mimetype(_mime, **options):
"""Get a lexer for a mimetype.
Raises ClassNotFound if not found.
"""
for modname, name, _, _, mimetypes in itervalues(LEXERS):
if _mime in mimetypes:
if name not in _lexer_cache:
_load_lexers(modname)
return _lexer_cache[name](**options)
for cls in find_plugin_lexers():
if _mime in cls.mimetypes:
return cls(**options)
raise ClassNotFound('no lexer for mimetype %r found' % _mime)
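# Illustrative usage (added comment): get_lexer_for_mimetype('text/x-python')
# returns a PythonLexer instance, while an unknown mimetype raises
# ClassNotFound, mirroring get_lexer_by_name() above.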
def _iter_lexerclasses(plugins=True):
"""Return an iterator over all lexer classes."""
for key in sorted(LEXERS):
module_name, name = LEXERS[key][:2]
if name not in _lexer_cache:
_load_lexers(module_name)
yield _lexer_cache[name]
if plugins:
for lexer in find_plugin_lexers():
yield lexer
def guess_lexer_for_filename(_fn, _text, **options):
"""
Lookup all lexers that handle those filenames primary (``filenames``)
or secondary (``alias_filenames``). Then run a text analysis for those
lexers and choose the best result.
usage::
>>> from pygments.lexers import guess_lexer_for_filename
>>> guess_lexer_for_filename('hello.html', '<%= @foo %>')
<pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c>
>>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>')
<pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac>
>>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }')
<pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c>
"""
fn = basename(_fn)
primary = {}
matching_lexers = set()
for lexer in _iter_lexerclasses():
for filename in lexer.filenames:
if _fn_matches(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = True
for filename in lexer.alias_filenames:
if _fn_matches(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = False
if not matching_lexers:
raise ClassNotFound('no lexer for filename %r found' % fn)
if len(matching_lexers) == 1:
return matching_lexers.pop()(**options)
result = []
for lexer in matching_lexers:
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
result.append((rv, lexer))
def type_sort(t):
# sort by:
# - analyse score
# - is primary filename pattern?
# - priority
# - last resort: class name
return (t[0], primary[t[1]], t[1].priority, t[1].__name__)
result.sort(key=type_sort)
return result[-1][1](**options)
def guess_lexer(_text, **options):
"""Guess a lexer by strong distinctions in the text (eg, shebang)."""
# try to get a vim modeline first
ft = get_filetype_from_buffer(_text)
if ft is not None:
try:
return get_lexer_by_name(ft, **options)
except ClassNotFound:
pass
best_lexer = [0.0, None]
for lexer in _iter_lexerclasses():
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
if rv > best_lexer[0]:
best_lexer[:] = (rv, lexer)
if not best_lexer[0] or best_lexer[1] is None:
raise ClassNotFound('no lexer matching the text found')
return best_lexer[1](**options)
class _automodule(types.ModuleType):
"""Automatically import lexers."""
def __getattr__(self, name):
info = LEXERS.get(name)
if info:
_load_lexers(info[0])
cls = _lexer_cache[info[1]]
setattr(self, name, cls)
return cls
raise AttributeError(name)
oldmod = sys.modules[__name__]
newmod = _automodule(__name__)
newmod.__dict__.update(oldmod.__dict__)
sys.modules[__name__] = newmod
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
|
mpl-2.0
| -3,719,976,003,029,241,000 | -4,203,897,078,159,076,000 | 30.996337 | 78 | 0.60996 | false |
hrjn/scikit-learn
|
sklearn/cluster/tests/test_hierarchical.py
|
33
|
20167
|
"""
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
# test hierarchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
# test hierarchical clustering on a precomputed distances matrix
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
# With specified a number of clusters just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering_wrong_arg_memory():
# Test that an error is raised when memory is neither a str
# nor a joblib.Memory instance
rng = np.random.RandomState(0)
n_samples = 100
X = rng.randn(n_samples, 50)
memory = 5
clustering = AgglomerativeClustering(memory=memory)
assert_raises(ValueError, clustering.fit, X)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that a ValueError is raised when the connectivity matrix
# has the wrong shape
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
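# Example (added comment, not part of the original tests): for cut1=[0, 0, 1]
# and cut2=[1, 1, 0] the two co-clustering matrices are identical, so the
# labellings are treated as the same even though the cluster indices differ.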
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
# Check non-regression of a bug triggered when a connectivity matrix that
# does not support item assignment is provided with more than one component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
# When n_clusters is less, the full tree should be built
# that is the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
# When n_clusters is large, greater than max of 100 and 0.02 * n_samples.
# we should stop when there are n_clusters.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
|
bsd-3-clause
| 3,993,354,556,501,120,500 | 3,288,843,881,290,622,500 | 37.782692 | 82 | 0.621312 | false |
jarn0ld/gnuradio
|
gr-vocoder/python/vocoder/qa_g721_vocoder.py
|
57
|
1573
|
#!/usr/bin/env python
#
# Copyright 2011,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, vocoder, blocks
class test_g721_vocoder (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block()
def tearDown (self):
self.tb = None
def test001_module_load (self):
data = (8,24,36,52,56,64,76,88,104,124,132,148,172,
196,220,244,280,320,372,416,468,524,580,648)
src = blocks.vector_source_s(data)
enc = vocoder.g721_encode_sb()
dec = vocoder.g721_decode_bs()
snk = blocks.vector_sink_s()
self.tb.connect(src, enc, dec, snk)
self.tb.run()
actual_result = snk.data()
self.assertEqual(data, actual_result)
if __name__ == '__main__':
gr_unittest.run(test_g721_vocoder, "test_g721_vocoder.xml")
|
gpl-3.0
| 4,536,384,372,396,425,000 | 5,631,701,988,485,565,000 | 33.195652 | 70 | 0.67705 | false |
GGFHF/NGScloud
|
Package/gdialogs.py
|
1
|
44030
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
'''
This software has been developed by:
GI Genética, Fisiología e Historia Forestal
Dpto. Sistemas y Recursos Naturales
ETSI Montes, Forestal y del Medio Natural
Universidad Politécnica de Madrid
http://gfhforestal.com/
https://github.com/ggfhf/
Licence: GNU General Public Licence Version 3.
'''
#-------------------------------------------------------------------------------
'''
This source contains the dialog classes corresponding to the graphical user interface of
the NGScloud software package.
'''
#-------------------------------------------------------------------------------
import os
import PIL.Image
import PIL.ImageTk
import tkinter
import tkinter.font
import tkinter.ttk
import sys
import datetime
import os
import xlib
import xssh
#-------------------------------------------------------------------------------
class DialogTable(tkinter.Toplevel):
#---------------
def __init__(self, parent, title_text, window_height, window_width, data_list, data_dict, item_dict, action=None, params=[]):
'''
Execute actions corresponding to the creation of a "DialogTable" instance.
'''
# save initial parameters in instance variables
self.parent = parent
self.title_text = title_text
self.window_height = window_height
self.window_width = window_width
self.data_list = data_list
self.data_dict = data_dict
self.item_dict = item_dict
self.action = action
self.params = params
# call the parent init method
tkinter.Toplevel.__init__(self)
# create the window of the Dialog Table.
self.create_window()
# build the graphical user interface
self.build_gui()
# populate the table with data
self.populate_table()
#---------------
def create_window(self):
'''
Create the window of "DialogTable".
'''
# define the dimensions
self.minsize(height=self.window_height, width=self.window_width)
self.maxsize(height=self.window_height, width=self.window_width)
x = round((self.winfo_screenwidth() - self.window_width) / 2)
y = round((self.winfo_screenheight() - self.window_height) / 2)
self.geometry('{}x{}+{}+{}'.format(self.window_width, self.window_height, x, y))
# set the title
self.title('{0} - {1} - Table'.format(xlib.get_project_name(), self.title_text))
# set the icon
image_app = PIL.Image.open(xlib.get_project_image_file())
self.photoimage_app = PIL.ImageTk.PhotoImage(image_app)
self.tk.call('wm', 'iconphoto', self._w, self.photoimage_app)
# associate this window with the parent window
self.transient(self.parent)
#---------------
def build_gui(self):
'''
Build the graphical user interface of "DialogTable".
'''
# create "imagetk_close"
image_close = PIL.Image.open('./image_close.png')
imagetk_close = PIL.ImageTk.PhotoImage(image_close)
# create "frame_toolbar" and register it with the pack geometry manager
self.frame_toolbar = tkinter.Frame(self, borderwidth=1, relief='raised')
self.frame_toolbar.pack(side='top', fill='x')
# create "button_close" and register it with the pack geometry manager
self.button_close = tkinter.Button(self.frame_toolbar, command=self.close, relief='flat', image=imagetk_close)
self.button_close.image = imagetk_close
self.button_close.pack(side='left', padx=2, pady=5)
# create "treeview" and register it with the pack geometry manager
self.treeview = tkinter.ttk.Treeview(self)
self.treeview.pack(side='left', fill='both', expand=True)
# set columns in Treeview widget
self.treeview['columns'] = self.data_list
self.treeview['show'] = 'headings'
for datum in self.data_list:
# -- self.treeview.column(datum, width=self.data_dict[datum]['width'])
if self.data_dict[datum]['aligment'] == 'left':
aligment = tkinter.W
elif self.data_dict[datum]['aligment'] == 'centre':
aligment = tkinter.W + tkinter.E
elif self.data_dict[datum]['aligment'] == 'right':
aligment = tkinter.E
self.treeview.column(datum, minwidth=self.data_dict[datum]['width'], width=self.data_dict[datum]['width'], anchor=aligment, stretch=False)
self.treeview.heading(datum, text=self.data_dict[datum]['text'])
# create "scrollbar_x" and register it with the pack geometry manager
self.scrollbar_x = tkinter.Scrollbar(self.treeview, orient='horizontal', command=self.treeview.xview)
self.scrollbar_x.pack(side='bottom', fill='x')
self.treeview.configure(xscrollcommand=self.scrollbar_x.set)
# create "scrollbar_y" and register it with the pack geometry manager
self.scrollbar_y = tkinter.Scrollbar(self.treeview, orient='vertical', command=self.treeview.yview)
self.scrollbar_y.pack(side='right', fill='y')
self.treeview.configure(yscrollcommand=self.scrollbar_y.set)
# link a handler to events
self.treeview.bind("<Double-1>", self.double_click)
# link a handler to interactions between the application and the window manager
self.protocol('WM_DELETE_WINDOW', self.close)
#---------------
def populate_table(self):
'''
Populate the Treeview widget with the data of "DialogTable".
'''
# insert the items in Treeview widget
for item_key in sorted(self.item_dict.keys()):
row_values_list = []
for datum in self.data_list:
row_values_list.append(self.item_dict[item_key][datum])
self.treeview.insert('', 'end', values=row_values_list)
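# Illustrative note (added comment, not part of the original source): the
# three dictionaries passed to DialogTable follow the shape used by
# list_directory() further below, e.g.
#   data_list = ['file_type', 'file_name']
#   data_dict = {'file_type': {'text': 'Type', 'width': 120, 'aligment': 'left'}, ...}
#   item_dict = {'file-x': {'file_type': 'file', 'file_name': 'x'}, ...}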
#---------------
def double_click(self, event):
'''
Manage the action of a double click on a table item.
'''
# manage the action
try:
# get the table item selected
item = self.treeview.selection()[0]
except:
message = 'There is not any action associated with this table item.'
OK = tkinter.messagebox.showwarning(self.title(), message)
else:
if self.action == 'view_submission_logs':
run_id = self.treeview.item(item)['values'][0]
self.view_local_process_log(run_id)
elif self.action == 'view_result_logs':
experiment_id = self.treeview.item(item)['values'][0]
run_id = self.treeview.item(item)['values'][1]
self.view_log(experiment_id, run_id)
elif self.action == 'list_directory':
file_type = self.treeview.item(item)['values'][0]
file_name = self.treeview.item(item)['values'][1]
if file_type == 'directory':
self.list_directory(file_name)
else:
self.show_file_details(file_name)
else:
message = 'There is not any action associated with this table item.'
OK = tkinter.messagebox.showwarning(self.title(), message)
#---------------
def close(self, event=None):
'''
Close the "DialogTable".
'''
# delete all widgets and terminate the mainloop
self.destroy()
#---------------
def view_local_process_log(self, run_id):
'''
View the log of a local process.
'''
# build the local log file path
log_file = '{0}/{1}'.format(xlib.get_log_dir(), run_id)
# create and show an instance of "DialogViewer" to view the log file
dialog_viewer = DialogViewer(self, log_file, None)
self.wait_window(dialog_viewer)
#---------------
def view_log(self, experiment_id, run_id):
'''
View the log of the run identification.
'''
# get the cluster name
cluster_name = self.params[0]
# get the log file name and build cluster path
log_file = xlib.get_cluster_log_file()
cluster_file_path = '{0}/{1}/{2}'.format(xlib.get_cluster_experiment_result_dir(experiment_id), run_id, log_file)
# create and show an instance of "DialogViewer" to view the log file
dialog_viewer = DialogViewer(self, cluster_file_path, cluster_name)
self.wait_window(dialog_viewer)
#---------------
def list_directory(self, directory_name):
'''
View the directory of a dataset.
'''
# get the parent directory
parent_directory = self.params[0]
# get the SSH client
ssh_client = self.params[1]
# get the dictionary of files and subdirectories in the directory
command = 'ls -la {0}/{1}'.format(parent_directory, directory_name)
(OK, stdout, stderr) = xssh.execute_cluster_command(ssh_client, command)
if OK:
directory_dict = {}
for line in stdout:
line = line.rstrip('\n')
if line.startswith('d') or line.startswith('-'):
file_data_list = line.split()
file_type = 'directory' if file_data_list[0][0] == 'd' else 'file'
permissions = file_data_list[0][1:]
links_number = file_data_list[1]
owner_name = file_data_list[2]
owner_group = file_data_list[3]
file_size = file_data_list[4]
modification_month = file_data_list[5]
modification_day = file_data_list[6]
modification_time = file_data_list[7]
file_name = file_data_list[8]
if file_name not in ['.', '..', 'lost+found']:
key = '{0}-{1}'.format(file_type, file_name)
directory_dict[key] = {'file_type': file_type, 'permissions': permissions, 'links_number': links_number, 'owner_name': owner_name, 'owner_group': owner_group, 'file_size': file_size, 'modification_month': modification_month, 'modification_day': modification_day, 'modification_time': modification_time, 'file_name': file_name}
# verify whether the directory contains any files
if OK:
if directory_dict == {}:
message = 'There is not any file.'
tkinter.messagebox.showwarning('{0} - {1}'.format(xlib.get_project_name(), self.head), message)
# build the data list
if OK:
data_list = ['file_type', 'file_name']
# build the data dictionary
if OK:
data_dict = {}
data_dict['file_type']= {'text': 'Type', 'width': 120, 'aligment': 'left'}
data_dict['file_name'] = {'text': 'Name', 'width': 400, 'aligment': 'left'}
# create the dialog Table to show the directory contents
if OK:
dialog_table = DialogTable(self, 'Directory {0}/{1}'.format(parent_directory, directory_name), 400, 600, data_list, data_dict, directory_dict, 'list_directory', ['{0}/{1}'.format(parent_directory, directory_name), ssh_client])
self.wait_window(dialog_table)
#---------------
def show_file_details(self, file_name):
'''
Show the details of a file.
'''
# get the parent directory
parent_directory = self.params[0]
# get the SSH client
ssh_client = self.params[1]
# get the details of the file
command = 'ls -la {0}/{1}'.format(parent_directory, file_name)
(OK, stdout, stderr) = xssh.execute_cluster_command(ssh_client, command)
if OK:
file_detail_dict = {}
for line in stdout:
line = line.rstrip('\n')
file_data_list = line.split()
permissions = file_data_list[0][1:]
links_number = file_data_list[1]
owner_name = file_data_list[2]
owner_group = file_data_list[3]
day = int(file_data_list[6])
try:
month = 1 + ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'].index(file_data_list[5])
except:
month = 0
if file_data_list[7].find(':') > -1:
year = datetime.datetime.now().year
modification_date = '{0:4}-{1:02d}-{2:02d}'.format(year, month, day)
modification_time = file_data_list[7]
else:
year = int(file_data_list[7])
modification_date = '{0:4}-{1:02}-{2:02}'.format(year, month, day)
modification_time = ' '
file_name = file_data_list[8]
file_detail_dict[0] = {'data': 'directory', 'value': os.path.dirname(file_name)}
file_detail_dict[1] = {'data': 'name', 'value': os.path.basename(file_name)}
file_detail_dict[2] = {'data': 'size', 'value': file_data_list[4]}
file_detail_dict[3] = {'data': 'permissions', 'value': file_data_list[0][1:]}
file_detail_dict[4] = {'data': 'modification date', 'value': modification_date}
file_detail_dict[5] = {'data': 'modification time', 'value': modification_time}
file_detail_dict[6] = {'data': 'owner group', 'value': file_data_list[3]}
file_detail_dict[7] = {'data': 'owner name', 'value': file_data_list[2]}
# verify whether any file details were obtained
if OK:
if file_detail_dict == {}:
message = 'There is not any detail.'
tkinter.messagebox.showwarning('{0} - {1}'.format(xlib.get_project_name(), self.head), message)
# build the data list
if OK:
data_list = ['data', 'value']
# build the data dictionary
if OK:
data_dict = {}
data_dict['data']= {'text': 'Data', 'width': 120, 'aligment': 'left'}
data_dict['value'] = {'text': 'Value', 'width': 400, 'aligment': 'left'}
# create the dialog Table to show the file details
if OK:
dialog_table = DialogTable(self, 'File {0}/{1}'.format(parent_directory, file_name), 400, 600, data_list, data_dict, file_detail_dict)
self.wait_window(dialog_table)
#---------------
#-------------------------------------------------------------------------------
class DialogLog(tkinter.Toplevel):
#---------------
WINDOW_MIN_HEIGHT = 680
WINDOW_MIN_WIDTH = 680
#---------------
def __init__(self, parent, head='', calling_function=None):
'''
Execute actions corresponding to the creation of a "DialogLog" instance.
'''
# save initial parameters in instance variables
self.parent = parent
self.head = head
self.calling_function = calling_function
self.is_enabled_button_close = False
# call the parent init method
tkinter.Toplevel.__init__(self)
# create the window of the Dialog Log.
self.create_window()
# build the graphical user interface
self.build_gui()
# set cursor to show busy status
self.config(cursor='watch')
self.update()
self.text.config(cursor='watch')
self.text.update()
# get the local log file
self.log_file = xlib.get_log_file(self.calling_function)
# open the local log file
try:
if not os.path.exists(os.path.dirname(self.log_file)):
os.makedirs(os.path.dirname(self.log_file))
self.log_file_id = open(self.log_file, mode='w', encoding='iso-8859-1')
except:
message = '*** ERROR: The file {0} can not be created'.format(self.log_file)
tkinter.messagebox.showerror('{0} - {1}'.format(xlib.get_project_name(), self.head), message)
# delete all widgets and terminate the mainloop
self.destroy()
#---------------
def create_window(self):
'''
Create the window of "DialogLog".
'''
# define the dimensions
self.minsize(height=self.WINDOW_MIN_HEIGHT, width=self.WINDOW_MIN_WIDTH)
self.maxsize(height=self.winfo_screenheight(), width=self.winfo_screenwidth())
x = round((self.winfo_screenwidth() - self.WINDOW_MIN_WIDTH) / 2)
y = round((self.winfo_screenheight() - self.WINDOW_MIN_HEIGHT) / 2)
self.geometry('{}x{}+{}+{}'.format(self.WINDOW_MIN_WIDTH, self.WINDOW_MIN_HEIGHT, x, y))
# set the title
self.title('{0} - {1} - Log'.format(xlib.get_project_name(), self.head))
# set the icon
image_app = PIL.Image.open(xlib.get_project_image_file())
self.photoimage_app = PIL.ImageTk.PhotoImage(image_app)
self.tk.call('wm', 'iconphoto', self._w, self.photoimage_app)
# associate this window with the parent window
self.transient(self.parent)
#---------------
def build_gui(self):
'''
Build the graphical user interface of "DialogLog".
'''
# create "imagetk_close"
image_close = PIL.Image.open('./image_close.png')
imagetk_close = PIL.ImageTk.PhotoImage(image_close)
# create "frame_toolbar" and register it with the grid geometry manager
self.frame_toolbar = tkinter.Frame(self, borderwidth=1, relief='raised')
self.frame_toolbar.pack(side='top', fill='x')
# create "button_close" and register it with the pack geometry manager
self.button_close = tkinter.Button(self.frame_toolbar, command=self.close, relief='flat', image=imagetk_close, state='disabled')
self.button_close.image = imagetk_close
self.button_close.pack(side='left', padx=2, pady=5)
# create "text" and register it with the grid geometry manager
self.text = tkinter.Text(self, wrap='none', state='disabled')
self.text.pack(expand=True, fill='both')
# create "scrollbar_x" and register it with the pack geometry manager
self.scrollbar_x = tkinter.Scrollbar(self.text, orient='horizontal', command=self.text.xview)
self.scrollbar_x.pack(side='bottom', fill='x')
self.text.configure(xscrollcommand=self.scrollbar_x.set)
# create "scrollbar_y" and register it with the pack geometry manager
self.scrollbar_y = tkinter.Scrollbar(self.text, orient='vertical', command=self.text.yview)
self.scrollbar_y.pack(side='right', fill='y')
self.text.configure(yscrollcommand=self.scrollbar_y.set)
# link a handler to interactions between the application and the window manager
self.protocol('WM_DELETE_WINDOW', self.close)
#---------------
def close(self, event=None):
'''
Close "DialogLog".
'''
# close the local log file
self.log_file_id.close()
# delete all widgets and terminate the mainloop
if self.is_enabled_button_close:
self.destroy()
#---------------
def enable_button_close(self):
'''
Enable "button_close".
'''
# set cursor to show normal status
self.config(cursor='')
self.update()
self.text.config(cursor='')
self.text.update()
# set state "normal" to "button_close"
self.button_close['state'] = 'normal'
self.is_enabled_button_close = True
#---------------
def write(self, message=''):
'''
Add a message in the widget "text" and in the log file.
'''
# write the message in widget "text"
self.text.configure(state='normal')
self.text.insert('end', message)
self.text.see('end')
self.text.update()
self.text.configure(state='disabled')
# write in the log file
self.log_file_id.write(message)
self.log_file_id.flush()
os.fsync(self.log_file_id.fileno())
#---------------
def get_log_file(self):
'''
Get the current log file name
'''
return self.log_file
#---------------
#-------------------------------------------------------------------------------
class DialogViewer(tkinter.Toplevel):
#---------------
WINDOW_MIN_HEIGHT = 650
WINDOW_MIN_WIDTH = 800
#---------------
def __init__(self, parent, file_path, cluster_name=None):
'''
Execute actions corresponding to the creation of a "DialogViewer" instance.
'''
# save initial parameters in instance variables
self.parent = parent
self.file_path = file_path
self.cluster_name = cluster_name
# call the parent init method
tkinter.Toplevel.__init__(self)
# create the window of the Dialog Viewer.
self.create_window()
# build the graphical user interface
self.build_gui()
self.open_file()
#---------------
def create_window(self):
'''
Create the window of "DialogViewer".
'''
# define the dimensions
self.minsize(height=self.WINDOW_MIN_HEIGHT, width=self.WINDOW_MIN_WIDTH)
self.maxsize(height=self.winfo_screenheight(), width=self.winfo_screenwidth())
x = round((self.winfo_screenwidth() - self.WINDOW_MIN_WIDTH) / 2)
y = round((self.winfo_screenheight() - self.WINDOW_MIN_HEIGHT) / 2)
self.geometry('{}x{}+{}+{}'.format(self.WINDOW_MIN_WIDTH, self.WINDOW_MIN_HEIGHT, x, y))
# set the title
self.title('{0} - View - {1}'.format(xlib.get_project_name(), self.file_path))
# set the icon
image_app = PIL.Image.open(xlib.get_project_image_file())
self.photoimage_app = PIL.ImageTk.PhotoImage(image_app)
self.tk.call('wm', 'iconphoto', self._w, self.photoimage_app)
# associate this window with the parent window
self.transient(self.parent)
#---------------
def build_gui(self):
'''
Build the graphical user interface of "DialogViewer".
'''
# create "imagetk_close"
image_close = PIL.Image.open('./image_close.png')
imagetk_close = PIL.ImageTk.PhotoImage(image_close)
# create "imagetk_refresh"
image_refresh = PIL.Image.open('./image_refresh.png')
imagetk_refresh = PIL.ImageTk.PhotoImage(image_refresh)
# create "frame_toolbar" and register it with the grid geometry manager
self.frame_toolbar = tkinter.Frame(self, borderwidth=1, relief='raised')
self.frame_toolbar.pack(side='top', fill='x')
# create "button_close" and register it with the pack geometry manager
self.button_close = tkinter.Button(self.frame_toolbar, command=self.close, relief='flat', image=imagetk_close)
self.button_close.image = imagetk_close
self.button_close.pack(side='left', padx=2, pady=5)
# create "separator" and register it with the pack geometry manager
self.separator = tkinter.ttk.Separator(self.frame_toolbar, orient='vertical')
self.separator.pack(side='left', fill='y', padx=2, pady=2)
# create "button_refresh" and register it with the pack geometry manager
self.button_refresh = tkinter.Button(self.frame_toolbar, command=self.open_file, relief='flat', image=imagetk_refresh)
self.button_refresh.image = imagetk_refresh
self.button_refresh.pack(side='left', padx=2, pady=5)
# create "text" and register it with the grid geometry manager
self.text = tkinter.Text(self, wrap='none', state='disabled')
self.text.pack(expand='yes', fill='both')
# create "scrollbar_x" and register it with the pack geometry manager
self.scrollbar_x = tkinter.Scrollbar(self.text, orient='horizontal', command=self.text.xview)
self.scrollbar_x.pack(side='bottom', fill='x')
self.text.configure(xscrollcommand=self.scrollbar_x.set)
# create "scrollbar_y" and register it with the pack geometry manager
self.scrollbar_y = tkinter.Scrollbar(self.text, orient='vertical', command=self.text.yview)
self.scrollbar_y.pack(side='right', fill='y')
self.text.configure(yscrollcommand=self.scrollbar_y.set)
# link a handler to events
self.bind('<Alt-F4>', self.close)
# link a handler to interactions between the application and the window manager
self.protocol('WM_DELETE_WINDOW', self.close)
#---------------
def open_file(self):
'''
Open a config file in "DialogViewer".
'''
# set cursor to show busy status
self.config(cursor='watch')
self.update()
self.text.config(cursor='watch')
self.text.update()
# initialize the control variable
OK = True
# when the file is in the local computer
if self.cluster_name == None:
local_file_path = self.file_path
# when the file is in a cluster
else:
# create the SSH transport connection
if OK:
(OK, error_list, ssh_transport) = xssh.create_ssh_transport_connection(self.cluster_name, 'master')
if not OK:
message = ''
for error in error_list:
message = '{0}{1}\n'.format(message, error)
tkinter.messagebox.showerror(self.title(), message)
# create the SFTP client
if OK:
sftp_client = xssh.create_sftp_client(ssh_transport)
# create the local path
if not os.path.exists(xlib.get_temp_dir()):
os.makedirs(xlib.get_temp_dir())
# get the log file name and build local and cluster paths
if OK:
local_file_path = '{0}/{1}'.format(xlib.get_temp_dir(), os.path.basename(self.file_path))
# download the log file from the cluster
if OK:
OK = xssh.get_file(sftp_client, self.file_path, local_file_path)
if not OK:
message = 'The log file {0} could not be downloaded.'.format(self.file_path)
tkinter.messagebox.showerror(self.title(), message)
# close the SSH transport connection
xssh.close_ssh_transport_connection(ssh_transport)
# load the file content in "text"
if OK:
self.text.configure(state='normal')
self.text.delete('1.0', 'end')
try:
with open(local_file_path) as local_file_id:
self.text.insert('1.0', local_file_id.read())
except:
tkinter.messagebox.showerror('{0} - Open'.format(xlib.get_project_name()), 'The file {0} cannot be opened.'.format(local_file_path))
else:
self.text.configure(state='disabled')
# set cursor to show normal status
self.config(cursor='')
self.update()
self.text.config(cursor='')
self.text.update()
#---------------
def close(self, event=None):
'''
Close "DialogViewer".
'''
# delete all widgets and terminate the mainloop
self.destroy()
#---------------
#-------------------------------------------------------------------------------
class DialogEditor(tkinter.Toplevel):
#---------------
WINDOW_MIN_HEIGHT = 650
WINDOW_MIN_WIDTH = 800
#---------------
def __init__(self, parent, file_path):
'''
Execute actions corresponding to the creation of a "DialogEditor" instance.
'''
# save initial parameters in instance variables
self.parent = parent
self.file_path = file_path
# call the parent init method
tkinter.Toplevel.__init__(self)
# create the window of the Dialog Editor.
self.create_window()
# build the graphical user interface
self.build_gui()
self.open_file()
#---------------
def create_window(self):
'''
Create the window of "DialogEditor".
'''
# define the dimensions
self.minsize(height=self.WINDOW_MIN_HEIGHT, width=self.WINDOW_MIN_WIDTH)
self.maxsize(height=self.winfo_screenheight(), width=self.winfo_screenwidth())
x = round((self.winfo_screenwidth() - self.WINDOW_MIN_WIDTH) / 2)
y = round((self.winfo_screenheight() - self.WINDOW_MIN_HEIGHT) / 2)
self.geometry('{}x{}+{}+{}'.format(self.WINDOW_MIN_WIDTH, self.WINDOW_MIN_HEIGHT, x, y))
# set the title
self.title('{0} - Edit - {1}'.format(xlib.get_project_name(), self.file_path))
# set the icon
image_app = PIL.Image.open(xlib.get_project_image_file())
self.photoimage_app = PIL.ImageTk.PhotoImage(image_app)
self.tk.call('wm', 'iconphoto', self._w, self.photoimage_app)
# associate this window with the parent window
self.transient(self.parent)
#---------------
def build_gui(self):
'''
Build the graphical user interface of "DialogEditor".
'''
# create "imagetk_close"
image_close = PIL.Image.open('./image_close.png')
imagetk_close = PIL.ImageTk.PhotoImage(image_close)
# create "imagetk_save"
image_save = PIL.Image.open('./image_save.png')
imagetk_save = PIL.ImageTk.PhotoImage(image_save)
# create "imagetk_undo"
image_undo = PIL.Image.open('./image_undo.gif')
imagetk_undo = PIL.ImageTk.PhotoImage(image_undo)
# create "imagetk_redo"
image_redo = PIL.Image.open('./image_redo.gif')
imagetk_redo = PIL.ImageTk.PhotoImage(image_redo)
# create "imagetk_cut"
image_cut = PIL.Image.open('./image_cut.gif')
imagetk_cut = PIL.ImageTk.PhotoImage(image_cut)
# create "imagetk_copy"
image_copy = PIL.Image.open('./image_copy.gif')
imagetk_copy = PIL.ImageTk.PhotoImage(image_copy)
# create "imagetk_paste"
image_paste = PIL.Image.open('./image_paste.gif')
imagetk_paste = PIL.ImageTk.PhotoImage(image_paste)
# create "frame_toolbar" and register it with the grid geometry manager
self.frame_toolbar = tkinter.Frame(self, borderwidth=1, relief='raised')
self.frame_toolbar.pack(side='top', fill='x')
# create "button_close" and register it with the pack geometry manager
self.button_close = tkinter.Button(self.frame_toolbar, command=self.close, relief='flat', image=imagetk_close)
self.button_close.image = imagetk_close
self.button_close.pack(side='left', padx=2, pady=5)
# create "separator_1" and register it with the pack geometry manager
self.separator_1 = tkinter.ttk.Separator(self.frame_toolbar, orient='vertical')
self.separator_1.pack(side='left', fill='y', padx=2, pady=2)
# create "button_save" and register it with the pack geometry manager
self.button_save = tkinter.Button(self.frame_toolbar, command=self.save, relief='flat', image=imagetk_save)
self.button_save.image = imagetk_save
self.button_save.pack(side='left', padx=2, pady=5)
# create "separator_2" and register it with the pack geometry manager
self.separator_2 = tkinter.ttk.Separator(self.frame_toolbar, orient='vertical')
self.separator_2.pack(side='left', fill='y', padx=2, pady=2)
# create "button_undo" and register it with the pack geometry manager
self.button_undo = tkinter.Button(self.frame_toolbar, command=self.undo, relief='flat', image=imagetk_undo)
self.button_undo.image = imagetk_undo
self.button_undo.pack(side='left', padx=2, pady=5)
# create "button_redo" and register it with the pack geometry manager
self.button_redo = tkinter.Button(self.frame_toolbar, command=self.redo, relief='flat', image=imagetk_redo)
self.button_redo.image = imagetk_redo
self.button_redo.pack(side='left', padx=2, pady=5)
# create "separator_3" and register it with the pack geometry manager
self.separator_3 = tkinter.ttk.Separator(self.frame_toolbar, orient='vertical')
self.separator_3.pack(side='left', fill='y', padx=2, pady=2)
# create "button_cut" and register it with the pack geometry manager
self.button_cut = tkinter.Button(self.frame_toolbar, command=self.cut, relief='flat', image=imagetk_cut)
self.button_cut.image = imagetk_cut
self.button_cut.pack(side='left', padx=2, pady=5)
# create "button_copy" and register it with the pack geometry manager
self.button_copy = tkinter.Button(self.frame_toolbar, command=self.copy, relief='flat', image=imagetk_copy)
self.button_copy.image = imagetk_copy
self.button_copy.pack(side='left', padx=2, pady=5)
# create "button_paste" and register it with the pack geometry manager
self.button_paste = tkinter.Button(self.frame_toolbar, command=self.paste, relief='flat', image=imagetk_paste)
self.button_paste.image = imagetk_paste
self.button_paste.pack(side='left', padx=2, pady=5)
# create "text" and register it with the grid geometry manager
self.text = tkinter.Text(self, wrap='none', undo=True)
self.text.pack(expand='yes', fill='both')
# create "scrollbar_x" and register it with the pack geometry manager
self.scrollbar_x = tkinter.Scrollbar(self.text, orient='horizontal', command=self.text.xview)
self.scrollbar_x.pack(side='bottom', fill='x')
self.text.configure(xscrollcommand=self.scrollbar_x.set)
# create "scrollbar_y" and register it with the pack geometry manager
self.scrollbar_y = tkinter.Scrollbar(self.text, orient='vertical', command=self.text.yview)
self.scrollbar_y.pack(side='right', fill='y')
self.text.configure(yscrollcommand=self.scrollbar_y.set)
# create "menu_popup" add add its menu items
self.menu_popup = tkinter.Menu(self.text)
self.menu_popup.add_command(label='Undo', command=self.undo, underline=0)
self.menu_popup.add_command(label='Redo', command=self.redo, underline=0)
self.menu_popup.add_separator()
self.menu_popup.add_command(label='Cut', command=self.cut, underline=0)
self.menu_popup.add_command(label='Copy', command=self.copy, underline=1)
self.menu_popup.add_command(label='Paste', command=self.paste, underline=0)
# link a handler to events
self.bind('<Alt-F4>', self.close)
# -- self.bind('<Control-c>', self.copy)
# -- self.bind('<Control-C>', self.copy)
self.bind('<Control-s>', self.save)
self.bind('<Control-S>', self.save)
# -- self.bind('<Control-v>', self.paste)
# -- self.bind('<Control-V>', self.paste)
# -- self.bind('<Control-x>', self.copy)
# -- self.bind('<Control-X>', self.copy)
self.bind('<Control-y>', self.redo)
self.bind('<Control-Y>', self.redo)
self.bind('<Control-z>', self.undo)
self.bind('<Control-Z>', self.undo)
self.text.bind('<Button-3>', self.show_menu_popup)
# link a handler to interactions between the application and the window manager
self.protocol('WM_DELETE_WINDOW', self.close)
#---------------
def open_file(self):
'''
Open a config file in "DialogEditor".
'''
self.text.delete('1.0', 'end')
try:
with open(self.file_path) as id_config_file:
self.text.insert('1.0', id_config_file.read())
except:
tkinter.messagebox.showerror('{0} - Open'.format(xlib.get_project_name()), 'The file {0} cannot be opened.'.format(self.file_path))
else:
self.text.edit_modified(False)
#---------------
def close(self, event=None):
'''
Close "DialogEditor".
'''
if self.text.edit_modified():
if tkinter.messagebox.askyesno('{0} - Close'.format(xlib.get_project_name()), 'The file {0} has been modified. Do you want to save it?'.format(self.file_path)):
self.save()
# delete all widgets and terminate the mainloop
self.destroy()
#---------------
def save(self, event=None):
'''
Save the file opened in "DialogEditor".
'''
try:
document = self.text.get('1.0', 'end')
with open(self.file_path, 'w') as id_config_file:
id_config_file.write(document)
except IOError:
tkinter.messagebox.showwarning('{0} - Save'.format(xlib.get_project_name()), 'The file {0} cannot be saved.'.format(self.file_path))
else:
self.text.edit_modified(False)
#---------------
def undo(self, event=None):
'''
Undo the last change.
'''
self.text.event_generate('<<Undo>>')
return 'break'
#---------------
def redo(self, event=None):
'''
Redo the last change.
'''
self.text.event_generate("<<Redo>>")
return 'break'
#---------------
def cut(self, event=None):
'''
Cut the selected text and put in the clipboard.
'''
self.text.event_generate('<<Cut>>')
return 'break'
#---------------
def copy(self, event=None):
'''
Copy the selected text in the clipboard.
'''
self.text.event_generate('<<Copy>>')
return 'break'
#---------------
def paste(self, event=None):
'''
Paste the text from the clipboard.
'''
self.text.event_generate('<<Paste>>')
return 'break'
#---------------
def show_menu_popup(self, event=None):
'''
Show the popup menu.
'''
self.menu_popup.tk_popup(event.x_root, event.y_root)
#---------------
#-------------------------------------------------------------------------------
class DialogAbout(tkinter.Toplevel):
#---------------
WINDOW_HEIGHT = 300
WINDOW_WIDTH = 525
#---------------
def __init__(self, parent):
'''
Execute actions corresponding to the creation of a "DialogAbout" instance.
'''
# save initial parameters in instance variables
self.parent = parent
# call the parent init method
tkinter.Toplevel.__init__(self)
# create the window of the Dialog About.
self.create_window()
# build the graphical user interface
self.build_gui()
#---------------
def create_window(self):
'''
Create the window of "DialogAbout".
'''
# define the dimensions
self.minsize(height=self.WINDOW_HEIGHT, width=self.WINDOW_WIDTH)
self.maxsize(height=self.WINDOW_HEIGHT, width=self.WINDOW_WIDTH)
x = round((self.winfo_screenwidth() - self.WINDOW_WIDTH) / 2)
y = round((self.winfo_screenheight() - self.WINDOW_HEIGHT) / 2)
self.geometry('{}x{}+{}+{}'.format(self.WINDOW_WIDTH, self.WINDOW_HEIGHT, x, y))
# set the title
self.title('{0} - About'.format(xlib.get_project_name()))
# set the icon
image_app = PIL.Image.open(xlib.get_project_image_file())
self.photoimage_app = PIL.ImageTk.PhotoImage(image_app)
self.tk.call('wm', 'iconphoto', self._w, self.photoimage_app)
# associate this window with the parent window
self.transient(self.parent)
#---------------
def build_gui(self):
'''
Build the graphical user interface of "DialogAbout".
'''
# create "label_proyect" and register it with the grid geometry manager
self.label_proyect = tkinter.Label(self, text='{0} v{1}'.format(xlib.get_project_name(), xlib.get_project_version()), font=tkinter.font.Font(size=10, weight='bold'))
self.label_proyect.grid(row=0, column=1, padx=(5,5), pady=(20,5), sticky='w')
# create "canvas_photoimage_app" and register it with the grid geometry manager
self.canvas_photoimage_app = tkinter.Canvas(self)
self.canvas_photoimage_app.create_image(128/2, 128/2, image=self.parent.photoimage_app)
self.canvas_photoimage_app.config(width=128, height=128)
self.canvas_photoimage_app.grid(row=1, column=0, rowspan=6, padx=(5,5), pady=(40,5), sticky='nsew')
# create "label_group" and register it with the grid geometry manager
self.label_group = tkinter.Label(self, text='GI Genética, Fisiología e Historia Forestal')
self.label_group.grid(row=1, column=1, padx=(5,5), pady=(20,5), sticky='w')
# create "label_department" and register it with the grid geometry manager
self.label_department = tkinter.Label(self, text='Dpto. Sistemas y Recursos Naturales')
self.label_department.grid(row=2, column=1, padx=(5,5), pady=(5,5), sticky='w')
# create "label_school" and register it with the grid geometry manager
self.label_school = tkinter.Label(self, text='ETSI Montes, Forestal y del Medio Natural')
self.label_school.grid(row=3, column=1, padx=(5,5), pady=(5,5), sticky='w')
# create "label_university" and register it with the grid geometry manager
self.label_university = tkinter.Label(self, text='Universidad Politécnica de Madrid')
self.label_university.grid(row=4, column=1, padx=(5,5), pady=(5,5), sticky='w')
# create "label_www1" and register it with the grid geometry manager
self.label_www1 = tkinter.Label(self, text='http://gfhforestal.com/')
self.label_www1.grid(row=5, column=1, padx=(5,5), pady=(5,5), sticky='w')
# create "label_www2" and register it with the grid geometry manager
self.label_www2 = tkinter.Label(self, text='https://github.com/ggfhf/')
self.label_www2.grid(row=6, column=1, padx=(5,5), pady=(5,5), sticky='w')
# create "label_fit" and register it with the grid geometry manager
self.label_fit = tkinter.Label(self, text=' '*5)
self.label_fit.grid(row=7, column=2, padx=(0,0), pady=(20,5), sticky='e')
# create "label_separator" and register it with the grid geometry manager
self.button_close = tkinter.ttk.Button(self, text='Close', underline=0, command=self.close)
self.button_close.grid(row=7, column=3, padx=(5,5), pady=(20,5), sticky='e')
# link a handler to events
self.bind('<Alt-c>', (lambda evento: self.button_close.invoke()))
self.bind('<Alt-C>', (lambda evento: self.button_close.invoke()))
self.bind('<KP_Enter>', (lambda evento: self.button_close.invoke()))
self.bind('<Return>', (lambda evento: self.button_close.invoke()))
# link a handler to interactions between the application and the window manager
self.protocol('WM_DELETE_WINDOW', self.close)
# set the focus in "button_close"
self.button_close.focus_set()
#---------------
def close(self):
'''
Close "DialogAbout".
'''
# delete all widgets and terminate the mainloop
self.destroy()
#---------------
#-------------------------------------------------------------------------------
if __name__ == '__main__':
print('This file contains the dialog classes corresponding to the graphical user interface of the NGScloud software package.')
sys.exit(0)
#-------------------------------------------------------------------------------
|
gpl-3.0
| 5,759,465,785,346,039,000 | -5,938,522,171,830,210,000 | 36.62735 | 350 | 0.57714 | false |
xen0l/ansible
|
lib/ansible/modules/remote_management/ucs/ucs_ntp_server.py
|
11
|
4688
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: ucs_ntp_server
short_description: Configures NTP server on Cisco UCS Manager
extends_documentation_fragment:
- ucs
description:
- Configures NTP server on Cisco UCS Manager.
- Examples can be used with the L(UCS Platform Emulator,https://communities.cisco.com/ucspe).
options:
state:
description:
- If C(absent), will remove an NTP server.
- If C(present), will add or update an NTP server.
choices: [absent, present]
default: present
ntp_server:
description:
- NTP server IP address or hostname.
- Enter up to 63 characters that form a valid hostname.
- Enter a valid IPV4 Address.
aliases: [ name ]
default: ""
description:
description:
- A user-defined description of the NTP server.
- Enter up to 256 characters.
- "You can use any characters or spaces except the following:"
- "` (accent mark), \ (backslash), ^ (carat), \" (double quote), = (equal sign), > (greater than), < (less than), or ' (single quote)."
aliases: [ descr ]
default: ""
requirements:
- ucsmsdk
author:
- John McDonough (@movinalot)
- CiscoUcs (@CiscoUcs)
version_added: "2.7"
'''
EXAMPLES = r'''
- name: Configure NTP server
ucs_ntp_server:
hostname: 172.16.143.150
username: admin
password: password
ntp_server: 10.10.10.10
description: Internal NTP Server by IP address
state: present
- name: Configure NTP server
ucs_ntp_server:
hostname: 172.16.143.150
username: admin
password: password
ntp_server: pool.ntp.org
description: External NTP Server by hostname
state: present
- name: Remove NTP server
ucs_ntp_server:
hostname: 172.16.143.150
username: admin
password: password
ntp_server: 10.10.10.10
state: absent
- name: Remove NTP server
ucs_ntp_server:
hostname: 172.16.143.150
username: admin
password: password
ntp_server: pool.ntp.org
state: absent
'''
RETURN = r'''
#
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.remote_management.ucs import UCSModule, ucs_argument_spec
def run_module():
argument_spec = ucs_argument_spec
argument_spec.update(
ntp_server=dict(type='str', aliases=['name']),
description=dict(type='str', aliases=['descr'], default=''),
state=dict(type='str', default='present', choices=['present', 'absent']),
)
module = AnsibleModule(
argument_spec,
supports_check_mode=True,
required_if=[
['state', 'present', ['ntp_server']],
],
)
# UCSModule verifies ucsmsdk is present and exits on failure. Imports are below ucs object creation.
ucs = UCSModule(module)
err = False
from ucsmsdk.mometa.comm.CommNtpProvider import CommNtpProvider
changed = False
try:
mo_exists = False
props_match = False
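# the managed object DN is derived from the NTP server name; for example
# (illustrative only), ntp_server '10.10.10.10' maps to
# 'sys/svc-ext/datetime-svc/ntp-10.10.10.10'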
dn = 'sys/svc-ext/datetime-svc/ntp-' + module.params['ntp_server']
mo = ucs.login_handle.query_dn(dn)
if mo:
mo_exists = True
if module.params['state'] == 'absent':
if mo_exists:
if not module.check_mode:
ucs.login_handle.remove_mo(mo)
ucs.login_handle.commit()
changed = True
else:
if mo_exists:
# check top-level mo props
kwargs = dict(descr=module.params['description'])
if mo.check_prop_match(**kwargs):
props_match = True
if not props_match:
if not module.check_mode:
# update/add mo
mo = CommNtpProvider(parent_mo_or_dn='sys/svc-ext/datetime-svc',
name=module.params['ntp_server'],
descr=module.params['description'])
ucs.login_handle.add_mo(mo, modify_present=True)
ucs.login_handle.commit()
changed = True
except Exception as e:
err = True
ucs.result['msg'] = "setup error: %s " % str(e)
ucs.result['changed'] = changed
if err:
module.fail_json(**ucs.result)
module.exit_json(**ucs.result)
def main():
run_module()
if __name__ == '__main__':
main()
|
gpl-3.0
| 1,552,419,439,679,565,600 | -9,218,391,877,551,910,000 | 26.576471 | 139 | 0.599189 | false |
basepi/hubble
|
hubblestack/files/hubblestack_nova/misc.py
|
2
|
45373
|
# -*- encoding: utf-8 -*-
'''
Hubble Nova plugin for running miscellaneous one-off python functions to
run more complex nova audits without allowing arbitrary command execution
from within the yaml profiles.
Sample YAML data, with inline comments:
# Top level key lets the module know it should look at this data
misc:
# Unique ID for this set of audits
nodev:
data:
# 'osfinger' grain, for multiplatform support
'Red Hat Enterprise Linux Server-6':
# tag is required
tag: CIS-1.1.10
function: misc_function_name
args: # optional
- first_arg
- second_arg
kwargs: # optional
first_kwarg: value
second_kwarg: value
labels:
- critical
- raiseticket
# Catch-all, if no other osfinger match was found
'*':
tag: generic_tag
function: misc_function_name
args: # optional
- first_arg
- second_arg
kwargs: # optional
first_kwarg: value
second_kwarg: value
# Description will be output with the results
description: '/home should be nodev'
'''
from __future__ import absolute_import
import logging
import fnmatch
import os
import re
import salt.utils
from salt.ext import six
from salt.exceptions import CommandExecutionError
from collections import Counter
log = logging.getLogger(__name__)
def __virtual__():
return True
def apply_labels(__data__, labels):
'''
Filter out the test cases that do not carry all of the labels given when running the audit, and return a new data structure containing only the matching tests.
'''
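# Illustrative example (not part of the original module): with labels=['critical'],
# only test cases whose 'labels' list includes 'critical' (such as the 'nodev'
# sample in the module docstring) are kept; with labels=None or an empty list,
# the data is returned unchanged.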
ret={}
if labels:
labelled_test_cases=[]
for test_case in __data__.get('misc', []):
# each test case is a dictionary with just one key-val pair. key=test name, val=test data, description etc
if isinstance(test_case, dict) and test_case:
test_case_body = test_case.get(next(iter(test_case)))
if test_case_body.get('labels') and set(labels).issubset(set(test_case_body.get('labels',[]))):
labelled_test_cases.append(test_case)
ret['misc']=labelled_test_cases
else:
ret=__data__
return ret
def audit(data_list, tags, labels, debug=False, **kwargs):
'''
Run the misc audits contained in the data_list
'''
__data__ = {}
for profile, data in data_list:
_merge_yaml(__data__, data, profile)
__data__ = apply_labels(__data__, labels)
__tags__ = _get_tags(__data__)
if debug:
log.debug('misc audit __data__:')
log.debug(__data__)
log.debug('misc audit __tags__:')
log.debug(__tags__)
ret = {'Success': [], 'Failure': [], 'Controlled': []}
for tag in __tags__:
if fnmatch.fnmatch(tag, tags):
for tag_data in __tags__[tag]:
if 'control' in tag_data:
ret['Controlled'].append(tag_data)
continue
if 'function' not in tag_data:
continue
function = FUNCTION_MAP.get(tag_data['function'])
if not function:
if 'Errors' not in ret:
ret['Errors'] = []
ret['Errors'].append({tag: 'No function {0} found'
.format(tag_data['function'])})
continue
args = tag_data.get('args', [])
kwargs = tag_data.get('kwargs', {})
# Call the function
try:
result = function(*args, **kwargs)
except Exception as exc:
if 'Errors' not in ret:
ret['Errors'] = []
ret['Errors'].append({tag: 'An error occurred executing function {0}: {1}'
.format(tag_data['function'], str(exc))})
continue
if result is True:
ret['Success'].append(tag_data)
elif isinstance(result, six.string_types):
tag_data['failure_reason'] = result
ret['Failure'].append(tag_data)
else:
ret['Failure'].append(tag_data)
return ret
def _merge_yaml(ret, data, profile=None):
'''
Merge two yaml dicts together at the misc level
'''
if 'misc' not in ret:
ret['misc'] = []
if 'misc' in data:
for key, val in data['misc'].iteritems():
if profile and isinstance(val, dict):
val['nova_profile'] = profile
ret['misc'].append({key: val})
return ret
def _get_tags(data):
'''
Retrieve all the tags for this distro from the yaml
'''
ret = {}
distro = __grains__.get('osfinger')
for audit_dict in data.get('misc', []):
# misc:0
for audit_id, audit_data in audit_dict.iteritems():
# misc:0:nodev
tags_dict = audit_data.get('data', {})
# misc:0:nodev:data
tags = None
for osfinger in tags_dict:
if osfinger == '*':
continue
osfinger_list = [finger.strip() for finger in osfinger.split(',')]
for osfinger_glob in osfinger_list:
if fnmatch.fnmatch(distro, osfinger_glob):
tags = tags_dict.get(osfinger)
break
if tags is not None:
break
# If we didn't find a match, check for a '*'
if tags is None:
tags = tags_dict.get('*', {})
# misc:0:nodev:data:Debian-8
if 'tag' not in tags:
tags['tag'] = ''
tag = tags['tag']
if tag not in ret:
ret[tag] = []
formatted_data = {'tag': tag,
'module': 'misc'}
formatted_data.update(audit_data)
formatted_data.update(tags)
formatted_data.pop('data')
ret[tag].append(formatted_data)
return ret
############################
# Begin function definitions
############################
def _execute_shell_command(cmd, python_shell=False):
'''
This function will execute the passed command in /bin/bash
'''
return __salt__['cmd.run'](cmd, python_shell=python_shell, shell='/bin/bash', ignore_retcode=True)
def _is_valid_home_directory(directory_path, check_slash_home=False):
directory_path = None if directory_path is None else directory_path.strip()
if directory_path is not None and directory_path != "" and os.path.isdir(directory_path):
if check_slash_home and directory_path == "/":
return False
else:
return True
return False
def _is_permission_in_limit(max_permission, given_permission):
'''
Return True only if given_permission is not more lenient than max_permission. In other words, if
r, w or x is present in given_permission but absent in max_permission, it should return False.
Takes input two integer values from 0 to 7.
'''
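# Worked examples (illustrative only) of the comparison described above:
#   _is_permission_in_limit(6, 4) -> True   (read is within read+write)
#   _is_permission_in_limit(4, 6) -> False  (write is not allowed by read-only)
#   _is_permission_in_limit(7, 5) -> True   (read+execute is within rwx)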
max_permission = int(max_permission)
given_permission = int(given_permission)
allowed_r = False
allowed_w = False
allowed_x = False
given_r = False
given_w = False
given_x = False
if max_permission >= 4:
allowed_r = True
max_permission = max_permission - 4
if max_permission >= 2:
allowed_w = True
max_permission = max_permission - 2
if max_permission >= 1:
allowed_x = True
if given_permission >= 4:
given_r = True
given_permission = given_permission - 4
if given_permission >= 2:
given_w = True
given_permission = given_permission - 2
if given_permission >= 1:
given_x = True
if given_r and (not allowed_r):
return False
if given_w and (not allowed_w):
return False
if given_x and (not allowed_x):
return False
return True
def check_all_ports_firewall_rules(reason=''):
'''
Ensure firewall rule for all open ports
'''
start_open_ports = (_execute_shell_command('netstat -ln | grep "Active Internet connections (only servers)" -n | cut -d ":" -f1', python_shell=True)).strip()
end_open_ports = (_execute_shell_command('netstat -ln | grep "Active UNIX domain sockets (only servers)" -n | cut -d ":" -f1', python_shell=True)).strip()
open_ports = (_execute_shell_command('netstat -ln | awk \'FNR > ' + start_open_ports + ' && FNR < ' + end_open_ports + ' && $6 == "LISTEN" && $4 !~ /127.0.0.1/ {print $4}\' | sed -e "s/.*://"', python_shell=True)).strip()
open_ports = open_ports.split('\n') if open_ports != "" else []
firewall_ports = (_execute_shell_command('iptables -L INPUT -v -n | awk \'FNR > 2 && $11 != "" && $11 ~ /^dpt:/ {print $11}\' | sed -e "s/.*://"', python_shell=True)).strip()
firewall_ports = firewall_ports.split('\n') if firewall_ports != "" else []
no_firewall_ports = []
for open_port in open_ports:
if open_port not in firewall_ports:
no_firewall_ports.append(open_port)
return True if len(no_firewall_ports) == 0 else str(no_firewall_ports)
def check_password_fields_not_empty(reason=''):
'''
Ensure password fields are not empty
'''
result = _execute_shell_command('cat /etc/shadow | awk -F: \'($2 == "" ) { print $1 " does not have a password "}\'', python_shell=True)
return True if result == '' else result
def ungrouped_files_or_dir(reason=''):
'''
Ensure no ungrouped files or directories exist
'''
raise CommandExecutionError('Module disabled due to performance concerns')
result = _execute_shell_command('df --local -P | awk {\'if (NR!=1) print $6\'} | xargs -I \'{}\' find \'{}\' -xdev -nogroup', python_shell=True)
return True if result == '' else result
def unowned_files_or_dir(reason=''):
'''
Ensure no unowned files or directories exist
'''
raise CommandExecutionError('Module disabled due to performance concerns')
result = _execute_shell_command('df --local -P | awk {\'if (NR!=1) print $6\'} | xargs -I \'{}\' find \'{}\' -xdev -nouser', python_shell=True)
return True if result == '' else result
def world_writable_file(reason=''):
'''
Ensure no world writable files exist
'''
raise CommandExecutionError('Module disabled due to performance concerns')
result = _execute_shell_command('df --local -P | awk {\'if (NR!=1) print $6\'} | xargs -I \'{}\' find \'{}\' -xdev -type f -perm -0002', python_shell=True)
return True if result == '' else result
def system_account_non_login(non_login_shell='/sbin/nologin', max_system_uid='500', except_for_users=''):
'''
Ensure system accounts are non-login
'''
users_list = ['root','halt','sync','shutdown']
for user in except_for_users.split(","):
if user.strip() != "":
users_list.append(user.strip())
result = []
cmd = __salt__["cmd.run_all"]('egrep -v "^\+" /etc/passwd ')
for line in cmd['stdout'].split('\n'):
tokens = line.split(':')
if tokens[0] not in users_list and int(tokens[2]) < int(max_system_uid) and tokens[6] not in ( non_login_shell , "/bin/false" ):
result.append(line)
return True if result == [] else str(result)
def sticky_bit_on_world_writable_dirs(reason=''):
'''
Ensure sticky bit is set on all world-writable directories
'''
raise CommandExecutionError('Module disabled due to performance concerns')
result = _execute_shell_command('df --local -P | awk {\'if (NR!=1) print $6\'} | xargs -I \'{}\' find \'{}\' -xdev -type d \( -perm -0002 -a ! -perm -1000 \) 2>/dev/null', python_shell=True)
return True if result == '' else "There are failures"
def default_group_for_root(reason=''):
'''
Ensure default group for the root account is GID 0
'''
result = _execute_shell_command('grep "^root:" /etc/passwd | cut -f4 -d:', python_shell=True)
result = result.strip()
return True if result == '0' else False
def root_is_only_uid_0_account(reason=''):
'''
Ensure root is the only UID 0 account
'''
result = _execute_shell_command('cat /etc/passwd | awk -F: \'($3 == 0) { print $1 }\'', python_shell=True)
return True if result.strip() == 'root' else result
def test_mount_attrs(mount_name, attribute, check_type='hard'):
'''
Ensure that a given directory is mounted with appropriate attributes
If check_type is soft, then in absence of volume, True will be returned
If check_type is hard, then in absence of volume, False will be returned
'''
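# Hypothetical calls illustrating the soft/hard semantics described above:
#   test_mount_attrs('/tmp', 'nodev', check_type='soft') -> True when /tmp is
#       absent or not mounted; an error string if it is mounted without nodev
#   test_mount_attrs('/tmp', 'nodev', check_type='hard') -> an error string when
#       /tmp is absent, not mounted, or mounted without nodev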
# check that the path exists on system
command = 'test -e ' + mount_name
results = __salt__['cmd.run_all'](command, ignore_retcode=True)
retcode = results['retcode']
if str(retcode) == '1':
return True if check_type == "soft" else (mount_name + " folder does not exist")
# if the path exits, proceed with following code
output = __salt__['cmd.run']('cat /proc/mounts')
if not re.search(mount_name, output, re.M):
return True if check_type == "soft" else (mount_name + " is not mounted")
else:
for line in output.splitlines():
if mount_name in line and attribute not in line:
return str(line)
return True
def check_time_synchronization(reason=''):
'''
Ensure that some service is running to synchronize the system clock
'''
command = 'systemctl status systemd-timesyncd ntpd | grep "Active: active (running)"'
output = _execute_shell_command(command, python_shell=True)
if output.strip() == '':
return "neither ntpd nor timesyncd is running"
else:
return True
def restrict_permissions(path, permission):
'''
Ensure that the file permissions on path are equal to or stricter than the permission given in the argument
'''
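# Illustrative call: restrict_permissions('/etc/passwd', '644') returns True
# when the file mode is 644 or stricter (e.g. 600), otherwise it returns the
# actual three-digit mode so the caller can report it.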
path_details = __salt__['file.stats'](path)
given_permission = path_details.get('mode')
given_permission = given_permission[-3:]
max_permission = str(permission)
if (_is_permission_in_limit(max_permission[0], given_permission[0]) and _is_permission_in_limit(max_permission[1], given_permission[1]) and _is_permission_in_limit(max_permission[2], given_permission[2])):
return True
return given_permission
def check_path_integrity(reason=''):
'''
Ensure that system PATH variable is not malformed.
'''
script = """
if [ "`echo $PATH | grep ::`" != "" ]; then
echo "Empty Directory in PATH (::)"
fi
if [ "`echo $PATH | grep :$`" != "" ]; then
echo "Trailing : in PATH"
fi
p=`echo $PATH | sed -e 's/::/:/' -e 's/:$//' -e 's/:/ /g'`
set -- $p
while [ "$1" != "" ]; do
if [ "$1" = "." ]; then
echo "PATH contains ."
shift
continue
fi
if [ -d $1 ]; then
dirperm=`ls -ldH $1 | cut -f1 -d" "`
if [ `echo $dirperm | cut -c6` != "-" ]; then
echo "Group Write permission set on directory $1"
fi
if [ `echo $dirperm | cut -c9` != "-" ]; then
echo "Other Write permission set on directory $1"
fi
dirown=`ls -ldH $1 | awk '{print $3}'`
if [ "$dirown" != "root" ] ; then
echo $1 is not owned by root
fi
else
echo $1 is not a directory
fi
shift
done
"""
output = _execute_shell_command(script, python_shell=True)
return True if output.strip() == '' else output
def check_duplicate_uids(reason=''):
'''
Return the duplicate user ids if any exist in the /etc/passwd file, else return True
'''
uids = _execute_shell_command("cat /etc/passwd | cut -f3 -d\":\"", python_shell=True).strip()
uids = uids.split('\n') if uids != "" else []
duplicate_uids = [k for k, v in Counter(uids).items() if v > 1]
if duplicate_uids is None or duplicate_uids == []:
return True
return str(duplicate_uids)
def check_duplicate_gids(reason=''):
'''
Return the duplicate group ids if any exist in the /etc/group file, else return True
'''
gids = _execute_shell_command("cat /etc/group | cut -f3 -d\":\"", python_shell=True).strip()
gids = gids.split('\n') if gids != "" else []
duplicate_gids = [k for k, v in Counter(gids).items() if v > 1]
if duplicate_gids is None or duplicate_gids == []:
return True
return str(duplicate_gids)
def check_duplicate_unames(reason=''):
'''
Return the duplicate user names if any exist in the /etc/passwd file, else return True
'''
unames = _execute_shell_command("cat /etc/passwd | cut -f1 -d\":\"", python_shell=True).strip()
unames = unames.split('\n') if unames != "" else []
duplicate_unames = [k for k, v in Counter(unames).items() if v > 1]
if duplicate_unames is None or duplicate_unames == []:
return True
return str(duplicate_unames)
def check_duplicate_gnames(reason=''):
'''
Return the duplicate group names if any exist in the /etc/group file, else return True
'''
gnames = _execute_shell_command("cat /etc/group | cut -f1 -d\":\"", python_shell=True).strip()
gnames = gnames.split('\n') if gnames != "" else []
duplicate_gnames = [k for k, v in Counter(gnames).items() if v > 1]
if duplicate_gnames is None or duplicate_gnames == []:
return True
return str(duplicate_gnames)
def check_directory_files_permission(path, permission):
'''
Check all files permission inside a directory
'''
blacklisted_characters = '[^a-zA-Z0-9-_/]'
if "-exec" in path or re.findall(blacklisted_characters, path):
raise CommandExecutionError("Profile parameter '{0}' not a safe pattern".format(path))
files_list = _execute_shell_command("find {0} -type f".format(path)).strip()
files_list = files_list.split('\n') if files_list != "" else []
bad_permission_files = []
for file_in_directory in files_list:
per = restrict_permissions(file_in_directory, permission)
if per is not True:
bad_permission_files += [file_in_directory + ": Bad Permission - " + per + ":"]
return True if bad_permission_files == [] else str(bad_permission_files)
def check_core_dumps(reason=''):
'''
Ensure core dumps are restricted
'''
hard_core_dump_value = _execute_shell_command("grep -R -E \"hard +core\" /etc/security/limits.conf /etc/security/limits.d/ | awk '{print $4}'", python_shell=True).strip()
hard_core_dump_value = hard_core_dump_value.split('\n') if hard_core_dump_value != "" else []
if '0' in hard_core_dump_value:
return True
if hard_core_dump_value is None or hard_core_dump_value == [] or hard_core_dump_value == "":
return "'hard core' not found in any file"
return str(hard_core_dump_value)
def check_service_status(service_name, state):
'''
Ensure that the given service is in the required state. Return True if it is,
otherwise return a string describing the actual state.
state can be enabled or disabled.
'''
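# Illustrative calls, assuming a systemd host (service names are examples only):
#   check_service_status('autofs', 'disabled') -> True when the unit file is
#       absent or 'systemctl is-enabled autofs' reports it disabled
#   check_service_status('crond', 'enabled')   -> True when the unit is enabled,
#       otherwise a string describing the actual state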
all_services = __salt__['cmd.run']('systemctl list-unit-files')
if re.search(service_name, all_services, re.M):
output = __salt__['cmd.retcode']('systemctl is-enabled ' + service_name, ignore_retcode=True)
if (state == "disabled" and str(output) == "1") or (state == "enabled" and str(output) == "0"):
return True
else:
return __salt__['cmd.run_stdout']('systemctl is-enabled ' + service_name, ignore_retcode=True)
else:
if state == "disabled":
return True
else:
return 'Looks like ' + service_name + ' does not exist. Please check.'
def check_ssh_timeout_config(reason=''):
'''
Ensure SSH Idle Timeout Interval is configured
'''
client_alive_interval = _execute_shell_command("grep \"^ClientAliveInterval\" /etc/ssh/sshd_config | awk '{print $NF}'", python_shell=True).strip()
if client_alive_interval != '' and int(client_alive_interval) <= 300:
client_alive_count_max = _execute_shell_command("grep \"^ClientAliveCountMax\" /etc/ssh/sshd_config | awk '{print $NF}'", python_shell=True).strip()
if client_alive_count_max != '' and int(client_alive_count_max) <= 3:
return True
else:
return "ClientAliveCountMax value should be less than equal to 3"
else:
return "ClientAliveInterval value should be less than equal to 300"
def check_unowned_files(reason=''):
'''
Ensure no unowned files or directories exist
'''
raise CommandExecutionError('Module disabled due to performance concerns')
unowned_files = _execute_shell_command("df --local -P | awk 'NR!=1 {print $6}' | xargs -I '{}' find '{}' -xdev -nouser 2>/dev/null", python_shell=True).strip()
unowned_files = unowned_files.split('\n') if unowned_files != "" else []
# The command above only searches local filesystems, there may still be compromised items on network
# mounted partitions.
# Following command will check each partition for unowned files
unowned_partition_files = _execute_shell_command("mount | awk '{print $3}' | xargs -I '{}' find '{}' -xdev -nouser 2>/dev/null", python_shell=True).strip()
unowned_partition_files = unowned_partition_files.split('\n') if unowned_partition_files != "" else []
unowned_files = unowned_files + unowned_partition_files
return True if unowned_files == [] else str(list(set(unowned_files)))
def check_ungrouped_files(reason=''):
'''
Ensure no ungrouped files or directories exist
'''
raise CommandExecutionError('Module disabled due to performance concerns')
ungrouped_files = _execute_shell_command("df --local -P | awk 'NR!=1 {print $6}' | xargs -I '{}' find '{}' -xdev -nogroup 2>/dev/null", python_shell=True).strip()
ungrouped_files = ungrouped_files.split('\n') if ungrouped_files != "" else []
# The command above only searches local filesystems, there may still be compromised items on network
# mounted partitions.
# Following command will check each partition for unowned files
ungrouped_partition_files = _execute_shell_command("mount | awk '{print $3}' | xargs -I '{}' find '{}' -xdev -nogroup 2>/dev/null", python_shell=True).strip()
ungrouped_partition_files = ungrouped_partition_files.split('\n') if ungrouped_partition_files != "" else []
ungrouped_files = ungrouped_files + ungrouped_partition_files
return True if ungrouped_files == [] else str(list(set(ungrouped_files)))
def check_all_users_home_directory(max_system_uid):
'''
Ensure all users' home directories exist
'''
max_system_uid = int(max_system_uid)
users_uids_dirs = _execute_shell_command("cat /etc/passwd | awk -F: '{ print $1 \" \" $3 \" \" $6 \" \" $7}'", python_shell=True).strip()
users_uids_dirs = users_uids_dirs.split('\n') if users_uids_dirs else []
error = []
for user_data in users_uids_dirs:
user_uid_dir = user_data.strip().split(" ")
if len(user_uid_dir) < 4:
user_uid_dir = user_uid_dir + [''] * (4 - len(user_uid_dir))
if user_uid_dir[1].isdigit():
if not _is_valid_home_directory(user_uid_dir[2], True) and int(user_uid_dir[1]) >= max_system_uid and user_uid_dir[0] != "nfsnobody" \
and 'nologin' not in user_uid_dir[3] and 'false' not in user_uid_dir[3]:
error += ["Either home directory " + user_uid_dir[2] + " of user " + user_uid_dir[0] + " is invalid or does not exist."]
else:
error += ["User " + user_uid_dir[0] + " has invalid uid " + user_uid_dir[1]]
return True if not error else str(error)
def check_users_home_directory_permissions(max_allowed_permission='750', except_for_users=''):
'''
Ensure users' home directories permissions are 750 or more restrictive
'''
users_list = ['root','halt','sync','shutdown']
for user in except_for_users.split(","):
if user.strip() != "":
users_list.append(user.strip())
users_dirs = []
cmd = __salt__["cmd.run_all"]('egrep -v "^\+" /etc/passwd ')
for line in cmd['stdout'].split('\n'):
tokens = line.split(':')
if tokens[0] not in users_list and 'nologin' not in tokens[6] and 'false' not in tokens[6]:
users_dirs.append(tokens[0] + " " + tokens[5])
error = []
for user_dir in users_dirs:
user_dir = user_dir.split(" ")
if len(user_dir) < 2:
user_dir = user_dir + [''] * (2 - len(user_dir))
if _is_valid_home_directory(user_dir[1]):
result = restrict_permissions(user_dir[1], max_allowed_permission)
if result is not True:
error += ["permission on home directory " + user_dir[1] + " of user " + user_dir[0] + " is wrong: " + result]
return True if error == [] else str(error)
def check_users_own_their_home(max_system_uid):
'''
Ensure users own their home directories
'''
max_system_uid = int(max_system_uid)
users_uids_dirs = _execute_shell_command("cat /etc/passwd | awk -F: '{ print $1 \" \" $3 \" \" $6 \" \" $7}'", python_shell=True).strip()
users_uids_dirs = users_uids_dirs.split('\n') if users_uids_dirs != "" else []
error = []
for user_data in users_uids_dirs:
user_uid_dir = user_data.strip().split(" ")
if len(user_uid_dir) < 4:
user_uid_dir = user_uid_dir + [''] * (4 - len(user_uid_dir))
if user_uid_dir[1].isdigit():
if not _is_valid_home_directory(user_uid_dir[2]):
if int(user_uid_dir[1]) >= max_system_uid and 'nologin' not in user_uid_dir[3] and 'false' not in user_uid_dir[3]:
error += ["Either home directory " + user_uid_dir[2] + " of user " + user_uid_dir[0] + " is invalid or does not exist."]
elif int(user_uid_dir[1]) >= max_system_uid and user_uid_dir[0] != "nfsnobody" and 'nologin' not in user_uid_dir[3] \
and 'false' not in user_uid_dir[3]:
owner = __salt__['cmd.run']("stat -L -c \"%U\" \"" + user_uid_dir[2] + "\"")
if owner != user_uid_dir[0]:
error += ["The home directory " + user_uid_dir[2] + " of user " + user_uid_dir[0] + " is owned by " + owner]
else:
error += ["User " + user_uid_dir[0] + " has invalid uid " + user_uid_dir[1]]
return True if not error else str(error)
def check_users_dot_files(reason=''):
'''
Ensure users' dot files are not group or world writable
'''
users_dirs = _execute_shell_command("cat /etc/passwd | egrep -v '(root|halt|sync|shutdown)' | awk -F: '($7 != \"/sbin/nologin\") {print $1\" \"$6}'", python_shell=True).strip()
users_dirs = users_dirs.split('\n') if users_dirs != "" else []
error = []
for user_dir in users_dirs:
user_dir = user_dir.split()
if len(user_dir) < 2:
user_dir = user_dir + [''] * (2 - len(user_dir))
if _is_valid_home_directory(user_dir[1]):
dot_files = _execute_shell_command("find " + user_dir[1] + " -name \".*\"").strip()
dot_files = dot_files.split('\n') if dot_files != "" else []
for dot_file in dot_files:
if os.path.isfile(dot_file):
path_details = __salt__['file.stats'](dot_file)
given_permission = path_details.get('mode')
file_permission = given_permission[-3:]
if file_permission[1] in ["2", "3", "6", "7"]:
error += ["Group Write permission set on file " + dot_file + " for user " + user_dir[0]]
if file_permission[2] in ["2", "3", "6", "7"]:
error += ["Other Write permission set on file " + dot_file + " for user " + user_dir[0]]
return True if error == [] else str(error)
def check_users_forward_files(reason=''):
'''
Ensure no users have .forward files
'''
users_dirs = _execute_shell_command("cat /etc/passwd | awk -F: '{ print $1\" \"$6 }'", python_shell=True).strip()
users_dirs = users_dirs.split('\n') if users_dirs != "" else []
error = []
for user_dir in users_dirs:
user_dir = user_dir.split()
if len(user_dir) < 2:
user_dir = user_dir + [''] * (2 - len(user_dir))
if _is_valid_home_directory(user_dir[1]):
forward_file = _execute_shell_command("find " + user_dir[1] + " -maxdepth 1 -name \".forward\"").strip()
if forward_file is not None and os.path.isfile(forward_file):
error += ["Home directory: " + user_dir[1] + ", for user: " + user_dir[0] + " has " + forward_file + " file"]
return True if error == [] else str(error)
def check_users_netrc_files(reason=''):
'''
Ensure no users have .netrc files
'''
users_dirs = _execute_shell_command("cat /etc/passwd | awk -F: '{ print $1\" \"$6 }'", python_shell=True).strip()
users_dirs = users_dirs.split('\n') if users_dirs != "" else []
error = []
for user_dir in users_dirs:
user_dir = user_dir.split()
if len(user_dir) < 2:
user_dir = user_dir + [''] * (2 - len(user_dir))
if _is_valid_home_directory(user_dir[1]):
netrc_file = _execute_shell_command("find " + user_dir[1] + " -maxdepth 1 -name \".netrc\"").strip()
if netrc_file is not None and os.path.isfile(netrc_file):
error += ["Home directory: " + user_dir[1] + ", for user: " + user_dir[0] + " has .netrc file"]
return True if error == [] else str(error)
def check_groups_validity(reason=''):
'''
Ensure all groups in /etc/passwd exist in /etc/group
'''
group_ids_in_passwd = _execute_shell_command("cut -s -d: -f4 /etc/passwd 2>/dev/null", python_shell=True).strip()
group_ids_in_passwd = group_ids_in_passwd.split('\n') if group_ids_in_passwd != "" else []
group_ids_in_passwd = list(set(group_ids_in_passwd))
invalid_groups = []
for group_id in group_ids_in_passwd:
group_presence_validity = _execute_shell_command("getent group " + group_id + " 2>/dev/null 1>/dev/null; echo $?", python_shell=True).strip()
if str(group_presence_validity) != "0":
invalid_groups += ["Invalid groupid: " + group_id + " in /etc/passwd file"]
return True if invalid_groups == [] else str(invalid_groups)
def ensure_reverse_path_filtering(reason=''):
'''
Ensure Reverse Path Filtering is enabled
'''
error_list = []
command = "sysctl net.ipv4.conf.all.rp_filter 2> /dev/null"
output = _execute_shell_command(command, python_shell=True)
if output.strip() == '':
error_list.append("net.ipv4.conf.all.rp_filter not found")
search_results = re.findall("rp_filter = (\d+)", output)
result = int(search_results[0])
if result < 1:
error_list.append("net.ipv4.conf.all.rp_filter value set to " + str(result))
command = "sysctl net.ipv4.conf.default.rp_filter 2> /dev/null"
output = _execute_shell_command(command, python_shell=True)
if output.strip() == '':
error_list.append("net.ipv4.conf.default.rp_filter not found")
search_results = re.findall("rp_filter = (\d+)", output)
result = int(search_results[0])
if result < 1:
error_list.append("net.ipv4.conf.default.rp_filter value set to " + str(result))
if len(error_list) > 0:
return str(error_list)
else:
return True
def check_users_rhosts_files(reason=''):
'''
Ensure no users have .rhosts files
'''
users_dirs = _execute_shell_command("cat /etc/passwd | egrep -v '(root|halt|sync|shutdown)' | awk -F: '($7 != \"/sbin/nologin\") {print $1\" \"$6}'", python_shell=True).strip()
users_dirs = users_dirs.split('\n') if users_dirs != "" else []
error = []
for user_dir in users_dirs:
user_dir = user_dir.split()
if len(user_dir) < 2:
user_dir = user_dir + [''] * (2 - len(user_dir))
if _is_valid_home_directory(user_dir[1]):
rhosts_file = _execute_shell_command("find " + user_dir[1] + " -maxdepth 1 -name \".rhosts\"").strip()
if rhosts_file is not None and os.path.isfile(rhosts_file):
error += ["Home directory: " + user_dir[1] + ", for user: " + user_dir[0] + " has .rhosts file"]
return True if error == [] else str(error)
def check_netrc_files_accessibility(reason=''):
'''
Ensure users' .netrc Files are not group or world accessible
'''
script = """
for dir in `cat /etc/passwd | egrep -v '(root|sync|halt|shutdown)' | awk -F: '($7 != "/sbin/nologin") { print $6 }'`; do
for file in $dir/.netrc; do
if [ ! -h "$file" -a -f "$file" ]; then
fileperm=`ls -ld $file | cut -f1 -d" "`
if [ `echo $fileperm | cut -c5` != "-" ]; then
echo "Group Read set on $file"
fi
if [ `echo $fileperm | cut -c6` != "-" ]; then
echo "Group Write set on $file"
fi
if [ `echo $fileperm | cut -c7` != "-" ]; then
echo "Group Execute set on $file"
fi
if [ `echo $fileperm | cut -c8` != "-" ]; then
echo "Other Read set on $file"
fi
if [ `echo $fileperm | cut -c9` != "-" ]; then
echo "Other Write set on $file"
fi
if [ `echo $fileperm | cut -c10` != "-" ]; then
echo "Other Execute set on $file"
fi
fi
done
done
"""
output = _execute_shell_command(script, python_shell=True)
return True if output.strip() == '' else output
def _grep(path,
pattern,
*args):
'''
Grep for a string in the specified file
.. note::
This function's return value is slated for refinement in future
versions of Salt
path
Path to the file to be searched
.. note::
Globbing is supported (i.e. ``/var/log/foo/*.log``), but if globbing
is being used then the path should be quoted to keep the shell from
attempting to expand the glob expression.
pattern
Pattern to match. For example: ``test``, or ``a[0-5]``
opts
Additional command-line flags to pass to the grep command. For example:
``-v``, or ``-i -B2``
.. note::
The options should come after a double-dash (as shown in the
examples below) to keep Salt's own argument parser from
interpreting them.
CLI Example:
.. code-block:: bash
salt '*' file.grep /etc/passwd nobody
salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i
salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i -B2
salt '*' file.grep "/etc/sysconfig/network-scripts/*" ipaddr -- -i -l
'''
path = os.path.expanduser(path)
if args:
options = ' '.join(args)
else:
options = ''
cmd = (
r'''grep {options} {pattern} {path}'''
.format(
options=options,
pattern=pattern,
path=path,
)
)
try:
ret = __salt__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True)
except (IOError, OSError) as exc:
raise CommandExecutionError(exc.strerror)
return ret
def check_list_values(file_path, match_pattern, value_pattern, grep_arg, white_list, black_list, value_delimter):
'''
This function will first get the line matching the given match_pattern.
After this, the value pattern will be extracted from that line.
The value pattern will be split by value_delimter to get the list of values.
match_pattern will be a regex pattern for the grep command.
value_pattern will be a regex for python's re module to get the matched values.
Only one of white_list and black_list is allowed.
white_list and black_list should have comma(,) separated values.
Example for CIS-2.2.1.2
ensure_ntp_configured:
data:
CentOS Linux-7:
tag: 2.2.1.2
function: check_list_values
args:
- /etc/ntp.conf
- '^restrict.*default'
- '^restrict.*default(.*)$'
- null
- kod,nomodify,notrap,nopeer,noquery
- null
- ' '
description: Ensure ntp is configured
'''
list_delimter = ","
if black_list is not None and white_list is not None:
return "Both black_list and white_list values are not allowed."
grep_args = [] if grep_arg is None else [grep_arg]
matched_lines = _grep(file_path, match_pattern, *grep_args).get('stdout')
if not matched_lines:
return "No match found for the given pattern: " + str(match_pattern)
matched_lines = matched_lines.split('\n') if matched_lines is not None else []
error = []
for matched_line in matched_lines:
regexp = re.compile(value_pattern)
matched_values = regexp.search(matched_line).group(1)
matched_values = matched_values.strip().split(value_delimter) if matched_values is not None else []
if white_list is not None:
values_not_in_white_list = list(set(matched_values) - set(white_list.strip().split(list_delimter)))
if values_not_in_white_list != []:
error += ["values not in whitelist: " + str(values_not_in_white_list)]
else:
values_in_black_list = list(set(matched_values).intersection(set(black_list.strip().split(list_delimter))))
if values_in_black_list != []:
error += ["values in blacklist: " + str(values_in_black_list)]
return True if error == [] else str(error)
def mail_conf_check(reason=''):
'''
Ensure mail transfer agent is configured for local-only mode
'''
valid_addresses = ["localhost", "127.0.0.1", "::1"]
mail_addresses = _execute_shell_command("grep '^[[:blank:]]*inet_interfaces' /etc/postfix/main.cf | awk -F'=' '{print $2}'", python_shell=True).strip()
mail_addresses = str(mail_addresses)
mail_addresses = mail_addresses.split(',') if mail_addresses != "" else []
mail_addresses = map(str.strip, mail_addresses)
invalid_addresses = list(set(mail_addresses) - set(valid_addresses))
return str(invalid_addresses) if invalid_addresses != [] else True
def check_if_any_pkg_installed(args):
'''
:param args: Comma-separated list of packages that need to be verified
:return: True if any of the input packages is installed, else False
'''
result = False
for pkg in args.split(','):
if __salt__['pkg.version'](pkg):
result = True
break
return result
def ensure_max_password_expiration(allow_max_days, except_for_users=''):
'''
Ensure the maximum password expiration days is set to a value less than or equal to that given in args
'''
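# Illustrative call (argument values are examples only):
#   ensure_max_password_expiration(90, except_for_users='vagrant')
# passes only if PASS_MAX_DAYS in /etc/login.defs is <= 90 and every
# non-excepted user with a password has a max-expiry field <= 90 in /etc/shadow.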
grep_args = []
pass_max_days_output = _grep('/etc/login.defs', '^PASS_MAX_DAYS', *grep_args).get('stdout')
if not pass_max_days_output:
return "PASS_MAX_DAYS must be set"
system_pass_max_days = pass_max_days_output.split()[1]
if not _is_int(system_pass_max_days):
return "PASS_MAX_DAYS must be set properly"
if int(system_pass_max_days) > allow_max_days:
return "PASS_MAX_DAYS must be less than or equal to " + str(allow_max_days)
#fetch all users with passwords
grep_args.append('-E')
all_users = _grep('/etc/shadow', '^[^:]+:[^\!*]', *grep_args).get('stdout')
except_for_users_list=[]
for user in except_for_users.split(","):
if user.strip() != "":
except_for_users_list.append(user.strip())
result = []
for line in all_users.split('\n'):
user = line.split(':')[0]
#As per CIS doc, 5th field is the password max expiry days
user_passwd_expiry = line.split(':')[4]
        if user not in except_for_users_list and _is_int(user_passwd_expiry) and int(user_passwd_expiry) > allow_max_days:
result.append('User ' + user + ' has max password expiry days ' + user_passwd_expiry + ', which is more than ' + str(allow_max_days))
return True if result == [] else str(result)
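# A minimal sketch of a direct call; the 90-day limit and the exempted users
# are assumptions for the example, not CIS-mandated values:
#
#   ensure_max_password_expiration(90, except_for_users='root,sync,halt')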
def _is_int(value):
    '''
    Return True if value can be parsed as an integer.
    '''
    try:
        int(value)
    except ValueError:
        return False
    return True
def check_sshd_paramters(pattern, values=None, comparetype='regex'):
'''
    This function checks whether the given pattern is present in the running
    sshd configuration (as reported by 'sshd -T').
    The values associated with that pattern can also be checked.
    To require that only the given values are present, in any order, use
    comparetype 'only'.
Example:
1) To check for INFO for LogLevel
check_log_level:
data:
'*':
tag: CIS-1.1.1
function: check_sshd_paramters
args:
- '^LogLevel\s+INFO'
description: Ensure SSH LogLevel is set to INFO
2) To check for only approved ciphers in any order
sshd_approved_cipher:
data:
'*':
tag: CIS-1.1.2
function: check_sshd_paramters
args:
- '^Ciphers'
kwargs:
values: aes256-ctr,aes192-ctr,aes128-ctr
comparetype: only
description: Ensure only approved ciphers are used
'''
output = __salt__['cmd.run']('sshd -T')
if comparetype == 'only':
if not values:
return "You need to provide values for comparetype 'only'."
else:
for line in output.splitlines():
if re.match(pattern, line, re.I):
expected_values = values.split(',')
found_values = line[len(pattern):].strip().split(',')
for found_value in found_values:
if found_value in expected_values:
continue
else:
return "Allowed values for pattern: " + pattern + " are " + values
return True
return "Looks like pattern i.e. " + pattern + " not found in sshd -T. Please check."
elif comparetype == 'regex':
if re.search(pattern, output, re.M | re.I):
return True
else:
return "Looks like pattern i.e. " + pattern + " not found in sshd -T. Please check."
else:
return "The comparetype: " + comparetype + " not found. It can be 'regex' or 'only'. Please check."
def test_success():
'''
Automatically returns success
'''
return True
def test_failure():
'''
Automatically returns failure, no reason
'''
return False
def test_failure_reason(reason):
'''
Automatically returns failure, with a reason (first arg)
'''
return reason
FUNCTION_MAP = {
'check_all_ports_firewall_rules': check_all_ports_firewall_rules,
'check_password_fields_not_empty': check_password_fields_not_empty,
'ungrouped_files_or_dir': ungrouped_files_or_dir,
'unowned_files_or_dir': unowned_files_or_dir,
'world_writable_file': world_writable_file,
'system_account_non_login': system_account_non_login,
'sticky_bit_on_world_writable_dirs': sticky_bit_on_world_writable_dirs,
'default_group_for_root': default_group_for_root,
'root_is_only_uid_0_account': root_is_only_uid_0_account,
'test_success': test_success,
'test_failure': test_failure,
'test_failure_reason': test_failure_reason,
'test_mount_attrs': test_mount_attrs,
'check_path_integrity': check_path_integrity,
'restrict_permissions': restrict_permissions,
'check_time_synchronization': check_time_synchronization,
'check_core_dumps': check_core_dumps,
'check_directory_files_permission': check_directory_files_permission,
'check_duplicate_gnames': check_duplicate_gnames,
'check_duplicate_unames': check_duplicate_unames,
'check_duplicate_gids': check_duplicate_gids,
'check_duplicate_uids': check_duplicate_uids,
'check_service_status': check_service_status,
'check_ssh_timeout_config': check_ssh_timeout_config,
'check_unowned_files': check_unowned_files,
'check_ungrouped_files': check_ungrouped_files,
'check_all_users_home_directory': check_all_users_home_directory,
'check_users_home_directory_permissions': check_users_home_directory_permissions,
'check_users_own_their_home': check_users_own_their_home,
'check_users_dot_files': check_users_dot_files,
'check_users_forward_files': check_users_forward_files,
'check_users_netrc_files': check_users_netrc_files,
'check_groups_validity': check_groups_validity,
'ensure_reverse_path_filtering': ensure_reverse_path_filtering,
'check_users_rhosts_files': check_users_rhosts_files,
'check_netrc_files_accessibility': check_netrc_files_accessibility,
'check_list_values': check_list_values,
'mail_conf_check': mail_conf_check,
'check_if_any_pkg_installed': check_if_any_pkg_installed,
'ensure_max_password_expiration': ensure_max_password_expiration,
'check_sshd_paramters': check_sshd_paramters,
}
|
apache-2.0
| 7,765,171,805,011,876,000 | -3,329,545,553,888,035,000 | 38.800877 | 225 | 0.590461 | false |
sschiau/swift
|
utils/swift-bench.py
|
28
|
15119
|
#!/usr/bin/env python
# ===--- swift-bench.py ------------------------------*- coding: utf-8 -*-===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
# This file implements a test harness for running Swift performance benchmarks.
#
# Its input is a set of swift files, containing functions named 'bench_*' that
# take no arguments and return an Int. The harness makes a separate test from
# each of these functions, runs all the tests and reports aggregate results.
#
# The workflow of the harness is the following:
#  o Based on the input files, generate 'processed' files. These files
# contain a main function with simple arguments parsing, time measurement
# utilities and a loop in which the bench-functions are called.
# o When all files are processed, the harness begins to compile them, keeping
# track of all compile fails for later results reporting.
# o When all files are compiled, the harness begins to run the tests. The
#    harness chooses a number of iterations for each test to achieve the best
# accuracy in the given time limit (in order to do that, it performs
# several auxiliary test runs). When the iteration number is chosen, the
# measurement of execution time is actually performed.
# o At this point everything is ready, and the harness simply reports the
# results.
#
# Ideas for the harness improvement and development are welcomed here:
# rdar://problem/18072938
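# A typical invocation might look like the following; the file names, compiler
# and time limit are assumptions for the example:
#
#   swift-bench.py -v 1 -c swiftc -t 2000 Bench1.swift Bench2.swift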
from __future__ import print_function
import argparse
import math
import os
import re
import subprocess
import sys
# This regular expression is looking for Swift functions named `bench_*`
# that take no arguments and return an Int. The Swift code for such
# a function is:
#
#   func bench_myname() -> Int {
# // function body goes here
# }
BENCH_RE = re.compile(
r"^\s*" # whitespace at the start of the line
r"func\s+" # 'func' keyword, which must be followed by
# at least one space
r"bench_([a-zA-Z0-9_]+)\s*"
# name of the function
r"\s*\(\s*\)" # argument list
r"\s*->\s*Int\s*" # return type
r"({)?" # opening brace of the function body
r"\s*$" # whitespace ot the end of the line
)
def pstdev(sample):
"""Given a list of numbers, return the population standard deviation.
For a population x_1, x_2, ..., x_N with mean M, the standard deviation
is defined as
sqrt( 1/N * [ (x_1 - M)^2 + (x_2 - M)^2 + ... + (x_N - M)^2 ] )
"""
if len(sample) == 0:
raise ValueError("Cannot calculate the standard deviation of an "
"empty list!")
mean = sum(sample) / float(len(sample))
inner = 1.0 / len(sample) * (sum((x - mean) ** 2 for x in sample))
return math.sqrt(inner)
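# For instance, pstdev([2, 4, 4, 4, 5, 5, 7, 9]) evaluates to 2.0:
# the mean is 5.0, the mean squared deviation is 4.0, and sqrt(4.0) == 2.0.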
class SwiftBenchHarness(object):
sources = []
verbose_level = 0
compiler = ""
tests = {}
time_limit = 1000
min_sample_time = 100
min_iter_time = 1
opt_flags = []
def log(self, str, level):
if self.verbose_level >= level:
for _ in range(1, level):
sys.stdout.write(' ')
print(str)
def run_command(self, cmd):
self.log(' Executing: ' + ' '.join(cmd), 1)
return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def parse_arguments(self):
self.log("Parsing arguments.", 2)
parser = argparse.ArgumentParser()
parser.add_argument(
"-v", "--verbosity",
help="increase output verbosity", type=int)
parser.add_argument("files", help="input files", nargs='+')
parser.add_argument(
'-c', '--compiler',
help="compiler to use", default="swiftc")
parser.add_argument(
'-t', '--timelimit',
help="Time limit for every test", type=int)
parser.add_argument(
'-s', '--sampletime',
help="Minimum time for every sample", type=int)
parser.add_argument(
'-f', '--flags', help="Compilation flags", nargs='+')
args = parser.parse_args()
if args.verbosity:
self.verbose_level = args.verbosity
self.sources = args.files
self.compiler = args.compiler
if args.flags:
self.opt_flags = args.flags
if args.timelimit and args.timelimit > 0:
self.time_limit = args.timelimit
if args.sampletime and args.sampletime > 0:
self.min_sample_time = args.sampletime
self.log("Sources: %s." % ', '.join(self.sources), 3)
self.log("Compiler: %s." % self.compiler, 3)
self.log("Opt flags: %s." % ', '.join(self.opt_flags), 3)
self.log("Verbosity: %s." % self.verbose_level, 3)
self.log("Time limit: %s." % self.time_limit, 3)
self.log("Min sample time: %s." % self.min_sample_time, 3)
def process_source(self, name):
self.log("Processing source file: %s." % name, 2)
header = """
@_silgen_name("mach_absolute_time") func __mach_absolute_time__() -> UInt64
@_silgen_name("opaqueGetInt32")
func _opaqueGetInt32(x: Int) -> Int
@_silgen_name("opaqueGetInt64")
func _opaqueGetInt64(x: Int) -> Int
@inline(never)
public func getInt(x: Int) -> Int {
#if arch(i386) || arch(arm)
return _opaqueGetInt32(x)
#elseif arch(x86_64) || arch(arm64) || arch(powerpc64) || \
arch(powerpc64le) || arch(s390x)
return _opaqueGetInt64(x)
#else
return x
#endif
}
@inline(never)
func False() -> Bool { return getInt(1) == 0 }
@inline(never)
func Consume(x: Int) { if False() { println(x) } }
"""
before_bench = """
@inline(never)
"""
into_bench = """
if False() { return 0 }
"""
main_begin = """
func main() {
var N = 1
var name = ""
if CommandLine.arguments.count > 1 {
N = CommandLine.arguments[1].toInt()!
}
"""
main_body = r"""
name = "%s"
if CommandLine.arguments.count <= 2 || CommandLine.arguments[2] == name {
let start = __mach_absolute_time__()
for _ in 1...N {
bench_%s()
}
let end = __mach_absolute_time__()
println("\(name),\(N),\(end - start)")
}
"""
main_end = """
}
main()
"""
with open(name) as f:
lines = list(f)
output = header
looking_for_curly_brace = False
test_names = []
for lineno, l in enumerate(lines, start=1):
if looking_for_curly_brace:
output += l
if "{" not in l:
continue
looking_for_curly_brace = False
output += into_bench
continue
m = BENCH_RE.match(l)
if m:
output += before_bench
output += l
bench_name = m.group(1)
self.log("Benchmark found: %s (line %d)" %
(bench_name, lineno), 3)
self.tests[
name + ":" +
bench_name] = Test(bench_name, name, "", "")
test_names.append(bench_name)
if m.group(2):
output += into_bench
else:
looking_for_curly_brace = True
else:
output += l
output += main_begin
for n in test_names:
output += main_body % (n, n)
processed_name = 'processed_' + os.path.basename(name)
output += main_end
with open(processed_name, 'w') as f:
f.write(output)
for n in test_names:
self.tests[name + ":" + n].processed_source = processed_name
def process_sources(self):
self.log("Processing sources: %s." % self.sources, 2)
for s in self.sources:
self.process_source(s)
def compile_opaque_cfile(self):
self.log("Generating and compiling C file with opaque functions.", 3)
file_body = """
#include <stdint.h>
extern "C" int32_t opaqueGetInt32(int32_t x) { return x; }
extern "C" int64_t opaqueGetInt64(int64_t x) { return x; }
"""
with open('opaque.cpp', 'w') as f:
f.write(file_body)
# TODO: Handle subprocess.CalledProcessError for this call:
self.run_command(
['clang++', 'opaque.cpp', '-o', 'opaque.o', '-c', '-O2'])
compiled_files = {}
def compile_source(self, name):
self.tests[name].binary = "./" + \
self.tests[name].processed_source.split(os.extsep)[0]
if not self.tests[name].processed_source in self.compiled_files:
try:
self.run_command([
self.compiler,
self.tests[name].processed_source,
"-o",
self.tests[name].binary + '.o',
'-c'
] + self.opt_flags)
self.run_command([
self.compiler,
'-o',
self.tests[name].binary,
self.tests[name].binary + '.o',
'opaque.o'
])
self.compiled_files[
self.tests[name].processed_source] = ('', '')
except subprocess.CalledProcessError as e:
self.compiled_files[self.tests[name].processed_source] = (
'COMPFAIL', e.output)
(status, output) = self.compiled_files[
self.tests[name].processed_source]
self.tests[name].status = status
self.tests[name].output = output
def compile_sources(self):
self.log("Compiling processed sources.", 2)
self.compile_opaque_cfile()
for t in self.tests:
self.compile_source(t)
def run_benchmarks(self):
self.log("Running benchmarks.", 2)
for t in self.tests:
self.run_bench(t)
def parse_benchmark_output(self, res):
# Parse lines like
# TestName,NNN,MMM
# where NNN - performed iterations number, MMM - execution time (in ns)
results_re = re.compile(r"(\w+),[ \t]*(\d+),[ \t]*(\d+)")
m = results_re.match(res)
if not m:
return ("", 0, 0)
return (m.group(1), m.group(2), m.group(3))
def compute_iters_number(self, name):
scale = 1
spent = 0
# Measure time for one iteration
# If it's too small, increase number of iteration until it's measurable
while (spent <= self.min_iter_time):
try:
r = self.run_command([
self.tests[name].binary, str(scale),
self.tests[name].name])
(test_name, iters_computed, exec_time) = \
self.parse_benchmark_output(r)
# Convert ns to ms
spent = int(exec_time) / 1000000
if spent <= self.min_iter_time:
scale *= 2
if scale > sys.maxint:
return (0, 0)
except subprocess.CalledProcessError as e:
r = e.output
break
if spent == 0:
spent = 1
# Now compute number of samples we can take in the given time limit
mult = int(self.min_sample_time / spent)
if mult == 0:
mult = 1
scale *= mult
spent *= mult
samples = int(self.time_limit / spent)
if samples == 0:
samples = 1
return (samples, scale)
def run_bench(self, name):
if not self.tests[name].status == "":
return
(num_samples, iter_scale) = self.compute_iters_number(name)
if (num_samples, iter_scale) == (0, 0):
self.tests[name].status = "CAN'T MEASURE"
self.tests[name].output = (
"Can't find number of iterations for the test to last " +
"longer than %d ms." % self.min_iter_time)
return
samples = []
self.log("Running bench: %s, numsamples: %d" % (name, num_samples), 2)
for _ in range(0, num_samples):
try:
r = self.run_command([self.tests[name].binary, str(iter_scale),
self.tests[name].name])
(test_name, iters_computed, exec_time) = \
self.parse_benchmark_output(r)
# TODO: Verify test_name and iters_computed
samples.append(int(exec_time) / iter_scale)
self.tests[name].output = r
except subprocess.CalledProcessError as e:
self.tests[name].status = "RUNFAIL"
self.tests[name].output = e.output
break
res = TestResults(name, samples)
self.tests[name].results = res
def report_results(self):
self.log("\nReporting results.", 2)
print("==================================================")
for t in self.tests:
self.tests[t].do_print()
class Test(object):
def __init__(self, name, source, processed_source, binary):
self.name = name
self.source = source
self.processed_source = processed_source
self.binary = binary
self.status = ""
self.results = None
self.output = None
def do_print(self):
print("NAME: %s" % self.name)
print("SOURCE: %s" % self.source)
if self.status == "":
if self.results is not None:
self.results.do_print()
else:
print("STATUS: %s" % self.status)
print("OUTPUT:")
print(self.output)
print("END OF OUTPUT")
print("")
class TestResults(object):
def __init__(self, name, samples):
self.name = name
self.samples = samples
if len(samples) > 0:
self.process()
def process(self):
self.minimum = min(self.samples)
self.maximum = max(self.samples)
self.avg = sum(self.samples) / len(self.samples)
self.std = pstdev(self.samples)
self.err = self.std / math.sqrt(len(self.samples))
self.int_min = self.avg - self.err * 1.96
self.int_max = self.avg + self.err * 1.96
def do_print(self):
print("SAMPLES: %d" % len(self.samples))
print("MIN: %3.2e" % self.minimum)
print("MAX: %3.2e" % self.maximum)
print("AVG: %3.2e" % self.avg)
print("STD: %3.2e" % self.std)
print("ERR: %3.2e (%2.1f%%)" % (self.err, self.err * 100 / self.avg))
print("CONF INT 0.95: (%3.2e, %3.2e)" % (self.int_min, self.int_max))
print("")
def main():
harness = SwiftBenchHarness()
harness.parse_arguments()
harness.process_sources()
harness.compile_sources()
harness.run_benchmarks()
harness.report_results()
main()
|
apache-2.0
| 6,606,585,072,450,858,000 | 8,429,492,632,253,177,000 | 33.597254 | 79 | 0.54501 | false |
jumpstarter-io/nova
|
nova/cmd/api_ec2.py
|
26
|
1537
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for Nova EC2 API."""
import sys
from oslo.config import cfg
from nova import config
from nova import objects
from nova.openstack.common import log as logging
from nova.openstack.common.report import guru_meditation_report as gmr
from nova import service
from nova import utils
from nova import version
CONF = cfg.CONF
CONF.import_opt('enabled_ssl_apis', 'nova.service')
def main():
config.parse_args(sys.argv)
logging.setup("nova")
utils.monkey_patch()
objects.register_all()
gmr.TextGuruMeditation.setup_autorun(version)
should_use_ssl = 'ec2' in CONF.enabled_ssl_apis
server = service.WSGIService('ec2', use_ssl=should_use_ssl,
max_url_len=16384)
service.serve(server, workers=server.workers)
service.wait()
|
apache-2.0
| 4,063,275,467,402,877,000 | 2,716,278,009,318,247,400 | 31.020833 | 77 | 0.72674 | false |
godfather1103/WeiboRobot
|
python27/1.0/lib/test/test_scope.py
|
114
|
15536
|
import unittest
from test.test_support import check_syntax_error, check_py3k_warnings, \
check_warnings, run_unittest
class ScopeTests(unittest.TestCase):
def testSimpleNesting(self):
def make_adder(x):
def adder(y):
return x + y
return adder
inc = make_adder(1)
plus10 = make_adder(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testExtraNesting(self):
def make_adder2(x):
def extra(): # check freevars passing through non-use scopes
def adder(y):
return x + y
return adder
return extra()
inc = make_adder2(1)
plus10 = make_adder2(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testSimpleAndRebinding(self):
def make_adder3(x):
def adder(y):
return x + y
x = x + 1 # check tracking of assignment to x in defining scope
return adder
inc = make_adder3(0)
plus10 = make_adder3(9)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testNestingGlobalNoFree(self):
        def make_adder4():  # XXX add extra level of indirection
def nest():
def nest():
def adder(y):
return global_x + y # check that plain old globals work
return adder
return nest()
return nest()
global_x = 1
adder = make_adder4()
self.assertEqual(adder(1), 2)
global_x = 10
self.assertEqual(adder(-2), 8)
def testNestingThroughClass(self):
def make_adder5(x):
class Adder:
def __call__(self, y):
return x + y
return Adder()
inc = make_adder5(1)
plus10 = make_adder5(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testNestingPlusFreeRefToGlobal(self):
def make_adder6(x):
global global_nest_x
def adder(y):
return global_nest_x + y
global_nest_x = x
return adder
inc = make_adder6(1)
plus10 = make_adder6(10)
self.assertEqual(inc(1), 11) # there's only one global
self.assertEqual(plus10(-2), 8)
def testNearestEnclosingScope(self):
def f(x):
def g(y):
x = 42 # check that this masks binding in f()
def h(z):
return x + z
return h
return g(2)
test_func = f(10)
self.assertEqual(test_func(5), 47)
def testMixedFreevarsAndCellvars(self):
def identity(x):
return x
def f(x, y, z):
def g(a, b, c):
a = a + x # 3
def h():
# z * (4 + 9)
# 3 * 13
return identity(z * (b + y))
y = c + z # 9
return h
return g
g = f(1, 2, 3)
h = g(2, 4, 6)
self.assertEqual(h(), 39)
def testFreeVarInMethod(self):
def test():
method_and_var = "var"
class Test:
def method_and_var(self):
return "method"
def test(self):
return method_and_var
def actual_global(self):
return str("global")
def str(self):
return str(self)
return Test()
t = test()
self.assertEqual(t.test(), "var")
self.assertEqual(t.method_and_var(), "method")
self.assertEqual(t.actual_global(), "global")
method_and_var = "var"
class Test:
# this class is not nested, so the rules are different
def method_and_var(self):
return "method"
def test(self):
return method_and_var
def actual_global(self):
return str("global")
def str(self):
return str(self)
t = Test()
self.assertEqual(t.test(), "var")
self.assertEqual(t.method_and_var(), "method")
self.assertEqual(t.actual_global(), "global")
def testRecursion(self):
def f(x):
def fact(n):
if n == 0:
return 1
else:
return n * fact(n - 1)
if x >= 0:
return fact(x)
else:
raise ValueError, "x must be >= 0"
self.assertEqual(f(6), 720)
def testUnoptimizedNamespaces(self):
check_syntax_error(self, """\
def unoptimized_clash1(strip):
def f(s):
from string import *
return strip(s) # ambiguity: free or local
return f
""")
check_syntax_error(self, """\
def unoptimized_clash2():
from string import *
def f(s):
return strip(s) # ambiguity: global or local
return f
""")
check_syntax_error(self, """\
def unoptimized_clash2():
from string import *
def g():
def f(s):
return strip(s) # ambiguity: global or local
return f
""")
# XXX could allow this for exec with const argument, but what's the point
check_syntax_error(self, """\
def error(y):
exec "a = 1"
def f(x):
return x + y
return f
""")
check_syntax_error(self, """\
def f(x):
def g():
return x
del x # can't del name
""")
check_syntax_error(self, """\
def f():
def g():
from string import *
return strip # global or local?
""")
# and verify a few cases that should work
exec """
def noproblem1():
from string import *
f = lambda x:x
def noproblem2():
from string import *
def f(x):
return x + 1
def noproblem3():
from string import *
def f(x):
global y
y = x
"""
def testLambdas(self):
f1 = lambda x: lambda y: x + y
inc = f1(1)
plus10 = f1(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(5), 15)
f2 = lambda x: (lambda : lambda y: x + y)()
inc = f2(1)
plus10 = f2(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(5), 15)
f3 = lambda x: lambda y: global_x + y
global_x = 1
inc = f3(None)
self.assertEqual(inc(2), 3)
f8 = lambda x, y, z: lambda a, b, c: lambda : z * (b + y)
g = f8(1, 2, 3)
h = g(2, 4, 6)
self.assertEqual(h(), 18)
def testUnboundLocal(self):
def errorInOuter():
print y
def inner():
return y
y = 1
def errorInInner():
def inner():
return y
inner()
y = 1
self.assertRaises(UnboundLocalError, errorInOuter)
self.assertRaises(NameError, errorInInner)
# test for bug #1501934: incorrect LOAD/STORE_GLOBAL generation
exec """
global_x = 1
def f():
global_x += 1
try:
f()
except UnboundLocalError:
pass
else:
fail('scope of global_x not correctly determined')
""" in {'fail': self.fail}
def testComplexDefinitions(self):
def makeReturner(*lst):
def returner():
return lst
return returner
self.assertEqual(makeReturner(1,2,3)(), (1,2,3))
def makeReturner2(**kwargs):
def returner():
return kwargs
return returner
self.assertEqual(makeReturner2(a=11)()['a'], 11)
with check_py3k_warnings(("tuple parameter unpacking has been removed",
SyntaxWarning)):
exec """\
def makeAddPair((a, b)):
def addPair((c, d)):
return (a + c, b + d)
return addPair
""" in locals()
self.assertEqual(makeAddPair((1, 2))((100, 200)), (101,202))
def testScopeOfGlobalStmt(self):
# Examples posted by Samuele Pedroni to python-dev on 3/1/2001
exec """\
# I
x = 7
def f():
x = 1
def g():
global x
def i():
def h():
return x
return h()
return i()
return g()
self.assertEqual(f(), 7)
self.assertEqual(x, 7)
# II
x = 7
def f():
x = 1
def g():
x = 2
def i():
def h():
return x
return h()
return i()
return g()
self.assertEqual(f(), 2)
self.assertEqual(x, 7)
# III
x = 7
def f():
x = 1
def g():
global x
x = 2
def i():
def h():
return x
return h()
return i()
return g()
self.assertEqual(f(), 2)
self.assertEqual(x, 2)
# IV
x = 7
def f():
x = 3
def g():
global x
x = 2
def i():
def h():
return x
return h()
return i()
return g()
self.assertEqual(f(), 2)
self.assertEqual(x, 2)
# XXX what about global statements in class blocks?
# do they affect methods?
x = 12
class Global:
global x
x = 13
def set(self, val):
x = val
def get(self):
return x
g = Global()
self.assertEqual(g.get(), 13)
g.set(15)
self.assertEqual(g.get(), 13)
"""
def testLeaks(self):
class Foo:
count = 0
def __init__(self):
Foo.count += 1
def __del__(self):
Foo.count -= 1
def f1():
x = Foo()
def f2():
return x
f2()
for i in range(100):
f1()
self.assertEqual(Foo.count, 0)
def testClassAndGlobal(self):
exec """\
def test(x):
class Foo:
global x
def __call__(self, y):
return x + y
return Foo()
x = 0
self.assertEqual(test(6)(2), 8)
x = -1
self.assertEqual(test(3)(2), 5)
looked_up_by_load_name = False
class X:
    # Implicit globals inside classes are looked up by LOAD_NAME, not
# LOAD_GLOBAL.
locals()['looked_up_by_load_name'] = True
passed = looked_up_by_load_name
self.assertTrue(X.passed)
"""
def testLocalsFunction(self):
def f(x):
def g(y):
def h(z):
return y + z
w = x + y
y += 3
return locals()
return g
d = f(2)(4)
self.assertIn('h', d)
del d['h']
self.assertEqual(d, {'x': 2, 'y': 7, 'w': 6})
def testLocalsClass(self):
# This test verifies that calling locals() does not pollute
# the local namespace of the class with free variables. Old
# versions of Python had a bug, where a free variable being
# passed through a class namespace would be inserted into
# locals() by locals() or exec or a trace function.
#
# The real bug lies in frame code that copies variables
# between fast locals and the locals dict, e.g. when executing
# a trace function.
def f(x):
class C:
x = 12
def m(self):
return x
locals()
return C
self.assertEqual(f(1).x, 12)
def f(x):
class C:
y = x
def m(self):
return x
z = list(locals())
return C
varnames = f(1).z
self.assertNotIn("x", varnames)
self.assertIn("y", varnames)
def testLocalsClass_WithTrace(self):
# Issue23728: after the trace function returns, the locals()
# dictionary is used to update all variables, this used to
# include free variables. But in class statements, free
# variables are not inserted...
import sys
sys.settrace(lambda a,b,c:None)
try:
x = 12
class C:
def f(self):
return x
self.assertEqual(x, 12) # Used to raise UnboundLocalError
finally:
sys.settrace(None)
def testBoundAndFree(self):
# var is bound and free in class
def f(x):
class C:
def m(self):
return x
a = x
return C
inst = f(3)()
self.assertEqual(inst.a, inst.m())
def testInteractionWithTraceFunc(self):
import sys
def tracer(a,b,c):
return tracer
def adaptgetter(name, klass, getter):
kind, des = getter
if kind == 1: # AV happens when stepping from this line to next
if des == "":
des = "_%s__%s" % (klass.__name__, name)
return lambda obj: getattr(obj, des)
class TestClass:
pass
sys.settrace(tracer)
adaptgetter("foo", TestClass, (1, ""))
sys.settrace(None)
self.assertRaises(TypeError, sys.settrace)
def testEvalExecFreeVars(self):
def f(x):
return lambda: x + 1
g = f(3)
self.assertRaises(TypeError, eval, g.func_code)
try:
exec g.func_code in {}
except TypeError:
pass
else:
self.fail("exec should have failed, because code contained free vars")
def testListCompLocalVars(self):
try:
print bad
except NameError:
pass
else:
print "bad should not be defined"
def x():
[bad for s in 'a b' for bad in s.split()]
x()
try:
print bad
except NameError:
pass
def testEvalFreeVars(self):
def f(x):
def g():
x
eval("x + 1")
return g
f(4)()
def testFreeingCell(self):
# Test what happens when a finalizer accesses
# the cell where the object was stored.
class Special:
def __del__(self):
nestedcell_get()
def f():
global nestedcell_get
def nestedcell_get():
return c
c = (Special(),)
c = 2
f() # used to crash the interpreter...
def testGlobalInParallelNestedFunctions(self):
# A symbol table bug leaked the global statement from one
# function to other nested functions in the same block.
# This test verifies that a global statement in the first
# function does not affect the second function.
CODE = """def f():
y = 1
def g():
global y
return y
def h():
return y + 1
return g, h
y = 9
g, h = f()
result9 = g()
result2 = h()
"""
local_ns = {}
global_ns = {}
exec CODE in local_ns, global_ns
self.assertEqual(2, global_ns["result2"])
self.assertEqual(9, global_ns["result9"])
def testTopIsNotSignificant(self):
# See #9997.
def top(a):
pass
def b():
global a
def test_main():
with check_warnings(("import \* only allowed at module level",
SyntaxWarning)):
run_unittest(ScopeTests)
if __name__ == '__main__':
test_main()
|
gpl-3.0
| 6,859,108,238,285,360,000 | 2,572,343,783,156,198,400 | 22.39759 | 82 | 0.486676 | false |
fevangelou/namebench
|
nb_third_party/dns/query.py
|
215
|
15983
|
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Talk to a DNS server."""
from __future__ import generators
import errno
import select
import socket
import struct
import sys
import time
import dns.exception
import dns.inet
import dns.name
import dns.message
import dns.rdataclass
import dns.rdatatype
class UnexpectedSource(dns.exception.DNSException):
"""Raised if a query response comes from an unexpected address or port."""
pass
class BadResponse(dns.exception.FormError):
"""Raised if a query response does not respond to the question asked."""
pass
def _compute_expiration(timeout):
if timeout is None:
return None
else:
return time.time() + timeout
def _wait_for(ir, iw, ix, expiration):
done = False
while not done:
if expiration is None:
timeout = None
else:
timeout = expiration - time.time()
if timeout <= 0.0:
raise dns.exception.Timeout
try:
if timeout is None:
(r, w, x) = select.select(ir, iw, ix)
else:
(r, w, x) = select.select(ir, iw, ix, timeout)
except select.error, e:
if e.args[0] != errno.EINTR:
raise e
done = True
if len(r) == 0 and len(w) == 0 and len(x) == 0:
raise dns.exception.Timeout
def _wait_for_readable(s, expiration):
_wait_for([s], [], [s], expiration)
def _wait_for_writable(s, expiration):
_wait_for([], [s], [s], expiration)
def _addresses_equal(af, a1, a2):
# Convert the first value of the tuple, which is a textual format
# address into binary form, so that we are not confused by different
# textual representations of the same address
n1 = dns.inet.inet_pton(af, a1[0])
n2 = dns.inet.inet_pton(af, a2[0])
return n1 == n2 and a1[1:] == a2[1:]
def udp(q, where, timeout=None, port=53, af=None, source=None, source_port=0,
ignore_unexpected=False, one_rr_per_rrset=False):
"""Return the response obtained after sending a query via UDP.
@param q: the query
@type q: dns.message.Message
@param where: where to send the message
@type where: string containing an IPv4 or IPv6 address
@param timeout: The number of seconds to wait before the query times out.
If None, the default, wait forever.
@type timeout: float
@param port: The port to which to send the message. The default is 53.
@type port: int
@param af: the address family to use. The default is None, which
    causes the address family to be inferred from the form of where.
If the inference attempt fails, AF_INET is used.
@type af: int
@rtype: dns.message.Message object
@param source: source address. The default is the IPv4 wildcard address.
@type source: string
@param source_port: The port from which to send the message.
The default is 0.
@type source_port: int
@param ignore_unexpected: If True, ignore responses from unexpected
sources. The default is False.
@type ignore_unexpected: bool
@param one_rr_per_rrset: Put each RR into its own RRset
@type one_rr_per_rrset: bool
"""
wire = q.to_wire()
if af is None:
try:
af = dns.inet.af_for_address(where)
except:
af = dns.inet.AF_INET
if af == dns.inet.AF_INET:
destination = (where, port)
if source is not None:
source = (source, source_port)
elif af == dns.inet.AF_INET6:
destination = (where, port, 0, 0)
if source is not None:
source = (source, source_port, 0, 0)
s = socket.socket(af, socket.SOCK_DGRAM, 0)
try:
expiration = _compute_expiration(timeout)
s.setblocking(0)
if source is not None:
s.bind(source)
_wait_for_writable(s, expiration)
s.sendto(wire, destination)
while 1:
_wait_for_readable(s, expiration)
(wire, from_address) = s.recvfrom(65535)
if _addresses_equal(af, from_address, destination) or \
(dns.inet.is_multicast(where) and \
from_address[1:] == destination[1:]):
break
if not ignore_unexpected:
raise UnexpectedSource('got a response from '
'%s instead of %s' % (from_address,
destination))
finally:
s.close()
r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
one_rr_per_rrset=one_rr_per_rrset)
if not q.is_response(r):
raise BadResponse
return r
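# A minimal usage sketch; the query name and resolver address are assumptions
# for the example:
#
#   q = dns.message.make_query('example.com.', dns.rdatatype.A)
#   response = udp(q, '8.8.8.8', timeout=2.0)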
def _net_read(sock, count, expiration):
"""Read the specified number of bytes from sock. Keep trying until we
either get the desired amount, or we hit EOF.
A Timeout exception will be raised if the operation is not completed
by the expiration time.
"""
s = ''
while count > 0:
_wait_for_readable(sock, expiration)
n = sock.recv(count)
if n == '':
raise EOFError
count = count - len(n)
s = s + n
return s
def _net_write(sock, data, expiration):
"""Write the specified data to the socket.
A Timeout exception will be raised if the operation is not completed
by the expiration time.
"""
current = 0
l = len(data)
while current < l:
_wait_for_writable(sock, expiration)
current += sock.send(data[current:])
def _connect(s, address):
try:
s.connect(address)
except socket.error:
(ty, v) = sys.exc_info()[:2]
if v[0] != errno.EINPROGRESS and \
v[0] != errno.EWOULDBLOCK and \
v[0] != errno.EALREADY:
raise v
def tcp(q, where, timeout=None, port=53, af=None, source=None, source_port=0,
one_rr_per_rrset=False):
"""Return the response obtained after sending a query via TCP.
@param q: the query
@type q: dns.message.Message object
@param where: where to send the message
@type where: string containing an IPv4 or IPv6 address
@param timeout: The number of seconds to wait before the query times out.
If None, the default, wait forever.
@type timeout: float
@param port: The port to which to send the message. The default is 53.
@type port: int
@param af: the address family to use. The default is None, which
    causes the address family to be inferred from the form of where.
If the inference attempt fails, AF_INET is used.
@type af: int
@rtype: dns.message.Message object
@param source: source address. The default is the IPv4 wildcard address.
@type source: string
@param source_port: The port from which to send the message.
The default is 0.
@type source_port: int
@param one_rr_per_rrset: Put each RR into its own RRset
@type one_rr_per_rrset: bool
"""
wire = q.to_wire()
if af is None:
try:
af = dns.inet.af_for_address(where)
except:
af = dns.inet.AF_INET
if af == dns.inet.AF_INET:
destination = (where, port)
if source is not None:
source = (source, source_port)
elif af == dns.inet.AF_INET6:
destination = (where, port, 0, 0)
if source is not None:
source = (source, source_port, 0, 0)
s = socket.socket(af, socket.SOCK_STREAM, 0)
try:
expiration = _compute_expiration(timeout)
s.setblocking(0)
if source is not None:
s.bind(source)
_connect(s, destination)
l = len(wire)
# copying the wire into tcpmsg is inefficient, but lets us
# avoid writev() or doing a short write that would get pushed
# onto the net
tcpmsg = struct.pack("!H", l) + wire
_net_write(s, tcpmsg, expiration)
ldata = _net_read(s, 2, expiration)
(l,) = struct.unpack("!H", ldata)
wire = _net_read(s, l, expiration)
finally:
s.close()
r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
one_rr_per_rrset=one_rr_per_rrset)
if not q.is_response(r):
raise BadResponse
return r
def xfr(where, zone, rdtype=dns.rdatatype.AXFR, rdclass=dns.rdataclass.IN,
timeout=None, port=53, keyring=None, keyname=None, relativize=True,
af=None, lifetime=None, source=None, source_port=0, serial=0,
use_udp=False, keyalgorithm=dns.tsig.default_algorithm):
"""Return a generator for the responses to a zone transfer.
@param where: where to send the message
@type where: string containing an IPv4 or IPv6 address
@param zone: The name of the zone to transfer
@type zone: dns.name.Name object or string
@param rdtype: The type of zone transfer. The default is
dns.rdatatype.AXFR.
@type rdtype: int or string
@param rdclass: The class of the zone transfer. The default is
    dns.rdataclass.IN.
@type rdclass: int or string
@param timeout: The number of seconds to wait for each response message.
If None, the default, wait forever.
@type timeout: float
@param port: The port to which to send the message. The default is 53.
@type port: int
@param keyring: The TSIG keyring to use
@type keyring: dict
@param keyname: The name of the TSIG key to use
@type keyname: dns.name.Name object or string
@param relativize: If True, all names in the zone will be relativized to
the zone origin. It is essential that the relativize setting matches
the one specified to dns.zone.from_xfr().
@type relativize: bool
@param af: the address family to use. The default is None, which
    causes the address family to be inferred from the form of where.
If the inference attempt fails, AF_INET is used.
@type af: int
@param lifetime: The total number of seconds to spend doing the transfer.
If None, the default, then there is no limit on the time the transfer may
take.
@type lifetime: float
@rtype: generator of dns.message.Message objects.
@param source: source address. The default is the IPv4 wildcard address.
@type source: string
@param source_port: The port from which to send the message.
The default is 0.
@type source_port: int
@param serial: The SOA serial number to use as the base for an IXFR diff
sequence (only meaningful if rdtype == dns.rdatatype.IXFR).
@type serial: int
@param use_udp: Use UDP (only meaningful for IXFR)
@type use_udp: bool
@param keyalgorithm: The TSIG algorithm to use; defaults to
dns.tsig.default_algorithm
@type keyalgorithm: string
"""
if isinstance(zone, (str, unicode)):
zone = dns.name.from_text(zone)
if isinstance(rdtype, str):
rdtype = dns.rdatatype.from_text(rdtype)
q = dns.message.make_query(zone, rdtype, rdclass)
if rdtype == dns.rdatatype.IXFR:
rrset = dns.rrset.from_text(zone, 0, 'IN', 'SOA',
'. . %u 0 0 0 0' % serial)
q.authority.append(rrset)
if not keyring is None:
q.use_tsig(keyring, keyname, algorithm=keyalgorithm)
wire = q.to_wire()
if af is None:
try:
af = dns.inet.af_for_address(where)
except:
af = dns.inet.AF_INET
if af == dns.inet.AF_INET:
destination = (where, port)
if source is not None:
source = (source, source_port)
elif af == dns.inet.AF_INET6:
destination = (where, port, 0, 0)
if source is not None:
source = (source, source_port, 0, 0)
if use_udp:
if rdtype != dns.rdatatype.IXFR:
raise ValueError('cannot do a UDP AXFR')
s = socket.socket(af, socket.SOCK_DGRAM, 0)
else:
s = socket.socket(af, socket.SOCK_STREAM, 0)
s.setblocking(0)
if source is not None:
s.bind(source)
expiration = _compute_expiration(lifetime)
_connect(s, destination)
l = len(wire)
if use_udp:
_wait_for_writable(s, expiration)
s.send(wire)
else:
tcpmsg = struct.pack("!H", l) + wire
_net_write(s, tcpmsg, expiration)
done = False
soa_rrset = None
soa_count = 0
if relativize:
origin = zone
oname = dns.name.empty
else:
origin = None
oname = zone
tsig_ctx = None
first = True
while not done:
mexpiration = _compute_expiration(timeout)
if mexpiration is None or mexpiration > expiration:
mexpiration = expiration
if use_udp:
_wait_for_readable(s, expiration)
(wire, from_address) = s.recvfrom(65535)
else:
ldata = _net_read(s, 2, mexpiration)
(l,) = struct.unpack("!H", ldata)
wire = _net_read(s, l, mexpiration)
r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
xfr=True, origin=origin, tsig_ctx=tsig_ctx,
multi=True, first=first,
one_rr_per_rrset=(rdtype==dns.rdatatype.IXFR))
tsig_ctx = r.tsig_ctx
first = False
answer_index = 0
delete_mode = False
expecting_SOA = False
if soa_rrset is None:
if not r.answer or r.answer[0].name != oname:
raise dns.exception.FormError
rrset = r.answer[0]
if rrset.rdtype != dns.rdatatype.SOA:
raise dns.exception.FormError("first RRset is not an SOA")
answer_index = 1
soa_rrset = rrset.copy()
if rdtype == dns.rdatatype.IXFR:
if soa_rrset[0].serial == serial:
#
# We're already up-to-date.
#
done = True
else:
expecting_SOA = True
#
# Process SOAs in the answer section (other than the initial
# SOA in the first message).
#
for rrset in r.answer[answer_index:]:
if done:
raise dns.exception.FormError("answers after final SOA")
if rrset.rdtype == dns.rdatatype.SOA and rrset.name == oname:
if expecting_SOA:
if rrset[0].serial != serial:
raise dns.exception.FormError("IXFR base serial mismatch")
expecting_SOA = False
elif rdtype == dns.rdatatype.IXFR:
delete_mode = not delete_mode
if rrset == soa_rrset and not delete_mode:
done = True
elif expecting_SOA:
#
# We made an IXFR request and are expecting another
# SOA RR, but saw something else, so this must be an
# AXFR response.
#
rdtype = dns.rdatatype.AXFR
expecting_SOA = False
if done and q.keyring and not r.had_tsig:
raise dns.exception.FormError("missing TSIG")
yield r
s.close()
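# A usage sketch; the primary server address and zone name are assumptions for
# the example. The generator is typically consumed via dns.zone.from_xfr():
#
#   messages = list(xfr('10.0.0.1', 'example.com.'))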
|
apache-2.0
| -1,894,542,926,608,962,800 | 6,880,506,115,732,222,000 | 36.343458 | 82 | 0.60214 | false |
mne-tools/mne-python
|
mne/viz/circle.py
|
14
|
15879
|
"""Functions to plot on circle as for connectivity."""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
#
# License: Simplified BSD
from itertools import cycle
from functools import partial
import numpy as np
from .utils import plt_show
def circular_layout(node_names, node_order, start_pos=90, start_between=True,
group_boundaries=None, group_sep=10):
"""Create layout arranging nodes on a circle.
Parameters
----------
node_names : list of str
Node names.
node_order : list of str
List with node names defining the order in which the nodes are
arranged. Must have the elements as node_names but the order can be
different. The nodes are arranged clockwise starting at "start_pos"
degrees.
start_pos : float
Angle in degrees that defines where the first node is plotted.
start_between : bool
If True, the layout starts with the position between the nodes. This is
the same as adding "180. / len(node_names)" to start_pos.
group_boundaries : None | array-like
        List of boundaries between groups at which point a "group_sep" will
be inserted. E.g. "[0, len(node_names) / 2]" will create two groups.
group_sep : float
Group separation angle in degrees. See "group_boundaries".
Returns
-------
node_angles : array, shape=(n_node_names,)
Node angles in degrees.
"""
n_nodes = len(node_names)
if len(node_order) != n_nodes:
raise ValueError('node_order has to be the same length as node_names')
if group_boundaries is not None:
boundaries = np.array(group_boundaries, dtype=np.int64)
if np.any(boundaries >= n_nodes) or np.any(boundaries < 0):
raise ValueError('"group_boundaries" has to be between 0 and '
'n_nodes - 1.')
if len(boundaries) > 1 and np.any(np.diff(boundaries) <= 0):
raise ValueError('"group_boundaries" must have non-decreasing '
'values.')
n_group_sep = len(group_boundaries)
else:
n_group_sep = 0
boundaries = None
# convert it to a list with indices
node_order = [node_order.index(name) for name in node_names]
node_order = np.array(node_order)
if len(np.unique(node_order)) != n_nodes:
raise ValueError('node_order has repeated entries')
node_sep = (360. - n_group_sep * group_sep) / n_nodes
if start_between:
start_pos += node_sep / 2
if boundaries is not None and boundaries[0] == 0:
# special case when a group separator is at the start
start_pos += group_sep / 2
boundaries = boundaries[1:] if n_group_sep > 1 else None
node_angles = np.ones(n_nodes, dtype=np.float64) * node_sep
node_angles[0] = start_pos
if boundaries is not None:
node_angles[boundaries] += group_sep
node_angles = np.cumsum(node_angles)[node_order]
return node_angles
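# A small illustration with hypothetical node labels: four nodes arranged with
# a group separator between the two halves of the circle.
#
#   names = ['A', 'B', 'C', 'D']
#   angles = circular_layout(names, node_order=names, group_boundaries=[0, 2])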
def _plot_connectivity_circle_onpick(event, fig=None, axes=None, indices=None,
n_nodes=0, node_angles=None,
ylim=[9, 10]):
"""Isolate connections around a single node when user left clicks a node.
On right click, resets all connections.
"""
if event.inaxes != axes:
return
if event.button == 1: # left click
# click must be near node radius
if not ylim[0] <= event.ydata <= ylim[1]:
return
# all angles in range [0, 2*pi]
node_angles = node_angles % (np.pi * 2)
node = np.argmin(np.abs(event.xdata - node_angles))
patches = event.inaxes.patches
for ii, (x, y) in enumerate(zip(indices[0], indices[1])):
patches[ii].set_visible(node in [x, y])
fig.canvas.draw()
elif event.button == 3: # right click
patches = event.inaxes.patches
for ii in range(np.size(indices, axis=1)):
patches[ii].set_visible(True)
fig.canvas.draw()
def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
node_angles=None, node_width=None,
node_colors=None, facecolor='black',
textcolor='white', node_edgecolor='black',
linewidth=1.5, colormap='hot', vmin=None,
vmax=None, colorbar=True, title=None,
colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
fontsize_title=12, fontsize_names=8,
fontsize_colorbar=8, padding=6.,
fig=None, subplot=111, interactive=True,
node_linewidth=2., show=True):
"""Visualize connectivity as a circular graph.
Parameters
----------
con : array
Connectivity scores. Can be a square matrix, or a 1D array. If a 1D
array is provided, "indices" has to be used to define the connection
indices.
node_names : list of str
Node names. The order corresponds to the order in con.
indices : tuple of array | None
Two arrays with indices of connections for which the connections
strengths are defined in con. Only needed if con is a 1D array.
n_lines : int | None
If not None, only the n_lines strongest connections (strength=abs(con))
are drawn.
node_angles : array, shape (n_node_names,) | None
Array with node positions in degrees. If None, the nodes are equally
spaced on the circle. See mne.viz.circular_layout.
node_width : float | None
Width of each node in degrees. If None, the minimum angle between any
two nodes is used as the width.
node_colors : list of tuple | list of str
List with the color to use for each node. If fewer colors than nodes
are provided, the colors will be repeated. Any color supported by
matplotlib can be used, e.g., RGBA tuples, named colors.
facecolor : str
Color to use for background. See matplotlib.colors.
textcolor : str
Color to use for text. See matplotlib.colors.
node_edgecolor : str
Color to use for lines around nodes. See matplotlib.colors.
linewidth : float
Line width to use for connections.
colormap : str | instance of matplotlib.colors.LinearSegmentedColormap
Colormap to use for coloring the connections.
vmin : float | None
Minimum value for colormap. If None, it is determined automatically.
vmax : float | None
Maximum value for colormap. If None, it is determined automatically.
colorbar : bool
Display a colorbar or not.
title : str
The figure title.
colorbar_size : float
Size of the colorbar.
colorbar_pos : tuple, shape (2,)
Position of the colorbar.
fontsize_title : int
Font size to use for title.
fontsize_names : int
Font size to use for node names.
fontsize_colorbar : int
Font size to use for colorbar.
padding : float
Space to add around figure to accommodate long labels.
fig : None | instance of matplotlib.figure.Figure
The figure to use. If None, a new figure with the specified background
color will be created.
subplot : int | tuple, shape (3,)
Location of the subplot when creating figures with multiple plots. E.g.
121 or (1, 2, 1) for 1 row, 2 columns, plot 1. See
matplotlib.pyplot.subplot.
interactive : bool
When enabled, left-click on a node to show only connections to that
node. Right-click shows all connections.
node_linewidth : float
        Line width for nodes.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure handle.
axes : instance of matplotlib.projections.polar.PolarAxes
The subplot handle.
Notes
-----
This code is based on a circle graph example by Nicolas P. Rougier
By default, :func:`matplotlib.pyplot.savefig` does not take ``facecolor``
into account when saving, even if set when a figure is generated. This
can be addressed via, e.g.::
>>> fig.savefig(fname_fig, facecolor='black') # doctest:+SKIP
If ``facecolor`` is not set via :func:`matplotlib.pyplot.savefig`, the
figure labels, title, and legend may be cut off in the output figure.
"""
import matplotlib.pyplot as plt
import matplotlib.path as m_path
import matplotlib.patches as m_patches
n_nodes = len(node_names)
if node_angles is not None:
if len(node_angles) != n_nodes:
raise ValueError('node_angles has to be the same length '
'as node_names')
# convert it to radians
node_angles = node_angles * np.pi / 180
else:
# uniform layout on unit circle
node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)
if node_width is None:
# widths correspond to the minimum angle between two nodes
dist_mat = node_angles[None, :] - node_angles[:, None]
dist_mat[np.diag_indices(n_nodes)] = 1e9
node_width = np.min(np.abs(dist_mat))
else:
node_width = node_width * np.pi / 180
if node_colors is not None:
if len(node_colors) < n_nodes:
node_colors = cycle(node_colors)
else:
# assign colors using colormap
try:
spectral = plt.cm.spectral
except AttributeError:
spectral = plt.cm.Spectral
node_colors = [spectral(i / float(n_nodes))
for i in range(n_nodes)]
# handle 1D and 2D connectivity information
if con.ndim == 1:
if indices is None:
raise ValueError('indices has to be provided if con.ndim == 1')
elif con.ndim == 2:
if con.shape[0] != n_nodes or con.shape[1] != n_nodes:
raise ValueError('con has to be 1D or a square matrix')
# we use the lower-triangular part
indices = np.tril_indices(n_nodes, -1)
con = con[indices]
else:
raise ValueError('con has to be 1D or a square matrix')
# get the colormap
if isinstance(colormap, str):
colormap = plt.get_cmap(colormap)
# Make figure background the same colors as axes
if fig is None:
fig = plt.figure(figsize=(8, 8), facecolor=facecolor)
# Use a polar axes
if not isinstance(subplot, tuple):
subplot = (subplot,)
axes = plt.subplot(*subplot, polar=True)
axes.set_facecolor(facecolor)
# No ticks, we'll put our own
plt.xticks([])
plt.yticks([])
# Set y axes limit, add additional space if requested
plt.ylim(0, 10 + padding)
# Remove the black axes border which may obscure the labels
axes.spines['polar'].set_visible(False)
# Draw lines between connected nodes, only draw the strongest connections
if n_lines is not None and len(con) > n_lines:
con_thresh = np.sort(np.abs(con).ravel())[-n_lines]
else:
con_thresh = 0.
# get the connections which we are drawing and sort by connection strength
# this will allow us to draw the strongest connections first
con_abs = np.abs(con)
con_draw_idx = np.where(con_abs >= con_thresh)[0]
con = con[con_draw_idx]
con_abs = con_abs[con_draw_idx]
indices = [ind[con_draw_idx] for ind in indices]
# now sort them
sort_idx = np.argsort(con_abs)
del con_abs
con = con[sort_idx]
indices = [ind[sort_idx] for ind in indices]
# Get vmin vmax for color scaling
if vmin is None:
vmin = np.min(con[np.abs(con) >= con_thresh])
if vmax is None:
vmax = np.max(con)
vrange = vmax - vmin
# We want to add some "noise" to the start and end position of the
# edges: We modulate the noise with the number of connections of the
# node and the connection strength, such that the strongest connections
# are closer to the node center
nodes_n_con = np.zeros((n_nodes), dtype=np.int64)
for i, j in zip(indices[0], indices[1]):
nodes_n_con[i] += 1
nodes_n_con[j] += 1
# initialize random number generator so plot is reproducible
rng = np.random.mtrand.RandomState(0)
n_con = len(indices[0])
noise_max = 0.25 * node_width
start_noise = rng.uniform(-noise_max, noise_max, n_con)
end_noise = rng.uniform(-noise_max, noise_max, n_con)
nodes_n_con_seen = np.zeros_like(nodes_n_con)
for i, (start, end) in enumerate(zip(indices[0], indices[1])):
nodes_n_con_seen[start] += 1
nodes_n_con_seen[end] += 1
start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start]) /
float(nodes_n_con[start]))
end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end]) /
float(nodes_n_con[end]))
# scale connectivity for colormap (vmin<=>0, vmax<=>1)
con_val_scaled = (con - vmin) / vrange
# Finally, we draw the connections
for pos, (i, j) in enumerate(zip(indices[0], indices[1])):
# Start point
t0, r0 = node_angles[i], 10
# End point
t1, r1 = node_angles[j], 10
# Some noise in start and end point
t0 += start_noise[pos]
t1 += end_noise[pos]
verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]
codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,
m_path.Path.LINETO]
path = m_path.Path(verts, codes)
color = colormap(con_val_scaled[pos])
# Actual line
patch = m_patches.PathPatch(path, fill=False, edgecolor=color,
linewidth=linewidth, alpha=1.)
axes.add_patch(patch)
# Draw ring with colored nodes
height = np.ones(n_nodes) * 1.0
bars = axes.bar(node_angles, height, width=node_width, bottom=9,
edgecolor=node_edgecolor, lw=node_linewidth,
facecolor='.9', align='center')
for bar, color in zip(bars, node_colors):
bar.set_facecolor(color)
# Draw node labels
angles_deg = 180 * node_angles / np.pi
for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):
if angle_deg >= 270:
ha = 'left'
else:
# Flip the label, so text is always upright
angle_deg += 180
ha = 'right'
axes.text(angle_rad, 10.4, name, size=fontsize_names,
rotation=angle_deg, rotation_mode='anchor',
horizontalalignment=ha, verticalalignment='center',
color=textcolor)
if title is not None:
plt.title(title, color=textcolor, fontsize=fontsize_title,
axes=axes)
if colorbar:
sm = plt.cm.ScalarMappable(cmap=colormap,
norm=plt.Normalize(vmin, vmax))
sm.set_array(np.linspace(vmin, vmax))
cb = plt.colorbar(sm, ax=axes, use_gridspec=False,
shrink=colorbar_size,
anchor=colorbar_pos)
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
cb.ax.tick_params(labelsize=fontsize_colorbar)
plt.setp(cb_yticks, color=textcolor)
# Add callback for interaction
if interactive:
callback = partial(_plot_connectivity_circle_onpick, fig=fig,
axes=axes, indices=indices, n_nodes=n_nodes,
node_angles=node_angles)
fig.canvas.mpl_connect('button_press_event', callback)
plt_show(show)
return fig, axes
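# A minimal usage sketch; the connectivity matrix and node labels below are
# hypothetical, purely for illustration:
#
#   con = np.random.RandomState(0).rand(4, 4)
#   names = ['LF', 'RF', 'LP', 'RP']
#   fig, axes = plot_connectivity_circle(con, names, n_lines=5)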
|
bsd-3-clause
| 1,820,932,291,245,673,000 | -5,569,143,242,194,701,000 | 36.274648 | 79 | 0.603753 | false |
seaotterman/tensorflow
|
tensorflow/contrib/factorization/python/ops/clustering_ops.py
|
22
|
25599
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Clustering Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.factorization.python.ops import gen_clustering_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.factorization.python.ops.gen_clustering_ops import *
# pylint: enable=wildcard-import
from tensorflow.contrib.util import loader
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.embedding_ops import embedding_lookup
from tensorflow.python.platform import resource_loader
_clustering_ops = loader.load_op_library(
resource_loader.get_path_to_datafile('_clustering_ops.so'))
# Euclidean distance between vectors U and V is defined as ||U - V||_F, which is
# the square root of the sum of the squared element-wise differences.
SQUARED_EUCLIDEAN_DISTANCE = 'squared_euclidean'
# Cosine distance between vectors U and V is defined as
# 1 - (U \dot V) / (||U||_F ||V||_F)
COSINE_DISTANCE = 'cosine'
RANDOM_INIT = 'random'
KMEANS_PLUS_PLUS_INIT = 'kmeans_plus_plus'
class KMeans(object):
"""Creates the graph for k-means clustering."""
def __init__(self,
inputs,
num_clusters,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=False,
mini_batch_steps_per_iteration=1,
random_seed=0,
kmeans_plus_plus_num_retries=2):
"""Creates an object for generating KMeans clustering graph.
    This class implements the following variants of the K-means algorithm:
If use_mini_batch is False, it runs standard full batch K-means. Each step
runs a single iteration of K-Means. This step can be run sharded across
multiple workers by passing a list of sharded inputs to this class. Note
however that a single step needs to process the full input at once.
If use_mini_batch is True, it runs a generalization of the mini-batch
K-means algorithm. It runs multiple iterations, where each iteration is
composed of mini_batch_steps_per_iteration steps. Two copies of cluster
centers are maintained: one that is updated at the end of each iteration,
and one that is updated every step. The first copy is used to compute
cluster allocations for each step, and for inference, while the second copy
is the one updated each step using the mini-batch update rule. After each
    iteration is complete, this second copy is copied back to the first copy.
Note that for use_mini_batch=True, when mini_batch_steps_per_iteration=1,
the algorithm reduces to the standard mini-batch algorithm. Also by setting
mini_batch_steps_per_iteration = num_inputs / batch_size, the algorithm
becomes an asynchronous version of the full-batch algorithm. Note however
that there is no guarantee by this implementation that each input is seen
exactly once per iteration. Also, different updates are applied
asynchronously without locking. So this asynchronous version may not behave
exactly like a full-batch version.
Args:
inputs: An input tensor or list of input tensors
num_clusters: number of clusters.
initial_clusters: Specifies the clusters used during initialization. Can
be a tensor or numpy array, or a function that generates the clusters.
Can also be "random" to specify that clusters should be chosen randomly
from input data.
distance_metric: distance metric used for clustering.
use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
full batch.
mini_batch_steps_per_iteration: number of steps after which the updated
cluster centers are synced back to a master copy.
      random_seed: Seed for the PRNG used to initialize the cluster centers.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample O(log(num_to_sample)) additional points.
"""
self._inputs = inputs if isinstance(inputs, list) else [inputs]
assert num_clusters > 0, num_clusters
self._num_clusters = num_clusters
if initial_clusters is None:
initial_clusters = RANDOM_INIT
self._initial_clusters = initial_clusters
assert distance_metric in [SQUARED_EUCLIDEAN_DISTANCE, COSINE_DISTANCE]
self._distance_metric = distance_metric
self._use_mini_batch = use_mini_batch
self._mini_batch_steps_per_iteration = int(mini_batch_steps_per_iteration)
self._random_seed = random_seed
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
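  # A hypothetical usage sketch (not part of the original module); `points` is
  # assumed to be a [num_points, dim] float32 Tensor defined elsewhere:
  #
  #   kmeans = KMeans(points,
  #                   num_clusters=3,
  #                   initial_clusters=KMEANS_PLUS_PLUS_INIT,
  #                   distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
  #                   use_mini_batch=True)
  #   (all_scores, cluster_idx, scores,
  #    initialized, init_op, training_op) = kmeans.training_graph()
  #
  # Running `init_op` once and then `training_op` repeatedly performs the
  # (mini-batch) updates described in the docstring above.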
@classmethod
def _distance_graph(cls, inputs, clusters, distance_metric):
"""Computes distance between each input and each cluster center.
Args:
inputs: list of input Tensors.
clusters: cluster Tensor.
distance_metric: distance metric used for clustering
Returns:
list of Tensors, where each element corresponds to each element in inputs.
The value is the distance of each row to all the cluster centers.
Currently only Euclidean distance and cosine distance are supported.
"""
assert isinstance(inputs, list)
if distance_metric == SQUARED_EUCLIDEAN_DISTANCE:
return cls._compute_euclidean_distance(inputs, clusters)
elif distance_metric == COSINE_DISTANCE:
return cls._compute_cosine_distance(
inputs, clusters, inputs_normalized=True)
else:
assert False, ('Unsupported distance metric passed to Kmeans %s' %
str(distance_metric))
@classmethod
def _compute_euclidean_distance(cls, inputs, clusters):
"""Computes Euclidean distance between each input and each cluster center.
Args:
inputs: list of input Tensors.
clusters: cluster Tensor.
Returns:
list of Tensors, where each element corresponds to each element in inputs.
The value is the distance of each row to all the cluster centers.
"""
output = []
for inp in inputs:
with ops.colocate_with(inp):
# Computes Euclidean distance. Note the first and third terms are
# broadcast additions.
squared_distance = (math_ops.reduce_sum(
math_ops.square(inp), 1, keep_dims=True) - 2 * math_ops.matmul(
inp, clusters, transpose_b=True) + array_ops.transpose(
math_ops.reduce_sum(
math_ops.square(clusters), 1, keep_dims=True)))
output.append(squared_distance)
return output
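  # The expression above uses the identity ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2:
  # the first reduce_sum gives a [num_points, 1] column of ||a||^2, the matmul
  # gives the [num_points, num_clusters] matrix of dot products a.b, and the
  # transposed reduce_sum gives a [1, num_clusters] row of ||b||^2, so the
  # broadcast sum yields all pairwise squared distances. For example, with
  # a = [1, 2] and b = [3, 1]: 5 - 2*5 + 10 = 5 = ||a - b||^2.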
@classmethod
def _compute_cosine_distance(cls, inputs, clusters, inputs_normalized=True):
"""Computes cosine distance between each input and each cluster center.
Args:
inputs: list of input Tensor.
clusters: cluster Tensor
      inputs_normalized: if True, it assumes that inp and clusters are already
      L2-normalized and computes 1 minus the dot product, which equals the
      cosine distance. Else it L2 normalizes the inputs first.
Returns:
list of Tensors, where each element corresponds to each element in inp.
The value is the distance of each row to all the cluster centers.
"""
output = []
if not inputs_normalized:
with ops.colocate_with(clusters):
clusters = nn_impl.l2_normalize(clusters, dim=1)
for inp in inputs:
with ops.colocate_with(inp):
if not inputs_normalized:
inp = nn_impl.l2_normalize(inp, dim=1)
output.append(1 - math_ops.matmul(inp, clusters, transpose_b=True))
return output
def _infer_graph(self, inputs, clusters):
"""Maps input to closest cluster and the score.
Args:
inputs: list of input Tensors.
clusters: Tensor of cluster centers.
Returns:
      List of tuples, where each value in the tuple corresponds to a value in inputs.
The tuple has following three elements:
all_scores: distance of each input to each cluster center.
score: distance of each input to closest cluster center.
cluster_idx: index of cluster center closest to the corresponding input.
"""
assert isinstance(inputs, list)
# Pairwise distances are used only by transform(). In all other cases, this
# sub-graph is not evaluated.
scores = self._distance_graph(inputs, clusters, self._distance_metric)
output = []
if (self._distance_metric == COSINE_DISTANCE and
not self._clusters_l2_normalized()):
      # For L2-normalized vectors x and y, the squared Euclidean distance equals
      # 2 * cosine_distance, so we reuse the nearest_neighbors op and halve its
      # distances below.
# TODO(ands): Support COSINE distance in nearest_neighbors and remove
# this.
with ops.colocate_with(clusters):
clusters = nn_impl.l2_normalize(clusters, dim=1)
for inp, score in zip(inputs, scores):
with ops.colocate_with(inp):
(indices,
distances) = gen_clustering_ops.nearest_neighbors(inp, clusters, 1)
if self._distance_metric == COSINE_DISTANCE:
distances *= 0.5
output.append(
(score, array_ops.squeeze(distances), array_ops.squeeze(indices)))
return zip(*output)
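  # Why halving works: for L2-normalized x and y,
  #   ||x - y||^2 = ||x||^2 + ||y||^2 - 2*x.y = 2 - 2*x.y = 2 * (1 - x.y),
  # and (1 - x.y) is exactly the cosine distance defined at the top of this
  # file, so cosine_distance = 0.5 * squared_euclidean_distance.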
def _init_clusters_random(self):
"""Does random initialization of clusters.
Returns:
Tensor of randomly initialized clusters.
"""
num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in self._inputs])
# Note that for mini-batch k-means, we should ensure that the batch size of
# data used during initialization is sufficiently large to avoid duplicated
# clusters.
with ops.control_dependencies(
[check_ops.assert_less_equal(self._num_clusters, num_data)]):
indices = random_ops.random_uniform(
array_ops.reshape(self._num_clusters, [-1]),
minval=0,
maxval=math_ops.cast(num_data, dtypes.int64),
seed=self._random_seed,
dtype=dtypes.int64)
clusters_init = embedding_lookup(
self._inputs, indices, partition_strategy='div')
return clusters_init
def _clusters_l2_normalized(self):
"""Returns True if clusters centers are kept normalized."""
return (self._distance_metric == COSINE_DISTANCE and
(not self._use_mini_batch or
self._mini_batch_steps_per_iteration > 1))
def _initialize_clusters(self,
cluster_centers,
cluster_centers_initialized,
cluster_centers_updated):
"""Returns an op to initialize the cluster centers."""
init = self._initial_clusters
if init == RANDOM_INIT:
clusters_init = self._init_clusters_random()
elif init == KMEANS_PLUS_PLUS_INIT:
# Points from only the first shard are used for initializing centers.
# TODO(ands): Use all points.
inp = self._inputs[0]
if self._distance_metric == COSINE_DISTANCE:
inp = nn_impl.l2_normalize(inp, dim=1)
clusters_init = gen_clustering_ops.kmeans_plus_plus_initialization(
inp, self._num_clusters, self._random_seed,
self._kmeans_plus_plus_num_retries)
elif callable(init):
clusters_init = init(self._inputs, self._num_clusters)
elif not isinstance(init, str):
clusters_init = init
else:
assert False, 'Unsupported init passed to Kmeans %s' % str(init)
if self._distance_metric == COSINE_DISTANCE and clusters_init is not None:
clusters_init = nn_impl.l2_normalize(clusters_init, dim=1)
with ops.colocate_with(cluster_centers_initialized):
initialized = control_flow_ops.with_dependencies(
[clusters_init],
array_ops.identity(cluster_centers_initialized))
with ops.colocate_with(cluster_centers):
assign_centers = state_ops.assign(cluster_centers, clusters_init,
validate_shape=False)
if cluster_centers_updated != cluster_centers:
assign_centers = control_flow_ops.group(
assign_centers,
state_ops.assign(cluster_centers_updated, clusters_init,
validate_shape=False))
assign_centers = control_flow_ops.with_dependencies(
[assign_centers],
state_ops.assign(cluster_centers_initialized, True))
return control_flow_ops.cond(initialized,
control_flow_ops.no_op,
lambda: assign_centers).op
def _create_variables(self):
"""Creates variables.
Returns:
Tuple with following elements:
cluster_centers: a Tensor for storing cluster centers
cluster_centers_initialized: bool Variable indicating whether clusters
are initialized.
cluster_counts: a Tensor for storing counts of points assigned to this
cluster. This is used by mini-batch training.
cluster_centers_updated: Tensor representing copy of cluster centers that
are updated every step.
      update_in_steps: number of steps left before we sync
cluster_centers_updated back to cluster_centers.
"""
init_value = array_ops.constant([], dtype=dtypes.float32)
cluster_centers = variable_scope.variable(init_value,
name='clusters',
validate_shape=False)
cluster_centers_initialized = variable_scope.variable(False,
dtype=dtypes.bool,
name='initialized')
if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
# Copy of cluster centers actively updated each step according to
# mini-batch update rule.
cluster_centers_updated = variable_scope.variable(init_value,
name='clusters_updated',
validate_shape=False)
# How many steps till we copy the updated clusters to cluster_centers.
update_in_steps = variable_scope.variable(
self._mini_batch_steps_per_iteration,
dtype=dtypes.int64,
name='update_in_steps')
# Count of points assigned to cluster_centers_updated.
cluster_counts = variable_scope.variable(
array_ops.zeros([self._num_clusters],
dtype=dtypes.int64))
else:
cluster_centers_updated = cluster_centers
update_in_steps = None
cluster_counts = (variable_scope.variable(array_ops.ones(
[self._num_clusters],
dtype=dtypes.int64))
if self._use_mini_batch else None)
return (cluster_centers,
cluster_centers_initialized,
cluster_counts,
cluster_centers_updated,
update_in_steps)
@classmethod
def _l2_normalize_data(cls, inputs):
"""Normalized the input data."""
output = []
for inp in inputs:
with ops.colocate_with(inp):
output.append(nn_impl.l2_normalize(inp, dim=1))
return output
def training_graph(self):
"""Generate a training graph for kmeans algorithm.
Returns:
A tuple consisting of:
all_scores: A matrix (or list of matrices) of dimensions (num_input,
num_clusters) where the value is the distance of an input vector and a
cluster center.
cluster_idx: A vector (or list of vectors). Each element in the vector
corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
scores: Similar to cluster_idx but specifies the distance to the
assigned cluster instead.
cluster_centers_initialized: scalar indicating whether clusters have been
initialized.
init_op: an op to initialize the clusters.
training_op: an op that runs an iteration of training.
"""
# Implementation of kmeans.
inputs = self._inputs
(cluster_centers_var,
cluster_centers_initialized,
total_counts,
cluster_centers_updated,
update_in_steps) = self._create_variables()
init_op = self._initialize_clusters(cluster_centers_var,
cluster_centers_initialized,
cluster_centers_updated)
cluster_centers = cluster_centers_var
if self._distance_metric == COSINE_DISTANCE:
inputs = self._l2_normalize_data(inputs)
if not self._clusters_l2_normalized():
cluster_centers = nn_impl.l2_normalize(cluster_centers, dim=1)
all_scores, scores, cluster_idx = self._infer_graph(inputs, cluster_centers)
if self._use_mini_batch:
sync_updates_op = self._mini_batch_sync_updates_op(
update_in_steps,
cluster_centers_var, cluster_centers_updated,
total_counts)
assert sync_updates_op is not None
with ops.control_dependencies([sync_updates_op]):
training_op = self._mini_batch_training_op(
inputs, cluster_idx, cluster_centers_updated, total_counts)
else:
assert cluster_centers == cluster_centers_var
training_op = self._full_batch_training_op(inputs, cluster_idx,
cluster_centers_var)
return (all_scores, cluster_idx, scores,
cluster_centers_initialized, init_op, training_op)
def _mini_batch_sync_updates_op(self, update_in_steps,
cluster_centers_var, cluster_centers_updated,
total_counts):
if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
assert update_in_steps is not None
with ops.colocate_with(update_in_steps):
def _f():
# Note that there is a race condition here, so we do a best effort
# updates here. We reset update_in_steps first so that other workers
# don't duplicate the updates. Also we update cluster_center_vars
# before resetting total_counts to avoid large updates to
# cluster_centers_updated based on partially updated
# cluster_center_vars.
with ops.control_dependencies([state_ops.assign(
update_in_steps,
self._mini_batch_steps_per_iteration - 1)]):
with ops.colocate_with(cluster_centers_updated):
if self._distance_metric == COSINE_DISTANCE:
cluster_centers = nn_impl.l2_normalize(cluster_centers_updated,
dim=1)
else:
cluster_centers = cluster_centers_updated
with ops.colocate_with(cluster_centers_var):
with ops.control_dependencies([state_ops.assign(
cluster_centers_var,
cluster_centers)]):
with ops.colocate_with(cluster_centers_var):
with ops.control_dependencies([
state_ops.assign(total_counts,
array_ops.zeros_like(total_counts))]):
return array_ops.identity(update_in_steps)
return control_flow_ops.cond(
update_in_steps <= 0,
_f,
lambda: state_ops.assign_sub(update_in_steps, 1))
else:
return control_flow_ops.no_op()
def _mini_batch_training_op(self, inputs, cluster_idx_list,
cluster_centers, total_counts):
"""Creates an op for training for mini batch case.
Args:
inputs: list of input Tensors.
cluster_idx_list: A vector (or list of vectors). Each element in the
vector corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
cluster_centers: Tensor Ref of cluster centers.
total_counts: Tensor Ref of cluster counts.
Returns:
An op for doing an update of mini-batch k-means.
"""
update_ops = []
for inp, cluster_idx in zip(inputs, cluster_idx_list):
with ops.colocate_with(inp):
assert total_counts is not None
cluster_idx = array_ops.reshape(cluster_idx, [-1])
# Dedupe the unique ids of cluster_centers being updated so that updates
# can be locally aggregated.
unique_ids, unique_idx = array_ops.unique(cluster_idx)
num_unique_cluster_idx = array_ops.size(unique_ids)
# Fetch the old values of counts and cluster_centers.
with ops.colocate_with(total_counts):
old_counts = array_ops.gather(total_counts, unique_ids)
# TODO(agarwal): This colocation seems to run into problems. Fix it.
# with ops.colocate_with(cluster_centers):
old_cluster_centers = array_ops.gather(cluster_centers, unique_ids)
# Locally aggregate the increment to counts.
count_updates = math_ops.unsorted_segment_sum(
array_ops.ones_like(
unique_idx, dtype=total_counts.dtype),
unique_idx,
num_unique_cluster_idx)
# Locally compute the sum of inputs mapped to each id.
# For a cluster with old cluster value x, old count n, and with data
# d_1,...d_k newly assigned to it, we recompute the new value as
# x += (sum_i(d_i) - k * x) / (n + k).
# Compute sum_i(d_i), see comment above.
cluster_center_updates = math_ops.unsorted_segment_sum(
inp, unique_idx, num_unique_cluster_idx)
# Shape to enable broadcasting count_updates and learning_rate to inp.
# It extends the shape with 1's to match the rank of inp.
broadcast_shape = array_ops.concat(
[
array_ops.reshape(num_unique_cluster_idx, [1]), array_ops.ones(
array_ops.reshape(array_ops.rank(inp) - 1, [1]),
dtype=dtypes.int32)
],
0)
# Subtract k * x, see comment above.
cluster_center_updates -= math_ops.cast(
array_ops.reshape(count_updates, broadcast_shape),
inp.dtype) * old_cluster_centers
learning_rate = math_ops.reciprocal(
math_ops.cast(old_counts + count_updates, inp.dtype))
learning_rate = array_ops.reshape(learning_rate, broadcast_shape)
# scale by 1 / (n + k), see comment above.
cluster_center_updates *= learning_rate
# Apply the updates.
update_counts = state_ops.scatter_add(
total_counts,
unique_ids,
count_updates)
update_cluster_centers = state_ops.scatter_add(
cluster_centers,
unique_ids,
cluster_center_updates)
update_ops.extend([update_counts, update_cluster_centers])
return control_flow_ops.group(*update_ops)
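  # A small worked example of the update rule above (plain numbers, not part
  # of the original module): for one cluster with old center x = [1., 1.],
  # old count n = 4, and k = 2 newly assigned points [3., 1.] and [1., 3.]:
  #   sum_i(d_i) - k * x = [4., 4.] - [2., 2.] = [2., 2.]
  #   learning_rate      = 1 / (n + k) = 1/6
  #   new center         = [1., 1.] + [2., 2.] / 6 = [1.333..., 1.333...]
  # and the count for that cluster is incremented by k = 2.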
def _full_batch_training_op(self, inputs, cluster_idx_list, cluster_centers):
"""Creates an op for training for full batch case.
Args:
inputs: list of input Tensors.
cluster_idx_list: A vector (or list of vectors). Each element in the
vector corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
cluster_centers: Tensor Ref of cluster centers.
Returns:
      An op for doing an update of full-batch k-means.
"""
cluster_sums = []
cluster_counts = []
epsilon = constant_op.constant(1e-6, dtype=inputs[0].dtype)
for inp, cluster_idx in zip(inputs, cluster_idx_list):
with ops.colocate_with(inp):
cluster_sums.append(
math_ops.unsorted_segment_sum(inp, cluster_idx, self._num_clusters))
cluster_counts.append(
math_ops.unsorted_segment_sum(
array_ops.reshape(
array_ops.ones(
array_ops.reshape(array_ops.shape(inp)[0], [-1])),
[-1, 1]), cluster_idx, self._num_clusters))
with ops.colocate_with(cluster_centers):
new_clusters_centers = math_ops.add_n(cluster_sums) / (math_ops.cast(
math_ops.add_n(cluster_counts), cluster_sums[0].dtype) + epsilon)
if self._clusters_l2_normalized():
new_clusters_centers = nn_impl.l2_normalize(new_clusters_centers, dim=1)
return state_ops.assign(cluster_centers, new_clusters_centers)
|
apache-2.0
| 5,295,127,131,408,792,000 | 5,271,352,989,133,544,000 | 43.989455 | 80 | 0.648932 | false |
xzturn/tensorflow
|
tensorflow/python/data/kernel_tests/optional_test.py
|
4
|
19765
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Optional`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import optional_ops
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def _optional_spec_test_combinations():
# pylint: disable=g-long-lambda
cases = [
("Dense", lambda: constant_op.constant(37.0),
tensor_spec.TensorSpec([], dtypes.float32)),
("Sparse", lambda: sparse_tensor.SparseTensor(
indices=[[0, 1]],
values=constant_op.constant([0], dtype=dtypes.int32),
dense_shape=[10, 10]),
sparse_tensor.SparseTensorSpec([10, 10], dtypes.int32)),
("Nest", lambda: {
"a": constant_op.constant(37.0),
"b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))
}, {
"a":
tensor_spec.TensorSpec([], dtypes.float32),
"b": (
tensor_spec.TensorSpec([1], dtypes.string),
tensor_spec.TensorSpec([], dtypes.string),
)
}),
("Optional", lambda: optional_ops.Optional.from_value(37.0),
optional_ops.OptionalSpec(tensor_spec.TensorSpec([], dtypes.float32))),
]
def reduce_fn(x, y):
name, value_fn, expected_structure = y
return x + combinations.combine(
tf_value_fn=combinations.NamedObject(name, value_fn),
expected_value_structure=expected_structure)
return functools.reduce(reduce_fn, cases, [])
def _get_next_as_optional_test_combinations():
# pylint: disable=g-long-lambda
cases = [
("Dense", np.array([1, 2, 3], dtype=np.int32),
lambda: constant_op.constant([4, 5, 6], dtype=dtypes.int32), True),
("Sparse",
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]],
values=np.array([-1., 1.], dtype=np.float32),
dense_shape=[2, 2]),
lambda: sparse_tensor.SparseTensor(
indices=[[0, 1], [1, 0]], values=[37.0, 42.0], dense_shape=[2, 2]),
False),
("Nest", {
"a":
np.array([1, 2, 3], dtype=np.int32),
"b":
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]],
values=np.array([-1., 1.], dtype=np.float32),
dense_shape=[2, 2])
}, lambda: {
"a":
constant_op.constant([4, 5, 6], dtype=dtypes.int32),
"b":
sparse_tensor.SparseTensor(
indices=[[0, 1], [1, 0]],
values=[37.0, 42.0],
dense_shape=[2, 2])
}, False),
]
def reduce_fn(x, y):
name, value, value_fn, gpu_compatible = y
return x + combinations.combine(
np_value=value, tf_value_fn=combinations.NamedObject(name, value_fn),
gpu_compatible=gpu_compatible)
return functools.reduce(reduce_fn, cases, [])
class OptionalTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testFromValue(self):
opt = optional_ops.Optional.from_value(constant_op.constant(37.0))
self.assertTrue(self.evaluate(opt.has_value()))
self.assertEqual(37.0, self.evaluate(opt.get_value()))
@combinations.generate(test_base.default_test_combinations())
def testFromStructuredValue(self):
opt = optional_ops.Optional.from_value({
"a": constant_op.constant(37.0),
"b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))
})
self.assertTrue(self.evaluate(opt.has_value()))
self.assertEqual({
"a": 37.0,
"b": ([b"Foo"], b"Bar")
}, self.evaluate(opt.get_value()))
@combinations.generate(test_base.default_test_combinations())
def testFromSparseTensor(self):
st_0 = sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0], dtype=np.int64),
dense_shape=np.array([1]))
st_1 = sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1]]),
values=np.array([-1., 1.], dtype=np.float32),
dense_shape=np.array([2, 2]))
opt = optional_ops.Optional.from_value((st_0, st_1))
self.assertTrue(self.evaluate(opt.has_value()))
val_0, val_1 = opt.get_value()
for expected, actual in [(st_0, val_0), (st_1, val_1)]:
self.assertAllEqual(expected.indices, self.evaluate(actual.indices))
self.assertAllEqual(expected.values, self.evaluate(actual.values))
self.assertAllEqual(expected.dense_shape,
self.evaluate(actual.dense_shape))
@combinations.generate(test_base.default_test_combinations())
def testFromNone(self):
value_structure = tensor_spec.TensorSpec([], dtypes.float32)
opt = optional_ops.Optional.none_from_structure(value_structure)
self.assertTrue(opt.value_structure.is_compatible_with(value_structure))
self.assertFalse(
opt.value_structure.is_compatible_with(
tensor_spec.TensorSpec([1], dtypes.float32)))
self.assertFalse(
opt.value_structure.is_compatible_with(
tensor_spec.TensorSpec([], dtypes.int32)))
self.assertFalse(self.evaluate(opt.has_value()))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(opt.get_value())
@combinations.generate(test_base.default_test_combinations())
def testAddN(self):
devices = ["/cpu:0"]
if test_util.is_gpu_available():
devices.append("/gpu:0")
for device in devices:
with ops.device(device):
# With value
opt1 = optional_ops.Optional.from_value((1.0, 2.0))
opt2 = optional_ops.Optional.from_value((3.0, 4.0))
add_tensor = math_ops.add_n([opt1._variant_tensor,
opt2._variant_tensor])
add_opt = optional_ops._OptionalImpl(add_tensor, opt1.value_structure)
self.assertAllEqual(self.evaluate(add_opt.get_value()), (4.0, 6.0))
# Without value
opt_none1 = optional_ops.Optional.none_from_structure(
opt1.value_structure)
opt_none2 = optional_ops.Optional.none_from_structure(
opt2.value_structure)
add_tensor = math_ops.add_n([opt_none1._variant_tensor,
opt_none2._variant_tensor])
add_opt = optional_ops._OptionalImpl(add_tensor,
opt_none1.value_structure)
self.assertFalse(self.evaluate(add_opt.has_value()))
@combinations.generate(test_base.default_test_combinations())
def testNestedAddN(self):
devices = ["/cpu:0"]
if test_util.is_gpu_available():
devices.append("/gpu:0")
for device in devices:
with ops.device(device):
opt1 = optional_ops.Optional.from_value([1, 2.0])
opt2 = optional_ops.Optional.from_value([3, 4.0])
opt3 = optional_ops.Optional.from_value((5.0, opt1._variant_tensor))
opt4 = optional_ops.Optional.from_value((6.0, opt2._variant_tensor))
add_tensor = math_ops.add_n([opt3._variant_tensor,
opt4._variant_tensor])
add_opt = optional_ops._OptionalImpl(add_tensor, opt3.value_structure)
self.assertEqual(self.evaluate(add_opt.get_value()[0]), 11.0)
inner_add_opt = optional_ops._OptionalImpl(add_opt.get_value()[1],
opt1.value_structure)
self.assertAllEqual(inner_add_opt.get_value(), [4, 6.0])
@combinations.generate(test_base.default_test_combinations())
def testZerosLike(self):
devices = ["/cpu:0"]
if test_util.is_gpu_available():
devices.append("/gpu:0")
for device in devices:
with ops.device(device):
# With value
opt = optional_ops.Optional.from_value((1.0, 2.0))
zeros_tensor = array_ops.zeros_like(opt._variant_tensor)
zeros_opt = optional_ops._OptionalImpl(zeros_tensor,
opt.value_structure)
self.assertAllEqual(self.evaluate(zeros_opt.get_value()),
(0.0, 0.0))
# Without value
opt_none = optional_ops.Optional.none_from_structure(
opt.value_structure)
zeros_tensor = array_ops.zeros_like(opt_none._variant_tensor)
zeros_opt = optional_ops._OptionalImpl(zeros_tensor,
opt_none.value_structure)
self.assertFalse(self.evaluate(zeros_opt.has_value()))
@combinations.generate(test_base.default_test_combinations())
def testNestedZerosLike(self):
devices = ["/cpu:0"]
if test_util.is_gpu_available():
devices.append("/gpu:0")
for device in devices:
with ops.device(device):
opt1 = optional_ops.Optional.from_value(1.0)
opt2 = optional_ops.Optional.from_value(opt1._variant_tensor)
zeros_tensor = array_ops.zeros_like(opt2._variant_tensor)
zeros_opt = optional_ops._OptionalImpl(zeros_tensor,
opt2.value_structure)
inner_zeros_opt = optional_ops._OptionalImpl(zeros_opt.get_value(),
opt1.value_structure)
self.assertEqual(self.evaluate(inner_zeros_opt.get_value()), 0.0)
@combinations.generate(test_base.default_test_combinations())
def testCopyToGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
with ops.device("/cpu:0"):
optional_with_value = optional_ops.Optional.from_value(
(constant_op.constant(37.0), constant_op.constant("Foo"),
constant_op.constant(42)))
optional_none = optional_ops.Optional.none_from_structure(
tensor_spec.TensorSpec([], dtypes.float32))
with ops.device("/gpu:0"):
gpu_optional_with_value = optional_ops._OptionalImpl(
array_ops.identity(optional_with_value._variant_tensor),
optional_with_value.value_structure)
gpu_optional_none = optional_ops._OptionalImpl(
array_ops.identity(optional_none._variant_tensor),
optional_none.value_structure)
gpu_optional_with_value_has_value = gpu_optional_with_value.has_value()
gpu_optional_with_value_values = gpu_optional_with_value.get_value()
gpu_optional_none_has_value = gpu_optional_none.has_value()
self.assertTrue(self.evaluate(gpu_optional_with_value_has_value))
self.assertEqual((37.0, b"Foo", 42),
self.evaluate(gpu_optional_with_value_values))
self.assertFalse(self.evaluate(gpu_optional_none_has_value))
@combinations.generate(test_base.default_test_combinations())
def testNestedCopyToGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
with ops.device("/cpu:0"):
optional_with_value = optional_ops.Optional.from_value(
(constant_op.constant(37.0), constant_op.constant("Foo"),
constant_op.constant(42)))
optional_none = optional_ops.Optional.none_from_structure(
tensor_spec.TensorSpec([], dtypes.float32))
nested_optional = optional_ops.Optional.from_value(
(optional_with_value._variant_tensor, optional_none._variant_tensor,
1.0))
with ops.device("/gpu:0"):
gpu_nested_optional = optional_ops._OptionalImpl(
array_ops.identity(nested_optional._variant_tensor),
nested_optional.value_structure)
gpu_nested_optional_has_value = gpu_nested_optional.has_value()
gpu_nested_optional_values = gpu_nested_optional.get_value()
self.assertTrue(self.evaluate(gpu_nested_optional_has_value))
inner_with_value = optional_ops._OptionalImpl(
gpu_nested_optional_values[0], optional_with_value.value_structure)
inner_none = optional_ops._OptionalImpl(
gpu_nested_optional_values[1], optional_none.value_structure)
self.assertEqual((37.0, b"Foo", 42),
self.evaluate(inner_with_value.get_value()))
self.assertFalse(self.evaluate(inner_none.has_value()))
self.assertEqual(1.0, self.evaluate(gpu_nested_optional_values[2]))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
_optional_spec_test_combinations()))
def testOptionalSpec(self, tf_value_fn, expected_value_structure):
tf_value = tf_value_fn()
opt = optional_ops.Optional.from_value(tf_value)
self.assertTrue(
structure.are_compatible(opt.value_structure, expected_value_structure))
opt_structure = structure.type_spec_from_value(opt)
self.assertIsInstance(opt_structure, optional_ops.OptionalSpec)
self.assertTrue(structure.are_compatible(opt_structure, opt_structure))
self.assertTrue(
structure.are_compatible(opt_structure._value_structure,
expected_value_structure))
self.assertEqual([dtypes.variant],
structure.get_flat_tensor_types(opt_structure))
self.assertEqual([tensor_shape.TensorShape([])],
structure.get_flat_tensor_shapes(opt_structure))
    # An OptionalSpec is not compatible with a non-optional value.
non_optional_structure = structure.type_spec_from_value(
constant_op.constant(42.0))
self.assertFalse(opt_structure.is_compatible_with(non_optional_structure))
# Assert that the optional survives a round-trip via _from_tensor_list()
# and _to_tensor_list().
round_trip_opt = opt_structure._from_tensor_list(
opt_structure._to_tensor_list(opt))
if isinstance(tf_value, optional_ops.Optional):
self.assertValuesEqual(
self.evaluate(tf_value.get_value()),
self.evaluate(round_trip_opt.get_value().get_value()))
else:
self.assertValuesEqual(
self.evaluate(tf_value),
self.evaluate(round_trip_opt.get_value()))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
_get_next_as_optional_test_combinations()))
def testIteratorGetNextAsOptional(self, np_value, tf_value_fn,
gpu_compatible):
if not gpu_compatible and test.is_gpu_available():
self.skipTest("Test case not yet supported on GPU.")
ds = dataset_ops.Dataset.from_tensors(np_value).repeat(3)
if context.executing_eagerly():
iterator = dataset_ops.make_one_shot_iterator(ds)
# For each element of the dataset, assert that the optional evaluates to
# the expected value.
for _ in range(3):
next_elem = iterator_ops.get_next_as_optional(iterator)
self.assertIsInstance(next_elem, optional_ops.Optional)
self.assertTrue(structure.are_compatible(
next_elem.value_structure,
structure.type_spec_from_value(tf_value_fn())))
self.assertTrue(next_elem.has_value())
self.assertValuesEqual(np_value, next_elem.get_value())
# After exhausting the iterator, `next_elem.has_value()` will evaluate to
# false, and attempting to get the value will fail.
for _ in range(2):
next_elem = iterator_ops.get_next_as_optional(iterator)
self.assertFalse(self.evaluate(next_elem.has_value()))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_elem.get_value())
else:
iterator = dataset_ops.make_initializable_iterator(ds)
next_elem = iterator_ops.get_next_as_optional(iterator)
self.assertIsInstance(next_elem, optional_ops.Optional)
self.assertTrue(structure.are_compatible(
next_elem.value_structure,
structure.type_spec_from_value(tf_value_fn())))
# Before initializing the iterator, evaluating the optional fails with
# a FailedPreconditionError. This is only relevant in graph mode.
elem_has_value_t = next_elem.has_value()
elem_value_t = next_elem.get_value()
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(elem_has_value_t)
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(elem_value_t)
# Now we initialize the iterator.
self.evaluate(iterator.initializer)
# For each element of the dataset, assert that the optional evaluates to
# the expected value.
for _ in range(3):
elem_has_value, elem_value = self.evaluate(
[elem_has_value_t, elem_value_t])
self.assertTrue(elem_has_value)
self.assertValuesEqual(np_value, elem_value)
# After exhausting the iterator, `next_elem.has_value()` will evaluate to
# false, and attempting to get the value will fail.
for _ in range(2):
self.assertFalse(self.evaluate(elem_has_value_t))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(elem_value_t)
@combinations.generate(test_base.default_test_combinations())
def testFunctionBoundaries(self):
@def_function.function
def get_optional():
x = constant_op.constant(1.0)
opt = optional_ops.Optional.from_value(x)
# TODO(skyewm): support returning Optionals from functions?
return opt._variant_tensor
# TODO(skyewm): support Optional arguments?
@def_function.function
def consume_optional(opt_tensor):
value_structure = tensor_spec.TensorSpec([], dtypes.float32)
opt = optional_ops._OptionalImpl(opt_tensor, value_structure)
return opt.get_value()
opt_tensor = get_optional()
val = consume_optional(opt_tensor)
self.assertEqual(self.evaluate(val), 1.0)
@combinations.generate(test_base.default_test_combinations())
def testLimitedRetracing(self):
trace_count = [0]
@def_function.function
def f(opt):
trace_count[0] += 1
return opt.get_value()
opt1 = optional_ops.Optional.from_value(constant_op.constant(37.0))
opt2 = optional_ops.Optional.from_value(constant_op.constant(42.0))
for _ in range(10):
self.assertEqual(self.evaluate(f(opt1)), 37.0)
self.assertEqual(self.evaluate(f(opt2)), 42.0)
self.assertEqual(trace_count[0], 1)
if __name__ == "__main__":
test.main()
|
apache-2.0
| 2,276,702,477,589,067,800 | 6,431,455,972,861,345,000 | 41.053191 | 80 | 0.64766 | false |
40123248/w16b_test
|
static/Brython3.1.0-20150301-090019/Lib/site-packages/pygame/constants.py
|
603
|
15297
|
#!/usr/bin/env python
'''Constants defined by SDL, and needed in pygame.
Note that many of the flags for SDL are not needed in pygame, and are not
included here. These constants are generally accessed from the
`pygame.locals` module. This module is automatically placed in the pygame
namespace, but you will usually want to place them directly into your module's
namespace with the following command::
from pygame.locals import *
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
#import SDL.constants
# SDL constants taken from https://wiki.libsdl.org/SDLKeycodeLookup
'''
YV12_OVERLAY = SDL.constants.SDL_YV12_OVERLAY
IYUV_OVERLAY = SDL.constants.SDL_IYUV_OVERLAY
YUY2_OVERLAY = SDL.constants.SDL_YUY2_OVERLAY
UYVY_OVERLAY = SDL.constants.SDL_UYVY_OVERLAY
YVYU_OVERLAY = SDL.constants.SDL_YVYU_OVERLAY
SWSURFACE = SDL.constants.SDL_SWSURFACE
HWSURFACE = SDL.constants.SDL_HWSURFACE
RESIZABLE = SDL.constants.SDL_RESIZABLE
ASYNCBLIT = SDL.constants.SDL_ASYNCBLIT
OPENGL = SDL.constants.SDL_OPENGL
OPENGLBLIT = SDL.constants.SDL_OPENGLBLIT
ANYFORMAT = SDL.constants.SDL_ANYFORMAT
HWPALETTE = SDL.constants.SDL_HWPALETTE
DOUBLEBUF = SDL.constants.SDL_DOUBLEBUF
#FULLSCREEN = SDL.constants.SDL_FULLSCREEN
'''
FULLSCREEN = 0
'''
HWACCEL = SDL.constants.SDL_HWACCEL
SRCCOLORKEY = SDL.constants.SDL_SRCCOLORKEY
'''
RLEACCELOK = 254
RLEACCEL = 255
'''
SRCALPHA = SDL.constants.SDL_SRCALPHA
PREALLOC = SDL.constants.SDL_PREALLOC
NOFRAME = SDL.constants.SDL_NOFRAME
GL_RED_SIZE = SDL.constants.SDL_GL_RED_SIZE
GL_GREEN_SIZE = SDL.constants.SDL_GL_GREEN_SIZE
GL_BLUE_SIZE = SDL.constants.SDL_GL_BLUE_SIZE
GL_ALPHA_SIZE = SDL.constants.SDL_GL_ALPHA_SIZE
GL_BUFFER_SIZE = SDL.constants.SDL_GL_BUFFER_SIZE
GL_DOUBLEBUFFER = SDL.constants.SDL_GL_DOUBLEBUFFER
GL_DEPTH_SIZE = SDL.constants.SDL_GL_DEPTH_SIZE
GL_STENCIL_SIZE = SDL.constants.SDL_GL_STENCIL_SIZE
GL_ACCUM_RED_SIZE = SDL.constants.SDL_GL_ACCUM_RED_SIZE
GL_ACCUM_GREEN_SIZE = SDL.constants.SDL_GL_ACCUM_GREEN_SIZE
GL_ACCUM_BLUE_SIZE = SDL.constants.SDL_GL_ACCUM_BLUE_SIZE
GL_ACCUM_ALPHA_SIZE = SDL.constants.SDL_GL_ACCUM_ALPHA_SIZE
GL_STEREO = SDL.constants.SDL_GL_STEREO
GL_MULTISAMPLEBUFFERS = SDL.constants.SDL_GL_MULTISAMPLEBUFFERS
GL_MULTISAMPLESAMPLES = SDL.constants.SDL_GL_MULTISAMPLESAMPLES
TIMER_RESOLUTION = SDL.constants.TIMER_RESOLUTION
AUDIO_U8 = SDL.constants.AUDIO_U8
AUDIO_S8 = SDL.constants.AUDIO_S8
AUDIO_U16LSB = SDL.constants.AUDIO_U16LSB
AUDIO_S16LSB = SDL.constants.AUDIO_S16LSB
AUDIO_U16MSB = SDL.constants.AUDIO_U16MSB
AUDIO_S16MSB = SDL.constants.AUDIO_S16MSB
AUDIO_U16 = SDL.constants.AUDIO_U16
AUDIO_S16 = SDL.constants.AUDIO_S16
AUDIO_U16SYS = SDL.constants.AUDIO_U16SYS
AUDIO_S16SYS = SDL.constants.AUDIO_S16SYS
'''
def _t(a, b, c, d):
return (ord(a) << 24) | (ord(b) << 16) | (ord(c) << 8) | ord(d)
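# Worked example of the packing above (not executed in the original module):
# ord('T') = 0x54, ord('E') = 0x45, ord('X') = 0x58, so
# _t('T', 'E', 'X', 'T') == 0x54455854 == 1413830740, the FourCC-style
# integer bound to SCRAP_TEXT below.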
SCRAP_TEXT = _t('T', 'E', 'X', 'T')
SCRAP_BMP = _t('B', 'M', 'P', ' ')
BLEND_ADD = 0x01
BLEND_SUB = 0x02
BLEND_MULT = 0x03
BLEND_MIN = 0x04
BLEND_MAX = 0x05
"""
NOEVENT = SDL.constants.SDL_NOEVENT
ACTIVEEVENT = SDL.constants.SDL_ACTIVEEVENT
KEYDOWN = SDL.constants.SDL_KEYDOWN
KEYUP = SDL.constants.SDL_KEYUP
MOUSEMOTION = SDL.constants.SDL_MOUSEMOTION
MOUSEBUTTONDOWN = SDL.constants.SDL_MOUSEBUTTONDOWN
MOUSEBUTTONUP = SDL.constants.SDL_MOUSEBUTTONUP
JOYAXISMOTION = SDL.constants.SDL_JOYAXISMOTION
JOYBALLMOTION = SDL.constants.SDL_JOYBALLMOTION
JOYHATMOTION = SDL.constants.SDL_JOYHATMOTION
JOYBUTTONDOWN = SDL.constants.SDL_JOYBUTTONDOWN
JOYBUTTONUP = SDL.constants.SDL_JOYBUTTONUP
VIDEORESIZE = SDL.constants.SDL_VIDEORESIZE
VIDEOEXPOSE = SDL.constants.SDL_VIDEOEXPOSE
QUIT = SDL.constants.SDL_QUIT
SYSWMEVENT = SDL.constants.SDL_SYSWMEVENT
USEREVENT = SDL.constants.SDL_USEREVENT
NUMEVENTS = SDL.constants.SDL_NUMEVENTS
HAT_CENTERED = SDL.constants.SDL_HAT_CENTERED
HAT_UP = SDL.constants.SDL_HAT_UP
HAT_RIGHTUP = SDL.constants.SDL_HAT_RIGHTUP
HAT_RIGHT = SDL.constants.SDL_HAT_RIGHT
HAT_RIGHTDOWN = SDL.constants.SDL_HAT_RIGHTDOWN
HAT_DOWN = SDL.constants.SDL_HAT_DOWN
HAT_LEFTDOWN = SDL.constants.SDL_HAT_LEFTDOWN
HAT_LEFT = SDL.constants.SDL_HAT_LEFT
HAT_LEFTUP = SDL.constants.SDL_HAT_LEFTUP
"""
#BEGIN GENERATED CONSTANTS; see support/make_pygame_keyconstants.py
K_0 = 48
K_1 = 49
K_2 = 50
K_3 = 51
K_4 = 52
K_5 = 53
K_6 = 54
K_7 = 55
K_8 = 56
K_9 = 57
K_AMPERSAND = 38
K_ASTERISK = 42
K_AT = 64
K_BACKQUOTE = 96
K_BACKSLASH = 92
K_BACKSPACE = 8
#K_BREAK = SDL.constants.SDLK_BREAK
K_CAPSLOCK = 1073741881
K_CARET = 94
K_CLEAR = 1073742040
K_COLON = 58
K_COMMA = 44
#K_COMPOSE = SDL.constants.SDLK_COMPOSE
K_DELETE = 127
K_DOLLAR = 36
K_DOWN = 1073741905
K_END = 1073741901
K_EQUALS = 1073741927
K_ESCAPE = 27
#K_EURO = SDL.constants.SDLK_EURO
K_EXCLAIM = 33
K_F1 = 1073741882
K_F10 = 1073741891
K_F11 = 1073741892
K_F12 = 1073741893
K_F13 = 1073741928
K_F14 = 1073741929
K_F15 = 1073741930
K_F2 = 1073741883
K_F3 = 1073741884
K_F4 = 1073741885
K_F5 = 1073741886
K_F6 = 1073741887
K_F7 = 1073741888
K_F8 = 1073741889
K_F9 = 1073741890
#K_FIRST = SDL.constants.SDLK_FIRST
K_GREATER = 1073742022
K_HASH = 1073742028
K_HELP = 1073741941
K_HOME = 1073741898
K_INSERT = 1073741897
K_KP0 = 1073741922
K_KP1 = 1073741913
K_KP2 = 1073741914
K_KP3 = 1073741915
K_KP4 = 1073741916
K_KP5 = 1073741917
K_KP6 = 1073741918
K_KP7 = 1073741919
K_KP8 = 1073741920
K_KP9 = 1073741921
K_KP_DIVIDE = 1073741908
K_KP_ENTER = 1073741912
K_KP_EQUALS = 1073741927
K_KP_MINUS = 1073741910
K_KP_MULTIPLY = 1073741909
K_KP_PERIOD = 1073741923
K_KP_PLUS = 1073741911
K_LALT = 1073742050
#K_LAST = SDL.constants.SDLK_LAST
K_LCTRL = 1073742048
K_LEFT = 1073741904
#K_LEFTBRACKET = SDL.constants.SDLK_LEFTBRACKET
K_LEFTPAREN = 1073742006
#K_LESS = SDL.constants.SDLK_LESS
#K_LMETA = SDL.constants.SDLK_LMETA
K_LSHIFT = 1073742049
#K_LSUPER = SDL.constants.SDLK_LSUPER
K_MENU = 1073741942
K_MINUS = 45
K_MODE = 1073742081
#K_NUMLOCK = SDL.constants.SDLK_NUMLOCK
K_PAGEDOWN = 1073741902
K_PAGEUP = 1073741899
K_PAUSE = 1073741896
#K_PERIOD = SDL.constants.SDLK_PERIOD
K_PLUS = 43
#K_POWER = SDL.constants.SDLK_POWER
#K_PRINT = SDL.constants.SDLK_PRINT
K_QUESTION = 63
K_QUOTE = 39
K_QUOTEDBL = 34
K_RALT = 1073742054
K_RCTRL = 1073742052
K_RETURN = 13
K_RIGHT = 1073741903
#K_RIGHTBRACKET = SDL.constants.SDLK_RIGHTBRACKET
K_RIGHTPAREN = 41
#K_RMETA = SDL.constants.SDLK_RMETA
K_RSHIFT = 1073742053
#K_RSUPER = SDL.constants.SDLK_RSUPER
K_SCROLLOCK = 1073741895
K_SEMICOLON = 59
K_SLASH = 47
K_SPACE = 1073742029
K_SYSREQ = 1073741978
K_TAB = 9
K_UNDERSCORE = 95
K_UNDO = 1073741946
K_UNKNOWN = 0
K_UP = 1073741906
"""
K_WORLD_0 = SDL.constants.SDLK_WORLD_0
K_WORLD_1 = SDL.constants.SDLK_WORLD_1
K_WORLD_10 = SDL.constants.SDLK_WORLD_10
K_WORLD_11 = SDL.constants.SDLK_WORLD_11
K_WORLD_12 = SDL.constants.SDLK_WORLD_12
K_WORLD_13 = SDL.constants.SDLK_WORLD_13
K_WORLD_14 = SDL.constants.SDLK_WORLD_14
K_WORLD_15 = SDL.constants.SDLK_WORLD_15
K_WORLD_16 = SDL.constants.SDLK_WORLD_16
K_WORLD_17 = SDL.constants.SDLK_WORLD_17
K_WORLD_18 = SDL.constants.SDLK_WORLD_18
K_WORLD_19 = SDL.constants.SDLK_WORLD_19
K_WORLD_2 = SDL.constants.SDLK_WORLD_2
K_WORLD_20 = SDL.constants.SDLK_WORLD_20
K_WORLD_21 = SDL.constants.SDLK_WORLD_21
K_WORLD_22 = SDL.constants.SDLK_WORLD_22
K_WORLD_23 = SDL.constants.SDLK_WORLD_23
K_WORLD_24 = SDL.constants.SDLK_WORLD_24
K_WORLD_25 = SDL.constants.SDLK_WORLD_25
K_WORLD_26 = SDL.constants.SDLK_WORLD_26
K_WORLD_27 = SDL.constants.SDLK_WORLD_27
K_WORLD_28 = SDL.constants.SDLK_WORLD_28
K_WORLD_29 = SDL.constants.SDLK_WORLD_29
K_WORLD_3 = SDL.constants.SDLK_WORLD_3
K_WORLD_30 = SDL.constants.SDLK_WORLD_30
K_WORLD_31 = SDL.constants.SDLK_WORLD_31
K_WORLD_32 = SDL.constants.SDLK_WORLD_32
K_WORLD_33 = SDL.constants.SDLK_WORLD_33
K_WORLD_34 = SDL.constants.SDLK_WORLD_34
K_WORLD_35 = SDL.constants.SDLK_WORLD_35
K_WORLD_36 = SDL.constants.SDLK_WORLD_36
K_WORLD_37 = SDL.constants.SDLK_WORLD_37
K_WORLD_38 = SDL.constants.SDLK_WORLD_38
K_WORLD_39 = SDL.constants.SDLK_WORLD_39
K_WORLD_4 = SDL.constants.SDLK_WORLD_4
K_WORLD_40 = SDL.constants.SDLK_WORLD_40
K_WORLD_41 = SDL.constants.SDLK_WORLD_41
K_WORLD_42 = SDL.constants.SDLK_WORLD_42
K_WORLD_43 = SDL.constants.SDLK_WORLD_43
K_WORLD_44 = SDL.constants.SDLK_WORLD_44
K_WORLD_45 = SDL.constants.SDLK_WORLD_45
K_WORLD_46 = SDL.constants.SDLK_WORLD_46
K_WORLD_47 = SDL.constants.SDLK_WORLD_47
K_WORLD_48 = SDL.constants.SDLK_WORLD_48
K_WORLD_49 = SDL.constants.SDLK_WORLD_49
K_WORLD_5 = SDL.constants.SDLK_WORLD_5
K_WORLD_50 = SDL.constants.SDLK_WORLD_50
K_WORLD_51 = SDL.constants.SDLK_WORLD_51
K_WORLD_52 = SDL.constants.SDLK_WORLD_52
K_WORLD_53 = SDL.constants.SDLK_WORLD_53
K_WORLD_54 = SDL.constants.SDLK_WORLD_54
K_WORLD_55 = SDL.constants.SDLK_WORLD_55
K_WORLD_56 = SDL.constants.SDLK_WORLD_56
K_WORLD_57 = SDL.constants.SDLK_WORLD_57
K_WORLD_58 = SDL.constants.SDLK_WORLD_58
K_WORLD_59 = SDL.constants.SDLK_WORLD_59
K_WORLD_6 = SDL.constants.SDLK_WORLD_6
K_WORLD_60 = SDL.constants.SDLK_WORLD_60
K_WORLD_61 = SDL.constants.SDLK_WORLD_61
K_WORLD_62 = SDL.constants.SDLK_WORLD_62
K_WORLD_63 = SDL.constants.SDLK_WORLD_63
K_WORLD_64 = SDL.constants.SDLK_WORLD_64
K_WORLD_65 = SDL.constants.SDLK_WORLD_65
K_WORLD_66 = SDL.constants.SDLK_WORLD_66
K_WORLD_67 = SDL.constants.SDLK_WORLD_67
K_WORLD_68 = SDL.constants.SDLK_WORLD_68
K_WORLD_69 = SDL.constants.SDLK_WORLD_69
K_WORLD_7 = SDL.constants.SDLK_WORLD_7
K_WORLD_70 = SDL.constants.SDLK_WORLD_70
K_WORLD_71 = SDL.constants.SDLK_WORLD_71
K_WORLD_72 = SDL.constants.SDLK_WORLD_72
K_WORLD_73 = SDL.constants.SDLK_WORLD_73
K_WORLD_74 = SDL.constants.SDLK_WORLD_74
K_WORLD_75 = SDL.constants.SDLK_WORLD_75
K_WORLD_76 = SDL.constants.SDLK_WORLD_76
K_WORLD_77 = SDL.constants.SDLK_WORLD_77
K_WORLD_78 = SDL.constants.SDLK_WORLD_78
K_WORLD_79 = SDL.constants.SDLK_WORLD_79
K_WORLD_8 = SDL.constants.SDLK_WORLD_8
K_WORLD_80 = SDL.constants.SDLK_WORLD_80
K_WORLD_81 = SDL.constants.SDLK_WORLD_81
K_WORLD_82 = SDL.constants.SDLK_WORLD_82
K_WORLD_83 = SDL.constants.SDLK_WORLD_83
K_WORLD_84 = SDL.constants.SDLK_WORLD_84
K_WORLD_85 = SDL.constants.SDLK_WORLD_85
K_WORLD_86 = SDL.constants.SDLK_WORLD_86
K_WORLD_87 = SDL.constants.SDLK_WORLD_87
K_WORLD_88 = SDL.constants.SDLK_WORLD_88
K_WORLD_89 = SDL.constants.SDLK_WORLD_89
K_WORLD_9 = SDL.constants.SDLK_WORLD_9
K_WORLD_90 = SDL.constants.SDLK_WORLD_90
K_WORLD_91 = SDL.constants.SDLK_WORLD_91
K_WORLD_92 = SDL.constants.SDLK_WORLD_92
K_WORLD_93 = SDL.constants.SDLK_WORLD_93
K_WORLD_94 = SDL.constants.SDLK_WORLD_94
K_WORLD_95 = SDL.constants.SDLK_WORLD_95
"""
K_a = 97
K_b = 98
K_c = 99
K_d = 100
K_e = 101
K_f = 102
K_g = 103
K_h = 104
K_i = 105
K_j = 106
K_k = 107
K_l = 108
K_m = 109
K_n = 110
K_o = 111
K_p = 112
K_q = 113
K_r = 114
K_s = 115
K_t = 116
K_u = 117
K_v = 118
K_w = 119
K_x = 120
K_y = 121
K_z = 122
#END GENERATED CONSTANTS
|
gpl-3.0
| -4,356,364,992,519,305,000 | -4,906,076,507,548,214,000 | 40.909589 | 78 | 0.525463 | false |
alfonsokim/nupic
|
examples/opf/experiments/params/EnsembleOnline.py
|
10
|
15348
|
import random
import multiprocessing
import numpy as np
from nupic.frameworks.opf import helpers
from nupic.frameworks.opf.client import Client
from random import shuffle
from random import randrange, uniform
import copy
windowSize=36
r=30
predictedField='pounds'
inertia=0.25
socRate=1.0
class Worker(multiprocessing.Process):
def __init__(self, work_queue, result_queue, stableSize, windowSize, predictedField, modeldata, iden):
multiprocessing.Process.__init__(self)
# job management
self.work_queue = work_queue
self.result_queue = result_queue
self.kill_received = False
#Model State
self.stableSize=stableSize
self.windowSize=windowSize
self.stableUpdateStepSize=1
self.iden=iden
self.truth=[]
self.predictedField=predictedField
self.modeldata=modeldata
self.numModels=len(modeldata)
self.M={}
self.Scores={}
self.predictionStreams={}
self.median=True
self.index=-1
        self.modelCapacity=len(modeldata)
def run(self):
        self.initM(self.modeldata)
while not self.kill_received:
jobaux = self.work_queue.get()
command=jobaux[0]
if command=='predict':
self.index=self.index+1
self.updateModelStats()
self.result_queue.put([(self.Scores[m], self.predictionStreams[m][-1], self.truth[self.index], m) for m in self.M.keys()])
if command=='getPredictionStreams':
self.result_queue.put(dict([(m, self.predictionStreams[m][:-windowSize]) for m in self.predictionStreams.keys()]))
if command=='delete':
delList=jobaux[1]
for d in delList:
if(d in self.M):
del self.M[d]
del self.Scores[d]
del self.predictionStreams[d]
print 'deleted Model'+str(d)+" in process "+str(self.iden)
print "number of models remaining in "+str(self.iden)+": "+str(len(self.M))
self.result_queue.put(self.iden)
if command=='getAAEs':
self.result_queue.put([(m, computeAAE(self.truth, self.predictionStreams[m],r ), self.getModelState(self.M[m]), self.M[m]['modelDescription']) for m in self.M.keys()])
if command=='addPSOVariants':
for t in jobaux[1]:
if(t[0]==self.iden):
name=t[2]
modelDescription=t[1][0]
x=t[1][1]
v=t[1][2]
self.M[name]={}
self.M[name]['modelDescription']=modelDescription
self.M[name]['client']=Client(**modelDescription)
self.M[name]['alive']=True
self.M[name]['start']=0
self.M[name]['end']=None
self.M[name]['x']=x
self.M[name]['v']=v
self.Scores[name]=10000
self.predictionStreams[name]=[0,]
print "added new model "+str(name)+" to process"+str(self.iden)
# store the result
def getModelState(self, d):
return d['x'], d['v']
    def initM(self, modelDataList):
        for modelData in modelDataList:
name=modelData[0]
self.M[name]={}
self.M[name]['modelDescription']=modelData[1]
self.M[name]['client']=Client(**modelData[1])
alpha=modelData[1]['modelConfig']['modelParams']['clParams']['alpha']
n=0
for encoder in modelData[1]['modelConfig']['modelParams']['sensorParams']['encoders']:
if encoder['name']==predictedField:
n=encoder['n']
synPermInactiveDec=modelData[1]['modelConfig']['modelParams']['spParams']['synPermInactiveDec']
activationThreshold=modelData[1]['modelConfig']['modelParams']['tmParams']['activationThreshold']
pamLength=modelData[1]['modelConfig']['modelParams']['tmParams']['pamLength']
self.M[name]['x']=np.array([alpha, n,synPermInactiveDec,activationThreshold, pamLength ])
vAlpha=uniform(0.01, 0.15)
vN=randrange(30, 200, 5)
vSynPermInactiveDec=uniform(0.01, 0.15)
vActivationThreshold=randrange(12, 17, 1)
vPamLength=randrange(1, 6, 1)
self.M[name]['v']=np.array([vAlpha, vN,vSynPermInactiveDec,vActivationThreshold,vPamLength])
self.M[name]['alive']=True
self.M[name]['start']=0
self.M[name]['end']=None
self.Scores[name]=10000
self.predictionStreams[name]=[0,]
def updateModelStats(self):
updatedTruth=False
for m in self.M.keys():
truth, prediction=self.M[m]['client'].nextTruthPrediction(self.predictedField)
if(not updatedTruth):
self.truth.append(truth)
updatedTruth=True
self.predictionStreams[m].append(prediction)
self.Scores[m]=computeAAE(self.truth, self.predictionStreams[m],windowSize)
def getStableVote(scores, stableSize, votes, currModel):
scores = sorted(scores, key=lambda t: t[0])[:stableSize]
median=True
if not median:
for s in scores:
if s[3]==currModel:
print [(score[0], score[3]) for score in scores]
return s[1], currModel
print [(s[0], s[3]) for s in scores], "switching voting Model!"
return scores[0][1], scores[0][3]
else:
print [(s[0], s[3]) for s in scores]
voters = sorted(scores, key=lambda t: t[1])
for voter in voters:
votes[voter[3]]=votes[voter[3]]+1
vote=voters[int(stableSize/2)][1]
return vote, currModel
def getFieldPermutations(config, predictedField):
encoders=config['modelParams']['sensorParams']['encoders']
encoderList=[]
for encoder in encoders:
if encoder==None:
continue
if encoder['name']==predictedField:
encoderList.append([encoder])
for e in encoders:
if e==None:
continue
if e['name'] != predictedField:
encoderList.append([encoder, e])
return encoderList
def getModelDescriptionLists(numProcesses, experiment):
config, control = helpers.loadExperiment(experiment)
encodersList=getFieldPermutations(config, 'pounds')
ns=range(50, 140, 120)
clAlphas=np.arange(0.01, 0.16, 0.104)
synPermInactives=np.arange(0.01, 0.16, 0.105)
tpPamLengths=range(5, 8, 2)
tpSegmentActivations=range(13, 17, 12)
if control['environment'] == 'opfExperiment':
experimentTasks = control['tasks']
task = experimentTasks[0]
datasetURI = task['dataset']['streams'][0]['source']
elif control['environment'] == 'nupic':
datasetURI = control['dataset']['streams'][0]['source']
metricSpecs = control['metrics']
datasetPath = datasetURI[len("file://"):]
ModelSetUpData=[]
name=0
for n in ns:
for clAlpha in clAlphas:
for synPermInactive in synPermInactives:
for tpPamLength in tpPamLengths:
for tpSegmentActivation in tpSegmentActivations:
for encoders in encodersList:
encodersmod=copy.deepcopy(encoders)
configmod=copy.deepcopy(config)
configmod['modelParams']['sensorParams']['encoders']=encodersmod
configmod['modelParams']['clParams']['alpha']=clAlpha
configmod['modelParams']['spParams']['synPermInactiveDec']=synPermInactive
configmod['modelParams']['tmParams']['pamLength']=tpPamLength
configmod['modelParams']['tmParams']['activationThreshold']=tpSegmentActivation
for encoder in encodersmod:
if encoder['name']==predictedField:
encoder['n']=n
ModelSetUpData.append((name,{'modelConfig':configmod, 'inferenceArgs':control['inferenceArgs'], 'metricSpecs':metricSpecs, 'sourceSpec':datasetPath,'sinkSpec':None,}))
name=name+1
#print modelInfo['modelConfig']['modelParams']['tmParams']
#print modelInfo['modelConfig']['modelParams']['sensorParams']['encoders'][4]['n']
print "num Models"+str( len(ModelSetUpData))
shuffle(ModelSetUpData)
#print [ (m[1]['modelConfig']['modelParams']['tmParams']['pamLength'], m[1]['modelConfig']['modelParams']['sensorParams']['encoders']) for m in ModelSetUpData]
return list(chunk(ModelSetUpData,numProcesses))
def chunk(l, n):
""" Yield n successive chunks from l.
"""
newn = int(1.0 * len(l) / n + 0.5)
for i in xrange(0, n-1):
yield l[i*newn:i*newn+newn]
yield l[n*newn-newn:]
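# Worked example (by hand, not executed here):
#   list(chunk(range(10), 3)) -> [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
# i.e. the first n-1 chunks get the rounded size newn and the last chunk
# takes whatever remains.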
def command(command, work_queues, aux):
for queue in work_queues:
queue.put((command, aux))
def getDuplicateList(streams, delta):
delList=[]
keys=streams.keys()
for key1 in keys:
if key1 in streams:
for key2 in streams.keys():
if(key1 !=key2):
print 'comparing model'+str(key1)+" to "+str(key2)
dist=sum([(a-b)**2 for a, b in zip(streams[key1], streams[key2])])
print dist
if(dist<delta):
delList.append(key2)
del streams[key2]
return delList
def slice_sampler(px, N = 1, x = None):
"""
Provides samples from a user-defined distribution.
slice_sampler(px, N = 1, x = None)
Inputs:
px = A discrete probability distribution.
N = Number of samples to return, default is 1
x = Optional list/array of observation values to return, where prob(x) = px.
Outputs:
If x=None (default) or if len(x) != len(px), it will return an array of integers
between 0 and len(px)-1. If x is supplied, it will return the
samples from x according to the distribution px.
"""
values = np.zeros(N, dtype=np.int)
samples = np.arange(len(px))
px = np.array(px) / (1.*sum(px))
u = uniform(0, max(px))
for n in xrange(N):
included = px>=u
choice = random.sample(range(np.sum(included)), 1)[0]
values[n] = samples[included][choice]
u = uniform(0, px[included][choice])
if x:
if len(x) == len(px):
x=np.array(x)
values = x[values]
else:
print "px and x are different lengths. Returning index locations for px."
return values
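# A hypothetical usage sketch (not part of the original script):
#   slice_sampler([0.2, 0.5, 0.3], N=5)
#       -> array of 5 indices from {0, 1, 2}, drawn with those weights
#   slice_sampler([0.2, 0.5, 0.3], N=5, x=['a', 'b', 'c'])
#       -> array of 5 values from ['a', 'b', 'c'], drawn with the same weights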
def getPSOVariants(modelInfos, votes, n):
# get x, px lists for sampling
norm=sum(votes.values())
xpx =[(m, float(votes[m])/norm) for m in votes.keys()]
x,px = [[z[i] for z in xpx] for i in (0,1)]
#sample from the set of models
variantIDs=slice_sampler(px, n, x)
print "variant IDS"
print variantIDs
#best X
x_best=modelInfos[0][2][0]
# create PSO variants of the models
modelDescriptions=[]
for variantID in variantIDs:
t=modelInfos[[i for i, v in enumerate(modelInfos) if v[0] == variantID][0]]
x=t[2][0]
v=t[2][1]
print "old x"
print x
modelDescriptionMod=copy.deepcopy(t[3])
configmod=modelDescriptionMod['modelConfig']
v=inertia*v+socRate*np.random.random_sample(len(v))*(x_best-x)
x=x+v
print "new x"
print x
configmod['modelParams']['clParams']['alpha']=max(0.01, x[0])
configmod['modelParams']['spParams']['synPermInactiveDec']=max(0.01, x[2])
configmod['modelParams']['tmParams']['pamLength']=int(round(max(1, x[4])))
configmod['modelParams']['tmParams']['activationThreshold']=int(round(max(1, x[3])))
for encoder in configmod['modelParams']['sensorParams']['encoders']:
if encoder['name']==predictedField:
encoder['n']=int(round(max(encoder['w']+1, x[1]) ))
modelDescriptions.append((modelDescriptionMod, x, v))
return modelDescriptions
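# Editorial note: the update inside getPSOVariants above is a simplified
# particle-swarm step, v_new = inertia * v + socRate * U(0,1) * (x_best - x)
# followed by x_new = x + v_new, where x_best is the parameter vector of the
# current best model; the cognitive (personal-best) term of canonical PSO is
# omitted here.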
def computeAAE(truth, predictions, windowSize):
windowSize=min(windowSize, len(truth))
zipped=zip(truth[-windowSize:], predictions[-windowSize-1:])
AAE=sum([abs(a - b) for a, b in zipped])/windowSize
return AAE
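# Editorial note -- worked example of computeAAE(), not from an actual run.
# With truth = [10, 12, 11] and predictions = [0, 11, 12, 13], a window of 3
# pairs each truth value with the prediction issued one step earlier:
# (10, 0), (12, 11), (11, 12), giving AAE = (10 + 1 + 1) / 3 = 4.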
if __name__ == "__main__":
cutPercentage=0.1
currModel=0
stableSize=3
delta=1
predictedField='pounds'
truth=[]
ensemblePredictions=[0,]
divisor=4
ModelSetUpData=getModelDescriptionLists(divisor, './')
num_processes=len(ModelSetUpData)
print num_processes
work_queues=[]
votes={}
votingParameterStats={"tpSegmentActivationThreshold":[], "tpPamLength":[], "synPermInactiveDec":[], "clAlpha":[], "numBuckets":[]}
# create a queue to pass to workers to store the results
result_queue = multiprocessing.Queue(len(ModelSetUpData))
# spawn workers
workerName=0
modelNameCount=0
for modelData in ModelSetUpData:
print len(modelData)
modelNameCount+=len(modelData)
work_queue= multiprocessing.Queue()
work_queues.append(work_queue)
worker = Worker(work_queue, result_queue, stableSize, windowSize, predictedField, modelData, workerName)
worker.start()
workerName=workerName+1
#init votes dict
for dataList in ModelSetUpData:
for data in dataList:
votes[data[0]]=0
for i in range(2120):
command('predict', work_queues, i)
scores=[]
for j in range(num_processes):
subscore=result_queue.get()
scores.extend(subscore)
print ""
print i
ensemblePrediction, currModel=getStableVote(scores, stableSize, votes, currModel)
ensemblePredictions.append(ensemblePrediction)
truth.append(scores[0][2])
print computeAAE(truth,ensemblePredictions, windowSize), int(currModel)
assert(result_queue.empty())
if i%r==0 and i!=0: #refresh ensemble
assert(result_queue.empty())
#get AAEs of models over the last i records
command('getAAEs', work_queues, None)
AAEs=[]
for j in range(num_processes):
subAAEs=result_queue.get()
AAEs.extend(subAAEs)
AAEs=sorted(AAEs, key=lambda t: t[1])
numToDelete=int(round(cutPercentage*len(AAEs)))
print "Single Model AAES"
print [(aae[0], aae[1]) for aae in AAEs]
print "Ensemble AAE"
print computeAAE(truth, ensemblePredictions, r)
#add bottom models to delList
print "Vote counts"
print votes
delList=[t[0] for t in AAEs[-numToDelete:]]
print "delList"
print delList
#find duplicate models(now unnecessary)
#command('getPredictionStreams', work_queues, None)
#streams={}
#for j in range(num_processes):
# subList=result_queue.get()
# streams.update(subList)
#delList.extend(getDuplicateList(streams, delta))
#print delList
command('delete', work_queues, delList)
for iden in delList:
del votes[iden]
print votes
#wait for deletion to finish and collect processIndices for addition
processIndices=[]
for j in range(num_processes):
processIndices.append( result_queue.get())
# pick new set of models for PSO variants
newModelDescriptions=getPSOVariants(AAEs, votes, len(delList))
assert(result_queue.empty())
#send new model descriptions to the queue and have processes pick them up
aux=[]
for i in range(len(newModelDescriptions)):
votes[modelNameCount]=0
aux.append((processIndices[i],newModelDescriptions[i],modelNameCount) )
modelNameCount=modelNameCount+1
command('addPSOVariants', work_queues, aux)
#set votes to 0
for key in votes.keys():
votes[key]=0
print "AAE over full stream"
print computeAAE(truth, ensemblePredictions, len(truth))
print "AAE1000"
print computeAAE(truth, ensemblePredictions, 1000)
|
agpl-3.0
| 3,983,500,156,588,017,700 | 8,126,874,168,878,439,000 | 34.121281 | 183 | 0.630701 | false |
Iconik/eve-suite
|
src/model/static/map/constellation.py
|
1
|
1590
|
from model.flyweight import Flyweight
from model.static.database import database
class Constellation(Flyweight):
def __init__(self, constellation_id):
#prevents reinitializing
if "_inited" in self.__dict__:
return
self._inited = None
#prevents reinitializing
self.constellation_id = constellation_id
cursor = database.get_cursor(
"select * from mapConstellations where constellationID={};".format(
self.constellation_id))
row = cursor.fetchone()
self.region_id = row["regionID"]
self.constellation_name = row["constellationName"]
self.x_pos = row["x"]
self.y_pos = row["y"]
self.z_pos = row["z"]
self.x_min = row["xMin"]
self.x_max = row["xMax"]
self.y_min = row["yMin"]
self.y_max = row["yMax"]
self.z_min = row["zMin"]
self.z_max = row["zMax"]
self.faction_id = row["factionID"]
self.radius = row["radius"]
cursor.close()
self._region = None
self._faction = None
def get_region(self):
"""Populates and returns the _region"""
if self._region is None:
from model.static.map.region import Region
self._region = Region(self.region_id)
return self._region
def get_faction(self):
"""Populates and returns the _faction"""
if self._faction is None:
from model.static.chr.faction import Faction
self._faction = Faction(self.faction_id)
return self._faction
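# Editorial note -- illustrative usage with a hypothetical constellation ID;
# assumes the static database has already been loaded elsewhere:
#   constellation = Constellation(20000001)
#   region = constellation.get_region()    # resolved lazily on first access
#   faction = constellation.get_faction()  # likewise cached after first call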
|
gpl-3.0
| 513,444,058,805,151,700 | -1,581,341,666,670,954,800 | 30.8 | 79 | 0.577358 | false |
ElectroweakGroup/Database_Extraction_Tool
|
Main.py
|
1
|
1572
|
import IsotopeDataExporting as ided
import os
import glob
import time
import sys
import renormalize as renorm
def function(option):
#Exports data requested by the user into text files (necessary to generate plots)
userInput = ided.datExp(option,True,True)
#Prints the user input so the user can check what they entered against the plot they are viewing
#The short sleep is a pause so that the timestamps used below work correctly
renorm.renormalize(userInput[0],userInput[1],userInput[2],userInput[3])
time.sleep(0.01)
#Makes plot (.012 s)
ided.pltFileExp(option,userInput[6],userInput[4],userInput[0],userInput[1],userInput[2],userInput[7],userInput[3],True)
#This code creates the .gif file which is the actual plot
os.chdir("Output/gnuPlot")
directory = os.getcwd()
try:
newest = max(glob.iglob(directory+"/*.plt"),key=os.path.getctime)
newest = newest.replace(os.getcwd()+"/","")
os.system("gnuplot "+newest)
except:
print('No new plot')
#This code restarts the program so it can be used again
os.chdir("..")
os.chdir("..")
os.system("python3 Main.py "+option)
newest = "Output/gnuPlot/"+newest.replace(".plt",".gif")
if os.path.isfile(newest):
os.system("rm "+newest)
try:
os.system("mv Output/gnuPlot/*.dat Output/gnuPlot/OutputData")
os.system("mv Output/gnuPlot/*.plt Output/gnuPlot/OutputData")
except:
pass
option = sys.argv[-1]
function(option)
|
mit
| -3,817,430,654,350,403,000 | 8,552,685,655,687,639,000 | 30.081633 | 123 | 0.666667 | false |
javierder/dogestart.me
|
django/db/models/fields/files.py
|
105
|
15978
|
import datetime
import os
from django import forms
from django.db.models.fields import Field
from django.core.files.base import File
from django.core.files.storage import default_storage
from django.core.files.images import ImageFile
from django.db.models import signals
from django.utils.encoding import force_str, force_text
from django.utils import six
from django.utils.translation import ugettext_lazy as _
class FieldFile(File):
def __init__(self, instance, field, name):
super(FieldFile, self).__init__(None, name)
self.instance = instance
self.field = field
self.storage = field.storage
self._committed = True
def __eq__(self, other):
# Older code may be expecting FileField values to be simple strings.
# By overriding the == operator, it can remain backwards compatible.
if hasattr(other, 'name'):
return self.name == other.name
return self.name == other
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.name)
# The standard File contains most of the necessary properties, but
# FieldFiles can be instantiated without a name, so that needs to
# be checked for here.
def _require_file(self):
if not self:
raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)
def _get_file(self):
self._require_file()
if not hasattr(self, '_file') or self._file is None:
self._file = self.storage.open(self.name, 'rb')
return self._file
def _set_file(self, file):
self._file = file
def _del_file(self):
del self._file
file = property(_get_file, _set_file, _del_file)
def _get_path(self):
self._require_file()
return self.storage.path(self.name)
path = property(_get_path)
def _get_url(self):
self._require_file()
return self.storage.url(self.name)
url = property(_get_url)
def _get_size(self):
self._require_file()
if not self._committed:
return self.file.size
return self.storage.size(self.name)
size = property(_get_size)
def open(self, mode='rb'):
self._require_file()
self.file.open(mode)
# open() doesn't alter the file's contents, but it does reset the pointer
open.alters_data = True
# In addition to the standard File API, FieldFiles have extra methods
# to further manipulate the underlying file, as well as update the
# associated model instance.
def save(self, name, content, save=True):
name = self.field.generate_filename(self.instance, name)
self.name = self.storage.save(name, content)
setattr(self.instance, self.field.name, self.name)
# Update the filesize cache
self._size = content.size
self._committed = True
# Save the object because it has changed, unless save is False
if save:
self.instance.save()
save.alters_data = True
def delete(self, save=True):
if not self:
return
# Only close the file if it's already open, which we know by the
# presence of self._file
if hasattr(self, '_file'):
self.close()
del self.file
self.storage.delete(self.name)
self.name = None
setattr(self.instance, self.field.name, self.name)
# Delete the filesize cache
if hasattr(self, '_size'):
del self._size
self._committed = False
if save:
self.instance.save()
delete.alters_data = True
def _get_closed(self):
file = getattr(self, '_file', None)
return file is None or file.closed
closed = property(_get_closed)
def close(self):
file = getattr(self, '_file', None)
if file is not None:
file.close()
def __getstate__(self):
# FieldFile needs access to its associated model field and an instance
# it's attached to in order to work properly, but the only necessary
# data to be pickled is the file's name itself. Everything else will
# be restored later, by FileDescriptor below.
return {'name': self.name, 'closed': False, '_committed': True, '_file': None}
class FileDescriptor(object):
"""
The descriptor for the file attribute on the model instance. Returns a
FieldFile when accessed so you can do stuff like::
>>> instance.file.size
Assigns a file object on assignment so you can do::
>>> instance.file = File(...)
"""
def __init__(self, field):
self.field = field
def __get__(self, instance=None, owner=None):
if instance is None:
raise AttributeError(
"The '%s' attribute can only be accessed from %s instances."
% (self.field.name, owner.__name__))
# This is slightly complicated, so worth an explanation.
# instance.file needs to ultimately return some instance of `File`,
# probably a subclass. Additionally, this returned object needs to have
# the FieldFile API so that users can easily do things like
# instance.file.path and have that delegated to the file storage engine.
# Easy enough if we're strict about assignment in __set__, but if you
# peek below you can see that we're not. So depending on the current
# value of the field we have to dynamically construct some sort of
# "thing" to return.
# The instance dict contains whatever was originally assigned
# in __set__.
file = instance.__dict__[self.field.name]
# If this value is a string (instance.file = "path/to/file") or None
# then we simply wrap it with the appropriate attribute class according
# to the file field. [This is FieldFile for FileFields and
# ImageFieldFile for ImageFields; it's also conceivable that user
# subclasses might also want to subclass the attribute class]. This
# object understands how to convert a path to a file, and also how to
# handle None.
if isinstance(file, six.string_types) or file is None:
attr = self.field.attr_class(instance, self.field, file)
instance.__dict__[self.field.name] = attr
# Other types of files may be assigned as well, but they need to have
# the FieldFile interface added to them. Thus, we wrap any other type of
# File inside a FieldFile (well, the field's attr_class, which is
# usually FieldFile).
elif isinstance(file, File) and not isinstance(file, FieldFile):
file_copy = self.field.attr_class(instance, self.field, file.name)
file_copy.file = file
file_copy._committed = False
instance.__dict__[self.field.name] = file_copy
# Finally, because of the (some would say boneheaded) way pickle works,
# the underlying FieldFile might not actually itself have an associated
# file. So we need to reset the details of the FieldFile in those cases.
elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
file.instance = instance
file.field = self.field
file.storage = self.field.storage
# That was fun, wasn't it?
return instance.__dict__[self.field.name]
def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
class FileField(Field):
# The class to wrap instance attributes in. Accessing the file object off
# the instance will always return an instance of attr_class.
attr_class = FieldFile
# The descriptor to use for accessing the attribute off of the class.
descriptor_class = FileDescriptor
description = _("File")
def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
for arg in ('primary_key', 'unique'):
if arg in kwargs:
raise TypeError("'%s' is not a valid argument for %s." % (arg, self.__class__))
self.storage = storage or default_storage
self.upload_to = upload_to
if callable(upload_to):
self.generate_filename = upload_to
kwargs['max_length'] = kwargs.get('max_length', 100)
super(FileField, self).__init__(verbose_name, name, **kwargs)
def get_internal_type(self):
return "FileField"
def get_prep_lookup(self, lookup_type, value):
if hasattr(value, 'name'):
value = value.name
return super(FileField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
"Returns field's value prepared for saving into a database."
# Need to convert File objects provided via a form to unicode for database insertion
if value is None:
return None
return six.text_type(value)
def pre_save(self, model_instance, add):
"Returns field's value just before saving."
file = super(FileField, self).pre_save(model_instance, add)
if file and not file._committed:
# Commit the file to storage prior to saving the model
file.save(file.name, file, save=False)
return file
def contribute_to_class(self, cls, name):
super(FileField, self).contribute_to_class(cls, name)
setattr(cls, self.name, self.descriptor_class(self))
def get_directory_name(self):
return os.path.normpath(force_text(datetime.datetime.now().strftime(force_str(self.upload_to))))
def get_filename(self, filename):
return os.path.normpath(self.storage.get_valid_name(os.path.basename(filename)))
def generate_filename(self, instance, filename):
return os.path.join(self.get_directory_name(), self.get_filename(filename))
def save_form_data(self, instance, data):
# Important: None means "no change", other false value means "clear"
# This subtle distinction (rather than a more explicit marker) is
# needed because we need to consume values that are also sane for a
# regular (non Model-) Form to find in its cleaned_data dictionary.
if data is not None:
# This value will be converted to unicode and stored in the
# database, so leaving False as-is is not acceptable.
if not data:
data = ''
setattr(instance, self.name, data)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FileField, 'max_length': self.max_length}
# If a file has been provided previously, then the form doesn't require
# that a new file is provided this time.
# The code to mark the form field as not required is used by
# form_for_instance, but can probably be removed once form_for_instance
# is gone. ModelForm uses a different method to check for an existing file.
if 'initial' in kwargs:
defaults['required'] = False
defaults.update(kwargs)
return super(FileField, self).formfield(**defaults)
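# Editorial note -- illustrative usage of FileField, not part of this module.
# A model declares the field; assigning a File leaves it uncommitted until
# pre_save() stores it through the configured storage backend:
#
#   class Document(models.Model):
#       attachment = models.FileField(upload_to='documents/%Y/%m/%d')
#
#   doc = Document(attachment=File(open('report.pdf', 'rb')))
#   doc.save()                 # the file is committed to storage in pre_save
#   print(doc.attachment.url)  # URL resolution is delegated to the storage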
class ImageFileDescriptor(FileDescriptor):
"""
Just like the FileDescriptor, but for ImageFields. The only difference is
assigning the width/height to the width_field/height_field, if appropriate.
"""
def __set__(self, instance, value):
previous_file = instance.__dict__.get(self.field.name)
super(ImageFileDescriptor, self).__set__(instance, value)
# To prevent recalculating image dimensions when we are instantiating
# an object from the database (bug #11084), only update dimensions if
# the field had a value before this assignment. Since the default
# value for FileField subclasses is an instance of field.attr_class,
# previous_file will only be None when we are called from
# Model.__init__(). The ImageField.update_dimension_fields method
# hooked up to the post_init signal handles the Model.__init__() cases.
# Assignment happening outside of Model.__init__() will trigger the
# update right here.
if previous_file is not None:
self.field.update_dimension_fields(instance, force=True)
class ImageFieldFile(ImageFile, FieldFile):
def delete(self, save=True):
# Clear the image dimensions cache
if hasattr(self, '_dimensions_cache'):
del self._dimensions_cache
super(ImageFieldFile, self).delete(save)
class ImageField(FileField):
attr_class = ImageFieldFile
descriptor_class = ImageFileDescriptor
description = _("Image")
def __init__(self, verbose_name=None, name=None, width_field=None,
height_field=None, **kwargs):
self.width_field, self.height_field = width_field, height_field
super(ImageField, self).__init__(verbose_name, name, **kwargs)
def contribute_to_class(self, cls, name):
super(ImageField, self).contribute_to_class(cls, name)
# Attach update_dimension_fields so that dimension fields declared
# after their corresponding image field don't stay cleared by
# Model.__init__, see bug #11196.
signals.post_init.connect(self.update_dimension_fields, sender=cls)
def update_dimension_fields(self, instance, force=False, *args, **kwargs):
"""
Updates field's width and height fields, if defined.
This method is hooked up to model's post_init signal to update
dimensions after instantiating a model instance. However, dimensions
won't be updated if the dimensions fields are already populated. This
avoids unnecessary recalculation when loading an object from the
database.
Dimensions can be forced to update with force=True, which is how
ImageFileDescriptor.__set__ calls this method.
"""
# Nothing to update if the field doesn't have dimension fields.
has_dimension_fields = self.width_field or self.height_field
if not has_dimension_fields:
return
# getattr will call the ImageFileDescriptor's __get__ method, which
# coerces the assigned value into an instance of self.attr_class
# (ImageFieldFile in this case).
file = getattr(instance, self.attname)
# Nothing to update if we have no file and not being forced to update.
if not file and not force:
return
dimension_fields_filled = not(
(self.width_field and not getattr(instance, self.width_field))
or (self.height_field and not getattr(instance, self.height_field))
)
# When both dimension fields have values, we are most likely loading
# data from the database or updating an image field that already had
# an image stored. In the first case, we don't want to update the
# dimension fields because we are already getting their values from the
# database. In the second case, we do want to update the dimensions
# fields and will skip this return because force will be True since we
# were called from ImageFileDescriptor.__set__.
if dimension_fields_filled and not force:
return
# file should be an instance of ImageFieldFile or should be None.
if file:
width = file.width
height = file.height
else:
# No file, so clear dimensions fields.
width = None
height = None
# Update the width and height fields.
if self.width_field:
setattr(instance, self.width_field, width)
if self.height_field:
setattr(instance, self.height_field, height)
def formfield(self, **kwargs):
defaults = {'form_class': forms.ImageField}
defaults.update(kwargs)
return super(ImageField, self).formfield(**defaults)
|
mit
| 4,155,493,283,418,019,000 | -5,195,006,409,627,221,000 | 39.246851 | 104 | 0.640568 | false |
GenericStudent/home-assistant
|
homeassistant/components/cast/home_assistant_cast.py
|
9
|
2387
|
"""Home Assistant Cast integration for Cast."""
from typing import Optional
from pychromecast.controllers.homeassistant import HomeAssistantController
import voluptuous as vol
from homeassistant import auth, config_entries, core
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.helpers import config_validation as cv, dispatcher
from homeassistant.helpers.network import get_url
from .const import DOMAIN, SIGNAL_HASS_CAST_SHOW_VIEW
SERVICE_SHOW_VIEW = "show_lovelace_view"
ATTR_VIEW_PATH = "view_path"
ATTR_URL_PATH = "dashboard_path"
async def async_setup_ha_cast(
hass: core.HomeAssistant, entry: config_entries.ConfigEntry
):
"""Set up Home Assistant Cast."""
user_id: Optional[str] = entry.data.get("user_id")
user: Optional[auth.models.User] = None
if user_id is not None:
user = await hass.auth.async_get_user(user_id)
if user is None:
user = await hass.auth.async_create_system_user(
"Home Assistant Cast", [auth.GROUP_ID_ADMIN]
)
hass.config_entries.async_update_entry(
entry, data={**entry.data, "user_id": user.id}
)
if user.refresh_tokens:
refresh_token: auth.models.RefreshToken = list(user.refresh_tokens.values())[0]
else:
refresh_token = await hass.auth.async_create_refresh_token(user)
async def handle_show_view(call: core.ServiceCall):
"""Handle a Show View service call."""
hass_url = get_url(hass, require_ssl=True, prefer_external=True)
controller = HomeAssistantController(
# If you are developing Home Assistant Cast, uncomment and set to your dev app id.
# app_id="5FE44367",
hass_url=hass_url,
client_id=None,
refresh_token=refresh_token.token,
)
dispatcher.async_dispatcher_send(
hass,
SIGNAL_HASS_CAST_SHOW_VIEW,
controller,
call.data[ATTR_ENTITY_ID],
call.data[ATTR_VIEW_PATH],
call.data.get(ATTR_URL_PATH),
)
hass.helpers.service.async_register_admin_service(
DOMAIN,
SERVICE_SHOW_VIEW,
handle_show_view,
vol.Schema(
{
ATTR_ENTITY_ID: cv.entity_id,
ATTR_VIEW_PATH: str,
vol.Optional(ATTR_URL_PATH): str,
}
),
)
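# Editorial note -- illustrative service call (YAML as used from an
# automation); the entity and view names are hypothetical:
#
#   service: cast.show_lovelace_view
#   data:
#     entity_id: media_player.living_room_hub
#     view_path: downstairs
#     dashboard_path: lovelace-cast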
|
apache-2.0
| -5,445,637,504,556,753,000 | -9,074,197,967,685,883,000 | 31.256757 | 94 | 0.631336 | false |
Jgarcia-IAS/localizacion
|
openerp/addons/base/tests/test_osv.py
|
446
|
4722
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 OpenERP S.A. http://www.openerp.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import unittest
from openerp.osv.query import Query
class QueryTestCase(unittest.TestCase):
def test_basic_query(self):
query = Query()
query.tables.extend(['"product_product"', '"product_template"'])
query.where_clause.append("product_product.template_id = product_template.id")
query.add_join(("product_template", "product_category", "categ_id", "id", "categ_id"), implicit=False, outer=False) # add normal join
query.add_join(("product_product", "res_user", "user_id", "id", "user_id"), implicit=False, outer=True) # outer join
self.assertEquals(query.get_sql()[0].strip(),
""""product_product" LEFT JOIN "res_user" as "product_product__user_id" ON ("product_product"."user_id" = "product_product__user_id"."id"),"product_template" JOIN "product_category" as "product_template__categ_id" ON ("product_template"."categ_id" = "product_template__categ_id"."id") """.strip())
self.assertEquals(query.get_sql()[1].strip(), """product_product.template_id = product_template.id""".strip())
def test_query_chained_explicit_joins(self):
query = Query()
query.tables.extend(['"product_product"', '"product_template"'])
query.where_clause.append("product_product.template_id = product_template.id")
query.add_join(("product_template", "product_category", "categ_id", "id", "categ_id"), implicit=False, outer=False) # add normal join
query.add_join(("product_template__categ_id", "res_user", "user_id", "id", "user_id"), implicit=False, outer=True) # CHAINED outer join
self.assertEquals(query.get_sql()[0].strip(),
""""product_product","product_template" JOIN "product_category" as "product_template__categ_id" ON ("product_template"."categ_id" = "product_template__categ_id"."id") LEFT JOIN "res_user" as "product_template__categ_id__user_id" ON ("product_template__categ_id"."user_id" = "product_template__categ_id__user_id"."id")""".strip())
self.assertEquals(query.get_sql()[1].strip(), """product_product.template_id = product_template.id""".strip())
def test_mixed_query_chained_explicit_implicit_joins(self):
query = Query()
query.tables.extend(['"product_product"', '"product_template"'])
query.where_clause.append("product_product.template_id = product_template.id")
query.add_join(("product_template", "product_category", "categ_id", "id", "categ_id"), implicit=False, outer=False) # add normal join
query.add_join(("product_template__categ_id", "res_user", "user_id", "id", "user_id"), implicit=False, outer=True) # CHAINED outer join
query.tables.append('"account.account"')
query.where_clause.append("product_category.expense_account_id = account_account.id") # additional implicit join
self.assertEquals(query.get_sql()[0].strip(),
""""product_product","product_template" JOIN "product_category" as "product_template__categ_id" ON ("product_template"."categ_id" = "product_template__categ_id"."id") LEFT JOIN "res_user" as "product_template__categ_id__user_id" ON ("product_template__categ_id"."user_id" = "product_template__categ_id__user_id"."id"),"account.account" """.strip())
self.assertEquals(query.get_sql()[1].strip(), """product_product.template_id = product_template.id AND product_category.expense_account_id = account_account.id""".strip())
def test_raise_missing_lhs(self):
query = Query()
query.tables.append('"product_product"')
self.assertRaises(AssertionError, query.add_join, ("product_template", "product_category", "categ_id", "id", "categ_id"), implicit=False, outer=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 5,766,070,072,963,105,000 | 6,307,262,167,533,454,000 | 70.545455 | 360 | 0.655654 | false |
apache/airflow
|
airflow/sentry.py
|
2
|
6613
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Sentry Integration"""
import logging
from functools import wraps
from airflow.configuration import conf
from airflow.utils.session import find_session_idx, provide_session
from airflow.utils.state import State
log = logging.getLogger(__name__)
class DummySentry:
"""Blank class for Sentry."""
@classmethod
def add_tagging(cls, task_instance):
"""Blank function for tagging."""
@classmethod
def add_breadcrumbs(cls, task_instance, session=None):
"""Blank function for breadcrumbs."""
@classmethod
def enrich_errors(cls, run):
"""Blank function for formatting a TaskInstance._run_raw_task."""
return run
def flush(self):
"""Blank function for flushing errors."""
Sentry: DummySentry = DummySentry()
if conf.getboolean("sentry", 'sentry_on', fallback=False):
import sentry_sdk
# Verify blinker installation
from blinker import signal # noqa: F401
from sentry_sdk.integrations.flask import FlaskIntegration
from sentry_sdk.integrations.logging import ignore_logger
class ConfiguredSentry(DummySentry):
"""Configure Sentry SDK."""
SCOPE_TAGS = frozenset(("task_id", "dag_id", "execution_date", "operator", "try_number"))
SCOPE_CRUMBS = frozenset(("task_id", "state", "operator", "duration"))
UNSUPPORTED_SENTRY_OPTIONS = frozenset(
(
"integrations",
"in_app_include",
"in_app_exclude",
"ignore_errors",
"before_breadcrumb",
"before_send",
"transport",
)
)
def __init__(self):
"""Initialize the Sentry SDK."""
ignore_logger("airflow.task")
ignore_logger("airflow.jobs.backfill_job.BackfillJob")
executor_name = conf.get("core", "EXECUTOR")
sentry_flask = FlaskIntegration()
# LoggingIntegration is set by default.
integrations = [sentry_flask]
if executor_name == "CeleryExecutor":
from sentry_sdk.integrations.celery import CeleryIntegration
sentry_celery = CeleryIntegration()
integrations.append(sentry_celery)
dsn = None
sentry_config_opts = conf.getsection("sentry") or {}
if sentry_config_opts:
sentry_config_opts.pop("sentry_on")
old_way_dsn = sentry_config_opts.pop("sentry_dsn", None)
new_way_dsn = sentry_config_opts.pop("dsn", None)
# support backward compatibility with the old-style dsn option
dsn = old_way_dsn or new_way_dsn
unsupported_options = self.UNSUPPORTED_SENTRY_OPTIONS.intersection(sentry_config_opts.keys())
if unsupported_options:
log.warning(
"There are unsupported options in [sentry] section: %s",
", ".join(unsupported_options),
)
if dsn:
sentry_sdk.init(dsn=dsn, integrations=integrations, **sentry_config_opts)
else:
# Setting up Sentry using environment variables.
log.debug("Defaulting to SENTRY_DSN in environment.")
sentry_sdk.init(integrations=integrations, **sentry_config_opts)
def add_tagging(self, task_instance):
"""Function to add tagging for a task_instance."""
task = task_instance.task
with sentry_sdk.configure_scope() as scope:
for tag_name in self.SCOPE_TAGS:
attribute = getattr(task_instance, tag_name)
if tag_name == "operator":
attribute = task.__class__.__name__
scope.set_tag(tag_name, attribute)
@provide_session
def add_breadcrumbs(self, task_instance, session=None):
"""Function to add breadcrumbs inside of a task_instance."""
if session is None:
return
execution_date = task_instance.execution_date
task = task_instance.task
dag = task.dag
task_instances = dag.get_task_instances(
state={State.SUCCESS, State.FAILED},
end_date=execution_date,
start_date=execution_date,
session=session,
)
for ti in task_instances:
data = {}
for crumb_tag in self.SCOPE_CRUMBS:
data[crumb_tag] = getattr(ti, crumb_tag)
sentry_sdk.add_breadcrumb(category="completed_tasks", data=data, level="info")
def enrich_errors(self, func):
"""Wrap TaskInstance._run_raw_task to support task specific tags and breadcrumbs."""
session_args_idx = find_session_idx(func)
@wraps(func)
def wrapper(task_instance, *args, **kwargs):
# Wrapping the _run_raw_task function with push_scope to contain
# tags and breadcrumbs to a specific Task Instance
try:
session = kwargs.get('session', args[session_args_idx])
except IndexError:
session = None
with sentry_sdk.push_scope():
try:
return func(task_instance, *args, **kwargs)
except Exception as e:
self.add_tagging(task_instance)
self.add_breadcrumbs(task_instance, session=session)
sentry_sdk.capture_exception(e)
raise
return wrapper
def flush(self):
sentry_sdk.flush()
Sentry = ConfiguredSentry()
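# Editorial note -- illustrative airflow.cfg snippet enabling the integration;
# the DSN value is a placeholder, and any extra keys in the [sentry] section
# are passed straight through to sentry_sdk.init():
#
#   [sentry]
#   sentry_on = True
#   dsn = https://<public_key>@sentry.example.com/<project_id>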
|
apache-2.0
| -1,952,327,723,533,672,400 | 2,566,089,253,250,235,000 | 36.151685 | 109 | 0.583245 | false |
teltek/edx-platform
|
openedx/core/djangoapps/user_api/management/tests/test_bulk_rehash_retired_usernames.py
|
6
|
6666
|
"""
Test the bulk_rehash_retired_usernames management command
"""
from mock import call, patch
import pytest
from django.conf import settings
from django.core.management import call_command
from user_util.user_util import get_retired_username
from lms.lib import comment_client
from openedx.core.djangoapps.user_api.accounts.tests.retirement_helpers import (
setup_retirement_states, fake_completed_retirement
)
from openedx.core.djangoapps.user_api.models import UserRetirementStatus
from openedx.core.djangolib.testing.utils import skip_unless_lms
from student.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
def _setup_users():
"""
Creates and returns test users in the different states of needing rehash:
- Skipped: the retired username does not require updating; some of these are fake-retired
- Needing rehash: has been fake-retired and renamed so that a hash update is triggered
"""
# When we loop through creating users, take additional action on these
user_indexes_to_be_fake_retired = (2, 4, 6, 8, 10)
user_indexes_to_be_rehashed = (4, 6)
users_skipped = []
users_needing_rehash = []
retirements = {}
# Create some test users with retirements
for i in range(1, 11):
user = UserFactory()
retirement = UserRetirementStatus.create_retirement(user)
retirements[user.id] = retirement
if i in user_indexes_to_be_fake_retired:
fake_completed_retirement(user)
if i in user_indexes_to_be_rehashed:
# In order for the user to need a rehash, the new hash must be
# different; we force that here.
retirement.retired_username = retirement.retired_username.upper()
user.username = retirement.retired_username
retirement.save()
user.save()
users_needing_rehash.append(user)
else:
users_skipped.append(user)
else:
users_skipped.append(user)
return users_skipped, users_needing_rehash, retirements
@skip_unless_lms
@pytest.mark.usefixtures("setup_retirement_states")
@patch('lms.lib.comment_client.User.retire')
def test_successful_rehash(retire_user_forums, capsys):
"""
Run the command with users of all different hash statuses, expect success
"""
users_skipped, users_needing_rehash, retirements = _setup_users()
call_command('bulk_rehash_retired_usernames')
output = capsys.readouterr().out
# Make sure forums was called the correct number of times
assert retire_user_forums.call_count == 2
for user in users_skipped:
assert "User ID {} because the hash would not change.".format(user.id) in output
expected_username_calls = []
for user in users_needing_rehash:
retirement = retirements[user.id]
user.refresh_from_db()
retirement.refresh_from_db()
new_retired_username = get_retired_username(
retirement.original_username,
settings.RETIRED_USER_SALTS,
settings.RETIRED_USERNAME_FMT
)
expected_username_calls.append(call(new_retired_username))
assert "User ID {} to rehash their retired username".format(user.id) in output
assert new_retired_username == user.username
assert new_retired_username == retirement.retired_username
retire_user_forums.assert_has_calls(expected_username_calls)
@skip_unless_lms
@pytest.mark.usefixtures("setup_retirement_states")
@patch('lms.lib.comment_client.User.retire')
def test_forums_failed(retire_user_forums, capsys):
"""
Run the command when the forums retirement call raises an exception; expect the retired usernames to be left unchanged
"""
users_skipped, users_needing_rehash, retirements = _setup_users()
retire_user_forums.side_effect = Exception('something bad happened with forums')
call_command('bulk_rehash_retired_usernames')
output = capsys.readouterr().out
# Make sure forums was called the correct number of times
assert retire_user_forums.call_count == 2
for user in users_skipped:
assert "User ID {} because the hash would not change.".format(user.id) in output
expected_username_calls = []
for user in users_needing_rehash:
retirement = retirements[user.id]
user.refresh_from_db()
retirement.refresh_from_db()
new_retired_username = get_retired_username(
retirement.original_username,
settings.RETIRED_USER_SALTS,
settings.RETIRED_USERNAME_FMT
)
expected_username_calls.append(call(new_retired_username))
assert "User ID {} to rehash their retired username".format(user.id) in output
# Confirm that the usernames are *not* updated, due to the forums error
assert new_retired_username != user.username
assert new_retired_username != retirement.retired_username
assert "FAILED! 2 retirements failed to rehash. Retirement IDs:" in output
retire_user_forums.assert_has_calls(expected_username_calls)
@skip_unless_lms
@pytest.mark.usefixtures("setup_retirement_states")
@patch('lms.lib.comment_client.User.retire')
def test_forums_404(retire_user_forums, capsys):
"""
Run the command when forums retirement returns a 404; expect success, since a missing forums user is treated as non-blocking
"""
users_skipped, users_needing_rehash, retirements = _setup_users()
retire_user_forums.side_effect = comment_client.utils.CommentClientRequestError('not found', status_codes=404)
call_command('bulk_rehash_retired_usernames')
output = capsys.readouterr().out
# Make sure forums was called the correct number of times
assert retire_user_forums.call_count == 2
for user in users_skipped:
assert "User ID {} because the hash would not change.".format(user.id) in output
expected_username_calls = []
for user in users_needing_rehash:
retirement = retirements[user.id]
user.refresh_from_db()
retirement.refresh_from_db()
new_retired_username = get_retired_username(
retirement.original_username,
settings.RETIRED_USER_SALTS,
settings.RETIRED_USERNAME_FMT
)
expected_username_calls.append(call(new_retired_username))
assert "User ID {} to rehash their retired username".format(user.id) in output
# Confirm that the usernames *are* updated, since this is a non-blocking forums error
assert new_retired_username == user.username
assert new_retired_username == retirement.retired_username
assert "Success!" in output
retire_user_forums.assert_has_calls(expected_username_calls)
|
agpl-3.0
| 3,921,841,800,674,461,700 | 4,085,678,418,882,998,300 | 37.310345 | 114 | 0.690069 | false |
teddym6/qualitybots
|
src/appengine/handlers/config_handler.py
|
26
|
2465
|
#!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handler for setting configuration options for the system."""
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from handlers import base
from models import aws_account_details
SET_AWS_ACCOUNT_URL = '/config/set_aws_account'
CONFIG_URL = '/config/config'
class SetAwsAccount(base.BaseHandler):
"""Handler to allow an admin to update the AWS credentials."""
# Disable the "Invalid method name" warnings.
# pylint: disable-msg=C6409
def post(self):
"""Allows an admin user to set the AWS credentials used by the system.
Url Params:
aws_account_number: Amazon EC2 account number.
aws_access_key_id: AWS access Key ID.
aws_secret_access_key: AWS secret access key.
"""
aws_account_number = self.GetRequiredParameter('aws_account_number')
aws_access_key_id = self.GetRequiredParameter('aws_access_key_id')
aws_secret_access_key = self.GetRequiredParameter('aws_secret_access_key')
account_details = aws_account_details.AwsAccountDetails.get()
if not account_details:
account_details = aws_account_details.AwsAccountDetails()
account_details.aws_account_number = aws_account_number
account_details.aws_access_key_id = aws_access_key_id
account_details.aws_secret_access_key = aws_secret_access_key
account_details.put()
class ConfigPage(base.BaseHandler):
"""Handler for the configuration page."""
# Disable the "Invalid method name" warnings.
# pylint: disable-msg=C6409
def get(self):
"""Displays the Add Url landing page."""
self.RenderTemplate('config_settings.html', {})
application = webapp.WSGIApplication(
[(SET_AWS_ACCOUNT_URL, SetAwsAccount),
(CONFIG_URL, ConfigPage)],
debug=True)
def main():
run_wsgi_app(application)
if __name__ == '__main__':
main()
|
apache-2.0
| 1,987,146,248,397,399,000 | -5,090,851,143,236,581,000 | 28.698795 | 78 | 0.723732 | false |
posterior/loom
|
loom/test/test_posterior_enum.py
|
2
|
26903
|
# Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
# Copyright (c) 2015, Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
from itertools import imap, product
from nose import SkipTest
from nose.tools import assert_true, assert_false, assert_equal
import numpy
import numpy.random
from distributions.tests.util import seed_all
from distributions.util import scores_to_probs
from distributions.io.stream import protobuf_stream_load, protobuf_stream_dump
from distributions.lp.clustering import PitmanYor
from goftests import multinomial_goodness_of_fit
from loom.util import tempdir
import loom.schema_pb2
import loom.schema
import loom.format
import loom.runner
import loom.util
import loom.test.util
import parsable
parsable = parsable.Parsable()
TRUNCATE_COUNT = 32
MIN_GOODNESS_OF_FIT = 5e-4
SCORE_TOL = 1e-1 # FIXME why does this need to be so large?
SEED = 123
FEATURE_TYPES = loom.schema.MODELS.copy()
DENSITIES = [
1.0,
0.5,
0.0,
]
# Cross Cat Latent Space Sizes up to 10000000, generated by:
# python test_posterior_enum.py datasets 10000000
LATENT_SIZES = [
[1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147, 115975, 678570, 4213597],
[1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147, 115975, 678570, 4213597],
[1, 2, 6, 22, 94, 454, 2430, 14214, 89918, 610182, 4412798],
[1, 5, 30, 205, 1555, 12880, 115155, 1101705],
[1, 15, 240, 4065, 72465, 1353390],
[1, 52, 2756, 148772, 8174244],
[1, 203, 41412, 8489257],
[1, 877, 770006],
[1, 4140],
[1, 21147],
[1, 115975],
[1, 678570],
[1, 4213597],
]
CAT_MAX_SIZE = 100000
KIND_MAX_SIZE = 205
GRID_SIZE = 2
PITMAN_YOR_GRID = [
{'alpha': 2.0, 'd': 0.1},
{'alpha': 10., 'd': 0.1},
]
HYPER_PRIOR = {
'topology': PITMAN_YOR_GRID,
'clustering': PITMAN_YOR_GRID,
'bb': {
'alpha': [0.5, 2.0],
'beta': [0.5, 2.0],
},
'dd': {
'alpha': [.5, 1.5],
},
'dpd': {
'alpha': [.5, 1.5],
'gamma': [.5, 1.5],
},
'gp': {
'alpha': [.5, 1.5],
'inv_beta': [.5, 1.5],
},
'nich': {
'kappa': [.5, 1.5],
'mu': [-1., 1.],
'nu': [.5, 1.5],
'sigmasq': [.5, 1.5],
}
}
CLUSTERING = PitmanYor.from_dict({'alpha': 2.0, 'd': 0.1})
if __name__ == '__main__' and sys.stdout.isatty():
colorize = {
'Info': '\x1b[34mInfo\x1b[0m',
'Warn': '\x1b[33mWarn\x1b[0m',
'Fail': '\x1b[31mFail\x1b[0m',
'Pass': '\x1b[32mPass\x1b[0m',
}
else:
colorize = {}
def LOG(prefix, casename, comment=''):
prefix = colorize.get(prefix, prefix)
message = '{: <4} {: <18} {}'.format(prefix, casename, comment)
sys.stdout.write(message)
sys.stdout.write('\n')
sys.stdout.flush()
return message
@parsable.command
def infer_cats(max_size=CAT_MAX_SIZE, debug=False):
'''
Test category inference.
'''
dimensions = [
(object_count, feature_count)
for object_count, sizes in enumerate(LATENT_SIZES)
for feature_count, size in enumerate(sizes)
if object_count > 1 and feature_count > 0 and size <= max_size
]
datasets = product(
dimensions,
FEATURE_TYPES,
DENSITIES,
[False],
[debug],
[None])
datasets = list(datasets)
parallel_map = map if debug else loom.util.parallel_map
errors = sum(parallel_map(_test_dataset, datasets), [])
message = '\n'.join(['Failed {} Cases:'.format(len(errors))] + errors)
assert_false(errors, message)
@parsable.command
def infer_kinds(max_size=KIND_MAX_SIZE, debug=False):
'''
Test kind inference.
'''
dimensions = [
(object_count, feature_count)
for object_count, sizes in enumerate(LATENT_SIZES)
for feature_count, size in enumerate(sizes)
if object_count > 0 and feature_count > 0 and size <= max_size
if object_count + feature_count > 2
]
datasets = product(
dimensions,
FEATURE_TYPES,
DENSITIES,
[True],
[debug],
[None])
datasets = list(datasets)
parallel_map = map if debug else loom.util.parallel_map
errors = sum(parallel_map(_test_dataset, datasets), [])
message = '\n'.join(['Failed {} Cases:'.format(len(errors))] + errors)
assert_false(errors, message)
@parsable.command
def infer_feature_hypers(max_size=CAT_MAX_SIZE, debug=False):
'''
Test feature hyperparameter inference.
'''
dimensions = [
(object_count, feature_count)
for object_count, sizes in enumerate(LATENT_SIZES)
for feature_count, size in enumerate(sizes)
if object_count > 1 and feature_count == 1 and size <= max_size
]
hyper_prior = [
(hp_name, (param_name, param_grid))
for hp_name, param_grids in HYPER_PRIOR.iteritems()
if hp_name not in ['topology', 'clustering']
for param_name, param_grid in param_grids.iteritems()
]
datasets = filter(
lambda x: x[1] == x[5][0],
product(
dimensions,
FEATURE_TYPES,
DENSITIES,
[False],
[debug],
hyper_prior))
datasets = list(datasets)
parallel_map = map if debug else loom.util.parallel_map
errors = sum(parallel_map(_test_dataset, datasets), [])
message = '\n'.join(['Failed {} Cases:'.format(len(errors))] + errors)
assert_false(errors, message)
@parsable.command
def infer_topology_hypers(max_size=KIND_MAX_SIZE, debug=False):
'''
Test topology hyperparameter inference.
'''
dimensions = [
(object_count, feature_count)
for object_count, sizes in enumerate(LATENT_SIZES)
for feature_count, size in enumerate(sizes)
if object_count > 1 and feature_count > 1 and size <= max_size
]
hyper_prior = [('topology', HYPER_PRIOR['topology'])]
datasets = product(
dimensions,
FEATURE_TYPES,
DENSITIES,
[True],
[debug],
hyper_prior)
datasets = list(datasets)
parallel_map = map if debug else loom.util.parallel_map
errors = sum(parallel_map(_test_dataset, datasets), [])
message = '\n'.join(['Failed {} Cases:'.format(len(errors))] + errors)
assert_false(errors, message)
@parsable.command
def infer_clustering_hypers(max_size=CAT_MAX_SIZE, debug=False):
'''
Test clustering hyperparameter inference.
'''
dimensions = [
(object_count, feature_count)
for object_count, sizes in enumerate(LATENT_SIZES)
for feature_count, size in enumerate(sizes)
if object_count > 1 and feature_count == 1 and size <= max_size
]
# FIXME(jglidden) this uses too much tuple trickery
hyper_prior = [('clustering', HYPER_PRIOR['clustering'])]
datasets = product(
dimensions,
FEATURE_TYPES,
DENSITIES,
[False],
[debug],
hyper_prior)
datasets = list(datasets)
parallel_map = map if debug else loom.util.parallel_map
errors = sum(parallel_map(_test_dataset, datasets), [])
message = '\n'.join(['Failed {} Cases:'.format(len(errors))] + errors)
assert_false(errors, message)
# Run tiny examples through nose and expensive examples by hand.
def test_cat_inference():
infer_cats(100)
def test_kind_inference():
infer_kinds(50)
def test_feature_hyper_inference():
infer_feature_hypers(100)
def test_topology_hyper_inference():
infer_topology_hypers(50)
def test_clustering_hyper_inference():
infer_clustering_hypers(100)
def _test_dataset(args):
dim, feature_type, density, infer_kinds, debug, hyper_prior = args
object_count, feature_count = dim
with tempdir(cleanup_on_error=(not debug)):
seed_all(SEED)
config_name = os.path.abspath('config.pb')
model_base_name = 'model.pb'
model_name = os.path.abspath(model_base_name)
rows_name = os.path.abspath('rows.pbs')
models = generate_model(feature_count, feature_type, hyper_prior)
model, fixed_hyper_models = models
dump_model(model, model_name)
fixed_model_names = []
for i, fm in enumerate(fixed_hyper_models):
fixed_model_base = 'fixed-{}-{}'.format(i, model_base_name)
fixed_model_name = os.path.abspath(fixed_model_base)
fixed_model_names.append(fixed_model_name)
dump_model(fm, fixed_model_name)
if hyper_prior is None:
assert len(fixed_model_names) == 0
rows = generate_rows(
object_count,
feature_count,
feature_type,
density)
dump_rows(rows, rows_name)
infer_cats = (object_count > 1)
infer_hypers = (hyper_prior is not None)
if infer_kinds:
sample_count = 10 * LATENT_SIZES[object_count][feature_count]
iterations = 32
else:
sample_count = 10 * LATENT_SIZES[object_count][1]
iterations = 0
config = {
'posterior_enum': {
'sample_count': sample_count,
'sample_skip': 10,
},
'kernels': {
'hyper': {
'run': infer_hypers,
'parallel': False,
},
'kind': {
'iterations': iterations,
'row_queue_capacity': 0,
'score_parallel': False,
},
},
}
loom.config.config_dump(config, config_name)
casename = '{}-{}-{}-{}-{}{}{}'.format(
object_count,
feature_count,
feature_type,
density,
('C' if infer_cats else ''),
('K' if infer_kinds else ''),
('H' if infer_hypers else ''))
# LOG('Run', casename)
error = _test_dataset_config(
casename,
object_count,
feature_count,
config_name,
model_name,
fixed_model_names,
rows_name,
config,
debug)
return [] if error is None else [error]
def add_sample(sample, score, counts_dict, scores_dict):
if sample in counts_dict:
counts_dict[sample] += 1
scores_dict[sample] = score
expected = score
assert abs(score - expected) < SCORE_TOL, \
'inconsistent score: {} vs {}'.format(score, expected)
else:
counts_dict[sample] = 1
scores_dict[sample] = score
def process_fixed_samples(fixed_hyper_samples, unfixed_latents):
fixed_scores = []
fixed_counts = []
for f_samples in fixed_hyper_samples:
fixed_scores_dict = {}
fixed_counts_dict = {}
for sample, score in f_samples:
add_sample(sample, score, fixed_counts_dict, fixed_scores_dict)
fixed_scores.append(fixed_scores_dict)
fixed_counts.append(fixed_counts_dict)
all_fixed_latents = [set([lat for lat in fd]) for fd in fixed_scores]
fixed_latents = set.intersection(*all_fixed_latents)
latents = [lat for lat in unfixed_latents if lat in fixed_latents]
scores_dict = {}
for latent in latents:
latent_scores = [fd[latent] for fd in fixed_scores]
scores_dict[latent] = numpy.logaddexp.reduce(latent_scores)
return latents, scores_dict
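# Editorial note: numpy.logaddexp.reduce above computes log(sum_i exp(score_i))
# in a numerically stable way, i.e. each latent's unnormalized log probability
# accumulated over the fixed hyperparameter grid points.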
def _test_dataset_config(
casename,
object_count,
feature_count,
config_name,
model_name,
fixed_model_names,
rows_name,
config,
debug):
dataset = {'model': model_name, 'rows': rows_name, 'config': config_name}
samples = generate_samples(casename, dataset, debug)
fixed_hyper_samples = []
for fixed_model_name in fixed_model_names:
fixed_dataset = dataset.copy()
fixed_dataset['model'] = fixed_model_name
fs = generate_samples(None, fixed_dataset, debug)
fixed_hyper_samples.append(fs)
sample_count = config['posterior_enum']['sample_count']
counts_dict = {}
scores_dict = {}
actual_count = 0
for sample, score in samples:
actual_count += 1
add_sample(sample, score, counts_dict, scores_dict)
assert_equal(actual_count, sample_count)
if fixed_hyper_samples:
latents, scores_dict = process_fixed_samples(
fixed_hyper_samples,
scores_dict.keys())
useable_count = sum([counts_dict[lat] for lat in latents])
if useable_count < sample_count:
LOG('Warn', casename, 'scores found for {} / {} samples'.format(
useable_count,
sample_count))
sample_count = useable_count
else:
latents = scores_dict.keys()
actual_latent_count = len(latents)
infer_kinds = (config['kernels']['kind']['iterations'] > 0)
if infer_kinds:
expected_latent_count = count_crosscats(object_count, feature_count)
else:
expected_latent_count = BELL_NUMBERS[object_count]
assert actual_latent_count <= expected_latent_count, 'programmer error'
if actual_latent_count < expected_latent_count:
LOG('Warn', casename, 'found only {} / {} latents'.format(
actual_latent_count,
expected_latent_count))
counts = numpy.array([counts_dict[key] for key in latents])
scores = numpy.array([scores_dict[key] for key in latents])
probs = scores_to_probs(scores)
highest_by_prob = numpy.argsort(probs)[::-1][:TRUNCATE_COUNT]
is_accurate = lambda p: sample_count * p * (1 - p) >= 1
highest_by_prob = [i for i in highest_by_prob if is_accurate(probs[i])]
highest_by_count = numpy.argsort(counts)[::-1][:TRUNCATE_COUNT]
highest = list(set(highest_by_prob) | set(highest_by_count))
truncated = len(highest_by_prob) < len(probs)
if len(highest_by_prob) < 1:
LOG('Warn', casename, 'test is inaccurate; use more samples')
return None
goodness_of_fit = multinomial_goodness_of_fit(
probs[highest_by_prob],
counts[highest_by_prob],
total_count=sample_count,
truncated=truncated)
comment = 'goodness of fit = {:0.3g}'.format(goodness_of_fit)
if goodness_of_fit > MIN_GOODNESS_OF_FIT:
LOG('Pass', casename, comment)
return None
else:
print 'EXPECT\tACTUAL\tCHI\tVALUE'
lines = [(probs[i], counts[i], latents[i]) for i in highest]
for prob, count, latent in sorted(lines, reverse=True):
expect = prob * sample_count
chi = (count - expect) * expect ** -0.5
pretty = pretty_latent(latent)
print '{:0.1f}\t{}\t{:+0.1f}\t{}'.format(
expect,
count,
chi,
pretty)
return LOG('Fail', casename, comment)
def generate_model(feature_count, feature_type, hyper_prior=None):
module = FEATURE_TYPES[feature_type]
shared = module.Shared.from_dict(module.EXAMPLES[0]['shared'])
shared.realize()
cross_cat = loom.schema_pb2.CrossCat()
kind = cross_cat.kinds.add()
CLUSTERING.protobuf_dump(kind.product_model.clustering)
features = getattr(kind.product_model, feature_type)
for featureid in xrange(feature_count):
shared.protobuf_dump(features.add())
kind.featureids.append(featureid)
CLUSTERING.protobuf_dump(cross_cat.topology)
# FIXME(jglidden) this belongs in a separate function
fixed_models = []
if hyper_prior is not None:
hp_name, grid_in = hyper_prior
if hp_name == 'topology':
get_grid_out = lambda model: model.hyper_prior.topology
extend = lambda grid_out, point: PitmanYor.to_protobuf(
point,
grid_out.add())
elif hp_name == 'clustering':
get_grid_out = lambda model: model.hyper_prior.clustering
extend = lambda grid_out, point: PitmanYor.to_protobuf(
point,
grid_out.add())
else:
param_name, grid_in = grid_in
get_grid_out = lambda model: getattr(
getattr(model.hyper_prior, hp_name),
param_name)
extend = lambda grid_out, point: grid_out.extend([point])
cross_cat_base = loom.schema_pb2.CrossCat()
cross_cat_base.MergeFrom(cross_cat)
for point in grid_in:
extend(get_grid_out(cross_cat), point)
if hp_name == 'dd':
pass
else:
fixed_model = loom.schema_pb2.CrossCat()
fixed_model.MergeFrom(cross_cat_base)
extend(get_grid_out(fixed_model), point)
fixed_models.append(fixed_model)
if hp_name == 'dd':
assert feature_count == 1
dim = len(shared.dump()['alphas'])
if dim > 4:
raise SkipTest('FIXME test runs out of memory')
for grid in product(*[grid_in] * dim):
fixed_model = loom.schema_pb2.CrossCat()
fixed_model.MergeFrom(cross_cat_base)
alphas = fixed_model.kinds[0].product_model.dd[0].alphas
assert len(alphas) == len(grid)
for i, alpha in enumerate(grid):
alphas[i] = alpha
fixed_models.append(fixed_model)
return cross_cat, fixed_models
def test_generate_model():
for feature_type in FEATURE_TYPES:
generate_model(10, feature_type)
def dump_model(model, model_name):
with open(model_name, 'wb') as f:
f.write(model.SerializeToString())
def generate_rows(object_count, feature_count, feature_type, density):
assert object_count > 0, object_count
assert feature_count > 0, feature_count
assert 0 <= density and density <= 1, density
# generate structure
feature_assignments = CLUSTERING.sample_assignments(feature_count)
kind_count = len(set(feature_assignments))
object_assignments = [
CLUSTERING.sample_assignments(object_count)
for _ in xrange(kind_count)
]
group_counts = [
len(set(assignments))
for assignments in object_assignments
]
# generate data
module = FEATURE_TYPES[feature_type]
shared = module.Shared.from_dict(module.EXAMPLES[0]['shared'])
def sampler_create():
group = module.Group()
group.init(shared)
sampler = module.Sampler()
sampler.init(shared, group)
return sampler
table = [[None] * feature_count for _ in xrange(object_count)]
for f, k in enumerate(feature_assignments):
samplers = [sampler_create() for _ in xrange(group_counts[k])]
for i, g in enumerate(object_assignments[k]):
if numpy.random.uniform() < density:
table[i][f] = samplers[g].eval(shared)
return table
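# Illustrative sketch (not part of the original file): the table returned
# above is a dense object_count x feature_count grid in which each cell is
# either a sampled value or None; cells are observed independently with
# probability `density`, e.g. one possible 3 x 2 result at density=0.5 is
#   [[0.7, None],
#    [None, 1.2],
#    [0.1, None]]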
def test_generate_rows():
for feature_type in FEATURE_TYPES:
table = generate_rows(100, 100, feature_type, 1.0)
assert_true(all(cell is not None for row in table for cell in row))
table = generate_rows(100, 100, feature_type, 0.0)
assert_true(all(cell is None for row in table for cell in row))
table = generate_rows(100, 100, feature_type, 0.5)
assert_true(any(cell is None for row in table for cell in row))
assert_true(any(cell is not None for row in table for cell in row))
def serialize_rows(table):
NONE = loom.schema_pb2.ProductValue.Observed.NONE
DENSE = loom.schema_pb2.ProductValue.Observed.DENSE
message = loom.schema_pb2.Row()
for i, values in enumerate(table):
message.Clear()
message.id = i
message.diff.neg.observed.sparsity = NONE
data = message.diff.pos
data.observed.sparsity = DENSE
for value in values:
data.observed.dense.append(value is not None)
if value is None:
pass
elif isinstance(value, bool):
data.booleans.append(value)
elif isinstance(value, int):
data.counts.append(value)
elif isinstance(value, float):
data.reals.append(value)
else:
raise ValueError('unknown value type: {}'.format(value))
yield message.SerializeToString()
def dump_rows(table, rows_name):
protobuf_stream_dump(serialize_rows(table), rows_name)
def test_dump_rows():
for feature_type in FEATURE_TYPES:
table = generate_rows(10, 10, feature_type, 0.5)
with tempdir():
rows_name = os.path.abspath('rows.pbs')
dump_rows(table, rows_name)
message = loom.schema_pb2.Row()
for string in protobuf_stream_load(rows_name):
message.ParseFromString(string)
# print message
def run_posterior_enum(casename, dataset, results, debug, sparsify=True):
if not sparsify:
loom.runner.posterior_enum(
config_in=dataset['config'],
rows_in=dataset['rows'],
model_in=dataset['model'],
samples_out=results['samples'],
debug=debug)
else:
loom.format.make_schema(
model_in=dataset['model'],
schema_out=results['schema'])
loom.format.make_schema_row(
schema_in=results['schema'],
schema_row_out=results['schema_row'])
loom.runner.tare(
schema_row_in=results['schema_row'],
rows_in=dataset['rows'],
tares_out=results['tares'],
debug=debug)
tare_count = sum(1 for _ in protobuf_stream_load(results['tares']))
if casename is not None and tare_count:
LOG('Info', casename, 'found {} tare rows'.format(tare_count))
loom.runner.sparsify(
schema_row_in=results['schema_row'],
tares_in=results['tares'],
rows_in=dataset['rows'],
rows_out=results['diffs'],
debug=debug)
loom.runner.posterior_enum(
config_in=dataset['config'],
rows_in=results['diffs'],
tares_in=results['tares'],
model_in=dataset['model'],
samples_out=results['samples'],
debug=debug)
def load_samples(filename):
message = loom.schema_pb2.PosteriorEnum.Sample()
for string in protobuf_stream_load(filename):
message.ParseFromString(string)
sample = parse_sample(message)
score = float(message.score)
yield sample, score
def generate_samples(casename, dataset, debug):
root = os.getcwd()
with tempdir(cleanup_on_error=(not debug)):
results = {
'schema': os.path.abspath('schema.json'),
'schema_row': os.path.abspath('schema_row.pb'),
'tares': os.path.abspath('tares.pbs'),
'diffs': os.path.abspath('diffs.pbs'),
'samples': os.path.abspath('samples.pbs.gz'),
}
os.chdir(root)
run_posterior_enum(casename, dataset, results, debug)
for sample in load_samples(results['samples']):
yield sample
def parse_sample(message):
return frozenset(
(
frozenset(kind.featureids),
frozenset(frozenset(group.rowids) for group in kind.groups)
)
for kind in message.kinds
)
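# Sketch of the canonical form produced by parse_sample (illustrative, not
# from the original file): a sample with one kind holding features {0, 1}
# and row groups {0, 2} and {1} parses to
#   frozenset([(frozenset([0, 1]),
#               frozenset([frozenset([0, 2]), frozenset([1])]))])
# The nested frozensets make structurally equal samples hash and compare
# equal regardless of kind or group ordering, which is what the counts_dict
# and scores_dict bookkeeping above relies on.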
def pretty_kind(kind):
featureids, groups = kind
return '{} |{}|'.format(
' '.join(imap(str, sorted(featureids))),
'|'.join(sorted(
' '.join(imap(str, sorted(group)))
for group in groups
))
)
def pretty_latent(latent):
return ' - '.join(sorted(pretty_kind(kind) for kind in latent))
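# Illustrative example of the pretty-printers above (not part of the
# original file): a latent with one kind holding feature 0 with row groups
# {0, 1} and {2}, and another kind holding features 1 and 2 with the single
# group {0, 1, 2}, renders as
#   '0 |0 1|2| - 1 2 |0 1 2|'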
# ----------------------------------------------------------------------------
# dataset suggestions
def enum_partitions(count):
if count == 0:
yield ()
elif count == 1:
yield ((1,),)
else:
for p in enum_partitions(count - 1):
yield p + ((count,),)
for i, part in enumerate(p):
yield p[:i] + (part + (count,),) + p[1 + i:]
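def _example_enum_partitions():
    # Illustrative sketch, not part of the original test suite: the five set
    # partitions of {1, 2, 3}, matching BELL_NUMBERS[3] == 5 below.
    expected = [
        ((1,), (2,), (3,)),
        ((1, 3), (2,)),
        ((1,), (2, 3)),
        ((1, 2), (3,)),
        ((1, 2, 3),),
    ]
    assert list(enum_partitions(3)) == expected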
BELL_NUMBERS = [
1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147, 115975, 678570, 4213597,
27644437, 190899322, 1382958545, 10480142147, 82864869804, 682076806159,
]
def test_enum_partitions():
for i, bell_number in enumerate(BELL_NUMBERS):
if bell_number < 1e6:
count = sum(1 for _ in enum_partitions(i))
assert_equal(count, bell_number)
def count_crosscats(rows, cols):
return sum(
BELL_NUMBERS[rows] ** len(kinds)
for kinds in enum_partitions(cols))
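def _example_count_crosscats():
    # Illustrative sketch, not part of the original test suite: a cross-cat
    # latent is a column partition into kinds plus an independent row
    # partition per kind, giving BELL_NUMBERS[rows] ** len(kinds) states per
    # column partition; for 2 rows and 2 columns this is 2**2 + 2**1 == 6.
    assert count_crosscats(2, 2) == 6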
@parsable.command
def datasets(max_count=1000000):
'''
Suggest datasets based on bounded latent space size.
'''
enum_partitions
max_rows = 16
max_cols = 12
    print '# Cross Cat Latent Space Sizes up to {}, generated by:'.format(max_count)
print '# python {} datasets {}'.format(
os.path.basename(__file__),
max_count)
print 'LATENT_SIZES = ['
for rows in range(1 + max_rows):
counts = []
for cols in range(1 + max_cols):
count = count_crosscats(rows, cols)
if count > max_count:
break
counts.append(count)
if len(counts) > 1:
print ' [{}],'.format(', '.join(map(str, counts)))
print ']'
def test_datasets():
datasets(1000)
if __name__ == '__main__':
parsable.dispatch()
|
bsd-3-clause
| -1,779,413,843,091,918,600 | -8,809,772,729,358,689,000 | 31.491546 | 78 | 0.594692 | false |
TeamHG-Memex/frontera
|
examples/scripts/09_frontier_backends.py
|
8
|
1052
|
"""
Test different frontier backends
"""
from frontera import FrontierManager, Settings, FrontierTester, graphs
def test_logic(backend):
# Graph
graph = graphs.Manager('sqlite:///data/graph.db')
# Frontier
settings = Settings()
settings.BACKEND = backend
settings.LOGGING_MANAGER_ENABLED = True
settings.LOGGING_BACKEND_ENABLED = True
settings.LOGGING_DEBUGGING_ENABLED = False
settings.TEST_MODE = True
frontier = FrontierManager.from_settings(settings)
# Tester
tester = FrontierTester(frontier, graph)
tester.run(add_all_pages=True)
# Show crawling sequence
print '-'*80
print frontier.backend.name
print '-'*80
for page in tester.sequence:
print page.url
if __name__ == '__main__':
test_logic('frontera.contrib.backends.memory.FIFO')
test_logic('frontera.contrib.backends.memory.LIFO')
test_logic('frontera.contrib.backends.memory.BFS')
test_logic('frontera.contrib.backends.memory.DFS')
test_logic('frontera.contrib.backends.memory.RANDOM')
|
bsd-3-clause
| 6,539,914,074,791,331,000 | 9,039,355,916,287,384,000 | 28.222222 | 70 | 0.69962 | false |
bheesham/servo
|
tests/wpt/web-platform-tests/tools/html5lib/utils/entities.py
|
438
|
2734
|
import json
import html5lib
def parse(path="html5ents.xml"):
return html5lib.parse(open(path), treebuilder="lxml")
def entity_table(tree):
return dict((entity_name("".join(tr[0].xpath(".//text()"))),
entity_characters(tr[1].text))
for tr in tree.xpath("//h:tbody/h:tr",
namespaces={"h":"http://www.w3.org/1999/xhtml"}))
def entity_name(inp):
return inp.strip()
def entity_characters(inp):
return "".join(codepoint_to_character(item)
for item in inp.split()
if item)
def codepoint_to_character(inp):
return ("\U000"+inp[2:]).decode("unicode-escape")
def make_tests_json(entities):
test_list = make_test_list(entities)
tests_json = {"tests":
[make_test(*item) for item in test_list]
}
return tests_json
def make_test(name, characters, good):
return {
"description":test_description(name, good),
"input":"&%s"%name,
"output":test_expected(name, characters, good)
}
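# Illustrative example (not part of the original file): assuming the parsed
# entity table maps "amp;" to "&", make_test("amp;", "&", True) produces
#   {"description": "Named entity: amp; with a semi-colon",
#    "input": "&amp;",
#    "output": [["Character", "&"]]}
# since a good entity ending in a semi-colon emits no ParseError.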
def test_description(name, good):
with_semicolon = name.endswith(";")
semicolon_text = {True:"with a semi-colon",
False:"without a semi-colon"}[with_semicolon]
if good:
text = "Named entity: %s %s"%(name, semicolon_text)
else:
text = "Bad named entity: %s %s"%(name, semicolon_text)
return text
def test_expected(name, characters, good):
rv = []
if not good or not name.endswith(";"):
rv.append("ParseError")
rv.append(["Character", characters])
return rv
def make_test_list(entities):
tests = []
for entity_name, characters in entities.items():
if entity_name.endswith(";") and not subentity_exists(entity_name, entities):
tests.append((entity_name[:-1], "&" + entity_name[:-1], False))
tests.append((entity_name, characters, True))
return sorted(tests)
def subentity_exists(entity_name, entities):
for i in range(1, len(entity_name)):
if entity_name[:-i] in entities:
return True
return False
def make_entities_code(entities):
entities_text = "\n".join(" \"%s\": u\"%s\","%(
name, entities[name].encode(
"unicode-escape").replace("\"", "\\\""))
for name in sorted(entities.keys()))
return """entities = {
%s
}"""%entities_text
def main():
entities = entity_table(parse())
tests_json = make_tests_json(entities)
json.dump(tests_json, open("namedEntities.test", "w"), indent=4)
code = make_entities_code(entities)
open("entities_constants.py", "w").write(code)
if __name__ == "__main__":
main()
|
mpl-2.0
| 1,334,094,881,731,253,800 | 4,464,310,609,499,948,000 | 30.068182 | 86 | 0.579371 | false |
yipenggao/moose
|
python/MooseDocs/extensions/admonition.py
|
4
|
3946
|
##pylint: disable=missing-docstring
####################################################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
####################################################################################################
#pylint: enable=missing-docstring
import re
from markdown.blockprocessors import BlockProcessor
from MooseMarkdownExtension import MooseMarkdownExtension
from MooseMarkdownCommon import MooseMarkdownCommon
class AdmonitionExtension(MooseMarkdownExtension):
"""
    Extension for creating admonitions (e.g., warnings, errors, info, etc.).
"""
@staticmethod
def defaultConfig():
"""
        Default configuration options for AdmonitionExtension
"""
config = MooseMarkdownExtension.defaultConfig()
return config
def extendMarkdown(self, md, md_globals):
"""
Adds components to AdmonitionExtension.
"""
md.registerExtension(self)
config = self.getConfigs()
md.parser.blockprocessors.add('moose_admonition',
AdmonitionBlock(markdown_instance=md, **config),
'_begin')
def makeExtension(*args, **kwargs): #pylint: disable=invalid-name
"""
    Create AdmonitionExtension
"""
return AdmonitionExtension(*args, **kwargs)
class AdmonitionBlock(MooseMarkdownCommon, BlockProcessor):
"""
    Adds admonition functionality using syntax similar to other MOOSE syntax.
"""
RE = re.compile(r'!admonition\s+'
r'(?P<command>info|note|important|warning|danger|error)\s*' # commands
r'(?P<title>[^\n]*?)' # optional title (any non newline)
r'(?P<settings>\w+=.*?)?' # optional settings
r'\n(?P<message>.*?)(?:\Z|\n{2,})', # message
flags=re.DOTALL|re.MULTILINE)
@staticmethod
def defaultSettings():
"""Settings for AdmonitionBlock"""
settings = MooseMarkdownCommon.defaultSettings()
return settings
def __init__(self, markdown_instance=None, **kwargs):
MooseMarkdownCommon.__init__(self, **kwargs)
BlockProcessor.__init__(self, markdown_instance.parser)
self.markdown = markdown_instance
def test(self, parent, block):
"""
Check that block contains the defined RE.
"""
return self.RE.search(block)
def run(self, parent, blocks):
"""
Create the collapsible region with the listed requirements.
"""
block = blocks.pop(0)
match = self.RE.search(block)
command = match.group('command')
title = match.group('title').strip()
message = match.group('message').strip()
self.createAdmonition(command, message, title=title, parent=parent)
|
lgpl-2.1
| -1,693,998,647,060,128,000 | 4,797,582,963,508,006,000 | 44.356322 | 100 | 0.469083 | false |
chentao/thrift
|
lib/py/src/transport/THttpClient.py
|
51
|
4464
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from io import BytesIO
import os
import socket
import sys
import warnings
from six.moves import urllib
from six.moves import http_client
from .TTransport import *
import six
class THttpClient(TTransportBase):
"""Http implementation of TTransport base."""
def __init__(self, uri_or_host, port=None, path=None):
"""THttpClient supports two different types constructor parameters.
THttpClient(host, port, path) - deprecated
THttpClient(uri)
Only the second supports https.
"""
if port is not None:
warnings.warn(
"Please use the THttpClient('http://host:port/path') syntax",
DeprecationWarning,
stacklevel=2)
self.host = uri_or_host
self.port = port
assert path
self.path = path
self.scheme = 'http'
else:
parsed = urllib.parse.urlparse(uri_or_host)
self.scheme = parsed.scheme
assert self.scheme in ('http', 'https')
if self.scheme == 'http':
self.port = parsed.port or http_client.HTTP_PORT
elif self.scheme == 'https':
self.port = parsed.port or http_client.HTTPS_PORT
self.host = parsed.hostname
self.path = parsed.path
if parsed.query:
self.path += '?%s' % parsed.query
self.__wbuf = BytesIO()
self.__http = None
self.__http_response = None
self.__timeout = None
self.__custom_headers = None
def open(self):
if self.scheme == 'http':
self.__http = http_client.HTTPConnection(self.host, self.port)
else:
self.__http = http_client.HTTPSConnection(self.host, self.port)
def close(self):
self.__http.close()
self.__http = None
self.__http_response = None
def isOpen(self):
return self.__http is not None
def setTimeout(self, ms):
if not hasattr(socket, 'getdefaulttimeout'):
raise NotImplementedError
if ms is None:
self.__timeout = None
else:
self.__timeout = ms / 1000.0
def setCustomHeaders(self, headers):
self.__custom_headers = headers
def read(self, sz):
return self.__http_response.read(sz)
def write(self, buf):
self.__wbuf.write(buf)
def __withTimeout(f):
def _f(*args, **kwargs):
orig_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(args[0].__timeout)
try:
result = f(*args, **kwargs)
finally:
socket.setdefaulttimeout(orig_timeout)
return result
return _f
def flush(self):
if self.isOpen():
self.close()
self.open()
# Pull data out of buffer
data = self.__wbuf.getvalue()
self.__wbuf = BytesIO()
# HTTP request
self.__http.putrequest('POST', self.path)
# Write headers
self.__http.putheader('Content-Type', 'application/x-thrift')
self.__http.putheader('Content-Length', str(len(data)))
if not self.__custom_headers or 'User-Agent' not in self.__custom_headers:
user_agent = 'Python/THttpClient'
script = os.path.basename(sys.argv[0])
if script:
user_agent = '%s (%s)' % (user_agent, urllib.parse.quote(script))
self.__http.putheader('User-Agent', user_agent)
if self.__custom_headers:
for key, val in six.iteritems(self.__custom_headers):
self.__http.putheader(key, val)
self.__http.endheaders()
# Write payload
self.__http.send(data)
# Get reply to flush the request
self.__http_response = self.__http.getresponse()
self.code = self.__http_response.status
self.message = self.__http_response.reason
self.headers = self.__http_response.msg
# Decorate if we know how to timeout
if hasattr(socket, 'getdefaulttimeout'):
flush = __withTimeout(flush)
|
apache-2.0
| -6,580,596,719,652,959,000 | 3,170,380,189,666,897,400 | 27.8 | 78 | 0.658826 | false |
anurag03/integration_tests
|
cfme/storage/object_store_object.py
|
1
|
6474
|
# -*- coding: utf-8 -*-
import attr
from navmazing import NavigateToSibling, NavigateToAttribute
from widgetastic.widget import View, Text, NoSuchElementException
from widgetastic_patternfly import BreadCrumb, Button, Dropdown
from cfme.base.ui import BaseLoggedInPage
from cfme.common import TagPageView, Taggable
from cfme.exceptions import ItemNotFound
from cfme.modeling.base import BaseCollection, BaseEntity
from cfme.utils.appliance.implementations.ui import CFMENavigateStep, navigator, navigate_to
from cfme.utils.providers import get_crud_by_name
from widgetastic_manageiq import (
Accordion, BaseEntitiesView, ItemsToolBarViewSelector, ManageIQTree, SummaryTable, Search)
class ObjectStoreObjectToolbar(View):
"""The toolbar on the Object Store Object page"""
configuration = Dropdown('Configuration')
policy = Dropdown('Policy')
download = Dropdown('Download')
view_selector = View.nested(ItemsToolBarViewSelector)
class ObjectStoreObjectDetailsToolbar(View):
"""The toolbar on the Object Store Object detail page"""
policy = Dropdown('Policy')
download = Button(title='Download summary in PDF format')
class ObjectStoreObjectDetailsEntities(View):
"""The entities on the Object Store Object detail page"""
breadcrumb = BreadCrumb()
properties = SummaryTable('Properties')
relationships = SummaryTable('Relationships')
smart_management = SummaryTable('Smart Management')
class ObjectStoreObjectDetailsSidebar(View):
"""The sidebar on the Object Store Object details page"""
@View.nested
class properties(Accordion): # noqa
tree = ManageIQTree()
@View.nested
class relationships(Accordion): # noqa
tree = ManageIQTree()
class ObjectStoreObjectView(BaseLoggedInPage):
"""A base view for all the Object Store Object pages"""
title = Text('.//div[@id="center_div" or @id="main-content"]//h1')
@property
def in_object(self):
return (
self.logged_in_as_current_user and
self.navigation.currently_selected == ['Storage', 'Object Storage',
'Object Store Objects'])
class ObjectStoreObjectAllView(ObjectStoreObjectView):
"""The all Object Store Object page"""
toolbar = View.nested(ObjectStoreObjectToolbar)
search = View.nested(Search)
including_entities = View.include(BaseEntitiesView, use_parent=True)
@property
def is_displayed(self):
return (
self.in_object and
self.title.text == 'Cloud Object Store Objects')
class ObjectStoreObjectDetailsView(ObjectStoreObjectView):
"""The detail Object Store Object page"""
@property
def is_displayed(self):
expected_title = '{} (Summary)'.format(self.context['object'].key)
return (
self.title.text == expected_title and
self.entities.breadcrumb.active_location == expected_title)
toolbar = View.nested(ObjectStoreObjectDetailsToolbar)
sidebar = View.nested(ObjectStoreObjectDetailsSidebar)
entities = View.nested(ObjectStoreObjectDetailsEntities)
@attr.s
class ObjectStoreObject(BaseEntity, Taggable):
""" Model of an Storage Object Store Object in cfme
Args:
key: key of the object.
provider: provider
"""
key = attr.ib()
provider = attr.ib()
@attr.s
class ObjectStoreObjectCollection(BaseCollection):
"""Collection object for the :py:class:'cfme.storage.object_store_object.ObjStoreObject' """
ENTITY = ObjectStoreObject
def all(self):
"""returning all Object Store Objects"""
view = navigate_to(self, 'All')
view.entities.paginator.set_items_per_page(500)
objects = []
try:
            if 'provider' in self.filters:
for item in view.entities.elements.read():
if self.filters['provider'].name in item['Cloud Provider']:
objects.append(self.instantiate(key=item['Key'],
provider=self.filters['provider']))
else:
for item in view.entities.elements.read():
provider_name = item['Cloud Provider'].split()[0]
provider = get_crud_by_name(provider_name)
objects.append(self.instantiate(key=item['Key'], provider=provider))
return objects
except NoSuchElementException:
return None
def delete(self, *objects):
# TODO: capture flash message after BZ 1497113 resolve.
view = navigate_to(self, 'All')
for obj in objects:
try:
row = view.entities.paginator.find_row_on_pages(
view.entities.elements, key=obj.key)
row[0].check()
except NoSuchElementException:
raise ItemNotFound('Could not locate object {}'.format(obj.key))
view.toolbar.configuration.item_select('Remove Object Storage Objects',
handle_alert=True)
@navigator.register(ObjectStoreObjectCollection, 'All')
class ObjectStoreObjectAll(CFMENavigateStep):
VIEW = ObjectStoreObjectAllView
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
def step(self, *args, **kwargs):
self.prerequisite_view.navigation.select(
'Storage', 'Object Storage', 'Object Store Objects')
def resetter(self):
self.view.toolbar.view_selector.select("List View")
@navigator.register(ObjectStoreObject, 'Details')
class ObjectStoreObjectDetails(CFMENavigateStep):
VIEW = ObjectStoreObjectDetailsView
prerequisite = NavigateToAttribute('parent', 'All')
def step(self, *args, **kwargs):
try:
# ToDo: use get_entity method as JS API issue (#2898) resolve.
row = self.prerequisite_view.entities.paginator.find_row_on_pages(
self.prerequisite_view.entities.elements, key=self.obj.key)
row[1].click()
except NoSuchElementException:
raise ItemNotFound('Could not locate object {}'.format(self.obj.key))
@navigator.register(ObjectStoreObject, 'EditTagsFromDetails')
class ObjectStoreObjectDetailEditTag(CFMENavigateStep):
VIEW = TagPageView
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.policy.item_select('Edit Tags')
|
gpl-2.0
| -35,025,308,136,521,652 | 8,665,443,236,862,923,000 | 34.966667 | 96 | 0.666358 | false |
eroicaleo/LearningPython
|
interview/leet/394_Decode_String.py
|
1
|
2187
|
#!/usr/bin/env python3
import re
# s = "3[a]2[bc]", return "aaabcbc".
# s = "3[a2[c]]", return "accaccacc".
# s = "2[abc]3[cd]ef", return "abcabccdcdcdef".
class Solution:
def decodeString(self, s):
stack, n, t = [], 0, ''
for c in s:
if c.isdigit():
n = 10*n + int(c)
if t:
stack, t = stack + [t], ''
elif c == '[':
stack.append(n)
n, t = 0, ''
elif c.isalpha():
t += c
elif c == ']':
t = stack.pop() * t
if stack and isinstance(stack[-1], str):
t = stack.pop() + t
print(f'c = {c}, t = {t}, stack = {stack}')
return t
def decodeString_stefan(self, s):
while '[' in s:
s = re.sub(r'(\d+)\[([a-z]+)\]', lambda m: int(m.group(1)) * m.group(2), s)
print(s)
return s
def decodeString_recursive(self, s):
self.i, l = 0, len(s)
def helper():
n, t = 0, ''
while self.i < l:
c, self.i = s[self.i], self.i+1
if c.isdigit():
n = 10*n+int(c)
elif c.isalpha():
t += c
elif c == '[':
t += n*helper()
n = 0
elif c == ']':
break
print(f'I am returning {t}')
return t
return helper()
def decodeString_iter2(self, s):
stack = []
n, t = 0, ''
for c in s:
if c.isdigit():
n = 10*n+int(c)
elif c.isalpha():
t += c
elif c == '[':
stack += [n, t]
n, t = 0, ''
elif c == ']':
t, n = stack.pop()+stack.pop()*t, 0
return t
s = "2[abc]3[cd]ef"
s = "3[3[a]3[b]]"
s = "3[a]2[bc]"
s = "3[a2[c]]"
sol = Solution()
print(sol.decodeString(s))
print('Solution 2')
print(sol.decodeString_stefan(s))
print('Solution 3')
print(sol.decodeString_recursive(s))
print('Solution 4')
print(sol.decodeString_iter2(s))
|
mit
| -4,963,263,503,657,644,000 | -7,397,381,876,221,038,000 | 26.3375 | 87 | 0.38866 | false |
mastizada/kuma
|
vendor/packages/nose/nose/plugins/testid.py
|
29
|
9641
|
"""
This plugin adds a test id (like #1) to each test name output. After
you've run once to generate test ids, you can re-run individual
tests by activating the plugin and passing the ids (with or
without the # prefix) instead of test names.
For example, if your normal test run looks like::
% nosetests -v
tests.test_a ... ok
tests.test_b ... ok
tests.test_c ... ok
When adding ``--with-id`` you'll see::
% nosetests -v --with-id
#1 tests.test_a ... ok
#2 tests.test_b ... ok
  #3 tests.test_c ... ok
Then you can re-run individual tests by supplying just an id number::
% nosetests -v --with-id 2
#2 tests.test_b ... ok
You can also pass multiple id numbers::
% nosetests -v --with-id 2 3
#2 tests.test_b ... ok
#3 tests.test_c ... ok
Since most shells consider '#' a special character, you can leave it out when
specifying a test id.
Note that when run without the -v switch, no special output is displayed, but
the ids file is still written.
Looping over failed tests
-------------------------
This plugin also adds a mode that will direct the test runner to record
failed tests. Subsequent test runs will then run only the tests that failed
last time. Activate this mode with the ``--failed`` switch::
% nosetests -v --failed
#1 test.test_a ... ok
#2 test.test_b ... ERROR
#3 test.test_c ... FAILED
#4 test.test_d ... ok
On the second run, only tests #2 and #3 will run::
% nosetests -v --failed
#2 test.test_b ... ERROR
#3 test.test_c ... FAILED
As you correct errors and tests pass, they'll drop out of subsequent runs.
First::
% nosetests -v --failed
#2 test.test_b ... ok
#3 test.test_c ... FAILED
Second::
% nosetests -v --failed
#3 test.test_c ... FAILED
When all tests pass, the full set will run on the next invocation.
First::
% nosetests -v --failed
#3 test.test_c ... ok
Second::
% nosetests -v --failed
#1 test.test_a ... ok
#2 test.test_b ... ok
#3 test.test_c ... ok
#4 test.test_d ... ok
.. note ::
    If you expect to use ``--failed`` regularly, it's a good idea to always
run using the ``--with-id`` option. This will ensure that an id file is
always created, allowing you to add ``--failed`` to the command line as soon
as you have failing tests. Otherwise, your first run using ``--failed`` will
(perhaps surprisingly) run *all* tests, because there won't be an id file
containing the record of failed tests from your previous run.
"""
__test__ = False
import logging
import os
from nose.plugins import Plugin
from nose.util import src, set
try:
from cPickle import dump, load
except ImportError:
from pickle import dump, load
log = logging.getLogger(__name__)
class TestId(Plugin):
"""
Activate to add a test id (like #1) to each test name output. Activate
with --failed to rerun failing tests only.
"""
name = 'id'
idfile = None
collecting = True
loopOnFailed = False
def options(self, parser, env):
"""Register commandline options.
"""
Plugin.options(self, parser, env)
parser.add_option('--id-file', action='store', dest='testIdFile',
default='.noseids', metavar="FILE",
help="Store test ids found in test runs in this "
"file. Default is the file .noseids in the "
"working directory.")
parser.add_option('--failed', action='store_true',
dest='failed', default=False,
help="Run the tests that failed in the last "
"test run.")
def configure(self, options, conf):
"""Configure plugin.
"""
Plugin.configure(self, options, conf)
if options.failed:
self.enabled = True
self.loopOnFailed = True
log.debug("Looping on failed tests")
self.idfile = os.path.expanduser(options.testIdFile)
if not os.path.isabs(self.idfile):
self.idfile = os.path.join(conf.workingDir, self.idfile)
self.id = 1
# Ids and tests are mirror images: ids are {id: test address} and
# tests are {test address: id}
self.ids = {}
self.tests = {}
self.failed = []
self.source_names = []
# used to track ids seen when tests is filled from
# loaded ids file
self._seen = {}
self._write_hashes = conf.verbosity >= 2
def finalize(self, result):
"""Save new ids file, if needed.
"""
if result.wasSuccessful():
self.failed = []
if self.collecting:
ids = dict(list(zip(list(self.tests.values()), list(self.tests.keys()))))
else:
ids = self.ids
fh = open(self.idfile, 'wb')
dump({'ids': ids,
'failed': self.failed,
'source_names': self.source_names}, fh)
fh.close()
log.debug('Saved test ids: %s, failed %s to %s',
ids, self.failed, self.idfile)
def loadTestsFromNames(self, names, module=None):
"""Translate ids in the list of requested names into their
test addresses, if they are found in my dict of tests.
"""
log.debug('ltfn %s %s', names, module)
try:
fh = open(self.idfile, 'rb')
data = load(fh)
if 'ids' in data:
self.ids = data['ids']
self.failed = data['failed']
self.source_names = data['source_names']
else:
# old ids field
self.ids = data
self.failed = []
self.source_names = names
if self.ids:
self.id = max(self.ids) + 1
self.tests = dict(list(zip(list(self.ids.values()), list(self.ids.keys()))))
else:
self.id = 1
log.debug(
'Loaded test ids %s tests %s failed %s sources %s from %s',
self.ids, self.tests, self.failed, self.source_names,
self.idfile)
fh.close()
except IOError:
log.debug('IO error reading %s', self.idfile)
if self.loopOnFailed and self.failed:
self.collecting = False
names = self.failed
self.failed = []
# I don't load any tests myself, only translate names like '#2'
# into the associated test addresses
translated = []
new_source = []
really_new = []
for name in names:
trans = self.tr(name)
if trans != name:
translated.append(trans)
else:
new_source.append(name)
# names that are not ids and that are not in the current
# list of source names go into the list for next time
if new_source:
new_set = set(new_source)
old_set = set(self.source_names)
log.debug("old: %s new: %s", old_set, new_set)
really_new = [s for s in new_source
if not s in old_set]
if really_new:
# remember new sources
self.source_names.extend(really_new)
if not translated:
# new set of source names, no translations
# means "run the requested tests"
names = new_source
else:
# no new names to translate and add to id set
self.collecting = False
log.debug("translated: %s new sources %s names %s",
translated, really_new, names)
return (None, translated + really_new or names)
def makeName(self, addr):
log.debug("Make name %s", addr)
filename, module, call = addr
if filename is not None:
head = src(filename)
else:
head = module
if call is not None:
return "%s:%s" % (head, call)
return head
def setOutputStream(self, stream):
"""Get handle on output stream so the plugin can print id #s
"""
self.stream = stream
def startTest(self, test):
"""Maybe output an id # before the test name.
Example output::
#1 test.test ... ok
#2 test.test_two ... ok
"""
adr = test.address()
log.debug('start test %s (%s)', adr, adr in self.tests)
if adr in self.tests:
if adr in self._seen:
self.write(' ')
else:
self.write('#%s ' % self.tests[adr])
self._seen[adr] = 1
return
self.tests[adr] = self.id
self.write('#%s ' % self.id)
self.id += 1
def afterTest(self, test):
# None means test never ran, False means failed/err
if test.passed is False:
try:
key = str(self.tests[test.address()])
except KeyError:
# never saw this test -- startTest didn't run
pass
else:
if key not in self.failed:
self.failed.append(key)
def tr(self, name):
log.debug("tr '%s'", name)
try:
key = int(name.replace('#', ''))
except ValueError:
return name
log.debug("Got key %s", key)
# I'm running tests mapped from the ids file,
# not collecting new ones
if key in self.ids:
return self.makeName(self.ids[key])
return name
def write(self, output):
if self._write_hashes:
self.stream.write(output)
|
mpl-2.0
| -4,487,236,496,559,780,000 | 2,424,506,929,699,981,300 | 30.506536 | 92 | 0.552743 | false |
rabipanda/tensorflow
|
tensorflow/contrib/distributions/python/kernel_tests/cauchy_test.py
|
33
|
16857
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Cauchy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.contrib.distributions.python.ops import cauchy as cauchy_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
class CauchyTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(123)
def assertAllFinite(self, tensor):
is_finite = np.isfinite(tensor.eval())
all_true = np.ones_like(is_finite, dtype=np.bool)
self.assertAllEqual(all_true, is_finite)
def _testParamShapes(self, sample_shape, expected):
with self.test_session():
param_shapes = cauchy_lib.Cauchy.param_shapes(sample_shape)
loc_shape, scale_shape = param_shapes["loc"], param_shapes["scale"]
self.assertAllEqual(expected, loc_shape.eval())
self.assertAllEqual(expected, scale_shape.eval())
loc = array_ops.zeros(loc_shape)
scale = array_ops.ones(scale_shape)
self.assertAllEqual(expected,
array_ops.shape(
cauchy_lib.Cauchy(loc, scale).sample()).eval())
def _testParamStaticShapes(self, sample_shape, expected):
param_shapes = cauchy_lib.Cauchy.param_static_shapes(sample_shape)
loc_shape, scale_shape = param_shapes["loc"], param_shapes["scale"]
self.assertEqual(expected, loc_shape)
self.assertEqual(expected, scale_shape)
def testParamShapes(self):
sample_shape = [10, 3, 4]
self._testParamShapes(sample_shape, sample_shape)
self._testParamShapes(constant_op.constant(sample_shape), sample_shape)
def testParamStaticShapes(self):
sample_shape = [10, 3, 4]
self._testParamStaticShapes(sample_shape, sample_shape)
self._testParamStaticShapes(
tensor_shape.TensorShape(sample_shape), sample_shape)
def testCauchyLogPDF(self):
with self.test_session():
batch_size = 6
loc = constant_op.constant([3.0] * batch_size)
scale = constant_op.constant([np.sqrt(10.0)] * batch_size)
x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
log_pdf = cauchy.log_prob(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(),
log_pdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.eval().shape)
pdf = cauchy.prob(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, pdf.shape)
self.assertAllEqual(cauchy.batch_shape, pdf.eval().shape)
if not stats:
return
expected_log_pdf = stats.cauchy(loc.eval(), scale.eval()).logpdf(x)
self.assertAllClose(expected_log_pdf, log_pdf.eval())
self.assertAllClose(np.exp(expected_log_pdf), pdf.eval())
def testCauchyLogPDFMultidimensional(self):
with self.test_session():
batch_size = 6
loc = constant_op.constant([[3.0, -3.0]] * batch_size)
scale = constant_op.constant(
[[np.sqrt(10.0), np.sqrt(15.0)]] * batch_size)
x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
log_pdf = cauchy.log_prob(x)
log_pdf_values = log_pdf.eval()
self.assertEqual(log_pdf.shape, (6, 2))
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(),
log_pdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.eval().shape)
pdf = cauchy.prob(x)
pdf_values = pdf.eval()
self.assertEqual(pdf.shape, (6, 2))
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf_values.shape)
self.assertAllEqual(cauchy.batch_shape, pdf.shape)
self.assertAllEqual(cauchy.batch_shape, pdf_values.shape)
if not stats:
return
expected_log_pdf = stats.cauchy(loc.eval(), scale.eval()).logpdf(x)
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testCauchyCDF(self):
with self.test_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
cdf = cauchy.cdf(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, cdf.shape)
self.assertAllEqual(cauchy.batch_shape, cdf.eval().shape)
if not stats:
return
expected_cdf = stats.cauchy(loc, scale).cdf(x)
self.assertAllClose(expected_cdf, cdf.eval(), atol=0)
def testCauchySurvivalFunction(self):
with self.test_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
sf = cauchy.survival_function(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, sf.shape)
self.assertAllEqual(cauchy.batch_shape, sf.eval().shape)
if not stats:
return
expected_sf = stats.cauchy(loc, scale).sf(x)
self.assertAllClose(expected_sf, sf.eval(), atol=0)
def testCauchyLogCDF(self):
with self.test_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-100.0, 10.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
cdf = cauchy.log_cdf(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, cdf.shape)
self.assertAllEqual(cauchy.batch_shape, cdf.eval().shape)
if not stats:
return
expected_cdf = stats.cauchy(loc, scale).logcdf(x)
self.assertAllClose(expected_cdf, cdf.eval(), atol=0, rtol=1e-5)
def testFiniteGradientAtDifficultPoints(self):
for dtype in [np.float32, np.float64]:
g = ops.Graph()
with g.as_default():
loc = variables.Variable(dtype(0.0))
scale = variables.Variable(dtype(1.0))
dist = cauchy_lib.Cauchy(loc=loc, scale=scale)
x = np.array([-100., -20., -5., 0., 5., 20., 100.]).astype(dtype)
for func in [
dist.cdf, dist.log_cdf, dist.survival_function,
dist.log_survival_function, dist.log_prob, dist.prob
]:
value = func(x)
grads = gradients_impl.gradients(value, [loc, scale])
with self.test_session(graph=g):
variables.global_variables_initializer().run()
self.assertAllFinite(value)
self.assertAllFinite(grads[0])
self.assertAllFinite(grads[1])
def testCauchyLogSurvivalFunction(self):
with self.test_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-10.0, 100.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
sf = cauchy.log_survival_function(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, sf.shape)
self.assertAllEqual(cauchy.batch_shape, sf.eval().shape)
if not stats:
return
expected_sf = stats.cauchy(loc, scale).logsf(x)
self.assertAllClose(expected_sf, sf.eval(), atol=0, rtol=1e-5)
def testCauchyEntropy(self):
with self.test_session():
loc = np.array([1.0, 1.0, 1.0])
scale = np.array([[1.0, 2.0, 3.0]])
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
entropy = cauchy.entropy()
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), entropy.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(),
entropy.eval().shape)
self.assertAllEqual(cauchy.batch_shape, entropy.shape)
self.assertAllEqual(cauchy.batch_shape, entropy.eval().shape)
if not stats:
return
expected_entropy = stats.cauchy(loc, scale[0]).entropy().reshape((1, 3))
self.assertAllClose(expected_entropy, entropy.eval())
def testCauchyMode(self):
with self.test_session():
# Mu will be broadcast to [7, 7, 7].
loc = [7.]
scale = [11., 12., 13.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.mode().shape)
self.assertAllEqual([7., 7, 7], cauchy.mode().eval())
def testCauchyMean(self):
with self.test_session():
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.mean().shape)
self.assertAllEqual([np.nan] * 3, cauchy.mean().eval())
def testCauchyNanMean(self):
with self.test_session():
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale, allow_nan_stats=False)
with self.assertRaises(ValueError):
cauchy.mean().eval()
def testCauchyQuantile(self):
with self.test_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
p = np.linspace(0.000001, 0.999999, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
x = cauchy.quantile(p)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), x.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), x.eval().shape)
self.assertAllEqual(cauchy.batch_shape, x.shape)
self.assertAllEqual(cauchy.batch_shape, x.eval().shape)
if not stats:
return
expected_x = stats.cauchy(loc, scale).ppf(p)
self.assertAllClose(expected_x, x.eval(), atol=0.)
def testCauchyVariance(self):
with self.test_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.variance().shape)
self.assertAllEqual([np.nan] * 3, cauchy.variance().eval())
def testCauchyNanVariance(self):
with self.test_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale, allow_nan_stats=False)
with self.assertRaises(ValueError):
cauchy.variance().eval()
def testCauchyStandardDeviation(self):
with self.test_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.stddev().shape)
self.assertAllEqual([np.nan] * 3, cauchy.stddev().eval())
def testCauchyNanStandardDeviation(self):
with self.test_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale, allow_nan_stats=False)
with self.assertRaises(ValueError):
cauchy.stddev().eval()
def testCauchySample(self):
with self.test_session():
loc = constant_op.constant(3.0)
scale = constant_op.constant(1.0)
loc_v = 3.0
n = constant_op.constant(100000)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
samples = cauchy.sample(n)
sample_values = samples.eval()
self.assertEqual(sample_values.shape, (100000,))
self.assertAllClose(np.median(sample_values), loc_v, atol=1e-1)
expected_shape = tensor_shape.TensorShape([n.eval()]).concatenate(
tensor_shape.TensorShape(cauchy.batch_shape_tensor().eval()))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
expected_shape = (
tensor_shape.TensorShape([n.eval()]).concatenate(cauchy.batch_shape))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
def testCauchySampleMultiDimensional(self):
with self.test_session():
batch_size = 2
loc = constant_op.constant([[3.0, -3.0]] * batch_size)
scale = constant_op.constant([[0.5, 1.0]] * batch_size)
loc_v = [3.0, -3.0]
n = constant_op.constant(100000)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
samples = cauchy.sample(n)
sample_values = samples.eval()
self.assertEqual(samples.shape, (100000, batch_size, 2))
self.assertAllClose(
np.median(sample_values[:, 0, 0]), loc_v[0], atol=1e-1)
self.assertAllClose(
np.median(sample_values[:, 0, 1]), loc_v[1], atol=1e-1)
expected_shape = tensor_shape.TensorShape([n.eval()]).concatenate(
tensor_shape.TensorShape(cauchy.batch_shape_tensor().eval()))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
expected_shape = (
tensor_shape.TensorShape([n.eval()]).concatenate(cauchy.batch_shape))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
def testCauchyNegativeLocFails(self):
with self.test_session():
cauchy = cauchy_lib.Cauchy(loc=[1.], scale=[-5.], validate_args=True)
with self.assertRaisesOpError("Condition x > 0 did not hold"):
cauchy.mode().eval()
def testCauchyShape(self):
with self.test_session():
loc = constant_op.constant([-3.0] * 5)
scale = constant_op.constant(11.0)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertEqual(cauchy.batch_shape_tensor().eval(), [5])
self.assertEqual(cauchy.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(cauchy.event_shape_tensor().eval(), [])
self.assertEqual(cauchy.event_shape, tensor_shape.TensorShape([]))
def testCauchyShapeWithPlaceholders(self):
loc = array_ops.placeholder(dtype=dtypes.float32)
scale = array_ops.placeholder(dtype=dtypes.float32)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
with self.test_session() as sess:
# get_batch_shape should return an "<unknown>" tensor.
self.assertEqual(cauchy.batch_shape, tensor_shape.TensorShape(None))
self.assertEqual(cauchy.event_shape, ())
self.assertAllEqual(cauchy.event_shape_tensor().eval(), [])
self.assertAllEqual(
sess.run(
cauchy.batch_shape_tensor(),
feed_dict={
loc: 5.0,
scale: [1.0, 2.0]
}), [2])
if __name__ == "__main__":
test.main()
|
apache-2.0
| -2,444,699,564,891,437,600 | 1,302,528,796,115,885,000 | 37.486301 | 80 | 0.649938 | false |
pwhelan/djshouts
|
django/db/backends/postgresql_psycopg2/base.py
|
239
|
8346
|
"""
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
import sys
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.postgresql.operations import DatabaseOperations as PostgresqlDatabaseOperations
from django.db.backends.postgresql.client import DatabaseClient
from django.db.backends.postgresql.creation import DatabaseCreation
from django.db.backends.postgresql.version import get_version
from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection
from django.utils.safestring import SafeUnicode, SafeString
try:
import psycopg2 as Database
import psycopg2.extensions
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_adapter(SafeString, psycopg2.extensions.QuotedString)
psycopg2.extensions.register_adapter(SafeUnicode, psycopg2.extensions.QuotedString)
class CursorWrapper(object):
"""
A thin wrapper around psycopg2's normal cursor class so that we can catch
particular exception instances and reraise them with the right types.
"""
def __init__(self, cursor):
self.cursor = cursor
def execute(self, query, args=None):
try:
return self.cursor.execute(query, args)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def executemany(self, query, args):
try:
return self.cursor.executemany(query, args)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
class DatabaseFeatures(BaseDatabaseFeatures):
needs_datetime_string_cast = False
can_return_id_from_insert = False
requires_rollback_on_dirty_transaction = True
has_real_datatype = True
can_defer_constraint_checks = True
class DatabaseOperations(PostgresqlDatabaseOperations):
def last_executed_query(self, cursor, sql, params):
# With psycopg2, cursor objects have a "query" attribute that is the
# exact query sent to the database. See docs here:
# http://www.initd.org/tracker/psycopg/wiki/psycopg2_documentation#postgresql-status-message-and-executed-query
return cursor.query
def return_insert_id(self):
return "RETURNING %s", ()
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
autocommit = self.settings_dict["OPTIONS"].get('autocommit', False)
self.features.uses_autocommit = autocommit
self._set_isolation_level(int(not autocommit))
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def _cursor(self):
new_connection = False
set_tz = False
settings_dict = self.settings_dict
if self.connection is None:
new_connection = True
set_tz = settings_dict.get('TIME_ZONE')
if settings_dict['NAME'] == '':
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("You need to specify NAME in your Django settings file.")
conn_params = {
'database': settings_dict['NAME'],
}
conn_params.update(settings_dict['OPTIONS'])
if 'autocommit' in conn_params:
del conn_params['autocommit']
if settings_dict['USER']:
conn_params['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
conn_params['password'] = settings_dict['PASSWORD']
if settings_dict['HOST']:
conn_params['host'] = settings_dict['HOST']
if settings_dict['PORT']:
conn_params['port'] = settings_dict['PORT']
self.connection = Database.connect(**conn_params)
self.connection.set_client_encoding('UTF8')
self.connection.set_isolation_level(self.isolation_level)
connection_created.send(sender=self.__class__, connection=self)
cursor = self.connection.cursor()
cursor.tzinfo_factory = None
if new_connection:
if set_tz:
cursor.execute("SET TIME ZONE %s", [settings_dict['TIME_ZONE']])
if not hasattr(self, '_version'):
self.__class__._version = get_version(cursor)
if self._version[0:2] < (8, 0):
# No savepoint support for earlier version of PostgreSQL.
self.features.uses_savepoints = False
if self.features.uses_autocommit:
if self._version[0:2] < (8, 2):
# FIXME: Needs extra code to do reliable model insert
# handling, so we forbid it for now.
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("You cannot use autocommit=True with PostgreSQL prior to 8.2 at the moment.")
else:
# FIXME: Eventually we're enable this by default for
# versions that support it, but, right now, that's hard to
# do without breaking other things (#10509).
self.features.can_return_id_from_insert = True
return CursorWrapper(cursor)
def _enter_transaction_management(self, managed):
"""
Switch the isolation level when needing transaction support, so that
the same transaction is visible across all the queries.
"""
if self.features.uses_autocommit and managed and not self.isolation_level:
self._set_isolation_level(1)
def _leave_transaction_management(self, managed):
"""
If the normal operating mode is "autocommit", switch back to that when
leaving transaction management.
"""
if self.features.uses_autocommit and not managed and self.isolation_level:
self._set_isolation_level(0)
def _set_isolation_level(self, level):
"""
Do all the related feature configurations for changing isolation
levels. This doesn't touch the uses_autocommit feature, since that
controls the movement *between* isolation levels.
"""
assert level in (0, 1)
try:
if self.connection is not None:
self.connection.set_isolation_level(level)
finally:
self.isolation_level = level
self.features.uses_savepoints = bool(level)
def _commit(self):
if self.connection is not None:
try:
return self.connection.commit()
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
|
bsd-3-clause
| -7,546,225,972,495,199,000 | 6,675,312,040,933,442,000 | 40.316832 | 124 | 0.6318 | false |
legrosbuffle/or-tools
|
examples/python/coins3.py
|
7
|
2902
|
# Copyright 2010 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Coin application in Google CP Solver.
From 'Constraint Logic Programming using ECLiPSe'
pages 99f and 234 ff.
The solution in ECLiPSe is at page 236.
'''
What is the minimum number of coins that allows one to pay _exactly_
any amount smaller than one Euro? Recall that there are six different
euro cents, of denomination 1, 2, 5, 10, 20, 50
'''
Compare with the following models:
* MiniZinc: http://hakank.org/minizinc/coins3.mzn
* Comet : http://www.hakank.org/comet/coins3.co
* Gecode : http://hakank.org/gecode/coins3.cpp
* SICStus : http://hakank.org/sicstus/coins3.pl
This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function
import sys
from ortools.constraint_solver import pywrapcp
def main():
# Create the solver.
solver = pywrapcp.Solver("Coins")
#
# data
#
n = 6 # number of different coins
  variables = [1, 2, 5, 10, 20, 50]  # euro cent denominations (see docstring above)
# declare variables
x = [solver.IntVar(0, 99, "x%i" % i) for i in range(n)]
num_coins = solver.IntVar(0, 99, "num_coins")
#
# constraints
#
# number of used coins, to be minimized
solver.Add(num_coins == solver.Sum(x))
# Check that all changes from 1 to 99 can be made.
for j in range(1, 100):
tmp = [solver.IntVar(0, 99, "b%i" % i) for i in range(n)]
solver.Add(solver.ScalProd(tmp, variables) == j)
[solver.Add(tmp[i] <= x[i]) for i in range(n)]
# objective
objective = solver.Minimize(num_coins, 1)
#
# solution and search
#
solution = solver.Assignment()
solution.Add(x)
solution.Add(num_coins)
solution.AddObjective(num_coins)
db = solver.Phase(x,
solver.CHOOSE_MIN_SIZE_LOWEST_MAX,
solver.ASSIGN_MIN_VALUE)
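  # CHOOSE_MIN_SIZE_LOWEST_MAX picks the unbound variable with the smallest
  # domain (ties broken by the lowest maximum value); ASSIGN_MIN_VALUE then
  # tries the smallest value first.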
solver.NewSearch(db, [objective])
num_solutions = 0
while solver.NextSolution():
print("x: ", [x[i].Value() for i in range(n)])
print("num_coins:", num_coins.Value())
print()
num_solutions += 1
solver.EndSearch()
print()
print("num_solutions:", num_solutions)
print("failures:", solver.Failures())
print("branches:", solver.Branches())
print("WallTime:", solver.WallTime())
if __name__ == "__main__":
main()
|
apache-2.0
| -6,510,479,510,290,095,000 | 3,123,263,142,440,691,000 | 26.377358 | 74 | 0.676085 | false |
xujb/odoo
|
addons/account_payment/account_move_line.py
|
241
|
4455
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from operator import itemgetter
class account_move_line(osv.osv):
_inherit = "account.move.line"
# delegate to parent, used for local fields.function redefinition
def _amount_to_pay(self, cr, uid, ids, field_names, args, context=None):
return {
id: value['amount_residual']
for id, value in self._amount_residual(cr, uid, ids, field_names, args,
context=context).items()
}
def _to_pay_search(self, cr, uid, obj, name, args, context=None):
if not args:
return []
line_obj = self.pool.get('account.move.line')
query = line_obj._query_get(cr, uid, context={})
where = ' and '.join(map(lambda x: '''(SELECT
CASE WHEN l.amount_currency < 0
THEN - l.amount_currency
ELSE l.credit
END - coalesce(sum(pl.amount_currency), 0)
FROM payment_line pl
INNER JOIN payment_order po ON (pl.order_id = po.id)
WHERE move_line_id = l.id
AND po.state != 'cancel'
) %(operator)s %%s ''' % {'operator': x[1]}, args))
sql_args = tuple(map(itemgetter(2), args))
cr.execute(('''SELECT id
FROM account_move_line l
WHERE account_id IN (select id
FROM account_account
WHERE type=%s AND active)
AND reconcile_id IS null
AND credit > 0
AND ''' + where + ' and ' + query), ('payable',)+sql_args )
res = cr.fetchall()
if not res:
return [('id', '=', '0')]
return [('id', 'in', map(lambda x:x[0], res))]
def line2bank(self, cr, uid, ids, payment_type=None, context=None):
"""
        Try to return, for each ledger posting line, a corresponding bank
        account according to the payment type. This works using one of the
        banks of the partner defined on the invoice possibly associated
        with the line.
        Return the first suitable bank for the corresponding partner.
"""
payment_mode_obj = self.pool.get('payment.mode')
line2bank = {}
if not ids:
return {}
bank_type = payment_mode_obj.suitable_bank_types(cr, uid, payment_type,
context=context)
for line in self.browse(cr, uid, ids, context=context):
line2bank[line.id] = False
if line.invoice and line.invoice.partner_bank_id:
line2bank[line.id] = line.invoice.partner_bank_id.id
elif line.partner_id:
if not line.partner_id.bank_ids:
line2bank[line.id] = False
else:
for bank in line.partner_id.bank_ids:
if bank.state in bank_type:
line2bank[line.id] = bank.id
break
if not line2bank.get(line.id) and line.partner_id.bank_ids:
line2bank[line.id] = line.partner_id.bank_ids[0].id
else:
raise osv.except_osv(_('Error!'), _('There is no partner defined on the entry line.'))
return line2bank
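    # A minimal usage sketch (ids and payment_type are hypothetical): the
    # payment order wizard would call something like
    #   line2bank = move_line_obj.line2bank(cr, uid, line_ids, payment_type)
    # and get back a {move_line_id: partner_bank_id or False} mapping.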
_columns = {
'amount_to_pay': fields.function(_amount_to_pay,
type='float', string='Amount to pay', fnct_search=_to_pay_search),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| -1,281,225,873,491,469,300 | 4,937,682,041,157,978,000 | 41.028302 | 102 | 0.562963 | false |
rahushen/ansible
|
lib/ansible/utils/module_docs_fragments/k8s_state_options.py
|
80
|
1411
|
#
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Options for specifying object state
class ModuleDocFragment(object):
DOCUMENTATION = '''
options:
state:
description:
    - Determines if an object should be created, patched, or deleted. When set to C(present), an object will be
      created if it does not already exist. If set to C(absent), an existing object will be deleted. If set to
      C(present), an existing object will be patched if its attributes differ from those specified using
      I(resource_definition) or I(src).
default: present
choices:
- present
- absent
force:
description:
- If set to C(True), and I(state) is C(present), an existing object will be replaced.
default: false
type: bool
'''
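# A sketch of a playbook task consuming these options (the module name and
# values are illustrative, not part of this fragment):
#
#   - name: Ensure a namespace exists
#     k8s:
#       state: present
#       definition:
#         apiVersion: v1
#         kind: Namespace
#         metadata:
#           name: testing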
|
gpl-3.0
| 4,789,000,591,914,117,000 | -3,694,319,857,100,118,500 | 33.414634 | 111 | 0.720765 | false |
soxofaan/luigi
|
test/simulate_test.py
|
13
|
2971
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import unittest
import luigi
from luigi.contrib.simulate import RunAnywayTarget
from multiprocessing import Process
import os
import tempfile
def temp_dir():
return os.path.join(tempfile.gettempdir(), 'luigi-simulate')
def is_writable():
d = temp_dir()
fn = os.path.join(d, 'luigi-simulate-write-test')
exists = True
try:
try:
os.makedirs(d)
except OSError:
pass
open(fn, 'w').close()
os.remove(fn)
except BaseException:
exists = False
return unittest.skipIf(not exists, 'Can\'t write to temporary directory')
class TaskA(luigi.Task):
i = luigi.IntParameter(default=0)
def output(self):
return RunAnywayTarget(self)
def run(self):
fn = os.path.join(temp_dir(), 'luigi-simulate-test.tmp')
try:
os.makedirs(os.path.dirname(fn))
except OSError:
pass
with open(fn, 'a') as f:
f.write('{0}={1}\n'.format(self.__class__.__name__, self.i))
self.output().done()
class TaskB(TaskA):
def requires(self):
return TaskA(i=10)
class TaskC(TaskA):
def requires(self):
return TaskA(i=5)
class TaskD(TaskA):
def requires(self):
return [TaskB(), TaskC(), TaskA(i=20)]
class TaskWrap(luigi.WrapperTask):
def requires(self):
return [TaskA(), TaskD()]
def reset():
# Force tasks to be executed again (because multiple pipelines are executed inside of the same process)
t = TaskA().output()
with t.unique.get_lock():
t.unique.value = 0
class RunAnywayTargetTest(unittest.TestCase):
@is_writable()
def test_output(self):
reset()
fn = os.path.join(temp_dir(), 'luigi-simulate-test.tmp')
luigi.build([TaskWrap()], local_scheduler=True)
with open(fn, 'r') as f:
data = f.read().strip().split('\n')
data.sort()
reference = ['TaskA=0', 'TaskA=10', 'TaskA=20', 'TaskA=5', 'TaskB=0', 'TaskC=0', 'TaskD=0']
reference.sort()
os.remove(fn)
self.assertEqual(data, reference)
@is_writable()
def test_output_again(self):
# Running the test in another process because the PID is used to determine if the target exists
p = Process(target=self.test_output)
p.start()
p.join()
|
apache-2.0
| 2,168,885,675,269,276,000 | 5,924,287,187,561,095,000 | 24.393162 | 107 | 0.629081 | false |
aviaryan/pythons
|
TheHyliaSoundtrack/hylia_s.py
|
1
|
1588
|
from bs4 import BeautifulSoup
from urllib.request import urlopen
import subprocess
# if genlist = 0, this script downloads the files itself; cmd_downloader below is used as the download command
# if genlist = 1, this script only generates a list.txt file containing direct links to the music files in the working directory
# list.txt can then be imported into any download manager (IDM, FDM, etc.) to download all files at once at full speed
genlist = 1
cmd_downloader = 'aria2c -x 8 -s 8 -k 3M'
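# aria2c flags used above: -x sets the maximum connections per server,
# -s the number of splits per download, -k the minimum split size.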
# example of url : http://anime.thehylia.com/soundtracks/album/death-note-original-soundtrack
def run():
url = input('url of soundtrack album \n> ')
response = urlopen(url)
data = response.read()
  soup = BeautifulSoup(data, 'lxml')  # the default html.parser fails on this markup; lxml parses it fine
# open('list.html', 'w').write(data.decode())
getsongs( soup.body.find_all('a') )
def getsongs( tds ):
downlist = ''
cur = 1
for i in tds:
link = i['href']
if not ismp3(link):
continue
# download song
response = urlopen(link)
songdata = response.read()
songsoup = BeautifulSoup(songdata, 'lxml')
links = songsoup.body.find_all('a')
for dlink in links:
if not ismp3(dlink['href']):
continue
print('Downloading song #' + str(cur))
if genlist:
downlist += dlink['href'] + '\n'
else:
        subprocess.call(cmd_downloader.split() + [dlink['href']])
      break  # only the first direct link per song page is needed
cur += 1
if genlist:
open('list.txt', 'w').write(downlist)
def ismp3(link):
if len(link) < 5:
return False
if link[-4:] != '.mp3':
return False
return True
if __name__ == '__main__':
run()
|
apache-2.0
| 1,989,442,349,989,927,700 | -4,319,191,057,958,765,000 | 23.828125 | 124 | 0.677582 | false |
chipx86/reviewboard
|
reviewboard/diffviewer/tests/test_forms.py
|
2
|
35354
|
from __future__ import unicode_literals
import base64
import json
import nose
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test.client import RequestFactory
from django.utils import six
from djblets.siteconfig.models import SiteConfiguration
from djblets.util.filesystem import is_exe_in_path
from kgb import SpyAgency
from reviewboard.diffviewer.diffutils import (get_original_file,
get_patched_file,
patch)
from reviewboard.diffviewer.errors import (DiffParserError, DiffTooBigError,
EmptyDiffError)
from reviewboard.diffviewer.forms import (UploadCommitForm, UploadDiffForm,
ValidateCommitForm)
from reviewboard.diffviewer.models import DiffSet, DiffSetHistory
from reviewboard.scmtools.errors import FileNotFoundError
from reviewboard.scmtools.models import Repository, Tool
from reviewboard.testing import TestCase
class UploadCommitFormTests(SpyAgency, TestCase):
"""Unit tests for UploadCommitForm."""
fixtures = ['test_scmtools']
_default_form_data = {
'base_commit_id': '1234',
'basedir': '/',
'commit_id': 'r1',
'parent_id': 'r0',
'commit_message': 'Message',
'author_name': 'Author',
'author_email': '[email protected]',
'author_date': '1970-01-01 00:00:00+0000',
'committer_name': 'Committer',
'committer_email': '[email protected]',
'committer_date': '1970-01-01 00:00:00+0000',
}
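    # These keys mirror the per-commit metadata fields UploadCommitForm
    # validates; individual tests override entries via
    # dict(self._default_form_data, **{...}) where needed.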
def setUp(self):
super(UploadCommitFormTests, self).setUp()
self.repository = self.create_repository(tool_name='Git')
self.spy_on(self.repository.get_file_exists,
call_fake=lambda *args, **kwargs: True)
self.diffset = DiffSet.objects.create_empty(repository=self.repository)
def test_create(self):
"""Testing UploadCommitForm.create"""
diff = SimpleUploadedFile('diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
form = UploadCommitForm(
diffset=self.diffset,
data=self._default_form_data.copy(),
files={
'diff': diff,
})
self.assertTrue(form.is_valid())
commit = form.create()
self.assertEqual(self.diffset.files.count(), 1)
self.assertEqual(self.diffset.commits.count(), 1)
self.assertEqual(commit.files.count(), 1)
self.assertEqual(set(self.diffset.files.all()),
set(commit.files.all()))
def test_clean_parent_diff_path(self):
"""Testing UploadCommitForm.clean() for a subsequent commit with a
parent diff
"""
diff = SimpleUploadedFile('diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
parent_diff = SimpleUploadedFile('parent_diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
form = UploadCommitForm(
diffset=self.diffset,
data=self._default_form_data.copy(),
files={
'diff': diff,
'parent_diff': parent_diff,
})
self.assertTrue(form.is_valid())
form.create()
form = UploadCommitForm(
diffset=self.diffset,
data=dict(
self._default_form_data,
**{
'parent_id': 'r1',
'commit_id': 'r2',
}
),
files={
'diff': diff,
'parent_diff': parent_diff,
})
self.assertTrue(form.is_valid())
self.assertNotIn('parent_diff', form.errors)
def test_clean_published_diff(self):
"""Testing UploadCommitForm.clean() for a DiffSet that has already been
published
"""
diff = SimpleUploadedFile('diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
form = UploadCommitForm(
diffset=self.diffset,
data=self._default_form_data,
files={
'diff': diff,
})
self.assertTrue(form.is_valid())
form.create()
self.diffset.history = DiffSetHistory.objects.create()
self.diffset.save(update_fields=('history_id',))
form = UploadCommitForm(
diffset=self.diffset,
data=dict(
self._default_form_data,
parent_id='r1',
commit_id='r0',
),
files={
'diff_path': SimpleUploadedFile(
'diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch'),
})
self.assertFalse(form.is_valid())
self.assertNotEqual(form.non_field_errors, [])
def test_clean_author_date(self):
"""Testing UploadCommitForm.clean_author_date"""
diff = SimpleUploadedFile('diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
form = UploadCommitForm(
diffset=self.diffset,
data=dict(self._default_form_data, **{
'author_date': 'Jan 1 1970',
}),
files={
'diff': diff,
})
self.assertFalse(form.is_valid())
self.assertIn('author_date', form.errors)
def test_clean_committer_date(self):
"""Testing UploadCommitForm.clean_committer_date"""
diff = SimpleUploadedFile('diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
form = UploadCommitForm(
diffset=self.diffset,
data=dict(self._default_form_data, **{
'committer_date': 'Jun 1 1970',
}),
files={
'diff': diff,
})
self.assertFalse(form.is_valid())
self.assertIn('committer_date', form.errors)
def test_clean_no_committer(self):
"""Testing UploadCommitForm.clean when no committer_ fields are present
"""
field_names = {
'committer_date',
'committer_email',
'committer_name',
}
diff = SimpleUploadedFile('diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
form_data = self._default_form_data.copy()
for field in field_names:
del form_data[field]
form = UploadCommitForm(
diffset=self.diffset,
data=form_data,
files={
'diff': diff,
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
field: ['This field is required.']
for field in field_names
})
def test_clean_commiter_unsupported(self):
"""Testing UploadCommitForm.clean when committer_ fields are present
for a SCMTool that doesn't support them
"""
if not is_exe_in_path('hg'):
raise nose.SkipTest('Hg is not installed')
self.repository.tool = Tool.objects.get(name='Mercurial')
self.repository.save()
diff = SimpleUploadedFile('diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
form = UploadCommitForm(
diffset=self.diffset,
data=self._default_form_data.copy(),
files={
'diff': diff,
})
self.assertTrue(form.is_valid())
self.assertNotIn('committer_date', form.cleaned_data)
self.assertNotIn('committer_email', form.cleaned_data)
self.assertNotIn('committer_name', form.cleaned_data)
class UploadDiffFormTests(SpyAgency, TestCase):
"""Unit tests for UploadDiffForm."""
fixtures = ['test_scmtools']
def test_create(self):
"""Testing UploadDiffForm.create"""
diff_file = SimpleUploadedFile('diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
repository = self.create_repository(tool_name='Test')
self.spy_on(repository.get_file_exists,
call_fake=lambda *args, **kwargs: True)
form = UploadDiffForm(
repository=repository,
data={
'basedir': '/',
'base_commit_id': '1234',
},
files={
'path': diff_file,
})
self.assertTrue(form.is_valid())
diffset = form.create()
self.assertEqual(diffset.files.count(), 1)
self.assertEqual(diffset.basedir, '/')
self.assertEqual(diffset.base_commit_id, '1234')
def test_create_filters_parent_diffs(self):
"""Testing UploadDiffForm.create filters parent diff files"""
saw_file_exists = {}
def get_file_exists(repository, filename, revision, *args, **kwargs):
saw_file_exists[(filename, revision)] = True
return True
parent_diff_1 = (
b'diff --git a/README b/README\n'
b'index d6613f4..5b50865 100644\n'
b'--- README\n'
b'+++ README\n'
b'@@ -2 +2 @@\n'
b'-blah..\n'
b'+blah blah\n'
)
parent_diff_2 = (
b'diff --git a/UNUSED b/UNUSED\n'
b'index 1234567..5b50866 100644\n'
b'--- UNUSED\n'
b'+++ UNUSED\n'
b'@@ -1,1 +1,1 @@\n'
b'-foo\n'
b'+bar\n'
)
parent_diff = parent_diff_1 + parent_diff_2
diff_file = SimpleUploadedFile('diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
parent_diff_file = SimpleUploadedFile('parent_diff', parent_diff,
content_type='text/x-patch')
repository = self.create_repository(tool_name='Test')
self.spy_on(repository.get_file_exists, call_fake=get_file_exists)
form = UploadDiffForm(
repository=repository,
data={
'basedir': '/',
},
files={
'path': diff_file,
'parent_diff_path': parent_diff_file,
})
self.assertTrue(form.is_valid())
diffset = form.create()
self.assertEqual(diffset.files.count(), 1)
filediff = diffset.files.get()
self.assertEqual(filediff.diff, self.DEFAULT_GIT_FILEDIFF_DATA_DIFF)
self.assertEqual(filediff.parent_diff, parent_diff_1)
self.assertIn(('/README', 'd6613f4'), saw_file_exists)
self.assertNotIn(('/UNUSED', '1234567'), saw_file_exists)
self.assertEqual(len(saw_file_exists), 1)
def test_create_with_parser_get_orig_commit_id(self):
"""Testing UploadDiffForm.create uses correct base revision returned
by DiffParser.get_orig_commit_id
"""
if not is_exe_in_path('hg'):
raise nose.SkipTest('Hg is not installed')
diff = (
b'# Node ID a6fc203fee9091ff9739c9c00cd4a6694e023f48\n'
b'# Parent 7c4735ef51a7c665b5654f1a111ae430ce84ebbd\n'
b'diff --git a/doc/readme b/doc/readme\n'
b'--- a/doc/readme\n'
b'+++ b/doc/readme\n'
b'@@ -1,3 +1,3 @@\n'
b' Hello\n'
b'-\n'
b'+...\n'
b' goodbye\n'
)
parent_diff = (
b'# Node ID 7c4735ef51a7c665b5654f1a111ae430ce84ebbd\n'
b'# Parent 661e5dd3c4938ecbe8f77e2fdfa905d70485f94c\n'
b'diff --git a/doc/newfile b/doc/newfile\n'
b'new file mode 100644\n'
b'--- /dev/null\n'
b'+++ b/doc/newfile\n'
b'@@ -0,0 +1,1 @@\n'
b'+Lorem ipsum\n'
)
diff_file = SimpleUploadedFile('diff', diff,
content_type='text/x-patch')
parent_diff_file = SimpleUploadedFile('parent_diff', parent_diff,
content_type='text/x-patch')
repository = Repository.objects.create(
name='Test HG',
path='scmtools/testdata/hg_repo',
tool=Tool.objects.get(name='Mercurial'))
form = UploadDiffForm(
repository=repository,
files={
'path': diff_file,
'parent_diff_path': parent_diff_file,
})
self.assertTrue(form.is_valid())
diffset = form.create()
self.assertEqual(diffset.files.count(), 1)
filediff = diffset.files.get()
self.assertEqual(filediff.source_revision,
'661e5dd3c4938ecbe8f77e2fdfa905d70485f94c')
def test_create_with_parent_filediff_with_move_and_no_change(self):
"""Testing UploadDiffForm.create with a parent diff consisting only
of a move/rename without content change
"""
revisions = [
b'93e6b3e8944c48737cb11a1e52b046fa30aea7a9',
b'4839fc480f47ca59cf05a9c39410ea744d1e17a2',
]
parent_diff = SimpleUploadedFile(
'parent_diff',
(b'diff --git a/foo b/bar\n'
b'similarity index 100%%\n'
b'rename from foo\n'
b'rename to bar\n'),
content_type='text/x-patch')
diff = SimpleUploadedFile(
'diff',
(b'diff --git a/bar b/bar\n'
b'index %s..%s 100644\n'
b'--- a/bar\n'
b'+++ b/bar\n'
b'@@ -1,2 +1,3 @@\n'
b' Foo\n'
b'+Bar\n') % (revisions[0], revisions[1]),
content_type='text/x-patch')
repository = self.create_repository(tool_name='Test')
self.spy_on(repository.get_file_exists,
call_fake=lambda *args, **kwargs: True)
# We will only be making one call to get_file and we can fake it out.
self.spy_on(repository.get_file,
call_fake=lambda *args, **kwargs: b'Foo\n')
self.spy_on(patch)
form = UploadDiffForm(
repository=repository,
data={
'basedir': '/',
},
files={
'path': diff,
'parent_diff_path': parent_diff,
})
self.assertTrue(form.is_valid())
diffset = form.create()
self.assertEqual(diffset.files.count(), 1)
f = diffset.files.get()
self.assertEqual(f.source_revision, revisions[0].decode('utf-8'))
self.assertEqual(f.dest_detail, revisions[1].decode('utf-8'))
# We shouldn't call out to patch because the parent diff is just a
# rename.
original_file = get_original_file(filediff=f,
request=None,
encoding_list=['ascii'])
self.assertEqual(original_file, b'Foo\n')
self.assertFalse(patch.spy.called)
patched_file = get_patched_file(source_data=original_file,
filediff=f)
self.assertEqual(patched_file, b'Foo\nBar\n')
self.assertTrue(patch.spy.called)
def test_create_with_parent_filediff_with_move_and_change(self):
"""Testing UploadDiffForm.create with a parent diff consisting of a
move/rename with content change
"""
revisions = [
b'5d36b88bb697a2d778f024048bafabd443d74503',
b'9b32edcd37a88c6ada91efc562afa637ccfdad36',
b'8a567d328293f85d68332bc693b0a98869b23b47',
]
parent_diff = SimpleUploadedFile(
'parent_diff',
(b'diff --git a/foo b/bar\n'
b'similarity index 55%%\n'
b'rename from foo\n'
b'rename to bar\n'
b'index %s..%s 100644\n'
b'--- a/foo\n'
b'+++ b/bar\n'
b'@@ -1,2 +1,3 @@\n'
b' Foo\n'
b'+Bar\n') % (revisions[0], revisions[1]),
content_type='text/x-patch')
diff = SimpleUploadedFile(
'diff',
(b'diff --git a/bar b/bar\n'
b'index %s..%s 100644\n'
b'--- a/bar\n'
b'+++ b/bar\n'
b'@@ -1,3 +1,4 @@\n'
b' Foo\n'
b' Bar\n'
b'+Baz\n') % (revisions[1], revisions[2]),
content_type='text/x-patch')
repository = self.create_repository(tool_name='Test')
self.spy_on(repository.get_file_exists,
call_fake=lambda *args, **kwargs: True)
# We will only be making one call to get_file and we can fake it out.
self.spy_on(repository.get_file,
call_fake=lambda *args, **kwargs: b'Foo\n')
self.spy_on(patch)
form = UploadDiffForm(
repository=repository,
data={
'basedir': '/',
},
files={
'path': diff,
'parent_diff_path': parent_diff,
})
self.assertTrue(form.is_valid())
diffset = form.create()
self.assertEqual(diffset.files.count(), 1)
filediff = diffset.files.get()
self.assertEqual(filediff.source_file, 'bar')
self.assertEqual(filediff.dest_file, 'bar')
self.assertEqual(filediff.source_revision, revisions[1].decode('utf-8'))
self.assertEqual(filediff.dest_detail, revisions[2].decode('utf-8'))
self.assertEqual(filediff.extra_data, {
'__parent_diff_empty': False,
'is_symlink': False,
'parent_moved': True,
'parent_source_filename': '/foo',
'parent_source_revision': revisions[0].decode('utf-8'),
'raw_delete_count': 0,
'raw_insert_count': 1,
})
original_file = get_original_file(filediff=filediff,
request=None,
encoding_list=['ascii'])
self.assertEqual(original_file, b'Foo\nBar\n')
self.assertTrue(patch.spy.called)
patched_file = get_patched_file(source_data=original_file,
filediff=filediff)
self.assertEqual(patched_file, b'Foo\nBar\nBaz\n')
self.assertEqual(len(patch.spy.calls), 2)
def test_create_missing_basedir(self):
"""Testing UploadDiffForm with a missing basedir field that is
required
"""
repository = self.create_repository(tool_name='Test')
scmtool = repository.get_scmtool()
self.spy_on(repository.get_file_exists,
call_fake=lambda *args, **kwargs: True)
revisions = [
b'93e6b3e8944c48737cb11a1e52b046fa30aea7a9',
b'4839fc480f47ca59cf05a9c39410ea744d1e17a2',
]
diff = SimpleUploadedFile(
'diff',
(b'diff --git a/bar b/bar\n'
b'index %s..%s 100644\n'
b'--- a/bar\n'
b'+++ b/bar\n'
b'@@ -1,2 +1,3 @@\n'
b' Foo\n'
b'+Bar\n') % (revisions[0], revisions[1]),
content_type='text/x-patch')
try:
orig_use_abs_paths = scmtool.diffs_use_absolute_paths
scmtool.diffs_use_absolute_paths = True
form = UploadDiffForm(
repository=repository,
files={
'path': diff,
}
)
self.assertFalse(form.is_valid())
finally:
scmtool.diffs_use_absolute_paths = orig_use_abs_paths
self.assertIn('basedir', form.errors)
self.assertIn('This field is required.', form.errors['basedir'])
def test_create_with_parent_filediff_with_new_file(self):
"""Testing UploadDiffForm.create with a parent diff consisting of a
newly-introduced file
"""
revisions = [
b'0000000000000000000000000000000000000000',
b'9b32edcd37a88c6ada91efc562afa637ccfdad36',
b'8a567d328293f85d68332bc693b0a98869b23b47',
]
parent_diff = SimpleUploadedFile(
'parent_diff',
(b'diff --git a/foo b/foo\n'
b'new file mode 100644\n'
b'index %s..%s\n'
b'--- /dev/null\n'
b'+++ b/foo\n'
b'@@ -0,0 +1,2 @@\n'
b'+Foo\n'
b'+Bar\n') % (revisions[0], revisions[1]),
content_type='text/x-patch')
diff = SimpleUploadedFile(
'diff',
(b'diff --git a/foo b/foo\n'
b'index %s..%s 100644\n'
b'--- a/foo\n'
b'+++ b/foo\n'
b'@@ -1,3 +1,4 @@\n'
b' Foo\n'
b' Bar\n'
b'+Baz\n') % (revisions[1], revisions[2]),
content_type='text/x-patch')
repository = self.create_repository(tool_name='Test')
self.spy_on(repository.get_file_exists,
call_fake=lambda *args, **kwargs: True)
# We will only be making one call to get_file and we can fake it out.
self.spy_on(repository.get_file,
call_fake=lambda *args, **kwargs: b'Foo\n')
self.spy_on(patch)
form = UploadDiffForm(
repository=repository,
data={
'basedir': '/',
},
files={
'parent_diff_path': parent_diff,
'path': diff,
})
self.assertTrue(form.is_valid())
diffset = form.create()
self.assertEqual(diffset.files.count(), 1)
filediff = diffset.files.get()
self.assertEqual(filediff.source_file, 'foo')
self.assertEqual(filediff.dest_file, 'foo')
self.assertEqual(filediff.source_revision, revisions[1].decode('utf-8'))
self.assertEqual(filediff.dest_detail, revisions[2].decode('utf-8'))
self.assertEqual(filediff.extra_data, {
'__parent_diff_empty': False,
'is_symlink': False,
'parent_source_filename': '/foo',
'parent_source_revision': 'PRE-CREATION',
'raw_delete_count': 0,
'raw_insert_count': 1,
})
# Double-check the types.
self.assertIsInstance(filediff.extra_data['parent_source_filename'],
six.text_type)
self.assertIsInstance(filediff.extra_data['parent_source_revision'],
six.text_type)
original_file = get_original_file(filediff=filediff,
request=None,
encoding_list=['ascii'])
self.assertEqual(original_file, b'Foo\nBar\n')
self.assertSpyCalled(patch)
patched_file = get_patched_file(source_data=original_file,
filediff=filediff)
self.assertEqual(patched_file, b'Foo\nBar\nBaz\n')
self.assertEqual(len(patch.calls), 2)
class ValidateCommitFormTests(SpyAgency, TestCase):
"""Unit tests for ValidateCommitForm."""
fixtures = ['test_scmtools']
_PARENT_DIFF_DATA = (
b'diff --git a/README b/README\n'
b'new file mode 100644\n'
b'index 0000000..94bdd3e\n'
b'--- /dev/null\n'
b'+++ b/README\n'
b'@@ -0,0 +2 @@\n'
b'+blah blah\n'
b'+blah blah\n'
)
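    # The validation_info payloads built in the tests below are base64-encoded
    # JSON mapping each commit id to its parent and to the files its tree
    # added/removed/modified; _base64_json() at the bottom of this class
    # constructs them.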
@classmethod
def setUpClass(cls):
super(ValidateCommitFormTests, cls).setUpClass()
cls.request_factory = RequestFactory()
def setUp(self):
super(ValidateCommitFormTests, self).setUp()
self.repository = self.create_repository(tool_name='Git')
self.request = self.request_factory.get('/')
self.diff = SimpleUploadedFile('diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
def test_clean_already_validated(self):
"""Testing ValidateCommitForm.clean for a commit that has already been
validated
"""
validation_info = self._base64_json({
'r1': {
'parent_id': 'r0',
'tree': {
'added': [],
'removed': [],
'modified': [],
},
},
})
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r1',
'parent_id': 'r0',
'validation_info': validation_info,
},
files={
'diff': self.diff,
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
'validation_info': ['This commit was already validated.'],
})
def test_clean_parent_not_validated(self):
"""Testing ValidateCommitForm.clean for a commit whose parent has not
been validated
"""
validation_info = self._base64_json({
'r1': {
'parent_id': 'r0',
'tree': {
'added': [],
'removed': [],
'modified': [],
},
},
})
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r3',
'parent_id': 'r2',
'validation_info': validation_info,
},
files={
'diff': self.diff,
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
'validation_info': ['The parent commit was not validated.'],
})
def test_clean_parent_diff_subsequent_commit(self):
"""Testing ValidateCommitForm.clean with a non-empty parent diff for
a subsequent commit
"""
validation_info = self._base64_json({
'r1': {
'parent_id': 'r0',
'tree': {
'added': [],
'removed': [],
'modified': [],
},
},
})
parent_diff = SimpleUploadedFile('diff',
self._PARENT_DIFF_DATA,
content_type='text/x-patch')
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r2',
'parent_id': 'r1',
'validation_info': validation_info,
},
files={
'diff': self.diff,
'parent_diff': parent_diff,
})
self.assertTrue(form.is_valid())
def test_clean_validation_info(self):
"""Testing ValidateCommitForm.clean_validation_info"""
validation_info = self._base64_json({
'r1': {
'parent_id': 'r0',
'tree': {
'added': [],
'removed': [],
'modified': [],
},
},
})
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r2',
'parent_id': 'r1',
'validation_info': validation_info,
},
files={
'diff': self.diff,
})
self.assertTrue(form.is_valid())
def test_clean_validation_info_invalid_base64(self):
"""Testing ValidateCommitForm.clean_validation_info with
non-base64-encoded data"""
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r2',
'parent_id': 'r1',
'validation_info': 'This is not base64!',
},
files={
'diff': self.diff,
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
'validation_info': [
'Could not parse validation info "This is not base64!": '
'Incorrect padding',
],
})
def test_clean_validation_info_invalid_json(self):
"""Testing ValidateCommitForm.clean_validation_info with base64-encoded
non-json data
"""
validation_info = base64.b64encode(b'Not valid json.')
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r2',
'parent_id': 'r1',
'validation_info': validation_info,
},
files={
'diff': self.diff,
})
self.assertFalse(form.is_valid())
# Python 2 and 3 differ in the error contents you'll get when
# attempting to load non-JSON data.
if six.PY3:
expected_error = 'Expecting value: line 1 column 1 (char 0)'
else:
expected_error = 'No JSON object could be decoded'
self.assertEqual(form.errors, {
'validation_info': [
'Could not parse validation info "%s": %s'
% (validation_info.decode('utf-8'), expected_error),
],
})
def test_validate_diff(self):
"""Testing ValidateCommitForm.validate_diff"""
self.spy_on(self.repository.get_file_exists,
call_fake=lambda *args, **kwargs: True)
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r1',
'parent_id': 'r2',
},
files={
'diff': self.diff,
})
self.assertTrue(form.is_valid())
form.validate_diff()
def test_validate_diff_subsequent_commit(self):
"""Testing ValidateCommitForm.validate_diff for a subsequent commit"""
diff_content = (
b'diff --git a/foo b/foo\n'
b'index %s..%s 100644\n'
b'--- a/foo\n'
b'+++ b/foo\n'
b'@@ -0,0 +1,2 @@\n'
b'+This is not a new file.\n'
% (b'a' * 40, b'b' * 40)
)
diff = SimpleUploadedFile('diff', diff_content,
content_type='text/x-patch')
validation_info = self._base64_json({
'r1': {
'parent_id': 'r0',
'tree': {
'added': [{
'filename': 'foo',
'revision': 'a' * 40,
}],
'removed': [],
'modified': [],
},
},
})
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r2',
'parent_id': 'r1',
'validation_info': validation_info,
},
files={
'diff': diff,
})
self.assertTrue(form.is_valid())
form.validate_diff()
def test_validate_diff_missing_files(self):
"""Testing ValidateCommitForm.validate_diff for a subsequent commit
with missing files
"""
validation_info = self._base64_json({
'r1': {
'parent_id': 'r0',
'tree': {
'added': [],
'removed': [],
'modified': [],
},
},
})
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r2',
'parent_id': 'r1',
'validation_info': validation_info,
},
files={
'diff': self.diff,
})
self.assertTrue(form.is_valid())
with self.assertRaises(FileNotFoundError):
form.validate_diff()
def test_validate_diff_empty(self):
"""Testing ValidateCommitForm.validate_diff for an empty diff"""
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r1',
'parent_id': 'r0',
},
files={
'diff': SimpleUploadedFile('diff', b' ',
content_type='text/x-patch'),
})
self.assertTrue(form.is_valid())
with self.assertRaises(EmptyDiffError):
form.validate_diff()
def test_validate_diff_too_big(self):
"""Testing ValidateCommitForm.validate_diff for a diff that is too
large
"""
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r1',
'parent_id': 'r0',
},
files={
'diff': self.diff,
})
self.assertTrue(form.is_valid())
with self.assertRaises(DiffTooBigError):
with self.siteconfig_settings({'diffviewer_max_diff_size': 1},
reload_settings=False):
form.validate_diff()
def test_validate_diff_parser_error(self):
"""Testing ValidateCommitForm.validate_diff for an invalid diff"""
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r1',
'parent_id': 'r0',
},
files={
'diff': SimpleUploadedFile('diff', b'asdf',
content_type='text/x-patch'),
})
self.assertTrue(form.is_valid())
with self.assertRaises(DiffParserError):
form.validate_diff()
def _base64_json(self, data):
"""Return a Base64-encoded JSON payload.
Args:
data (object):
The data to encode to JSON.
Returns:
bytes:
The Base64-encoded JSON payload.
"""
return base64.b64encode(json.dumps(data).encode('utf-8'))
|
mit
| 726,879,804,108,407,300 | 3,316,995,821,245,601,300 | 32.415879 | 80 | 0.498529 | false |
BlueLens/bl-magi
|
tensorflow/slim/datasets/cifar10.py
|
7
|
3237
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for the Cifar10 dataset.
The dataset scripts used to create the dataset can be found at:
tensorflow/models/research/slim/datasets/download_and_convert_cifar10.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from datasets import dataset_utils
slim = tf.contrib.slim
_FILE_PATTERN = 'cifar10_%s.tfrecord'
SPLITS_TO_SIZES = {'train': 50000, 'test': 10000}
_NUM_CLASSES = 10
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A [32 x 32 x 3] color image.',
'label': 'A single integer between 0 and 9',
}
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
"""Gets a dataset tuple with instructions for reading cifar10.
Args:
split_name: A train/test split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if split_name not in SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if not reader:
reader = tf.TFRecordReader
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='png'),
'image/class/label': tf.FixedLenFeature(
[], tf.int64, default_value=tf.zeros([], dtype=tf.int64)),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image(shape=[32, 32, 3]),
'label': slim.tfexample_decoder.Tensor('image/class/label'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
labels_to_names = None
if dataset_utils.has_labels(dataset_dir):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=SPLITS_TO_SIZES[split_name],
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
num_classes=_NUM_CLASSES,
labels_to_names=labels_to_names)
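# A minimal usage sketch (the data directory is an assumption): with the
# TFRecords and labels file produced by download_and_convert_cifar10.py in
# /tmp/cifar10, the split can be consumed via the usual slim provider:
#
#   dataset = get_split('train', '/tmp/cifar10')
#   provider = slim.dataset_data_provider.DatasetDataProvider(dataset)
#   image, label = provider.get(['image', 'label'])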
|
apache-2.0
| 2,510,063,752,941,313,500 | 3,706,358,235,217,738,000 | 32.030612 | 80 | 0.69169 | false |
RasaHQ/rasa_nlu
|
rasa/core/policies/form_policy.py
|
1
|
5093
|
import logging
from typing import List, Optional, Dict, Text
from rasa.core.actions.action import ACTION_LISTEN_NAME
from rasa.core.domain import PREV_PREFIX, ACTIVE_FORM_PREFIX, Domain
from rasa.core.events import FormValidation
from rasa.core.featurizers import TrackerFeaturizer
from rasa.core.policies.memoization import MemoizationPolicy
from rasa.core.trackers import DialogueStateTracker
logger = logging.getLogger(__name__)
class FormPolicy(MemoizationPolicy):
"""Policy which handles prediction of Forms"""
ENABLE_FEATURE_STRING_COMPRESSION = True
def __init__(
self,
featurizer: Optional[TrackerFeaturizer] = None,
priority: int = 4,
lookup: Optional[Dict] = None,
) -> None:
# max history is set to 2 in order to capture
# previous meaningful action before action listen
super(FormPolicy, self).__init__(
featurizer=featurizer, priority=priority, max_history=2, lookup=lookup
)
@staticmethod
def _get_active_form_name(state):
found_forms = [
state_name[len(ACTIVE_FORM_PREFIX) :]
for state_name, prob in state.items()
if ACTIVE_FORM_PREFIX in state_name and prob > 0
]
# by construction there is only one active form
return found_forms[0] if found_forms else None
@staticmethod
def _prev_action_listen_in_state(state):
return any(
PREV_PREFIX + ACTION_LISTEN_NAME in state_name and prob > 0
for state_name, prob in state.items()
)
@staticmethod
def _modified_states(states):
"""Modify the states to
- capture previous meaningful action before action_listen
- ignore previous intent
"""
if states[0] is None:
action_before_listen = None
else:
action_before_listen = {
state_name: prob
for state_name, prob in states[0].items()
if PREV_PREFIX in state_name and prob > 0
}
return [action_before_listen, states[-1]]
def _add_states_to_lookup(
self, trackers_as_states, trackers_as_actions, domain, online=False
):
"""Add states to lookup dict"""
for states in trackers_as_states:
active_form = self._get_active_form_name(states[-1])
if active_form and self._prev_action_listen_in_state(states[-1]):
# modify the states
states = self._modified_states(states)
feature_key = self._create_feature_key(states)
# even if there are two identical feature keys
# their form will be the same
# because of `active_form_...` feature
self.lookup[feature_key] = active_form
def recall(
self,
states: List[Dict[Text, float]],
tracker: DialogueStateTracker,
domain: Domain,
) -> Optional[int]:
# modify the states
return self._recall_states(self._modified_states(states))
def state_is_unhappy(self, tracker, domain):
# since it is assumed that training stories contain
# only unhappy paths, notify the form that
# it should not be validated if predicted by other policy
tracker_as_states = self.featurizer.prediction_states([tracker], domain)
states = tracker_as_states[0]
memorized_form = self.recall(states, tracker, domain)
state_is_unhappy = (
memorized_form is not None
and memorized_form == tracker.active_form.get("name")
)
if state_is_unhappy:
logger.debug(
"There is a memorized tracker state {}, "
"added `FormValidation(False)` event"
"".format(self._modified_states(states))
)
return state_is_unhappy
def predict_action_probabilities(
self, tracker: DialogueStateTracker, domain: Domain
) -> List[float]:
"""Predicts the corresponding form action if there is an active form"""
result = [0.0] * domain.num_actions
if tracker.active_form.get("name"):
logger.debug(
"There is an active form '{}'".format(tracker.active_form["name"])
)
if tracker.latest_action_name == ACTION_LISTEN_NAME:
# predict form action after user utterance
if tracker.active_form.get("rejected"):
if self.state_is_unhappy(tracker, domain):
tracker.update(FormValidation(False))
return result
idx = domain.index_for_action(tracker.active_form["name"])
result[idx] = 1.0
elif tracker.latest_action_name == tracker.active_form.get("name"):
# predict action_listen after form action
idx = domain.index_for_action(ACTION_LISTEN_NAME)
result[idx] = 1.0
else:
logger.debug("There is no active form")
return result
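# A usage sketch (assuming a standard Rasa 1.x project layout): the policy is
# enabled from config.yml and expects the corresponding forms to be declared
# in the domain, e.g.
#
#   policies:
#     - name: FormPolicy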
|
apache-2.0
| -1,437,766,162,792,606,500 | 1,114,566,816,792,416,500 | 35.120567 | 82 | 0.596112 | false |