repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (string, 990 classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
abhidrona/gn-osc-custom | oscar/apps/wishlists/abstract_models.py | 5 | 4545 | import hashlib
import random
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from oscar.core.compat import AUTH_USER_MODEL
class AbstractWishList(models.Model):
"""
Represents a user's wish lists of products.
A user can have multiple wish lists, move products between them, etc.
"""
# Only authenticated users can have wishlists
owner = models.ForeignKey(AUTH_USER_MODEL, related_name='wishlists',
verbose_name=_('Owner'))
name = models.CharField(verbose_name=_('Name'), default=_('Default'),
max_length=255)
#: This key acts as primary key and is used instead of an int to make it
#: harder to guess
key = models.CharField(_('Key'), max_length=6, db_index=True, unique=True,
editable=False)
# Oscar core does not support public or shared wishlists at the moment, but
# all the right hooks should be there
PUBLIC, PRIVATE, SHARED = ('Public', 'Private', 'Shared')
VISIBILITY_CHOICES = (
(PRIVATE, _('Private - Only the owner can see the wish list')),
(SHARED, _('Shared - Only the owner and people with access to the obfuscated link can see the wish list')),
(PUBLIC, _('Public - Everybody can see the wish list')),
)
visibility = models.CharField(
_('Visibility'), max_length=20, default=PRIVATE, choices=VISIBILITY_CHOICES)
# Convention: A user can have multiple wish lists. The last created wish
# list for a user shall be her "default" wish list.
# If a UI element only allows adding to a wish list without
# specifying which one, one shall use the default one.
# That is a rare enough case to handle it by convention instead of a
# BooleanField.
date_created = models.DateTimeField(
_('Date created'), auto_now_add=True, editable=False)
def __unicode__(self):
return u"%s's Wish List '%s'" % (self.owner, self.name)
def save(self, *args, **kwargs):
if not self.pk or kwargs.get('force_insert', False):
self.key = self.__class__.random_key()
super(AbstractWishList, self).save(*args, **kwargs)
@classmethod
def random_key(cls, length=6):
"""
Get a unique random generated key based on SHA-1 and owner
"""
while True:
key = hashlib.sha1(str(random.random())).hexdigest()[:length]
if cls._default_manager.filter(key=key).count() == 0:
return key
def is_allowed_to_see(self, user):
if self.visibility in (self.PUBLIC, self.SHARED):
return True
else:
return user == self.owner
def is_allowed_to_edit(self, user):
# currently only the owner can edit her wish list
return user == self.owner
class Meta:
ordering = ('owner', 'date_created', )
verbose_name = _('Wish List')
abstract = True
def get_absolute_url(self):
return reverse('customer:wishlists-detail', kwargs={
'key': self.key})
def add(self, product):
"""
Add a product to this wishlist
"""
lines = self.lines.filter(product=product)
if len(lines) == 0:
self.lines.create(
product=product, title=product.get_title())
else:
line = lines[0]
line.quantity += 1
line.save()
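# Illustrative note (added, not part of the original Oscar module): because
# add() filters on the product first, adding the same product twice bumps the
# existing line's quantity instead of creating a duplicate line, e.g.
#
#     wishlist.add(product)   # creates a line with quantity=1
#     wishlist.add(product)   # reuses that line, quantity becomes 2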
class AbstractLine(models.Model):
"""
One entry in a wish list. Similar to order lines or basket lines.
"""
wishlist = models.ForeignKey('wishlists.WishList', related_name='lines',
verbose_name=_('Wish List'))
product = models.ForeignKey(
'catalogue.Product', verbose_name=_('Product'),
related_name='wishlists_lines', on_delete=models.SET_NULL,
blank=True, null=True)
quantity = models.PositiveIntegerField(_('Quantity'), default=1)
#: Store the title in case product gets deleted
title = models.CharField(_("Title"), max_length=255)
def __unicode__(self):
return u'%sx %s on %s' % (self.quantity, self.title,
self.wishlist.name)
def get_title(self):
if self.product:
return self.product.get_title()
else:
return self.title
class Meta:
abstract = True
verbose_name = _('Wish list line')
unique_together = (('wishlist', 'product'), )
| bsd-3-clause | 1,156,368,756,368,309,200 | 34.787402 | 115 | 0.60374 | false |
stevehof/CouchPotatoServer | couchpotato/core/media/movie/searcher.py | 5 | 17381 | from datetime import date
import random
import re
import time
import traceback
from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import simplifyString
from couchpotato.core.helpers.variable import getTitle, possibleTitles, getImdb, getIdentifier, tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.searcher.base import SearcherBase
from couchpotato.core.media.movie import MovieTypeBase
from couchpotato.environment import Env
log = CPLog(__name__)
autoload = 'MovieSearcher'
class MovieSearcher(SearcherBase, MovieTypeBase):
in_progress = False
def __init__(self):
super(MovieSearcher, self).__init__()
addEvent('movie.searcher.all', self.searchAll)
addEvent('movie.searcher.all_view', self.searchAllView)
addEvent('movie.searcher.single', self.single)
addEvent('movie.searcher.try_next_release', self.tryNextRelease)
addEvent('movie.searcher.could_be_released', self.couldBeReleased)
addEvent('searcher.correct_release', self.correctRelease)
addEvent('searcher.get_search_title', self.getSearchTitle)
addApiView('movie.searcher.try_next', self.tryNextReleaseView, docs = {
'desc': 'Marks the snatched results as ignored and try the next best release',
'params': {
'media_id': {'desc': 'The id of the media'},
},
})
addApiView('movie.searcher.full_search', self.searchAllView, docs = {
'desc': 'Starts a full search for all wanted movies',
})
addApiView('movie.searcher.progress', self.getProgress, docs = {
'desc': 'Get the progress of current full search',
'return': {'type': 'object', 'example': """{
'progress': False || object, total & to_go,
}"""},
})
if self.conf('run_on_launch'):
addEvent('app.load', self.searchAll)
def searchAllView(self, **kwargs):
fireEventAsync('movie.searcher.all')
return {
'success': not self.in_progress
}
def searchAll(self):
if self.in_progress:
log.info('Search already in progress')
fireEvent('notify.frontend', type = 'movie.searcher.already_started', data = True, message = 'Full search already in progress')
return
self.in_progress = True
fireEvent('notify.frontend', type = 'movie.searcher.started', data = True, message = 'Full search started')
medias = [x['_id'] for x in fireEvent('media.with_status', 'active', with_doc = False, single = True)]
random.shuffle(medias)
total = len(medias)
self.in_progress = {
'total': total,
'to_go': total,
}
try:
search_protocols = fireEvent('searcher.protocols', single = True)
for media_id in medias:
media = fireEvent('media.get', media_id, single = True)
try:
self.single(media, search_protocols)
except IndexError:
log.error('Forcing library update for %s, if you see this often, please report: %s', (getIdentifier(media), traceback.format_exc()))
fireEvent('movie.update_info', media_id)
except:
log.error('Search failed for %s: %s', (getIdentifier(media), traceback.format_exc()))
self.in_progress['to_go'] -= 1
# Break if CP wants to shut down
if self.shuttingDown():
break
except SearchSetupError:
pass
self.in_progress = False
def single(self, movie, search_protocols = None, manual = False):
# Find out search type
try:
if not search_protocols:
search_protocols = fireEvent('searcher.protocols', single = True)
except SearchSetupError:
return
if not movie['profile_id'] or (movie['status'] == 'done' and not manual):
log.debug('Movie doesn\'t have a profile or already done, assuming in manage tab.')
return
pre_releases = fireEvent('quality.pre_releases', single = True)
release_dates = fireEvent('movie.update_release_dates', movie['_id'], merge = True)
found_releases = []
previous_releases = movie.get('releases', [])
too_early_to_search = []
default_title = getTitle(movie)
if not default_title:
log.error('No proper info found for movie, removing it from library to cause it from having more issues.')
fireEvent('media.delete', movie['_id'], single = True)
return
fireEvent('notify.frontend', type = 'movie.searcher.started', data = {'_id': movie['_id']}, message = 'Searching for "%s"' % default_title)
db = get_db()
profile = db.get('id', movie['profile_id'])
ret = False
index = 0
for q_identifier in profile.get('qualities'):
quality_custom = {
'index': index,
'quality': q_identifier,
'finish': profile['finish'][index],
'wait_for': tryInt(profile['wait_for'][index]),
'3d': profile['3d'][index] if profile.get('3d') else False
}
index += 1
if not self.conf('always_search') and not self.couldBeReleased(q_identifier in pre_releases, release_dates, movie['info']['year']):
too_early_to_search.append(q_identifier)
continue
has_better_quality = 0
# See if better quality is available
for release in movie.get('releases', []):
if release['status'] not in ['available', 'ignored', 'failed']:
is_higher = fireEvent('quality.ishigher', \
{'identifier': q_identifier, 'is_3d': quality_custom.get('3d', 0)}, \
{'identifier': release['quality'], 'is_3d': release.get('is_3d', 0)}, \
profile, single = True)
if is_higher != 'higher':
has_better_quality += 1
# Don't search for quality lower then already available.
if has_better_quality > 0:
log.info('Better quality (%s) already available or snatched for %s', (q_identifier, default_title))
fireEvent('media.restatus', movie['_id'])
break
quality = fireEvent('quality.single', identifier = q_identifier, single = True)
log.info('Search for %s in %s', (default_title, quality['label']))
# Extend quality with profile customs
quality['custom'] = quality_custom
results = fireEvent('searcher.search', search_protocols, movie, quality, single = True) or []
if len(results) == 0:
log.debug('Nothing found for %s in %s', (default_title, quality['label']))
# Check if movie isn't deleted while searching
if not fireEvent('media.get', movie.get('_id'), single = True):
break
# Add them to this movie releases list
found_releases += fireEvent('release.create_from_search', results, movie, quality, single = True)
# Try find a valid result and download it
if fireEvent('release.try_download_result', results, movie, quality_custom, manual, single = True):
ret = True
# Remove releases that aren't found anymore
temp_previous_releases = []
for release in previous_releases:
if release.get('status') == 'available' and release.get('identifier') not in found_releases:
fireEvent('release.delete', release.get('_id'), single = True)
else:
temp_previous_releases.append(release)
previous_releases = temp_previous_releases
del temp_previous_releases
# Break if CP wants to shut down
if self.shuttingDown() or ret:
break
if len(too_early_to_search) > 0:
log.info2('Too early to search for %s, %s', (too_early_to_search, default_title))
fireEvent('notify.frontend', type = 'movie.searcher.ended', data = {'_id': movie['_id']})
return ret
def correctRelease(self, nzb = None, media = None, quality = None, **kwargs):
if media.get('type') != 'movie': return
media_title = fireEvent('searcher.get_search_title', media, single = True)
imdb_results = kwargs.get('imdb_results', False)
retention = Env.setting('retention', section = 'nzb')
if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0):
log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (nzb['age'], retention, nzb['name']))
return False
# Check for required and ignored words
if not fireEvent('searcher.correct_words', nzb['name'], media, single = True):
return False
preferred_quality = quality if quality else fireEvent('quality.single', identifier = quality['identifier'], single = True)
# Contains lower quality string
contains_other = fireEvent('searcher.contains_other_quality', nzb, movie_year = media['info']['year'], preferred_quality = preferred_quality, single = True)
if contains_other != False:
log.info2('Wrong: %s, looking for %s, found %s', (nzb['name'], quality['label'], [x for x in contains_other] if contains_other else 'no quality'))
return False
# Contains lower quality string
if not fireEvent('searcher.correct_3d', nzb, preferred_quality = preferred_quality, single = True):
log.info2('Wrong: %s, %slooking for %s in 3D', (nzb['name'], ('' if preferred_quality['custom'].get('3d') else 'NOT '), quality['label']))
return False
# File too small
if nzb['size'] and tryInt(preferred_quality['size_min']) > tryInt(nzb['size']):
log.info2('Wrong: "%s" is too small to be %s. %sMB instead of the minimal of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_min']))
return False
# File too large
if nzb['size'] and tryInt(preferred_quality['size_max']) < tryInt(nzb['size']):
log.info2('Wrong: "%s" is too large to be %s. %sMB instead of the maximum of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_max']))
return False
# Provider specific functions
get_more = nzb.get('get_more_info')
if get_more:
get_more(nzb)
extra_check = nzb.get('extra_check')
if extra_check and not extra_check(nzb):
return False
if imdb_results:
return True
# Check if nzb contains imdb link
if getImdb(nzb.get('description', '')) == getIdentifier(media):
return True
for raw_title in media['info']['titles']:
for movie_title in possibleTitles(raw_title):
movie_words = re.split('\W+', simplifyString(movie_title))
if fireEvent('searcher.correct_name', nzb['name'], movie_title, single = True):
# if no IMDB link, at least check year range 1
if len(movie_words) > 2 and fireEvent('searcher.correct_year', nzb['name'], media['info']['year'], 1, single = True):
return True
# if no IMDB link, at least check year
if len(movie_words) <= 2 and fireEvent('searcher.correct_year', nzb['name'], media['info']['year'], 0, single = True):
return True
log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'", (nzb['name'], media_title, media['info']['year']))
return False
def couldBeReleased(self, is_pre_release, dates, year = None):
now = int(time.time())
now_year = date.today().year
now_month = date.today().month
if (year is None or year < now_year - 1 or (year <= now_year - 1 and now_month > 4)) and (not dates or (dates.get('theater', 0) == 0 and dates.get('dvd', 0) == 0)):
return True
else:
# Don't allow movies with years too far in the future
add_year = 1 if now_month > 10 else 0 # Only allow +1 year if end of the year
if year is not None and year > (now_year + add_year):
return False
# For movies before 1972
if not dates or dates.get('theater', 0) < 0 or dates.get('dvd', 0) < 0:
return True
if is_pre_release:
# Prerelease 1 week before theaters
if dates.get('theater') - 604800 < now:
return True
else:
# 12 weeks after theater release
if dates.get('theater') > 0 and dates.get('theater') + 7257600 < now:
return True
if dates.get('dvd') > 0:
# 4 weeks before dvd release
if dates.get('dvd') - 2419200 < now:
return True
# Dvd should be released
if dates.get('dvd') < now:
return True
return False
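# Clarifying note (added, not in the original source): the constants above are
# plain second counts:
#     604800  = 7  * 86400  -> pre-releases may be searched 1 week before the theater date
#     7257600 = 84 * 86400  -> other releases 12 weeks after the theater date
#     2419200 = 28 * 86400  -> and 4 weeks before the DVD date
# e.g. with dates = {'theater': t}, a pre-release becomes searchable once
# now > t - 604800.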
def tryNextReleaseView(self, media_id = None, **kwargs):
trynext = self.tryNextRelease(media_id, manual = True)
return {
'success': trynext
}
def tryNextRelease(self, media_id, manual = False):
try:
db = get_db()
rels = fireEvent('media.with_status', ['snatched', 'done'], single = True)
for rel in rels:
rel['status'] = 'ignored'
db.update(rel)
movie_dict = fireEvent('media.get', media_id, single = True)
log.info('Trying next release for: %s', getTitle(movie_dict))
self.single(movie_dict, manual = manual)
return True
except:
log.error('Failed searching for next release: %s', traceback.format_exc())
return False
def getSearchTitle(self, media):
if media['type'] == 'movie':
return getTitle(media)
class SearchSetupError(Exception):
pass
config = [{
'name': 'moviesearcher',
'order': 20,
'groups': [
{
'tab': 'searcher',
'name': 'movie_searcher',
'label': 'Movie search',
'description': 'Search options for movies',
'advanced': True,
'options': [
{
'name': 'always_search',
'default': False,
'migrate_from': 'searcher',
'type': 'bool',
'label': 'Always search',
'description': 'Search for movies even before there is a ETA. Enabling this will probably get you a lot of fakes.',
},
{
'name': 'run_on_launch',
'migrate_from': 'searcher',
'label': 'Run on launch',
'advanced': True,
'default': 0,
'type': 'bool',
'description': 'Force run the searcher after (re)start.',
},
{
'name': 'search_on_add',
'label': 'Search after add',
'advanced': True,
'default': 1,
'type': 'bool',
'description': 'Disable this to only search for movies on cron.',
},
{
'name': 'cron_day',
'migrate_from': 'searcher',
'label': 'Day',
'advanced': True,
'default': '*',
'type': 'string',
'description': '<strong>*</strong>: Every day, <strong>*/2</strong>: Every 2 days, <strong>1</strong>: Every first of the month. See <a href="http://packages.python.org/APScheduler/cronschedule.html">APScheduler</a> for details.',
},
{
'name': 'cron_hour',
'migrate_from': 'searcher',
'label': 'Hour',
'advanced': True,
'default': random.randint(0, 23),
'type': 'string',
'description': '<strong>*</strong>: Every hour, <strong>*/8</strong>: Every 8 hours, <strong>3</strong>: At 3, midnight.',
},
{
'name': 'cron_minute',
'migrate_from': 'searcher',
'label': 'Minute',
'advanced': True,
'default': random.randint(0, 59),
'type': 'string',
'description': "Just keep it random, so the providers don't get DDOSed by every CP user on a 'full' hour."
},
],
},
],
}]
| gpl-3.0 | -3,676,888,153,977,547,300 | 38.773455 | 250 | 0.540936 | false |
sourabhdattawad/BuildingMachineLearningSystemsWithPython | ch10/threshold.py | 24 | 1187 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import numpy as np
import mahotas as mh
# Load our example image:
image = mh.imread('../SimpleImageDataset/building05.jpg')
# Convert to greyscale
image = mh.colors.rgb2gray(image, dtype=np.uint8)
# Compute a threshold value:
thresh = mh.thresholding.otsu(image)
print('Otsu threshold is {0}'.format(thresh))
# Compute the thresholded image
otsubin = (image > thresh)
print('Saving thresholded image (with Otsu threshold) to otsu-threshold.jpeg')
mh.imsave('otsu-threshold.jpeg', otsubin.astype(np.uint8) * 255)
# Execute morphological opening to smooth out the edges
otsubin = mh.open(otsubin, np.ones((15, 15)))
mh.imsave('otsu-closed.jpeg', otsubin.astype(np.uint8) * 255)
# An alternative thresholding method:
thresh = mh.thresholding.rc(image)
print('Ridley-Calvard threshold is {0}'.format(thresh))
print('Saving thresholded image (with Ridley-Calvard threshold) to rc-threshold.jpeg')
mh.imsave('rc-threshold.jpeg', (image > thresh).astype(np.uint8) * 255)
| mit | 9,102,124,804,402,839,000 | 33.911765 | 86 | 0.759899 | false |
seung-lab/Julimaps | src/archive/review/neuroglancer/roi_select.py | 2 | 3001 | import sys
from os.path import expanduser, join, isfile
import threading
from tornado import web, ioloop, httpserver
from sockjs.tornado import SockJSConnection, SockJSRouter
import json
from collections import OrderedDict
import numpy as np
clients = set()
n_messages = 0
def read_points(fn):
if isfile(fn):
print 'read_points: ' + fn
return np.genfromtxt(fn, delimiter=",", dtype=int).tolist()
else:
return []
def write_points(fn, points):
if points:
print 'write_points: ' + fn
np.savetxt(fn, points, delimiter=',', fmt='%d')
def get_points(state):
if 'points' in state['layers']['points']:
points = state['layers']['points']['points']
return [[int(round(pt)) for pt in point] for point in points]
else:
return None
def get_z(state):
return int(state['navigation']['pose']['position']['voxelCoordinates'][2])
def get_filename(z):
return str(z) + ".csv"
def set_points(state, points):
state['layers']['points'] = {'type':'point', 'points':points}
broadcast(state)
current_z = 0
class Connection(SockJSConnection):
def on_open(self, info):
"""
info is an object which contains caller IP address, query string
parameters and cookies associated with this request"""
# When new client comes in, will add it to the clients list
clients.add(self)
def on_message(self, json_state):
"""
This will call initialize_state or on_state_change depening on if it is
the first message recieved.
"""
global current_state
current_state = json.JSONDecoder(object_pairs_hook=OrderedDict).decode(json_state)
global n_messages
if not n_messages: #first message ever
print('state initialized')
n_messages += 1
def on_close(self):
# If client disconnects, remove him from the clients list
clients.remove(self)
def initialize_state(self, state):
"""
This is called once the connection is stablished
"""
return state
def on_state_change(self, state):
"""
This is called every time there is a new state available
(except the very first time).
"""
# store position
return state
# In order for the webbrowser to connect to this server
# add to the url 'stateURL':'http://localhost:9999'
router = SockJSRouter(Connection)
def broadcast(state):
"""
Use this method to broadcast a new state to all connected clients.
Without the need to wait for an `on_state_change`.
"""
router.broadcast(clients.copy(), json.dumps(state))
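# Minimal usage sketch (an assumption, not part of the original script): once a
# client has connected and sent its first state, a point annotation could be
# pushed to every viewer with
#
#     set_points(current_state, [[1024, 2048, 100]])
#
# which rewrites the 'points' layer and calls broadcast() above.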
socketApp = web.Application(router.urls)
http_server = httpserver.HTTPServer(socketApp, ssl_options={
"certfile": "./certificate.crt",
"keyfile": "./privateKey.key",
})
http_server.bind(9999) #port
http_server.start(1)
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
ioloop.IOLoop.instance().stop()
| mit | -8,480,919,320,542,387,000 | 28.135922 | 90 | 0.648117 | false |
ondrejkajinek/pyGrim | pygrim/components/config/yaml_config.py | 1 | 2427 | # coding: utf8
from __future__ import print_function
from .abstract_config import AbstractConfig
try:
    from uwsgi import log as uwsgi_log
except ImportError:
    uwsgi_log = print

from yaml import load as yaml_load, parser as yaml_parser
from yaml import SafeLoader, MappingNode
from yaml.constructor import ConstructorError


class PygrimYamlLoader(SafeLoader):
    """
    This reimplements PyYAML loader
    PyYAML loader rewrites values when multiple keys are present in dict.
    PygrimYamlLoader will turn the value into list
    """

    def construct_mapping(self, node, deep=False):
        if isinstance(node, MappingNode):
            self.flatten_mapping(node)

        if not isinstance(node, MappingNode):
            raise ConstructorError(
                None,
                None,
                "expected a mapping node, but found %s" % node.id,
                node.start_mark
            )

        mapping = {}
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            try:
                hash(key)
            except TypeError, exc:
                raise ConstructorError(
                    "while constructing a mapping",
                    node.start_mark,
                    "found unacceptable key (%s)" % exc,
                    key_node.start_mark
                )

            value = self.construct_object(value_node, deep=deep)
            if key in mapping:
                if isinstance(mapping[key], list):
                    mapping[key].append(value)
                else:
                    mapping[key] = [mapping[key], value]
            else:
                mapping[key] = value

        return mapping
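# Illustrative example (added, not part of the original module): with this
# loader a YAML document that repeats a key keeps every value instead of
# silently overwriting it:
#
#     routes:
#         handler: "home"
#         handler: "detail"
#
# yaml_load(text, Loader=PygrimYamlLoader) -> {"routes": {"handler": ["home", "detail"]}}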
class YamlConfig(AbstractConfig):

    SEPARATOR = ":"

    def _load_config(self, path):
        try:
            with open(path, "rb") as conf_in:
                yaml_string = conf_in.read()

            config = yaml_load(yaml_string, Loader=PygrimYamlLoader)
        except yaml_parser.ParserError as exc:
            uwsgi_log("Error when parsing file %r:\n\t%s" % (path, exc))
            config = {}
        except IOError as exc:
            uwsgi_log("Error when loading file %r:\n\t%s" % (path, exc))
            config = {}

        return config

    def _asdict(self):
        return self.config

    def __getitem__(self, key):
        return self.config[key]
| mit | -1,123,103,988,856,537,000 | 28.962963 | 73 | 0.552122 | false |
fogleman/xy | xy/drawing.py | 2 | 5284 | from shapely import geometry, ops
import cPickle as pickle
import math
import planner
import util
def shapely_paths(shape):
if hasattr(shape, 'coords'):
return [list(shape.coords)]
elif hasattr(shape, 'exterior'):
paths = []
paths.append(list(shape.exterior.coords))
for interior in shape.interiors:
paths.append(list(interior.coords))
return paths
elif hasattr(shape, 'geoms'):
paths = []
for child in shape.geoms:
paths.extend(shapely_paths(child))
return paths
else:
raise Exception
class Drawing(object):
def __init__(self, paths=None):
self.paths = paths or []
self._bounds = None
@staticmethod
def from_shapely(shape):
return Drawing(shapely_paths(shape))
def to_shapely(self):
return geometry.MultiLineString(self.paths)
@staticmethod
def load(path):
with open(path, 'rb') as fp:
return Drawing(pickle.load(fp))
def save(self, path):
with open(path, 'wb') as fp:
pickle.dump(self.paths, fp, -1)
@property
def bounds(self):
if not self._bounds:
points = [(x, y) for path in self.paths for x, y in path]
if points:
x1 = min(x for x, y in points)
x2 = max(x for x, y in points)
y1 = min(y for x, y in points)
y2 = max(y for x, y in points)
else:
x1 = x2 = y1 = y2 = 0
self._bounds = (x1, y1, x2, y2)
return self._bounds
@property
def width(self):
x1, y1, x2, y2 = self.bounds
return x2 - x1
@property
def height(self):
x1, y1, x2, y2 = self.bounds
return y2 - y1
def sort_paths_greedy(self, reversable=True):
return Drawing(planner.sort_paths_greedy(self.paths, reversable))
def sort_paths(self, iterations=100000, reversable=True):
return Drawing(planner.sort_paths(self.paths, iterations, reversable))
def join_paths(self, tolerance=0.05):
return Drawing(util.join_paths(self.paths, tolerance))
def remove_duplicates(self):
return Drawing(util.remove_duplicates(self.paths))
def simplify_paths(self, tolerance=0.05):
return Drawing(util.simplify_paths(self.paths, tolerance))
def crop(self, x1, y1, x2, y2):
box = geometry.Polygon([
(x1, y1), (x2, y1), (x2, y2), (x1, y2), (x1, y1),
])
return Drawing.from_shapely(box.intersection(self.to_shapely()))
def linemerge(self):
lines = ops.linemerge([geometry.LineString(x) for x in self.paths])
return Drawing.from_shapely(lines)
def transform(self, func):
return Drawing([[func(x, y) for x, y in path] for path in self.paths])
def translate(self, dx, dy):
def func(x, y):
return (x + dx, y + dy)
return self.transform(func)
def scale(self, sx, sy):
def func(x, y):
return (x * sx, y * sy)
return self.transform(func)
def rotate(self, angle):
c = math.cos(math.radians(angle))
s = math.sin(math.radians(angle))
def func(x, y):
return (x * c - y * s, y * c + x * s)
return self.transform(func)
def move(self, x, y, ax, ay):
x1, y1, x2, y2 = self.bounds
dx = x1 + (x2 - x1) * ax - x
dy = y1 + (y2 - y1) * ay - y
return self.translate(-dx, -dy)
def origin(self):
return self.move(0, 0, 0, 0)
def rotate_to_fit(self, width, height, step=5):
for a in range(0, 180, step):
g = self.rotate(a)
if g.width <= width and g.height <= height:
return g.origin()
return None
def scale_to_fit(self, width, height, padding=0):
width -= padding * 2
height -= padding * 2
s = min(width / self.width, height / self.height)
return self.scale(s, s).origin()
def rotate_and_scale_to_fit(self, width, height, padding=0, step=5):
gs = []
width -= padding * 2
height -= padding * 2
for a in range(0, 180, step):
g = self.rotate(a)
s = min(width / g.width, height / g.height)
gs.append((s, a, g))
s, a, g = max(gs)
return g.scale(s, s).origin()
def render(self, scale=96/25.4, margin=10, line_width=0.5):
import cairo
x1, y1, x2, y2 = self.bounds
width = int(scale * self.width + margin * 2)
height = int(scale * self.height + margin * 2)
surface = cairo.ImageSurface(cairo.FORMAT_RGB24, width, height)
dc = cairo.Context(surface)
dc.set_line_cap(cairo.LINE_CAP_ROUND)
dc.translate(margin, height - margin)
dc.scale(scale, -scale)
dc.translate(-x1, -y1)
dc.set_line_width(line_width)
dc.set_source_rgb(1, 1, 1)
dc.paint()
# dc.arc(0, 0, 3.0 / scale, 0, 2 * math.pi)
# dc.set_source_rgb(1, 0, 0)
# dc.fill()
dc.set_source_rgb(0, 0, 0)
for path in self.paths:
dc.move_to(*path[0])
for x, y in path:
dc.line_to(x, y)
dc.stroke()
return surface
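# Hedged usage sketch (added, not from the original repository): a typical
# pipeline with this class might be
#
#     d = Drawing.load('paths.pickle')
#     d = d.join_paths().sort_paths_greedy().simplify_paths()
#     d = d.scale_to_fit(300, 200, padding=10)
#     d.render().write_to_png('preview.png')
#
# render() returns a cairo ImageSurface, so write_to_png() is pycairo's method.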
| mit | -3,669,883,366,242,197,500 | 29.900585 | 78 | 0.549016 | false |
terracoin/terracoin | qa/rpc-tests/blockchain.py | 1 | 3880 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test RPC calls related to blockchain state. Tests correspond to code in
# rpc/blockchain.cpp.
#
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.authproxy import JSONRPCException
from test_framework.util import (
initialize_chain,
assert_equal,
assert_raises,
assert_is_hex_string,
assert_is_hash_string,
start_nodes,
connect_nodes_bi,
)
class BlockchainTest(BitcoinTestFramework):
"""
Test blockchain-related RPC calls:
- gettxoutsetinfo
- verifychain
"""
def setup_chain(self):
print("Initializing test directory " + self.options.tmpdir)
initialize_chain(self.options.tmpdir)
def setup_network(self, split=False):
self.nodes = start_nodes(1, self.options.tmpdir)
self.is_network_split = False
self.sync_all()
def run_test(self):
self._test_gettxoutsetinfo()
self._test_getblockheader()
self.nodes[0].verifychain(4, 0)
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res[u'total_amount'], Decimal('3490.00000000'))
assert_equal(res[u'transactions'], 200)
assert_equal(res[u'height'], 200)
assert_equal(res[u'txouts'], 200)
size = res['disk_size']
assert size > 6400
assert size < 64000
assert_equal(len(res[u'bestblock']), 64)
assert_equal(len(res[u'hash_serialized_2']), 64)
print("Test that gettxoutsetinfo() works for blockchain with just the genesis block")
b1hash = node.getblockhash(1)
node.invalidateblock(b1hash)
res2 = node.gettxoutsetinfo()
assert_equal(res2['transactions'], 0)
assert_equal(res2['total_amount'], Decimal('0'))
assert_equal(res2['height'], 0)
assert_equal(res2['txouts'], 0)
assert_equal(res2['bestblock'], node.getblockhash(0))
assert_equal(len(res2['hash_serialized_2']), 64)
print("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block")
node.reconsiderblock(b1hash)
res3 = node.gettxoutsetinfo()
assert_equal(res['total_amount'], res3['total_amount'])
assert_equal(res['transactions'], res3['transactions'])
assert_equal(res['height'], res3['height'])
assert_equal(res['txouts'], res3['txouts'])
assert_equal(res['bestblock'], res3['bestblock'])
assert_equal(res['hash_serialized_2'], res3['hash_serialized_2'])
def _test_getblockheader(self):
node = self.nodes[0]
assert_raises(
JSONRPCException, lambda: node.getblockheader('nonsense'))
besthash = node.getbestblockhash()
secondbesthash = node.getblockhash(199)
header = node.getblockheader(besthash)
assert_equal(header['hash'], besthash)
assert_equal(header['height'], 200)
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'], secondbesthash)
assert_is_hex_string(header['chainwork'])
assert_is_hash_string(header['hash'])
assert_is_hash_string(header['previousblockhash'])
assert_is_hash_string(header['merkleroot'])
assert_is_hash_string(header['bits'], length=None)
assert isinstance(header['time'], int)
assert isinstance(header['mediantime'], int)
assert isinstance(header['nonce'], int)
assert isinstance(header['version'], int)
assert isinstance(header['difficulty'], Decimal)
if __name__ == '__main__':
BlockchainTest().main()
| mit | 8,258,459,189,932,558,000 | 33.642857 | 102 | 0.650773 | false |
boumenot/azure-linux-extensions | OSPatching/azure/storage/queueservice.py | 52 | 19818 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure import (
WindowsAzureConflictError,
WindowsAzureError,
DEV_QUEUE_HOST,
QUEUE_SERVICE_HOST_BASE,
xml_escape,
_convert_class_to_xml,
_dont_fail_not_exist,
_dont_fail_on_exist,
_get_request_body,
_int_or_none,
_parse_enum_results_list,
_parse_response,
_parse_response_for_dict_filter,
_parse_response_for_dict_prefix,
_str,
_str_or_none,
_update_request_uri_query_local_storage,
_validate_not_none,
_ERROR_CONFLICT,
)
from azure.http import (
HTTPRequest,
HTTP_RESPONSE_NO_CONTENT,
)
from azure.storage import (
Queue,
QueueEnumResults,
QueueMessagesList,
StorageServiceProperties,
_update_storage_queue_header,
)
from azure.storage.storageclient import _StorageClient
class QueueService(_StorageClient):
'''
This is the main class managing queue resources.
'''
def __init__(self, account_name=None, account_key=None, protocol='https',
host_base=QUEUE_SERVICE_HOST_BASE, dev_host=DEV_QUEUE_HOST):
'''
account_name: your storage account name, required for all operations.
account_key: your storage account key, required for all operations.
protocol: Optional. Protocol. Defaults to http.
host_base:
Optional. Live host base url. Defaults to Azure url. Override this
for on-premise.
dev_host: Optional. Dev host url. Defaults to localhost.
'''
super(QueueService, self).__init__(
account_name, account_key, protocol, host_base, dev_host)
def get_queue_service_properties(self, timeout=None):
'''
Gets the properties of a storage account's Queue Service, including
Windows Azure Storage Analytics.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response(response, StorageServiceProperties)
def list_queues(self, prefix=None, marker=None, maxresults=None,
include=None):
'''
Lists all of the queues in a given storage account.
prefix:
Filters the results to return only queues with names that begin
with the specified prefix.
marker:
A string value that identifies the portion of the list to be
returned with the next list operation. The operation returns a
NextMarker element within the response body if the list returned
was not complete. This value may then be used as a query parameter
in a subsequent call to request the next portion of the list of
queues. The marker value is opaque to the client.
maxresults:
Specifies the maximum number of queues to return. If maxresults is
not specified, the server will return up to 5,000 items.
include:
Optional. Include this parameter to specify that the container's
metadata be returned as part of the response body.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/?comp=list'
request.query = [
('prefix', _str_or_none(prefix)),
('marker', _str_or_none(marker)),
('maxresults', _int_or_none(maxresults)),
('include', _str_or_none(include))
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_enum_results_list(
response, QueueEnumResults, "Queues", Queue)
def create_queue(self, queue_name, x_ms_meta_name_values=None,
fail_on_exist=False):
'''
Creates a queue under the given account.
queue_name: name of the queue.
x_ms_meta_name_values:
Optional. A dict containing name-value pairs to associate with the
queue as metadata.
fail_on_exist: Specify whether throw exception when queue exists.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
if not fail_on_exist:
try:
response = self._perform_request(request)
if response.status == HTTP_RESPONSE_NO_CONTENT:
return False
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
response = self._perform_request(request)
if response.status == HTTP_RESPONSE_NO_CONTENT:
raise WindowsAzureConflictError(
_ERROR_CONFLICT.format(response.message))
return True
def delete_queue(self, queue_name, fail_not_exist=False):
'''
Permanently deletes the specified queue.
queue_name: Name of the queue.
fail_not_exist:
Specify whether throw exception when queue doesn't exist.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_queue_metadata(self, queue_name):
'''
Retrieves user-defined metadata and queue properties on the specified
queue. Metadata is associated with the queue as name-values pairs.
queue_name: Name of the queue.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '?comp=metadata'
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict_prefix(
response,
prefixes=['x-ms-meta', 'x-ms-approximate-messages-count'])
def set_queue_metadata(self, queue_name, x_ms_meta_name_values=None):
'''
Sets user-defined metadata on the specified queue. Metadata is
associated with the queue as name-value pairs.
queue_name: Name of the queue.
x_ms_meta_name_values:
Optional. A dict containing name-value pairs to associate with the
queue as metadata.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '?comp=metadata'
request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
self._perform_request(request)
def put_message(self, queue_name, message_text, visibilitytimeout=None,
messagettl=None):
'''
Adds a new message to the back of the message queue. A visibility
timeout can also be specified to make the message invisible until the
visibility timeout expires. A message must be in a format that can be
included in an XML request with UTF-8 encoding. The encoded message can
be up to 64KB in size for versions 2011-08-18 and newer, or 8KB in size
for previous versions.
queue_name: Name of the queue.
message_text: Message content.
visibilitytimeout:
Optional. If not specified, the default value is 0. Specifies the
new visibility timeout value, in seconds, relative to server time.
The new value must be larger than or equal to 0, and cannot be
larger than 7 days. The visibility timeout of a message cannot be
set to a value later than the expiry time. visibilitytimeout
should be set to a value smaller than the time-to-live value.
messagettl:
Optional. Specifies the time-to-live interval for the message, in
seconds. The maximum time-to-live allowed is 7 days. If this
parameter is omitted, the default time-to-live is 7 days.
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('message_text', message_text)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '/messages'
request.query = [
('visibilitytimeout', _str_or_none(visibilitytimeout)),
('messagettl', _str_or_none(messagettl))
]
request.body = _get_request_body(
'<?xml version="1.0" encoding="utf-8"?> \
<QueueMessage> \
<MessageText>' + xml_escape(_str(message_text)) + '</MessageText> \
</QueueMessage>')
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
self._perform_request(request)
def get_messages(self, queue_name, numofmessages=None,
visibilitytimeout=None):
'''
Retrieves one or more messages from the front of the queue.
queue_name: Name of the queue.
numofmessages:
Optional. A nonzero integer value that specifies the number of
messages to retrieve from the queue, up to a maximum of 32. If
fewer are visible, the visible messages are returned. By default,
a single message is retrieved from the queue with this operation.
visibilitytimeout:
Specifies the new visibility timeout value, in seconds, relative
to server time. The new value must be larger than or equal to 1
second, and cannot be larger than 7 days, or larger than 2 hours
on REST protocol versions prior to version 2011-08-18. The
visibility timeout of a message can be set to a value later than
the expiry time.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '/messages'
request.query = [
('numofmessages', _str_or_none(numofmessages)),
('visibilitytimeout', _str_or_none(visibilitytimeout))
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response(response, QueueMessagesList)
def peek_messages(self, queue_name, numofmessages=None):
'''
Retrieves one or more messages from the front of the queue, but does
not alter the visibility of the message.
queue_name: Name of the queue.
numofmessages:
Optional. A nonzero integer value that specifies the number of
messages to peek from the queue, up to a maximum of 32. By default,
a single message is peeked from the queue with this operation.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '/messages?peekonly=true'
request.query = [('numofmessages', _str_or_none(numofmessages))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response(response, QueueMessagesList)
def delete_message(self, queue_name, message_id, popreceipt):
'''
Deletes the specified message.
queue_name: Name of the queue.
message_id: Message to delete.
popreceipt:
Required. A valid pop receipt value returned from an earlier call
to the Get Messages or Update Message operation.
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('message_id', message_id)
_validate_not_none('popreceipt', popreceipt)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + \
_str(queue_name) + '/messages/' + _str(message_id) + ''
request.query = [('popreceipt', _str_or_none(popreceipt))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
self._perform_request(request)
def clear_messages(self, queue_name):
'''
Deletes all messages from the specified queue.
queue_name: Name of the queue.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '/messages'
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
self._perform_request(request)
def update_message(self, queue_name, message_id, message_text, popreceipt,
visibilitytimeout):
'''
Updates the visibility timeout of a message. You can also use this
operation to update the contents of a message.
queue_name: Name of the queue.
message_id: Message to update.
message_text: Content of message.
popreceipt:
Required. A valid pop receipt value returned from an earlier call
to the Get Messages or Update Message operation.
visibilitytimeout:
Required. Specifies the new visibility timeout value, in seconds,
relative to server time. The new value must be larger than or equal
to 0, and cannot be larger than 7 days. The visibility timeout of a
message cannot be set to a value later than the expiry time. A
message can be updated until it has been deleted or has expired.
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('message_id', message_id)
_validate_not_none('message_text', message_text)
_validate_not_none('popreceipt', popreceipt)
_validate_not_none('visibilitytimeout', visibilitytimeout)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(queue_name) + '/messages/' + _str(message_id) + ''
request.query = [
('popreceipt', _str_or_none(popreceipt)),
('visibilitytimeout', _str_or_none(visibilitytimeout))
]
request.body = _get_request_body(
'<?xml version="1.0" encoding="utf-8"?> \
<QueueMessage> \
<MessageText>' + xml_escape(_str(message_text)) + '</MessageText> \
</QueueMessage>')
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict_filter(
response,
filter=['x-ms-popreceipt', 'x-ms-time-next-visible'])
def set_queue_service_properties(self, storage_service_properties,
timeout=None):
'''
Sets the properties of a storage account's Queue service, including
Windows Azure Storage Analytics.
storage_service_properties: StorageServiceProperties object.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('storage_service_properties',
storage_service_properties)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.query = [('timeout', _int_or_none(timeout))]
request.body = _get_request_body(
_convert_class_to_xml(storage_service_properties))
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
self._perform_request(request)
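# Minimal usage sketch (illustrative, not part of the SDK source; names such as
# 'taskqueue' are made up):
#
#     queue_service = QueueService(account_name='myaccount', account_key='mykey')
#     queue_service.create_queue('taskqueue')
#     queue_service.put_message('taskqueue', 'hello world')
#     for message in queue_service.get_messages('taskqueue'):
#         queue_service.delete_message('taskqueue', message.message_id,
#                                      message.pop_receipt)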
| apache-2.0 | -8,556,447,390,257,501,000 | 42.270742 | 79 | 0.615097 | false |
ThibaultReuille/graphiti | Scripts/standard.py | 1 | 11573 | from Scripts import graphiti
from Scripts import nx
import re
import fnmatch
import json
import random
def vec2_to_str(v):
return str(v[0]) + " " + str(v[1])
def vec3_to_str(v):
return str(v[0]) + " " + str(v[1]) + " " + str(v[2])
def vec4_to_str(v):
return str(v[0]) + " " + str(v[1]) + " " + str(v[2]) + " " + str(v[3])
def random_vec4(r1, r2, r3, r4):
return [
r1[0] + random.random() * (r1[1] - r1[0]),
r2[0] + random.random() * (r2[1] - r2[0]),
r3[0] + random.random() * (r3[1] - r3[0]),
r4[0] + random.random() * (r4[1] - r4[0])
]
reserved_attributes = [ "id", "label", "src", "dst" ]
graph_attributes = [
{ 'name' : "raindance:space:title", 'type' : "string" },
]
node_attributes = [
{ 'name' : "type", 'type' : "string" },
{ 'name' : "depth", 'type' : "float" },
{ 'name' : "label", 'type' : "string"},
{ 'name' : "sgraph:score", 'type' : "float" },
{ 'name' : "sgraph:infected", 'type' : "int" },
{ 'name' : "sgraph:dga:score", 'type' : "float" },
{ 'name' : "sgraph:dga:perplexity", 'type' : "float" },
{ 'name' : "sgraph:dga:entropy", 'type' : "float" },
{ 'name' : "sgraph:first_seen", 'type' : "string" },
{ 'name' : "sgraph:last_seen", 'type' : "string" },
{ 'name' : "world:geolocation", 'type' : "vec2" },
{ 'name' : "world:country", 'type' : "string" },
{ 'name' : "og:space:position", 'type' : "vec3" },
{ 'name' : "og:space:color", 'type' : "vec4" },
{ 'name' : "og:space:locked", 'type' : "bool" },
{ 'name' : "og:space:lod", 'type' : "float" },
{ 'name' : "og:space:activity", 'type' : "float" },
{ 'name' : "og:space:mark", 'type' : "int" },
{ 'name' : "og:space:size", 'type' : "float" },
# TODO : { 'name' : "og:space:icon", 'type' : "string" },
{ 'name' : "og:network:position", 'type' : "vec3" },
{ 'name' : "og:network:color", 'type' : "vec4" },
{ 'name' : "og:network:locked", 'type' : "bool" },
{ 'name' : "og:network:lod", 'type' : "float" },
{ 'name' : "og:network:activity", 'type' : "float" },
{ 'name' : "og:network:mark", 'type' : "int" },
{ 'name' : "og:network:size", 'type' : "float" },
# TODO : { 'name' : "og:network:icon", 'type' : "string" },
{ 'name' : "og:world:geolocation", 'type' : "vec2" },
{ 'name' : "og:world:color", 'type' : "vec4" },
{ 'name' : "og:world:size", 'type' : "float" }
]
edge_attributes = [
{ 'name' : "type", 'type' : 'string' },
{ 'name' : "depth", 'type' : 'float' },
{ 'name' : "sgraph:first_seen", 'type' : 'string' },
{ 'name' : "sgraph:last_seen", 'type' : 'string' },
{ 'name' : "og:space:activity", 'type' : 'float' },
{ 'name' : "og:space:color1", 'type' : "vec4" },
{ 'name' : "og:space:color2", 'type' : "vec4" },
{ 'name' : "og:space:width", 'type' : "float" },
{ 'name' : "og:space:lod", 'type' : "float" },
# TODO : { 'name' : "og:space:icon", 'type' : "string" }
{ 'name' : "og:network:color1", 'type' : "vec4" },
{ 'name' : "og:network:color2", 'type' : "vec4" },
{ 'name' : "og:network:width", 'type' : "float" },
# TODO { 'name' : "og:network:activity", 'type' : 'float' },
# TODO { 'name' : "og:network:lod", 'type' : "float" },
# TODO : { 'name' : "og:space:icon", 'type' : "string" }
{ 'name' : "og:world:color", 'type' : "vec4" }
]
def info():
print (str(graphiti.count_nodes()) + " nodes, " + str(graphiti.count_edges()) + " edges.")
def get_attribute_info(attribute):
t = type(attribute)
if (t is bool) or (t is int) or (t is float):
return (t.__name__, str(attribute))
if t is unicode:
return ("string", attribute.encode("utf-8"))
elif t is list:
l = len(attribute)
count = sum(type(i) is float for i in attribute)
if l != count or (l == count and (l < 2 or l > 4)):
return None
return ("vec" + str(l), filter(lambda x : not(x in "[,]"), str(attribute)))
else:
return None
def prepare_node(node):
if 'label' not in node:
node['label'] = str(node['id'])
if 'depth'in node and node['depth'] == 0:
node['og:space:activity'] = 1.0
try: node['sgraph:infected'] = node['investigate']['categorization']['status']
except: pass
try: node['sgraph:dga:score'] = node['investigate']['security']['dga_score']
except: pass
return node
def prepare_edge(edge):
if 'id' not in edge:
edge['id'] = "{0} -> {1}".format(edge['src'], edge['dst'])
if "investigate" in edge:
for ee in edge['investigate']:
if 'type' in ee and ee['type'] == "co-occurrence":
edge['og:space:activity'] = 4 * ee['score']
break
return edge
def load_json(json_filename):
nodes = {}
edges = {}
global node_attributes
global edge_attributes
print ("Loading \"" + json_filename + "\" ...")
import json
with open(json_filename, "r") as infile:
data = json.load(infile)
print("Loading meta information ...")
# TODO : Find a more generic way of doing this
if "meta" in data:
if "title" in data["meta"].keys():
if data["meta"]["title"] is not None:
graphiti.set_attribute("raindance:space:title", "string", data["meta"]["title"])
if "attributes" in data:
for key in data["attributes"].keys():
if key in reserved_attributes:
continue
att_info = get_attribute_info(data["attributes"][key])
if att_info is None:
print("Error: Couldn't parse key '" + key + "' with value '" + str(data["attributes"][key]) + "'!")
continue
graphiti.set_attribute(key, att_info[0], att_info[1])
print(". Loading nodes ...")
for n in data["nodes"]:
n = prepare_node(n)
label = ""
if "label" in n:
label = n["label"].encode("utf-8")
nid = graphiti.add_node(label)
nodes[n["id"]] = nid
for key in n.keys():
if key in reserved_attributes:
continue
att_info = get_attribute_info(n[key])
if att_info is None:
print("Error: Couldn't parse key '" + key + "' with value '" + str(n[key]) + "'!")
continue
graphiti.set_node_attribute(nid, key, att_info[0], att_info[1])
print(". Loading edges ...")
for e in data["edges"]:
e = prepare_edge(e)
if "src" in e:
eid = graphiti.add_edge(nodes[e["src"]], nodes[e["dst"]])
else:
eid = graphiti.add_edge(nodes[e['source']], nodes[e['target']])
edges[e["id"]] = eid
for key in e.keys():
if key in reserved_attributes:
continue
att_info = get_attribute_info(e[key])
if att_info is None:
print("Error: Couldn't parse key '" + key + "' with value '" + str(e[key]) + "'!")
continue
graphiti.set_edge_attribute(eid, key, att_info[0], att_info[1])
if "timeline" in data:
print(". Loading timeline ...")
for c in data["timeline"]:
# TODO : Get rid of this translation phase when possible.
if c[1].startswith("graph:"):
if c[1] in ["graph:remove_node", "graph:set_node_attribute"]:
c[2]["id"] = nodes[c[2]["id"]]
elif c[1] in ["graph:remove_edge", "graph:set_edge_attribute"]:
c[2]["id"] = edges[c[2]["id"]]
elif c[1] in ["graph:add_edge"]:
c[2]["src"] = nodes[c[2]["src"]]
c[2]["dst"] = nodes[c[2]["dst"]]
graphiti.send_command(c[0], c[1], c[2])
print("Done.")
def save_json(filename):
graph = {}
graph["meta"] = dict()
graph["nodes"] = list()
graph["edges"] = list()
global node_attributes
global edge_attributes
for id in graphiti.get_node_ids():
node = dict()
node["id"] = id
node["label"] = graphiti.get_node_label(id)
for attribute in node_attributes:
name = attribute['name']
value = graphiti.get_node_attribute(id, name)
if value is None:
continue
node[name] = value
graph["nodes"].append(node)
for id in graphiti.get_edge_ids():
edge = dict()
edge["id"] = id
edge["src"] = graphiti.get_edge_node1(id)
edge["dst"] = graphiti.get_edge_node2(id)
for attribute in edge_attributes:
name = attribute['name']
value = graphiti.get_edge_attribute(id, name)
if value is None:
continue
edge[name] = value
graph["edges"].append(edge)
with open(filename, 'w') as outfile:
json.dump(graph, outfile, indent=True, sort_keys=True)
def load_nx_graph():
global node_attributes
global edge_attributes
graph = nx.Graph()
print(graphiti.get_node_ids())
for id in graphiti.get_node_ids():
graph.add_node(id)
graph.node[id]['label'] = graphiti.get_node_label(id)
for a in node_attributes:
attribute = graphiti.get_node_attribute(id, a['name'])
if not(attribute is None):
value = str(attribute)
if a['type'] == "vec2" or a['type'] == "vec3":
value = filter(lambda x: not (x in "[,]"), value)
graph.node[id][a['name']] = value
for id in graphiti.get_edge_ids():
node1 = graphiti.get_edge_node1(id)
node2 = graphiti.get_edge_node2(id)
graph.add_edge(node1, node2)
return graph
def regex_map(expression, attribute, node_flag, edge_flag, f, translate = True):
if translate:
expression = fnmatch.translate(expression)
r = re.compile(expression)
if r is None:
print("Invalid expression : <" + expression + "> !")
return
if node_flag:
for nid in graphiti.get_node_ids():
value = None
if attribute == "label":
value = graphiti.get_node_label(nid)
elif attribute == "mark":
value = graphiti.get_node_mark(nid)
elif attribute == "weight":
value = graphiti.get_node_weight(nid)
else:
value = graphiti.get_node_attribute(nid, attribute)
if value is None:
f("node", nid, None)
else:
f("node", nid, r.match(str(value)))
if edge_flag:
for eid in graphiti.get_edge_ids():
value = None
if attribute == "node1":
value = graphiti.get_edge_node1(eid)
elif attribute == "node2":
value = graphiti.get_edge_node2(eid)
else:
value = graphiti.get_edge_attribute(eid, attribute)
if value is None:
f("edge", eid, None)
else:
value_str = str(value)
f("edge", eid, r.match(value_str))
| bsd-2-clause | 698,332,109,435,509,900 | 33.443452 | 115 | 0.491057 | false |
fantuanmianshi/Daily | LeetCode/binary_tree_zigzag_level_order_traversal.py | 1 | 1312 | """
Given a binary tree, return the zigzag level order traversal of its nodes'
values (i.e., from left to right, then right to left for the next level, and
alternating between the two).
For example:
Given binary tree {3,9,20,#,#,15,7},
3
/ \
9 20
/ \
15 7
return its zigzag level order traversal as:
[
[3],
[20,9],
[15,7]
]
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param {TreeNode} root
# @return {integer[][]}
def zigzagLevelOrder(self, root):
if root is None:
return []
        # Breadth-first traversal: collect the nodes of each level, then
        # reverse the values of every other level to produce the zigzag order.
        whole_tree = []
        stack = [root]
        while stack:
new_stack = []
whole_tree.append(stack[:])
while stack:
item = stack.pop(0)
if item.left is not None:
new_stack.append(item.left)
if item.right is not None:
new_stack.append(item.right)
stack = new_stack
result = []
for i, v in enumerate(whole_tree):
if i % 2 != 0:
result.append([n.val for n in v][::-1])
else:
result.append([n.val for n in v])
return result
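# A minimal usage sketch (illustrative only): build the example tree
# {3,9,20,#,#,15,7} with the TreeNode class described above and run the solver.
#
#     root = TreeNode(3)
#     root.left = TreeNode(9)
#     root.right = TreeNode(20)
#     root.right.left = TreeNode(15)
#     root.right.right = TreeNode(7)
#     print(Solution().zigzagLevelOrder(root))   # [[3], [20, 9], [15, 7]]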
| mit | 6,709,004,252,778,821,000 | 22.854545 | 74 | 0.505335 | false |
Ziqi-Li/bknqgis | pandas/pandas/tests/indexing/test_datetime.py | 4 | 8827 | import numpy as np
import pandas as pd
from pandas import date_range, Index, DataFrame, Series, Timestamp
from pandas.util import testing as tm
class TestDatetimeIndex(object):
def test_setitem_with_datetime_tz(self):
# 16889
# support .loc with alignment and tz-aware DatetimeIndex
mask = np.array([True, False, True, False])
idx = pd.date_range('20010101', periods=4, tz='UTC')
df = pd.DataFrame({'a': np.arange(4)}, index=idx).astype('float64')
result = df.copy()
result.loc[mask, :] = df.loc[mask, :]
tm.assert_frame_equal(result, df)
result = df.copy()
result.loc[mask] = df.loc[mask]
tm.assert_frame_equal(result, df)
idx = pd.date_range('20010101', periods=4)
df = pd.DataFrame({'a': np.arange(4)}, index=idx).astype('float64')
result = df.copy()
result.loc[mask, :] = df.loc[mask, :]
tm.assert_frame_equal(result, df)
result = df.copy()
result.loc[mask] = df.loc[mask]
tm.assert_frame_equal(result, df)
def test_indexing_with_datetime_tz(self):
# 8260
# support datetime64 with tz
idx = Index(date_range('20130101', periods=3, tz='US/Eastern'),
name='foo')
dr = date_range('20130110', periods=3)
df = DataFrame({'A': idx, 'B': dr})
df['C'] = idx
df.iloc[1, 1] = pd.NaT
df.iloc[1, 2] = pd.NaT
# indexing
result = df.iloc[1]
expected = Series([Timestamp('2013-01-02 00:00:00-0500',
tz='US/Eastern'), np.nan, np.nan],
index=list('ABC'), dtype='object', name=1)
tm.assert_series_equal(result, expected)
result = df.loc[1]
expected = Series([Timestamp('2013-01-02 00:00:00-0500',
tz='US/Eastern'), np.nan, np.nan],
index=list('ABC'), dtype='object', name=1)
tm.assert_series_equal(result, expected)
# indexing - fast_xs
df = DataFrame({'a': date_range('2014-01-01', periods=10, tz='UTC')})
result = df.iloc[5]
expected = Timestamp('2014-01-06 00:00:00+0000', tz='UTC', freq='D')
assert result == expected
result = df.loc[5]
assert result == expected
# indexing - boolean
result = df[df.a > df.a[3]]
expected = df.iloc[4:]
tm.assert_frame_equal(result, expected)
# indexing - setting an element
df = DataFrame(data=pd.to_datetime(
['2015-03-30 20:12:32', '2015-03-12 00:11:11']), columns=['time'])
df['new_col'] = ['new', 'old']
df.time = df.set_index('time').index.tz_localize('UTC')
v = df[df.new_col == 'new'].set_index('time').index.tz_convert(
'US/Pacific')
# trying to set a single element on a part of a different timezone
# this converts to object
df2 = df.copy()
df2.loc[df2.new_col == 'new', 'time'] = v
expected = Series([v[0], df.loc[1, 'time']], name='time')
tm.assert_series_equal(df2.time, expected)
v = df.loc[df.new_col == 'new', 'time'] + pd.Timedelta('1s')
df.loc[df.new_col == 'new', 'time'] = v
tm.assert_series_equal(df.loc[df.new_col == 'new', 'time'], v)
def test_consistency_with_tz_aware_scalar(self):
        # xref gh-12938
# various ways of indexing the same tz-aware scalar
df = Series([Timestamp('2016-03-30 14:35:25',
tz='Europe/Brussels')]).to_frame()
df = pd.concat([df, df]).reset_index(drop=True)
expected = Timestamp('2016-03-30 14:35:25+0200',
tz='Europe/Brussels')
result = df[0][0]
assert result == expected
result = df.iloc[0, 0]
assert result == expected
result = df.loc[0, 0]
assert result == expected
result = df.iat[0, 0]
assert result == expected
result = df.at[0, 0]
assert result == expected
result = df[0].loc[0]
assert result == expected
result = df[0].at[0]
assert result == expected
def test_indexing_with_datetimeindex_tz(self):
# GH 12050
# indexing on a series with a datetimeindex with tz
index = pd.date_range('2015-01-01', periods=2, tz='utc')
ser = pd.Series(range(2), index=index,
dtype='int64')
# list-like indexing
for sel in (index, list(index)):
# getitem
tm.assert_series_equal(ser[sel], ser)
# setitem
result = ser.copy()
result[sel] = 1
expected = pd.Series(1, index=index)
tm.assert_series_equal(result, expected)
# .loc getitem
tm.assert_series_equal(ser.loc[sel], ser)
# .loc setitem
result = ser.copy()
result.loc[sel] = 1
expected = pd.Series(1, index=index)
tm.assert_series_equal(result, expected)
# single element indexing
# getitem
assert ser[index[1]] == 1
# setitem
result = ser.copy()
result[index[1]] = 5
expected = pd.Series([0, 5], index=index)
tm.assert_series_equal(result, expected)
# .loc getitem
assert ser.loc[index[1]] == 1
# .loc setitem
result = ser.copy()
result.loc[index[1]] = 5
expected = pd.Series([0, 5], index=index)
tm.assert_series_equal(result, expected)
def test_partial_setting_with_datetimelike_dtype(self):
# GH9478
# a datetimeindex alignment issue with partial setting
df = pd.DataFrame(np.arange(6.).reshape(3, 2), columns=list('AB'),
index=pd.date_range('1/1/2000', periods=3,
freq='1H'))
expected = df.copy()
expected['C'] = [expected.index[0]] + [pd.NaT, pd.NaT]
mask = df.A < 1
df.loc[mask, 'C'] = df.loc[mask].index
tm.assert_frame_equal(df, expected)
def test_loc_setitem_datetime(self):
# GH 9516
dt1 = Timestamp('20130101 09:00:00')
dt2 = Timestamp('20130101 10:00:00')
for conv in [lambda x: x, lambda x: x.to_datetime64(),
lambda x: x.to_pydatetime(), lambda x: np.datetime64(x)]:
df = pd.DataFrame()
df.loc[conv(dt1), 'one'] = 100
df.loc[conv(dt2), 'one'] = 200
expected = DataFrame({'one': [100.0, 200.0]}, index=[dt1, dt2])
tm.assert_frame_equal(df, expected)
def test_series_partial_set_datetime(self):
# GH 11497
idx = date_range('2011-01-01', '2011-01-02', freq='D', name='idx')
ser = Series([0.1, 0.2], index=idx, name='s')
result = ser.loc[[Timestamp('2011-01-01'), Timestamp('2011-01-02')]]
exp = Series([0.1, 0.2], index=idx, name='s')
tm.assert_series_equal(result, exp, check_index_type=True)
keys = [Timestamp('2011-01-02'), Timestamp('2011-01-02'),
Timestamp('2011-01-01')]
exp = Series([0.2, 0.2, 0.1], index=pd.DatetimeIndex(keys, name='idx'),
name='s')
tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)
keys = [Timestamp('2011-01-03'), Timestamp('2011-01-02'),
Timestamp('2011-01-03')]
exp = Series([np.nan, 0.2, np.nan],
index=pd.DatetimeIndex(keys, name='idx'), name='s')
tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)
def test_series_partial_set_period(self):
# GH 11497
idx = pd.period_range('2011-01-01', '2011-01-02', freq='D', name='idx')
ser = Series([0.1, 0.2], index=idx, name='s')
result = ser.loc[[pd.Period('2011-01-01', freq='D'),
pd.Period('2011-01-02', freq='D')]]
exp = Series([0.1, 0.2], index=idx, name='s')
tm.assert_series_equal(result, exp, check_index_type=True)
keys = [pd.Period('2011-01-02', freq='D'),
pd.Period('2011-01-02', freq='D'),
pd.Period('2011-01-01', freq='D')]
exp = Series([0.2, 0.2, 0.1], index=pd.PeriodIndex(keys, name='idx'),
name='s')
tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)
keys = [pd.Period('2011-01-03', freq='D'),
pd.Period('2011-01-02', freq='D'),
pd.Period('2011-01-03', freq='D')]
exp = Series([np.nan, 0.2, np.nan],
index=pd.PeriodIndex(keys, name='idx'), name='s')
result = ser.loc[keys]
tm.assert_series_equal(result, exp)
| gpl-2.0 | -7,010,948,482,511,775,000 | 34.027778 | 79 | 0.529738 | false |
jjhuff/fcc-comments | lib/nltk/tokenize/texttiling.py | 5 | 16782 | # Natural Language Toolkit: TextTiling
#
# Copyright (C) 2001-2012 NLTK Project
# Author: George Boutsioukis
#
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
import re
import math
import numpy
from nltk.tokenize.api import TokenizerI
BLOCK_COMPARISON, VOCABULARY_INTRODUCTION = range(2)
LC, HC = range(2)
DEFAULT_SMOOTHING = range(1)
class TextTilingTokenizer(TokenizerI):
"""Tokenize a document into topical sections using the TextTiling algorithm.
This algorithm detects subtopic shifts based on the analysis of lexical
co-occurrence patterns.
The process starts by tokenizing the text into pseudosentences of
a fixed size w. Then, depending on the method used, similarity
scores are assigned at sentence gaps. The algorithm proceeds by
detecting the peak differences between these scores and marking
them as boundaries. The boundaries are normalized to the closest
paragraph break and the segmented text is returned.
:param w: Pseudosentence size
:type w: int
:param k: Size (in sentences) of the block used in the block comparison method
:type k: int
:param similarity_method: The method used for determining similarity scores:
`BLOCK_COMPARISON` (default) or `VOCABULARY_INTRODUCTION`.
:type similarity_method: constant
:param stopwords: A list of stopwords that are filtered out (defaults to NLTK's stopwords corpus)
:type stopwords: list(str)
:param smoothing_method: The method used for smoothing the score plot:
`DEFAULT_SMOOTHING` (default)
:type smoothing_method: constant
:param smoothing_width: The width of the window used by the smoothing method
:type smoothing_width: int
:param smoothing_rounds: The number of smoothing passes
:type smoothing_rounds: int
:param cutoff_policy: The policy used to determine the number of boundaries:
`HC` (default) or `LC`
:type cutoff_policy: constant
"""
def __init__(self,
w=20,
k=10,
similarity_method=BLOCK_COMPARISON,
stopwords=None,
smoothing_method=DEFAULT_SMOOTHING,
smoothing_width=2,
smoothing_rounds=1,
cutoff_policy=HC,
demo_mode=False):
if stopwords is None:
from nltk.corpus import stopwords
stopwords = stopwords.words('english')
self.__dict__.update(locals())
del self.__dict__['self']
def tokenize(self, text):
"""Return a tokenized copy of *text*, where each "token" represents
a separate topic."""
lowercase_text = text.lower()
paragraph_breaks = self._mark_paragraph_breaks(text)
text_length = len(lowercase_text)
# Tokenization step starts here
# Remove punctuation
nopunct_text = ''.join([c for c in lowercase_text
if re.match("[a-z\-\' \n\t]", c)])
nopunct_par_breaks = self._mark_paragraph_breaks(nopunct_text)
tokseqs = self._divide_to_tokensequences(nopunct_text)
# The morphological stemming step mentioned in the TextTile
# paper is not implemented. A comment in the original C
# implementation states that it offers no benefit to the
# process. It might be interesting to test the existing
# stemmers though.
#words = _stem_words(words)
# Filter stopwords
for ts in tokseqs:
ts.wrdindex_list = filter(lambda wi: wi[0] not in self.stopwords,
ts.wrdindex_list)
token_table = self._create_token_table(tokseqs, nopunct_par_breaks)
# End of the Tokenization step
# Lexical score determination
if self.similarity_method == BLOCK_COMPARISON:
gap_scores = self._block_comparison(tokseqs, token_table)
elif self.similarity_method == VOCABULARY_INTRODUCTION:
raise NotImplementedError("Vocabulary introduction not implemented")
if self.smoothing_method == DEFAULT_SMOOTHING:
smooth_scores = self._smooth_scores(gap_scores)
# End of Lexical score Determination
# Boundary identification
depth_scores = self._depth_scores(smooth_scores)
segment_boundaries = self._identify_boundaries(depth_scores)
normalized_boundaries = self._normalize_boundaries(text,
segment_boundaries,
paragraph_breaks)
# End of Boundary Identification
segmented_text = []
prevb = 0
for b in normalized_boundaries:
if b == 0:
continue
segmented_text.append(text[prevb:b])
prevb = b
if prevb < text_length: # append any text that may be remaining
segmented_text.append(text[prevb:])
if not segmented_text:
segmented_text = [text]
if self.demo_mode:
return gap_scores, smooth_scores, depth_scores, segment_boundaries
return segmented_text
def _block_comparison(self, tokseqs, token_table):
"Implements the block comparison method"
def blk_frq(tok, block):
ts_occs = filter(lambda o: o[0] in block,
token_table[tok].ts_occurences)
freq = sum([tsocc[1] for tsocc in ts_occs])
return freq
gap_scores = []
numgaps = len(tokseqs)-1
for curr_gap in range(numgaps):
score_dividend, score_divisor_b1, score_divisor_b2 = 0.0, 0.0, 0.0
score = 0.0
#adjust window size for boundary conditions
if curr_gap < self.k-1:
window_size = curr_gap + 1
elif curr_gap > numgaps-self.k:
window_size = numgaps - curr_gap
else:
window_size = self.k
b1 = [ts.index
for ts in tokseqs[curr_gap-window_size+1 : curr_gap+1]]
b2 = [ts.index
for ts in tokseqs[curr_gap+1 : curr_gap+window_size+1]]
for t in token_table:
score_dividend += blk_frq(t, b1)*blk_frq(t, b2)
score_divisor_b1 += blk_frq(t, b1)**2
score_divisor_b2 += blk_frq(t, b2)**2
try:
score = score_dividend/math.sqrt(score_divisor_b1*
score_divisor_b2)
except ZeroDivisionError:
pass # score += 0.0
gap_scores.append(score)
return gap_scores
def _smooth_scores(self, gap_scores):
"Wraps the smooth function from the SciPy Cookbook"
return list(smooth(numpy.array(gap_scores[:]),
window_len = self.smoothing_width+1))
def _mark_paragraph_breaks(self, text):
"""Identifies indented text or line breaks as the beginning of
paragraphs"""
MIN_PARAGRAPH = 100
pattern = re.compile("[ \t\r\f\v]*\n[ \t\r\f\v]*\n[ \t\r\f\v]*")
matches = pattern.finditer(text)
last_break = 0
pbreaks = [0]
for pb in matches:
if pb.start()-last_break < MIN_PARAGRAPH:
continue
else:
pbreaks.append(pb.start())
last_break = pb.start()
return pbreaks
def _divide_to_tokensequences(self, text):
"Divides the text into pseudosentences of fixed size"
w = self.w
wrdindex_list = []
matches = re.finditer("\w+", text)
for match in matches:
wrdindex_list.append((match.group(), match.start()))
return [TokenSequence(i/w, wrdindex_list[i:i+w])
for i in range(0, len(wrdindex_list), w)]
def _create_token_table(self, token_sequences, par_breaks):
"Creates a table of TokenTableFields"
token_table = {}
current_par = 0
current_tok_seq = 0
pb_iter = par_breaks.__iter__()
current_par_break = pb_iter.next()
if current_par_break == 0:
try:
current_par_break = pb_iter.next() #skip break at 0
except StopIteration:
raise ValueError(
"No paragraph breaks were found(text too short perhaps?)"
)
for ts in token_sequences:
for word, index in ts.wrdindex_list:
try:
while index > current_par_break:
current_par_break = pb_iter.next()
current_par += 1
except StopIteration:
#hit bottom
pass
if word in token_table:
token_table[word].total_count += 1
if token_table[word].last_par != current_par:
token_table[word].last_par = current_par
token_table[word].par_count += 1
if token_table[word].last_tok_seq != current_tok_seq:
token_table[word].last_tok_seq = current_tok_seq
token_table[word]\
.ts_occurences.append([current_tok_seq,1])
else:
token_table[word].ts_occurences[-1][1] += 1
else: #new word
token_table[word] = TokenTableField(first_pos=index,
ts_occurences= \
[[current_tok_seq,1]],
total_count=1,
par_count=1,
last_par=current_par,
last_tok_seq= \
current_tok_seq)
current_tok_seq += 1
return token_table
def _identify_boundaries(self, depth_scores):
"""Identifies boundaries at the peaks of similarity score
differences"""
boundaries = [0 for x in depth_scores]
avg = sum(depth_scores)/len(depth_scores)
numpy.stdev = numpy.std(depth_scores)
#SB: what is the purpose of this conditional?
if self.cutoff_policy == LC:
cutoff = avg-numpy.stdev/2.0
else:
cutoff = avg-numpy.stdev/2.0
depth_tuples = zip(depth_scores, range(len(depth_scores)))
depth_tuples.sort()
depth_tuples.reverse()
hp = filter(lambda x:x[0]>cutoff, depth_tuples)
for dt in hp:
boundaries[dt[1]] = 1
for dt2 in hp: #undo if there is a boundary close already
if dt[1] != dt2[1] and abs(dt2[1]-dt[1]) < 4 \
and boundaries[dt2[1]] == 1:
boundaries[dt[1]] = 0
return boundaries
def _depth_scores(self, scores):
"""Calculates the depth of each gap, i.e. the average difference
between the left and right peaks and the gap's score"""
depth_scores = [0 for x in scores]
#clip boundaries: this holds on the rule of thumb(my thumb)
#that a section shouldn't be smaller than at least 2
#pseudosentences for small texts and around 5 for larger ones.
clip = min(max(len(scores)/10, 2), 5)
index = clip
# SB: next three lines are redundant as depth_scores is already full of zeros
for i in range(clip):
depth_scores[i] = 0
depth_scores[-i-1] = 0
for gapscore in scores[clip:-clip]:
lpeak = gapscore
for score in scores[index::-1]:
if score >= lpeak:
lpeak = score
else:
break
rpeak = gapscore
            for score in scores[index:]:
if score >= rpeak:
rpeak=score
else:
break
depth_scores[index] = lpeak + rpeak - 2*gapscore
index += 1
return depth_scores
def _normalize_boundaries(self, text, boundaries, paragraph_breaks):
"""Normalize the boundaries identified to the original text's
paragraph breaks"""
norm_boundaries = []
char_count, word_count, gaps_seen = 0, 0, 0
seen_word = False
for char in text:
char_count += 1
if char in " \t\n" and seen_word:
seen_word = False
word_count += 1
if char not in " \t\n" and not seen_word:
seen_word=True
if gaps_seen < len(boundaries) and word_count > \
(max(gaps_seen*self.w, self.w)):
if boundaries[gaps_seen] == 1:
#find closest paragraph break
best_fit = len(text)
for br in paragraph_breaks:
if best_fit > abs(br-char_count):
best_fit = abs(br-char_count)
bestbr = br
else:
break
if bestbr not in norm_boundaries: #avoid duplicates
norm_boundaries.append(bestbr)
gaps_seen += 1
return norm_boundaries
class TokenTableField(object):
"""A field in the token table holding parameters for each token,
used later in the process"""
def __init__(self,
first_pos,
ts_occurences,
total_count=1,
par_count=1,
last_par=0,
last_tok_seq=None):
self.__dict__.update(locals())
del self.__dict__['self']
class TokenSequence(object):
"A token list with its original length and its index"
def __init__(self,
index,
wrdindex_list,
original_length=None):
original_length=original_length or len(wrdindex_list)
self.__dict__.update(locals())
del self.__dict__['self']
#Pasted from the SciPy cookbook: http://www.scipy.org/Cookbook/SignalSmooth
def smooth(x,window_len=11,window='flat'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the beginning and end part of the output signal.
:param x: the input signal
:param window_len: the dimension of the smoothing window; should be an odd integer
:param window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
:return: the smoothed signal
example::
t=linspace(-2,2,0.1)
x=sin(t)+randn(len(t))*0.1
y=smooth(x)
:see also: numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve,
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
"""
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len<3:
return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s=numpy.r_[2*x[0]-x[window_len:1:-1],x,2*x[-1]-x[-1:-window_len:-1]]
#print(len(s))
if window == 'flat': #moving average
w=numpy.ones(window_len,'d')
else:
w=eval('numpy.'+window+'(window_len)')
y=numpy.convolve(w/w.sum(),s,mode='same')
return y[window_len-1:-window_len+1]
def demo(text=None):
from nltk.corpus import brown
import pylab
tt=TextTilingTokenizer(demo_mode=True)
if text is None: text=brown.raw()[:10000]
s,ss,d,b=tt.tokenize(text)
pylab.xlabel("Sentence Gap index")
pylab.ylabel("Gap Scores")
pylab.plot(range(len(s)), s, label="Gap Scores")
pylab.plot(range(len(ss)), ss, label="Smoothed Gap scores")
pylab.plot(range(len(d)), d, label="Depth scores")
pylab.stem(range(len(b)),b)
pylab.legend()
pylab.show()
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| apache-2.0 | -4,244,034,159,465,340,400 | 35.802632 | 101 | 0.554821 | false |
wileeam/airflow | airflow/providers/email/operators/email.py | 4 | 2941 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import List, Optional, Union
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.utils.email import send_email
class EmailOperator(BaseOperator):
"""
Sends an email.
:param to: list of emails to send the email to. (templated)
:type to: list or string (comma or semicolon delimited)
:param subject: subject line for the email. (templated)
:type subject: str
:param html_content: content of the email, html markup
is allowed. (templated)
:type html_content: str
:param files: file names to attach in email
:type files: list
:param cc: list of recipients to be added in CC field
:type cc: list or string (comma or semicolon delimited)
:param bcc: list of recipients to be added in BCC field
:type bcc: list or string (comma or semicolon delimited)
:param mime_subtype: MIME sub content type
:type mime_subtype: str
:param mime_charset: character set parameter added to the Content-Type
header.
:type mime_charset: str
"""
template_fields = ('to', 'subject', 'html_content')
template_ext = ('.html',)
ui_color = '#e6faf9'
@apply_defaults
def __init__(
self,
to: Union[List[str], str],
subject: str,
html_content: str,
files: Optional[List] = None,
cc: Optional[Union[List[str], str]] = None,
bcc: Optional[Union[List[str], str]] = None,
mime_subtype: str = 'mixed',
mime_charset: str = 'utf-8',
*args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.to = to
self.subject = subject
self.html_content = html_content
self.files = files or []
self.cc = cc
self.bcc = bcc
self.mime_subtype = mime_subtype
self.mime_charset = mime_charset
def execute(self, context):
send_email(self.to, self.subject, self.html_content,
files=self.files, cc=self.cc, bcc=self.bcc,
mime_subtype=self.mime_subtype, mime_charset=self.mime_charset)
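# A minimal usage sketch (hypothetical task id, addresses, and DAG object; the
# SMTP connection itself is configured in Airflow and not shown here):
#
#     send_report = EmailOperator(
#         task_id='send_report',
#         to='team@example.com',
#         subject='Nightly report for {{ ds }}',
#         html_content='<p>All tasks finished.</p>',
#         dag=dag,
#     )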
| apache-2.0 | 2,045,310,632,522,141,700 | 36.705128 | 82 | 0.657599 | false |
s-gv/rnicu-webapp | stream-plot/src/galry/galrywidget.py | 2 | 35392 | import sys
import os
import re
import time
import timeit
import numpy as np
import numpy.random as rdn
from .qtools.qtpy import QtCore, QtGui
from .qtools.qtpy.QtCore import Qt, pyqtSignal
from galry import DEBUG, log_debug, log_info, log_warn
try:
from .qtools.qtpy.QtOpenGL import QGLWidget, QGLFormat
except Exception as e:
log_warn((("The Qt-OpenGL bindings are not available. "
"On Ubuntu, please install python-qt4-gl (PyQt4) or "
"python-pyside.qtopengl (PySide). "
"Original exception was: %s" % e)))
# mock QGLWidget
class QGLWidget(QtGui.QWidget):
def initializeGL(self):
pass
def paintGL(self):
pass
def updateGL(self):
pass
def resizeGL(self):
pass
QGLFormat = None
from galry import get_cursor, FpsCounter, show_window, PaintManager, \
InteractionManager, BindingManager, \
UserActionGenerator, PlotBindings, Bindings, FpsCounter, \
show_window, get_icon
__all__ = [
'GalryWidget',
'GalryTimerWidget',
'AutodestructibleWindow',
'create_custom_widget',
'create_basic_window',
'show_basic_window',
]
# # DEBUG: raise errors if Numpy arrays are unnecessarily copied
# from OpenGL.arrays import numpymodule
# try:
# numpymodule.NumpyHandler.ERROR_ON_COPY = True
# except Exception as e:
# print "WARNING: unable to set the Numpy-OpenGL copy warning"
# Set to True or to a number of milliseconds to have all windows automatically
# killed after a fixed time. It is useful for automatic debugging or
# benchmarking.
AUTODESTRUCT = False
DEFAULT_AUTODESTRUCT = 1000
# Display the FPS or not.
DISPLAY_FPS = DEBUG == True
# Default manager classes.
DEFAULT_MANAGERS = dict(
paint_manager=PaintManager,
binding_manager=BindingManager,
interaction_manager=InteractionManager,
)
# Main Galry class.
class GalryWidget(QGLWidget):
"""Efficient interactive 2D visualization widget.
This QT widget is based on OpenGL and depends on both PyQT (or PySide)
and PyOpenGL. It implements low-level mechanisms for interaction processing
and acts as a glue between the different managers (PaintManager,
BindingManager, InteractionManager).
"""
w = 600.
h = 600.
# Initialization methods
# ----------------------
def __init__(self, format=None, autosave=None, getfocus=True, **kwargs):
"""Constructor. Call `initialize` and initialize the companion classes
as well."""
if format is not None:
super(GalryWidget, self).__init__(format)
else:
super(GalryWidget, self).__init__()
self.initialized = False
self.just_initialized = False
self.i = 0
# background color as a 4-tuple (R,G,B,A)
self.bgcolor = (0, 0, 0, 0)
self.autosave = None
# default window size
# self.width, self.height = 600, 600
# FPS counter, used for debugging
self.fps_counter = FpsCounter()
self.display_fps = DISPLAY_FPS
self.activate3D = None
# widget creation parameters
self.bindings = None
self.companion_classes_initialized = False
# constrain width/height ratio when resizing of zooming
self.constrain_ratio = False
self.constrain_navigation = False
self.activate_help = True
self.activate_grid = False
self.block_refresh = False
# Capture keyboard events.
if getfocus:
self.setFocusPolicy(Qt.WheelFocus)
# Capture mouse events.
self.setMouseTracking(True)
# Capture touch events.
self.setAcceptTouchEvents = True
self.grabGesture(QtCore.Qt.PinchGesture)
self.mouse_blocked = False # True during a pinch gesture
# Initialize the objects providing the core features of the widget.
self.user_action_generator = UserActionGenerator()
self.is_fullscreen = False
self.events_to_signals = {}
# keyword arguments without "_manager" => passed to initialize
self.initialize(**kwargs)
# initialize companion classes if it has not been done in initialize
if not self.companion_classes_initialized:
self.initialize_companion_classes()
self.initialize_bindings()
# update rendering options
self.paint_manager.set_rendering_options(
activate3D=self.activate3D,
constrain_ratio=self.constrain_ratio,
)
self.autosave = autosave
def set_bindings(self, *bindings):
"""Set the interaction mode by specifying the binding object.
Several binding objects can be given for the binding manager, such that
the first one is the currently active one.
Arguments:
* bindings: a list of classes instances deriving from
Bindings.
"""
bindings = list(bindings)
if not bindings:
bindings = [PlotBindings()]
# if type(bindings) is not list and type(bindings) is not tuple:
# bindings = [bindings]
# if binding is a class, try instanciating it
for i in range(len(bindings)):
if not isinstance(bindings[i], Bindings):
bindings[i] = bindings[i]()
self.bindings = bindings
def set_companion_classes(self, **kwargs):
"""Set specified companion classes, unspecified ones are set to
default classes.
Arguments:
* **kwargs: the naming convention is: `paint_manager=PaintManager`.
The key `paint_manager` is the name the manager is accessed from
this widget and from all other companion classes. The value
is the name of the class, it should end with `Manager`.
"""
if not hasattr(self, "companion_classes"):
self.companion_classes = {}
self.companion_classes.update(kwargs)
# default companion classes
self.companion_classes.update([(k,v) for k,v in \
list(DEFAULT_MANAGERS.items()) if k not in self.companion_classes])
def initialize_bindings(self):
"""Initialize the interaction bindings."""
if self.bindings is None:
self.set_bindings()
self.binding_manager.add(*self.bindings)
def initialize_companion_classes(self):
"""Initialize companion classes."""
# default companion classes
if not getattr(self, "companion_classes", None):
self.set_companion_classes()
# create the managers
for key, val in list(self.companion_classes.items()):
log_debug("Initializing '%s'" % key)
obj = val(self)
setattr(self, key, obj)
# link all managers
for key, val in list(self.companion_classes.items()):
for child_key, child_val in list(self.companion_classes.items()):
# no self-reference
if child_key == key:
continue
obj = getattr(self, key)
setattr(obj, child_key, getattr(self, child_key))
self.interaction_manager.constrain_navigation = self.constrain_navigation
self.companion_classes_initialized = True
def initialize(self, **kwargs):
"""Initialize the widget.
Parameters such as bindings, companion_classes can be
set here, by overriding this method. If initializations must be done
after companion classes instanciation, then
self.initialize_companion_classes can be called here.
Otherwise, it will be called automatically after initialize().
"""
pass
def clear(self):
"""Clear the view."""
self.paint_manager.reset()
def reinit(self):
"""Reinitialize OpenGL.
The clear method should be called before.
"""
self.initializeGL()
self.resizeGL(self.w, self.h)
self.updateGL()
# OpenGL widget methods
# ---------------------
def initializeGL(self):
"""Initialize OpenGL parameters."""
self.paint_manager.initializeGL()
self.initialized = True
self.just_initialized = True
def paintGL(self):
"""Paint the scene.
Called as soon as the window needs to be painted (e.g. call to
`updateGL()`).
This method calls the `paint_all` method of the PaintManager.
"""
if self.just_initialized:
self.process_interaction('Initialize', do_update=False)
# paint fps
if self.display_fps:
self.paint_fps()
# paint everything
self.paint_manager.paintGL()
# compute FPS
self.fps_counter.tick()
if self.autosave:
if '%' in self.autosave:
autosave = self.autosave % self.i
else:
autosave = self.autosave
self.save_image(autosave, update=False)
self.just_initialized = False
self.i += 1
def paint_fps(self):
"""Display the FPS on the top-left of the screen."""
self.paint_manager.update_fps(int(self.fps_counter.get_fps()))
def resizeGL(self, width, height):
self.w, self.h = width, height
self.paint_manager.resizeGL(width, height)
def sizeHint(self):
return QtCore.QSize(self.w, self.h)
# Event methods
# -------------
def event(self, e):
r = super(GalryWidget, self).event(e)
if e.type() == QtCore.QEvent.Gesture:
e.accept()
gesture = e.gesture(QtCore.Qt.PinchGesture)
self.pinchEvent(gesture)
if gesture.state() == Qt.GestureStarted:
self.mouse_blocked = True
elif gesture.state() == Qt.GestureFinished:
self.mouse_blocked = False
return False
return r
def pinchEvent(self, e):
self.user_action_generator.pinchEvent(e)
self.process_interaction()
def mousePressEvent(self, e):
if self.mouse_blocked:
return
self.user_action_generator.mousePressEvent(e)
self.process_interaction()
def mouseReleaseEvent(self, e):
if self.mouse_blocked:
return
self.user_action_generator.mouseReleaseEvent(e)
self.process_interaction()
def mouseDoubleClickEvent(self, e):
if self.mouse_blocked:
return
self.user_action_generator.mouseDoubleClickEvent(e)
self.process_interaction()
def mouseMoveEvent(self, e):
if self.mouse_blocked:
return
self.user_action_generator.mouseMoveEvent(e)
self.process_interaction()
def keyPressEvent(self, e):
self.user_action_generator.keyPressEvent(e)
self.process_interaction()
# Close the application when pressing Q
if e.key() == QtCore.Qt.Key_Q:
if hasattr(self, 'window'):
self.close_widget()
def keyReleaseEvent(self, e):
self.user_action_generator.keyReleaseEvent(e)
def wheelEvent(self, e):
self.user_action_generator.wheelEvent(e)
self.process_interaction()
def reset_action_generator(self):
self.user_action_generator.reset()
# Normalization methods
# ---------------------
def normalize_position(self, x, y):
"""Window coordinates ==> world coordinates."""
if not hasattr(self.paint_manager, 'renderer'):
return (0, 0)
vx, vy = self.paint_manager.renderer.viewport
x = -vx + 2 * vx * x / float(self.w)
y = -(-vy + 2 * vy * y / float(self.h))
return x, y
def normalize_diff_position(self, x, y):
"""Normalize the coordinates of a difference vector between two
points.
"""
if not hasattr(self.paint_manager, 'renderer'):
return (0, 0)
vx, vy = self.paint_manager.renderer.viewport
x = 2 * vx * x/float(self.w)
y = -2 * vy * y/float(self.h)
return x, y
def normalize_action_parameters(self, parameters):
"""Normalize points in the action parameters object in the window
coordinate system.
Arguments:
* parameters: the action parameters object, containing all
variables related to user actions.
Returns:
* parameters: the updated parameters object with normalized
coordinates.
"""
parameters["mouse_position"] = self.normalize_position(\
*parameters["mouse_position"])
parameters["mouse_position_diff"] = self.normalize_diff_position(\
*parameters["mouse_position_diff"])
parameters["mouse_press_position"] = self.normalize_position(\
*parameters["mouse_press_position"])
parameters["pinch_position"] = self.normalize_position(\
*parameters["pinch_position"])
parameters["pinch_start_position"] = self.normalize_position(\
*parameters["pinch_start_position"])
return parameters
# Signal methods
# --------------
def connect_events(self, arg1, arg2):
"""Makes a connection between a QT signal and an interaction event.
The signal parameters must correspond to the event parameters.
Arguments:
* arg1: a QT bound signal or an interaction event.
* arg2: an interaction event or a QT bound signal.
"""
if type(arg1) == str:
self.connect_event_to_signal(arg1, arg2)
elif type(arg2) == str:
self.connect_signal_to_event(arg1, arg2)
def connect_signal_to_event(self, signal, event):
"""Connect a QT signal to an interaction event.
The event parameters correspond to the signal parameters.
Arguments:
* signal: a QT signal.
* event: an InteractionEvent string.
"""
if signal is None:
raise Exception("The signal %s is not defined" % signal)
slot = lambda *args, **kwargs: \
self.process_interaction(event, args, **kwargs)
signal.connect(slot)
def connect_event_to_signal(self, event, signal):
"""Connect an interaction event to a QT signal.
The event parameters correspond to the signal parameters.
Arguments:
* event: an InteractionEvent string.
* signal: a QT signal.
"""
self.events_to_signals[event] = signal
# Binding mode methods
# --------------------
def switch_interaction_mode(self):
"""Switch the interaction mode."""
binding = self.binding_manager.switch()
# set base cursor
# self.interaction_manager.base_cursor = binding.base_cursor
return binding
def set_interaction_mode(self, mode):
"""Set the interaction mode.
Arguments:
* mode: either a class deriving from `Bindings` and which has been
specified in `set_bindings`, or directly a `Bindings` instance.
"""
binding = self.binding_manager.set(mode)
# set base cursor
# self.interaction_manager.base_cursor = binding.base_cursor
return binding
# Interaction methods
# -------------------
def get_current_action(self):
"""Return the current user action with the action parameters."""
# get current action
action = self.user_action_generator.action
# get current key if the action was KeyPress
key = self.user_action_generator.key
# get key modifier
key_modifier = self.user_action_generator.key_modifier
# retrieve action parameters and normalize using the window size
parameters = self.normalize_action_parameters(
self.user_action_generator.get_action_parameters())
return action, key, key_modifier, parameters
def get_current_event(self):
"""Return the current interaction event corresponding to the current
user action."""
# get the current interaction mode
binding = self.binding_manager.get()
# get current user action
action, key, key_modifier, parameters = self.get_current_action()
# get the associated interaction event
event, param_getter = binding.get(action, key=key,
key_modifier=key_modifier)
# get the parameter object by calling the param getter
if param_getter is not None and parameters is not None:
args = param_getter(parameters)
else:
args = None
return event, args
def set_current_cursor(self):
cursor = self.interaction_manager.get_cursor()
# if no cursor set, then use the default one in the current binding
# mode
if cursor is None:
cursor = self.binding_manager.get().get_base_cursor()
qcursor = get_cursor(cursor)
if qcursor:
self.setCursor(qcursor)
def process_interaction(self, event=None, args=None, do_update=None):
"""Process user interaction.
This method is called after each user action (mouse, keyboard...).
It finds the right action associated to the command, then the event
associated to that action.
Arguments:
* event=None: if None, the current event associated to the current
user action is retrieved. Otherwise, an event can be directly
passed here to force the trigger of any interaction event.
* args=None: the arguments of the event if event is not None.
"""
if event is None:
# get current event from current user action
event, args = self.get_current_event()
if event == 'Animate' and self.block_refresh:
return
prev_event = self.interaction_manager.prev_event
# handle interaction mode change
if event == 'SwitchInteractionMode':
binding = self.switch_interaction_mode()
log_debug("Switching interaction mode to %s." % \
binding.__class__.__name__)
# process the interaction event
self.interaction_manager.process_event(event, args)
# raise a signal if there is one associated to the current event
if event in self.events_to_signals:
self.events_to_signals[event].emit(*args)
# set cursor
self.set_current_cursor()
# clean current action (unique usage)
self.user_action_generator.clean_action()
# update the OpenGL view
if do_update is None:
do_update = (
# (not isinstance(self, GalryTimerWidget)) and
(event is not None or prev_event is not None))
if do_update:
self.updateGL()
# Miscellaneous
# -------------
def save_image(self, file=None, update=True):
"""Save a screenshot of the widget in the specified file."""
if file is None:
file = "image.png"
if update:
self.updateGL()
image = self.grabFrameBuffer()
image.save(file, "PNG")
def toggle_fullscreen(self):
self.is_fullscreen = not self.is_fullscreen
if self.is_fullscreen:
if hasattr(self.window, 'showFullScreen'):
self.window.showFullScreen()
else:
if hasattr(self.window, 'showNormal'):
self.window.showNormal()
def close_widget(self):
self.user_action_generator.close()
if hasattr(self, 'window'):
if hasattr(self.window, 'close'):
self.window.close()
# Focus methods
# -------------
def focusOutEvent(self, event):
self.user_action_generator.focusOutEvent(event)
class GalryTimerWidget(GalryWidget):
timer = None
"""Special widget with periodic timer used to update the scene at
regular intervals."""
def initialize_timer(self, dt=1.):
"""Initialize the timer.
This method *must* be called in the `initialize` method of the widget.
Arguments:
* dt=1.: the timer interval in seconds.
"""
self.t = 0.
self.dt = dt
# start simulation after initialization completes
self.timer = QtCore.QTimer()
self.timer.setInterval(dt * 1000)
self.timer.timeout.connect(self.update_callback)
self.paint_manager.t = self.t
def update_callback(self):
"""Callback function for the timer.
Calls `paint_manager.update_callback`, so this latter method should be
implemented in the paint manager. The attribute `self.t` is
available here and in the paint manager.
"""
self.t = timeit.default_timer() - self.t0
self.process_interaction('Animate', (self.t,))
def start_timer(self):
"""Start the timer."""
if self.timer:
self.t0 = timeit.default_timer()
self.timer.start()
def stop_timer(self):
"""Stop the timer."""
if self.timer:
self.timer.stop()
def showEvent(self, e):
"""Called when the window is shown (for the first time or after
minimization). It starts the timer."""
# start simulation when showing window
self.start_timer()
def hideEvent(self, e):
"""Called when the window is hidden (e.g. minimized). It stops the
timer."""
# stop simulation when hiding window
self.stop_timer()
# Basic widgets helper functions and classes
# ------------------------------------------
def create_custom_widget(bindings=None,
antialiasing=False,
constrain_ratio=False,
constrain_navigation=False,
activate_help=True,
activate_grid=False,
show_grid=False,
display_fps=False,
activate3D=False,
animation_interval=.01,
autosave=None,
getfocus=True,
**companion_classes):
"""Helper function to create a custom widget class from various parameters.
Arguments:
* bindings=None: the bindings class, instance, or a list of those.
* antialiasing=False: whether to activate antialiasing or not. It can
hurt performance.
* constrain_ratio=False: if True, the ratio is 1:1 at all times.
* constrain_navigation=True: if True, the viewbox cannot be greater
than [-1,1]^2 by default (but it can be customized in
interactionmanager.MAX_VIEWBOX).
* display_fps=False: whether to display the FPS.
* animation_interval=None: if not None, a special widget with automatic
timer update is created. This variable then refers to the time interval
between two successive updates (in seconds).
* **companion_classes: keyword arguments with the companion classes.
"""
# use the GalryTimerWidget if animation_interval is not None
if animation_interval is not None:
baseclass = GalryTimerWidget
else:
baseclass = GalryWidget
if bindings is None:
bindings = []
if type(bindings) != list and type(bindings) != tuple:
bindings = [bindings]
# create the custom widget class
class MyWidget(baseclass):
"""Automatically-created Galry widget."""
def __init__(self):
# antialiasing
if QGLFormat is not None:
format = QGLFormat()
else:
format = None
if antialiasing:
if hasattr(format, 'setSampleBuffers'):
format.setSampleBuffers(True)
super(MyWidget, self).__init__(format=format, autosave=autosave,
getfocus=getfocus)
def initialize(self):
self.set_bindings(*bindings)
self.set_companion_classes(**companion_classes)
self.constrain_ratio = constrain_ratio
self.constrain_navigation = constrain_navigation
self.activate_help = activate_help
self.activate_grid = activate_grid
self.show_grid = show_grid
self.activate3D = activate3D
self.display_fps = display_fps
self.initialize_companion_classes()
if animation_interval is not None:
self.initialize_timer(dt=animation_interval)
return MyWidget
class AutodestructibleWindow(QtGui.QMainWindow):
"""Special QT window that can be destroyed automatically after a given
timeout. Useful for automatic debugging or benchmarking."""
autodestruct = None
def __init__(self, **kwargs):
super(AutodestructibleWindow, self).__init__()
# This is important in interaction sessions: it allows the widget
# to clean everything up as soon as we close the window (otherwise
# it is just hidden).
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.initialize(**kwargs)
def set_autodestruct(self, autodestruct=None):
# by default, use global variable
if autodestruct is None:
# use the autodestruct option in command line by default
autodestruct = "autodestruct" in sys.argv
if autodestruct is False:
global AUTODESTRUCT
autodestruct = AUTODESTRUCT
# option for autodestructing the window after a fixed number of
# seconds: useful for automatic testing
if autodestruct is True:
# 3 seconds by default, if True
global DEFAULT_AUTODESTRUCT
autodestruct = DEFAULT_AUTODESTRUCT
if autodestruct:
log_info("window autodestruction in %d second(s)" % (autodestruct / 1000.))
self.autodestruct = autodestruct
def initialize(self, **kwargs):
pass
def kill(self):
if self.autodestruct:
self.timer.stop()
self.close()
def showEvent(self, e):
if self.autodestruct:
self.timer = QtCore.QTimer()
self.timer.setInterval(self.autodestruct)
self.timer.setSingleShot(True)
self.timer.timeout.connect(self.kill)
self.timer.start()
def create_basic_window(widget=None, size=None, position=(20, 20),
autodestruct=None,
toolbar=False):
"""Create a basic QT window with a Galry widget inside.
Arguments:
* widget: a class or instance deriving from GalryWidget.
* size=None: the size of the window as a tuple (width, height).
* position=(100, 100): the initial position of the window on the screen,
in pixels (x, y).
* autodestruct=None: if not None, it is the time, in seconds, before the
window closes itself.
"""
class BasicWindow(AutodestructibleWindow):
"""Automatically-created QT window."""
def initialize(self, widget=widget, size=size, position=position,
autodestruct=autodestruct):
"""Create a basic window to display a single widget.
Arguments:
* widget: a class or instance deriving from GalryWidget.
* size=None: the size of the window as a tuple (width, height).
* position=(100, 100): the initial position of the window on the screen,
in pixels (x, y).
* autodestruct=None: if not None, it is the time, in seconds, before the
window closes itself.
"""
self.set_autodestruct(autodestruct)
# default widget
if widget is None:
widget = GalryWidget()
# if widget is not an instance of GalryWidget, maybe it's a class,
# then try to instanciate it
if not isinstance(widget, GalryWidget):
widget = widget()
widget.window = self
# create widget
self.widget = widget
if toolbar:
self.add_toolbar()
# if size is None:
# size = self.widget.w, self.widget.h
if size is not None:
self.widget.w, self.widget.h = size
self.setCentralWidget(self.widget)
self.setWindowTitle("Galry")
self.move(*position)
# ensure the main window size is adjusted so that the widget size
# is equal to the specified size
self.resize(self.sizeHint())
self.show()
def toggle_toolbar(self):
            self.toolbar.setVisible(not self.toolbar.isVisible())
def add_toolbar(self):
"""Add navigation toolbar"""
# reset
reset_action = QtGui.QAction("Reset view (R)", self)
reset_action.setIcon(get_icon('home'))
self.widget.connect_events(reset_action.triggered, 'Reset')
# show grid
grid_action = QtGui.QAction("Show grid (G)", self)
grid_action.setIcon(get_icon('grid'))
self.widget.connect_events(grid_action.triggered, 'Grid')
# fullscreen
fullscreen_action = QtGui.QAction("Fullscreen (F)", self)
fullscreen_action.setIcon(get_icon('fullscreen'))
self.widget.connect_events(fullscreen_action.triggered, 'Fullscreen')
# save image
save_action = QtGui.QAction("Save image (S)", self)
save_action.setIcon(get_icon('save'))
save_action.setShortcut("S")
save_action.triggered.connect(self.save)
toolbar_action = QtGui.QAction("Toggle toolbar visibility (T)", self)
toolbar_action.setIcon(get_icon('toolbar'))
toolbar_action.setShortcut("T")
toolbar_action.triggered.connect(self.toggle_toolbar)
# self.toolbar_action = toolbar_action
# help
help_action = QtGui.QAction("Show help (H)", self)
help_action.setIcon(get_icon('help'))
self.widget.connect_events(help_action.triggered, 'Help')
# exit
exit_action = QtGui.QAction("Exit (Q)", self)
exit_action.setIcon(get_icon('exit'))
exit_action.triggered.connect(self.close)
# add toolbar
mytoolbar = QtGui.QToolBar(self.widget)
mytoolbar.setIconSize(QtCore.QSize(32, 32))
for action in [reset_action, grid_action, fullscreen_action,
toolbar_action, save_action, help_action, exit_action]:
self.addAction(action)
mytoolbar.addAction(action)
mytoolbar.setStyleSheet("""
QToolBar, QToolButton
{
background: #000000;
border-color: #000000;
color: #ffffff;
}
QToolButton
{
margin-left: 5px;
}
QToolButton:hover
{
background: #2a2a2a;
}
""")
mytoolbar.setMovable(False)
mytoolbar.setFloatable(False)
self.toolbar = mytoolbar
self.addToolBar(mytoolbar)
def save(self):
"""Open a file dialog and save the current image in the specified
PNG file."""
initial_filename = 'screen'
existing = [f for f in os.listdir('.') if f.startswith(initial_filename)]
i = 0
if existing:
for f in existing:
                    r = re.match('screen([0-9]*).png', f)
                    # Guard against filenames that start with 'screen' but do not
                    # match the numbered pattern (e.g. 'screen.png').
                    if r and r.groups()[0]:
                        i = max(i, int(r.groups()[0]))
i += 1
# if last:
# last = int(last)
# i = last + 1
filename, _ = QtGui.QFileDialog.getSaveFileName(self,
"Save the current view in a PNG image",
initial_filename + str(i) + '.png',
# '*.png',
# '*.png',
# QtGui.QFileDialog.AnyFile,
)
if filename:
self.widget.save_image(str(filename))
def closeEvent(self, e):
"""Clean up memory upon closing."""
self.widget.paint_manager.cleanup()
super(BasicWindow, self).closeEvent(e)
def contextMenuEvent(self, e):
return
return BasicWindow
def show_basic_window(widget_class=None, window_class=None, size=None,
position=(20, 20), autodestruct=None, toolbar=False, **kwargs):
"""Create a custom widget and/or window and show it immediately.
Arguments:
* widget_class=None: the class deriving from GalryWidget.
* window_class=None: the window class, deriving from `QMainWindow`.
* size=None: the size of the window as a tuple (width, height).
* position=(100, 100): the initial position of the window on the screen,
in pixels (x, y).
* autodestruct=None: if not None, it is the time, in seconds, before the
window closes itself.
* **kwargs: keyword arguments with the companion classes and other
parameters that are passed to `create_custom_widget`.
"""
# default widget class
if widget_class is None:
widget_class = create_custom_widget(**kwargs)
# defaut window class
if window_class is None:
window_class = create_basic_window(widget_class, size=size,
position=position, autodestruct=autodestruct, toolbar=toolbar,
)
# create and show window
return show_window(window_class)
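# A minimal usage sketch (assuming a PaintManager subclass 'MyPaintManager' is
# defined elsewhere; all names below are illustrative):
#
#     show_basic_window(paint_manager=MyPaintManager,
#                       bindings=PlotBindings,
#                       constrain_ratio=True,
#                       toolbar=True)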
| agpl-3.0 | -6,336,567,648,780,919,000 | 35.077472 | 89 | 0.567868 | false |
edmundgentle/schoolscript | SchoolScript/bin/Debug/pythonlib/Lib/lib2to3/fixes/fix_callable.py | 5 | 1188 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for callable().
This converts callable(obj) into isinstance(obj, collections.Callable), adding a
collections import if needed."""
# Local imports
from lib2to3 import fixer_base
from lib2to3.fixer_util import Call, Name, String, Attr, touch_import
class FixCallable(fixer_base.BaseFix):
BM_compatible = True
order = "pre"
# Ignore callable(*args) or use of keywords.
# Either could be a hint that the builtin callable() is not being used.
PATTERN = """
power< 'callable'
trailer< lpar='('
( not(arglist | argument<any '=' any>) func=any
| func=arglist<(not argument<any '=' any>) any ','> )
rpar=')' >
after=any*
>
"""
def transform(self, node, results):
func = results['func']
touch_import(None, 'collections', node=node)
args = [func.clone(), String(', ')]
args.extend(Attr(Name('collections'), Name('Callable')))
return Call(Name('isinstance'), args, prefix=node.prefix)
| gpl-2.0 | -7,807,694,230,461,200,000 | 30.108108 | 80 | 0.594276 | false |
wradlib/wradlib | wradlib/classify.py | 1 | 6293 | #!/usr/bin/env python
# Copyright (c) 2011-2020, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
Hydrometeor Classification (HMC)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:nosignatures:
:toctree: generated/
{}
"""
__all__ = ["msf_index_indep", "trapezoid", "fuzzyfi", "probability", "classify"]
__doc__ = __doc__.format("\n ".join(__all__))
import numpy as np
pr_types = {
0: ("LR", "Light Rain"),
1: ("MR", "Moderate Rain"),
2: ("HR", "Heavy Rain"),
3: ("LD", "Large Drops"),
4: ("HL", "Hail"),
5: ("RH", "Rain/Hail"),
6: ("GH", "Graupel/Hail"),
7: ("DS", "Dry Snow"),
8: ("WS", "Wet Snow"),
9: ("HC", "H Crystals"),
10: ("VC", "V Crystals"),
11: ("NP", "No Precip"),
}
def msf_index_indep(msf, idp, obs):
"""Retrieve membership function values based on independent observable
Parameters
----------
msf : :class:`numpy:numpy.ndarray`
        Array of size (hmc-classes, observables, indep-ranges, 4) containing
        the trapezoidal msf values for every hmc-class and observable within
        the independent observable range.
idp : :class:`numpy:numpy.ndarray`
Array of length of the independent observable containing the ranges
of the independent observable.
obs : :class:`numpy:numpy.ndarray`
Array of arbitrary shape containing the data of the independent
observable (eg. (rays, bins) or (scan, rays, bins)).
Returns
-------
out : :class:`numpy:numpy.ndarray`
Array of shape (hmc-classes, observables, obs.shape, 4) containing the
membership function values for every radar-bin for every hmc-class and
observable.
"""
bins = np.append(idp, idp[-1] + (idp[-1] - idp[-2]))
idx = np.digitize(obs, bins) - 1
idx_mask = np.zeros_like(idx, dtype=np.bool_)
idxm = np.ma.array(idx, mask=idx_mask)
idxm = np.ma.masked_outside(idxm, 0, bins.shape[0] - 2)
out = np.zeros((msf.shape[0], msf.shape[1], obs.size, msf.shape[-1]))
out[:, :, ~idxm.mask.flatten(), :] = msf[:, :, idxm.compressed(), :]
out = np.reshape(out, ((msf.shape[0], msf.shape[1]) + obs.shape + (msf.shape[-1],)))
return out
def trapezoid(msf, obs):
"""Calculates membership of `obs` using trapezoidal
membership functions
Parameters
----------
msf : :class:`numpy:numpy.ndarray`
Array which is of size (obs.shape, 4), containing the trapezoidal
membership function values for every `obs` point for one particular
hydrometeor class.
obs : :class:`numpy:numpy.ndarray`
Array of arbitrary size and dimensions containing
the data from which the membership shall be calculated.
Returns
-------
out : :class:`numpy:numpy.ndarray`
Array which is of (obs.shape) containing calculated membership
probabilities.
"""
out = np.zeros_like(obs)
ones = (obs >= msf[..., 1]) & (obs <= msf[..., 2])
out[ones] = 1.0
lower = (obs >= msf[..., 0]) & (obs < msf[..., 1])
out[lower] = (obs[lower] - msf[..., 0][lower]) / (
msf[..., 1][lower] - msf[..., 0][lower]
)
higher = (obs > msf[..., 2]) & (obs <= msf[..., 3])
out[higher] = (obs[higher] - msf[..., 3][higher]) / (
msf[..., 2][higher] - msf[..., 3][higher]
)
return out
def fuzzyfi(msf, obs):
"""Iterate over all hmc-classes and retrieve memberships
Parameters
----------
msf : :class:`numpy:numpy.ndarray`
Array which is of size (hmc-class, obs.shape, 4), containing the
trapezoidal membership function values for every `obs` point for
every hydrometeor class.
obs : :class:`numpy:numpy.ndarray`
Array of arbitrary size and dimensions containing
the data from which the memberships shall be calculated.
Returns
-------
out : :class:`numpy:numpy.ndarray`
Array which is of (hmc-class, obs.shape) containing calculated
membership probabilities.
"""
out = np.zeros(msf.shape[0:-1])
for i, m in enumerate(msf):
out[i] = trapezoid(m, obs)
return out
def probability(data, weights):
"""Calculate probability of hmc-class for every data bin.
Parameters
----------
data : :class:`numpy:numpy.ndarray`
Array which is of size (hmc-class, obs, data.shape), containing the
membership probability values.
weights : :class:`numpy:numpy.ndarray`
Array of length (observables) containing the weights for
each observable.
Returns
-------
out : :class:`numpy:numpy.ndarray`
Array which is of (hmc-class, data.shape) containing weighted
hmc-membership probabilities.
"""
data = data.copy()
weights = weights.copy()
maxw = np.sum(weights)
weights.shape = (1, len(weights)) + len(data.shape[2:]) * (1,)
weights = np.broadcast_to(weights, data.shape)
return np.sum(data * weights, axis=1) / maxw
def classify(data, threshold=0.0):
"""Calculate probability of hmc-class for every data bin.
Parameters
----------
data : np.ndarray
Array which is of size (hmc-class, data.shape), containing the
weighted hmc-membership probability values.
Keyword Arguments
-----------------
threshold : float
Threshold value where probability is considered no precip,
defaults to 0
Returns
-------
idx : np.ndarray
Array which is of (data.shape) containing the (sorted) index into
the hydrometeor-class.
No precip is added on the top.
vals : np.ndarray
Array which is of (data.shape) containing the (sorted) probability
scores. No precip is added on the top.
"""
data = data.copy()
shape = data.shape[0]
# handle no precipitation
nop = np.sum(data, axis=0) / data.shape[0]
mask = nop <= threshold
# add no precip field (with zero probability)
noprec = np.zeros_like(nop)
data = np.vstack((data, noprec[np.newaxis, ...]))
# sort idx and vals
idx = np.argsort(data, axis=0)
vals = np.sort(data, axis=0)
# set no precip in every class
idx[:, mask] = shape
vals[:, mask] = 1.0
return idx, vals
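# A minimal end-to-end sketch with synthetic shapes (illustrative only; array
# sizes and the weight vector are made up for demonstration):
#
#     msf = np.zeros((11, 4, 100, 4))            # (classes, observables, idp-bins, 4)
#     idp = np.linspace(-10., 30., 100)          # independent observable bins (e.g. temperature)
#     temp = np.random.uniform(-5., 25., (360, 1000))
#     obs = np.random.uniform(0., 60., (4, 360, 1000))
#     msf_val = msf_index_indep(msf, idp, temp)  # (11, 4, 360, 1000, 4)
#     fu = fuzzyfi(msf_val, obs)                 # (11, 4, 360, 1000)
#     prob = probability(fu, np.array([2., 1., 1., 1.]))   # (11, 360, 1000)
#     hmc_idx, hmc_vals = classify(prob, threshold=0.1)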
| mit | -8,411,251,589,499,235,000 | 30 | 88 | 0.598919 | false |
tsdmgz/ansible | lib/ansible/modules/windows/win_mapped_drive.py | 32 | 2613 | #!/usr/bin/python
# This file is part of Ansible
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub, actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_mapped_drive
version_added: '2.4'
short_description: maps a network drive for a user
description:
- Allows you to modify mapped network drives for individual users.
notes:
- This can only map a network drive for the current executing user and does not
allow you to set a default drive for all users of a system. Use other
Microsoft tools like GPOs to achieve this goal.
options:
letter:
description:
- The letter of the network path to map to.
- This letter must not already be in use with Windows.
required: yes
password:
description:
- The password for C(username).
path:
description:
- The UNC path to map the drive to.
- This is required if C(state=present).
- If C(state=absent) and path is not set, the module will delete the mapped
drive regardless of the target.
- If C(state=absent) and the path is set, the module will throw an error if
path does not match the target of the mapped drive.
state:
description:
- If C(state=present) will ensure the mapped drive exists.
- If C(state=absent) will ensure the mapped drive does not exist.
choices: [ absent, present ]
default: present
username:
description:
- Credentials to map the drive with.
- The username MUST include the domain or servername like SERVER\user, see
the example for more information.
author:
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: create a mapped drive under Z
win_mapped_drive:
letter: Z
path: \\domain\appdata\accounting
- name: delete any mapped drives under Z
win_mapped_drive:
letter: Z
state: absent
- name: only delete the mapped drive Z if the paths match (error is thrown otherwise)
win_mapped_drive:
letter: Z
path: \\domain\appdata\accounting
state: absent
- name: create mapped drive with local credentials
win_mapped_drive:
letter: M
path: \\SERVER\c$
username: SERVER\Administrator
password: Password
- name: create mapped drive with domain credentials
win_mapped_drive:
letter: M
path: \\domain\appdata\it
username: DOMAIN\IT
password: Password
'''
RETURN = r'''
'''
| gpl-3.0 | -7,078,713,416,485,221,000 | 27.714286 | 92 | 0.688481 | false |
caperren/Archives | OSU Coursework/CS 325 - Analysis of Algorithms/Julie Schuffort - Passed/Homework 4/activity_selection.py | 1 | 1847 | INPUT_FILE = "act.txt"
def greedy_activity_selector(activity_set):
activity_num_index = 0
activity_start_index = 1
activity_finish_index = 2
activity_set.sort(key=lambda activity: activity[activity_start_index], reverse=True)
selections = [activity_set[0][activity_num_index]]
comparison_activity_index = 0
for i in range(1, len(activity_set)):
if activity_set[i][activity_finish_index] <= activity_set[comparison_activity_index][activity_start_index]:
selections.append(activity_set[i][activity_num_index])
comparison_activity_index = i
return list(reversed(selections))
def get_activity_sets(input_filename):
file_activity_sets = []
with open(input_filename, "r") as input_file:
lines = input_file.readlines()
last_index = len(lines) - 1
line_index = -1
while line_index != last_index:
current_set = []
line_index += 1
num_activities = int(lines[line_index])
for activity_number in range(num_activities):
line_index += 1
activity = lines[line_index]
activity_split = activity.split(" ")
current_set.append(list((int(value) for value in activity_split)))
file_activity_sets.append(current_set)
return file_activity_sets
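# Sketch of the expected act.txt layout, inferred from get_activity_sets
# (not taken from the assignment hand-out): each set begins with a count
# line, followed by that many "number start finish" lines, e.g.
#
#   3
#   1 1 4
#   2 3 5
#   3 0 6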
if __name__ == "__main__":
activity_sets = get_activity_sets(INPUT_FILE)
for set_number, current_activity_set in enumerate(activity_sets):
selected_activities = greedy_activity_selector(current_activity_set)
print("Set {}".format(set_number + 1))
print("Number of activities selected = {}".format(len(selected_activities)))
print("Activities: {}".format(" ".join([str(activity) for activity in selected_activities])))
print()
| gpl-3.0 | 4,268,224,369,963,673,000 | 30.844828 | 115 | 0.628045 | false |
googleapis/googleapis-gen | google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/errors/types/resource_count_limit_exceeded_error.py | 1 | 1499 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v6.errors',
marshal='google.ads.googleads.v6',
manifest={
'ResourceCountLimitExceededErrorEnum',
},
)
class ResourceCountLimitExceededErrorEnum(proto.Message):
r"""Container for enum describing possible resource count limit
exceeded errors.
"""
class ResourceCountLimitExceededError(proto.Enum):
r"""Enum describing possible resource count limit exceeded
errors.
"""
UNSPECIFIED = 0
UNKNOWN = 1
ACCOUNT_LIMIT = 2
CAMPAIGN_LIMIT = 3
ADGROUP_LIMIT = 4
AD_GROUP_AD_LIMIT = 5
AD_GROUP_CRITERION_LIMIT = 6
SHARED_SET_LIMIT = 7
MATCHING_FUNCTION_LIMIT = 8
RESPONSE_ROW_LIMIT_EXCEEDED = 9
RESOURCE_LIMIT = 10
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 3,110,499,643,458,912,000 | 29.591837 | 74 | 0.676451 | false |
csparkresearch/ExpEYES17-Qt | SPARK17/expeyes/eyemath17.py | 1 | 5663 | # -*- coding: utf-8; mode: python; indent-tabs-mode: t; tab-width:4 -*-
'''
expEYES data analysis library using numpy and scipy
Author : Ajith Kumar B.P, [email protected]
License : GNU GPL version 3
'''
import sys, time, math
from numpy import *
import numpy.fft
from scipy import optimize
from scipy.optimize import leastsq
import scipy.optimize as optimize
def find_peak(va):
vmax = 0.0
size = len(va)
index = 0
	for i in range(1, size):  # skip index 0, the DC component
if va[i] > vmax:
vmax = va[i]
index = i
return index
#-------------------------- Fourier Transform ------------------------------------
def fft(ya, si):
'''
Returns positive half of the Fourier transform of the signal ya.
Sampling interval 'si', in milliseconds
'''
np = len(ya)
v = array(ya)
tr = abs(numpy.fft.fft(v))/np
frq = numpy.fft.fftfreq(np, si * 1.0e-3)
x = frq.reshape(2,np/2)
y = tr.reshape(2,np/2)
return x[0], y[0]
def find_frequency(x,y): # Returns the fundamental frequency using FFT
tx,ty = fft(y, x[1]-x[0])
index = find_peak(ty)
if index == 0:
return None
else:
return tx[index]
'''
m = mean(ty)
mx = max(ty)
for i in range(1,len(ty)):
if ty[i] != 0:
print 'FF', tx[i], ty[i]
if ty[i] > 5*m:
return tx[i]
return None # Could not find FFT Peak
'''
#-------------------------- Sine Fit ------------------------------------------------
def sine_erf(p,y,x):
return y - p[0] * sin(2*pi*p[1]*x+p[2])+p[3]
def sine_eval(x,p): # y = a * sin(2*pi*f*x + phi)+ offset
return p[0] * sin(2*pi*p[1]*x+p[2])-p[3]
def sineFunc(x, a1, a2, a3, a4):
return a4 + a1*sin(abs(a2*(2*pi))*x + a3)
def fit_sine(xa,ya, freq = 0): # Time in mS, V in volts, freq in Hz, accepts numpy arrays
size = len(ya)
mx = max(ya)
mn = min(ya)
amp = (mx-mn)/2
if freq == 0: # Guess frequency not given
freq = find_frequency(xa,ya)
if freq == None:
return None
#print 'guess a & freq = ', amp, freq
par = [abs(amp), freq*0.001, 0.0, 0.0] # Amp, freq, phase , offset
par, pcov = optimize.curve_fit(sineFunc, xa, ya, par)
yfit = sine_eval(xa, par)
return yfit,par
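# Illustrative usage sketch, not part of the original expEYES library:
# fitting a clean 100 Hz test signal sampled every 0.1 ms. Time is in
# milliseconds, so the fitted frequency parameter par[1] comes out in kHz.
def _fit_sine_example():
    t = arange(0, 50, 0.1)           # 50 ms of data at 0.1 ms steps
    v = 2.0 * sin(2 * pi * 0.1 * t)  # 0.1 cycles/ms == 100 Hz
    yfit, par = fit_sine(t, v)
    return par                       # roughly [2.0, 0.1, phase, offset]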
#--------------------------Damped Sine Fit ------------------------------------------------
def dsine_erf(p,y,x):
return y - p[0] * sin(2*pi*p[1]*x+p[2]) * exp(-p[4]*x) + p[3]
def dsine_eval(x,p):
return p[0] * sin(2*pi*p[1]*x+p[2]) * exp(-p[4]*x) - p[3]
def fit_dsine(xlist, ylist, freq = 0):
size = len(xlist)
xa = array(xlist, dtype=float)
ya = array(ylist, dtype=float)
amp = (max(ya)-min(ya))/2
if freq == 0:
freq = find_frequency(xa,ya)
par = [amp, freq, 0.0, 0.0, 0.1] # Amp, freq, phase , offset, decay constant
plsq = leastsq(dsine_erf, par,args=(ya,xa))
if plsq[1] > 4:
return None
yfit = dsine_eval(xa, plsq[0])
return yfit,plsq[0]
#-------------------------- Exponential Fit ----------------------------------------
def exp_erf(p,y,x):
return y - p[0] * exp(p[1]*x) + p[2]
def exp_eval(x,p):
return p[0] * exp(p[1]*x) -p[2]
def fit_exp(xlist, ylist):
size = len(xlist)
xa = array(xlist, dtype=float)
ya = array(ylist, dtype=float)
maxy = max(ya)
halfmaxy = maxy / 2.0
halftime = 1.0
for k in range(size):
if abs(ya[k] - halfmaxy) < halfmaxy/100:
halftime = xa[k]
break
par = [maxy, -halftime,0] # Amp, decay, offset
plsq = leastsq(exp_erf, par,args=(ya,xa))
if plsq[1] > 4:
return None
yfit = exp_eval(xa, plsq[0])
return yfit,plsq[0]
#-------------------------- Exponential Fit #2----------------------------------------
def exp_func(x, a, b, c):
return a * exp(-x/ b) + c
def fit_exp2(t,v): # accepts numpy arrays
size = len(t)
v80 = v[0] * 0.8
for k in range(size-1):
if v[k] < v80:
rc = t[k]/.223
break
pg = [v[0], rc, 0]
po, err = optimize.curve_fit(exp_func, t, v, pg)
if abs(err[0][0]) > 0.1:
return None, None
vf = po[0] * exp(-t/po[1]) + po[2]
return po, vf
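# Illustrative usage sketch, not part of the original expEYES library:
# recovering amplitude and time constant from a clean exponential decay.
def _fit_exp2_example():
    t = arange(0, 10, 0.05)
    v = 5.0 * exp(-t / 2.0)
    po, vf = fit_exp2(t, v)
    return po                        # roughly [5.0, 2.0, 0.0]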
#-------------------------- Gauss Fit ----------------------------------------
def gauss_erf(p,y,x):#height, mean, sigma
return y - p[0] * exp(-(x-p[1])**2 /(2.0 * p[2]**2))
def gauss_eval(x,p):
return p[0] * exp(-(x-p[1])**2 /(2.0 * p[2]**2))
def fit_gauss(xlist, ylist):
size = len(xlist)
xa = array(xlist, dtype=float)
ya = array(ylist, dtype=float)
maxy = max(ya)
halfmaxy = maxy / 2.0
for k in range(size):
if abs(ya[k] - maxy) < maxy/100:
mean = xa[k]
break
for k in range(size):
if abs(ya[k] - halfmaxy) < halfmaxy/10:
halfmaxima = xa[k]
break
sigma = mean - halfmaxima
par = [maxy, halfmaxima, sigma] # Amplitude, mean, sigma
plsq = leastsq(gauss_erf, par,args=(ya,xa))
if plsq[1] > 4:
return None
yfit = gauss_eval(xa, plsq[0])
return yfit,plsq[0]
#-------------------------- liniar Fit ------------------------------------------------
def line_erf(p,y,x):
return y - p[0] * x - p[1]
def line_eval(x,p): # y = a * x + b
return p[0] * x + p[1]
def fit_line(xlist,ylist): # Time in mS, V in volts
size = len(xlist)
xa = array(xlist, dtype=float)
ya = array(ylist, dtype=float)
par = [1,1] # m, c
plsq = leastsq(line_erf, par,args=(ya,xa))
if plsq[1] > 4:
return None
yfit = line_eval(xa, plsq[0])
return yfit,plsq[0]
#-------------------------- Quadratic Fit ----------------------------------------
def qdr_erf(p,y,x):
return y - (p[0] * x**2 +p[1]*x + p[2]) # ax^2 + bx + c
def qdr_eval(x,p):
return p[0] * x**2 +p[1]*x + p[2]
def fit_qdr(xlist, ylist):
size = len(xlist)
xa = array(xlist, dtype=float)
ya = array(ylist, dtype=float)
par = [1, 1, 1] # a,b,c
plsq = leastsq(qdr_erf, par,args=(ya,xa))
if plsq[1] > 4:
return None
yfit = qdr_eval(xa, plsq[0])
return yfit,plsq[0]
| mit | -2,234,818,711,183,544,800 | 25.586854 | 91 | 0.53876 | false |
nmenon/openocd | contrib/loaders/flash/fpga/xilinx_bscan_spi.py | 13 | 20095 | #!/usr/bin/python3
#
# Copyright (C) 2015 Robert Jordens <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
import unittest
import migen as mg
import migen.build.generic_platform as mb
from migen.genlib import io
from migen.build import xilinx
"""
This migen script produces proxy bitstreams to allow programming SPI flashes
behind FPGAs.
Bitstream binaries built with this script are available at:
https://github.com/jordens/bscan_spi_bitstreams
A JTAG2SPI transfer consists of:
1. an arbitrary number of 0 bits (from BYPASS registers in front of the
JTAG2SPI DR)
2. a marker bit (1) indicating the start of the JTAG2SPI transaction
3. 32 bits (big endian) describing the length of the SPI transaction
4. a number of SPI clock cycles (corresponding to 3.) with CS_N asserted
5. an arbitrary number of cycles (to shift MISO/TDO data through subsequent
BYPASS registers)
Notes:
* The JTAG2SPI DR is 1 bit long (due to different sampling edges of
{MISO,MOSI}/{TDO,TDI}).
* MOSI is TDI with half a cycle delay.
* TDO is MISO with half a cycle delay.
* CAPTURE-DR needs to be performed before SHIFT-DR on the BYPASSed TAPs in
JTAG chain to clear the BYPASS registers to 0.
https://github.com/m-labs/migen
"""
class JTAG2SPI(mg.Module):
def __init__(self, spi=None, bits=32):
self.jtag = mg.Record([
("sel", 1),
("shift", 1),
("capture", 1),
("tck", 1),
("tdi", 1),
("tdo", 1),
])
self.cs_n = mg.TSTriple()
self.clk = mg.TSTriple()
self.mosi = mg.TSTriple()
self.miso = mg.TSTriple()
# # #
self.cs_n.o.reset = mg.Constant(1)
self.mosi.o.reset_less = True
bits = mg.Signal(bits, reset_less=True)
head = mg.Signal(max=len(bits), reset=len(bits) - 1)
self.clock_domains.cd_sys = mg.ClockDomain()
self.submodules.fsm = mg.FSM("IDLE")
if spi is not None:
self.specials += [
self.cs_n.get_tristate(spi.cs_n),
self.mosi.get_tristate(spi.mosi),
self.miso.get_tristate(spi.miso),
]
if hasattr(spi, "clk"): # 7 Series drive it fixed
self.specials += self.clk.get_tristate(spi.clk)
# self.specials += io.DDROutput(1, 0, spi.clk, self.clk.o)
self.comb += [
self.cd_sys.rst.eq(self.jtag.sel & self.jtag.capture),
self.cd_sys.clk.eq(self.jtag.tck),
self.cs_n.oe.eq(self.jtag.sel),
self.clk.oe.eq(self.jtag.sel),
self.mosi.oe.eq(self.jtag.sel),
self.miso.oe.eq(0),
# Do not suppress CLK toggles outside CS_N asserted.
# Xilinx USRCCLK0 requires three dummy cycles to do anything
# https://www.xilinx.com/support/answers/52626.html
# This is fine since CS_N changes only on falling CLK.
self.clk.o.eq(~self.jtag.tck),
self.jtag.tdo.eq(self.miso.i),
]
# Latency calculation (in half cycles):
# 0 (falling TCK, rising CLK):
# JTAG adapter: set TDI
# 1 (rising TCK, falling CLK):
# JTAG2SPI: sample TDI -> set MOSI
# SPI: set MISO
# 2 (falling TCK, rising CLK):
# SPI: sample MOSI
# JTAG2SPI (BSCAN primitive): sample MISO -> set TDO
# 3 (rising TCK, falling CLK):
# JTAG adapter: sample TDO
self.fsm.act("IDLE",
mg.If(self.jtag.tdi & self.jtag.sel & self.jtag.shift,
mg.NextState("HEAD")
)
)
self.fsm.act("HEAD",
mg.If(head == 0,
mg.NextState("XFER")
)
)
self.fsm.act("XFER",
mg.If(bits == 0,
mg.NextState("IDLE")
),
)
self.sync += [
self.mosi.o.eq(self.jtag.tdi),
self.cs_n.o.eq(~self.fsm.ongoing("XFER")),
mg.If(self.fsm.ongoing("HEAD"),
bits.eq(mg.Cat(self.jtag.tdi, bits)),
head.eq(head - 1)
),
mg.If(self.fsm.ongoing("XFER"),
bits.eq(bits - 1)
)
]
class JTAG2SPITest(unittest.TestCase):
def setUp(self):
self.bits = 8
self.dut = JTAG2SPI(bits=self.bits)
def test_instantiate(self):
pass
def test_initial_conditions(self):
def check():
yield
self.assertEqual((yield self.dut.cs_n.oe), 0)
self.assertEqual((yield self.dut.mosi.oe), 0)
self.assertEqual((yield self.dut.miso.oe), 0)
self.assertEqual((yield self.dut.clk.oe), 0)
mg.run_simulation(self.dut, check())
def test_enable(self):
def check():
yield self.dut.jtag.sel.eq(1)
yield self.dut.jtag.shift.eq(1)
yield
self.assertEqual((yield self.dut.cs_n.oe), 1)
self.assertEqual((yield self.dut.mosi.oe), 1)
self.assertEqual((yield self.dut.miso.oe), 0)
self.assertEqual((yield self.dut.clk.oe), 1)
mg.run_simulation(self.dut, check())
def run_seq(self, tdi, tdo, spi=None):
yield self.dut.jtag.sel.eq(1)
yield
yield self.dut.jtag.shift.eq(1)
for di in tdi:
yield self.dut.jtag.tdi.eq(di)
yield
tdo.append((yield self.dut.jtag.tdo))
if spi is not None:
v = []
for k in "cs_n clk mosi miso".split():
t = getattr(self.dut, k)
v.append("{}>".format((yield t.o)) if (yield t.oe)
else "<{}".format((yield t.i)))
spi.append(" ".join(v))
yield self.dut.jtag.sel.eq(0)
yield
yield self.dut.jtag.shift.eq(0)
yield
def test_shift(self):
bits = 8
data = 0x81
tdi = [0, 0, 1] # dummy from BYPASS TAPs and marker
tdi += [((bits - 1) >> j) & 1 for j in range(self.bits - 1, -1, -1)]
tdi += [(data >> j) & 1 for j in range(bits)]
tdi += [0, 0, 0, 0] # dummy from BYPASS TAPs
tdo = []
spi = []
mg.run_simulation(self.dut, self.run_seq(tdi, tdo, spi))
# print(tdo)
for l in spi:
print(l)
class Spartan3(mg.Module):
macro = "BSCAN_SPARTAN3"
toolchain = "ise"
def __init__(self, platform):
platform.toolchain.bitgen_opt += " -g compress -g UnusedPin:Pullup"
self.submodules.j2s = j2s = JTAG2SPI(platform.request("spiflash"))
self.specials += [
mg.Instance(
self.macro,
o_SHIFT=j2s.jtag.shift, o_SEL1=j2s.jtag.sel,
o_CAPTURE=j2s.jtag.capture,
o_DRCK1=j2s.jtag.tck,
o_TDI=j2s.jtag.tdi, i_TDO1=j2s.jtag.tdo,
i_TDO2=0),
]
platform.add_period_constraint(j2s.jtag.tck, 6)
class Spartan3A(Spartan3):
macro = "BSCAN_SPARTAN3A"
class Spartan6(mg.Module):
toolchain = "ise"
def __init__(self, platform):
platform.toolchain.bitgen_opt += " -g compress -g UnusedPin:Pullup"
self.submodules.j2s = j2s = JTAG2SPI(platform.request("spiflash"))
# clk = mg.Signal()
self.specials += [
mg.Instance(
"BSCAN_SPARTAN6", p_JTAG_CHAIN=1,
o_SHIFT=j2s.jtag.shift, o_SEL=j2s.jtag.sel,
o_CAPTURE=j2s.jtag.capture,
o_DRCK=j2s.jtag.tck,
o_TDI=j2s.jtag.tdi, i_TDO=j2s.jtag.tdo),
# mg.Instance("BUFG", i_I=clk, o_O=j2s.jtag.tck)
]
platform.add_period_constraint(j2s.jtag.tck, 6)
class Series7(mg.Module):
toolchain = "vivado"
def __init__(self, platform):
platform.toolchain.bitstream_commands.extend([
"set_property BITSTREAM.GENERAL.COMPRESS True [current_design]",
"set_property BITSTREAM.CONFIG.UNUSEDPIN Pullnone [current_design]"
])
self.submodules.j2s = j2s = JTAG2SPI(platform.request("spiflash"))
# clk = mg.Signal()
self.specials += [
mg.Instance(
"BSCANE2", p_JTAG_CHAIN=1,
o_SHIFT=j2s.jtag.shift, o_SEL=j2s.jtag.sel,
o_CAPTURE=j2s.jtag.capture,
o_DRCK=j2s.jtag.tck,
o_TDI=j2s.jtag.tdi, i_TDO=j2s.jtag.tdo),
mg.Instance(
"STARTUPE2", i_CLK=0, i_GSR=0, i_GTS=0,
i_KEYCLEARB=0, i_PACK=1,
i_USRCCLKO=j2s.clk.o, i_USRCCLKTS=~j2s.clk.oe,
i_USRDONEO=1, i_USRDONETS=1),
# mg.Instance("BUFG", i_I=clk, o_O=j2s.jtag.tck)
]
platform.add_period_constraint(j2s.jtag.tck, 6)
try:
self.comb += [
platform.request("user_sma_gpio_p").eq(j2s.cs_n.i),
platform.request("user_sma_gpio_n").eq(j2s.clk.o),
platform.request("user_sma_clock_p").eq(j2s.mosi.o),
platform.request("user_sma_clock_n").eq(j2s.miso.i),
]
except mb.ConstraintError:
pass
class Ultrascale(mg.Module):
toolchain = "vivado"
def __init__(self, platform):
platform.toolchain.bitstream_commands.extend([
"set_property BITSTREAM.GENERAL.COMPRESS True [current_design]",
"set_property BITSTREAM.CONFIG.UNUSEDPIN Pullnone [current_design]",
])
self.submodules.j2s0 = j2s0 = JTAG2SPI()
self.submodules.j2s1 = j2s1 = JTAG2SPI(platform.request("spiflash"))
di = mg.Signal(4)
self.comb += mg.Cat(j2s0.mosi.i, j2s0.miso.i).eq(di)
self.specials += [
mg.Instance("BSCANE2", p_JTAG_CHAIN=1,
o_SHIFT=j2s0.jtag.shift, o_SEL=j2s0.jtag.sel,
o_CAPTURE=j2s0.jtag.capture,
o_DRCK=j2s0.jtag.tck,
o_TDI=j2s0.jtag.tdi, i_TDO=j2s0.jtag.tdo),
mg.Instance("BSCANE2", p_JTAG_CHAIN=2,
o_SHIFT=j2s1.jtag.shift, o_SEL=j2s1.jtag.sel,
o_CAPTURE=j2s1.jtag.capture,
o_DRCK=j2s1.jtag.tck,
o_TDI=j2s1.jtag.tdi, i_TDO=j2s1.jtag.tdo),
mg.Instance("STARTUPE3", i_GSR=0, i_GTS=0,
i_KEYCLEARB=0, i_PACK=1,
i_USRDONEO=1, i_USRDONETS=1,
i_USRCCLKO=mg.Mux(j2s0.clk.oe, j2s0.clk.o, j2s1.clk.o),
i_USRCCLKTS=~(j2s0.clk.oe | j2s1.clk.oe),
i_FCSBO=j2s0.cs_n.o, i_FCSBTS=~j2s0.cs_n.oe,
o_DI=di,
i_DO=mg.Cat(j2s0.mosi.o, j2s0.miso.o, 0, 0),
i_DTS=mg.Cat(~j2s0.mosi.oe, ~j2s0.miso.oe, 1, 1))
]
platform.add_period_constraint(j2s0.jtag.tck, 6)
platform.add_period_constraint(j2s1.jtag.tck, 6)
class XilinxBscanSpi(xilinx.XilinxPlatform):
packages = {
# (package-speedgrade, id): [cs_n, clk, mosi, miso, *pullups]
("cp132", 1): ["M2", "N12", "N2", "N8"],
("fg320", 1): ["U3", "U16", "T4", "N10"],
("fg320", 2): ["V3", "U16", "T11", "V16"],
("fg484", 1): ["Y4", "AA20", "AB14", "AB20"],
("fgg484", 1): ["Y4", "AA20", "AB14", "AB20"],
("fgg400", 1): ["Y2", "Y19", "W12", "W18"],
("ftg256", 1): ["T2", "R14", "P10", "T14"],
("ft256", 1): ["T2", "R14", "P10", "T14"],
("fg400", 1): ["Y2", "Y19", "W12", "W18"],
("cs484", 1): ["U7", "V17", "V13", "W17"],
("qg144-2", 1): ["P38", "P70", "P64", "P65", "P62", "P61"],
("cpg196-2", 1): ["P2", "N13", "P11", "N11", "N10", "P10"],
("cpg236-1", 1): ["K19", None, "D18", "D19", "G18", "F18"],
("csg484-2", 1): ["AB5", "W17", "AB17", "Y17", "V13", "W13"],
("csg324-2", 1): ["V3", "R15", "T13", "R13", "T14", "V14"],
("csg324-1", 1): ["L13", None, "K17", "K18", "L14", "M14"],
("fbg484-1", 1): ["T19", None, "P22", "R22", "P21", "R21"],
("fbg484-1", 2): ["L16", None, "H18", "H19", "G18", "F19"],
("fbg676-1", 1): ["C23", None, "B24", "A25", "B22", "A22"],
("ffg901-1", 1): ["V26", None, "R30", "T30", "R28", "T28"],
("ffg900-1", 1): ["U19", None, "P24", "R25", "R20", "R21"],
("ffg1156-1", 1): ["V30", None, "AA33", "AA34", "Y33", "Y34"],
("ffg1157-1", 1): ["AL33", None, "AN33", "AN34", "AK34", "AL34"],
("ffg1158-1", 1): ["C24", None, "A23", "A24", "B26", "A26"],
("ffg1926-1", 1): ["AK33", None, "AN34", "AN35", "AJ34", "AK34"],
("fhg1761-1", 1): ["AL36", None, "AM36", "AN36", "AJ36", "AJ37"],
("flg1155-1", 1): ["AL28", None, "AE28", "AF28", "AJ29", "AJ30"],
("flg1932-1", 1): ["V32", None, "T33", "R33", "U31", "T31"],
("flg1926-1", 1): ["AK33", None, "AN34", "AN35", "AJ34", "AK34"],
("ffva1156-2-e", 1): ["G26", None, "M20", "L20", "R21", "R22"],
("ffva1156-2-e", "sayma"): ["K21", None, "M20", "L20", "R21", "R22"],
}
pinouts = {
# bitstreams are named by die, package does not matter, speed grade
# should not matter.
#
# chip: (package, id, standard, class)
"xc3s100e": ("cp132", 1, "LVCMOS33", Spartan3),
"xc3s1200e": ("fg320", 1, "LVCMOS33", Spartan3),
"xc3s1400a": ("fg484", 1, "LVCMOS33", Spartan3A),
"xc3s1400an": ("fgg484", 1, "LVCMOS33", Spartan3A),
"xc3s1600e": ("fg320", 1, "LVCMOS33", Spartan3),
"xc3s200a": ("fg320", 2, "LVCMOS33", Spartan3A),
"xc3s200an": ("ftg256", 1, "LVCMOS33", Spartan3A),
"xc3s250e": ("cp132", 1, "LVCMOS33", Spartan3),
"xc3s400a": ("fg320", 2, "LVCMOS33", Spartan3A),
"xc3s400an": ("fgg400", 1, "LVCMOS33", Spartan3A),
"xc3s500e": ("cp132", 1, "LVCMOS33", Spartan3),
"xc3s50a": ("ft256", 1, "LVCMOS33", Spartan3A),
"xc3s50an": ("ftg256", 1, "LVCMOS33", Spartan3A),
"xc3s700a": ("fg400", 1, "LVCMOS33", Spartan3A),
"xc3s700an": ("fgg484", 1, "LVCMOS33", Spartan3A),
"xc3sd1800a": ("cs484", 1, "LVCMOS33", Spartan3A),
"xc3sd3400a": ("cs484", 1, "LVCMOS33", Spartan3A),
"xc6slx100": ("csg484-2", 1, "LVCMOS33", Spartan6),
"xc6slx100t": ("csg484-2", 1, "LVCMOS33", Spartan6),
"xc6slx150": ("csg484-2", 1, "LVCMOS33", Spartan6),
"xc6slx150t": ("csg484-2", 1, "LVCMOS33", Spartan6),
"xc6slx16": ("cpg196-2", 1, "LVCMOS33", Spartan6),
"xc6slx25": ("csg324-2", 1, "LVCMOS33", Spartan6),
"xc6slx25t": ("csg324-2", 1, "LVCMOS33", Spartan6),
"xc6slx45": ("csg324-2", 1, "LVCMOS33", Spartan6),
"xc6slx45t": ("csg324-2", 1, "LVCMOS33", Spartan6),
"xc6slx4": ("cpg196-2", 1, "LVCMOS33", Spartan6),
"xc6slx4t": ("qg144-2", 1, "LVCMOS33", Spartan6),
"xc6slx75": ("csg484-2", 1, "LVCMOS33", Spartan6),
"xc6slx75t": ("csg484-2", 1, "LVCMOS33", Spartan6),
"xc6slx9": ("cpg196-2", 1, "LVCMOS33", Spartan6),
"xc6slx9t": ("qg144-2", 1, "LVCMOS33", Spartan6),
"xc7a100t": ("csg324-1", 1, "LVCMOS25", Series7),
"xc7a15t": ("cpg236-1", 1, "LVCMOS25", Series7),
"xc7a200t": ("fbg484-1", 1, "LVCMOS25", Series7),
"xc7a35t": ("cpg236-1", 1, "LVCMOS25", Series7),
"xc7a50t": ("cpg236-1", 1, "LVCMOS25", Series7),
"xc7a75t": ("csg324-1", 1, "LVCMOS25", Series7),
"xc7k160t": ("fbg484-1", 2, "LVCMOS25", Series7),
"xc7k325t": ("fbg676-1", 1, "LVCMOS25", Series7),
"xc7k325t-debug": ("ffg900-1", 1, "LVCMOS25", Series7),
"xc7k355t": ("ffg901-1", 1, "LVCMOS25", Series7),
"xc7k410t": ("fbg676-1", 1, "LVCMOS25", Series7),
"xc7k420t": ("ffg1156-1", 1, "LVCMOS25", Series7),
"xc7k480t": ("ffg1156-1", 1, "LVCMOS25", Series7),
"xc7k70t": ("fbg484-1", 2, "LVCMOS25", Series7),
"xc7v2000t": ("fhg1761-1", 1, "LVCMOS18", Series7),
"xc7v585t": ("ffg1157-1", 1, "LVCMOS18", Series7),
"xc7vh580t": ("flg1155-1", 1, "LVCMOS18", Series7),
"xc7vh870t": ("flg1932-1", 1, "LVCMOS18", Series7),
"xc7vx1140t": ("flg1926-1", 1, "LVCMOS18", Series7),
"xc7vx330t": ("ffg1157-1", 1, "LVCMOS18", Series7),
"xc7vx415t": ("ffg1157-1", 1, "LVCMOS18", Series7),
"xc7vx485t": ("ffg1157-1", 1, "LVCMOS18", Series7),
"xc7vx550t": ("ffg1158-1", 1, "LVCMOS18", Series7),
"xc7vx690t": ("ffg1157-1", 1, "LVCMOS18", Series7),
"xc7vx980t": ("ffg1926-1", 1, "LVCMOS18", Series7),
"xcku040": ("ffva1156-2-e", 1, "LVCMOS18", Ultrascale),
"xcku040-sayma": ("ffva1156-2-e", "sayma", "LVCMOS18", Ultrascale),
}
def __init__(self, device, pins, std, toolchain="ise"):
ios = [self.make_spi(0, pins, std, toolchain)]
if device == "xc7k325t-ffg900-1": # debug
ios += [
("user_sma_clock_p", 0, mb.Pins("L25"), mb.IOStandard("LVCMOS25")),
("user_sma_clock_n", 0, mb.Pins("K25"), mb.IOStandard("LVCMOS25")),
("user_sma_gpio_p", 0, mb.Pins("Y23"), mb.IOStandard("LVCMOS25")),
("user_sma_gpio_n", 0, mb.Pins("Y24"), mb.IOStandard("LVCMOS25")),
]
xilinx.XilinxPlatform.__init__(self, device, ios, toolchain=toolchain)
@staticmethod
def make_spi(i, pins, std, toolchain):
pu = "PULLUP" if toolchain == "ise" else "PULLUP TRUE"
pd = "PULLDOWN" if toolchain == "ise" else "PULLDOWN TRUE"
cs_n, clk, mosi, miso = pins[:4]
io = ["spiflash", i,
mb.Subsignal("cs_n", mb.Pins(cs_n), mb.Misc(pu)),
mb.Subsignal("mosi", mb.Pins(mosi), mb.Misc(pu)),
mb.Subsignal("miso", mb.Pins(miso), mb.Misc(pu)),
mb.IOStandard(std),
]
if clk:
io.append(mb.Subsignal("clk", mb.Pins(clk), mb.Misc(pd)))
for i, p in enumerate(pins[4:]):
io.append(mb.Subsignal("pullup{}".format(i), mb.Pins(p),
mb.Misc(pu)))
return io
@classmethod
def make(cls, target, errors=False):
pkg, id, std, Top = cls.pinouts[target]
pins = cls.packages[(pkg, id)]
device = target.split("-", 1)[0]
platform = cls("{}-{}".format(device, pkg), pins, std, Top.toolchain)
top = Top(platform)
name = "bscan_spi_{}".format(target)
try:
platform.build(top, build_name=name)
except Exception as e:
print(("ERROR: xilinx_bscan_spi build failed "
"for {}: {}").format(target, e))
if errors:
raise
if __name__ == "__main__":
import argparse
import multiprocessing
p = argparse.ArgumentParser(description="build bscan_spi bitstreams "
"for openocd jtagspi flash driver")
p.add_argument("device", nargs="*",
default=sorted(list(XilinxBscanSpi.pinouts)),
help="build for these devices (default: %(default)s)")
p.add_argument("-p", "--parallel", default=1, type=int,
help="number of parallel builds (default: %(default)s)")
args = p.parse_args()
pool = multiprocessing.Pool(args.parallel)
pool.map(XilinxBscanSpi.make, args.device, chunksize=1)
| gpl-2.0 | -105,511,294,715,871,400 | 40.777547 | 83 | 0.523762 | false |
bsmrstu-warriors/Moytri---The-Drone-Aider | Lib/site-packages/scipy/signal/signaltools.py | 55 | 41609 | # Author: Travis Oliphant
# 1999 -- 2002
import warnings
import sigtools
from scipy import linalg
from scipy.fftpack import fft, ifft, ifftshift, fft2, ifft2, fftn, \
ifftn, fftfreq
from numpy import polyadd, polymul, polydiv, polysub, roots, \
poly, polyval, polyder, cast, asarray, isscalar, atleast_1d, \
ones, real, real_if_close, zeros, array, arange, where, rank, \
newaxis, product, ravel, sum, r_, iscomplexobj, take, \
argsort, allclose, expand_dims, unique, prod, sort, reshape, \
transpose, dot, any, mean, flipud, ndarray
import numpy as np
from scipy.misc import factorial
from windows import get_window
_modedict = {'valid':0, 'same':1, 'full':2}
_boundarydict = {'fill':0, 'pad':0, 'wrap':2, 'circular':2, 'symm':1,
'symmetric':1, 'reflect':4}
def _valfrommode(mode):
try:
val = _modedict[mode]
except KeyError:
if mode not in [0,1,2]:
raise ValueError("Acceptable mode flags are 'valid' (0),"
" 'same' (1), or 'full' (2).")
val = mode
return val
def _bvalfromboundary(boundary):
try:
val = _boundarydict[boundary] << 2
except KeyError:
if val not in [0,1,2] :
raise ValueError("Acceptable boundary flags are 'fill', 'wrap'"
" (or 'circular'), \n and 'symm' (or 'symmetric').")
val = boundary << 2
return val
def correlate(in1, in2, mode='full'):
"""
Cross-correlate two N-dimensional arrays.
Cross-correlate in1 and in2 with the output size determined by the mode
argument.
Parameters
----------
in1: array
first input.
in2: array
second input. Should have the same number of dimensions as in1.
mode: str {'valid', 'same', 'full'}
a string indicating the size of the output:
- 'valid': the output consists only of those elements that do not
rely on the zero-padding.
- 'same': the output is the same size as the largest input centered
with respect to the 'full' output.
- 'full': the output is the full discrete linear cross-correlation
of the inputs. (Default)
Returns
-------
out: array
an N-dimensional array containing a subset of the discrete linear
cross-correlation of in1 with in2.
Notes
-----
The correlation z of two arrays x and y of rank d is defined as
z[...,k,...] = sum[..., i_l, ...]
x[..., i_l,...] * conj(y[..., i_l + k,...])
"""
val = _valfrommode(mode)
if mode == 'valid':
ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
out = np.empty(ps, in1.dtype)
for i in range(len(ps)):
if ps[i] <= 0:
raise ValueError("Dimension of x(%d) < y(%d) " \
"not compatible with valid mode" % \
(in1.shape[i], in2.shape[i]))
z = sigtools._correlateND(in1, in2, out, val)
else:
ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
# zero pad input
in1zpadded = np.zeros(ps, in1.dtype)
sc = [slice(0, i) for i in in1.shape]
in1zpadded[sc] = in1.copy()
if mode == 'full':
out = np.empty(ps, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
elif mode == 'same':
out = np.empty(in1.shape, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
else:
raise ValueError("Uknown mode %s" % mode)
return z
def _centered(arr, newsize):
# Return the center newsize portion of the array.
newsize = asarray(newsize)
currsize = array(arr.shape)
startind = (currsize - newsize) / 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def fftconvolve(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT. See convolve.
"""
s1 = array(in1.shape)
s2 = array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, np.complex) or
np.issubdtype(in2.dtype, np.complex))
size = s1+s2-1
# Always use 2**n-sized FFT
fsize = 2**np.ceil(np.log2(size))
IN1 = fftn(in1,fsize)
IN1 *= fftn(in2,fsize)
fslice = tuple([slice(0, int(sz)) for sz in size])
ret = ifftn(IN1)[fslice].copy()
del IN1
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
if product(s1,axis=0) > product(s2,axis=0):
osize = s1
else:
osize = s2
return _centered(ret,osize)
elif mode == "valid":
return _centered(ret,abs(s2-s1)+1)
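# Illustrative sketch, not part of the original module: for small inputs
# fftconvolve and convolve agree to floating-point precision.
#
#   a = np.array([1., 2., 3.])
#   b = np.array([0., 1., 0.5])
#   fftconvolve(a, b, mode='full')   # ~ array([ 0. ,  1. ,  2.5,  4. ,  1.5])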
def convolve(in1, in2, mode='full'):
"""
Convolve two N-dimensional arrays.
Convolve in1 and in2 with output size determined by mode.
Parameters
----------
in1: array
first input.
in2: array
second input. Should have the same number of dimensions as in1.
mode: str {'valid', 'same', 'full'}
a string indicating the size of the output:
``valid`` : the output consists only of those elements that do not
rely on the zero-padding.
``same`` : the output is the same size as the largest input centered
with respect to the 'full' output.
``full`` : the output is the full discrete linear cross-correlation
of the inputs. (Default)
Returns
-------
out: array
an N-dimensional array containing a subset of the discrete linear
cross-correlation of in1 with in2.
"""
volume = asarray(in1)
kernel = asarray(in2)
if rank(volume) == rank(kernel) == 0:
return volume*kernel
elif not volume.ndim == kernel.ndim:
raise ValueError("in1 and in2 should have the same rank")
slice_obj = [slice(None,None,-1)]*len(kernel.shape)
if mode == 'valid':
for d1, d2 in zip(volume.shape, kernel.shape):
if not d1 >= d2:
raise ValueError(
"in1 should have at least as many items as in2 in " \
"every dimension for valid mode.")
if np.iscomplexobj(kernel):
return correlate(volume, kernel[slice_obj].conj(), mode)
else:
return correlate(volume, kernel[slice_obj], mode)
def order_filter(a, domain, rank):
"""
Perform an order filter on an N-dimensional array.
Perform an order filter on the array in. The domain argument acts as a
mask centered over each pixel. The non-zero elements of domain are
used to select elements surrounding each input pixel which are placed
in a list. The list is sorted, and the output for that pixel is the
element corresponding to rank in the sorted list.
Parameters
----------
a : ndarray
The N-dimensional input array.
domain : array_like
A mask array with the same number of dimensions as `in`.
Each dimension should have an odd number of elements.
rank : int
A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element, 1 is the
next smallest element, etc.).
Returns
-------
out : ndarray
The results of the order filter in an array with the same
shape as `in`.
Examples
--------
>>> import scipy.signal
>>> x = np.arange(25).reshape(5, 5)
>>> domain = np.identity(3)
>>> x
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> sp.signal.order_filter(x, domain, 0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 2., 0.],
[ 0., 5., 6., 7., 0.],
[ 0., 10., 11., 12., 0.],
[ 0., 0., 0., 0., 0.]])
>>> sp.signal.order_filter(x, domain, 2)
array([[ 6., 7., 8., 9., 4.],
[ 11., 12., 13., 14., 9.],
[ 16., 17., 18., 19., 14.],
[ 21., 22., 23., 24., 19.],
[ 20., 21., 22., 23., 24.]])
"""
domain = asarray(domain)
size = domain.shape
for k in range(len(size)):
if (size[k] % 2) != 1:
raise ValueError("Each dimension of domain argument "
" should have an odd number of elements.")
return sigtools._order_filterND(a, domain, rank)
def medfilt(volume, kernel_size=None):
"""
Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by kernel_size.
Parameters
----------
volume : array_like
An N-dimensional input array.
kernel_size : array_like, optional
A scalar or an N-length list giving the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size in
each dimension. Default size is 3 for each dimension.
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
volume = atleast_1d(volume)
if kernel_size is None:
kernel_size = [3] * len(volume.shape)
kernel_size = asarray(kernel_size)
if len(kernel_size.shape) == 0:
kernel_size = [kernel_size.item()] * len(volume.shape)
kernel_size = asarray(kernel_size)
for k in range(len(volume.shape)):
if (kernel_size[k] % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
domain = ones(kernel_size)
numels = product(kernel_size,axis=0)
order = int(numels/2)
return sigtools._order_filterND(volume,domain,order)
def wiener(im, mysize=None, noise=None):
"""
Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Parameters
----------
im : ndarray
An N-dimensional array.
mysize : int or arraylike, optional
A scalar or an N-length list giving the size of the Wiener filter
window in each dimension. Elements of mysize should be odd.
If mysize is a scalar, then this scalar is used as the size
in each dimension.
noise : float, optional
The noise-power to use. If None, then noise is estimated as the
average of the local variance of the input.
Returns
-------
out : ndarray
Wiener filtered result with the same shape as `im`.
"""
im = asarray(im)
if mysize is None:
mysize = [3] * len(im.shape)
    mysize = asarray(mysize)
# Estimate the local mean
lMean = correlate(im,ones(mysize), 'same') / product(mysize,axis=0)
# Estimate the local variance
lVar = correlate(im**2,ones(mysize), 'same') / product(mysize,axis=0) - lMean**2
# Estimate the noise power if needed.
    if noise is None:
noise = mean(ravel(lVar),axis=0)
res = (im - lMean)
res *= (1-noise / lVar)
res += lMean
out = where(lVar < noise, lMean, res)
return out
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""Convolve two 2-dimensional arrays.
Convolve `in1` and `in2` with output size determined by mode and boundary
conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : ndarray
Two-dimensional input arrays to be convolved.
mode: str, optional
A string indicating the size of the output:
``valid`` : the output consists only of those elements that do not
rely on the zero-padding.
``same`` : the output is the same size as the largest input centered
with respect to the 'full' output.
``full`` : the output is the full discrete linear cross-correlation
of the inputs. (Default)
boundary : str, optional
A flag indicating how to handle boundaries:
- 'fill' : pad input arrays with fillvalue. (default)
- 'wrap' : circular boundary conditions.
- 'symm' : symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
"""
if mode == 'valid':
for d1, d2 in zip(np.shape(in1), np.shape(in2)):
if not d1 >= d2:
raise ValueError(
"in1 should have at least as many items as in2 in " \
"every dimension for valid mode.")
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
return sigtools._convolve2d(in1,in2,1,val,bval,fillvalue)
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""Cross-correlate two 2-dimensional arrays.
Cross correlate in1 and in2 with output size determined by mode and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : ndarray
Two-dimensional input arrays to be convolved.
mode: str, optional
A string indicating the size of the output:
``valid`` : the output consists only of those elements that do not
rely on the zero-padding.
``same`` : the output is the same size as the largest input centered
with respect to the 'full' output.
``full`` : the output is the full discrete linear cross-correlation
of the inputs. (Default)
boundary : str, optional
A flag indicating how to handle boundaries:
- 'fill' : pad input arrays with fillvalue. (default)
- 'wrap' : circular boundary conditions.
- 'symm' : symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
"""
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
return sigtools._convolve2d(in1, in2, 0,val,bval,fillvalue)
def medfilt2d(input, kernel_size=3):
"""
Median filter a 2-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size` (must be odd).
Parameters
----------
input : array_like
A 2-dimensional input array.
kernel_size : array_like, optional
A scalar or a list of length 2, giving the size of the
median filter window in each dimension. Elements of
`kernel_size` should be odd. If `kernel_size` is a scalar,
then this scalar is used as the size in each dimension.
Default is a kernel of size (3, 3).
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
image = asarray(input)
if kernel_size is None:
kernel_size = [3] * 2
kernel_size = asarray(kernel_size)
if len(kernel_size.shape) == 0:
kernel_size = [kernel_size.item()] * 2
kernel_size = asarray(kernel_size)
for size in kernel_size:
if (size % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
return sigtools._medfilt2d(image, kernel_size)
def lfilter(b, a, x, axis=-1, zi=None):
"""
Filter data along one-dimension with an IIR or FIR filter.
Filter a data sequence, x, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
(see Notes).
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If a[0]
is not 1, then both a and b are normalized by a[0].
x : array_like
An N-dimensional input array.
axis : int
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis (*Default* = -1)
zi : array_like (optional)
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
max(len(a),len(b))-1. If zi=None or is not given then initial
rest is assumed. SEE signal.lfiltic for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array (optional)
If zi is None, this is not returned, otherwise, zf holds the
final filter delay values.
Notes
-----
The filter function is implemented as a direct II transposed structure.
This means that the filter implements
::
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[nb]*x[n-nb]
- a[1]*y[n-1] - ... - a[na]*y[n-na]
using the following difference equations::
y[m] = b[0]*x[m] + z[0,m-1]
z[0,m] = b[1]*x[m] + z[1,m-1] - a[1]*y[m]
...
z[n-3,m] = b[n-2]*x[m] + z[n-2,m-1] - a[n-2]*y[m]
z[n-2,m] = b[n-1]*x[m] - a[n-1]*y[m]
where m is the output sample number and n=max(len(a),len(b)) is the
model order.
The rational transfer function describing this filter in the
z-transform domain is::
-1 -nb
b[0] + b[1]z + ... + b[nb] z
Y(z) = ---------------------------------- X(z)
-1 -na
a[0] + a[1]z + ... + a[na] z
"""
if isscalar(a):
a = [a]
if zi is None:
return sigtools._linear_filter(b, a, x, axis)
else:
return sigtools._linear_filter(b, a, x, axis, zi)
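# Illustrative sketch, not part of the original module: a single-pole IIR
# low-pass y[n] = 0.9*y[n-1] + 0.1*x[n], i.e. b = [0.1], a = [1, -0.9].
#
#   x = np.ones(5)
#   lfilter([0.1], [1, -0.9], x)
#   # ~ array([ 0.1, 0.19, 0.271, 0.3439, 0.40951])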
def lfiltic(b,a,y,x=None):
"""
Construct initial conditions for lfilter
Given a linear filter (b,a) and initial conditions on the output y
and the input x, return the inital conditions on the state vector zi
which is used by lfilter to generate the output given the input.
If M=len(b)-1 and N=len(a)-1. Then, the initial conditions are given
in the vectors x and y as::
x = {x[-1],x[-2],...,x[-M]}
y = {y[-1],y[-2],...,y[-N]}
    If x is not given, its initial conditions are assumed zero.
If either vector is too short, then zeros are added
to achieve the proper length.
The output vector zi contains::
zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]} where K=max(M,N).
"""
N = np.size(a)-1
M = np.size(b)-1
K = max(M,N)
y = asarray(y)
zi = zeros(K,y.dtype.char)
if x is None:
x = zeros(M,y.dtype.char)
else:
x = asarray(x)
L = np.size(x)
if L < M:
x = r_[x,zeros(M-L)]
L = np.size(y)
if L < N:
y = r_[y,zeros(N-L)]
for m in range(M):
zi[m] = sum(b[m+1:]*x[:M-m],axis=0)
for m in range(N):
zi[m] -= sum(a[m+1:]*y[:N-m],axis=0)
return zi
def deconvolve(signal, divisor):
"""Deconvolves divisor out of signal.
"""
num = atleast_1d(signal)
den = atleast_1d(divisor)
N = len(num)
D = len(den)
if D > N:
quot = [];
rem = num;
else:
input = ones(N-D+1, float)
input[1:] = 0
quot = lfilter(num, den, input)
rem = num - convolve(den, quot, mode='full')
return quot, rem
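# Illustrative sketch, not part of the original module: deconvolve recovers
# the original sequence when the recorded signal is an exact convolution.
#
#   original = np.array([0., 1., 2., 1.])
#   impulse_response = np.array([2., 1.])
#   recorded = convolve(impulse_response, original)
#   quot, rem = deconvolve(recorded, impulse_response)
#   # quot ~ original, rem ~ zeros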
def hilbert(x, N=None, axis=-1):
"""
Compute the analytic signal.
The transformation is done along the last axis by default.
Parameters
----------
x : array_like
Signal data
N : int, optional
Number of Fourier components. Default: ``x.shape[axis]``
axis : int, optional
Axis along which to do the transformation. Default: -1.
Returns
-------
xa : ndarray
Analytic signal of `x`, of each 1-D array along `axis`
Notes
-----
The analytic signal `x_a(t)` of `x(t)` is::
x_a = F^{-1}(F(x) 2U) = x + i y
where ``F`` is the Fourier transform, ``U`` the unit step function,
and ``y`` the Hilbert transform of ``x``. [1]_
`axis` argument is new in scipy 0.8.0.
References
----------
.. [1] Wikipedia, "Analytic signal".
http://en.wikipedia.org/wiki/Analytic_signal
"""
x = asarray(x)
if N is None:
N = x.shape[axis]
if N <=0:
raise ValueError("N must be positive.")
if iscomplexobj(x):
print "Warning: imaginary part of x ignored."
x = real(x)
Xf = fft(x, N, axis=axis)
h = zeros(N)
if N % 2 == 0:
h[0] = h[N/2] = 1
h[1:N/2] = 2
else:
h[0] = 1
h[1:(N+1)/2] = 2
if len(x.shape) > 1:
ind = [newaxis]*x.ndim
ind[axis] = slice(None)
h = h[ind]
x = ifft(Xf*h, axis=axis)
return x
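# Illustrative sketch, not part of the original module: the analytic signal
# of a full-period cosine has unit magnitude, and its imaginary part is the
# matching sine.
#
#   t = np.linspace(0, 1, 256, endpoint=False)
#   xa = hilbert(np.cos(2 * np.pi * 8 * t))
#   np.allclose(np.abs(xa), 1.0)   # True for this periodic test signal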
def hilbert2(x, N=None):
"""
Compute the '2-D' analytic signal of `x`
Parameters
----------
x : array_like
2-D signal data.
N : int, optional
Number of Fourier components. Default is ``x.shape``
Returns
-------
xa : ndarray
Analytic signal of `x` taken along axes (0,1).
References
----------
.. [1] Wikipedia, "Analytic signal",
http://en.wikipedia.org/wiki/Analytic_signal
"""
    x = asarray(x)
if N is None:
N = x.shape
    if isscalar(N):
        if N <= 0:
            raise ValueError("N must be positive.")
        N = (N, N)
if iscomplexobj(x):
print "Warning: imaginary part of x ignored."
x = real(x)
Xf = fft2(x,N,axes=(0,1))
h1 = zeros(N[0],'d')
h2 = zeros(N[1],'d')
for p in range(2):
h = eval("h%d"%(p+1))
N1 = N[p]
if N1 % 2 == 0:
h[0] = h[N1/2] = 1
h[1:N1/2] = 2
else:
h[0] = 1
h[1:(N1+1)/2] = 2
exec("h%d = h" % (p+1), globals(), locals())
h = h1[:,newaxis] * h2[newaxis,:]
k = len(x.shape)
while k > 2:
h = h[:, newaxis]
k -= 1
x = ifft2(Xf*h,axes=(0,1))
return x
def cmplx_sort(p):
"sort roots based on magnitude."
p = asarray(p)
if iscomplexobj(p):
indx = argsort(abs(p))
else:
indx = argsort(p)
return take(p,indx,0), indx
def unique_roots(p, tol=1e-3, rtype='min'):
"""
Determine unique roots and their multiplicities from a list of roots.
Parameters
----------
p : array_like
The list of roots.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
rtype : {'max', 'min, 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
pout : ndarray
The list of unique roots, sorted from low to high.
mult : ndarray
The multiplicity of each root.
Notes
-----
This utility function is not specific to roots but can be used for any
sequence of values for which uniqueness and multiplicity has to be
determined. For a more general routine, see `numpy.unique`.
Examples
--------
>>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
>>> uniq, mult = sp.signal.unique_roots(vals, tol=2e-2, rtype='avg')
Check which roots have multiplicity larger than 1:
>>> uniq[mult > 1]
array([ 1.305])
"""
if rtype in ['max','maximum']:
comproot = np.maximum
elif rtype in ['min','minimum']:
comproot = np.minimum
elif rtype in ['avg','mean']:
comproot = np.mean
p = asarray(p)*1.0
tol = abs(tol)
p, indx = cmplx_sort(p)
pout = []
mult = []
indx = -1
curp = p[0] + 5*tol
sameroots = []
for k in range(len(p)):
tr = p[k]
if abs(tr-curp) < tol:
sameroots.append(tr)
curp = comproot(sameroots)
pout[indx] = curp
mult[indx] += 1
else:
pout.append(tr)
curp = tr
sameroots = [tr]
indx += 1
mult.append(1)
return array(pout), array(mult)
def invres(r, p, k, tol=1e-3, rtype='avg'):
"""Compute b(s) and a(s) from partial fraction expansion: r,p,k
If M = len(b) and N = len(a)
b(s) b[0] x**(M-1) + b[1] x**(M-2) + ... + b[M-1]
H(s) = ------ = ----------------------------------------------
a(s) a[0] x**(N-1) + a[1] x**(N-2) + ... + a[N-1]
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
See Also
--------
residue, poly, polyval, unique_roots
"""
extra = k
p, indx = cmplx_sort(p)
r = take(r,indx,0)
pout, mult = unique_roots(p,tol=tol,rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]]*mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra,a)
else:
b = [0]
indx = 0
for k in range(len(pout)):
temp = []
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]]*mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]]*(mult[k]-m-1))
b = polyadd(b,r[indx]*poly(t2))
indx += 1
b = real_if_close(b)
while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
b = b[1:]
return b, a
def residue(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(s) / a(s).
If ``M = len(b)`` and ``N = len(a)``, then the partial-fraction
expansion H(s) is defined as::
b(s) b[0] s**(M-1) + b[1] s**(M-2) + ... + b[M-1]
H(s) = ------ = ----------------------------------------------
a(s) a[0] s**(N-1) + a[1] s**(N-2) + ... + a[N-1]
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invres, numpy.poly, unique_roots
"""
b,a = map(asarray,(b,a))
rscale = a[0]
k,b = polydiv(b,a)
p = roots(a)
r = p*0.0
pout, mult = unique_roots(p,tol=tol,rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]]*mult[n])
p = asarray(p)
# Compute the residue from the general formula
indx = 0
for n in range(len(pout)):
bn = b.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]]*mult[l])
an = atleast_1d(poly(pn))
# bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
# multiplicity of pole at po[n]
sig = mult[n]
for m in range(sig,0,-1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn,1),an)
term2 = polymul(bn,polyder(an,1))
bn = polysub(term1,term2)
an = polymul(an,an)
r[indx+m-1] = polyval(bn,pout[n]) / polyval(an,pout[n]) \
/ factorial(sig-m)
indx += sig
return r/rscale, p, k
def residuez(b, a, tol=1e-3, rtype='avg'):
"""Compute partial-fraction expansion of b(z) / a(z).
If M = len(b) and N = len(a)
b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
H(z) = ------ = ----------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
See also
--------
invresz, poly, polyval, unique_roots
"""
b,a = map(asarray,(b,a))
gain = a[0]
brev, arev = b[::-1],a[::-1]
krev,brev = polydiv(brev,arev)
if krev == []:
k = []
else:
k = krev[::-1]
b = brev[::-1]
p = roots(a)
r = p*0.0
pout, mult = unique_roots(p,tol=tol,rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]]*mult[n])
p = asarray(p)
# Compute the residue from the general formula (for discrete-time)
# the polynomial is in z**(-1) and the multiplication is by terms
# like this (1-p[i] z**(-1))**mult[i]. After differentiation,
# we must divide by (-p[i])**(m-k) as well as (m-k)!
indx = 0
for n in range(len(pout)):
bn = brev.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]]*mult[l])
an = atleast_1d(poly(pn))[::-1]
# bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
# multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
sig = mult[n]
for m in range(sig,0,-1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn,1),an)
term2 = polymul(bn,polyder(an,1))
bn = polysub(term1,term2)
an = polymul(an,an)
r[indx+m-1] = polyval(bn,1.0/pout[n]) / polyval(an,1.0/pout[n]) \
/ factorial(sig-m) / (-pout[n])**(sig-m)
indx += sig
return r/gain, p, k
def invresz(r, p, k, tol=1e-3, rtype='avg'):
"""Compute b(z) and a(z) from partial fraction expansion: r,p,k
If M = len(b) and N = len(a)
b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
H(z) = ------ = ----------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
See also
--------
residuez, poly, polyval, unique_roots
"""
extra = asarray(k)
p, indx = cmplx_sort(p)
r = take(r,indx,0)
pout, mult = unique_roots(p,tol=tol,rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]]*mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra,a)
else:
b = [0]
indx = 0
brev = asarray(b)[::-1]
for k in range(len(pout)):
temp = []
# Construct polynomial which does not include any of this root
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]]*mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]]*(mult[k]-m-1))
brev = polyadd(brev,(r[indx]*poly(t2))[::-1])
indx += 1
b = real_if_close(brev[::-1])
return b, a
def resample(x, num, t=None, axis=0, window=None):
"""
Resample `x` to `num` samples using Fourier method along the given axis.
The resampled signal starts at the same value as `x` but is sampled
with a spacing of ``len(x) / num * (spacing of x)``. Because a
Fourier method is used, the signal is assumed to be periodic.
Parameters
----------
x : array_like
The data to be resampled.
num : int
The number of samples in the resampled signal.
t : array_like, optional
If `t` is given, it is assumed to be the sample positions
associated with the signal data in `x`.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain. See below for details.
Returns
-------
resampled_x or (resampled_x, resampled_t)
Either the resampled array, or, if `t` was given, a tuple
containing the resampled array and the corresponding resampled
positions.
Notes
-----
The argument `window` controls a Fourier-domain window that tapers
the Fourier spectrum before zero-padding to alleviate ringing in
the resampled values for sampled signals you didn't intend to be
interpreted as band-limited.
If `window` is a function, then it is called with a vector of inputs
indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).
If `window` is an array of the same length as `x.shape[axis]` it is
assumed to be the window to be applied directly in the Fourier
domain (with dc and low-frequency first).
For any other type of `window`, the function `scipy.signal.get_window`
is called to generate the window.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from dx to:
dx * len(x) / num
If `t` is not None, then it represents the old sample positions,
and the new sample positions will be returned as well as the new
samples.
"""
x = asarray(x)
X = fft(x,axis=axis)
Nx = x.shape[axis]
if window is not None:
if callable(window):
W = window(fftfreq(Nx))
elif isinstance(window, ndarray) and window.shape == (Nx,):
W = window
else:
W = ifftshift(get_window(window,Nx))
newshape = ones(len(x.shape))
newshape[axis] = len(W)
W.shape = newshape
X = X*W
sl = [slice(None)]*len(x.shape)
newshape = list(x.shape)
newshape[axis] = num
N = int(np.minimum(num,Nx))
Y = zeros(newshape,'D')
sl[axis] = slice(0,(N+1)/2)
Y[sl] = X[sl]
sl[axis] = slice(-(N-1)/2,None)
Y[sl] = X[sl]
y = ifft(Y,axis=axis)*(float(num)/float(Nx))
if x.dtype.char not in ['F','D']:
y = y.real
if t is None:
return y
else:
new_t = arange(0,num)*(t[1]-t[0])* Nx / float(num) + t[0]
return y, new_t
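# Illustrative sketch, not part of the original module: upsampling a periodic
# signal from 20 to 100 points along with its sample positions.
#
#   t = np.linspace(0, 1, 20, endpoint=False)
#   x = np.sin(2 * np.pi * t)
#   y, t_new = resample(x, 100, t)   # 100 samples plus matching positions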
def detrend(data, axis=-1, type='linear', bp=0):
"""
Remove linear trend along axis from data.
Parameters
----------
data : array_like
The input data.
axis : int, optional
The axis along which to detrend the data. By default this is the
last axis (-1).
type : {'linear', 'constant'}, optional
The type of detrending. If ``type == 'linear'`` (default),
the result of a linear least-squares fit to `data` is subtracted
from `data`.
If ``type == 'constant'``, only the mean of `data` is subtracted.
bp : array_like of ints, optional
A sequence of break points. If given, an individual linear fit is
performed for each part of `data` between two break points.
Break points are specified as indices into `data`.
Returns
-------
ret : ndarray
The detrended input data.
Examples
--------
>>> randgen = np.random.RandomState(9)
>>> npoints = 1e3
>>> noise = randgen.randn(npoints)
>>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
>>> (sp.signal.detrend(x) - noise).max() < 0.01
True
"""
if type not in ['linear','l','constant','c']:
raise ValueError("Trend type must be 'linear' or 'constant'.")
data = asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant','c']:
ret = data - expand_dims(mean(data,axis),axis)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = sort(unique(r_[0,bp,N]))
if any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0: axis = axis + rnk
newdims = r_[axis,0:axis,axis+1:rnk]
newdata = reshape(transpose(data, tuple(newdims)),
(N, prod(dshape, axis=0)/N))
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m+1] - bp[m]
A = ones((Npts,2),dtype)
A[:,0] = cast[dtype](arange(1,Npts+1)*1.0/Npts)
sl = slice(bp[m],bp[m+1])
coef,resids,rank,s = linalg.lstsq(A,newdata[sl])
newdata[sl] = newdata[sl] - dot(A,coef)
# Put data back in original shape.
tdshape = take(dshape,newdims,0)
ret = reshape(newdata,tuple(tdshape))
vals = range(1,rnk)
olddims = vals[:axis] + [0] + vals[axis:]
ret = transpose(ret,tuple(olddims))
return ret
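# Illustrative usage sketch for detrend(): removing a piecewise-linear trend
# with explicit break points. NumPy is assumed importable as np; the data are
# made up for the example.
def _example_detrend():
    import numpy as np
    n = 200
    trend = np.r_[np.linspace(0., 1., n // 2), np.linspace(1., 0., n // 2)]  # tent-shaped trend
    noisy = trend + 0.01 * np.random.randn(n)
    flat = detrend(noisy, type='linear', bp=[n // 2])   # separate linear fit on each half
    return flat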
def lfilter_zi(b, a):
    # Compute the zi state from the filter parameters; see [Gust96].
#Based on:
# [Gust96] Fredrik Gustafsson, Determining the initial states in
# forward-backward filtering, IEEE Transactions on
# Signal Processing, pp. 988--992, April 1996,
# Volume 44, Issue 4
n=max(len(a),len(b))
zin = (np.eye(n-1) - np.hstack((-a[1:n,newaxis],
np.vstack((np.eye(n-2),zeros(n-2))))))
zid= b[1:n] - a[1:n]*b[0]
zi_matrix=linalg.inv(zin)*(np.matrix(zid).transpose())
zi_return=[]
#convert the result into a regular array (not a matrix)
for i in range(len(zi_matrix)):
zi_return.append(float(zi_matrix[i][0]))
return array(zi_return)
def filtfilt(b, a, x):
b, a, x = map(asarray, [b, a, x])
# FIXME: For now only accepting 1d arrays
ntaps=max(len(a),len(b))
edge=ntaps*3
if x.ndim != 1:
raise ValueError("filtfilt only accepts 1-d arrays.")
#x must be bigger than edge
if x.size < edge:
raise ValueError("Input vector needs to be bigger than "
"3 * max(len(a),len(b).")
if len(a) < ntaps:
a=r_[a,zeros(len(b)-len(a))]
if len(b) < ntaps:
b=r_[b,zeros(len(a)-len(b))]
zi = lfilter_zi(b,a)
#Grow the signal to have edges for stabilizing
#the filter with inverted replicas of the signal
s=r_[2*x[0]-x[edge:1:-1],x,2*x[-1]-x[-1:-edge:-1]]
    # For a single filtering pass only one of the extremes would be needed;
    # both are needed for filtfilt (forward and backward passes).
(y,zf)=lfilter(b,a,s,-1,zi*s[0])
(y,zf)=lfilter(b,a,flipud(y),-1,zi*y[-1])
return flipud(y[edge-1:-edge+1])
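# Illustrative usage sketch for filtfilt(): zero-phase low-pass filtering of a
# noisy sine. NumPy and scipy.signal.butter are assumed importable; the cutoff
# and noise level are arbitrary example values.
def _example_filtfilt():
    import numpy as np
    from scipy.signal import butter
    t = np.linspace(0., 1., 500)
    x = np.sin(2 * np.pi * 3 * t) + 0.2 * np.random.randn(t.size)
    b, a = butter(4, 0.1)            # 4th-order low-pass, normalized cutoff 0.1
    return filtfilt(b, a, x)         # forward-backward pass -> no phase lag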
from scipy.signal.filter_design import cheby1
from scipy.signal.fir_filter_design import firwin
def decimate(x, q, n=None, ftype='iir', axis=-1):
"""downsample the signal x by an integer factor q, using an order n filter
By default an order 8 Chebyshev type I filter is used or a 30 point FIR
filter with hamming window if ftype is 'fir'.
Parameters
----------
x : N-d array
the signal to be downsampled
q : int
the downsampling factor
n : int or None
the order of the filter (1 less than the length for 'fir')
ftype : {'iir' or 'fir'}
the type of the lowpass filter
axis : int
the axis along which to decimate
Returns
-------
y : N-d array
the down-sampled signal
See also
--------
resample
"""
if not isinstance(q, int):
raise TypeError("q must be an integer")
if n is None:
if ftype == 'fir':
n = 30
else:
n = 8
if ftype == 'fir':
b = firwin(n+1, 1./q, window='hamming')
a = 1.
else:
b, a = cheby1(n, 0.05, 0.8/q)
y = lfilter(b, a, x, axis=axis)
    sl = [slice(None)] * y.ndim   # full slice on every axis; only the decimation axis is strided
sl[axis] = slice(None, None, q)
return y[sl]
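# Illustrative usage sketch for decimate(): reducing the sample rate of a sine
# by a factor of 4. NumPy is assumed importable as np; values are arbitrary.
def _example_decimate():
    import numpy as np
    t = np.arange(1000) / 1000.0
    x = np.sin(2 * np.pi * 5 * t)
    y = decimate(x, 4)               # low-pass filter, then keep every 4th sample
    return y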
| gpl-3.0 | 3,102,251,955,034,431,000 | 29.527513 | 84 | 0.536615 | false |
pudo/extractors | extractors/tesseract.py | 1 | 2334 | import os
import logging
import subprocess
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
from PIL import Image
from tesserwrap import Tesseract, PageSegMode
from extractors.constants import _get_languages
from extractors.cache import set_cache, get_cache
# https://tesserwrap.readthedocs.org/en/latest/#
# https://pillow.readthedocs.org/en/3.0.x/reference/Image.html
log = logging.getLogger(__name__)
TESSDATA_PREFIX = os.environ.get('TESSDATA_PREFIX')
PDFTOPPM_BIN = os.environ.get('PDFTOPPM_BIN', 'pdftoppm')
INSTANCES = {}
def extract_image(path, languages=None):
"""
Extract text from an image.
Use tesseract to extract text in the given ``languages`` from an
image file. Tesseract should support a wide range of formats, including
PNG, TIFF and JPG.
"""
with open(path, 'rb') as fh:
return extract_image_data(fh.read(), languages=languages)
def extract_image_data(data, languages=None):
"""Extract text from a binary string of data."""
if TESSDATA_PREFIX is None:
raise ValueError('Env TESSDATA_PREFIX is not set, OCR will not work.')
key, text = get_cache(data)
if text is not None:
return text
try:
img = Image.open(StringIO(data))
except Exception as ex:
log.debug('Failed to parse image internally: %r', ex)
return ''
# TODO: play with contrast and sharpening the images.
try:
languages = _get_languages(languages)
extractor = Tesseract(TESSDATA_PREFIX, lang=languages)
extractor.set_page_seg_mode(PageSegMode.PSM_AUTO_OSD)
text = extractor.ocr_image(img)
log.debug('OCR done: %s, %s characters extracted',
languages, len(text))
set_cache(key, text)
return text
except Exception as ex:
log.exception(ex)
return ''
def _extract_image_page(pdf_file, page, languages=None):
# This is a somewhat hacky way of working around some of the formats
# and compression mechanisms not supported in pdfminer. It will
# generate an image based on the given page in the PDF and then OCR
# that.
args = [PDFTOPPM_BIN, pdf_file, '-singlefile', '-gray', '-f', str(page)]
output = subprocess.check_output(args)
return extract_image_data(output, languages=languages)
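# Illustrative usage sketch for extract_image(): OCR-ing a scanned page image.
# The file path is hypothetical, and TESSDATA_PREFIX must point at installed
# tesseract language data for this to run.
def _example_extract_image():
    return extract_image('/tmp/scanned_page.png')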
| mit | -5,880,061,146,086,956,000 | 31.873239 | 78 | 0.680805 | false |
flavour/tldrmp | modules/savage/graph/base.py | 24 | 22588 | from axes import XAxis, YAxis
from ..graphics import Canvas, PrintableCanvas
from ..graphics.group import Group, Grouping
from ..graphics.shapes import Line, Rectangle, Text
from ..graphics.utils import ViewBox, Translate, Rotate, addAttr, blank, boolean
from ..graphics.color import hex_to_color, Color
from ..utils.struct import Matrix
from ..utils.struct import Vector as V
from re import match
class BaseGraph (PrintableCanvas):
def __init__ (self, canvasType, **attr):
if attr.has_key ('settings') and attr['settings']:
self.applySettings (attr['settings'])
else:
self.formatSettings (blank ())
if attr.has_key ('width'):
self.settings.width = attr['width']
if attr.has_key ('height'):
self.settings.height = attr['height']
view = ViewBox (0, 0, self.settings.width, self.settings.height)
PrintableCanvas.__init__ (self, viewBox = view, **attr)
self.addScript (self.jsUtils ())
self.addScript (self.jsHighlight ())
self.dataGroup = Grouping ()
if canvasType:
self.attachCanvas (canvasType, **attr)
else:
self.canvas = None
self.draw (self.dataGroup)
self.initialFormatting ()
def attachCanvas (self, canvasType, **attr):
self.canvas = canvasType (width = self.settings.width - self.settings.leftMargin - self.settings.rightMargin,
height = self.settings.height - self.settings.topMargin - self.settings.bottomMargin,
x = self.settings.leftMargin,
y = self.settings.topMargin,
id='canvas-root',
**attr)
self.dataGroup.draw (self.canvas)
def jsUtils (self):
return """
function registerEvent (object, event, method, scope, capture) {
if (!scope)
scope = window;
if (!capture)
capture = false;
var func = function (event) {
if (!event)
event = window.event;
return method.call (scope, event);
}
if (object.addEventListener)
object.addEventListener (event, func, capture);
else if (object.attachEvent)
object.attachEvent (event, method, func, capture);
else
return false;
return true;
}
function ViewBox (x, y, width, height) {
this.x = parseFloat (x);
this.y = parseFloat (y);
this.width = parseFloat (width);
this.height = parseFloat (height);
this.quadrant = function (v) {
var midX = this.x + (this.width / 2.0);
var midY = this.y + (this.height / 2.0);
if (v.y <= midY) {
if (v.x >= midX)
return 1;
else
return 2;
}
else {
if (v.x <= midX)
return 3;
else
return 4;
}
}
}
function getView (viewer) {
var view = viewer.getAttribute ('viewBox');
view = view.split (' ');
return new ViewBox (view[0], view[1], view[2], view[3]);
}
function vect (x, y) {
this.x = x;
this.y = y;
}
function pos (node) {
var x, y;
if (node.getAttribute ('x')) {
x = node.getAttribute ('x');
y = node.getAttribute ('y');
}
else if (node.getAttribute ('cx')) {
x = node.getAttribute ('cx');
y = node.getAttribute ('cy');
}
x = parseFloat (x);
y = parseFloat (y);
return new vect (x, y);
}
"""
def jsHighlight (self):
return """
function highlight (event) {
this.setAttribute ('fill', this.getAttribute ('highlight-fill'));
}
function unHighlight (event) {
this.setAttribute ('fill', this.getAttribute ('default-fill'));
}
function addHighlights (node) {
if (node.getAttribute) {
if (node.getAttribute ('has-highlight')) {
node.setAttribute ('default-fill', node.getAttribute ('fill'));
registerEvent (node, 'mouseover', highlight, node);
registerEvent (node, 'mouseout', unHighlight, node);
}
for (var i = 0; i < node.childNodes.length; i ++) {
addHighlights (node.childNodes[i]);
}
}
}
registerEvent (window, 'load', function () {
var root = document.getElementById ('canvas-root');
addHighlights (root);
});
"""
def applySettings (self, filenameOrDict):
if type (filenameOrDict) == str:
file = open (filenameOrDict)
buffer = file.read ()
setList = []
for child in buffer.split ('\n'):
if len (child) == 0:
continue
if child.startswith ('#'):
continue
pair = match ('^([^=]+)=(.*)$', child)
if pair is None:
                    print 'Warning: bad formatting in line: ' + child
continue
key = pair.group (1)
value = pair.group (2)
setList.append ((key.strip (), value.strip ()))
settings = blank ()
for key, value in setList:
setattr (settings, key, value)
elif type (filenameOrDict) == dict:
settings = blank ()
for key, value in filenameOrDict.iteritems ():
setattr (settings, key, str (value))
else:
raise RuntimeError ('Bad type for settings')
self.formatSettings (settings)
def formatSettings (self, settings):
addAttr (settings, 'width', float, 300.0)
addAttr (settings, 'height', float, 200.0)
addAttr (settings, 'fixedWidth', float, None)
addAttr (settings, 'titleSize', float, 10.0)
addAttr (settings, 'xLabelSize', float, 8.0)
addAttr (settings, 'yLabelSize', float, 8.0)
addAttr (settings, 'y2LabelSize', float, 8.0)
addAttr (settings, 'leftMargin', float, 10.0)
addAttr (settings, 'rightMargin', float, 10.0)
addAttr (settings, 'topMargin', float, 10.0)
addAttr (settings, 'bottomMargin', float, 10.0)
addAttr (settings, 'titleSpace', float, 10.0)
addAttr (settings, 'xLabelSpace', float, 10.0)
addAttr (settings, 'yLabelSpace', float, 10.0)
addAttr (settings, 'y2LabelSpace', float, 10.0)
addAttr (settings, 'tooltipSize', float, 7.0)
self.settings = settings
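    # Illustrative sketch: applySettings() accepts either a path to a
    # "key = value" text file or a plain dict; the keys below mirror defaults
    # registered in formatSettings() above, with example values only.
    def _example_settings(self):
        return {
            'width': 640, 'height': 480,        # overall drawing size
            'titleSize': 14, 'xLabelSize': 9,   # text heights
            'leftMargin': 15, 'rightMargin': 15,
        }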
def initialFormatting (self):
# Format Label Group
self.labels = Group (className = 'labels')
# Format Title
self.title = Text (text = '',
id = 'title',
textHeight= self.settings.titleSize,
horizontalAnchor = 'center')
# Format X Label
self.xlabel = Text (text = '',
id = 'xlabel',
textHeight = self.settings.xLabelSize,
verticalAnchor = 'bottom',
horizontalAnchor = 'center')
# Format Y Label
self.ylabel = Group ()
ylabelRotate = Rotate (-90)
self.ylabel.appendTransform (ylabelRotate)
self.ylabelText = Text (text = '',
id = 'ylabel',
textHeight = self.settings.yLabelSize,
horizontalAnchor = 'center')
self.ylabel.draw (self.ylabelText)
# Format Y2 Label
self.y2label = Group ()
y2labelRotate = Rotate (90, self.settings.width, 0)
self.y2label.appendTransform (y2labelRotate)
self.y2labelText = Text (text = '',
id = 'y2label',
textHeight = self.settings.y2LabelSize,
horizontalAnchor = 'center')
self.y2label.draw (self.y2labelText)
def positionLabels (self):
if self.canvas:
topY = self.settings.height - (self.canvas.height + self.canvas.y)
midX = self.canvas.x + (self.canvas.width) / 2.0
midY = topY + (self.canvas.height) / 2.0
else:
midX = self.settings.width / 2.0
midY = self.settings.height / 2.0
# Title Position
self.title.move (midX, self.settings.topMargin)
# Y Label Position
self.ylabelText.move (-midY, self.settings.leftMargin)
# X Label Position
self.xlabel.move (midX, self.settings.height - self.settings.bottomMargin)
# Y2 Label Position
self.y2labelText.move (self.settings.width + midY, self.settings.rightMargin)
def setTitle (self, title):
self.title.setText (title)
self.labels.draw (self.title)
if self.canvas:
deltaY = self.title.height + self.settings.titleSpace
self.canvas.changeSize (0, -deltaY)
def setXLabel (self, xlabel):
self.xlabel.setText (xlabel)
self.labels.draw (self.xlabel)
if self.canvas:
deltaY = self.xlabel.height + self.settings.xLabelSpace
self.canvas.move (0, deltaY)
self.canvas.changeSize (0, -deltaY)
def setYLabel (self, ylabel):
self.ylabelText.setText (ylabel)
self.labels.draw (self.ylabel)
if self.canvas:
deltaX = self.ylabelText.height + self.settings.yLabelSpace
self.canvas.move (deltaX, 0)
self.canvas.changeSize (-deltaX, 0)
def setY2Label (self, ylabel):
self.y2labelText.setText (ylabel)
self.labels.draw (self.y2label)
if self.canvas:
deltaX = self.y2labelText.height + self.settings.y2LabelSpace
self.canvas.changeSize (-deltaX, 0)
def setSVG (self):
self.finalize ()
attr = PrintableCanvas.setSVG (self)
if self.settings.fixedWidth:
height = self.settings.fixedWidth * (self.settings.height / self.settings.width)
attr.update ([('width', self.settings.fixedWidth),
('height', height)])
return attr
def finalize (self):
self.dataGroup.transform.set (-1, 1, 1)
self.dataGroup.transform.set (self.viewBox.y + self.viewBox.height, 1, 2)
self.positionLabels ()
if len (self.labels) > 0:
self.drawAt (self.labels, 1)
class UnifiedGraph (BaseGraph):
def __init__ (self, canvas, **attr):
BaseGraph.__init__ (self, canvas, **attr)
self.addScript (self.jsCanvasZone ())
self.addScript (self.jsTooltip ())
self.setProperties ()
self.xlabels = []
self.ylabels = []
self.ypositions = []
self.y2labels = []
def formatSettings (self, settings):
BaseGraph.formatSettings (self, settings)
addAttr (settings, 'xAxisSpace', float, 2.0)
addAttr (settings, 'yAxisSpace', float, 2.0)
addAttr (settings, 'y2AxisSpace', float, 2.0)
addAttr (settings, 'xAxisTextHeight', float, 6.0)
addAttr (settings, 'yAxisTextHeight', float, 6.0)
addAttr (settings, 'y2AxisTextHeight', float, 6.0)
addAttr (settings, 'bg', boolean, True)
addAttr (settings, 'bgBarDir', str, 'horizontal')
addAttr (settings, 'bgBars', int, 6)
addAttr (settings, 'bgColor1', hex_to_color, hex_to_color ('efefef'))
addAttr (settings, 'bgColor2', hex_to_color, hex_to_color ('c1c1c1'))
addAttr (settings, 'canvasBorder', boolean, True)
addAttr (settings, 'canvasBorderWidth', float, 1.0)
addAttr (settings, 'canvasBorderColor', hex_to_color, Color (0, 0, 0))
addAttr (settings, 'tooltipXOffset', float, 10.0)
addAttr (settings, 'tooltipYOffset', float, 10.0)
addAttr (settings, 'tooltipXPadding', float, 20.0)
addAttr (settings, 'tooltipYPadding', float, 10.0)
def jsCanvasZone (self):
return """
function addCanvasZone (event) {
var canvasRoot = document.getElementById ('canvas-root');
var canvasRect = document.createElementNS ('http://www.w3.org/2000/svg', 'rect');
canvasRect.id = 'canvas-rect';
var view = getView (canvasRoot);
canvasRect.setAttribute ('x', view.x);
canvasRect.setAttribute ('y', view.y);
canvasRect.setAttribute ('width', view.width);
canvasRect.setAttribute ('height', view.height);
canvasRect.setAttribute ('opacity', 0);
canvasRoot.insertBefore (canvasRect, canvasRoot.childNodes[0]);
}
registerEvent (window, 'load', addCanvasZone);
"""
def jsTooltip (self):
settings = self.root ().settings
return """
function Tooltip (root) {
var canvasRoot = root;
var tooltipGroup;
var tooltipPath;
var tooltipTextNode;
var tooltipText;
var xPadding = """ + str (settings.tooltipXPadding) + """;
var yPadding = """ + str (settings.tooltipYPadding) + """;
var xOffset = """ + str (settings.tooltipXOffset) + """;
var yOffset = """ + str (settings.tooltipYOffset) + """;
this.setText = function (value) {
tooltipText.nodeValue = value;
setPath ();
positionText ();
}
this.move = function (target) {
var v = pos (target)
""" + self.jsChangeTooltipPos () + """
var o = getOffset (target);
var transform = 'translate(' + (v.x + o.x) + ' ' + (v.y + o.y) + ')'
tooltipGroup.setAttribute ('transform', transform);
}
this.show = function () {
tooltipGroup.setAttribute ('visibility', 'visible');
}
this.hide = function (event) {
tooltipGroup.setAttribute ('visibility', 'hidden');
}
var setPath = function () {
var width = parseFloat (tooltipTextNode.getBBox().width) + xPadding;
var height = parseFloat (tooltipTextNode.getBBox ().height) + yPadding;
var data = 'M 0 0 ';
data += 'L ' + width + ' 0 ';
data += 'L ' + width + ' ' + height + ' ';
data += 'L ' + 0 + ' ' + height + ' ';
data += 'Z'
tooltipPath.setAttribute ('d', data);
}
var positionText = function () {
tooltipTextNode.setAttribute ('x', 0);
tooltipTextNode.setAttribute ('y', 0);
var box = tooltipTextNode.getBBox ();
tooltipTextNode.setAttribute ('x', xPadding / 2.0);
tooltipTextNode.setAttribute ('y', -box.y + (yPadding / 2.0));
}
var getOffset = function (target) {
var v = pos (target);
var x, y;
            var targetWidth;
""" + self.jsChangeTooltipPos () + """
var width = parseFloat (tooltipTextNode.getBBox().width);
var height = parseFloat (tooltipTextNode.getBBox ().height);
quad = getView (canvasRoot).quadrant (v);
switch (quad) {
case 1:
x = -(width + xPadding + xOffset);
y = yOffset;
break;
case 2:
x = xOffset;
y = yOffset;
break;
case 3:
x = xOffset;
y = -(height + yPadding + yOffset);
break;
case 4:
x = -(width + xPadding + xOffset);
y = -(height + yPadding + yOffset);
}
return new vect (x, y);
}
var tooltipDOM = function () {
tooltipGroup = document.createElementNS ('http://www.w3.org/2000/svg', 'g');
tooltipPath = document.createElementNS ('http://www.w3.org/2000/svg', 'path');
tooltipTextNode = document.createElementNS ('http://www.w3.org/2000/svg', 'text');
tooltipText = document.createTextNode ('TestNode');
tooltipPath.setAttribute ('fill', 'white');
tooltipPath.setAttribute ('stroke', 'black');
tooltipTextNode.setAttribute ('font-family', 'arial, sans-serif');
tooltipTextNode.setAttribute ('font-size', """ + str (settings.tooltipSize) + """);
tooltipTextNode.setAttribute ('text-anchor', 'start');
canvasRoot.appendChild (tooltipGroup);
tooltipGroup.appendChild (tooltipPath);
tooltipGroup.appendChild (tooltipTextNode);
tooltipTextNode.appendChild (tooltipText);
this.hide ();
setEvents.call (this, canvasRoot);
}
var onHover = function (event) {
var target = event.target;
p = pos (target);
this.setText (target.getAttribute ('tooltip-text'));
this.move (target);
this.show ();
}
var unHover = function (event) {
this.hide ();
}
var setEvents = function (node) {
if (node.getAttribute) {
if (node.getAttribute ('has-tooltip') && node.getAttribute ('has-tooltip') != 'False') {
registerEvent (node, 'mouseover', onHover, this);
registerEvent (node, 'mouseout', unHover, this);
}
for (var i = 0; i < node.childNodes.length; i ++) {
setEvents.call (this, node.childNodes[i]);
}
}
}
tooltipDOM.call (this);
}
registerEvent (window, 'load', function () {
var root = document.getElementById ('canvas-root');
var t = new Tooltip (root);
});
"""
def jsChangeTooltipPos (self):
return """
if (target.getAttribute ('width'))
targetWidth = parseFloat (target.getAttribute ('width'));
else
targetWidth = 0;
targetWidth /= 2.0;
v.x += targetWidth """
def setProperties (self):
self.xaxis = False
self.yaxis = False
self.y2axis = False
def boundingBox (self):
if not self.settings.canvasBorder:
return
bbox = Rectangle (x = self.canvas.x,
y = self.canvas.y,
width= self.canvas.width,
height = self.canvas.height)
bbox.style.strokeColor = self.settings.canvasBorderColor
bbox.style.strokeWidth = self.settings.canvasBorderWidth
bbox.style.fill = 'none'
self.dataGroup.draw (bbox)
def background (self):
if not self.settings.bg:
return
numBars = self.settings.bgBars
color1 = self.settings.bgColor1
color2 = self.settings.bgColor2
if self.settings.bgBarDir == 'vertical':
barHeight =self.canvas.height
barWidth = self.canvas.width / float (numBars)
offsetW = 1.0
offsetH = 0
else:
barHeight = self.canvas.height / float (numBars)
barWidth = self.canvas.width
offsetW = 0
offsetH = 1.0
for i in range (numBars):
rect = Rectangle (x = (self.canvas.x + barWidth * float(i) * offsetW),
y = (self.canvas.y + barHeight * float(i) * offsetH),
width = barWidth,
height = barHeight)
if i % 2 == 0:
fill = color1
else:
fill = color2
rect.style.fill = fill
rect.style.strokeWidth = 0
rect.style.opacity = .35
self.dataGroup.drawAt (rect, 0)
def setXBounds (self):
self.xbounds = (self.canvas.minX, self.canvas.maxX)
def setYBounds (self):
self.ybounds = (self.canvas.minY, self.canvas.maxY)
def setY2Bounds (self):
self.y2bounds = (self.canvas.minY2, self.canvas.maxY2)
def createXAxisSpace (self):
self.canvas.move (0, self.settings.xAxisTextHeight)
self.canvas.changeSize (0, -self.settings.xAxisTextHeight)
self.xAxisPos = self.canvas.y
self.canvas.move (0, self.settings.xAxisSpace)
self.canvas.changeSize (0, -self.settings.xAxisSpace)
def createXAxis (self):
textProperties = {'textHeight': self.settings.xAxisTextHeight,
'horizontalAnchor': 'center',
}
xaxis = XAxis (id = 'x-axis',
inf = self.canvas.x,
sup = self.canvas.x + self.canvas.width,
y = self.xAxisPos,
lower = self.xbounds[0],
upper = self.xbounds[1],
textProperties = textProperties)
xaxis.createTicks ()
if self.xlabels:
xaxis.setText (self.xlabels)
xaxis.drawTicks ()
self.dataGroup.drawAt (xaxis, 0)
def createYAxis (self):
textProperties = {'textHeight': self.settings.yAxisTextHeight,
'horizontalAnchor': 'right',
'verticalAnchor': 'middle',
}
yaxis = YAxis (inf = self.canvas.y,
sup = self.canvas.y + self.canvas.height,
x = 0,
lower = self.ybounds[0],
upper = self.ybounds[1],
textProperties = textProperties)
yaxis.createTicks (self.ypositions)
yaxis.setText (self.ylabels)
yaxis.drawTicks ()
yaxis.move (self.canvas.x + yaxis.width, 0)
self.canvas.changeSize (-yaxis.width - self.settings.yAxisSpace, 0)
self.canvas.move (yaxis.width + self.settings.yAxisSpace, 0)
self.dataGroup.drawAt (yaxis, 0)
def createY2Axis (self):
ybounds = self.y2bounds
textProperties = {'textHeight': self.settings.y2AxisTextHeight,
'horizontalAnchor': 'left',
'verticalAnchor': 'middle',
}
yaxis = YAxis (inf = self.canvas.y,
sup = self.canvas.y + self.canvas.height,
x = 0,
lower = self.y2bounds[0],
upper = self.y2bounds[1],
textProperties = textProperties)
yaxis.createTicks ()
if self.y2labels:
yaxis.setText (self.y2labels)
yaxis.drawTicks ()
yaxis.move (self.canvas.x + self.canvas.width - yaxis.width, 0)
self.canvas.changeSize (-yaxis.width - self.settings.y2AxisSpace, 0)
self.dataGroup.drawAt (yaxis, 0)
def finalize (self):
self.canvas.setBounds ()
if self.xaxis:
self.setXBounds ()
self.createXAxisSpace ()
if self.yaxis:
self.setYBounds ()
self.createYAxis ()
if self.y2axis:
self.setY2Bounds ()
self.createY2Axis ()
if self.xaxis:
self.createXAxis ()
BaseGraph.finalize (self)
self.background ()
self.boundingBox ()
self.canvas.finalize ()
| mit | 4,909,359,883,749,387,000 | 33.750769 | 120 | 0.552816 | false |
merbla/splunk-octopusdeploy-add-on | docs/conf.py | 1 | 10019 | # -*- coding: utf-8 -*-
#
# splunk-octopusdeploy-add-on documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 14 15:37:49 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'splunk-octopusdeploy-add-on'
copyright = u'2016, Matthew Erbs & Contributors'
author = u'Matthew Erbs & Contributors '
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'splunk-octopusdeploy-add-on v0.0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'splunk-octopusdeploy-add-ondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'splunk-octopusdeploy-add-on.tex', u'splunk-octopusdeploy-add-on Documentation',
u'Matthew Erbs', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'splunk-octopusdeploy-add-on', u'splunk-octopusdeploy-add-on Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'splunk-octopusdeploy-add-on', u'splunk-octopusdeploy-add-on Documentation',
author, 'splunk-octopusdeploy-add-on', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| apache-2.0 | 5,700,241,391,731,847,000 | 28.642012 | 97 | 0.695279 | false |
fifengine/fifengine | engine/python/fife/extensions/pychan/widgets/curvegraph.py | 1 | 5769 | # -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2013 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
from __future__ import absolute_import
from builtins import range
from fife import fifechan
from fife.extensions.pychan.attrs import BoolAttr, ColorAttr, IntAttr, IntListAttr
from fife.extensions.pychan.properties import ColorProperty
from .widget import Widget
class CurveGraph(Widget):
""" A curve graph widget
New Attributes
==============
- coordinates: int list: x and y coordinates
    - thickness: int: Line thickness, default 1
    - controll_points: bool: Adds internal control points, default True
- opaque: bool: default False
"""
ATTRIBUTES = Widget.ATTRIBUTES + [ IntListAttr('coordinates'),
IntAttr('thickness'),
BoolAttr('controll_points'),
BoolAttr('opaque')
]
DEFAULT_HEXPAND = False
DEFAULT_VEXPAND = False
DEFAULT_THICKNESS = 1
DEFAULT_CONTROLL_POINTS = True
DEFAULT_OPAQUE = False
def __init__(self,
parent = None,
name = None,
size = None,
min_size = None,
max_size = None,
fixed_size = None,
margins = None,
padding = None,
helptext = None,
position = None,
style = None,
hexpand = None,
vexpand = None,
font = None,
base_color = None,
background_color = None,
foreground_color = None,
selection_color = None,
border_color = None,
outline_color = None,
border_size = None,
outline_size = None,
position_technique = None,
is_focusable = None,
comment = None,
opaque = None,
coordinates = None,
thickness = None,
controll_points = None):
self.real_widget = fifechan.CurveGraph()
self.opaque = self.DEFAULT_OPAQUE
self.thickness = self.DEFAULT_THICKNESS
self.controll_points = self.DEFAULT_CONTROLL_POINTS
super(CurveGraph, self).__init__(parent=parent,
name=name,
size=size,
min_size=min_size,
max_size=max_size,
fixed_size=fixed_size,
margins=margins,
padding=padding,
helptext=helptext,
position=position,
style=style,
hexpand=hexpand,
vexpand=vexpand,
font=font,
base_color=base_color,
background_color=background_color,
foreground_color=foreground_color,
selection_color=selection_color,
border_color=border_color,
outline_color=outline_color,
border_size=border_size,
outline_size=outline_size,
position_technique=position_technique,
is_focusable=is_focusable,
comment=comment)
if opaque is not None: self.opaque = opaque
if coordinates is not None: self.coordinates = coordinates
if thickness is not None: self.thickness = thickness
if controll_points is not None: self.controll_points = controll_points
def clone(self, prefix):
curveGraphClone = CurveGraph(None,
self._createNameWithPrefix(prefix),
self.size,
self.min_size,
self.max_size,
self.fixed_size,
self.margins,
self.padding,
self.helptext,
self.position,
self.style,
self.hexpand,
self.vexpand,
self.font,
self.base_color,
self.background_color,
self.foreground_color,
self.selection_color,
self.border_color,
self.outline_color,
self.border_size,
self.outline_size,
self.position_technique,
self.is_focusable,
self.comment,
self.opaque,
self.coordinates,
self.thickness,
self.controll_points)
return curveGraphClone
def _setOpaque(self, opaque): self.real_widget.setOpaque(opaque)
def _getOpaque(self): return self.real_widget.isOpaque()
opaque = property(_getOpaque, _setOpaque)
def _setCoordinates(self, coordinates):
# reset
if coordinates is None or len(coordinates) == 0:
self.real_widget.resetPointVector()
return
# int list to point vector
if isinstance(coordinates[0], int):
coords = fifechan.FcnPointVector()
for i in range(0, len(coordinates)-1, 2):
coords.append(fifechan.Point(coordinates[i], coordinates[i+1]))
self.real_widget.setPointVector(coords)
return
self.real_widget.setPointVector(coordinates)
def _getCoordinates(self): return self.real_widget.getPointVector()
coordinates = property(_getCoordinates, _setCoordinates)
def _setThickness(self, thickness): self.real_widget.setThickness(thickness)
def _getThickness(self): return self.real_widget.getThickness()
thickness = property(_getThickness, _setThickness)
def _setControllPoints(self, controll): self.real_widget.setAutomaticControllPoints(controll)
def _getControllPoints(self): return self.real_widget.isAutomaticControllPoints()
controll_points = property(_getControllPoints, _setControllPoints)
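# Illustrative usage sketch: creating a small curve graph widget from a flat
# list of x/y pairs. The size, thickness and coordinate values are arbitrary
# example values, and a working fifechan/pychan setup is assumed.
def _example_curve_graph():
    graph = CurveGraph(
        size=(200, 100),
        thickness=2,
        controll_points=True,                 # attribute name as defined above
        coordinates=[0, 80, 50, 20, 100, 60,  # x0, y0, x1, y1, ...
                     150, 40, 200, 10],
    )
    return graph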
| lgpl-2.1 | -6,652,644,385,663,274,000 | 30.183784 | 94 | 0.659906 | false |
darkdarkfruit/adaptpath | adaptpath/__init__.py | 1 | 1147 | #!/usr/bin/env python
# coding: utf-8
#
""" module docstring: module description title
module description details.
"""
# -------------- import starts -------------------------
# import standard libs here. (eg: import sys)
# import 3rd libs here
# import app modules here
# -------------- import ends ---------------------------
# put vars you want to export here
# __all__ = []
# -------------- globals starts ------------------------
# put global constants here
# put global vars here
# -------------- global ends ---------------------------
# bellow is the main module body
# ======================================================
MAIN = 0
MINOR = 3
MICRO = 7
commit_hash = ''
# commit_hash = 'dac4a317976354339ea66942477'
if commit_hash:
MAIN = commit_hash
MINOR = MICRO = ''
version_tuple = (MAIN, MINOR, MICRO)
VERSION = version = __version__ = '.'.join([str(i) for i in version_tuple]).strip('.')
def get_version_tuple():
return version_tuple
def get_version():
return version
if __name__ == '__main__':
print(version_tuple)
print(get_version_tuple())
print(version)
print(get_version())
| mit | 8,048,216,411,866,563,000 | 18.440678 | 86 | 0.530078 | false |
Wuguanping/Server_Manage_Plugin | Openstack_Plugin/ironic-plugin-pike/ironic/tests/unit/conf/test_auth.py | 4 | 2394 | # Copyright 2016 Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1 import loading as kaloading
from oslo_config import cfg
from ironic.conf import auth as ironic_auth
from ironic.tests import base
class AuthConfTestCase(base.TestCase):
def setUp(self):
super(AuthConfTestCase, self).setUp()
self.config(region_name='fake_region',
group='keystone')
self.test_group = 'test_group'
self.cfg_fixture.conf.register_group(cfg.OptGroup(self.test_group))
ironic_auth.register_auth_opts(self.cfg_fixture.conf, self.test_group)
self.config(auth_type='password',
group=self.test_group)
# NOTE(pas-ha) this is due to auth_plugin options
# being dynamically registered on first load,
# but we need to set the config before
plugin = kaloading.get_plugin_loader('password')
opts = kaloading.get_auth_plugin_conf_options(plugin)
self.cfg_fixture.register_opts(opts, group=self.test_group)
self.config(auth_url='http://127.0.0.1:9898',
username='fake_user',
password='fake_pass',
project_name='fake_tenant',
group=self.test_group)
def test_add_auth_opts(self):
opts = ironic_auth.add_auth_opts([])
# check that there is no duplicates
names = {o.dest for o in opts}
self.assertEqual(len(names), len(opts))
# NOTE(pas-ha) checking for most standard auth and session ones only
expected = {'timeout', 'insecure', 'cafile', 'certfile', 'keyfile',
'auth_type', 'auth_url', 'username', 'password',
'tenant_name', 'project_name', 'trust_id',
'domain_id', 'user_domain_id', 'project_domain_id'}
self.assertTrue(expected.issubset(names))
| apache-2.0 | 1,052,685,409,039,936,100 | 42.527273 | 78 | 0.646199 | false |
ramcn/demo3 | venv/lib/python3.4/site-packages/rest_framework/compat.py | 8 | 8476 | """
The `compat` module provides support for backwards compatibility with older
versions of django/python, and compatibility wrappers around optional packages.
"""
# flake8: noqa
from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import urlparse as _urlparse
from django.utils import six
import django
import inspect
try:
import importlib
except ImportError:
from django.utils import importlib
def unicode_repr(instance):
# Get the repr of an instance, but ensure it is a unicode string
# on both python 3 (already the case) and 2 (not the case).
if six.PY2:
return repr(instance).decode('utf-8')
return repr(instance)
def unicode_to_repr(value):
# Coerce a unicode string to the correct repr return type, depending on
# the Python version. We wrap all our `__repr__` implementations with
# this and then use unicode throughout internally.
if six.PY2:
return value.encode('utf-8')
return value
def unicode_http_header(value):
# Coerce HTTP header value to unicode.
if isinstance(value, six.binary_type):
return value.decode('iso-8859-1')
return value
def total_seconds(timedelta):
# TimeDelta.total_seconds() is only available in Python 2.7
if hasattr(timedelta, 'total_seconds'):
return timedelta.total_seconds()
else:
return (timedelta.days * 86400.0) + float(timedelta.seconds) + (timedelta.microseconds / 1000000.0)
# OrderedDict only available in Python 2.7.
# This will always be the case in Django 1.7 and above, as these versions
# no longer support Python 2.6.
# For Django <= 1.6 and Python 2.6 fall back to SortedDict.
try:
from collections import OrderedDict
except ImportError:
from django.utils.datastructures import SortedDict as OrderedDict
# HttpResponseBase only exists from 1.5 onwards
try:
from django.http.response import HttpResponseBase
except ImportError:
from django.http import HttpResponse as HttpResponseBase
# contrib.postgres only supported from 1.8 onwards.
try:
from django.contrib.postgres import fields as postgres_fields
except ImportError:
postgres_fields = None
# request only provides `resolver_match` from 1.5 onwards.
def get_resolver_match(request):
try:
return request.resolver_match
except AttributeError:
# Django < 1.5
from django.core.urlresolvers import resolve
return resolve(request.path_info)
# django-filter is optional
try:
import django_filters
except ImportError:
django_filters = None
if django.VERSION >= (1, 6):
def clean_manytomany_helptext(text):
return text
else:
# Up to version 1.5 many to many fields automatically suffix
# the `help_text` attribute with hardcoded text.
def clean_manytomany_helptext(text):
if text.endswith(' Hold down "Control", or "Command" on a Mac, to select more than one.'):
text = text[:-69]
return text
# Django-guardian is optional. Import only if guardian is in INSTALLED_APPS
# Fixes (#1712). We keep the try/except for the test suite.
guardian = None
if 'guardian' in settings.INSTALLED_APPS:
try:
import guardian
import guardian.shortcuts # Fixes #1624
except ImportError:
pass
def get_model_name(model_cls):
try:
return model_cls._meta.model_name
except AttributeError:
# < 1.6 used module_name instead of model_name
return model_cls._meta.module_name
# View._allowed_methods only present from 1.5 onwards
if django.VERSION >= (1, 5):
from django.views.generic import View
else:
from django.views.generic import View as DjangoView
class View(DjangoView):
def _allowed_methods(self):
return [m.upper() for m in self.http_method_names if hasattr(self, m)]
# MinValueValidator, MaxValueValidator et al. only accept `message` in 1.8+
if django.VERSION >= (1, 8):
from django.core.validators import MinValueValidator, MaxValueValidator
from django.core.validators import MinLengthValidator, MaxLengthValidator
else:
from django.core.validators import MinValueValidator as DjangoMinValueValidator
from django.core.validators import MaxValueValidator as DjangoMaxValueValidator
from django.core.validators import MinLengthValidator as DjangoMinLengthValidator
from django.core.validators import MaxLengthValidator as DjangoMaxLengthValidator
class MinValueValidator(DjangoMinValueValidator):
def __init__(self, *args, **kwargs):
self.message = kwargs.pop('message', self.message)
super(MinValueValidator, self).__init__(*args, **kwargs)
class MaxValueValidator(DjangoMaxValueValidator):
def __init__(self, *args, **kwargs):
self.message = kwargs.pop('message', self.message)
super(MaxValueValidator, self).__init__(*args, **kwargs)
class MinLengthValidator(DjangoMinLengthValidator):
def __init__(self, *args, **kwargs):
self.message = kwargs.pop('message', self.message)
super(MinLengthValidator, self).__init__(*args, **kwargs)
class MaxLengthValidator(DjangoMaxLengthValidator):
def __init__(self, *args, **kwargs):
self.message = kwargs.pop('message', self.message)
super(MaxLengthValidator, self).__init__(*args, **kwargs)
# URLValidator only accepts `message` in 1.6+
if django.VERSION >= (1, 6):
from django.core.validators import URLValidator
else:
from django.core.validators import URLValidator as DjangoURLValidator
class URLValidator(DjangoURLValidator):
def __init__(self, *args, **kwargs):
self.message = kwargs.pop('message', self.message)
super(URLValidator, self).__init__(*args, **kwargs)
# EmailValidator requires explicit regex prior to 1.6+
if django.VERSION >= (1, 6):
from django.core.validators import EmailValidator
else:
from django.core.validators import EmailValidator as DjangoEmailValidator
from django.core.validators import email_re
class EmailValidator(DjangoEmailValidator):
def __init__(self, *args, **kwargs):
super(EmailValidator, self).__init__(email_re, *args, **kwargs)
# PATCH method is not implemented by Django
if 'patch' not in View.http_method_names:
View.http_method_names = View.http_method_names + ['patch']
# RequestFactory only provides `generic` from 1.5 onwards
from django.test.client import RequestFactory as DjangoRequestFactory
from django.test.client import FakePayload
try:
# In 1.5 the test client uses force_bytes
from django.utils.encoding import force_bytes as force_bytes_or_smart_bytes
except ImportError:
# In 1.4 the test client just uses smart_str
from django.utils.encoding import smart_str as force_bytes_or_smart_bytes
class RequestFactory(DjangoRequestFactory):
def generic(self, method, path,
data='', content_type='application/octet-stream', **extra):
parsed = _urlparse(path)
data = force_bytes_or_smart_bytes(data, settings.DEFAULT_CHARSET)
r = {
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': force_text(parsed[4]),
'REQUEST_METHOD': six.text_type(method),
}
if data:
r.update({
'CONTENT_LENGTH': len(data),
'CONTENT_TYPE': six.text_type(content_type),
'wsgi.input': FakePayload(data),
})
r.update(extra)
return self.request(**r)
# Markdown is optional
try:
import markdown
def apply_markdown(text):
"""
Simple wrapper around :func:`markdown.markdown` to set the base level
of '#' style headers to <h2>.
"""
extensions = ['headerid(level=2)']
safe_mode = False
md = markdown.Markdown(extensions=extensions, safe_mode=safe_mode)
return md.convert(text)
except ImportError:
apply_markdown = None
# `separators` argument to `json.dumps()` differs between 2.x and 3.x
# See: http://bugs.python.org/issue22767
if six.PY3:
SHORT_SEPARATORS = (',', ':')
LONG_SEPARATORS = (', ', ': ')
INDENT_SEPARATORS = (',', ': ')
else:
SHORT_SEPARATORS = (b',', b':')
LONG_SEPARATORS = (b', ', b': ')
INDENT_SEPARATORS = (b',', b': ')
| mit | -3,585,844,919,021,832,700 | 32.634921 | 107 | 0.686409 | false |
GeorgiaTechMSSE/ReliableMD | tools/moltemplate/src/extract_lammps_data.py | 30 | 4357 | #!/usr/bin/env python
lammps_data_sections = set(['Atoms',
'Masses',
'Bonds',
'Bond Coeffs',
'Angles',
'Angle Coeffs',
'Dihedrals',
'Dihedral Coeffs',
'Impropers',
'Improper Coeffs',
'BondBond Coeffs', # class2 angles
'BondAngle Coeffs', # class2 angles
'MiddleBondTorsion Coeffs', # class2 dihedrals
'EndBondTorsion Coeffs', # class2 dihedrals
'AngleTorsion Coeffs', # class2 dihedrals
'AngleAngleTorsion Coeffs', # class2 dihedrals
'BondBond13 Coeffs', # class2 dihedrals
'AngleAngle Coeffs', # class2 impropers
'Angles By Type', # new. not standard LAMMPS
'Dihedrals By Type',# new. not standard LAMMPS
                            'Impropers By Type']) # new. not standard LAMMPS
def DeleteComments(string,
escape='\\',
comment_char='#'):
escaped_state = False
for i in range(0,len(string)):
if string[i] in escape:
if escaped_state:
escaped_state = False
else:
escaped_state = True
elif string[i] == comment_char:
if not escaped_state:
return string[0:i]
return string
def ExtractDataSection(f,
section_name,
comment_char = '#',
include_section_name = False,
return_line_nums = False):
inside_section = False
if section_name in ('header','Header'): #"Header" section includes beginning
inside_section = True
nonblank_encountered = False
nonheader_encountered = False
i = 0
for line_orig in f:
return_this_line = False
line = DeleteComments(line_orig).strip()
if line in lammps_data_sections:
nonheader_encountered = True
if section_name in ('header', 'Header'):
# The "header" section includes all lines at the beginning of the
# before any other section is encountered.
if nonheader_encountered:
return_this_line = False
else:
return_this_line = True
elif line == section_name:
inside_section = True
nonblank_encountered = False
if include_section_name:
return_this_line = True
# A block of blank lines (which dont immediately follow
# the section_name) signal the end of a section:
elif len(line) == 0:
if inside_section and include_section_name:
return_this_line = True
if nonblank_encountered:
inside_section = False
elif line[0] != comment_char:
if inside_section:
nonblank_encountered = True
return_this_line = True
if return_this_line:
if return_line_nums:
yield i
else:
yield line_orig
i += 1
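# Illustrative usage sketch for ExtractDataSection(): pulling one section out
# of a small in-memory LAMMPS data file. The data lines are made up for the
# example.
def _example_extract_atoms():
    data = ["LAMMPS data file (example)\n",
            "\n",
            "Atoms\n",
            "\n",
            "1 1 1 0.0 0.0 0.0\n",
            "2 1 1 1.0 0.0 0.0\n",
            "\n"]
    return list(ExtractDataSection(data, 'Atoms'))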
if __name__ == "__main__":
import sys
lines = sys.stdin.readlines()
exclude_sections = False
if sys.argv[1] == '-n':
exclude_sections = True
del sys.argv[1]
if not exclude_sections:
for section_name in sys.argv[1:]:
for line in ExtractDataSection(lines, section_name):
sys.stdout.write(line)
else:
line_nums_exclude = set([])
for section_name in sys.argv[1:]:
for line_num in ExtractDataSection(lines,
section_name,
include_section_name=True,
return_line_nums=True):
line_nums_exclude.add(line_num)
for i in range(0, len(lines)):
if i not in line_nums_exclude:
sys.stdout.write(lines[i])
| gpl-3.0 | -5,620,557,950,438,320,000 | 35.613445 | 80 | 0.471655 | false |
JoaquimPatriarca/senpy-for-gis | gasp/gdal/manage/sampling.py | 1 | 8280 | """
Sampling tools using OGR Library
"""
from osgeo import ogr
def create_fishnet(boundary, width, height, fishnet):
"""
Create a Fishnet
"""
import os
from math import ceil
from gasp.oss.info import get_filename
from gasp.gdal import get_driver_name
from gasp.gdal import get_extent
from gasp.gdal.proj import get_shp_sref
# Get boundary extent
xmin, xmax, ymin, ymax = get_extent(boundary)
# Clean width and height
if type(width) != float:
try:
# Convert to float
width = float(width)
except:
            raise ValueError('Width value is not valid. Please give a numeric value')
if type(height) != float:
try:
# Convert to float
height = float(height)
except:
            raise ValueError('Height value is not valid. Please give a numeric value')
# get rows number
rows = ceil((ymax-ymin) / height)
# get columns number
cols = ceil((xmax-xmin) / width)
# Create output file
if not os.path.exists(os.path.dirname(fishnet)):
raise ValueError('The path for the output doesn\'t exist')
out_fishnet = ogr.GetDriverByName(get_driver_name(
fishnet)).CreateDataSource(fishnet)
fishnet_lyr = out_fishnet.CreateLayer(
get_filename(fishnet), get_shp_sref(boundary),
geom_type=ogr.wkbPolygon
)
feat_defn = fishnet_lyr.GetLayerDefn()
# create grid cells
# - start grid cell envelope -#
ringXleftOrigin = xmin
ringXrightOrigin = xmin + width
ringYtopOrigin = ymax
ringYbottomOrigin = ymax - height
count_cols = 0
while count_cols < cols:
count_cols += 1
# reset envelope for rows
ringYtop = ringYtopOrigin
ringYbottom = ringYbottomOrigin
count_rows = 0
while count_rows < rows:
count_rows += 1
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint(ringXleftOrigin, ringYtop)
ring.AddPoint(ringXrightOrigin, ringYtop)
ring.AddPoint(ringXrightOrigin, ringYbottom)
ring.AddPoint(ringXleftOrigin, ringYbottom)
ring.AddPoint(ringXleftOrigin, ringYtop)
poly = ogr.Geometry(ogr.wkbPolygon)
poly.AddGeometry(ring)
# add new geom to layer
out_feature = ogr.Feature(feat_defn)
out_feature.SetGeometry(poly)
fishnet_lyr.CreateFeature(out_feature)
out_feature = None
# new envelope for next poly
ringYtop = ringYtop - height
ringYbottom = ringYbottom - height
# new envelope for next poly
ringXleftOrigin = ringXleftOrigin + width
ringXrightOrigin = ringXrightOrigin + width
out_fishnet.Destroy()
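# Illustrative usage sketch for create_fishnet(): building a 500 x 500 grid
# (in map units) over a study-area shapefile. The paths are hypothetical.
def _example_create_fishnet():
    create_fishnet(
        '/tmp/study_area.shp',    # hypothetical boundary layer
        500, 500,                 # cell width and height in map units
        '/tmp/fishnet.shp'        # hypothetical output path
    )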
def points_as_grid(boundary, fishnet_pnt, width=None, height=None,
nr_cols=None, nr_rows=None):
"""
Equivalent to the centroid of each cell of a fishnet grid
"""
import os
from math import ceil
from gasp.oss.info import get_filename
from gasp.gdal import get_driver_name
from gasp.gdal import get_extent
from gasp.gdal.proj import get_shp_sref
# Get boundary extent
xmin, xmax, ymin, ymax = get_extent(boundary)
# Clean width and height
if width and height:
if type(width) != float:
try:
# Convert to float
width = float(width)
except:
raise ValueError(
'Width value is not valid. Please give a numeric value'
)
if type(height) != float:
try:
# Convert to float
height = float(height)
except:
raise ValueError(
'Height value is not valid. Please give a numeric value'
)
else:
if nr_cols and nr_rows:
if type(nr_cols) != float:
try:
# convert to float
nr_cols = float(nr_cols)
except:
raise ValueError(
'Columns number value is not valid. Please give a numeric value'
)
if type(nr_rows) != float:
try:
nr_rows = float(nr_rows)
except:
raise ValueError(
'Lines number value is not valid. Please give a numeric value'
)
            width = (xmax - xmin) / nr_cols
            height = (ymax - ymin) / nr_rows
else:
            raise ValueError('Please give numeric values for width/height or for nr_cols/nr_rows')
# get rows number
rows = ceil((ymax-ymin) / height)
# get columns number
cols = ceil((xmax-xmin) / width)
# Create output file
if not os.path.exists(os.path.dirname(fishnet_pnt)):
        raise ValueError('The path for the output doesn\'t exist')
out_fishnet = ogr.GetDriverByName(get_driver_name(
fishnet_pnt)).CreateDataSource(fishnet_pnt)
fishnet_lyr = out_fishnet.CreateLayer(
get_filename(fishnet_pnt), get_shp_sref(boundary),
geom_type=ogr.wkbPoint
)
feat_defn = fishnet_lyr.GetLayerDefn()
# create grid cells
# - start grid cell envelope -#
ringXleftOrigin = xmin
ringXrightOrigin = xmin + width
ringYtopOrigin = ymax
ringYbottomOrigin = ymax - height
count_cols = 0
while count_cols < cols:
count_cols += 1
# reset envelope for rows
ringYtop = ringYtopOrigin
ringYbottom = ringYbottomOrigin
count_rows = 0
while count_rows < rows:
count_rows += 1
pnt = ogr.Geometry(ogr.wkbPoint)
pnt.AddPoint(
(ringXleftOrigin + ringXrightOrigin) / 2.0,
(ringYbottom + ringYtop) / 2.0
)
# add new geom to layer
out_feature = ogr.Feature(feat_defn)
out_feature.SetGeometry(pnt)
fishnet_lyr.CreateFeature(out_feature)
out_feature = None
# new envelope for next poly
ringYtop = ringYtop - height
ringYbottom = ringYbottom - height
# new envelope for next poly
ringXleftOrigin = ringXleftOrigin + width
ringXrightOrigin = ringXrightOrigin + width
out_fishnet.Destroy()
def get_random_point(minX, maxX, minY, maxY):
import random
from gasp.gdal import create_point
x = minX + (random.random() * (maxX - minX))
y = minY + (random.random() * (maxY - minY))
pnt = create_point(x, y)
return pnt
def create_random_points(area_shp, number, out_random):
"""
Return a dataset with several random points
"""
from osgeo import ogr
from gasp.gdal import get_driver_name
from gasp.gdal import get_extent
# Get extent
left, right, bottom, top = get_extent(area_shp)
# To be continued
"""
def CreateRandomPoints(all_sample, number, extension):
ausences = []
shp = ogr.GetDriverByName(GDAL_GetDriverName(all_sample)).Open(all_sample, 0)
lyr = shp.GetLayer()
for i in range(number):
equal = -1
while equal != 0:
random_pnt = CreateRandomPoint(extension['min_x'], extension['max_x'], extension['min_y'], extension['max_y'])
equal = 0
for feat in lyr:
geom = feat.GetGeometryRef()
geom_wkt = geom.ExportToWkt()
coord_geom = re.findall(r"[-+]?\d*\.\d+|\d+", geom_wkt)
dist = float(abs(
((float(coord_geom[0]) - float(random_pnt[0]))**2 + (float(coord_geom[1]) - float(random_pnt[1]))**2)**0.5
))
if dist < 10.0:
equal += 1
ausences.append(random_pnt)
return ausences
"""
| gpl-3.0 | 6,152,986,095,810,671,000 | 29.666667 | 131 | 0.543599 | false |
molobrakos/home-assistant | homeassistant/components/automation/numeric_state.py | 9 | 3263 | """Offer numeric state listening automation rules."""
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import (
CONF_VALUE_TEMPLATE, CONF_PLATFORM, CONF_ENTITY_ID,
CONF_BELOW, CONF_ABOVE, CONF_FOR)
from homeassistant.helpers.event import (
async_track_state_change, async_track_same_state)
from homeassistant.helpers import condition, config_validation as cv
TRIGGER_SCHEMA = vol.All(vol.Schema({
vol.Required(CONF_PLATFORM): 'numeric_state',
vol.Required(CONF_ENTITY_ID): cv.entity_ids,
vol.Optional(CONF_BELOW): vol.Coerce(float),
vol.Optional(CONF_ABOVE): vol.Coerce(float),
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_FOR): vol.All(cv.time_period, cv.positive_timedelta),
}), cv.has_at_least_one_key(CONF_BELOW, CONF_ABOVE))
_LOGGER = logging.getLogger(__name__)
async def async_trigger(hass, config, action, automation_info):
"""Listen for state changes based on configuration."""
entity_id = config.get(CONF_ENTITY_ID)
below = config.get(CONF_BELOW)
above = config.get(CONF_ABOVE)
time_delta = config.get(CONF_FOR)
value_template = config.get(CONF_VALUE_TEMPLATE)
unsub_track_same = {}
entities_triggered = set()
if value_template is not None:
value_template.hass = hass
@callback
def check_numeric_state(entity, from_s, to_s):
"""Return True if criteria are now met."""
if to_s is None:
return False
variables = {
'trigger': {
'platform': 'numeric_state',
'entity_id': entity,
'below': below,
'above': above,
}
}
return condition.async_numeric_state(
hass, to_s, below, above, value_template, variables)
@callback
def state_automation_listener(entity, from_s, to_s):
"""Listen for state changes and calls action."""
@callback
def call_action():
"""Call action with right context."""
hass.async_run_job(action({
'trigger': {
'platform': 'numeric_state',
'entity_id': entity,
'below': below,
'above': above,
'from_state': from_s,
'to_state': to_s,
}
}, context=to_s.context))
matching = check_numeric_state(entity, from_s, to_s)
if not matching:
entities_triggered.discard(entity)
elif entity not in entities_triggered:
entities_triggered.add(entity)
if time_delta:
unsub_track_same[entity] = async_track_same_state(
hass, time_delta, call_action, entity_ids=entity_id,
async_check_same_func=check_numeric_state)
else:
call_action()
unsub = async_track_state_change(
hass, entity_id, state_automation_listener)
@callback
def async_remove():
"""Remove state listeners async."""
unsub()
for async_remove in unsub_track_same.values():
async_remove()
unsub_track_same.clear()
return async_remove
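# Illustrative configuration accepted by this trigger platform (the entity id
# and thresholds below are hypothetical):
#
#   automation:
#     trigger:
#       platform: numeric_state
#       entity_id: sensor.living_room_temperature
#       above: 17
#       below: 25
#       for: '00:05:00'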
| apache-2.0 | 1,879,342,320,696,157,000 | 32.295918 | 75 | 0.591174 | false |
codesparkle/youtube-dl | youtube_dl/extractor/openload.py | 3 | 3273 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_chr
from ..utils import (
determine_ext,
ExtractorError,
)
class OpenloadIE(InfoExtractor):
_VALID_URL = r'https?://(?:openload\.(?:co|io)|oload\.tv)/(?:f|embed)/(?P<id>[a-zA-Z0-9-_]+)'
_TESTS = [{
'url': 'https://openload.co/f/kUEfGclsU9o',
'md5': 'bf1c059b004ebc7a256f89408e65c36e',
'info_dict': {
'id': 'kUEfGclsU9o',
'ext': 'mp4',
'title': 'skyrim_no-audio_1080.mp4',
'thumbnail': 're:^https?://.*\.jpg$',
},
}, {
'url': 'https://openload.co/embed/rjC09fkPLYs',
'info_dict': {
'id': 'rjC09fkPLYs',
'ext': 'mp4',
'title': 'movie.mp4',
'thumbnail': 're:^https?://.*\.jpg$',
'subtitles': {
'en': [{
'ext': 'vtt',
}],
},
},
'params': {
'skip_download': True, # test subtitles only
},
}, {
'url': 'https://openload.co/embed/kUEfGclsU9o/skyrim_no-audio_1080.mp4',
'only_matching': True,
}, {
'url': 'https://openload.io/f/ZAn6oz-VZGE/',
'only_matching': True,
}, {
'url': 'https://openload.co/f/_-ztPaZtMhM/',
'only_matching': True,
}, {
# unavailable via https://openload.co/f/Sxz5sADo82g/, different layout
# for title and ext
'url': 'https://openload.co/embed/Sxz5sADo82g/',
'only_matching': True,
}, {
'url': 'https://oload.tv/embed/KnG-kKZdcfY/',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage('https://openload.co/embed/%s/' % video_id, video_id)
if 'File not found' in webpage or 'deleted by the owner' in webpage:
raise ExtractorError('File not found', expected=True)
ol_id = self._search_regex(
'<span[^>]+id="[a-zA-Z0-9]+x"[^>]*>([0-9]+)</span>',
webpage, 'openload ID')
first_two_chars = int(float(ol_id[0:][:2]))
urlcode = ''
num = 2
while num < len(ol_id):
urlcode += compat_chr(int(float(ol_id[num:][:3])) -
first_two_chars * int(float(ol_id[num + 3:][:2])))
num += 5
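        # Worked example with a hypothetical ID: for ol_id = '1212704',
        # first_two_chars is 12 and the single 5-character group '12704'
        # yields compat_chr(127 - 12 * 4) == compat_chr(79) == 'O'.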
video_url = 'https://openload.co/stream/' + urlcode
title = self._og_search_title(webpage, default=None) or self._search_regex(
r'<span[^>]+class=["\']title["\'][^>]*>([^<]+)', webpage,
'title', default=None) or self._html_search_meta(
'description', webpage, 'title', fatal=True)
entries = self._parse_html5_media_entries(url, webpage, video_id)
subtitles = entries[0]['subtitles'] if entries else None
info_dict = {
'id': video_id,
'title': title,
'thumbnail': self._og_search_thumbnail(webpage, default=None),
'url': video_url,
# Seems all videos have extensions in their titles
'ext': determine_ext(title),
'subtitles': subtitles,
}
return info_dict
| unlicense | -2,906,746,810,198,169,000 | 32.397959 | 97 | 0.505041 | false |
citrix-openstack/build-trove | trove/openstack/common/rpc/common.py | 2 | 18526 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import sys
import traceback
from oslo.config import cfg
import six
from trove.openstack.common.gettextutils import _ # noqa
from trove.openstack.common import importutils
from trove.openstack.common import jsonutils
from trove.openstack.common import local
from trove.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
'''RPC Envelope Version.
This version number applies to the top level structure of messages sent out.
It does *not* apply to the message payload, which must be versioned
independently. For example, when using rpc APIs, a version number is applied
for changes to the API being exposed over rpc. This version number is handled
in the rpc proxy and dispatcher modules.
This version number applies to the message envelope that is used in the
serialization done inside the rpc layer. See serialize_msg() and
deserialize_msg().
The current message format (version 2.0) is very simple. It is:
{
'oslo.version': <RPC Envelope Version as a String>,
'oslo.message': <Application Message Payload, JSON encoded>
}
Message format version '1.0' is just considered to be the messages we sent
without a message envelope.
So, the current message envelope just includes the envelope version. It may
eventually contain additional information, such as a signature for the message
payload.
We will JSON encode the application message payload. The message envelope,
which includes the JSON encoded application message body, will be passed down
to the messaging libraries as a dict.
'''
_RPC_ENVELOPE_VERSION = '2.0'
_VERSION_KEY = 'oslo.version'
_MESSAGE_KEY = 'oslo.message'
_REMOTE_POSTFIX = '_Remote'
class RPCException(Exception):
msg_fmt = _("An unknown RPC related exception occurred.")
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if not message:
try:
message = self.msg_fmt % kwargs
except Exception:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'))
for name, value in kwargs.iteritems():
LOG.error("%s: %s" % (name, value))
# at least get the core message out if something happened
message = self.msg_fmt
super(RPCException, self).__init__(message)
class RemoteError(RPCException):
"""Signifies that a remote class has raised an exception.
Contains a string representation of the type of the original exception,
the value of the original exception, and the traceback. These are
sent to the parent as a joined string so printing the exception
contains all of the relevant info.
"""
msg_fmt = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
def __init__(self, exc_type=None, value=None, traceback=None):
self.exc_type = exc_type
self.value = value
self.traceback = traceback
super(RemoteError, self).__init__(exc_type=exc_type,
value=value,
traceback=traceback)
class Timeout(RPCException):
"""Signifies that a timeout has occurred.
This exception is raised if the rpc_response_timeout is reached while
waiting for a response from the remote side.
"""
msg_fmt = _('Timeout while waiting on RPC response - '
'topic: "%(topic)s", RPC method: "%(method)s" '
'info: "%(info)s"')
def __init__(self, info=None, topic=None, method=None):
"""Initiates Timeout object.
:param info: Extra info to convey to the user
:param topic: The topic that the rpc call was sent to
:param rpc_method_name: The name of the rpc method being
called
"""
self.info = info
self.topic = topic
self.method = method
super(Timeout, self).__init__(
None,
info=info or _('<unknown>'),
topic=topic or _('<unknown>'),
method=method or _('<unknown>'))
class DuplicateMessageError(RPCException):
msg_fmt = _("Found duplicate message(%(msg_id)s). Skipping it.")
class InvalidRPCConnectionReuse(RPCException):
msg_fmt = _("Invalid reuse of an RPC connection.")
class UnsupportedRpcVersion(RPCException):
msg_fmt = _("Specified RPC version, %(version)s, not supported by "
"this endpoint.")
class UnsupportedRpcEnvelopeVersion(RPCException):
msg_fmt = _("Specified RPC envelope version, %(version)s, "
"not supported by this endpoint.")
class RpcVersionCapError(RPCException):
msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low")
class Connection(object):
"""A connection, returned by rpc.create_connection().
This class represents a connection to the message bus used for rpc.
An instance of this class should never be created by users of the rpc API.
Use rpc.create_connection() instead.
"""
def close(self):
"""Close the connection.
This method must be called when the connection will no longer be used.
It will ensure that any resources associated with the connection, such
        as a network connection, are cleaned up.
"""
raise NotImplementedError()
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer on this connection.
A consumer is associated with a message queue on the backend message
bus. The consumer will read messages from the queue, unpack them, and
dispatch them to the proxy object. The contents of the message pulled
off of the queue will determine which method gets called on the proxy
object.
:param topic: This is a name associated with what to consume from.
Multiple instances of a service may consume from the same
topic. For example, all instances of nova-compute consume
from a queue called "compute". In that case, the
messages will get distributed amongst the consumers in a
round-robin fashion if fanout=False. If fanout=True,
every consumer associated with this topic will get a
copy of every message.
:param proxy: The object that will handle all incoming messages.
:param fanout: Whether or not this is a fanout topic. See the
documentation for the topic parameter for some
additional comments on this.
"""
raise NotImplementedError()
def create_worker(self, topic, proxy, pool_name):
"""Create a worker on this connection.
A worker is like a regular consumer of messages directed to a
topic, except that it is part of a set of such consumers (the
"pool") which may run in parallel. Every pool of workers will
receive a given message, but only one worker in the pool will
be asked to process it. Load is distributed across the members
of the pool in round-robin fashion.
:param topic: This is a name associated with what to consume from.
Multiple instances of a service may consume from the same
topic.
:param proxy: The object that will handle all incoming messages.
:param pool_name: String containing the name of the pool of workers
"""
raise NotImplementedError()
def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
"""Register as a member of a group of consumers.
Uses given topic from the specified exchange.
Exactly one member of a given pool will receive each message.
A message will be delivered to multiple pools, if more than
one is created.
:param callback: Callable to be invoked for each message.
:type callback: callable accepting one argument
:param pool_name: The name of the consumer pool.
:type pool_name: str
:param topic: The routing topic for desired messages.
:type topic: str
:param exchange_name: The name of the message exchange where
the client should attach. Defaults to
the configured exchange.
:type exchange_name: str
"""
raise NotImplementedError()
def consume_in_thread(self):
"""Spawn a thread to handle incoming messages.
Spawn a thread that will be responsible for handling all incoming
messages for consumers that were set up on this connection.
Message dispatching inside of this is expected to be implemented in a
non-blocking manner. An example implementation would be having this
thread pull messages in for all of the consumers, but utilize a thread
pool for dispatching the messages to the proxy objects.
"""
raise NotImplementedError()
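# Typical call pattern (illustrative only): rpc.create_connection() returns a
# concrete implementation of this interface for the configured backend.
#
#     conn = rpc.create_connection()
#     conn.create_consumer('compute', proxy_object, fanout=False)
#     conn.consume_in_thread()
#     ...
#     conn.close()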
def _safe_log(log_func, msg, msg_data):
"""Sanitizes the msg_data field before logging."""
SANITIZE = ['_context_auth_token', 'auth_token', 'new_pass']
def _fix_passwords(d):
"""Sanitizes the password fields in the dictionary."""
for k in d.iterkeys():
if k.lower().find('password') != -1:
d[k] = '<SANITIZED>'
elif k.lower() in SANITIZE:
d[k] = '<SANITIZED>'
elif isinstance(d[k], dict):
_fix_passwords(d[k])
return d
return log_func(msg, _fix_passwords(copy.deepcopy(msg_data)))
def serialize_remote_exception(failure_info, log_failure=True):
"""Prepares exception data to be sent over rpc.
Failure_info should be a sys.exc_info() tuple.
"""
tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
if log_failure:
LOG.error(_("Returning exception %s to caller"),
six.text_type(failure))
LOG.error(tb)
kwargs = {}
if hasattr(failure, 'kwargs'):
kwargs = failure.kwargs
# NOTE(matiu): With cells, it's possible to re-raise remote, remote
# exceptions. Lets turn it back into the original exception type.
cls_name = str(failure.__class__.__name__)
mod_name = str(failure.__class__.__module__)
if (cls_name.endswith(_REMOTE_POSTFIX) and
mod_name.endswith(_REMOTE_POSTFIX)):
cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
data = {
'class': cls_name,
'module': mod_name,
'message': six.text_type(failure),
'tb': tb,
'args': failure.args,
'kwargs': kwargs
}
json_data = jsonutils.dumps(data)
return json_data
def deserialize_remote_exception(conf, data):
failure = jsonutils.loads(str(data))
trace = failure.get('tb', [])
message = failure.get('message', "") + "\n" + "\n".join(trace)
name = failure.get('class')
module = failure.get('module')
# NOTE(ameade): We DO NOT want to allow just any module to be imported, in
# order to prevent arbitrary code execution.
if module not in conf.allowed_rpc_exception_modules:
return RemoteError(name, failure.get('message'), trace)
try:
mod = importutils.import_module(module)
klass = getattr(mod, name)
if not issubclass(klass, Exception):
raise TypeError("Can only deserialize Exceptions")
failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
except (AttributeError, TypeError, ImportError):
return RemoteError(name, failure.get('message'), trace)
ex_type = type(failure)
str_override = lambda self: message
new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
{'__str__': str_override, '__unicode__': str_override})
new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
try:
# NOTE(ameade): Dynamically create a new exception type and swap it in
# as the new type for the exception. This only works on user defined
# Exceptions and not core python exceptions. This is important because
# we cannot necessarily change an exception message so we must override
# the __str__ method.
failure.__class__ = new_ex_type
except TypeError:
# NOTE(ameade): If a core exception then just add the traceback to the
# first exception argument.
failure.args = (message,) + failure.args[1:]
return failure
class CommonRpcContext(object):
def __init__(self, **kwargs):
self.values = kwargs
def __getattr__(self, key):
try:
return self.values[key]
except KeyError:
raise AttributeError(key)
def to_dict(self):
return copy.deepcopy(self.values)
@classmethod
def from_dict(cls, values):
return cls(**values)
def deepcopy(self):
return self.from_dict(self.to_dict())
def update_store(self):
local.store.context = self
def elevated(self, read_deleted=None, overwrite=False):
"""Return a version of this context with admin flag set."""
# TODO(russellb) This method is a bit of a nova-ism. It makes
# some assumptions about the data in the request context sent
# across rpc, while the rest of this class does not. We could get
# rid of this if we changed the nova code that uses this to
# convert the RpcContext back to its native RequestContext doing
# something like nova.context.RequestContext.from_dict(ctxt.to_dict())
context = self.deepcopy()
context.values['is_admin'] = True
context.values.setdefault('roles', [])
if 'admin' not in context.values['roles']:
context.values['roles'].append('admin')
if read_deleted is not None:
context.values['read_deleted'] = read_deleted
return context
class ClientException(Exception):
"""Encapsulates actual exception expected to be hit by a RPC proxy object.
Merely instantiating it records the current exception information, which
will be passed back to the RPC client without exceptional logging.
"""
def __init__(self):
self._exc_info = sys.exc_info()
def catch_client_exception(exceptions, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
if type(e) in exceptions:
raise ClientException()
else:
raise
def client_exceptions(*exceptions):
"""Decorator for manager methods that raise expected exceptions.
Marking a Manager method with this decorator allows the declaration
of expected exceptions that the RPC layer should not consider fatal,
and not log as if they were generated in a real error scenario. Note
that this will cause listed exceptions to be wrapped in a
ClientException, which is used internally by the RPC layer.
"""
def outer(func):
def inner(*args, **kwargs):
return catch_client_exception(exceptions, func, *args, **kwargs)
return inner
return outer
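# Usage sketch (illustrative only; InstanceNotFound is a hypothetical
# exception class): a manager method declaring which exceptions are expected
# and should therefore not be logged as real errors by the RPC layer.
#
#     @client_exceptions(InstanceNotFound)
#     def get_instance(self, context, instance_id):
#         ...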
def version_is_compatible(imp_version, version):
"""Determine whether versions are compatible.
:param imp_version: The version implemented
:param version: The version requested by an incoming message.
"""
version_parts = version.split('.')
imp_version_parts = imp_version.split('.')
if int(version_parts[0]) != int(imp_version_parts[0]): # Major
return False
if int(version_parts[1]) > int(imp_version_parts[1]): # Minor
return False
return True
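# For example, an endpoint implementing version '1.3' accepts requests for
# '1.0' through '1.3', but not for a newer minor or a different major version:
#
#     version_is_compatible('1.3', '1.1')  # -> True
#     version_is_compatible('1.3', '1.4')  # -> False
#     version_is_compatible('1.3', '2.0')  # -> False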
def serialize_msg(raw_msg):
# NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
# information about this format.
msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
_MESSAGE_KEY: jsonutils.dumps(raw_msg)}
return msg
def deserialize_msg(msg):
# NOTE(russellb): Hang on to your hats, this road is about to
# get a little bumpy.
#
# Robustness Principle:
# "Be strict in what you send, liberal in what you accept."
#
# At this point we have to do a bit of guessing about what it
# is we just received. Here is the set of possibilities:
#
# 1) We received a dict. This could be 2 things:
#
# a) Inspect it to see if it looks like a standard message envelope.
# If so, great!
#
# b) If it doesn't look like a standard message envelope, it could either
# be a notification, or a message from before we added a message
# envelope (referred to as version 1.0).
# Just return the message as-is.
#
# 2) It's any other non-dict type. Just return it and hope for the best.
# This case covers return values from rpc.call() from before message
# envelopes were used. (messages to call a method were always a dict)
if not isinstance(msg, dict):
# See #2 above.
return msg
base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY)
if not all(map(lambda key: key in msg, base_envelope_keys)):
# See #1.b above.
return msg
# At this point we think we have the message envelope
# format we were expecting. (#1.a above)
if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]):
raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY])
raw_msg = jsonutils.loads(msg[_MESSAGE_KEY])
return raw_msg
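# Round-trip sketch (illustrative only): wrapping a payload in the 2.0
# envelope and unpacking it again.
#
#     envelope = serialize_msg({'method': 'ping', 'args': {}})
#     # envelope carries {'oslo.version': '2.0', 'oslo.message': <JSON body>}
#     assert deserialize_msg(envelope) == {'method': 'ping', 'args': {}}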
| apache-2.0 | -7,837,677,119,063,926,000 | 35.396857 | 79 | 0.645255 | false |
Ultimaker/Cura | plugins/GCodeReader/FlavorParser.py | 1 | 24193 | # Copyright (c) 2020 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import math
import re
from typing import Dict, List, NamedTuple, Optional, Union, Set
import numpy
from UM.Backend import Backend
from UM.Job import Job
from UM.Logger import Logger
from UM.Math.Vector import Vector
from UM.Message import Message
from UM.i18n import i18nCatalog
from cura.CuraApplication import CuraApplication
from cura.LayerDataBuilder import LayerDataBuilder
from cura.LayerDataDecorator import LayerDataDecorator
from cura.LayerPolygon import LayerPolygon
from cura.Scene.CuraSceneNode import CuraSceneNode
from cura.Scene.GCodeListDecorator import GCodeListDecorator
from cura.Settings.ExtruderManager import ExtruderManager
catalog = i18nCatalog("cura")
PositionOptional = NamedTuple("Position", [("x", Optional[float]), ("y", Optional[float]), ("z", Optional[float]), ("f", Optional[float]), ("e", Optional[float])])
Position = NamedTuple("Position", [("x", float), ("y", float), ("z", float), ("f", float), ("e", List[float])])
class FlavorParser:
"""This parser is intended to interpret the common firmware codes among all the different flavors"""
def __init__(self) -> None:
CuraApplication.getInstance().hideMessageSignal.connect(self._onHideMessage)
self._cancelled = False
self._message = None # type: Optional[Message]
self._layer_number = 0
self._extruder_number = 0
# All extruder numbers that have been seen
self._extruders_seen = {0} # type: Set[int]
self._clearValues()
self._scene_node = None
# X, Y, Z position, F feedrate and E extruder values are stored
self._position = Position
self._is_layers_in_file = False # Does the Gcode have the layers comment?
self._extruder_offsets = {} # type: Dict[int, List[float]] # Offsets for multi extruders. key is index, value is [x-offset, y-offset]
self._current_layer_thickness = 0.2 # default
self._filament_diameter = 2.85 # default
self._previous_extrusion_value = 0.0 # keep track of the filament retractions
CuraApplication.getInstance().getPreferences().addPreference("gcodereader/show_caution", True)
def _clearValues(self) -> None:
self._extruder_number = 0
self._extrusion_length_offset = [0] # type: List[float]
self._layer_type = LayerPolygon.Inset0Type
self._layer_number = 0
self._previous_z = 0 # type: float
self._layer_data_builder = LayerDataBuilder()
self._is_absolute_positioning = True # It can be absolute (G90) or relative (G91)
self._is_absolute_extrusion = True # It can become absolute (M82, default) or relative (M83)
@staticmethod
def _getValue(line: str, code: str) -> Optional[Union[str, int, float]]:
n = line.find(code)
if n < 0:
return None
n += len(code)
pattern = re.compile("[;\\s]")
match = pattern.search(line, n)
m = match.start() if match is not None else -1
try:
if m < 0:
return line[n:]
return line[n:m]
except:
return None
def _getInt(self, line: str, code: str) -> Optional[int]:
value = self._getValue(line, code)
try:
return int(value) # type: ignore
except:
return None
def _getFloat(self, line: str, code: str) -> Optional[float]:
value = self._getValue(line, code)
try:
return float(value) # type: ignore
except:
return None
def _onHideMessage(self, message: str) -> None:
if message == self._message:
self._cancelled = True
def _createPolygon(self, layer_thickness: float, path: List[List[Union[float, int]]], extruder_offsets: List[float]) -> bool:
countvalid = 0
for point in path:
if point[5] > 0:
countvalid += 1
if countvalid >= 2:
# we know what to do now, no need to count further
                    break
if countvalid < 2:
return False
try:
self._layer_data_builder.addLayer(self._layer_number)
self._layer_data_builder.setLayerHeight(self._layer_number, path[0][2])
self._layer_data_builder.setLayerThickness(self._layer_number, layer_thickness)
this_layer = self._layer_data_builder.getLayer(self._layer_number)
if not this_layer:
return False
except ValueError:
return False
count = len(path)
line_types = numpy.empty((count - 1, 1), numpy.int32)
line_widths = numpy.empty((count - 1, 1), numpy.float32)
line_thicknesses = numpy.empty((count - 1, 1), numpy.float32)
line_feedrates = numpy.empty((count - 1, 1), numpy.float32)
line_widths[:, 0] = 0.35 # Just a guess
line_thicknesses[:, 0] = layer_thickness
points = numpy.empty((count, 3), numpy.float32)
extrusion_values = numpy.empty((count, 1), numpy.float32)
i = 0
for point in path:
points[i, :] = [point[0] + extruder_offsets[0], point[2], -point[1] - extruder_offsets[1]]
extrusion_values[i] = point[4]
if i > 0:
line_feedrates[i - 1] = point[3]
line_types[i - 1] = point[5]
if point[5] in [LayerPolygon.MoveCombingType, LayerPolygon.MoveRetractionType]:
line_widths[i - 1] = 0.1
line_thicknesses[i - 1] = 0.0 # Travels are set as zero thickness lines
else:
line_widths[i - 1] = self._calculateLineWidth(points[i], points[i-1], extrusion_values[i], extrusion_values[i-1], layer_thickness)
i += 1
this_poly = LayerPolygon(self._extruder_number, line_types, points, line_widths, line_thicknesses, line_feedrates)
this_poly.buildCache()
this_layer.polygons.append(this_poly)
return True
def _createEmptyLayer(self, layer_number: int) -> None:
self._layer_data_builder.addLayer(layer_number)
self._layer_data_builder.setLayerHeight(layer_number, 0)
self._layer_data_builder.setLayerThickness(layer_number, 0)
def _calculateLineWidth(self, current_point: Position, previous_point: Position, current_extrusion: float, previous_extrusion: float, layer_thickness: float) -> float:
# Area of the filament
Af = (self._filament_diameter / 2) ** 2 * numpy.pi
# Length of the extruded filament
de = current_extrusion - previous_extrusion
        # Volume of the extruded filament
dVe = de * Af
# Length of the printed line
dX = numpy.sqrt((current_point[0] - previous_point[0])**2 + (current_point[2] - previous_point[2])**2)
# When the extruder recovers from a retraction, we get zero distance
if dX == 0:
return 0.1
# Area of the printed line. This area is a rectangle
Ae = dVe / dX
# This area is a rectangle with area equal to layer_thickness * layer_width
line_width = Ae / layer_thickness
# A threshold is set to avoid weird paths in the GCode
if line_width > 1.2:
return 0.35
# Prevent showing infinitely wide lines
if line_width < 0.0:
return 0.0
return line_width
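    # Worked example (assumed values): with 2.85 mm filament, Af is about
    # 6.38 mm^2. Extruding de = 0.11 mm of filament over a dX = 10 mm move at
    # a 0.2 mm layer thickness gives dVe ~= 0.70 mm^3, Ae ~= 0.070 mm^2 and
    # hence a line width of roughly 0.35 mm, inside both sanity thresholds.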
def _gCode0(self, position: Position, params: PositionOptional, path: List[List[Union[float, int]]]) -> Position:
x, y, z, f, e = position
if self._is_absolute_positioning:
x = params.x if params.x is not None else x
y = params.y if params.y is not None else y
z = params.z if params.z is not None else z
else:
x += params.x if params.x is not None else 0
y += params.y if params.y is not None else 0
z += params.z if params.z is not None else 0
f = params.f if params.f is not None else f
if params.e is not None:
new_extrusion_value = params.e if self._is_absolute_extrusion else e[self._extruder_number] + params.e
if new_extrusion_value > e[self._extruder_number]:
path.append([x, y, z, f, new_extrusion_value + self._extrusion_length_offset[self._extruder_number], self._layer_type]) # extrusion
self._previous_extrusion_value = new_extrusion_value
else:
path.append([x, y, z, f, new_extrusion_value + self._extrusion_length_offset[self._extruder_number], LayerPolygon.MoveRetractionType]) # retraction
e[self._extruder_number] = new_extrusion_value
            # Only when extruding can we determine the latest known "layer height", which is the difference in height between extrusions
            # Also, 1.5 is a heuristic for any priming moves and the like, which we skip
if z > self._previous_z and (z - self._previous_z < 1.5):
self._current_layer_thickness = z - self._previous_z # allow a tiny overlap
self._previous_z = z
elif self._previous_extrusion_value > e[self._extruder_number]:
path.append([x, y, z, f, e[self._extruder_number] + self._extrusion_length_offset[self._extruder_number], LayerPolygon.MoveRetractionType])
else:
path.append([x, y, z, f, e[self._extruder_number] + self._extrusion_length_offset[self._extruder_number], LayerPolygon.MoveCombingType])
return self._position(x, y, z, f, e)
# G0 and G1 should be handled exactly the same.
_gCode1 = _gCode0
def _gCode28(self, position: Position, params: PositionOptional, path: List[List[Union[float, int]]]) -> Position:
"""Home the head."""
return self._position(
params.x if params.x is not None else position.x,
params.y if params.y is not None else position.y,
params.z if params.z is not None else position.z,
position.f,
position.e)
def _gCode90(self, position: Position, params: PositionOptional, path: List[List[Union[float, int]]]) -> Position:
"""Set the absolute positioning"""
self._is_absolute_positioning = True
self._is_absolute_extrusion = True
return position
def _gCode91(self, position: Position, params: PositionOptional, path: List[List[Union[float, int]]]) -> Position:
"""Set the relative positioning"""
self._is_absolute_positioning = False
self._is_absolute_extrusion = False
return position
def _gCode92(self, position: Position, params: PositionOptional, path: List[List[Union[float, int]]]) -> Position:
"""Reset the current position to the values specified.
For example: G92 X10 will set the X to 10 without any physical motion.
"""
if params.e is not None:
            # Sometimes a G92 E0 is introduced in the middle of the GCode so we need to keep those offsets to calculate the line_width
self._extrusion_length_offset[self._extruder_number] = position.e[self._extruder_number] - params.e
position.e[self._extruder_number] = params.e
self._previous_extrusion_value = params.e
else:
self._previous_extrusion_value = 0.0
return self._position(
params.x if params.x is not None else position.x,
params.y if params.y is not None else position.y,
params.z if params.z is not None else position.z,
params.f if params.f is not None else position.f,
position.e)
def processGCode(self, G: int, line: str, position: Position, path: List[List[Union[float, int]]]) -> Position:
func = getattr(self, "_gCode%s" % G, None)
line = line.split(";", 1)[0] # Remove comments (if any)
if func is not None:
s = line.upper().split(" ")
x, y, z, f, e = None, None, None, None, None
for item in s[1:]:
if len(item) <= 1:
continue
if item.startswith(";"):
continue
try:
if item[0] == "X":
x = float(item[1:])
elif item[0] == "Y":
y = float(item[1:])
elif item[0] == "Z":
z = float(item[1:])
elif item[0] == "F":
f = float(item[1:]) / 60
elif item[0] == "E":
e = float(item[1:])
except ValueError: # Improperly formatted g-code: Coordinates are not floats.
continue # Skip the command then.
params = PositionOptional(x, y, z, f, e)
return func(position, params, path)
return position
def processTCode(self, T: int, line: str, position: Position, path: List[List[Union[float, int]]]) -> Position:
self._extruder_number = T
if self._extruder_number + 1 > len(position.e):
self._extrusion_length_offset.extend([0] * (self._extruder_number - len(position.e) + 1))
position.e.extend([0] * (self._extruder_number - len(position.e) + 1))
return position
def processMCode(self, M: int, line: str, position: Position, path: List[List[Union[float, int]]]) -> Position:
pass
_type_keyword = ";TYPE:"
_layer_keyword = ";LAYER:"
def _extruderOffsets(self) -> Dict[int, List[float]]:
"""For showing correct x, y offsets for each extruder"""
result = {}
for extruder in ExtruderManager.getInstance().getActiveExtruderStacks():
result[int(extruder.getMetaData().get("position", "0"))] = [
extruder.getProperty("machine_nozzle_offset_x", "value"),
extruder.getProperty("machine_nozzle_offset_y", "value")]
return result
#
# CURA-6643
# This function needs the filename so it can be set to the SceneNode. Otherwise, if you load a GCode file and press
# F5, that gcode SceneNode will be removed because it doesn't have a file to be reloaded from.
#
def processGCodeStream(self, stream: str, filename: str) -> Optional["CuraSceneNode"]:
Logger.log("d", "Preparing to load g-code")
self._cancelled = False
# We obtain the filament diameter from the selected extruder to calculate line widths
global_stack = CuraApplication.getInstance().getGlobalContainerStack()
if not global_stack:
return None
self._filament_diameter = global_stack.extruderList[self._extruder_number].getProperty("material_diameter", "value")
scene_node = CuraSceneNode()
gcode_list = []
self._is_layers_in_file = False
self._extruder_offsets = self._extruderOffsets() # dict with index the extruder number. can be empty
##############################################################################################
## This part is where the action starts
##############################################################################################
file_lines = 0
current_line = 0
for line in stream.split("\n"):
file_lines += 1
gcode_list.append(line + "\n")
if not self._is_layers_in_file and line[:len(self._layer_keyword)] == self._layer_keyword:
self._is_layers_in_file = True
file_step = max(math.floor(file_lines / 100), 1)
self._clearValues()
self._message = Message(catalog.i18nc("@info:status", "Parsing G-code"),
lifetime=0,
title = catalog.i18nc("@info:title", "G-code Details"))
assert(self._message is not None) # use for typing purposes
self._message.setProgress(0)
self._message.show()
Logger.log("d", "Parsing g-code...")
current_position = Position(0, 0, 0, 0, [0])
current_path = [] #type: List[List[float]]
min_layer_number = 0
negative_layers = 0
previous_layer = 0
self._previous_extrusion_value = 0.0
for line in stream.split("\n"):
if self._cancelled:
Logger.log("d", "Parsing g-code file cancelled.")
return None
current_line += 1
if current_line % file_step == 0:
self._message.setProgress(math.floor(current_line / file_lines * 100))
Job.yieldThread()
if len(line) == 0:
continue
if line.find(self._type_keyword) == 0:
type = line[len(self._type_keyword):].strip()
if type == "WALL-INNER":
self._layer_type = LayerPolygon.InsetXType
elif type == "WALL-OUTER":
self._layer_type = LayerPolygon.Inset0Type
elif type == "SKIN":
self._layer_type = LayerPolygon.SkinType
elif type == "SKIRT":
self._layer_type = LayerPolygon.SkirtType
elif type == "SUPPORT":
self._layer_type = LayerPolygon.SupportType
elif type == "FILL":
self._layer_type = LayerPolygon.InfillType
elif type == "SUPPORT-INTERFACE":
self._layer_type = LayerPolygon.SupportInterfaceType
elif type == "PRIME-TOWER":
self._layer_type = LayerPolygon.PrimeTowerType
else:
Logger.log("w", "Encountered a unknown type (%s) while parsing g-code.", type)
# When the layer change is reached, the polygon is computed so we have just one layer per extruder
if self._is_layers_in_file and line[:len(self._layer_keyword)] == self._layer_keyword:
try:
layer_number = int(line[len(self._layer_keyword):])
self._createPolygon(self._current_layer_thickness, current_path, self._extruder_offsets.get(self._extruder_number, [0, 0]))
current_path.clear()
# Start the new layer at the end position of the last layer
current_path.append([current_position.x, current_position.y, current_position.z, current_position.f, current_position.e[self._extruder_number], LayerPolygon.MoveCombingType])
# When using a raft, the raft layers are stored as layers < 0, it mimics the same behavior
# as in ProcessSlicedLayersJob
if layer_number < min_layer_number:
min_layer_number = layer_number
if layer_number < 0:
layer_number += abs(min_layer_number)
negative_layers += 1
else:
layer_number += negative_layers
# In case there is a gap in the layer count, empty layers are created
for empty_layer in range(previous_layer + 1, layer_number):
self._createEmptyLayer(empty_layer)
self._layer_number = layer_number
previous_layer = layer_number
except:
pass
# This line is a comment. Ignore it (except for the layer_keyword)
if line.startswith(";"):
continue
G = self._getInt(line, "G")
if G is not None:
                # When a movement is found, the new position is calculated and added to the current_path, but
                # we don't need to create a polygon until the end of the layer
current_position = self.processGCode(G, line, current_position, current_path)
continue
# When changing the extruder, the polygon with the stored paths is computed
if line.startswith("T"):
T = self._getInt(line, "T")
if T is not None:
self._extruders_seen.add(T)
self._createPolygon(self._current_layer_thickness, current_path, self._extruder_offsets.get(self._extruder_number, [0, 0]))
current_path.clear()
# When changing tool, store the end point of the previous path, then process the code and finally
# add another point with the new position of the head.
current_path.append([current_position.x, current_position.y, current_position.z, current_position.f, current_position.e[self._extruder_number], LayerPolygon.MoveCombingType])
current_position = self.processTCode(T, line, current_position, current_path)
current_path.append([current_position.x, current_position.y, current_position.z, current_position.f, current_position.e[self._extruder_number], LayerPolygon.MoveCombingType])
if line.startswith("M"):
M = self._getInt(line, "M")
if M is not None:
self.processMCode(M, line, current_position, current_path)
# "Flush" leftovers. Last layer paths are still stored
if len(current_path) > 1:
if self._createPolygon(self._current_layer_thickness, current_path, self._extruder_offsets.get(self._extruder_number, [0, 0])):
self._layer_number += 1
current_path.clear()
material_color_map = numpy.zeros((8, 4), dtype = numpy.float32)
material_color_map[0, :] = [0.0, 0.7, 0.9, 1.0]
material_color_map[1, :] = [0.7, 0.9, 0.0, 1.0]
material_color_map[2, :] = [0.9, 0.0, 0.7, 1.0]
material_color_map[3, :] = [0.7, 0.0, 0.0, 1.0]
material_color_map[4, :] = [0.0, 0.7, 0.0, 1.0]
material_color_map[5, :] = [0.0, 0.0, 0.7, 1.0]
material_color_map[6, :] = [0.3, 0.3, 0.3, 1.0]
material_color_map[7, :] = [0.7, 0.7, 0.7, 1.0]
layer_mesh = self._layer_data_builder.build(material_color_map)
decorator = LayerDataDecorator()
decorator.setLayerData(layer_mesh)
scene_node.addDecorator(decorator)
gcode_list_decorator = GCodeListDecorator()
gcode_list_decorator.setGcodeFileName(filename)
gcode_list_decorator.setGCodeList(gcode_list)
scene_node.addDecorator(gcode_list_decorator)
# gcode_dict stores gcode_lists for a number of build plates.
active_build_plate_id = CuraApplication.getInstance().getMultiBuildPlateModel().activeBuildPlate
gcode_dict = {active_build_plate_id: gcode_list}
CuraApplication.getInstance().getController().getScene().gcode_dict = gcode_dict #type: ignore #Because gcode_dict is generated dynamically.
Logger.log("d", "Finished parsing g-code.")
self._message.hide()
if self._layer_number == 0:
Logger.log("w", "File doesn't contain any valid layers")
if not global_stack.getProperty("machine_center_is_zero", "value"):
machine_width = global_stack.getProperty("machine_width", "value")
machine_depth = global_stack.getProperty("machine_depth", "value")
scene_node.setPosition(Vector(-machine_width / 2, 0, machine_depth / 2))
Logger.log("d", "G-code loading finished.")
if CuraApplication.getInstance().getPreferences().getValue("gcodereader/show_caution"):
caution_message = Message(catalog.i18nc(
"@info:generic",
"Make sure the g-code is suitable for your printer and printer configuration before sending the file to it. The g-code representation may not be accurate."),
lifetime=0,
title = catalog.i18nc("@info:title", "G-code Details"))
caution_message.show()
# The "save/print" button's state is bound to the backend state.
backend = CuraApplication.getInstance().getBackend()
backend.backendStateChange.emit(Backend.BackendState.Disabled)
return scene_node
| lgpl-3.0 | -5,826,409,045,949,713,000 | 46.437255 | 194 | 0.585872 | false |
oandrew/home-assistant | homeassistant/components/light/isy994.py | 5 | 2451 | """
Support for ISY994 lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.isy994/
"""
import logging
from typing import Callable
from homeassistant.components.light import (
Light, SUPPORT_BRIGHTNESS, ATTR_BRIGHTNESS)
import homeassistant.components.isy994 as isy
from homeassistant.const import STATE_ON, STATE_OFF, STATE_UNKNOWN
from homeassistant.helpers.typing import ConfigType
_LOGGER = logging.getLogger(__name__)
VALUE_TO_STATE = {
False: STATE_OFF,
True: STATE_ON,
}
UOM = ['2', '51', '78']
STATES = [STATE_OFF, STATE_ON, 'true', 'false', '%']
# pylint: disable=unused-argument
def setup_platform(hass, config: ConfigType,
add_devices: Callable[[list], None], discovery_info=None):
"""Set up the ISY994 light platform."""
if isy.ISY is None or not isy.ISY.connected:
_LOGGER.error('A connection has not been made to the ISY controller.')
return False
devices = []
for node in isy.filter_nodes(isy.NODES, units=UOM,
states=STATES):
if node.dimmable or '51' in node.uom:
devices.append(ISYLightDevice(node))
add_devices(devices)
class ISYLightDevice(isy.ISYDevice, Light):
"""Representation of an ISY994 light devie."""
def __init__(self, node: object) -> None:
"""Initialize the ISY994 light device."""
isy.ISYDevice.__init__(self, node)
@property
def is_on(self) -> bool:
"""Get whether the ISY994 light is on."""
return self.state == STATE_ON
@property
def state(self) -> str:
"""Get the state of the ISY994 light."""
return VALUE_TO_STATE.get(bool(self.value), STATE_UNKNOWN)
def turn_off(self, **kwargs) -> None:
"""Send the turn off command to the ISY994 light device."""
if not self._node.fastoff():
            _LOGGER.debug('Unable to turn off light.')
def turn_on(self, brightness=100, **kwargs) -> None:
"""Send the turn on command to the ISY994 light device."""
if not self._node.on(val=brightness):
_LOGGER.debug('Unable to turn on light.')
@property
def state_attributes(self):
"""Flag supported attributes."""
return {ATTR_BRIGHTNESS: self.value}
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
| mit | 5,319,049,848,745,298,000 | 29.6375 | 78 | 0.640963 | false |
agileblaze/OpenStackTwoFactorAuthentication | horizon/openstack_dashboard/dashboards/project/access_and_security/keypairs/forms.py | 23 | 2663 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
NEW_LINES = re.compile(r"\r|\n")
KEYPAIR_NAME_REGEX = re.compile(r"^[\w\- ]+$", re.UNICODE)
KEYPAIR_ERROR_MESSAGES = {
'invalid': _('Key pair name may only contain letters, '
'numbers, underscores, spaces and hyphens.')}
class CreateKeypair(forms.SelfHandlingForm):
name = forms.RegexField(max_length=255,
label=_("Key Pair Name"),
regex=KEYPAIR_NAME_REGEX,
error_messages=KEYPAIR_ERROR_MESSAGES)
def handle(self, request, data):
return True # We just redirect to the download view.
class ImportKeypair(forms.SelfHandlingForm):
name = forms.RegexField(max_length=255,
label=_("Key Pair Name"),
regex=KEYPAIR_NAME_REGEX,
error_messages=KEYPAIR_ERROR_MESSAGES)
public_key = forms.CharField(label=_("Public Key"), widget=forms.Textarea(
attrs={'class': 'modal-body-fixed-width'}))
def handle(self, request, data):
try:
# Remove any new lines in the public key
data['public_key'] = NEW_LINES.sub("", data['public_key'])
keypair = api.nova.keypair_import(request,
data['name'],
data['public_key'])
messages.success(request,
_('Successfully imported public key: %s')
% data['name'])
return keypair
except Exception:
exceptions.handle(request, ignore=True)
self.api_error(_('Unable to import key pair.'))
return False
| apache-2.0 | 6,443,140,659,908,157,000 | 37.042857 | 78 | 0.608336 | false |
birm/Elemental | examples/interface/DS.py | 1 | 2769 | #
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
n0 = 25
n1 = 25
numLambdas = 5
startLambda = 0.01
endLambda = 1
display = True
worldRank = El.mpi.WorldRank()
worldSize = El.mpi.WorldSize()
# Place two 2D finite-difference matrices next to each other
# and make the last column dense
def ConcatFD2D(N0,N1):
A = El.DistSparseMatrix()
height = N0*N1
width = 2*N0*N1
A.Resize(height,width)
localHeight = A.LocalHeight()
A.Reserve(11*localHeight)
for sLoc in xrange(localHeight):
s = A.GlobalRow(sLoc)
x0 = s % N0
x1 = s / N0
sRel = s + N0*N1
A.QueueLocalUpdate( sLoc, s, 11 )
A.QueueLocalUpdate( sLoc, sRel, -20 )
if x0 > 0:
A.QueueLocalUpdate( sLoc, s-1, -1 )
A.QueueLocalUpdate( sLoc, sRel-1, -17 )
if x0+1 < N0:
A.QueueLocalUpdate( sLoc, s+1, 2 )
A.QueueLocalUpdate( sLoc, sRel+1, -20 )
if x1 > 0:
A.QueueLocalUpdate( sLoc, s-N0, -30 )
A.QueueLocalUpdate( sLoc, sRel-N0, -3 )
if x1+1 < N1:
A.QueueLocalUpdate( sLoc, s+N0, 4 )
A.QueueLocalUpdate( sLoc, sRel+N0, 3 )
# The dense last column
A.QueueLocalUpdate( sLoc, width-1, -10/height );
A.ProcessQueues()
return A
A = ConcatFD2D(n0,n1)
b = El.DistMultiVec()
El.Gaussian( b, n0*n1, 1 )
if display:
El.Display( A, "A" )
El.Display( b, "b" )
ctrl = El.LPAffineCtrl_d()
ctrl.mehrotraCtrl.outerEquil = True
ctrl.mehrotraCtrl.innerEquil = True
ctrl.mehrotraCtrl.scaleTwoNorm = True
ctrl.mehrotraCtrl.progress = True
for j in xrange(0,numLambdas):
lambd = startLambda + j*(endLambda-startLambda)/(numLambdas-1.)
if worldRank == 0:
print "lambda =", lambd
startDS = El.mpi.Time()
x = El.DS( A, b, lambd, ctrl )
endDS = El.mpi.Time()
if worldRank == 0:
print "DS time:", endDS-startDS, "seconds"
if display:
El.Display( x, "x" )
xOneNorm = El.EntrywiseNorm( x, 1 )
r = El.DistMultiVec()
El.Copy( b, r )
El.Multiply( El.NORMAL, -1., A, x, 1., r )
rTwoNorm = El.Nrm2( r )
t = El.DistMultiVec()
El.Zeros( t, 2*n0*n1, 1 )
El.Multiply( El.TRANSPOSE, 1., A, r, 0., t )
tTwoNorm = El.Nrm2( t )
tInfNorm = El.MaxNorm( t )
if display:
El.Display( r, "r" )
El.Display( t, "t" )
if worldRank == 0:
print "|| x ||_1 =", xOneNorm
print "|| b - A x ||_2 =", rTwoNorm
print "|| A^T (b - A x) ||_2 =", tTwoNorm
print "|| A^T (b - A x) ||_oo =", tInfNorm
# Require the user to press a button before the figures are closed
El.Finalize()
if worldSize == 1:
raw_input('Press Enter to exit')
| bsd-3-clause | 8,293,348,861,478,620,000 | 25.625 | 73 | 0.620802 | false |
koala-ai/tensorflow_nlp | nlp/textsum/predict.py | 1 | 2416 | # -*- coding:utf-8 -*-
import os
import codecs
import numpy as np
from six.moves import xrange
import tensorflow as tf
from nlp.textsum import model as seq2seq_model
from nlp.textsum.dataset import data_utils, dataset
def generate_summary(args):
sentences = []
summaries = []
input_file = codecs.open(args.predict_file, encoding='utf-8')
for line in input_file:
sentences.append(line.replace("\n", "").encode('utf-8')) # list of 'str', convert 'unicode' to 'str'
# Load vocabularies.
vocab_path = os.path.join(args.utils_dir, "vocab")
vocab, rev_vocab = data_utils.initialize_vocabulary(vocab_path)
args.vocab_size = len(vocab)
with tf.Session() as sess:
# Create model and load parameters.
model = seq2seq_model.create_model(sess, args.train_dir, args, True)
model.batch_size = 1
args.batch_size = 1
for i in range(len(sentences)):
sentence = sentences[i]
token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), vocab)
bucket_id = min([b for b in xrange(len(args.buckets))
if args.buckets[b][0] > len(token_ids)])
# Get a 1-element batch to feed the sentence to the model.
encoder_inputs, decoder_inputs, target_weights = \
dataset.DataSet(args, {bucket_id: [(token_ids, [])]}).next_batch(bucket_id)
# Get output logits for the sentence.
_, _, output_logits_batch = model.step(sess, encoder_inputs, decoder_inputs, target_weights, bucket_id,
True)
output_logits = []
for item in output_logits_batch:
output_logits.append(item[0])
outputs = [int(np.argmax(logit)) for logit in output_logits]
if data_utils.EOS_ID in outputs:
outputs = outputs[:outputs.index(data_utils.EOS_ID)] # list of IDs
summary = [tf.compat.as_str(rev_vocab[output]) for output in outputs]
print(summary)
summaries.append(summary)
print(" ".join(summary))
# Write Output to summary_dir
summary_file = codecs.open(args.result_file, 'w', encoding='utf-8')
for summary in summaries:
line = " ".join(summary) + "\n" # 'str' in 'utf-8' coding
summary_file.write(line) # write unicode to file | apache-2.0 | -5,901,662,550,133,504,000 | 36.765625 | 115 | 0.598924 | false |
fedora-infra/pkgdb2 | pkgdb2/lib/model/__init__.py | 1 | 73190 | # -*- coding: utf-8 -*-
#
# Copyright © 2013-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#
"""
Mapping of python classes to Database Tables.
"""
__requires__ = ['SQLAlchemy >= 0.7', 'jinja2 >= 2.4']
import pkg_resources
import datetime
import json
import logging
import time
import sqlalchemy as sa
from sqlalchemy import create_engine
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import backref
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import relation
from sqlalchemy.sql import or_
from sqlalchemy.sql import and_
from sqlalchemy.sql import not_
BASE = declarative_base()
ERROR_LOG = logging.getLogger('pkgdb2.lib.model.packages')
DEFAULT_GROUPS = {'provenpackager': {'commit': True}}
## Apparently some of our methods have too few public methods
# pylint: disable=R0903
## Others have too many attributes
# pylint: disable=R0902
## Others have too many arguments
# pylint: disable=R0913
## We use id for the identifier in our db but that's too short
# pylint: disable=C0103
## Some of the object we use here have inherited methods which apparently
## pylint does not detect.
# pylint: disable=E1101
## In our object class we do not call the parent and it's fine
# pylint: disable=W0231
def create_tables(db_url, alembic_ini=None, debug=False):
""" Create the tables in the database using the information from the
url obtained.
:arg db_url, URL used to connect to the database. The URL contains
information with regards to the database engine, the host to
connect to, the user and password and the database name.
ie: <engine>://<user>:<password>@<host>/<dbname>
:kwarg alembic_ini, path to the alembic ini file. This is necessary
to be able to use alembic correctly, but not for the unit-tests.
    :kwarg debug, a boolean specifying whether we should have the verbose
output of sqlalchemy or not.
:return a session that can be used to query the database.
"""
engine = create_engine(db_url, echo=debug)
BASE.metadata.create_all(engine)
#engine.execute(collection_package_create_view(driver=engine.driver))
if db_url.startswith('sqlite:'):
## Ignore the warning about con_record
# pylint: disable=W0613
def _fk_pragma_on_connect(dbapi_con, con_record):
''' Tries to enforce referential constraints on sqlite. '''
dbapi_con.execute('pragma foreign_keys=ON')
sa.event.listen(engine, 'connect', _fk_pragma_on_connect)
if alembic_ini is not None: # pragma: no cover
# then, load the Alembic configuration and generate the
# version table, "stamping" it with the most recent rev:
## Ignore the warning missing alembic
# pylint: disable=F0401
from alembic.config import Config
from alembic import command
alembic_cfg = Config(alembic_ini)
command.stamp(alembic_cfg, "head")
scopedsession = scoped_session(sessionmaker(bind=engine))
create_status(scopedsession)
return scopedsession
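# Usage sketch (illustrative only; the SQLite path is a placeholder):
#
#     session = create_tables('sqlite:////var/tmp/pkgdb2_dev.sqlite')
#     # ... query or insert through `session`, then call session.remove()
#     # when finished.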
def drop_tables(db_url, engine): # pragma: no cover
""" Drops the tables in the database using the information from the
url obtained.
:arg db_url, URL used to connect to the database. The URL contains
information with regards to the database engine, the host to connect
to, the user and password and the database name.
ie: <engine>://<user>:<password>@<host>/<dbname>
"""
engine = create_engine(db_url)
BASE.metadata.drop_all(engine)
def create_status(session):
""" Fill in the status tables. """
for acl in ['commit', 'watchbugzilla', 'watchcommits', 'approveacls']:
obj = PkgAcls(acl)
session.add(obj)
try:
session.commit()
except SQLAlchemyError: # pragma: no cover
session.rollback()
for status in ['Approved', 'Awaiting Review', 'Denied', 'Obsolete',
'Removed']:
obj = AclStatus(status)
session.add(obj)
try:
session.commit()
except SQLAlchemyError: # pragma: no cover
session.rollback()
for status in ['EOL', 'Active', 'Under Development']:
obj = CollecStatus(status)
session.add(obj)
try:
session.commit()
except SQLAlchemyError: # pragma: no cover
session.rollback()
for status in ['Approved', 'Removed', 'Retired', 'Orphaned']:
obj = PkgStatus(status)
session.add(obj)
try:
session.commit()
except SQLAlchemyError: # pragma: no cover
session.rollback()
for status in ['Approved', 'Denied', 'Awaiting Review',
'Blocked', 'Pending', 'Obsolete']:
obj = ActionStatus(status)
session.add(obj)
try:
session.commit()
except SQLAlchemyError: # pragma: no cover
session.rollback()
for status in ['rpms']:
obj = Namespace(status)
session.add(obj)
try:
session.commit()
except SQLAlchemyError: # pragma: no cover
session.rollback()
class PkgAcls(BASE):
""" Table storing the ACLs a package can have. """
__tablename__ = 'pkg_acls'
status = sa.Column(sa.String(50), primary_key=True)
def __init__(self, status):
""" Constructor. """
self.status = status
@classmethod
def all_txt(cls, session):
""" Return all the Acls in plain text for packages. """
return [
item.status
for item in
session.query(cls).order_by(cls.status).all()]
class PkgStatus(BASE):
""" Table storing the statuses a package can have. """
__tablename__ = 'pkg_status'
status = sa.Column(sa.String(50), primary_key=True)
def __init__(self, status):
""" Constructor. """
self.status = status
@classmethod
def all_txt(cls, session):
""" Return all the status in plain text for packages. """
return [
item.status
for item in
session.query(cls).order_by(cls.status).all()]
class AclStatus(BASE):
""" Table storing the statuses ACLs a package can have. """
__tablename__ = 'acl_status'
status = sa.Column(sa.String(50), primary_key=True)
def __init__(self, status):
""" Constructor. """
self.status = status
@classmethod
def all_txt(cls, session):
""" Return all the status in plain text for packages. """
return [
item.status
for item in
session.query(cls).order_by(cls.status).all()]
class ActionStatus(BASE):
""" Table storing the statuses for the AdminActions. """
__tablename__ = 'action_status'
status = sa.Column(sa.String(50), primary_key=True)
def __init__(self, status):
""" Constructor. """
self.status = status
@classmethod
def all_txt(cls, session):
""" Return all the status in plain text. """
return [
item.status
for item in
session.query(cls).order_by(cls.status).all()]
class CollecStatus(BASE):
""" Table storing the statuses a collection can have. """
__tablename__ = 'collection_status'
status = sa.Column(sa.String(50), primary_key=True)
def __init__(self, status):
""" Constructor. """
self.status = status
@classmethod
def all_txt(cls, session):
""" Return all the status in plain text for a collection. """
return [
item.status
for item in
session.query(cls).order_by(cls.status).all()]
class Namespace(BASE):
""" Table storing the namespaces a package can be in. """
__tablename__ = 'namespaces'
namespace = sa.Column(sa.String(50), primary_key=True)
def __init__(self, namespace):
""" Constructor. """
self.namespace = namespace
@classmethod
def all_txt(cls, session):
""" Return all the namespaces in plain text for a collection. """
return [
item.namespace
for item in
session.query(cls).order_by(cls.namespace).all()]
@classmethod
def get(cls, session, namespace):
""" Return the specified namespace if found in the DB. """
query = session.query(cls).filter(cls.namespace == namespace)
return query.first()
class PackageListingAcl(BASE):
"""Give a person or a group ACLs on a specific PackageListing.
Table -- PackageListingAcl
"""
__tablename__ = 'package_listing_acl'
id = sa.Column(sa.Integer, primary_key=True)
fas_name = sa.Column(sa.String(255), nullable=False, index=True)
packagelisting_id = sa.Column(
sa.Integer,
sa.ForeignKey(
'package_listing.id', ondelete='CASCADE', onupdate='CASCADE'),
nullable=False)
acl = sa.Column(
sa.String(50),
sa.ForeignKey('pkg_acls.status', onupdate='CASCADE'),
nullable=False,
index=True)
status = sa.Column(
sa.String(50),
sa.ForeignKey('acl_status.status', onupdate='CASCADE'),
nullable=False,
index=True)
date_created = sa.Column(sa.DateTime, nullable=False,
default=datetime.datetime.utcnow)
packagelist = relation('PackageListing')
__table_args__ = (
sa.UniqueConstraint('fas_name', 'packagelisting_id', 'acl'),
)
@classmethod
def all(cls, session):
""" Return the list of all Collections present in the database.
:arg cls: the class object
:arg session: the database session used to query the information.
"""
return session.query(cls).all()
@classmethod
def get_top_maintainers(cls, session, limit=10):
""" Return the username and number of commits ACLs ordered by number
of commits.
:arg session: session with which to connect to the database
        :arg limit: the number of top maintainers to return, defaults to 10.
"""
query = session.query(
PackageListingAcl.fas_name,
sa.func.count(
sa.func.distinct(PackageListing.package_id)
).label('cnt')
).filter(
PackageListingAcl.packagelisting_id == PackageListing.id
).filter(
PackageListing.package_id == Package.id
).filter(
PackageListing.collection_id == Collection.id
).filter(
Package.status == 'Approved'
).filter(
PackageListing.status == 'Approved'
).filter(
PackageListingAcl.acl == 'commit'
).filter(
PackageListingAcl.status == 'Approved'
).filter(
Collection.status != 'EOL'
).group_by(
PackageListingAcl.fas_name
).order_by(
'cnt DESC'
).limit(limit)
return query.all()
@classmethod
def get_acl_packager(
cls, session, packager, acls=None, eol=False, poc=None,
offset=None, limit=None, count=False):
""" Retrieve the ACLs associated with a packager.
:arg session: the database session used to connect to the
database.
:arg packager: the username of the packager to retrieve the ACls
of.
:kwarg acls: one or more ACLs to restrict the query for.
:kwarg eol: a boolean to specify whether to include results for
EOL collections or not. Defaults to False.
If True, it will return results for all collections
(including EOL).
If False, it will return results only for non-EOL collections.
:kwarg poc: a boolean specifying whether the results should be
restricted to ACL for which the provided packager is the point
of contact or not. Defaults to None.
If ``True`` it will only return ACLs for packages on which the
provided packager is point of contact.
If ``False`` it will only return ACLs for packages on which the
provided packager is not the point of contact.
If ``None`` it will not filter the ACLs returned based on the
point of contact of the package (thus every packages is
returned).
:kwarg offset: the offset to apply to the results
:kwarg limit: the number of results to return
:kwarg count: a boolean to return the result of a COUNT query
if true, returns the data if false (default).
"""
if isinstance(acls, basestring):
acls = [acls]
query = session.query(
cls, PackageListing
).filter(
PackageListingAcl.fas_name == packager
)
if acls:
query = query.filter(
PackageListingAcl.acl.in_(acls)
)
if not eol:
query = query.filter(
PackageListingAcl.packagelisting_id == PackageListing.id
).filter(
PackageListing.collection_id == Collection.id
).filter(
Collection.status != 'EOL'
)
if poc is not None:
if poc is True:
query = query.filter(
PackageListingAcl.packagelisting_id == PackageListing.id
).filter(
PackageListing.point_of_contact == packager
)
else:
query = query.filter(
PackageListingAcl.packagelisting_id == PackageListing.id
).filter(
PackageListing.point_of_contact != packager
)
if count:
return query.count()
query = query.order_by(PackageListingAcl.id)
if offset:
query = query.offset(offset)
if limit:
query = query.limit(limit)
return query.all()
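    # Illustrative example (names are hypothetical, not part of the original
    # API): retrieving the approved 'commit' ACLs of one packager on non-EOL
    # branches. Each returned row is a (PackageListingAcl, PackageListing)
    # tuple.
    #
    #     rows = PackageListingAcl.get_acl_packager(
    #         session, packager='jdoe', acls=['commit'], eol=False)
    #     for acl, pkglisting in rows:
    #         print(pkglisting.packagename, acl.status)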
@classmethod
def get_acl_package(cls, session, user, namespace, package,
status="Awaiting Review"):
""" Return the pending ACLs for the specified package owned by
user.
:arg session: the database session used to connect to the
database.
:arg user: the username of the packager whose ACL are asked for
this package.
:arg namespace: the namespace of the package.
:arg package: name of the package for which are returned the
requested ACLs.
:kwarg status: status of the ACLs to be returned for the desired
package of the specified packager.
"""
# Get all the packages of this person
stmt = session.query(
Package.id
).filter(
Package.name == package
).filter(
Package.namespace == namespace
).subquery()
stmt2 = session.query(PackageListing.id).filter(
PackageListing.package_id == stmt
).subquery()
query = session.query(cls).filter(
PackageListingAcl.packagelisting_id.in_(stmt2)
).filter(
PackageListingAcl.fas_name == user
)
if status:
query = query.filter(
cls.status == status
)
return query.all()
@classmethod
def get(cls, session, user, packagelisting_id, acl):
""" Retrieve the PersonPackageListing which associates a person
with a package in a certain collection.
:arg session: the database session used to connect to the
database
:arg user: the username
:arg packagelisting_id: the identifier of the PackageListing
entry.
:arg acl: the ACL that person has on that package
"""
return session.query(
PackageListingAcl
).filter(
PackageListingAcl.fas_name == user
).filter(
PackageListingAcl.packagelisting_id == packagelisting_id
).filter(
PackageListingAcl.acl == acl
).first()
@classmethod
def create(cls, session, user, packagelisting_id, acl, status):
""" Creates the PersonPackageListing which associates a person
with a package in a certain collection.
:arg session: the database session used to connect to the
database
:arg user: the username
:arg packagelisting_id: the identifier of the PackageListing
entry.
:arg acl: the ACL that person has on that package
:arg status: the status of the ACL
"""
personpkg = PackageListingAcl(
fas_name=user,
packagelisting_id=packagelisting_id,
acl=acl,
status=status)
session.add(personpkg)
session.flush()
return personpkg
@classmethod
def get_pending_acl(cls, session, user=None):
""" Return for all the packages of which `user` is point of
contact the ACL which have status 'Awaiting Review'.
:arg session: the database session used to connect to the
database
:arg user: the username of the person for which we are checking the
pending ACLs.
"""
# Match the other criteria
query = session.query(
cls
).filter(
cls.status == 'Awaiting Review'
).filter(
cls.packagelisting_id == PackageListing.id
).filter(
PackageListing.status == 'Approved'
).filter(
PackageListing.package_id == Package.id
).filter(
PackageListing.collection_id == Collection.id
).filter(
Collection.status != 'EOL'
).order_by(
Package.name, Collection.branchname, cls.fas_name, cls.acl
)
if user is not None:
subquery = session.query(
PackageListingAcl.packagelisting_id
).filter(
PackageListingAcl.acl == 'approveacls'
).filter(
PackageListingAcl.fas_name == user
).filter(
PackageListingAcl.status == 'Approved'
)
subquery2 = session.query(
PackageListing.id
).filter(
PackageListing.point_of_contact == user
).filter(
PackageListing.status == 'Approved'
)
query = query.filter(
or_(
PackageListing.id.in_(subquery.subquery()),
PackageListing.id.in_(subquery2.subquery()),
)
)
return query.all()
def __init__(self, fas_name, packagelisting_id, acl, status):
""" Constructor.
:arg fas_name: the fas name of the user
:arg packagelisting_id: the identifier of the PackageListing entry
to which this ACL is associated
:arg acl: the actual ACL to add, should be present in the PkgAcls
table.
:arg status: the status of the ACL, should be present in the
AclStatus table.
"""
self.fas_name = fas_name
self.packagelisting_id = packagelisting_id
self.acl = acl
self.status = status
def __repr__(self):
""" The string representation of this object.
"""
return 'PackageListingAcl(id:%r, %r, PackageListing:%r, Acl:%s, ' \
'%s)' % (
self.id, self.fas_name, self.packagelisting_id, self.acl,
self.status)
def to_json(self, _seen=None, pkglist=True):
""" Return a dictionnary representation of this object.
"""
_seen = _seen or []
cls = type(self)
_seen.append(cls)
infos = dict(
fas_name=self.fas_name,
acl=self.acl,
status=self.status,
)
if type(self.packagelist) not in _seen and pkglist:
infos['packagelist'] = self.packagelist.to_json(_seen)
return infos
class Collection(BASE):
"""A Collection of packages.
Table -- Collection
"""
__tablename__ = 'collection'
id = sa.Column(sa.Integer, nullable=False, primary_key=True)
name = sa.Column(sa.Text, nullable=False)
version = sa.Column(sa.Text, nullable=False)
status = sa.Column(
sa.String(50),
sa.ForeignKey('collection_status.status', onupdate='CASCADE'),
nullable=False)
owner = sa.Column(sa.String(32), nullable=False)
branchname = sa.Column(sa.String(32), unique=True, nullable=False)
dist_tag = sa.Column(sa.String(32), unique=True, nullable=False)
koji_name = sa.Column(sa.Text)
allow_retire = sa.Column(sa.Boolean, default=False, nullable=False)
date_created = sa.Column(sa.DateTime, nullable=False,
default=datetime.datetime.utcnow)
date_updated = sa.Column(sa.DateTime, nullable=False,
default=sa.func.now(),
onupdate=sa.func.now())
__table_args__ = (
sa.UniqueConstraint('name', 'version'),
)
def __init__(self, name, version, status, owner,
branchname=None, dist_tag=None, koji_name=None,
allow_retire=False):
self.name = name
self.version = version
self.status = status
self.owner = owner
self.branchname = branchname
self.dist_tag = dist_tag
self.koji_name = koji_name
self.allow_retire = allow_retire
def __repr__(self):
""" The string representation of this object.
"""
return 'Collection(%r, %r, %r, owner:%r)' % (
self.name, self.version, self.status, self.owner)
def to_json(self, _seen=None):
""" Used by fedmsg to serialize Collections in messages.
"""
return dict(
name=self.name,
version=self.version,
branchname=self.branchname,
status=self.status,
koji_name=self.koji_name,
dist_tag=self.dist_tag,
allow_retire=self.allow_retire,
date_created=self.date_created.strftime('%Y-%m-%d %H:%M:%S'),
date_updated=self.date_updated.strftime('%Y-%m-%d %H:%M:%S'),
)
@classmethod
def by_name(cls, session, branch_name):
"""Return the Collection that matches the simple name
:arg branch_name: branch name for a Collection
:returns: The Collection that matches the name
:raises sqlalchemy.InvalidRequestError: if the simple name is not found
simple_name will be looked up as the Branch name.
"""
collection = session.query(cls).filter(
Collection.branchname == branch_name).one()
return collection
@classmethod
def all(cls, session):
""" Return the list of all Collections present in the database.
:arg cls: the class object
:arg session: the database session used to query the information.
"""
return session.query(cls).order_by(cls.name).all()
@classmethod
def search(cls, session, clt_name, clt_status=None, offset=None,
limit=None, count=False):
""" Return the Collections matching the criteria.
:arg cls: the class object
:arg session: the database session used to query the information.
        :arg clt_name: pattern to restrict the Collection queried
:kwarg clt_status: the status of the Collection
:kwarg offset: the offset to apply to the results
:kwarg limit: the number of results to return
:kwarg count: a boolean to return the result of a COUNT query
if true, returns the data if false (default).
"""
query = session.query(Collection)
if '%' in clt_name:
query = query.filter(
Collection.branchname.like(clt_name)
)
else:
query = query.filter(
Collection.branchname == clt_name
)
if clt_status:
query = query.filter(Collection.status == clt_status)
if count:
return query.count()
query = query.order_by(Collection.branchname)
if offset:
query = query.offset(offset)
if limit:
query = query.limit(limit)
return query.all()
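# Illustrative example (branch pattern is hypothetical): Collection.search
# accepts either an exact branch name or a SQL LIKE pattern containing '%'.
#
#     Collection.search(session, 'f2%', clt_status='Active')
#
# would return the active collections whose branchname starts with 'f2'.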
class PackageListing(BASE):
"""This associates a package with a particular collection.
Table -- PackageListing
"""
__tablename__ = 'package_listing'
id = sa.Column(sa.Integer, nullable=False, primary_key=True)
package_id = sa.Column(
sa.Integer,
sa.ForeignKey(
'package.id', ondelete="CASCADE", onupdate="CASCADE"),
nullable=False)
point_of_contact = sa.Column(sa.Text, nullable=False, index=True)
collection_id = sa.Column(
sa.Integer,
sa.ForeignKey(
'collection.id', ondelete="CASCADE", onupdate="CASCADE"),
nullable=False)
status = sa.Column(
sa.String(50),
sa.ForeignKey('pkg_status.status', onupdate='CASCADE'),
nullable=False,
index=True)
critpath = sa.Column(sa.Boolean, default=False, nullable=False)
status_change = sa.Column(sa.DateTime, nullable=False,
default=datetime.datetime.utcnow,
onupdate=sa.func.now())
__table_args__ = (
sa.UniqueConstraint('package_id', 'collection_id'),
)
package = relation("Package")
collection = relation("Collection")
acls = relation(
PackageListingAcl,
backref=backref('packagelisting'),
)
def __init__(self, point_of_contact, status, package_id=None,
collection_id=None, critpath=False):
self.package_id = package_id
self.collection_id = collection_id
self.point_of_contact = point_of_contact
self.status = status
self.critpath = critpath
packagename = association_proxy('package', 'name')
def __repr__(self):
""" The string representation of this object.
"""
return 'PackageListing(id:%r, %r, %r, packageid=%r, collectionid=%r' \
')' % (
self.id, self.point_of_contact, self.status,
self.package_id, self.collection_id)
def to_json(self, _seen=None, acls=True, package=True,
not_provenpackager=None):
""" Return a dictionary representation of this object. """
_seen = _seen or []
_seen.append(type(self))
result = dict(
point_of_contact=self.point_of_contact,
critpath=self.critpath,
status=self.status,
status_change=time.mktime(self.status_change.timetuple()),
)
if package and self.package:
result['package'] = self.package.to_json(_seen)
if self.collection:
result['collection'] = self.collection.to_json(_seen)
if acls and self.acls and not type(self.acls[0]) in _seen:
tmp = []
for acl in self.acls:
tmp.append(acl.to_json(_seen + [type(self)]))
if not_provenpackager \
and self.package.name not in not_provenpackager:
tmp.append(
{
"status": "Approved",
"fas_name": "group::provenpackager",
"acl": "commit"
}
)
if tmp:
result['acls'] = tmp
return result
def branch(self, session, branch_to):
"""Clone the permissions on this PackageListing to another `Branch`.
:kwarg branch_to: the Collection object to branch to (ie: new
Fedora or new EPEL).
"""
# Create new PackageListing
pkg_listing = PackageListing(
point_of_contact=self.point_of_contact,
status=self.status,
package_id=self.package.id,
collection_id=branch_to.id,
critpath=self.critpath,
)
session.add(pkg_listing)
session.flush()
# Propagates the ACLs
for acl in self.acls:
pkg_list_acl = PackageListingAcl(
fas_name=acl.fas_name,
packagelisting_id=pkg_listing.id,
acl=acl.acl,
status=acl.status)
session.add(pkg_list_acl)
session.flush()
@classmethod
def by_package_id(cls, session, pkgid):
""" Return the PackageListing object based on the Package ID.
:arg pkgid: Integer, identifier of the package in the Package
table
"""
return session.query(cls).filter(
PackageListing.package_id == pkgid
).order_by(
PackageListing.collection_id
).all()
@classmethod
def by_pkgid_collectionid(cls, session, pkgid, collectionid):
"""Return the PackageListing for the provided package in the
specified collection.
:arg pkgid: Integer, identifier of the package in the Package
table
:arg collectionid: Integer, identifier of the collection in the
Collection table
:returns: The PackageListing that matches this package identifier
            and collection identifier
:raises sqlalchemy.InvalidRequestError: if the simple name is not found
"""
return session.query(cls).filter(
PackageListing.package_id == pkgid
).filter(
PackageListing.collection_id == collectionid
).first()
@classmethod
def by_collectionid(cls, session, collectionid):
"""Return all the PackageListing for the specified collection.
:arg collectionid: Integer, identifier of the collection in the
Collection table
        :returns: The PackageListing that matches the collection identifier
:raises sqlalchemy.InvalidRequestError: if the simple name is not found
"""
return session.query(cls).filter(
PackageListing.collection_id == collectionid
).all()
@classmethod
def search(cls, session, pkg_name, clt_id, pkg_owner=None,
pkg_status=None, critpath=None, offset=None, limit=None,
count=False):
"""
Return the list of packages matching the given criteria
:arg session: session with which to connect to the database
:arg pkg_name: the name of the package
:arg clt_id: the identifier of the collection
        :kwarg pkg_owner: name of the point of contact of the package
:kwarg pkg_status: status of the package
        :kwarg critpath: a boolean to restrict the search to critpath
packages
:kwarg offset: the offset to apply to the results
:kwarg limit: the number of results to return
:kwarg count: a boolean to return the result of a COUNT query
if true, returns the data if false (default).
"""
# Get all the packages matching the name
stmt = session.query(Package)
if '%' in pkg_name:
stmt = stmt.filter(Package.name.like(pkg_name))
else:
stmt = stmt.filter(Package.name == pkg_name)
stmt = stmt.subquery()
# Match the other criteria
query = session.query(cls).filter(
PackageListing.package_id == stmt.c.id
)
if clt_id:
query = query.filter(PackageListing.collection_id == clt_id)
if pkg_owner:
query = query.filter(
PackageListing.point_of_contact == pkg_owner)
if pkg_status:
query = query.filter(PackageListing.status == pkg_status)
if critpath is not None:
query = query.filter(PackageListing.critpath == critpath)
if count:
return query.count()
if offset:
query = query.offset(offset)
if limit:
query = query.limit(limit)
return query.all()
@classmethod
def search_packagers(cls, session, pattern, eol=False, offset=None,
limit=None, count=False):
""" Return all the packagers whose name match the pattern.
Are packagers user having at least one commit ACL on one package.
:arg session: session with which to connect to the database
:arg pattern: pattern the point_of_contact of the package should
match
:kwarg eol: a boolean to specify whether to include results for
EOL collections or not. Defaults to False.
If True, it will return results for all collections
(including EOL).
If False, it will return results only for non-EOL collections.
:kwarg offset: the offset to apply to the results
:kwarg limit: the number of results to return
:kwarg count: a boolean to return the result of a COUNT query
if true, returns the data if false (default).
"""
query = session.query(
sa.func.distinct(PackageListingAcl.fas_name)
).filter(
PackageListingAcl.status == 'Approved'
).order_by(
PackageListingAcl.fas_name
)
if '%' in pattern:
query = query.filter(PackageListingAcl.fas_name.like(pattern))
else:
query = query.filter(PackageListingAcl.fas_name == pattern)
if not eol:
query = query.filter(
PackageListingAcl.packagelisting_id == PackageListing.id
).filter(
PackageListing.collection_id == Collection.id
).filter(
Collection.status != 'EOL'
)
if count:
return query.count()
if offset:
query = query.offset(offset)
if limit:
query = query.limit(limit)
return query.all()
@classmethod
def get_top_poc(cls, session, limit=10):
""" Return the username and number of commits ACLs ordered by number
of commits.
:arg session: session with which to connect to the database
        :arg limit: the number of top points of contact to return, defaults to 10.
"""
query = session.query(
PackageListing.point_of_contact,
sa.func.count(
sa.func.distinct(PackageListing.package_id)
).label('cnt')
).filter(
PackageListing.status == 'Approved'
).filter(
PackageListing.package_id == Package.id
).filter(
Package.status == 'Approved'
).filter(
PackageListing.collection_id == Collection.id
).filter(
Collection.status != 'EOL'
).group_by(
PackageListing.point_of_contact
).order_by(
'cnt DESC'
).limit(limit)
return query.all()
@classmethod
def get_critpath_packages(cls, session, branch=None):
""" Return the list of packages marked as being critpath.
:arg session: session with which to connect to the database
:kwarg branch: the branchname to restrict the critpath package to.
"""
query = session.query(
cls
).filter(
cls.status == 'Approved'
).filter(
cls.critpath == True
).order_by(
cls.package_id,
cls.collection_id
)
if branch is not None:
query = query.filter(
cls.collection_id == Collection.id
).filter(
Collection.branchname == branch
)
return query.all()
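# Illustrative example (branch name is hypothetical): listing the approved
# critical-path packages of a single branch.
#
#     for pkglist in PackageListing.get_critpath_packages(session, branch='master'):
#         print(pkglist.packagename, pkglist.collection.branchname)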
class Package(BASE):
"""Software we are packaging.
This is equal to the software in one of our revision control directories.
It is unversioned and not associated with a particular collection.
Table -- Package
"""
__tablename__ = 'package'
id = sa.Column(sa.Integer, nullable=False, primary_key=True)
name = sa.Column(sa.Text, nullable=False, index=True)
summary = sa.Column(sa.Text, nullable=False)
description = sa.Column(sa.Text, nullable=True)
review_url = sa.Column(sa.Text)
upstream_url = sa.Column(sa.Text)
monitor = sa.Column(sa.String(10), default=True, nullable=False)
koschei = sa.Column(sa.Boolean(), default=False, nullable=False)
status = sa.Column(
sa.String(50),
sa.ForeignKey('pkg_status.status', onupdate='CASCADE'),
nullable=False)
namespace = sa.Column(
sa.String(50),
sa.ForeignKey(
'namespaces.namespace',
onupdate='CASCADE',
ondelete='CASCADE'),
nullable=False, default='rpms',
)
listings = relation(PackageListing)
date_created = sa.Column(sa.DateTime, nullable=False,
default=datetime.datetime.utcnow)
__table_args__ = (
sa.UniqueConstraint('name', 'namespace'),
)
@property
def sorted_listings(self):
""" Return associated listings reverse sorted by collection name.
"""
def comparator(a, b):
""" Compare two collections and return the result of the
comparison relying on collection name and version.
"""
b, a = a.collection, b.collection
return cmp(a.name + a.version, b.name + b.version)
return sorted(self.listings, cmp=comparator)
@classmethod
def by_name(cls, session, namespace, pkgname):
""" Return the package associated to the given name.
:raises sqlalchemy.InvalidRequestError: if the package name is
not found
"""
return session.query(cls).filter(
Package.name == pkgname
).filter(
Package.namespace == namespace
).one()
@property
def requests_open(self):
""" Returns the list of open requests (Pending or Awaiting Review)
"""
requests = [
req
for req in self.requests
if req.status in ['Pending', 'Awaiting Review']
]
return requests
@property
def requests_pending(self):
""" Returns the list of pending branch requests
"""
requests = [
req
for req in self.requests
if req.status == 'Pending'
]
return requests
@property
def requests_awaiting_review(self):
""" Returns the list of awaiting review requests
"""
requests = [
req
for req in self.requests
if req.status == 'Awaiting Review'
]
return requests
@property
def retired_everywhere(self):
""" Returns whether the package is retired on all active branches
or not.
"""
        retired = True
        for pkglist in self.listings:
            if pkglist.collection.status != 'EOL' and pkglist.status != 'Retired':
                retired = False
        return retired
@property
def monitoring_status(self):
""" Return the monitoring status of the package be it either True,
False or nobuild.
"""
monitor = self.monitor
if str(monitor).lower() in ['1', 'true']:
monitor = True
elif str(monitor).lower() in ['0', 'false']:
monitor = False
return monitor
def __hash__(self):
""" Returns the name of the package as hash. """
ord3 = lambda arg: '%.3d' % ord(arg)
return int(''.join([ord3(char) for char in self.name]))
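    # For example, a package named 'gcc' hashes to int('103099099'): the
    # zero-padded ordinals of 'g' (103), 'c' (099) and 'c' (099) are simply
    # concatenated, so equal names always hash to the same value.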
def __repr__(self):
""" The string representation of this object.
"""
return 'Package(%r, %r, %r, upstreamurl=%r, reviewurl=%r)' % (
self.name, self.summary, self.status,
self.upstream_url, self.review_url)
def create_listing(self, collection, point_of_contact, statusname,
critpath=False):
"""Create a new PackageListing branch on this Package.
:arg collection: Collection that the new PackageListing lives on
:arg owner: The owner of the PackageListing
:arg statusname: Status to set the PackageListing to
:kwarg critpath: a boolean specifying if the package is marked as
being in critpath.
:returns: The new PackageListing object.
This creates a new PackageListing for this Package.
The PackageListing has default values set for group acls.
"""
pkg_listing = PackageListing(point_of_contact=point_of_contact,
status=statusname,
collection_id=collection.id,
critpath=critpath)
pkg_listing.package_id = self.id
return pkg_listing
@classmethod
def all(cls, session):
""" Return the list of all Packages present in the database.
:arg cls: the class object
:arg session: the database session used to query the information.
"""
return session.query(cls).all()
@classmethod
def get_monitored(cls, session):
""" Return the list of all Packages present in the database and
listed are `monitor`.
:arg cls: the class object
:arg session: the database session used to query the information.
"""
return session.query(
cls
).filter(
Package.monitor.in_(['1', 'true', 'True', 'nobuild'])
).order_by(
Package.name
).all()
@classmethod
def get_koschei_monitored(cls, session):
""" Return the list of all Packages present in the database and
marked to be monitored by koschei.
:arg cls: the class object
:arg session: the database session used to query the information.
"""
return session.query(
cls
).filter(
Package.koschei == True
).order_by(
Package.name
).all()
@classmethod
def get_latest_package(cls, session, limit=10):
""" Return the list of the most recent packages added to the
database.
:arg session: session with which to connect to the database.
:kwarg limit: the number of packages to return.
:returns: a list of ``Package`` ordered from the most recently
added to the oldest.
:rtype: list(Package)
"""
query = session.query(
Package
).order_by(
Package.date_created.desc()
).limit(limit)
return query.all()
@classmethod
def search(
cls, session, namespace, pkg_name, pkg_poc=None, pkg_status=None,
pkg_branch=None, orphaned=None, critpath=None, eol=False,
offset=None, limit=None, count=False, case_sensitive=True):
""" Search the Packages for the one fitting the given pattern.
:arg session: session with which to connect to the database
:arg pkg_name: the name of the package
        :kwarg pkg_poc: name of the point of contact for the package
:kwarg pkg_status: status of the package
:kwarg pkg_branch: branchname of the collection to search.
:kwarg orphaned: a boolean specifying if the search should be
restricted to only orphaned or not-orphaned packages.
        :kwarg critpath: Boolean to restrict the search to critpath packages.
:kwarg eol: a boolean to specify whether to include results for
EOL collections or not. Defaults to False.
If True, it will return results for all collections
(including EOL).
If False, it will return results only for non-EOL collections.
:kwarg namespace: the namespace of the packages to restrict with.
:kwarg offset: the offset to apply to the results
:kwarg limit: the number of results to return
:kwarg count: a boolean to return the result of a COUNT query
if true, returns the data if false (default).
        :kwarg case_sensitive: a boolean specifying whether the search should
            be case sensitive. Defaults to True.
"""
query = session.query(
sa.func.distinct(Package.id)
)
if '%' not in pkg_name and case_sensitive:
query = query.filter(
Package.name == pkg_name
)
elif '%' in pkg_name and case_sensitive:
query = query.filter(
Package.name.like(pkg_name)
)
elif '%' not in pkg_name and not case_sensitive:
query = query.filter(
sa.func.lower(Package.name) == sa.func.lower(pkg_name)
)
else:
query = query.filter(
Package.name.ilike(pkg_name)
)
if namespace:
query = query.filter(
Package.namespace == namespace
)
if pkg_poc:
query = query.filter(
PackageListing.package_id == Package.id
).filter(
PackageListing.point_of_contact == pkg_poc
).filter(
Collection.status != 'EOL'
)
if pkg_status:
query = query.filter(
PackageListing.package_id == Package.id
).filter(
PackageListing.status == pkg_status
).filter(
PackageListing.collection_id == Collection.id
).filter(
Collection.status != 'EOL'
)
if pkg_branch:
query = query.filter(
PackageListing.package_id == Package.id
).filter(
PackageListing.collection_id == Collection.id
).filter(
Collection.branchname == pkg_branch
)
if orphaned is not None:
if orphaned is True:
query = query.filter(
PackageListing.package_id == Package.id
).filter(
PackageListing.status == 'Orphaned'
)
else:
query = query.filter(
PackageListing.package_id == Package.id
).filter(
PackageListing.status != 'Orphaned'
)
if critpath is not None:
query = query.filter(
PackageListing.package_id == Package.id
).filter(
PackageListing.critpath == critpath
)
if not eol:
query = query.filter(
PackageListing.package_id == Package.id
).filter(
PackageListing.collection_id == Collection.id
).filter(
Collection.status != 'EOL'
)
final_query = session.query(
Package
).filter(
Package.id.in_(query.subquery())
).order_by(
Package.name
)
if count:
return final_query.count()
if offset:
final_query = final_query.offset(offset)
if limit:
final_query = final_query.limit(limit)
return final_query.all()
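    # Illustrative example (branch name is hypothetical): a case-insensitive
    # search for approved packages whose name starts with 'python-'.
    #
    #     Package.search(
    #         session, 'rpms', 'python-%', pkg_status='Approved',
    #         pkg_branch='master', case_sensitive=False)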
@classmethod
def count_collection(cls, session):
""" Return the number of packages present in each collection.
:arg session: session with which to connect to the database
"""
query = session.query(
Collection.branchname,
sa.func.count(sa.func.distinct(Package.id))
).filter(
PackageListing.package_id == Package.id
).filter(
PackageListing.collection_id == Collection.id
).filter(
Package.status == 'Approved'
).filter(
Collection.status != 'EOL'
).filter(
PackageListing.status == 'Approved'
).group_by(
Collection.branchname
).order_by(
Collection.branchname
)
return query.all()
@classmethod
def count_fedora_collection(cls, session):
""" Return the number of packages present in each Fedora collection.
:arg session: session with which to connect to the database
"""
query = session.query(
Collection.version,
sa.func.count(sa.func.distinct(Package.id))
).filter(
PackageListing.package_id == Package.id
).filter(
PackageListing.collection_id == Collection.id
).filter(
Package.status == 'Approved'
).filter(
Collection.name == 'Fedora'
).filter(
PackageListing.status == 'Approved'
).group_by(
Collection.branchname, Collection.version
).order_by(
Collection.version
)
return query.all()
@classmethod
def get_package_of_user(
cls, session, user, pkg_status=None, poc=True, eol=False):
""" Return the list of packages on which a given user has commit
rights and is poc (unless specified otherwise).
:arg session: session with which to connect to the database.
:arg user: the FAS username of the user of interest.
:kwarg pkg_status: the status of the packages considered.
:kwarg poc: boolean to specify if the results should be restricted
to packages where ``user`` is the point of contact or packages
where ``user`` is not the point of contact.
        :kwarg eol: a boolean to specify whether the output should include
End Of Life releases or not.
"""
query = session.query(
Package,
Collection
).filter(
Package.id == PackageListing.package_id
).filter(
PackageListing.id == PackageListingAcl.packagelisting_id
).filter(
PackageListing.collection_id == Collection.id
).filter(
PackageListing.status == 'Approved'
).filter(
PackageListingAcl.fas_name == user
).filter(
PackageListingAcl.acl == 'commit'
).filter(
PackageListingAcl.status == 'Approved'
).order_by(
Package.name, Collection.branchname
)
if eol is False:
query = query.filter(Collection.status != 'EOL')
if pkg_status:
query = query.filter(Package.status == pkg_status)
if poc:
query = query.filter(PackageListing.point_of_contact == user)
else:
query = query.filter(PackageListing.point_of_contact != user)
return query.all()
@classmethod
def get_package_watch_by_user(
cls, session, user, pkg_status=None, eol=False):
""" Return the list of packages watch by a given user.
:arg session: session with which to connect to the database.
:arg user: the FAS username of the user of interest.
:kwarg pkg_status: the status of the packages considered.
        :kwarg eol: a boolean to specify whether the output should include
End Of Life releases or not.
"""
query = session.query(
Package,
Collection
).filter(
Package.id == PackageListing.package_id
).filter(
PackageListing.id == PackageListingAcl.packagelisting_id
).filter(
PackageListing.collection_id == Collection.id
).filter(
PackageListing.status == 'Approved'
).filter(
PackageListingAcl.fas_name == user
).filter(
PackageListingAcl.acl.in_(['watchbugzilla', 'watchcommits'])
).filter(
PackageListingAcl.status == 'Approved'
).order_by(
Package.name, Collection.branchname
)
if eol is False:
query = query.filter(Collection.status != 'EOL')
if pkg_status:
query = query.filter(Package.status == pkg_status)
return query.all()
@classmethod
def get_retired(cls, session, collection):
""" Return the list of all Packages present in the database that are
retired on all the active branch of the specified collection.
:arg cls: the class object
:arg session: the database session used to query the information.
"""
subq = session.query(
Package.id
).filter(
Package.id == PackageListing.package_id
).filter(
PackageListing.status != 'Retired'
).filter(
PackageListing.collection_id == Collection.id
).filter(
Collection.name == collection
).filter(
Collection.status != 'EOL'
).subquery()
subq2 = session.query(
Package.id
).filter(
Package.id == PackageListing.package_id
).filter(
PackageListing.status == 'Retired'
).filter(
PackageListing.collection_id == Collection.id
).filter(
Collection.name == collection
).filter(
Collection.status != 'EOL'
).subquery()
query = session.query(
Package
).filter(
Package.id.in_(subq2)
).filter(
not_(Package.id.in_(subq))
).order_by(
Package.name
)
return query.all()
def to_json(self, _seen=None, acls=True, package=True, collection=None):
""" Return a dictionnary representation of the object.
"""
_seen = _seen or []
cls = type(self)
## pylint complains about timetuple() but it is a method
# pylint: disable=E1102
result = {
'name': self.name,
'namespace': self.namespace,
'summary': self.summary,
'description': self.description,
'status': self.status,
'review_url': self.review_url,
'upstream_url': self.upstream_url,
'creation_date': time.mktime(self.date_created.timetuple()),
'monitor': self.monitoring_status,
'koschei_monitor': self.koschei,
}
_seen.append(cls)
# Protect against infinite recursion
result['acls'] = []
if acls and PackageListing not in _seen:
if isinstance(collection, basestring):
collection = [collection]
for pkg in self.listings:
if collection:
if pkg.collection.branchname in collection:
result['acls'].append(
pkg.to_json(_seen, package=package, acls=acls))
else:
result['acls'].append(
pkg.to_json(_seen, package=package, acls=acls))
return result
class Log(BASE):
"""Base Log record.
This is a Log record. All logs will be entered via a subclass of this.
Table -- Log
"""
__tablename__ = 'log'
id = sa.Column(sa.Integer, nullable=False, primary_key=True)
user = sa.Column(sa.String(32), nullable=False, index=True)
change_time = sa.Column(sa.DateTime, nullable=False,
default=datetime.datetime.utcnow, index=True)
package_id = sa.Column(
sa.Integer,
sa.ForeignKey(
'package.id', ondelete='SET NULL', onupdate='CASCADE'),
nullable=True,
index=True)
description = sa.Column(sa.Text, nullable=False)
def __init__(self, user, package_id, description):
self.user = user
self.package_id = package_id
self.description = description
def __repr__(self):
""" The string representation of this object.
"""
return 'Log(user=%r, description=%r, change_time=%r)' % (
self.user, self.description,
self.change_time.strftime('%Y-%m-%d %H:%M:%S'))
@classmethod
def search(cls, session, package_id=None, packager=None,
from_date=None, limit=None,
offset=None, count=False):
""" Return the list of the last Log entries present in the database.
:arg cls: the class object
:arg session: the database session used to query the information.
        :kwarg package_id: restrict the logs to a certain package.
:kwarg packager: restrict the logs to a certain user/packager.
:kwarg from_date: a date from which to retrieve the logs.
:kwarg limit: limit the result to X row
:kwarg offset: start the result at row X
:kwarg count: a boolean to return the result of a COUNT query
if true, returns the data if false (default).
"""
query = session.query(
cls
)
if package_id:
query = query.filter(cls.package_id == package_id)
if packager:
query = query.filter(cls.user == packager)
if from_date:
query = query.filter(cls.change_time <= from_date)
query = query.order_by(cls.change_time.desc())
if count:
return query.count()
if offset:
query = query.offset(offset)
if limit:
query = query.limit(limit)
return query.all()
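    # Illustrative example (the `pkg` object is hypothetical): the ten most
    # recent log entries recorded for a given package.
    #
    #     Log.search(session, package_id=pkg.id, limit=10)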
@classmethod
def insert(cls, session, user, package, description):
""" Insert the given log entry into the database.
:arg session: the session to connect to the database with
:arg user: the username of the user doing the action
:arg package: the `Package` object of the package changed
:arg description: a short textual description of the action
performed
"""
if package:
log = Log(user, package.id, description)
else:
log = Log(user, None, description)
session.add(log)
session.flush()
class AdminAction(BASE):
"""This table stores the actions asked by user and requiring an
intervention from an admin (often a rel-eng person).
Table -- admin_actions
"""
__tablename__ = 'admin_actions'
id = sa.Column(sa.Integer, nullable=False, primary_key=True)
package_id = sa.Column(
sa.Integer,
sa.ForeignKey(
'package.id', ondelete="CASCADE", onupdate="CASCADE"),
nullable=True)
collection_id = sa.Column(
sa.Integer,
sa.ForeignKey(
'collection.id', ondelete="CASCADE", onupdate="CASCADE"),
nullable=False)
_status = sa.Column(
sa.String(50),
sa.ForeignKey('action_status.status', onupdate='CASCADE'),
name='status',
nullable=False,
index=True)
user = sa.Column(sa.Text, nullable=False, index=True)
action = sa.Column(sa.Text, nullable=False, index=True)
info = sa.Column(sa.Text, nullable=True)
message = sa.Column(sa.Text, nullable=True)
date_created = sa.Column(sa.DateTime, nullable=False,
default=datetime.datetime.utcnow)
date_change = sa.Column(sa.DateTime, nullable=False,
default=datetime.datetime.utcnow,
onupdate=sa.func.now())
__table_args__ = (
sa.UniqueConstraint(
'user', 'action', 'package_id', 'collection_id'),
)
package = relation(
"Package",
backref=backref("requests", order_by=collection_id)
)
collection = relation(
"Collection",
foreign_keys=[collection_id], remote_side=[Collection.id],
)
@property
def info_data(self):
""" Return the dict stored as string in the database as an actual
dict object.
"""
if self.info:
return json.loads(self.info)
else:
return {}
@property
def status(self):
""" Returns the status of the admin action. """
if self._status == 'Pending':
if (datetime.datetime.utcnow() - self.date_created).days < 7:
return self._status
else:
return 'Awaiting Review'
return self._status
def to_json(self, _seen=None, acls=True, package=True, collection=None):
""" Return a dictionnary representation of the object.
"""
_seen = _seen or []
## pylint complains about timetuple() but it is a method
# pylint: disable=E1102
pkg = None
if self.package:
pkg = self.package.to_json(acls=False)
result = {
'id': self.id,
'action': self.action,
'user': self.user,
'status': self.status,
'package': pkg,
'collection': self.collection.to_json(),
'date_created': time.mktime(self.date_created.timetuple()),
'date_updated': time.mktime(self.date_change.timetuple()),
'info': self.info_data,
'message': self.message,
}
return result
@classmethod
def search(cls, session, package_id=None, collection_id=None,
packager=None, action=None, user=None,
status=None, offset=None, limit=None, count=False,
order='asc'):
""" Return the list of actions present in the database and
        matching these criteria.
:arg cls: the class object
:arg session: the database session used to query the information.
        :kwarg package_id: restrict the actions to a certain package.
        :kwarg packager: restrict the actions to a certain user/packager.
:kwarg action: a type of action to search for.
:kwarg status: restrict the requests returned to the ones with this
status.
:kwarg limit: limit the result to X row
:kwarg offset: start the result at row X
:kwarg count: a boolean to return the result of a COUNT query
if true, returns the data if false (default).
:kwarg order: the order in which to return the requests, default to
``asc`` meaning from the oldest to the most recent, can be
``desc`` meaning from the most recent to the oldest.
"""
query = session.query(
cls
)
if package_id:
query = query.filter(cls.package_id == package_id)
if collection_id:
query = query.filter(cls.collection_id == collection_id)
if packager:
query = query.filter(cls.user == packager)
if action:
query = query.filter(cls.action == action)
if user:
query = query.filter(cls.user == user)
if status:
if status not in ['Awaiting Review', 'Pending']:
query = query.filter(cls._status == status)
# Pending and Awaiting Review status are peculiar.
# After 7 days a Pending request is automatically converted
# to Awaiting Review.
# This gives 7 days to the packagers with approveacls on the
# package to block or set the request to Awaiting Review (ie
# ask rel-eng to review it)
elif status == 'Pending':
# To be pending a request should be Pending and less than
# 7 days old
query = query.filter(
and_(
cls._status == status,
cls.date_created > (datetime.datetime.utcnow(
).date() - datetime.timedelta(days=6)),
)
)
else:
# To be Awaiting Review, a request should be Awaiting Review
# or Pending and 7 days old or more.
query = query.filter(
or_(
cls._status == status,
and_(
cls._status == 'Pending',
cls.date_created < (datetime.datetime.utcnow(
).date() - datetime.timedelta(days=6)),
)
)
)
if order == 'desc':
query = query.order_by(cls.date_created.desc())
else:
query = query.order_by(cls.date_created.asc())
if count:
return query.count()
if offset:
query = query.offset(offset)
if limit:
query = query.limit(limit)
return query.all()
@classmethod
def get(cls, session, action_id):
""" Return the admin action object having the specified identifier.
:arg cls: the class object
:arg session: the database session used to query the information.
:arg action_id: the identifier of the Admin Action object to return.
"""
query = session.query(cls).filter(cls.id == action_id)
return query.first()
def notify(session, eol=False, name=None, version=None, acls=None):
""" Return the user that should be notify for each package.
:arg session: the session to connect to the database with.
    :kwarg eol: a boolean to specify whether the output should include End
Of Life releases or not.
:kwarg name: restricts the output to a specific collection name.
:kwarg version: restricts the output to a specific collection version.
:kwarg acls: a list of ACLs to filter the package/user to retrieve.
If no acls is specified it defaults to
``['watchcommits', 'watchbugzilla', 'commit']`` which means that it
will return any person having one of these three acls for each
package in the database.
If the acls specified is ``all`` then all ACLs are used.
"""
if acls is None:
acls = ['watchcommits', 'watchbugzilla', 'commit']
elif acls == 'all':
        acls = ['watchcommits', 'watchbugzilla', 'commit', 'approveacls']
elif isinstance(acls, basestring):
acls = [acls]
query = session.query(
Package,
PackageListingAcl.fas_name
).join(
PackageListing,
PackageListingAcl
).filter(
Package.id == PackageListing.package_id
).filter(
PackageListingAcl.packagelisting_id == PackageListing.id
).filter(
PackageListing.collection_id == Collection.id
).filter(
Package.status == 'Approved'
).filter(
PackageListing.point_of_contact != 'orphan'
).filter(
PackageListingAcl.acl.in_(acls)
).filter(
PackageListingAcl.status == 'Approved'
).group_by(
Package.name, PackageListingAcl.fas_name, Package.id
).order_by(
Package.namespace,
Package.name,
)
if eol is False:
query = query.filter(Collection.status != 'EOL')
if name:
query = query.filter(Collection.name == name)
if version:
query = query.filter(Collection.version == version)
return query.all()
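# Illustrative example: the default call returns one (Package, username) row
# per watch/commit ACL holder on approved, non-orphaned packages in non-EOL
# collections; restricting to a collection name is optional.
#
#     for package, username in notify(session, eol=False, name='Fedora'):
#         print(package.name, username)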
def bugzilla(session, name=None):
""" Return information for each package to sync with bugzilla.
:arg session: the session to connect to the database with.
:kwarg name: restricts the output to a specific collection name.
"""
query = session.query(
Collection.name, # 0
Collection.version, # 1
Package.name, # 2
Package.summary, # 3
PackageListing.point_of_contact, # 4
PackageListingAcl.fas_name, # 5
Collection.branchname, # 6
Package.namespace, # 7
).filter(
Package.id == PackageListing.package_id
).filter(
PackageListingAcl.packagelisting_id == PackageListing.id
).filter(
PackageListing.collection_id == Collection.id
).filter(
Package.status == 'Approved'
).filter(
Collection.status != 'EOL'
).filter(
PackageListingAcl.acl.in_(
['watchbugzilla'])
).filter(
PackageListingAcl.status == 'Approved'
).group_by(
Collection.name, Package.namespace, Package.name,
PackageListing.point_of_contact, PackageListingAcl.fas_name,
Package.summary, Collection.branchname, Collection.version
).order_by(
Package.name
)
if name:
query = query.filter(Collection.name == name)
return query.all()
def vcs_acls(session, eol=False, collection=None, namespace=None):
""" Return information for each package to sync with git.
:arg session: the session to connect to the database with.
:kwarg eol: A boolean specifying whether to include information about
End Of Life collections or not. Defaults to ``False``.
:kwarg collection: Restrict the VCS info to a specific collection
:kwarg namespace: Restrict the VCS info returned to a given namespace
"""
query = session.query(
Package.name, # 0
PackageListingAcl.fas_name, # 1
Collection.branchname, # 2
        Package.namespace, # 3
).filter(
Package.id == PackageListing.package_id
).filter(
PackageListingAcl.packagelisting_id == PackageListing.id
).filter(
PackageListing.collection_id == Collection.id
).filter(
PackageListing.status.in_(['Approved', 'Orphaned'])
)
if collection is not None:
query = query.filter(
Collection.branchname == collection
)
if not eol:
query = query.filter(
Collection.status != 'EOL')
if namespace is not None:
query = query.filter(
Package.namespace == namespace
)
query = query.filter(
PackageListingAcl.acl == 'commit'
).filter(
PackageListingAcl.status == 'Approved'
).group_by(
Package.namespace, Package.name, PackageListingAcl.fas_name,
Collection.branchname,
).order_by(
Package.name
)
data = query.all()
    sub = set([(it[0], it[2], it[3]) for it in data])
query2 = session.query(
Package.name, # 0
Collection.branchname, # 1
Package.namespace, # 2
).filter(
Package.id == PackageListing.package_id
).filter(
PackageListing.collection_id == Collection.id
).filter(
PackageListing.status.in_(['Approved', 'Orphaned'])
).group_by(
Package.namespace,
Package.name,
Collection.branchname,
).order_by(
Package.name,
Collection.branchname
)
if collection is not None:
query2 = query2.filter(
Collection.branchname == collection
)
if not eol:
query2 = query2.filter(
Collection.status != 'EOL')
if namespace is not None:
query2 = query2.filter(
Package.namespace == namespace
)
sub2 = set(query2.all())
for entry in sub2 - sub:
data.append([entry[0], None, entry[1], entry[2]])
return data
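# Illustrative example: each row is (package name, FAS user or None, branch
# name, namespace); a package with no approved 'commit' ACL on a branch still
# appears once with a None user so the VCS repository can be created.
#
#     for name, user, branch, namespace in vcs_acls(session, namespace='rpms'):
#         print(namespace, name, branch, user)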
def get_groups(session):
""" Return the list of FAS groups involved in maintaining packages in
the database.
"""
query_poc = session.query(
sa.distinct(PackageListing.point_of_contact)
).filter(
PackageListing.point_of_contact.like('group::%')
).filter(
PackageListing.collection_id == Collection.id
).filter(
PackageListing.status == 'Approved'
).filter(
Collection.status != 'EOL'
)
query_acl = session.query(
sa.distinct(PackageListingAcl.fas_name)
).filter(
PackageListingAcl.fas_name.like('group::%')
).filter(
PackageListing.collection_id == Collection.id
).filter(
PackageListingAcl.packagelisting_id == PackageListing.id
).filter(
PackageListing.status == 'Approved'
).filter(
Collection.status != 'EOL'
)
groups = []
for group in query_poc.union(query_acl).all():
groups.append(group[0].split('group::')[1])
return groups
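# Illustrative example: group maintainers are stored with a 'group::' prefix
# in the point of contact and ACL columns, so get_groups(session) returns the
# bare group names, e.g. ['provenpackager', 'kde-sig'] (values hypothetical).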
| gpl-2.0 | -7,781,255,379,381,222,000 | 31.499556 | 82 | 0.57868 | false |
gdhungana/desispec | py/desispec/brick.py | 2 | 5057 | """
desispec.brick
==============
Code for calculating bricks, which are a tiling of the sky with the following
properties:
- bricks form rows in dec like a brick wall; edges are constant RA or dec
- they are rectangular with longest edge shorter or equal to bricksize
- circles at the poles with diameter=bricksize
- there are an even number of bricks per row
Use this with caution! In most cases you should be propagating brick
info from input targeting, not recalculating brick locations and names.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
class Bricks(object):
"""Bricks Object
"""
def __init__(self, bricksize=0.5):
"""Create Bricks object such that all bricks have longest size < bricksize
"""
#- Brick row centers and edges
center_dec = np.arange(-90.0, +90.0+bricksize/2, bricksize)
edges_dec = np.arange(-90.0-bricksize/2, +90.0+bricksize, bricksize)
nrow = len(center_dec)
#- How many columns per row: even number, no bigger than bricksize
ncol_per_row = np.zeros(nrow, dtype=int)
for i in range(nrow):
declo = np.abs(center_dec[i])-bricksize/2
n = (360/bricksize * np.cos(declo*np.pi/180))
ncol_per_row[i] = int(np.ceil(n/2)*2)
#- special cases at the poles
ncol_per_row[0] = 1
ncol_per_row[-1] = 1
#- ra
center_ra = list()
edges_ra = list()
for i in range(nrow):
edges = np.linspace(0, 360, ncol_per_row[i]+1)
edges_ra.append( edges )
center_ra.append( 0.5*(edges[0:-1]+edges[1:]) )
### dra = edges[1]-edges[0]
### center_ra.append(dra/2 + np.arange(ncol_per_row[i])*dra)
#- More special cases at the poles
edges_ra[0] = edges_ra[-1] = np.array([0, 360])
center_ra[0] = center_ra[-1] = np.array([180,])
#- Brick names [row, col]
brickname = list()
for i in range(nrow):
pm = 'p' if center_dec[i] >= 0 else 'm'
dec = center_dec[i]
names = list()
for j in range(ncol_per_row[i]):
ra = center_ra[i][j]
names.append('{:04d}{}{:03d}'.format(int(ra*10), pm, int(abs(dec)*10)))
brickname.append(names)
self._bricksize = bricksize
self._ncol_per_row = ncol_per_row
self._brickname = brickname
self._center_dec = center_dec
self._edges_dec = edges_dec
self._center_ra = center_ra
self._edges_ra = edges_ra
def brickname(self, ra, dec):
"""Return string name of brick that contains (ra, dec) [degrees]
Args:
ra (float) : Right Ascension in degrees
dec (float) : Declination in degrees
Returns:
brick name string
"""
inra, indec = ra, dec
dec = np.atleast_1d(dec)
ra = np.atleast_1d(ra)
irow = ((dec+90.0+self._bricksize/2)/self._bricksize).astype(int)
names = list()
for i in range(len(ra)):
ncol = self._ncol_per_row[irow[i]]
j = int(ra[i]/360 * ncol)
names.append(self._brickname[irow[i]][j])
if np.isscalar(inra):
return names[0]
else:
return np.array(names)
def brick_radec(self, ra, dec):
"""Return center (ra,dec) of brick that contains input (ra, dec) [deg]
"""
inra, indec = ra, dec
dec = np.asarray(dec)
ra = np.asarray(ra)
irow = ((dec+90.0+self._bricksize/2)/self._bricksize).astype(int)
jcol = (ra/360 * self._ncol_per_row[irow]).astype(int)
if np.isscalar(inra):
xra = self._center_ra[irow][jcol]
xdec = self._center_dec[irow]
else:
xra = np.array([self._center_ra[i][j] for i,j in zip(irow, jcol)])
xdec = self._center_dec[irow]
return xra, xdec
_bricks = None
def brickname(ra, dec):
"""Return brick name of brick covering (ra, dec) [degrees]
"""
global _bricks
if _bricks is None:
_bricks = Bricks()
return _bricks.brickname(ra, dec)
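# Illustrative example: with the default 0.5 deg bricksize the brick containing
# (ra=0, dec=0) is centered at ra=0.25, dec=0.0, so brickname(0.0, 0.0) should
# return '0002p000': int(RA*10) zero-padded to four digits, 'p'/'m' for the
# sign of dec, and int(|dec|*10) zero-padded to three digits.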
#
# THIS CODE SHOULD BE MOVED TO A TEST.
#
if __name__ == '__main__':
import os
from astropy.io import fits
d = fits.getdata(os.getenv('HOME')+'/temp/bricks-0.50.fits')
b = Bricks(0.5)
ntest = 10000
ra = np.random.uniform(0, 360, size=ntest)
dec = np.random.uniform(-90, 90, size=ntest)
bricknames = b.brickname(ra, dec)
for row in range(len(b._center_dec)):
n = len(d.BRICKROW[d.BRICKROW==row])
if n != b._ncol_per_row[row]:
print(row, n, len(b._center_ra[row]))
for i in range(ntest):
ii = np.where( (d.DEC1 <= dec[i]) & (dec[i] < d.DEC2) & (d.RA1 <= ra[i]) & (ra[i] < d.RA2) )[0][0]
if bricknames[i] != d.BRICKNAME[ii]:
print(bricknames[i], d.BRICKNAME[ii], ra[i], dec[i], b.brick_radec(ra[i], dec[i]), (d.RA[ii], d.DEC[ii]))
| bsd-3-clause | -6,347,747,486,980,550,000 | 32.713333 | 117 | 0.556456 | false |
jszopi/repESP | tests/test_calc_field.py | 1 | 5401 | from repESP.calc_fields import esp_from_charges, voronoi, calc_rms_error, calc_relative_rms_error
from repESP.charges import *
from repESP.esp_util import parse_gaussian_esp
from repESP.fields import *
from repESP.types import *
from repESP.gaussian_format import get_charges_from_log, MkChargeSectionParser
from my_unittest import TestCase
class SmallTestCase(TestCase):
def setUp(self) -> None:
self.mesh = Mesh([
Coords((1, 1, 1)),
Coords((-1, 0, -0.9))
])
self.gridMesh = GridMesh(
origin=Coords((0.1, 0.2, 0.3)),
axes=GridMesh.Axes((
GridMesh.Axis(
vector=Coords((0.2, 0, 0)),
point_count=3
),
GridMesh.Axis(
vector=Coords((0, 0.3, 0)),
point_count=3
),
GridMesh.Axis(
vector=Coords((0, 0, 0.4)),
point_count=3
),
))
)
self.molecule = Molecule(
atoms=[
AtomWithCoords(atomic_number=1, coords=Coords((0, 1, 0.5))),
AtomWithCoords(atomic_number=1, coords=Coords((-0.4, 0.2, 0.5)))
]
)
class TestEspFromCharges(SmallTestCase):
def setUp(self) -> None:
super().setUp()
self.molecule_with_charges = Molecule([
AtomWithCoordsAndCharge(
atom.atomic_number,
atom.coords,
charge
)
for atom, charge in zip(
self.molecule.atoms,
[Charge(x) for x in [0.5, -0.9]]
)
])
def test_non_grid_esp(self) -> None:
result = esp_from_charges(self.mesh, self.molecule_with_charges)
expected = Field(
self.mesh,
[
Esp(-0.08590039),
Esp(-0.33459064)
]
)
self.assertEqual(result.mesh, expected.mesh)
self.assertListsAlmostEqual(result.values, expected.values)
def test_grid_esp(self) -> None:
expected_floats = [
-1.06932877, -1.06932877, -0.65481332, -0.54712186, -0.54712186,
-0.44070511, 0.55035405, 0.55035405, -0.13294273, -0.66644219,
-0.66644219, -0.49727391, -0.33189403, -0.33189403, -0.33066481,
0.25868003, 0.25868003, -0.10389610, -0.45771121, -0.45771121,
-0.38483669, -0.24786530, -0.24786530, -0.26261985, 0.05220646,
0.05220646, -0.10743320
]
expected = Field(self.gridMesh, [Esp(x) for x in expected_floats])
result = esp_from_charges(self.gridMesh, self.molecule_with_charges)
self.assertEqual(result.mesh, expected.mesh)
self.assertListsAlmostEqual(result.values, expected.values)
class TestVoronoi(SmallTestCase):
def setUp(self) -> None:
super().setUp()
def test_non_grid_esp(self) -> None:
result = voronoi(self.mesh, self.molecule)
expected = Field(
self.mesh,
[
(0, Dist(1.11803398)),
(1, Dist(1.53622914))
]
)
self.assertAlmostEqualRecursive(expected, result)
def test_grid_esp(self) -> None:
expected_floats = [
0.53851648, 0.53851648, 0.78102496, 0.54772255, 0.54772255,
0.78740078, 0.30000000, 0.30000000, 0.64031242, 0.72801098,
0.72801098, 0.92195444, 0.61644140, 0.61644140, 0.83666002,
0.41231056, 0.41231056, 0.70000000, 0.92195444, 0.92195444,
1.08166538, 0.73484692, 0.73484692, 0.92736184, 0.57445626,
0.57445626, 0.80622577
]
expected_closest_atom = [
1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1,
1, 0, 0, 0, 0, 0, 0
]
expected = Field(
self.gridMesh,
list(zip(
expected_closest_atom,
[Dist(x) for x in expected_floats]
))
)
result = voronoi(self.gridMesh, self.molecule)
self.assertAlmostEqualRecursive(expected, result)
class TestCalcStats(TestCase):
def setUp(self) -> None:
with open("data/methane/methane_mk.esp") as f:
gaussian_esp = parse_gaussian_esp(f)
self.esp_values = gaussian_esp.field.values
molecule = gaussian_esp.molecule
with open("data/methane/methane_mk.log") as f:
charges = get_charges_from_log(f, MkChargeSectionParser(), verify_against=molecule)
molecule_with_charges = Molecule([
AtomWithCoordsAndCharge(
atom.atomic_number,
atom.coords,
charge
)
for atom, charge in zip(molecule.atoms, charges)
])
self.rep_esp_values = esp_from_charges(gaussian_esp.field.mesh, molecule_with_charges).values
def test_rms(self) -> None:
self.assertAlmostEqual(
calc_rms_error(self.esp_values, self.rep_esp_values),
0.00069, # from Gaussian log
places=5
)
def test_rrms(self) -> None:
self.assertAlmostEqual(
calc_relative_rms_error(self.esp_values, self.rep_esp_values),
0.35027, # from Gaussian log
places=5
)
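# Illustrative note (conventional definitions, hedged -- the authoritative ones
# live in repESP.calc_fields): rms = sqrt(mean((esp - rep_esp)**2)) and
# rrms = rms / sqrt(mean(esp**2)); the expected values 0.00069 and 0.35027 are
# the figures reported in the Gaussian log for this methane MK fit.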
| gpl-3.0 | 1,535,987,271,763,700,700 | 29.514124 | 101 | 0.535827 | false |
borchert/metadata-tracking | scripts/decipher_arcgis_server_url_mgc.py | 1 | 1852 | print "importing arcpy"
import arcpy
print "done importing arcpy"
import os
import shutil
import zipfile
from glob import glob
import pdb
BASE_IN = r"C:\workspace\metadata-tracking\mn-geospatial-commons"
#BASE_OUT = r"C:\workspace\temp_gdrs"
"""
for root, dirs, files in os.walk(BASE_IN):
    if "ags_mapserver" in dirs:
        path_to_lyr = os.path.join(root, "ags_mapserver")
        g = glob(os.path.join(path_to_lyr, "*.lyr"))
        resource_name = os.path.split(root)[-1]
        print resource_name
        os.mkdir(os.path.join(BASE_OUT, resource_name))
        shutil.copyfile(os.path.join(root, "dataResource.xml"), os.path.join(BASE_OUT, resource_name, "dataResource.xml"))
        shutil.copyfile(os.path.join(root, "metadata", "metadata.xml"), os.path.join(BASE_OUT, resource_name, "metadata.xml"))
        shutil.copyfile(g[0], os.path.join(BASE_OUT, resource_name, os.path.split(g[0])[-1]))
"""
# Optional single-layer inspection: fill in a .lyr path to list its visible sub-layers.
path_to_lyr = ""
if path_to_lyr:
    group_lyr = arcpy.mapping.Layer(path_to_lyr)
    for index, i in enumerate(arcpy.mapping.ListLayers(group_lyr)):
        if i.visible:
            print index - 1, i.longName
# Walk the downloaded resources and record the index of each visible sub-layer.
for root, dirs, files in arcpy.da.Walk(BASE_IN, datatype=["Layer"]):
    if len(files) > 0:
        for i in files:
            lyr = arcpy.mapping.Layer(os.path.join(root, i))
            if lyr.isGroupLayer:
                for index, ly in enumerate(arcpy.mapping.ListLayers(lyr)):
                    if ly.visible:
                        #print lyr.serviceProperties["URL"] + "/" + ly.longName.replace("\\","/") + "/" + str(index-1)
                        layer_index_txt = open(os.path.join(root, "lyr_index" + str(index - 1) + ".txt"), "wb")
                        layer_index_txt.write(str(index - 1))
                        layer_index_txt.close()
| gpl-2.0 | 1,741,071,960,235,151,000 | 38.404255 | 127 | 0.602052 | false |
Lujeni/ansible | lib/ansible/parsing/yaml/objects.py | 31 | 4272 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import yaml
from ansible.module_utils.six import text_type
from ansible.module_utils._text import to_bytes, to_text, to_native
class AnsibleBaseYAMLObject(object):
'''
the base class used to sub-class python built-in objects
so that we can add attributes to them during yaml parsing
'''
_data_source = None
_line_number = 0
_column_number = 0
def _get_ansible_position(self):
return (self._data_source, self._line_number, self._column_number)
def _set_ansible_position(self, obj):
try:
(src, line, col) = obj
except (TypeError, ValueError):
raise AssertionError(
'ansible_pos can only be set with a tuple/list '
'of three values: source, line number, column number'
)
self._data_source = src
self._line_number = line
self._column_number = col
ansible_pos = property(_get_ansible_position, _set_ansible_position)
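    # Illustrative use of the position metadata (the values are hypothetical):
    #   node = AnsibleUnicode(u'value')
    #   node.ansible_pos = ('site.yml', 12, 3)   # (source, line number, column)
    #   src, line, col = node.ansible_pos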
class AnsibleMapping(AnsibleBaseYAMLObject, dict):
''' sub class for dictionaries '''
pass
class AnsibleUnicode(AnsibleBaseYAMLObject, text_type):
''' sub class for unicode objects '''
pass
class AnsibleSequence(AnsibleBaseYAMLObject, list):
''' sub class for lists '''
pass
# Unicode like object that is not evaluated (decrypted) until it needs to be
# TODO: is there a reason these objects are subclasses for YAMLObject?
class AnsibleVaultEncryptedUnicode(yaml.YAMLObject, AnsibleBaseYAMLObject):
__UNSAFE__ = True
__ENCRYPTED__ = True
yaml_tag = u'!vault'
@classmethod
def from_plaintext(cls, seq, vault, secret):
if not vault:
raise vault.AnsibleVaultError('Error creating AnsibleVaultEncryptedUnicode, invalid vault (%s) provided' % vault)
ciphertext = vault.encrypt(seq, secret)
avu = cls(ciphertext)
avu.vault = vault
return avu
def __init__(self, ciphertext):
'''A AnsibleUnicode with a Vault attribute that can decrypt it.
ciphertext is a byte string (str on PY2, bytestring on PY3).
The .data attribute is a property that returns the decrypted plaintext
of the ciphertext as a PY2 unicode or PY3 string object.
'''
super(AnsibleVaultEncryptedUnicode, self).__init__()
# after construction, calling code has to set the .vault attribute to a vaultlib object
self.vault = None
self._ciphertext = to_bytes(ciphertext)
@property
def data(self):
if not self.vault:
# FIXME: raise exception?
return self._ciphertext
return to_text(self.vault.decrypt(self._ciphertext))
@data.setter
def data(self, value):
self._ciphertext = value
def __repr__(self):
return repr(self.data)
    # Compare a regular str/text_type with the decrypted plaintext
def __eq__(self, other):
if self.vault:
return other == self.data
return False
def __hash__(self):
return id(self)
def __ne__(self, other):
if self.vault:
return other != self.data
return True
def __str__(self):
return to_native(self.data, errors='surrogate_or_strict')
def __unicode__(self):
return to_text(self.data, errors='surrogate_or_strict')
def encode(self, encoding=None, errors=None):
return self.data.encode(encoding, errors)
| gpl-3.0 | 5,833,911,417,925,662,000 | 30.182482 | 125 | 0.659878 | false |
SergeyMakarenko/fbthrift | thrift/test/py.asyncio/test_suite_asyncio.py | 2 | 8838 | #!/usr/bin/env python3
import asyncio
import functools
import logging
import time
import unittest
from ThriftTest import ThriftTest
from ThriftTest.ttypes import Xception, Xtruct
from thrift.server.TAsyncioServer import (
ThriftClientProtocolFactory,
ThriftAsyncServerFactory,
)
from thrift.transport.THeaderTransport import CLIENT_TYPE
loop = asyncio.get_event_loop()
loop.set_debug(True)
logging.getLogger('asyncio').setLevel(logging.DEBUG)
class TestHandler(ThriftTest.Iface):
def __init__(self, use_async=False):
self.onewaysQueue = asyncio.Queue(loop=loop)
if use_async:
self.testOneway = self.fireOnewayAsync
self.testString = self.fireStringAsync
else:
self.testOneway = self.fireOnewayCoro
self.testString = self.fireStringCoro
def testVoid(self):
pass
@asyncio.coroutine
def fireStringCoro(self, s):
yield from asyncio.sleep(0)
return s
async def fireStringAsync(self, s):
await asyncio.sleep(0)
return s
def testByte(self, b):
return b
def testI16(self, i16):
return i16
def testI32(self, i32):
return i32
def testI64(self, i64):
return i64
def testDouble(self, dub):
return dub
def testStruct(self, thing):
return thing
def testException(self, s):
if s == 'Xception':
x = Xception()
x.errorCode = 1001
x.message = s
raise x
elif s == "throw_undeclared":
raise ValueError("foo")
@asyncio.coroutine
def fireOnewayCoro(self, seconds):
t = time.time()
yield from asyncio.sleep(seconds)
yield from self.onewaysQueue.put((t, time.time(), seconds))
async def fireOnewayAsync(self, seconds):
t = time.time()
await asyncio.sleep(seconds)
await self.onewaysQueue.put((t, time.time(), seconds))
def testNest(self, thing):
return thing
def testMap(self, thing):
return thing
def testSet(self, thing):
return thing
def testList(self, thing):
return thing
def testEnum(self, thing):
return thing
def testTypedef(self, thing):
return thing
def async_test(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
nonlocal f
if not asyncio.iscoroutinefunction(f):
f = asyncio.coroutine(f)
loop.run_until_complete(f(*args, **kwargs))
return wrapper
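# Illustrative note: async_test drives a test through the module-level event
# loop, so a test written as a plain generator-style method, e.g.
#   @async_test
#   def test_ping(self):
#       result = yield from self.client.testI32(7)
# is first promoted to a coroutine (asyncio.coroutine) and then run to
# completion with loop.run_until_complete.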
class ThriftCoroTestCase(unittest.TestCase):
CLIENT_TYPE = None
@async_test
def setUp(self):
global loop
self.host = '127.0.0.1'
self.handler = TestHandler(use_async=False)
self.server = yield from ThriftAsyncServerFactory(
self.handler, interface=self.host, port=0, loop=loop,
)
self.port = self.server.sockets[0].getsockname()[1]
self.transport, self.protocol = yield from loop.create_connection(
ThriftClientProtocolFactory(
ThriftTest.Client,
client_type=self.CLIENT_TYPE),
host=self.host,
port=self.port,
)
self.client = self.protocol.client
@async_test
def tearDown(self):
self.protocol.close()
self.transport.close()
self.protocol.close()
self.server.close()
@async_test
def testVoid(self):
result = yield from self.client.testVoid()
self.assertEqual(result, None)
@async_test
def testString(self):
result = yield from self.client.testString('Python')
self.assertEqual(result, 'Python')
@async_test
def testByte(self):
result = yield from self.client.testByte(63)
self.assertEqual(result, 63)
@async_test
def testI32(self):
result = yield from self.client.testI32(-1)
self.assertEqual(result, -1)
result = yield from self.client.testI32(0)
self.assertEqual(result, 0)
@async_test
def testI64(self):
result = yield from self.client.testI64(-34359738368)
self.assertEqual(result, -34359738368)
@async_test
def testDouble(self):
result = yield from self.client.testDouble(-5.235098235)
self.assertAlmostEqual(result, -5.235098235)
@async_test
def testStruct(self):
x = Xtruct()
x.string_thing = "Zero"
x.byte_thing = 1
x.i32_thing = -3
x.i64_thing = -5
y = yield from self.client.testStruct(x)
self.assertEqual(y.string_thing, "Zero")
self.assertEqual(y.byte_thing, 1)
self.assertEqual(y.i32_thing, -3)
self.assertEqual(y.i64_thing, -5)
@async_test
def testException(self):
yield from self.client.testException('Safe')
try:
yield from self.client.testException('Xception')
self.fail("Xception not raised")
except Xception as x:
self.assertEqual(x.errorCode, 1001)
self.assertEqual(x.message, 'Xception')
try:
yield from self.client.testException("throw_undeclared")
self.fail("exception not raised")
except Exception: # type is undefined
pass
@async_test
def testOneway(self):
yield from self.client.testOneway(2)
start, end, seconds = yield from self.handler.onewaysQueue.get()
self.assertAlmostEqual(seconds, (end - start), places=1)
@async_test
def testClose(self):
self.assertTrue(self.protocol.transport.isOpen())
self.protocol.close()
self.assertFalse(self.protocol.transport.isOpen())
class ThriftAsyncTestCase(unittest.TestCase):
CLIENT_TYPE = None
@async_test
async def setUp(self):
global loop
self.host = '127.0.0.1'
self.handler = TestHandler(use_async=True)
self.server = await ThriftAsyncServerFactory(
self.handler, interface=self.host, port=0, loop=loop,
)
self.port = self.server.sockets[0].getsockname()[1]
self.transport, self.protocol = await loop.create_connection(
ThriftClientProtocolFactory(
ThriftTest.Client,
client_type=self.CLIENT_TYPE),
host=self.host,
port=self.port,
)
self.client = self.protocol.client
@async_test
async def tearDown(self):
self.protocol.close()
self.transport.close()
self.server.close()
@async_test
async def testVoid(self):
result = await self.client.testVoid()
self.assertEqual(result, None)
@async_test
async def testString(self):
result = await self.client.testString('Python')
self.assertEqual(result, 'Python')
@async_test
async def testByte(self):
result = await self.client.testByte(63)
self.assertEqual(result, 63)
@async_test
async def testI32(self):
result = await self.client.testI32(-1)
self.assertEqual(result, -1)
result = await self.client.testI32(0)
self.assertEqual(result, 0)
@async_test
async def testI64(self):
result = await self.client.testI64(-34359738368)
self.assertEqual(result, -34359738368)
@async_test
async def testDouble(self):
result = await self.client.testDouble(-5.235098235)
self.assertAlmostEqual(result, -5.235098235)
@async_test
async def testStruct(self):
x = Xtruct()
x.string_thing = "Zero"
x.byte_thing = 1
x.i32_thing = -3
x.i64_thing = -5
y = await self.client.testStruct(x)
self.assertEqual(y.string_thing, "Zero")
self.assertEqual(y.byte_thing, 1)
self.assertEqual(y.i32_thing, -3)
self.assertEqual(y.i64_thing, -5)
@async_test
async def testException(self):
await self.client.testException('Safe')
try:
await self.client.testException('Xception')
self.fail("Xception not raised")
except Xception as x:
self.assertEqual(x.errorCode, 1001)
self.assertEqual(x.message, 'Xception')
try:
await self.client.testException("throw_undeclared")
self.fail("exception not raised")
except Exception: # type is undefined
pass
@async_test
async def testOneway(self):
await self.client.testOneway(2)
start, end, seconds = await self.handler.onewaysQueue.get()
self.assertAlmostEqual(seconds, (end - start), places=1)
class FramedThriftCoroTestCase(ThriftCoroTestCase):
CLIENT_TYPE = CLIENT_TYPE.FRAMED_DEPRECATED
class FramedThriftAsyncTestCase(ThriftAsyncTestCase):
CLIENT_TYPE = CLIENT_TYPE.FRAMED_DEPRECATED
| apache-2.0 | -2,144,875,494,225,995,000 | 26.880126 | 74 | 0.618126 | false |
prashantpawar/supybot-rothbot | plugins/Unix/test.py | 15 | 3038 | ###
# Copyright (c) 2002-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import os
from supybot.test import *
if os.name == 'posix':
class UnixTestCase(PluginTestCase):
plugins = ('Unix',)
if utils.findBinaryInPath('aspell') is not None or \
utils.findBinaryInPath('ispell') is not None:
def testSpell(self):
self.assertRegexp('spell Strike', 'correctly')
# ispell won't find any results. aspell will make some
# suggestions.
self.assertRegexp('spell z0opadfnaf83nflafl230kasdf023hflasdf',
'not find|Possible spellings')
self.assertNotError('spell Strizzike')
self.assertError('spell foo bar baz')
self.assertError('spell -')
self.assertError('spell .')
self.assertError('spell ?')
self.assertNotError('spell whereever')
self.assertNotRegexp('spell foo', 'whatever')
def testErrno(self):
self.assertRegexp('errno 12', '^ENOMEM')
self.assertRegexp('errno ENOMEM', '#12')
def testProgstats(self):
self.assertNotError('progstats')
def testCrypt(self):
self.assertNotError('crypt jemfinch')
if utils.findBinaryInPath('fortune') is not None:
def testFortune(self):
self.assertNotError('fortune')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause | 7,836,869,204,612,315,000 | 43.676471 | 79 | 0.678407 | false |
joelpx/reverse | plasma/lib/arch/arm/process_ast.py | 4 | 3981 | #!/usr/bin/env python3
#
# PLASMA : Generate an indented asm code (pseudo-C) with colored syntax.
# Copyright (C) 2015 Joel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from capstone.arm import ARM_OP_IMM, ARM_INS_CMP, ARM_CC_AL, ARM_INS_TST
from plasma.lib.ast import (Ast_Branch, Ast_Loop, Ast_IfGoto, Ast_Ifelse,
Ast_AndIf, Ast_If_cond)
from plasma.lib.arch.arm.output import ASSIGNMENT_OPS
FUSE_OPS = set(ASSIGNMENT_OPS)
FUSE_OPS.add(ARM_INS_CMP)
FUSE_OPS.add(ARM_INS_TST)
def fuse_inst_with_if(ctx, ast):
if isinstance(ast, Ast_Branch):
types_ast = (Ast_Ifelse, Ast_IfGoto, Ast_AndIf, Ast_If_cond)
for i, n in enumerate(ast.nodes):
# TODO : try to do the same thing as x86
if isinstance(n, list):
if n[-1].id in FUSE_OPS and i+1 < len(ast.nodes) and \
isinstance(ast.nodes[i+1], types_ast):
ast.nodes[i+1].fused_inst = n[-1]
ctx.all_fused_inst.add(n[-1].address)
else: # ast
fuse_inst_with_if(ctx, n)
# elif isinstance(ast, Ast_If_cond):
# fuse_inst_with_if(ctx, ast.br)
elif isinstance(ast, Ast_Ifelse):
fuse_inst_with_if(ctx, ast.br_next)
fuse_inst_with_if(ctx, ast.br_next_jump)
elif isinstance(ast, Ast_Loop):
fuse_inst_with_if(ctx, ast.branch)
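# Illustrative example of the fusion above (hypothetical ARM snippet):
#   cmp r0, #0
#   beq skip
# The cmp (or tst / assignment) sets the flags consumed by the conditional node
# that follows, so it is attached to that node as fused_inst and its address is
# recorded in ctx.all_fused_inst so the output pass can treat it as fused.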
def convert_cond_to_if(ctx, ast):
def add_node(i, last_cond, br_lst):
if br_lst:
if last_cond == ARM_CC_AL:
added_nodes[i].append(br_lst)
else:
br = Ast_Branch()
br.add(br_lst)
added_nodes[i].append(Ast_If_cond(last_cond, br))
if isinstance(ast, Ast_Branch):
# Temporary dict, because we can't modify nodes while we are
# looping, we store new nodes here with the corresponding index
added_nodes = {}
for i, n in enumerate(ast.nodes):
if isinstance(n, list):
# This will split the current block in other branch if
# we found conditional instructions.
blk = n
added_nodes[i] = []
last_cond = blk[0].cc
br = []
# Fuse instructions with same condition in a same branch
for inst in blk:
if inst.cc == last_cond:
br.append(inst)
else:
add_node(i, last_cond, br)
br = [inst]
last_cond = inst.cc
add_node(i, last_cond, br)
else: # ast
convert_cond_to_if(ctx, n)
# Now we update the nodes list. If we have split a block n
# we remove it, and add new nodes.
idx_keys = list(added_nodes.keys())
idx_keys.sort()
for i in reversed(idx_keys):
if len(added_nodes[i]) > 1:
del ast.nodes[i]
# node is a list (blk of instructions) or Ast_If_cond
for k, node in enumerate(added_nodes[i]):
ast.nodes.insert(i+k, node)
elif isinstance(ast, Ast_Ifelse):
convert_cond_to_if(ctx, ast.br_next_jump)
convert_cond_to_if(ctx, ast.br_next)
elif isinstance(ast, Ast_Loop):
convert_cond_to_if(ctx, ast.branch)
| gpl-3.0 | 7,938,188,517,390,258,000 | 35.190909 | 73 | 0.56845 | false |
googledatalab/datalab | tools/cli/commands/delete.py | 2 | 3961 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for implementing the `datalab delete` command."""
from __future__ import absolute_import
from . import utils
description = ("""`{0} {1}` deletes the given Datalab instance's
Google Compute Engine VM.
By default, the persistent disk's auto-delete configuration determines
whether or not that disk is also deleted.
If you wish to override that setting, you can pass in one of either the
`--delete-disk` flag or the `--keep-disk` flag.
For more information on disk auto-deletion, see
https://cloud.google.com/compute/docs/disks/persistent-disks#updateautodelete
""")
_DELETE_DISK_HELP = ("""Whether or not to delete the instance's persistent disk
regardless of the disks' auto-delete configuration.""")
_KEEP_DISK_HELP = ("""Whether or not to keep the instance's persistent disk
regardless of the disks' auto-delete configuration.""")
_DELETE_BASE_PROMPT = ("""The following instance will be deleted:
- [{}] in [{}]
The corresponding notebooks disk {}.
""")
def flags(parser):
"""Add command line flags for the `delete` subcommand.
Args:
parser: The argparse parser to which to add the flags.
"""
parser.add_argument(
'instance',
metavar='NAME',
help='name of the instance to delete')
auto_delete_override = parser.add_mutually_exclusive_group()
auto_delete_override.add_argument(
'--delete-disk',
dest='delete_disk',
action='store_true',
help=_DELETE_DISK_HELP)
auto_delete_override.add_argument(
'--keep-disk',
dest='keep_disk',
action='store_true',
help=_KEEP_DISK_HELP)
return
def run(args, gcloud_compute, gcloud_zone=None, **unused_kwargs):
"""Implementation of the `datalab delete` subcommand.
Args:
args: The Namespace instance returned by argparse
gcloud_compute: Function that can be used to invoke `gcloud compute`
gcloud_zone: The zone that gcloud is configured to use
Raises:
subprocess.CalledProcessError: If a nested `gcloud` calls fails
"""
instance = args.instance
utils.maybe_prompt_for_zone(args, gcloud_compute, instance)
base_cmd = ['instances', 'delete', '--quiet']
if args.zone:
base_cmd.extend(['--zone', args.zone])
instance_zone = args.zone
else:
instance_zone = gcloud_zone
if args.delete_disk:
base_cmd.extend(['--delete-disks', 'data'])
notebooks_disk_message_part = 'will be deleted'
elif args.keep_disk:
base_cmd.extend(['--keep-disks', 'data'])
notebooks_disk_message_part = 'will not be deleted'
else:
disk_cfg = utils.instance_notebook_disk(args, gcloud_compute, instance)
if not disk_cfg:
notebooks_disk_message_part = 'is not attached'
elif disk_cfg['autoDelete']:
notebooks_disk_message_part = 'will be deleted'
else:
notebooks_disk_message_part = 'will not be deleted'
message = _DELETE_BASE_PROMPT.format(
instance, instance_zone, notebooks_disk_message_part)
if not utils.prompt_for_confirmation(
args=args,
message=message,
accept_by_default=True):
print('Deletion aborted by user; Exiting.')
return
print('Deleting {0}'.format(instance))
gcloud_compute(args, base_cmd + [instance])
return
| apache-2.0 | 5,583,663,357,053,489,000 | 31.467213 | 79 | 0.6718 | false |
bitmovin/bitmovin-python | examples/encoding/create_encoding_dash_cenc_hls_fairplay.py | 1 | 27209 | import datetime
from bitmovin import Bitmovin, Encoding, S3Output, H264CodecConfiguration, \
AACCodecConfiguration, H264Profile, StreamInput, SelectionMode, Stream, EncodingOutput, ACLEntry, ACLPermission, \
FMP4Muxing, MuxingStream, DashManifest, DRMFMP4Representation, FMP4RepresentationType, Period, \
VideoAdaptationSet, AudioAdaptationSet, ContentProtection, S3Input, HlsManifest, VariantStream, \
AudioMedia, FairPlayDRM, TSMuxing
from bitmovin import CENCDRM as CENCDRMResource
from bitmovin.resources.models import CENCPlayReadyEntry, CENCWidevineEntry
from bitmovin.errors import BitmovinError
from bitmovin.resources.models.encodings.drms.cenc_marlin_entry import CENCMarlinEntry
API_KEY = '<YOUR_API_KEY>'
S3_INPUT_ACCESSKEY = '<YOUR_S3_OUTPUT_ACCESSKEY>'
S3_INPUT_SECRETKEY = '<YOUR_S3_OUTPUT_SECRETKEY>'
S3_INPUT_BUCKETNAME = '<YOUR_S3_OUTPUT_BUCKETNAME>'
S3_INPUT_PATH = '<YOUR_S3_INPUT_PATH>'
S3_OUTPUT_ACCESSKEY = '<YOUR_S3_OUTPUT_ACCESSKEY>'
S3_OUTPUT_SECRETKEY = '<YOUR_S3_OUTPUT_SECRETKEY>'
S3_OUTPUT_BUCKETNAME = '<YOUR_S3_OUTPUT_BUCKETNAME>'
CENC_KEY = '<YOUR_CENC_KEY>'
CENC_KID = '<YOUR_CENC_KID>'
CENC_WIDEVINE_PSSH = '<YOUR_CENC_WIDEVINE_PSSH>'
CENC_PLAYREADY_LA_URL = '<YOUR_PLAYREADY_LA_URL>'
FAIRPLAY_KEY = '<YOUR_FAIRPLAY_KEY>'
FAIRPLAY_IV = '<YOUR_FAIRPLAY_IV>'
FAIRPLAY_URI = '<YOUR_FAIRPLAY_LICENSING_URL>'
date_component = str(datetime.datetime.now()).replace(' ', '_').replace(':', '-').split('.')[0].replace('_', '__')
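# date_component looks like '2018-01-30__14-05-59': the timestamp is trimmed to
# whole seconds, the space ends up as '__' and each ':' becomes '-'.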
OUTPUT_BASE_PATH = 'your/output/base/path/{}/'.format(date_component)
def main():
bitmovin = Bitmovin(api_key=API_KEY)
s3_input = S3Input(access_key=S3_INPUT_ACCESSKEY,
secret_key=S3_INPUT_SECRETKEY,
bucket_name=S3_INPUT_BUCKETNAME,
name='Sample S3 Input')
s3_input = bitmovin.inputs.S3.create(s3_input).resource
s3_output = S3Output(access_key=S3_OUTPUT_ACCESSKEY,
secret_key=S3_OUTPUT_SECRETKEY,
bucket_name=S3_OUTPUT_BUCKETNAME,
name='Sample S3 Output')
s3_output = bitmovin.outputs.S3.create(s3_output).resource
encoding = Encoding(name='Python Encoding with DASH CENC and Fairplay')
encoding = bitmovin.encodings.Encoding.create(encoding).resource
video_codec_configuration_480p = H264CodecConfiguration(name='example_video_codec_configuration_480p',
bitrate=1200000,
rate=None,
height=480,
profile=H264Profile.HIGH)
video_codec_configuration_480p = bitmovin.codecConfigurations.H264.create(video_codec_configuration_480p).resource
video_codec_configuration_360p = H264CodecConfiguration(name='example_video_codec_configuration_360p',
bitrate=800000,
rate=None,
height=360,
profile=H264Profile.HIGH)
video_codec_configuration_360p = bitmovin.codecConfigurations.H264.create(video_codec_configuration_360p).resource
video_codec_configuration_240p = H264CodecConfiguration(name='example_video_codec_configuration_240p',
bitrate=400000,
rate=None,
height=240,
profile=H264Profile.HIGH)
video_codec_configuration_240p = bitmovin.codecConfigurations.H264.create(video_codec_configuration_240p).resource
audio_codec_configuration_stereo = AACCodecConfiguration(name='example_audio_codec_configuration_stereo',
bitrate=128000,
rate=48000)
audio_codec_configuration_stereo = \
bitmovin.codecConfigurations.AAC.create(audio_codec_configuration_stereo).resource
video_input_stream = StreamInput(input_id=s3_input.id,
input_path=S3_INPUT_PATH,
selection_mode=SelectionMode.AUTO)
audio_input_stream_en_stereo = StreamInput(input_id=s3_input.id,
input_path=S3_INPUT_PATH,
selection_mode=SelectionMode.AUTO)
video_stream_480p = Stream(codec_configuration_id=video_codec_configuration_480p.id,
input_streams=[video_input_stream],
name='Sample Stream 480p')
video_stream_480p = bitmovin.encodings.Stream.create(object_=video_stream_480p,
encoding_id=encoding.id).resource
video_stream_360p = Stream(codec_configuration_id=video_codec_configuration_360p.id,
input_streams=[video_input_stream],
name='Sample Stream 360p')
video_stream_360p = bitmovin.encodings.Stream.create(object_=video_stream_360p,
encoding_id=encoding.id).resource
video_stream_240p = Stream(codec_configuration_id=video_codec_configuration_240p.id,
input_streams=[video_input_stream],
name='Sample Stream 240p')
video_stream_240p = bitmovin.encodings.Stream.create(object_=video_stream_240p,
encoding_id=encoding.id).resource
audio_stream_en_stereo = Stream(codec_configuration_id=audio_codec_configuration_stereo.id,
input_streams=[audio_input_stream_en_stereo],
name='Sample Audio Stream EN Stereo')
audio_stream_en_stereo = bitmovin.encodings.Stream.create(object_=audio_stream_en_stereo,
encoding_id=encoding.id).resource
acl_entry = ACLEntry(permission=ACLPermission.PUBLIC_READ)
video_muxing_stream_480p = MuxingStream(video_stream_480p.id)
video_muxing_stream_360p = MuxingStream(video_stream_360p.id)
video_muxing_stream_240p = MuxingStream(video_stream_240p.id)
audio_muxing_stream_en_stereo = MuxingStream(audio_stream_en_stereo.id)
widevine_drm = CENCWidevineEntry(pssh=CENC_WIDEVINE_PSSH)
play_ready_drm = CENCPlayReadyEntry(la_url=CENC_PLAYREADY_LA_URL)
video_muxing_480p_dash_output = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH + 'video/dash/480p/',
acl=[acl_entry])
video_muxing_480p = FMP4Muxing(segment_length=4,
segment_naming='seg_%number%.m4s',
init_segment_name='init.mp4',
streams=[video_muxing_stream_480p],
name='FMP4 Muxing 480p')
video_muxing_480p = bitmovin.encodings.Muxing.FMP4.create(object_=video_muxing_480p,
encoding_id=encoding.id).resource
cenc_480p = CENCDRMResource(key=CENC_KEY,
kid=CENC_KID,
widevine=widevine_drm,
playReady=play_ready_drm,
marlin=CENCMarlinEntry(),
outputs=[video_muxing_480p_dash_output],
name='Cenc')
cenc_480p = bitmovin.encodings.Muxing.FMP4.DRM.CENC.create(object_=cenc_480p,
encoding_id=encoding.id,
muxing_id=video_muxing_480p.id).resource
video_muxing_480p_ts = TSMuxing(segment_length=4,
segment_naming='seg_%number%.ts',
streams=[video_muxing_stream_480p],
name='TS Muxing 480p')
video_muxing_480p_ts = bitmovin.encodings.Muxing.TS.create(object_=video_muxing_480p_ts,
encoding_id=encoding.id).resource
video_muxing_480p_hls_output = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH + 'video/hls/480p/',
acl=[acl_entry])
fair_play_480p = FairPlayDRM(key=FAIRPLAY_KEY,
iv=FAIRPLAY_IV,
uri=FAIRPLAY_URI,
outputs=[video_muxing_480p_hls_output],
name='FairPlay 480p')
fair_play_480p = bitmovin.encodings.Muxing.TS.DRM.FairPlay.create(object_=fair_play_480p,
encoding_id=encoding.id,
muxing_id=video_muxing_480p_ts.id).resource
video_muxing_360p_dash_output = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH + 'video/dash/360p/',
acl=[acl_entry])
video_muxing_360p = FMP4Muxing(segment_length=4,
segment_naming='seg_%number%.m4s',
init_segment_name='init.mp4',
streams=[video_muxing_stream_360p],
name='FMP4 Muxing 360p')
video_muxing_360p = bitmovin.encodings.Muxing.FMP4.create(object_=video_muxing_360p,
encoding_id=encoding.id).resource
cenc_360p = CENCDRMResource(key=CENC_KEY,
kid=CENC_KID,
widevine=widevine_drm,
playReady=play_ready_drm,
marlin=CENCMarlinEntry(),
outputs=[video_muxing_360p_dash_output],
name='Cenc')
cenc_360p = bitmovin.encodings.Muxing.FMP4.DRM.CENC.create(object_=cenc_360p,
encoding_id=encoding.id,
muxing_id=video_muxing_360p.id).resource
video_muxing_360p_hls_output = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH + 'video/hls/360p/',
acl=[acl_entry])
video_muxing_360p_ts = TSMuxing(segment_length=4,
segment_naming='seg_%number%.ts',
streams=[video_muxing_stream_360p],
name='TS Muxing 360p')
video_muxing_360p_ts = bitmovin.encodings.Muxing.TS.create(object_=video_muxing_360p_ts,
encoding_id=encoding.id).resource
fair_play_360p = FairPlayDRM(key=FAIRPLAY_KEY,
iv=FAIRPLAY_IV,
uri=FAIRPLAY_URI,
outputs=[video_muxing_360p_hls_output],
name='FairPlay 360p')
fair_play_360p = bitmovin.encodings.Muxing.TS.DRM.FairPlay.create(object_=fair_play_360p,
encoding_id=encoding.id,
muxing_id=video_muxing_360p_ts.id).resource
video_muxing_240p_dash_output = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH + 'video/dash/240p/',
acl=[acl_entry])
video_muxing_240p = FMP4Muxing(segment_length=4,
segment_naming='seg_%number%.m4s',
init_segment_name='init.mp4',
streams=[video_muxing_stream_240p],
name='FMP4 Muxing 240p')
video_muxing_240p = bitmovin.encodings.Muxing.FMP4.create(object_=video_muxing_240p,
encoding_id=encoding.id).resource
cenc_240p = CENCDRMResource(key=CENC_KEY,
kid=CENC_KID,
widevine=widevine_drm,
playReady=play_ready_drm,
marlin=CENCMarlinEntry(),
outputs=[video_muxing_240p_dash_output],
name='Cenc')
cenc_240p = bitmovin.encodings.Muxing.FMP4.DRM.CENC.create(object_=cenc_240p,
encoding_id=encoding.id,
muxing_id=video_muxing_240p.id).resource
video_muxing_240p_hls_output = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH + 'video/hls/240p/',
acl=[acl_entry])
video_muxing_240p_ts = TSMuxing(segment_length=4,
segment_naming='seg_%number%.ts',
streams=[video_muxing_stream_240p],
name='TS Muxing 240p')
video_muxing_240p_ts = bitmovin.encodings.Muxing.TS.create(object_=video_muxing_240p_ts,
encoding_id=encoding.id).resource
fair_play_240p = FairPlayDRM(key=FAIRPLAY_KEY,
iv=FAIRPLAY_IV,
uri=FAIRPLAY_URI,
outputs=[video_muxing_240p_hls_output],
name='FairPlay 240p')
fair_play_240p = bitmovin.encodings.Muxing.TS.DRM.FairPlay.create(object_=fair_play_240p,
encoding_id=encoding.id,
muxing_id=video_muxing_240p_ts.id).resource
audio_muxing_dash_output = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH + 'audio/dash/en/',
acl=[acl_entry])
audio_muxing_en_stereo = FMP4Muxing(segment_length=4,
segment_naming='seg_%number%.m4s',
init_segment_name='init.mp4',
streams=[audio_muxing_stream_en_stereo],
name='Sample Audio Muxing EN Stereo')
audio_muxing_en_stereo = bitmovin.encodings.Muxing.FMP4.create(object_=audio_muxing_en_stereo,
encoding_id=encoding.id).resource
cenc_audio = CENCDRMResource(key=CENC_KEY,
kid=CENC_KID,
widevine=widevine_drm,
playReady=play_ready_drm,
marlin=CENCMarlinEntry(),
outputs=[audio_muxing_dash_output],
name='Cenc')
cenc_audio = bitmovin.encodings.Muxing.FMP4.DRM.CENC.create(object_=cenc_audio,
encoding_id=encoding.id,
muxing_id=audio_muxing_en_stereo.id).resource
audio_muxing_hls_output = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH + 'audio/hls/en/',
acl=[acl_entry])
audio_muxing_en_stereo_ts = TSMuxing(segment_length=4,
                                         segment_naming='seg_%number%.ts',
streams=[audio_muxing_stream_en_stereo],
name='Sample TS Audio Muxing EN Stereo')
audio_muxing_en_stereo_ts = bitmovin.encodings.Muxing.TS.create(object_=audio_muxing_en_stereo_ts,
encoding_id=encoding.id).resource
fair_play_audio = FairPlayDRM(key=FAIRPLAY_KEY,
iv=FAIRPLAY_IV,
uri=FAIRPLAY_URI,
outputs=[audio_muxing_hls_output],
name='FairPlay Audio')
fair_play_audio = bitmovin.encodings.Muxing.TS.DRM.FairPlay.create(object_=fair_play_audio,
encoding_id=encoding.id,
muxing_id=audio_muxing_en_stereo_ts.id).resource
bitmovin.encodings.Encoding.start(encoding_id=encoding.id)
try:
bitmovin.encodings.Encoding.wait_until_finished(encoding_id=encoding.id)
except BitmovinError as bitmovin_error:
print("Exception occurred while waiting for encoding to finish: {}".format(bitmovin_error))
###################################################################################################################
manifest_output = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH,
acl=[acl_entry])
dash_manifest = DashManifest(manifest_name='stream.mpd',
outputs=[manifest_output],
name='Sample DASH Manifest')
dash_manifest = bitmovin.manifests.DASH.create(dash_manifest).resource
period = Period()
period = bitmovin.manifests.DASH.add_period(object_=period, manifest_id=dash_manifest.id).resource
video_adaptation_set = VideoAdaptationSet()
video_adaptation_set = bitmovin.manifests.DASH.add_video_adaptation_set(object_=video_adaptation_set,
manifest_id=dash_manifest.id,
period_id=period.id).resource
video_content_protection = ContentProtection(encoding_id=encoding.id,
muxing_id=video_muxing_480p.id,
drm_id=cenc_480p.id)
bitmovin.manifests.DASH.add_content_protection_to_adaptionset(object_=video_content_protection,
manifest_id=dash_manifest.id,
period_id=period.id,
adaptationset_id=video_adaptation_set.id)
fmp4_representation_480p = DRMFMP4Representation(type=FMP4RepresentationType.TEMPLATE,
encoding_id=encoding.id,
muxing_id=video_muxing_480p.id,
drm_id=cenc_480p.id,
segment_path='video/dash/480p/')
fmp4_representation_480p = bitmovin.manifests.DASH.add_drm_fmp4_representation(
object_=fmp4_representation_480p,
manifest_id=dash_manifest.id,
period_id=period.id,
adaptationset_id=video_adaptation_set.id
).resource
fmp4_representation_360p = DRMFMP4Representation(type=FMP4RepresentationType.TEMPLATE,
encoding_id=encoding.id,
muxing_id=video_muxing_360p.id,
drm_id=cenc_360p.id,
segment_path='video/dash/360p/')
fmp4_representation_360p = bitmovin.manifests.DASH.add_drm_fmp4_representation(
object_=fmp4_representation_360p,
manifest_id=dash_manifest.id,
period_id=period.id,
adaptationset_id=video_adaptation_set.id
).resource
fmp4_representation_240p = DRMFMP4Representation(type=FMP4RepresentationType.TEMPLATE,
encoding_id=encoding.id,
muxing_id=video_muxing_240p.id,
drm_id=cenc_240p.id,
segment_path='video/dash/240p/')
fmp4_representation_240p = bitmovin.manifests.DASH.add_drm_fmp4_representation(
object_=fmp4_representation_240p,
manifest_id=dash_manifest.id,
period_id=period.id,
adaptationset_id=video_adaptation_set.id
).resource
audio_adaptation_set = AudioAdaptationSet(lang='EN')
audio_adaptation_set = bitmovin.manifests.DASH.add_audio_adaptation_set(object_=audio_adaptation_set,
manifest_id=dash_manifest.id,
period_id=period.id).resource
audio_content_protection = ContentProtection(encoding_id=encoding.id,
muxing_id=audio_muxing_en_stereo.id,
drm_id=cenc_audio.id)
bitmovin.manifests.DASH.add_content_protection_to_adaptionset(object_=audio_content_protection,
manifest_id=dash_manifest.id,
period_id=period.id,
adaptationset_id=audio_adaptation_set.id)
drm_cenc_fmp4_representation_audio = DRMFMP4Representation(type=FMP4RepresentationType.TEMPLATE,
encoding_id=encoding.id,
muxing_id=audio_muxing_en_stereo.id,
drm_id=cenc_audio.id,
segment_path='audio/dash/en/')
drm_cenc_fmp4_representation_audio = bitmovin.manifests.DASH.add_drm_fmp4_representation(
object_=drm_cenc_fmp4_representation_audio,
manifest_id=dash_manifest.id,
period_id=period.id,
adaptationset_id=audio_adaptation_set.id
).resource
bitmovin.manifests.DASH.start(manifest_id=dash_manifest.id)
###################################################################################################################
hls_manifest = HlsManifest(manifest_name='stream.m3u8',
outputs=[manifest_output],
name='Sample HLS FairPlay Manifest')
hls_manifest = bitmovin.manifests.HLS.create(hls_manifest).resource
audio_media = AudioMedia(name='English',
group_id='audio_group',
segment_path='audio/hls/en/',
encoding_id=encoding.id,
stream_id=audio_stream_en_stereo.id,
muxing_id=audio_muxing_en_stereo_ts.id,
drm_id=fair_play_audio.id,
language='en',
uri='audio.m3u8')
audio_media = bitmovin.manifests.HLS.AudioMedia.create(manifest_id=hls_manifest.id,
object_=audio_media).resource
variant_stream_480p = VariantStream(audio=audio_media.groupId,
segment_path='video/hls/480p/',
uri='video_480p.m3u8',
encoding_id=encoding.id,
stream_id=video_stream_480p.id,
muxing_id=video_muxing_480p_ts.id,
drm_id=fair_play_480p.id)
variant_stream_480p = bitmovin.manifests.HLS.VariantStream.create(manifest_id=hls_manifest.id,
object_=variant_stream_480p)
variant_stream_360p = VariantStream(audio=audio_media.groupId,
segment_path='video/hls/360p/',
uri='video_360p.m3u8',
encoding_id=encoding.id,
stream_id=video_stream_360p.id,
muxing_id=video_muxing_360p_ts.id,
drm_id=fair_play_360p.id)
variant_stream_360p = bitmovin.manifests.HLS.VariantStream.create(manifest_id=hls_manifest.id,
object_=variant_stream_360p)
variant_stream_240p = VariantStream(audio=audio_media.groupId,
segment_path='video/hls/240p/',
uri='video_240p.m3u8',
encoding_id=encoding.id,
stream_id=video_stream_240p.id,
muxing_id=video_muxing_240p_ts.id,
drm_id=fair_play_240p.id)
variant_stream_240p = bitmovin.manifests.HLS.VariantStream.create(manifest_id=hls_manifest.id,
object_=variant_stream_240p)
bitmovin.manifests.HLS.start(manifest_id=hls_manifest.id)
###################################################################################################################
try:
bitmovin.manifests.DASH.wait_until_finished(manifest_id=dash_manifest.id)
except BitmovinError as bitmovin_error:
print("Exception occurred while waiting for manifest creation to finish: {}".format(bitmovin_error))
try:
bitmovin.manifests.HLS.wait_until_finished(manifest_id=hls_manifest.id)
except BitmovinError as bitmovin_error:
print("Exception occurred while waiting for HLS manifest creation to finish: {}".format(bitmovin_error))
if __name__ == '__main__':
main()
| unlicense | -1,757,390,193,045,422,300 | 59.870246 | 119 | 0.482818 | false |
GiggleLiu/poorman_nn | poornn/spconv.py | 1 | 16879 | '''
Convolution using sparse matrix.
'''
from __future__ import division
import numpy as np
import pdb
import time
from scipy import sparse as sps
from .lib.spconv import lib as fspconv
from .lib.spconv_cc import lib as fspconv_cc
from .utils import scan2csc, tuple_prod, spscan2csc,\
masked_concatenate, dtype2token, typed_randn
from .linears import LinearBase
__all__ = ['SPConv']
class SPConv(LinearBase):
'''
Convolution layer.
Args:
        weight (ndarray): dimensions are arranged as\
            (feature_out, feature_in, kernel_x, ...), in 'F' order.
        bias (1darray): length of num_feature_out.
        strides (tuple, default=(1,1,...)): displacements for convolutions.
        boundary ('P'|'O', default='P'): boundary type,
            * 'P', periodic boundary condition.
* 'O', open boundary condition.
is_unitary (bool, default=False): keep unitary if True,\
here, unitary is defined in the map `U: img_in -> feature_out`.
var_mask (tuple<bool>, len=2, default=(True,True)):\
variable mask for weight and bias.
Attributes:
        weight (ndarray): dimensions are arranged as (feature_out,\
            feature_in, kernel_x, ...), in 'F' order.
        bias (1darray): length of num_feature_out.
        strides (tuple): displacements for convolutions.
        boundary ('P'|'O'): boundary type,
            * 'P', periodic boundary condition.
* 'O', open boundary condition.
is_unitary (bool): keep unitary if True, here, unitary is defined\
in the map `U: img_in -> feature_out`.
var_mask (tuple<bool>, len=2): variable mask for weight and bias.
(Derived):
csc_indptr (1darray): column pointers for convolution matrix.
csc_indices (1darray): row indicator for input array.
weight_indices (1darray): row indicator for filter array\
(if not contiguous).
'''
__display_attrs__ = ['strides', 'boundary',
'kernel_shape', 'is_unitary', 'var_mask']
def __init__(self, input_shape, itype, weight, bias,
strides=None, boundary="P",
w_contiguous=True, var_mask=(1, 1),
is_unitary=False, acc_version='v2', **kwargs):
if isinstance(weight, tuple):
weight = 0.1 * typed_randn(kwargs.get('dtype', itype), weight)
super(SPConv, self).__init__(input_shape, itype=itype,
weight=weight, bias=bias,
var_mask=var_mask)
img_nd = self.weight.ndim - 2
if strides is None:
strides = (1,) * img_nd
self.strides = tuple(strides)
self.boundary = boundary
self.w_contiguous = w_contiguous
self.is_unitary = is_unitary
self.acc_version = acc_version
kernel_shape = self.weight.shape[2:]
self.csc_indptr, self.csc_indices, self.img_out_shape, self.offset_table = scan2csc(
kernel_shape, input_shape[-img_nd:], strides, boundary)
self.output_shape = input_shape[:-img_nd - 1] + \
(self.num_feature_out,) + self.img_out_shape
# use the correct fortran subroutine.
dtype_token = dtype2token(
np.find_common_type((self.itype, self.dtype), ()))
batch_token = '' if len(self.input_shape) - len(strides) > 1 else '1'
if not w_contiguous:
self.weight_indices = np.asarray(np.tile(np.arange(tuple_prod(
                kernel_shape), dtype='int32'), tuple_prod(self.img_out_shape)),
order='F') + 1 # pointer to filter data
func_f = eval('fspconv.forward%s_general%s' % (batch_token, dtype_token))
func_b = eval('fspconv.backward%s_general%s' % (batch_token, dtype_token))
self._fforward = lambda *args, **kwargs: func_f(
*args, weight_indices=self.weight_indices, **kwargs)
self._fbackward = lambda *args, **kwargs: func_b(
*args, weight_indices=self.weight_indices, **kwargs)
else:
self._fforward = eval('fspconv.forward%s_contiguous%s' % (batch_token, dtype_token))
self._fbackward = eval('fspconv.backward%s_contiguous%s' % (batch_token, dtype_token))
# cc operation
self._fforward_cc1 = eval('fspconv_cc.forward%s_%s%s' % (batch_token, 'v1', dtype_token))
self._fforward_cc2 = eval('fspconv_cc.forward%s_%s%s' % (batch_token, 'v2', dtype_token))
# make it unitary
self.is_unitary = is_unitary
if is_unitary:
self.be_unitary()
self.check_unitary()
@property
def img_nd(self):
'''Dimension of input image.'''
return len(self.strides)
@property
def num_feature_in(self):
'''Dimension of input feature.'''
return self.weight.shape[1]
@property
def num_feature_out(self):
'''Dimension of input feature.'''
return self.weight.shape[0]
@property
def kernel_shape(self):
return self.weight.shape[2:]
def be_unitary(self):
weight = self.weight.reshape(self.weight.shape[:2] + (-1,), order='F')
self.weight = np.asarray(
np.transpose([np.linalg.qr(weight[:, i].T)[0].T
for i in range(
self.num_feature_in)],
axes=(1, 0, 2)), order='F')
self.is_unitary = True
def check_unitary(self, tol=1e-6):
# check weight shape
if self.weight.shape[2] < self.weight.shape[0]:
raise ValueError('output shape greater than input shape error!')
# get unitary error
err = 0
for i in range(self.num_feature_in):
weight = self.weight[:, i]
err += abs(weight.dot(weight.T.conj()) -
np.eye(weight.shape[0])).mean()
        err /= self.num_feature_in
if self.is_unitary and err > tol:
raise ValueError('non-unitary matrix error, error = %s!' % err)
return err
def set_variables(self, variables):
nw = self.weight.size if self.var_mask[0] else 0
var1, var2 = variables[:nw], variables[nw:]
weight_data = self.weight.data if sps.issparse(
self.weight) else self.weight.ravel(order='F')
if self.is_unitary and self.var_mask[0]:
W = self.weight.reshape(self.weight.shape[:2] + (-1,), order='F')
dG = var1.reshape(W.shape, order='F') - W
dA = np.einsum('ijk,kjl->ijl', W.T.conj(), dG)
dA = dA - dA.T.conj()
B = np.eye(dG.shape[2])[:, None] - dA / 2
Binv = np.transpose(np.linalg.inv(
np.transpose(B, axes=(1, 0, 2))), axes=(1, 0, 2))
Y = np.einsum('ijk,kjl->ijl', W, B.T.conj())
Y = np.einsum('ijk,kjl->ijl', Y, Binv)
self.weight[...] = Y.reshape(self.weight.shape, order='F')
elif self.var_mask[0]:
weight_data[:] = var1
if self.var_mask[1]:
self.bias[:] = var2
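    # Note on the unitary branch above: with dG the requested change of the
    # flattened weight, dA = W^H dG is made anti-Hermitian, and the update
    # W <- W (I + dA/2)(I - dA/2)^{-1} is a Cayley-type retraction that keeps
    # every feature-in slice of the map unitary (cf. check_unitary).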
def forward(self, x, **kwargs):
'''
Args:
x (ndarray): (num_batch, nfi, img_in_dims), input in 'F' order.
Returns:
ndarray, (num_batch, nfo, img_out_dims), output in 'F' order.
'''
# flatten inputs/outputs
x = x.reshape(x.shape[:-self.img_nd] + (-1,), order='F')
_fltr_flatten = self.weight.reshape(
self.weight.shape[:2] + (-1,), order='F')
y = self._fforward(x, csc_indptr=self.csc_indptr,
csc_indices=self.csc_indices,
fltr_data=_fltr_flatten,
bias=self.bias,
max_nnz_row=_fltr_flatten.shape[-1])
y = y.reshape(self.output_shape, order='F')
return y
def forward_cc(self, locs, dx, y0):
# flatten inputs/outputs
_fltr_flatten = self.weight.reshape(
self.weight.shape[:2] + (-1,), order='F')
y = y0.reshape(y0.shape[:-self.img_nd]+(-1,), order='F')
if self.acc_version=='v1':
self._fforward_cc1(locs, dx, y, csc_indptr=self.csc_indptr,
csc_indices=self.csc_indices,
fltr_data=_fltr_flatten)
elif self.acc_version=='v2':
self._fforward_cc2(locs, dx, y, _fltr_flatten, self.offset_table, img_shape = self.input_shape[-self.img_nd:],
boundary=1 if self.boundary=='P' else 0, kernel_shape = self.weight.shape[-self.img_nd:])
else:
raise NotImplementedError()
y = y.reshape(self.output_shape, order='F')
return y
def backward(self, xy, dy, **kwargs):
'''
Args:
xy ((ndarray, ndarray)):
* x -> (num_batch, nfi, img_in_dims), input in 'F' order.
* y -> (num_batch, nfo, img_out_dims), output in 'F' order.
dy (ndarray): (num_batch, nfo, img_out_dims),\
gradient of output in 'F' order.
mask (booleans): (do_xgrad, do_wgrad, do_bgrad).
Returns:
tuple(1darray, ndarray): dw, dx
'''
x, y = xy
xpre = x.shape[:-self.img_nd]
ypre = xpre[:-1] + (self.num_feature_out,)
do_xgrad = True
mask = self.var_mask
# flatten inputs/outputs
x = x.reshape(xpre + (-1,), order='F')
dy = dy.reshape(ypre + (-1,), order='F')
_fltr_flatten = self.weight.reshape(
self.weight.shape[:2] + (-1,), order='F')
dx, dweight, dbias = self._fbackward(dy,
x, self.csc_indptr, self.csc_indices,
fltr_data=_fltr_flatten,
do_xgrad=do_xgrad,
do_wgrad=mask[0],
do_bgrad=mask[1],
max_nnz_row=_fltr_flatten.shape[-1])
return masked_concatenate([dweight.ravel(order='F'), dbias], mask),\
dx.reshape(self.input_shape, order='F')
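# Minimal usage sketch (illustrative only; the shapes below are assumptions,
# not taken from this file):
#   layer = SPConv(input_shape=(100, 1, 4, 4), itype='float64',
#                  weight=(4, 1, 3, 3), bias=np.zeros(4), strides=(1, 1))
#   y = layer.forward(x)                    # x: (100, 1, 4, 4), 'F' ordered
#   dvar, dx = layer.backward((x, y), dy)   # flattened variable gradient, input gradient
# Passing a tuple as `weight` draws a random kernel of that shape in __init__.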
class SPSP(SPConv):
'''
Attributes:
        input_shape (tuple): (batch, feature_in, img_x, img_y, ...)\
            or (feature_in, img_x, img_y, ...)
cscmat (csc_matrix): with row indices (feature_in, img_x, img_y, ...),\
and column indices (feature_out, img_x', img_y', ...)
bias (1darray): (feature_out), in fortran order.
        strides (tuple): displacements for convolutions.
Attributes (Derived):
csc_indptr (1darray): column pointers for convolution matrix.
csc_indices (1darray): row indicator for input array.
weight_indices (1darray): row indicator for filter array\
(if not contiguous).
'''
def __init__(self, input_shape, itype, cscmat, bias, strides=None,
var_mask=(1, 1)):
from .lib.spsp import lib as fspsp
self.cscmat = cscmat
self.bias = bias
self.var_mask = var_mask
self.strides = tuple(strides)
img_nd = len(self.strides)
if strides is None:
strides = (1,) * img_nd
self.boundary = 'P'
if tuple_prod(input_shape[1:]) != cscmat.shape[0]:
raise ValueError('csc matrix input shape mismatch!\
%s get, but %s desired.' % (
                cscmat.shape[0], tuple_prod(input_shape[1:])))
# self.csc_indptr, self.csc_indices,
img_in_shape = input_shape[2:]
self.csc_indptr, self.csc_indices, self.img_out_shape = spscan2csc(
kernel_shape, input_shape[-img_nd:], strides, boundary)
self.img_out_shape = tuple(
[img_is // stride for img_is, stride in zip(
img_in_shape, strides)])
output_shape = input_shape[:1] + \
(self.num_feature_out,) + self.img_out_shape
super(SPSP, self).__init__(input_shape, output_shape, itype=itype)
if self.num_feature_out * tuple_prod(self.img_out_shape
) != cscmat.shape[1]:
raise ValueError('csc matrix output shape mismatch! \
%s get, but %s desired.' % (
                cscmat.shape[1],
                self.num_feature_out * tuple_prod(self.img_out_shape)))
# use the correct fortran subroutine.
dtype_token = dtype2token(
np.find_common_type((self.itype, self.dtype), ()))
# select function
self._fforward = eval('fspsp.forward_conv%s' % dtype_token)
self._fbackward = eval('fspsp.backward_conv%s' % dtype_token)
@property
def img_nd(self):
'''Dimension of input image.'''
return len(self.strides)
@property
def num_feature_in(self):
'''Dimension of input feature.'''
return self.input.shape[1]
@property
def num_feature_out(self):
'''Dimension of input feature.'''
return self.bias.shape[0]
def forward(self, x, **kwargs):
x = x.reshape(xpre + (-1,), order='F')
y = self._fforward(x, csc_indptr=self.csc_indptr,
csc_indices=self.csc_indices,
fltr_data=_fltr_flatten,
bias=self.bias, max_nnz_row=_fltr_flatten.shape[-1])
y = y.reshape(self.output_shape, order='F')
return y
def backward(self, xy, dy, **kwargs):
x = x.reshape(xpre + (-1,), order='F')
dy = dy.reshape(ypre + (-1,), order='F')
mask = self.var_mask
dx, dweight, dbias =\
self._fbackward(dy, x,
self.csc_indptr, self.csc_indices,
fltr_data=_fltr_flatten,
do_xgrad=True,
do_wgrad=mask[0],
do_bgrad=mask[1],
max_nnz_row=_fltr_flatten.shape[-1])
return masked_concatenate([dweight.ravel(order='F'),
dbias], mask),\
dx.reshape(self.input_shape, order='F')
class SPConvProd(LinearBase):
'''
Convolutional product layer, the version with variables.
'''
__display_attrs__ = ['strides', 'boundary', 'kernel_shape', 'var_mask']
def __init__(self, input_shape, dtype, weight,
bias, strides=None, boundary='O', var_mask=(1, 1), **kwargs):
super(SPConvProd, self).__init__(input_shape, dtype=dtype,
weight=weight, bias=bias,
var_mask=var_mask)
self.boundary = boundary
img_nd = self.weight.ndim - 2
if strides is None:
strides = (1,) * img_nd
self.strides = strides
kernel_shape = self.weight.shape[2:]
img_in_shape = input_shape[-img_nd:]
self.csc_indptr, self.csc_indices, self.img_out_shape, offset_table = scan2csc(
kernel_shape, img_in_shape, strides=strides, boundary=boundary)
output_shape = input_shape[:-img_nd] + self.img_out_shape
# use the correct fortran subroutine.
dtype_token = dtype2token(
np.find_common_type((self.itype, self.dtype), ()))
# use the correct function
self._fforward = eval('fspconvprod.forward_%s' % dtype_token)
self._fbackward = eval('fspconvprod.backward_%s' % dtype_token)
@property
def img_nd(self):
return len(self.strides)
def forward(self, x, **kwargs):
'''
Args:
x (ndarray): (num_batch, nfi, img_in_dims), input in 'F' order.
Returns:
ndarray, (num_batch, nfo, img_out_dims), output in 'F' order.
'''
x_nd, img_nd = x.ndim, self.img_nd
img_dim = tuple_prod(self.input_shape[-img_nd:])
y = self._fforward(x.reshape([-1, img_dim], order='F'),
csc_indptr=self.csc_indptr,
weight=self.weight,
csc_indices=self.csc_indices
).reshape(self.output_shape, order='F')
return y
def backward(self, xy, dy, **kwargs):
'''It will shed a mask on dy'''
x, y = xy
x_nd, img_nd = x.ndim, self.img_nd
img_dim_in = tuple_prod(self.input_shape[-img_nd:])
img_dim_out = tuple_prod(self.output_shape[-img_nd:])
dx = self._fbackward(x=x.reshape([-1, img_dim_in], order='F'),
dy=dy.reshape([-1, img_dim_out], order='F'),
y=y.reshape([-1, img_dim_out], order='F'),
weight=self.weight, csc_indptr=self.csc_indptr,
csc_indices=self.csc_indices
).reshape(self.input_shape, order='F')
return EMPTY_VAR, dx
| mit | -5,701,644,049,804,180,000 | 38.99763 | 122 | 0.532792 | false |
endolith/scipy | scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py | 12 | 24893 | import time
import numpy as np
from scipy.sparse.linalg import LinearOperator
from .._differentiable_functions import VectorFunction
from .._constraints import (
NonlinearConstraint, LinearConstraint, PreparedConstraint, strict_bounds)
from .._hessian_update_strategy import BFGS
from ..optimize import OptimizeResult
from .._differentiable_functions import ScalarFunction
from .equality_constrained_sqp import equality_constrained_sqp
from .canonical_constraint import (CanonicalConstraint,
initial_constraints_as_canonical)
from .tr_interior_point import tr_interior_point
from .report import BasicReport, SQPReport, IPReport
TERMINATION_MESSAGES = {
0: "The maximum number of function evaluations is exceeded.",
1: "`gtol` termination condition is satisfied.",
2: "`xtol` termination condition is satisfied.",
3: "`callback` function requested termination."
}
class HessianLinearOperator:
"""Build LinearOperator from hessp"""
def __init__(self, hessp, n):
self.hessp = hessp
self.n = n
def __call__(self, x, *args):
def matvec(p):
return self.hessp(x, p, *args)
return LinearOperator((self.n, self.n), matvec=matvec)
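# Illustrative sketch (not part of SciPy's public API): how a user-supplied
# ``hessp`` callable gets wrapped.  Kept as a comment so module behaviour is
# unchanged; the names below are made up for the example.
#
#     H = np.array([[2.0, 0.5], [0.5, 1.0]])
#     hessp = lambda x, p: H.dot(p)          # Hessian of 0.5 * x.T H x
#     op = HessianLinearOperator(hessp, 2)(np.zeros(2))
#     op.matvec(np.array([1.0, -1.0]))       # same as H @ [1, -1]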
class LagrangianHessian:
"""The Hessian of the Lagrangian as LinearOperator.
The Lagrangian is computed as the objective function plus all the
constraints multiplied with some numbers (Lagrange multipliers).
"""
def __init__(self, n, objective_hess, constraints_hess):
self.n = n
self.objective_hess = objective_hess
self.constraints_hess = constraints_hess
def __call__(self, x, v_eq=np.empty(0), v_ineq=np.empty(0)):
H_objective = self.objective_hess(x)
H_constraints = self.constraints_hess(x, v_eq, v_ineq)
def matvec(p):
return H_objective.dot(p) + H_constraints.dot(p)
return LinearOperator((self.n, self.n), matvec)
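# Sketch (illustrative only): the returned operator applies
# H_objective(x) @ p + H_constraints(x, v_eq, v_ineq) @ p without ever forming
# the dense Lagrangian Hessian, e.g.
#
#     lagr_hess = LagrangianHessian(n, objective.hess, canonical.hess)
#     Hp = lagr_hess(x, v_eq, v_ineq).matvec(p)
#
# ``objective`` and ``canonical`` are the objects built later in
# ``_minimize_trustregion_constr``; they appear here only for orientation.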
def update_state_sqp(state, x, last_iteration_failed, objective, prepared_constraints,
start_time, tr_radius, constr_penalty, cg_info):
state.nit += 1
state.nfev = objective.nfev
state.njev = objective.ngev
state.nhev = objective.nhev
state.constr_nfev = [c.fun.nfev if isinstance(c.fun, VectorFunction) else 0
for c in prepared_constraints]
state.constr_njev = [c.fun.njev if isinstance(c.fun, VectorFunction) else 0
for c in prepared_constraints]
state.constr_nhev = [c.fun.nhev if isinstance(c.fun, VectorFunction) else 0
for c in prepared_constraints]
if not last_iteration_failed:
state.x = x
state.fun = objective.f
state.grad = objective.g
state.v = [c.fun.v for c in prepared_constraints]
state.constr = [c.fun.f for c in prepared_constraints]
state.jac = [c.fun.J for c in prepared_constraints]
# Compute Lagrangian Gradient
state.lagrangian_grad = np.copy(state.grad)
for c in prepared_constraints:
state.lagrangian_grad += c.fun.J.T.dot(c.fun.v)
state.optimality = np.linalg.norm(state.lagrangian_grad, np.inf)
# Compute maximum constraint violation
state.constr_violation = 0
for i in range(len(prepared_constraints)):
lb, ub = prepared_constraints[i].bounds
c = state.constr[i]
state.constr_violation = np.max([state.constr_violation,
np.max(lb - c),
np.max(c - ub)])
state.execution_time = time.time() - start_time
state.tr_radius = tr_radius
state.constr_penalty = constr_penalty
state.cg_niter += cg_info["niter"]
state.cg_stop_cond = cg_info["stop_cond"]
return state
def update_state_ip(state, x, last_iteration_failed, objective,
prepared_constraints, start_time,
tr_radius, constr_penalty, cg_info,
barrier_parameter, barrier_tolerance):
state = update_state_sqp(state, x, last_iteration_failed, objective,
prepared_constraints, start_time, tr_radius,
constr_penalty, cg_info)
state.barrier_parameter = barrier_parameter
state.barrier_tolerance = barrier_tolerance
return state
def _minimize_trustregion_constr(fun, x0, args, grad,
hess, hessp, bounds, constraints,
xtol=1e-8, gtol=1e-8,
barrier_tol=1e-8,
sparse_jacobian=None,
callback=None, maxiter=1000,
verbose=0, finite_diff_rel_step=None,
initial_constr_penalty=1.0, initial_tr_radius=1.0,
initial_barrier_parameter=0.1,
initial_barrier_tolerance=0.1,
factorization_method=None,
disp=False):
"""Minimize a scalar function subject to constraints.
Parameters
----------
gtol : float, optional
Tolerance for termination by the norm of the Lagrangian gradient.
The algorithm will terminate when both the infinity norm (i.e., max
abs value) of the Lagrangian gradient and the constraint violation
are smaller than ``gtol``. Default is 1e-8.
xtol : float, optional
Tolerance for termination by the change of the independent variable.
The algorithm will terminate when ``tr_radius < xtol``, where
``tr_radius`` is the radius of the trust region used in the algorithm.
Default is 1e-8.
barrier_tol : float, optional
Threshold on the barrier parameter for the algorithm termination.
When inequality constraints are present, the algorithm will terminate
only when the barrier parameter is less than `barrier_tol`.
Default is 1e-8.
sparse_jacobian : {bool, None}, optional
Determines how to represent Jacobians of the constraints. If bool,
then Jacobians of all the constraints will be converted to the
corresponding format. If None (default), then Jacobians won't be
converted, but the algorithm can proceed only if they all have the
same format.
    initial_tr_radius : float, optional
Initial trust radius. The trust radius gives the maximum distance
between solution points in consecutive iterations. It reflects the
trust the algorithm puts in the local approximation of the optimization
problem. For an accurate local approximation the trust-region should be
large and for an approximation valid only close to the current point it
should be a small one. The trust radius is automatically updated throughout
the optimization process, with ``initial_tr_radius`` being its initial value.
Default is 1 (recommended in [1]_, p. 19).
initial_constr_penalty : float, optional
Initial constraints penalty parameter. The penalty parameter is used for
balancing the requirements of decreasing the objective function
and satisfying the constraints. It is used for defining the merit function:
``merit_function(x) = fun(x) + constr_penalty * constr_norm_l2(x)``,
where ``constr_norm_l2(x)`` is the l2 norm of a vector containing all
the constraints. The merit function is used for accepting or rejecting
trial points and ``constr_penalty`` weights the two conflicting goals
of reducing objective function and constraints. The penalty is automatically
updated throughout the optimization process, with
``initial_constr_penalty`` being its initial value. Default is 1
(recommended in [1]_, p 19).
    initial_barrier_parameter, initial_barrier_tolerance : float, optional
Initial barrier parameter and initial tolerance for the barrier subproblem.
Both are used only when inequality constraints are present. For dealing with
optimization problems ``min_x f(x)`` subject to inequality constraints
``c(x) <= 0`` the algorithm introduces slack variables, solving the problem
``min_(x,s) f(x) + barrier_parameter*sum(ln(s))`` subject to the equality
constraints ``c(x) + s = 0`` instead of the original problem. This subproblem
is solved for decreasing values of ``barrier_parameter`` and with decreasing
tolerances for the termination, starting with ``initial_barrier_parameter``
for the barrier parameter and ``initial_barrier_tolerance`` for the
barrier tolerance. Default is 0.1 for both values (recommended in [1]_ p. 19).
Also note that ``barrier_parameter`` and ``barrier_tolerance`` are updated
with the same prefactor.
factorization_method : string or None, optional
Method to factorize the Jacobian of the constraints. Use None (default)
for the auto selection or one of:
- 'NormalEquation' (requires scikit-sparse)
- 'AugmentedSystem'
- 'QRFactorization'
- 'SVDFactorization'
The methods 'NormalEquation' and 'AugmentedSystem' can be used only
with sparse constraints. The projections required by the algorithm
        will be computed using, respectively, the normal equation and the
augmented system approaches explained in [1]_. 'NormalEquation'
computes the Cholesky factorization of ``A A.T`` and 'AugmentedSystem'
performs the LU factorization of an augmented system. They usually
provide similar results. 'AugmentedSystem' is used by default for
sparse matrices.
The methods 'QRFactorization' and 'SVDFactorization' can be used
only with dense constraints. They compute the required projections
using, respectively, QR and SVD factorizations. The 'SVDFactorization'
method can cope with Jacobian matrices with deficient row rank and will
be used whenever other factorization methods fail (which may imply the
conversion of sparse matrices to a dense format when required).
By default, 'QRFactorization' is used for dense matrices.
finite_diff_rel_step : None or array_like, optional
Relative step size for the finite difference approximation.
maxiter : int, optional
Maximum number of algorithm iterations. Default is 1000.
    verbose : {0, 1, 2, 3}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations.
* 3 : display progress during iterations (more complete report).
disp : bool, optional
        If True, then `verbose` will be set to 1 if it was 0. Default is False.
Returns
-------
`OptimizeResult` with the fields documented below. Note the following:
1. All values corresponding to the constraints are ordered as they
were passed to the solver. And values corresponding to `bounds`
constraints are put *after* other constraints.
2. All numbers of function, Jacobian or Hessian evaluations correspond
to numbers of actual Python function calls. It means, for example,
that if a Jacobian is estimated by finite differences, then the
number of Jacobian evaluations will be zero and the number of
function evaluations will be incremented by all calls during the
finite difference estimation.
x : ndarray, shape (n,)
Solution found.
optimality : float
Infinity norm of the Lagrangian gradient at the solution.
constr_violation : float
Maximum constraint violation at the solution.
fun : float
Objective function at the solution.
grad : ndarray, shape (n,)
Gradient of the objective function at the solution.
lagrangian_grad : ndarray, shape (n,)
Gradient of the Lagrangian function at the solution.
nit : int
Total number of iterations.
nfev : integer
Number of the objective function evaluations.
njev : integer
Number of the objective function gradient evaluations.
nhev : integer
Number of the objective function Hessian evaluations.
cg_niter : int
Total number of the conjugate gradient method iterations.
method : {'equality_constrained_sqp', 'tr_interior_point'}
Optimization method used.
constr : list of ndarray
List of constraint values at the solution.
jac : list of {ndarray, sparse matrix}
List of the Jacobian matrices of the constraints at the solution.
v : list of ndarray
List of the Lagrange multipliers for the constraints at the solution.
For an inequality constraint a positive multiplier means that the upper
bound is active, a negative multiplier means that the lower bound is
active and if a multiplier is zero it means the constraint is not
active.
constr_nfev : list of int
Number of constraint evaluations for each of the constraints.
constr_njev : list of int
Number of Jacobian matrix evaluations for each of the constraints.
constr_nhev : list of int
Number of Hessian evaluations for each of the constraints.
tr_radius : float
Radius of the trust region at the last iteration.
constr_penalty : float
Penalty parameter at the last iteration, see `initial_constr_penalty`.
barrier_tolerance : float
Tolerance for the barrier subproblem at the last iteration.
Only for problems with inequality constraints.
barrier_parameter : float
Barrier parameter at the last iteration. Only for problems
with inequality constraints.
execution_time : float
Total execution time.
message : str
Termination message.
status : {0, 1, 2, 3}
Termination status:
* 0 : The maximum number of function evaluations is exceeded.
* 1 : `gtol` termination condition is satisfied.
* 2 : `xtol` termination condition is satisfied.
* 3 : `callback` function requested termination.
cg_stop_cond : int
Reason for CG subproblem termination at the last iteration:
* 0 : CG subproblem not evaluated.
* 1 : Iteration limit was reached.
* 2 : Reached the trust-region boundary.
* 3 : Negative curvature detected.
* 4 : Tolerance was satisfied.
References
----------
.. [1] Conn, A. R., Gould, N. I., & Toint, P. L.
Trust region methods. 2000. Siam. pp. 19.
"""
x0 = np.atleast_1d(x0).astype(float)
n_vars = np.size(x0)
if hess is None:
if callable(hessp):
hess = HessianLinearOperator(hessp, n_vars)
else:
hess = BFGS()
if disp and verbose == 0:
verbose = 1
if bounds is not None:
finite_diff_bounds = strict_bounds(bounds.lb, bounds.ub,
bounds.keep_feasible, n_vars)
else:
finite_diff_bounds = (-np.inf, np.inf)
# Define Objective Function
objective = ScalarFunction(fun, x0, args, grad, hess,
finite_diff_rel_step, finite_diff_bounds)
# Put constraints in list format when needed.
if isinstance(constraints, (NonlinearConstraint, LinearConstraint)):
constraints = [constraints]
# Prepare constraints.
prepared_constraints = [
PreparedConstraint(c, x0, sparse_jacobian, finite_diff_bounds)
for c in constraints]
# Check that all constraints are either sparse or dense.
n_sparse = sum(c.fun.sparse_jacobian for c in prepared_constraints)
if 0 < n_sparse < len(prepared_constraints):
raise ValueError("All constraints must have the same kind of the "
"Jacobian --- either all sparse or all dense. "
"You can set the sparsity globally by setting "
"`sparse_jacobian` to either True of False.")
if prepared_constraints:
sparse_jacobian = n_sparse > 0
if bounds is not None:
if sparse_jacobian is None:
sparse_jacobian = True
prepared_constraints.append(PreparedConstraint(bounds, x0,
sparse_jacobian))
# Concatenate initial constraints to the canonical form.
c_eq0, c_ineq0, J_eq0, J_ineq0 = initial_constraints_as_canonical(
n_vars, prepared_constraints, sparse_jacobian)
# Prepare all canonical constraints and concatenate it into one.
canonical_all = [CanonicalConstraint.from_PreparedConstraint(c)
for c in prepared_constraints]
if len(canonical_all) == 0:
canonical = CanonicalConstraint.empty(n_vars)
elif len(canonical_all) == 1:
canonical = canonical_all[0]
else:
canonical = CanonicalConstraint.concatenate(canonical_all,
sparse_jacobian)
# Generate the Hessian of the Lagrangian.
lagrangian_hess = LagrangianHessian(n_vars, objective.hess, canonical.hess)
# Choose appropriate method
if canonical.n_ineq == 0:
method = 'equality_constrained_sqp'
else:
method = 'tr_interior_point'
# Construct OptimizeResult
state = OptimizeResult(
nit=0, nfev=0, njev=0, nhev=0,
cg_niter=0, cg_stop_cond=0,
fun=objective.f, grad=objective.g,
lagrangian_grad=np.copy(objective.g),
constr=[c.fun.f for c in prepared_constraints],
jac=[c.fun.J for c in prepared_constraints],
constr_nfev=[0 for c in prepared_constraints],
constr_njev=[0 for c in prepared_constraints],
constr_nhev=[0 for c in prepared_constraints],
v=[c.fun.v for c in prepared_constraints],
method=method)
# Start counting
start_time = time.time()
# Define stop criteria
if method == 'equality_constrained_sqp':
def stop_criteria(state, x, last_iteration_failed,
optimality, constr_violation,
tr_radius, constr_penalty, cg_info):
state = update_state_sqp(state, x, last_iteration_failed,
objective, prepared_constraints,
start_time, tr_radius, constr_penalty,
cg_info)
if verbose == 2:
BasicReport.print_iteration(state.nit,
state.nfev,
state.cg_niter,
state.fun,
state.tr_radius,
state.optimality,
state.constr_violation)
elif verbose > 2:
SQPReport.print_iteration(state.nit,
state.nfev,
state.cg_niter,
state.fun,
state.tr_radius,
state.optimality,
state.constr_violation,
state.constr_penalty,
state.cg_stop_cond)
state.status = None
state.niter = state.nit # Alias for callback (backward-compatibility)
if callback is not None and callback(np.copy(state.x), state):
state.status = 3
elif state.optimality < gtol and state.constr_violation < gtol:
state.status = 1
elif state.tr_radius < xtol:
state.status = 2
elif state.nit >= maxiter:
state.status = 0
return state.status in (0, 1, 2, 3)
elif method == 'tr_interior_point':
def stop_criteria(state, x, last_iteration_failed, tr_radius,
constr_penalty, cg_info, barrier_parameter,
barrier_tolerance):
state = update_state_ip(state, x, last_iteration_failed,
objective, prepared_constraints,
start_time, tr_radius, constr_penalty,
cg_info, barrier_parameter, barrier_tolerance)
if verbose == 2:
BasicReport.print_iteration(state.nit,
state.nfev,
state.cg_niter,
state.fun,
state.tr_radius,
state.optimality,
state.constr_violation)
elif verbose > 2:
IPReport.print_iteration(state.nit,
state.nfev,
state.cg_niter,
state.fun,
state.tr_radius,
state.optimality,
state.constr_violation,
state.constr_penalty,
state.barrier_parameter,
state.cg_stop_cond)
state.status = None
state.niter = state.nit # Alias for callback (backward compatibility)
if callback is not None and callback(np.copy(state.x), state):
state.status = 3
elif state.optimality < gtol and state.constr_violation < gtol:
state.status = 1
elif (state.tr_radius < xtol
and state.barrier_parameter < barrier_tol):
state.status = 2
elif state.nit >= maxiter:
state.status = 0
return state.status in (0, 1, 2, 3)
if verbose == 2:
BasicReport.print_header()
elif verbose > 2:
if method == 'equality_constrained_sqp':
SQPReport.print_header()
elif method == 'tr_interior_point':
IPReport.print_header()
# Call inferior function to do the optimization
if method == 'equality_constrained_sqp':
def fun_and_constr(x):
f = objective.fun(x)
c_eq, _ = canonical.fun(x)
return f, c_eq
def grad_and_jac(x):
g = objective.grad(x)
J_eq, _ = canonical.jac(x)
return g, J_eq
_, result = equality_constrained_sqp(
fun_and_constr, grad_and_jac, lagrangian_hess,
x0, objective.f, objective.g,
c_eq0, J_eq0,
stop_criteria, state,
initial_constr_penalty, initial_tr_radius,
factorization_method)
elif method == 'tr_interior_point':
_, result = tr_interior_point(
objective.fun, objective.grad, lagrangian_hess,
n_vars, canonical.n_ineq, canonical.n_eq,
canonical.fun, canonical.jac,
x0, objective.f, objective.g,
c_ineq0, J_ineq0, c_eq0, J_eq0,
stop_criteria,
canonical.keep_feasible,
xtol, state, initial_barrier_parameter,
initial_barrier_tolerance,
initial_constr_penalty, initial_tr_radius,
factorization_method)
# Status 3 occurs when the callback function requests termination,
# this is assumed to not be a success.
result.success = True if result.status in (1, 2) else False
result.message = TERMINATION_MESSAGES[result.status]
# Alias (for backward compatibility with 1.1.0)
result.niter = result.nit
if verbose == 2:
BasicReport.print_footer()
elif verbose > 2:
if method == 'equality_constrained_sqp':
SQPReport.print_footer()
elif method == 'tr_interior_point':
IPReport.print_footer()
if verbose >= 1:
print(result.message)
print("Number of iterations: {}, function evaluations: {}, "
"CG iterations: {}, optimality: {:.2e}, "
"constraint violation: {:.2e}, execution time: {:4.2} s."
.format(result.nit, result.nfev, result.cg_niter,
result.optimality, result.constr_violation,
result.execution_time))
return result
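# Example (hedged): this function is normally reached through the public
# ``scipy.optimize.minimize`` interface with ``method='trust-constr'``.  A
# typical call, kept as a comment because this module is not meant to be run
# directly, would look like:
#
#     import numpy as np
#     from scipy.optimize import minimize, NonlinearConstraint
#
#     fun = lambda x: (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2
#     con = NonlinearConstraint(lambda x: x[0] ** 2 + x[1] ** 2, -np.inf, 4.0)
#     res = minimize(fun, np.zeros(2), method='trust-constr',
#                    constraints=[con], options={'verbose': 1})
#     print(res.x, res.optimality, res.constr_violation)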
| bsd-3-clause | -481,272,644,859,089,400 | 44.675229 | 86 | 0.59571 | false |
schenc3/InteractiveROSETTA | InteractiveROSETTA/scripts/biotools.py | 1 | 36895 | ### AUTHOR: Evan H. Baugh
### Affiliation: New York University
import wx
import wx.grid
import wx.lib.scrolledpanel
import os
import os.path
import time
import platform
import multiprocessing
import Bio.PDB
import webbrowser
import datetime
from threading import Thread
from tools import *
from io_tools import *
class BioToolsPanel(wx.lib.scrolledpanel.ScrolledPanel):
### parent: A handle to the parent of this panel (a panel in the protocols window)
### The protocols frame is the grandparent
### W: The width of protocols frame, you should not need to change the sizes of this panel
### H: The height of the protocols frame, again do not attempt to change the size osof this panel
### W, H are there is case you need the values
def __init__(self, parent, W, H):
wx.lib.scrolledpanel.ScrolledPanel.__init__(self, parent, id=-1, pos=(10, 60), size=(340, H-330), name="ProtFixbb")
winh = H-330
self.SetBackgroundColour("#333333")
self.parent = parent
# This is the message that is displayed underneath the primary sequence
self.runMsg = "Running an example job..."
# Module Title
if (platform.system() == "Windows"):
self.lblProt = wx.StaticText(self, -1, "Biological Tools", (25, 15), (270, 25), wx.ALIGN_CENTRE)
self.lblProt.SetFont(wx.Font(12, wx.DEFAULT, wx.ITALIC, wx.BOLD))
elif (platform.system() == "Darwin"):
self.lblProt = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/biotools/lblTitle.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(25, 15), size=(270, 25))
else:
self.lblProt = wx.StaticText(self, -1, "Template Module", (70, 15), style=wx.ALIGN_CENTRE)
self.lblProt.SetFont(wx.Font(12, wx.DEFAULT, wx.ITALIC, wx.BOLD))
resizeTextControlForUNIX(self.lblProt, 0, self.GetSize()[0])
self.lblProt.SetForegroundColour("#FFFFFF")
# Help button, shows the documentation in the help folder
if (platform.system() == "Darwin"):
# You shouldn't need to change the location of this image because it will use the default one
self.HelpBtn = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/HelpBtn.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(295, 10), size=(25, 25))
else:
self.HelpBtn = wx.Button(self, id=-1, label="?", pos=(295, 10), size=(25, 25))
self.HelpBtn.SetForegroundColour("#0000FF")
self.HelpBtn.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
self.HelpBtn.Bind(wx.EVT_BUTTON, self.showHelp)
self.HelpBtn.SetToolTipString("Display the help file for this window")
# Brief description of the module
if (platform.system() == "Windows"):
self.lblInst = wx.StaticText(self, -1, "Here is a short description", (0, 45), (320, 25), wx.ALIGN_CENTRE)
self.lblInst.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
elif (platform.system() == "Darwin"):
self.lblInst = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/biotools/lblInst.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(0, 45), size=(320, 25))
else:
self.lblInst = wx.StaticText(self, -1, "Here is a short description", (5, 45), style=wx.ALIGN_CENTRE)
self.lblInst.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
resizeTextControlForUNIX(self.lblInst, 0, self.GetSize()[0])
self.lblInst.SetForegroundColour("#FFFFFF")
### =================================================================================
### INSERT YOUR CONTROLS HERE
### ...
### ...
### ...
### =================================================================================
### =================================================================================
### HERE ARE SOME EXAMPLES OF CONTROLS FOR YOUR REFERENCE:
### DELETE THIS BLOCK WHEN YOU ARE DOING THIS FOR REAL
### TEXT LABEL, DOES NOT ALLOW USER INPUT
if (platform.system() == "Windows"):
self.lblLabel = wx.StaticText(self, -1, "I am a label", pos=(10, 80), size=(320, 20), style=wx.ALIGN_LEFT)
self.lblLabel.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
elif (platform.system() == "Darwin"):
self.lblLabel = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/biotools/lblLabel.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(10, 80), size=(320, 20))
else:
self.lblLabel = wx.StaticText(self, -1, "I am a label", pos=(10, 80), style=wx.ALIGN_LEFT)
self.lblLabel.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
self.lblLabel.SetForegroundColour("#FFFFFF")
### TEXT ENTRY BOX
self.txtTextBox = wx.TextCtrl(self, -1, pos=(0, 110), size=(320, 25), style=wx.TE_MULTILINE)
self.txtTextBox.SetValue("Enter some text here")
self.txtTextBox.SetToolTipString("I am a Text Box")
### STANDARD TEXT BUTTON, THIS EXAMPLE SETS UP A FUNCTION TO TOGGLE BETWEEN STATES
if (platform.system() == "Darwin"):
self.btnButton = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/biotools/btnButton_1.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(110, 140), size=(100, 25))
else:
self.btnButton = wx.Button(self, id=-1, label="State 1", pos=(110, 140), size=(100, 25))
self.btnButton.SetForegroundColour("#000000")
self.btnButton.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
self.btnButton.Bind(wx.EVT_BUTTON, self.toggleExample)
self.btnButton.SetToolTipString("I am a button")
self.toggleState = 1
### BITMAP BUTTON, DISPLAYS A MESSAGE DIALOG WHEN CLICKED
self.btnBitmapButton = wx.BitmapButton(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/colorwheel.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(110, 170), size=(100, 25))
self.btnBitmapButton.Bind(wx.EVT_BUTTON, self.bitmapButtonClick)
self.btnBitmapButton.SetToolTipString("I am a bitmap button")
### GRID CONTROL, DISPLAYS VARIOUS SELECTIONS IN THE ACTIVATE FUNCTION
self.grdGrid = wx.grid.Grid(self)
self.grdGrid.CreateGrid(0, 3)
self.grdGrid.SetSize((320, 200))
self.grdGrid.SetPosition((0, 200))
self.grdGrid.SetLabelFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
self.grdGrid.DisableDragColSize()
self.grdGrid.DisableDragRowSize()
self.grdGrid.SetColLabelValue(0, "Residues")
self.grdGrid.SetColLabelValue(1, "Chains")
self.grdGrid.SetColLabelValue(2, "Models")
self.grdGrid.SetRowLabelSize(20)
self.grdGrid.SetColSize(0, 100)
self.grdGrid.SetColSize(1, 100)
self.grdGrid.SetColSize(2, 100)
### STANDARD TEXT BUTTON, THIS EXAMPLE SETS UP A FUNCTION TO TOGGLE BETWEEN STATES
if (platform.system() == "Darwin"):
self.btnServer = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/biotools/btnServer_Off.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(110, 430), size=(100, 25))
else:
self.btnServer = wx.Button(self, id=-1, label="Server Off", pos=(110, 430), size=(100, 25))
self.btnServer.SetForegroundColour("#000000")
self.btnServer.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
self.btnServer.Bind(wx.EVT_BUTTON, self.toggleServer)
self.btnServer.SetToolTipString("I am a server toggle button")
self.serverOn = False
### END EXAMPLE CONTROLS BLOCK
### END DELETION POINT
### =================================================================================
# This is the submit button
# It calls the function "submitClick"
# You can change its coordinates but make sure it is the farthest thing down the panel
# so the scrolled panel gets implemented correctly
if (platform.system() == "Darwin"):
self.btnSubmit = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/biotools/btnSubmit.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(110, 460), size=(100, 25))
else:
self.btnSubmit = wx.Button(self, id=-1, label="Submit!", pos=(110, 460), size=(100, 25))
self.btnSubmit.SetForegroundColour("#000000")
self.btnSubmit.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
self.btnSubmit.Bind(wx.EVT_BUTTON, self.submitClick)
self.btnSubmit.SetToolTipString("Submit the job")
self.buttonState = "Submit!"
#lastControl = <insert_your_lowest_control_here>
# lastControl needs to be specified so the scrollable area gets calculated properly
lastControl = self.btnSubmit
self.scrollh = lastControl.GetPosition()[1] + lastControl.GetSize()[1] + 5
self.SetScrollbars(1, 1, 320, self.scrollh)
self.winscrollpos = 0
self.Bind(wx.EVT_SCROLLWIN, self.scrolled)
# This function displays the help page for the module
# It should be located in help/biotools.html
def showHelp(self, event):
# Open the help page
if (platform.system() == "Darwin"):
try:
browser = webbrowser.get("Safari")
except:
print "Could not load Safari! The help files are located at " + self.helpdir
return
browser.open(self.parent.parent.scriptdir + "/help/index.html")
else:
webbrowser.open(self.parent.parent.scriptdir + "/help/index.html")
def scrolled(self, event):
self.winscrollpos = self.GetScrollPos(wx.VERTICAL)
event.Skip()
def activate(self):
# This function gets called everytime the protocol gains focuses
# Here is some code that stores current selection information
# You can keep it and use it to your advantage if you so choose
selectiondata = self.seqWin.getSelectedResidues()
self.selectedResidues = []
self.selectedChains = []
self.selectedModels = []
topLefts = self.seqWin.SeqViewer.GetSelectionBlockTopLeft()
bottomRights = self.seqWin.SeqViewer.GetSelectionBlockBottomRight()
for i in range(0, len(topLefts)):
for r in range(topLefts[i][0], bottomRights[i][0]+1):
for c in range(topLefts[i][1], bottomRights[i][1]+1):
poseindx = self.seqWin.getPoseIndex(r)
resi = self.seqWin.indxToSeqPos[r][c][1] # Integer returned
temp = self.seqWin.IDs[r]
model = temp[0:len(temp)-2]
chain = temp[len(temp)-1]
resn = self.seqWin.sequences[r][c]
self.selectedResidues.append([resn, resi, model, chain, r, c, poseindx])
if (not(temp in self.selectedChains)):
self.selectedChains.append(temp)
if (not(model in self.selectedModels)):
self.selectedModels.append(model)
### Each row of self.selectedResidues has the following information:
### 0: resn = One-letter amino acid code
### 1: resi = PDB index of residue
### 2: model = The name of the whole structure the residue is in
### 3: chain = The one letter chain ID
### 4: r = The row in the Sequence Viewer
### 5: c = The column in the Sequence Viewer
### 6: poseindx = The location of the BioPython structure object in self.seqWin.poses
### =================================================================================
# THIS UPDATES THE EXAMPLE GRID WITH THE SELECTION INFORMATION
# YOU MAY REMOVE THIS WHEN YOU REMOVE THE EXAMPLE GRID
        if (self.grdGrid.GetNumberRows() > 0):
            self.grdGrid.DeleteRows(0, self.grdGrid.GetNumberRows())
self.grdGrid.AppendRows(len(self.selectedResidues))
i = 0
for [resn, resi, model, chain, r, c, poseindx] in self.selectedResidues:
self.grdGrid.SetCellValue(i, 0, model + "|" + chain + ":" + resn + str(resi))
i += 1
i = 0
for chain in self.selectedChains:
self.grdGrid.SetCellValue(i, 1, chain)
i += 1
i = 0
for model in self.selectedModels:
self.grdGrid.SetCellValue(i, 2, model)
i += 1
### END DELETION
### =================================================================================
# You need to keep the following line here
# Otherwise, whenever you leave the protocol and then come back to it, it will
# automatically scroll to include whatever element last had the focus, which gets incredibly
# annoying for the user. The following will prevent it from moving
self.Scroll(0, self.winscrollpos)
# Gives this module a handle on the sequence window
def setSeqWin(self, seqWin):
self.seqWin = seqWin
# So the sequence window knows about what model "designed_view" really is
self.seqWin.setProtocolPanel(self)
# Gives this module a handle on the PyMOL command line
def setPyMOL(self, pymol):
self.pymol = pymol
self.cmd = pymol.cmd
self.stored = pymol.stored
# Gives this module a handle on the selection panel
def setSelectWin(self, selectWin):
self.selectWin = selectWin
self.selectWin.setProtPanel(self)
### =================================================================================
### THESE ARE EXAMPLE EVENT FUNCTIONS
### YOU MAY DELETE THESE AFTER REMOVING THE EXAMPLE CONTROLS
def toggleExample(self, event):
if (self.toggleState == 1):
self.toggleState = 2
if (platform.system() == "Darwin"):
self.btnButton.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/biotools/btnButton_2.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnButton.SetLabel("State 2")
elif (self.toggleState == 2):
self.toggleState = 3
if (platform.system() == "Darwin"):
self.btnButton.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/biotools/btnButton_3.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnButton.SetLabel("State 3")
else:
self.toggleState = 1
if (platform.system() == "Darwin"):
self.btnButton.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/biotools/btnButton_1.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnButton.SetLabel("State 1")
def bitmapButtonClick(self, event):
dlg = wx.MessageDialog(self, "This example is nice isn't it?", "Question", wx.YES_NO | wx.ICON_QUESTION | wx.CENTRE)
result = dlg.ShowModal()
if (result == wx.ID_YES):
dlg2 = wx.MessageDialog(self, "You pressed yes", "Result", wx.OK | wx.ICON_EXCLAMATION | wx.CENTRE)
dlg2.ShowModal()
dlg2.Destroy()
else:
dlg2 = wx.MessageDialog(self, "You pressed no", "Result", wx.OK | wx.ICON_EXCLAMATION | wx.CENTRE)
dlg2.ShowModal()
dlg2.Destroy()
dlg.Destroy()
def toggleServer(self, event):
if (self.serverOn):
self.serverOn = False
if (platform.system() == "Darwin"):
self.btnServer.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/biotools/btnServer_Off.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnServer.SetLabel("Server Off")
else:
self.serverOn = True
if (platform.system() == "Darwin"):
self.btnServer.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/biotools/btnServer_On.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnServer.SetLabel("Server On")
### END DELETION
### =================================================================================
# This function should be called to officially load a structure into InteractiveROSETTA
# Temporary PDBs downloaded from the Internet should be downloaded to the "sandbox" first
# To get to the sandbox, simply execute "goToSandbox()"
# On Windows, the sandbox is C:\Users\username\InteractiveROSETTA (the folder is hidden)
# On OSX, the sandbox is /Users/username/.InteractiveROSETTA
# On Linux, the sandobx is /home/username/.InteractiveROSETTA
# The sandbox gets cleaned up at the start of every session, so anything that is left there
# will get deleted the next time InteractiveROSETTA starts up
def loadPDB(self, pdbfile):
self.seqWin.PyMOLPDBLoad(1, pdbfile, "Show")
# This function cancels the submission
def cancelSubmit(self):
logInfo("Canceled " + self.protocolcode + " operation")
try:
os.remove(self.protocolcode + "input")
except:
pass
try:
os.remove(self.protocolcode + "inputtemp")
except:
pass
self.tmrSubmit.Stop()
self.seqWin.cannotDelete = False
self.enableControls()
if (platform.system() == "Darwin"):
self.btnSubmit.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/biotools/btnSubmit.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnSubmit.SetLabel("Submit!")
self.btnSubmit.SetToolTipString("Submit the job")
deleteInputFiles()
self.parent.parent.restartDaemon()
self.parent.GoBtn.Enable()
self.removeMessage()
self.buttonState = "Submit!"
# This function adds your message to the label underneath the sequence viewer
# If you want a new message at any point, set it in "self.runMsg" and call this function
def updateMessage(self):
self.seqWin.labelMsg.SetLabel(self.runMsg)
self.seqWin.labelMsg.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
self.seqWin.labelMsg.SetForegroundColour("#FFFFFF")
self.seqWin.msgQueue.append(self.runMsg)
# This function removes your message from underneath the sequence viewer
# Do not change the value of "self.runMsg" before calling this function or it will not
# be removed properly
def removeMessage(self):
# Get rid of the messages
for i in range(0, len(self.seqWin.msgQueue)):
if (self.seqWin.msgQueue[i].find(self.runMsg) >= 0):
self.seqWin.msgQueue.pop(i)
break
if (len(self.seqWin.msgQueue) > 0):
self.seqWin.labelMsg.SetLabel(self.seqWin.msgQueue[len(self.seqWin.msgQueue)-1])
else:
self.seqWin.labelMsg.SetLabel("")
self.seqWin.labelMsg.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
self.seqWin.labelMsg.SetForegroundColour("#FFFFFF")
# This is the function that gets called when a submission occurs
# It can turn into the Cancel and Finalize buttons if those are appropriate for your module
def submitClick(self, event):
# This is also the "Finalize!" button
logInfo("Submit button clicked")
if (self.buttonState == "Submit!"):
### DO OTHER THINGS AFTER SUBMISSION
### e.g. DISABLE SOME CONTROLS, PREPARE INPUTS, CHECK THAT INPUTS ARE VALID
### ...
### ...
self.updateMessage()
# Disable protocol changing and sequence editing while the protocol is active
self.parent.GoBtn.Disable()
self.seqWin.cannotDelete = True
# Uses a Timer to handle interactions with the daemon
self.stage = 1
if (platform.system() == "Darwin"):
self.btnSubmit.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/biotools/btnSubmit_Cancel.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnSubmit.SetLabel("Cancel!")
self.buttonState = "Cancel!"
self.btnSubmit.SetToolTipString("Cancel the " + self.protocolcode + " simulation")
self.tmrSubmit = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.threadSubmit, self.tmrSubmit)
self.tmrSubmit.Start(1000)
elif (self.buttonState == "Cancel!"):
dlg = wx.MessageDialog(self, "Are you sure you want to cancel the " + self.protocolcode + " simulation? All progress will be lost.", "Cancel " + self.protocolcode + " Simulation", wx.YES_NO | wx.ICON_QUESTION | wx.CENTRE)
result = dlg.ShowModal()
if (result == wx.ID_YES):
self.cancelSubmit()
dlg.Destroy()
else:
# Finalize button, ask whether the changes will be accepted or rejected
dlg = wx.MessageDialog(self, "Do you want to accept the results of this " + self.protocolcode + " simulation?", "Accept/Reject " + self.protocolcode, wx.YES_NO | wx.CANCEL | wx.ICON_QUESTION | wx.CENTRE)
result = dlg.ShowModal()
if (result == wx.ID_YES):
accept = True
logInfo(self.protocolcode + " accepted")
elif (result == wx.ID_NO):
accept = False
logInfo(self.protocolcode + " rejected")
else:
dlg.Destroy()
logInfo("Finalize operation cancelled")
return
dlg.Destroy()
if (platform.system() == "Darwin"):
self.btnSubmit.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/biotools/btnSubmit.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnSubmit.SetLabel("Submit!")
self.buttonState = "Submit!"
self.btnSubmit.SetToolTipString("Perform " + self.protocolcode)
self.seqWin.cannotDelete = False
### DO THINGS COMMON TO BOTH ACCEPT AND REJECT
### ...
### ...
if (not(accept)):
### HANDLE A REJECTION
### ...
### ...
### ===========================================================================
### EXAMPLE CODE TO DELETE THE OUTPUTTED MODEL FROM PYMOL
### DELETE ME LATER
self.cmd.remove("example_view")
### END DELETION
### ===========================================================================
return
### HANDLE AN ACCEPT
### ...
### ...
### ===============================================================================
### EXAMPLE CODE TO LOAD THE MODEL INTO THE SEQUENCEVIEWER
### DELETE ME LATER
goToSandbox()
self.cmd.remove("example_view")
self.seqWin.PyMOLPDBLoad(1, "output.pdb", showDialog="Show")
### END DELETION
### ===============================================================================
def recoverFromError(self):
# This function tells the user what the error was and tries to revert the protocol
# back to the pre-daemon state so the main GUI can continue to be used
f = open("errreport", "r")
errmsg = "An error was encountered during the protocol:\n\n"
for aline in f:
errmsg = errmsg + aline.strip()
f.close()
errmsg = str(errmsg)
logInfo("Error encountered")
logInfo(errmsg)
if (platform.system() == "Windows"):
sessioninfo = os.path.expanduser("~") + "\\InteractiveRosetta\\sessionlog"
else:
sessioninfo = os.path.expanduser("~") + "/.InteractiveRosetta/sessionlog"
errmsg = errmsg + "\n\nIf you don't know what caused this, send the file " + sessioninfo + " to a developer along with an explanation of what you did."
# You have to use a MessageDialog because the MessageBox doesn't always work for some reason
dlg = wx.MessageDialog(self, errmsg, "Error Encountered", wx.OK|wx.ICON_EXCLAMATION)
dlg.ShowModal()
dlg.Destroy()
os.remove("errreport")
self.seqWin.cannotDelete = False
self.parent.GoBtn.Enable()
self.enableControls(True)
if (platform.system() == "Darwin"):
self.btnSubmit.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/biotools/btnSubmit.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnSubmit.SetLabel("Design!")
self.buttonState = "Submit!"
self.removeMessage()
def enableControls(self, enabled=True):
### Enable or disable controls in this function, using enabled as the boolean to
### either enable (True) or disable (False)
### Example:
self.btnSubmit.Enable(enabled)
def threadSubmit(self, event):
daemon_jobID = self.protocolcode
inputfile = daemon_jobID + "input"
outputfile = daemon_jobID + "output"
# Why am I using a Timer? See the explanation in kic.py
goToSandbox()
if (self.stage == 1):
self.tmrSubmit.Stop()
self.timeoutCount = 0
self.progress = None
fout = open(inputfile + "temp", "w")
### WRITE THE INPUTS INTO A FILE THAT THE DAEMON CAN READ
### IF THIS IS GOING TO A REMOTE SERVER YOU NEED TO HAVE ONE OF THE FOLLOWING:
### REQUESTED_NODES\t<VALUE>
### REQUIRED_NODES\t<VALUE>
### IF REQUESTED_NODES IS USED, THEN THE SERVER WILL TRY TO GET AT MOST <VALUE> NODES
### IF REQUIRED_NODES IS USED, THEN THE SERVER WILL GET AT LEAST <VALUE> NODES
### ...
### ...
### ================================================================================
### EXAMPLE FOR LOCAL DAEMON
### DELETE ME LATER
fout.write("PDBFILE\t1adw_1.pdb\n")
fout.write("BEGIN PDB DATA\n")
fin = open(self.datadir + "/1adw_1.pdb")
for aline in fin:
fout.write(aline)
fin.close()
fout.write("END PDB DATA\n")
fout.write("RESFILE\t1adw.resfile\n")
fout.write("BEGIN RESFILE DATA\n")
fin = open(self.datadir + "/1adw.resfile")
for aline in fin:
fout.write(aline)
fin.close()
fout.write("END RESFILE DATA\n")
fout.write("REQUIRED_NODES\t1\n")
### ================================================================================
fout.close()
### ADD THE PARAMS AND SCOREFXN DATA TO THE OUTPUTFILE
### IT HAS THE FOLLOWING FORMAT:
### PARAMS\t<FILENAME>
### BEGIN PARAMS DATA
### <DATA>
### END PARAMS DATA
### SCOREFXN\t<FILENAME>
### BEGIN SCOREFXN DATA
### <DATA>
### END SCOREFXN DATA
appendScorefxnParamsInfoToFile(inputfile + "temp", self.selectWin.weightsfile)
serverToggleable = False
try:
# To find out if the developer defined the serverOn variable
self.serverOn
serverToggleable = True
except:
pass
if (self.useServer or (serverToggleable and self.serverOn)):
try:
### USE THE CODE BELOW IF YOU WANT TO SEND TO THE USER'S SELECTED SERVER
serverName = ""
home = os.path.expanduser("~")
if (platform.system() == "Windows"):
fin = open(home + "/InteractiveROSETTA/seqwindow.cfg", "r")
else:
fin = open(home + "/.InteractiveROSETTA/seqwindow.cfg", "r")
for aline in fin:
if (aline.startswith("[SERVER]")):
serverName = aline.split("\t")[1].strip()
fin.close()
if (len(serverName.strip()) == 0):
raise Exception("No server specified")
self.ID = sendToServer(inputfile)
### ======================================================================
### IF YOU WANT TO FORCE SENDING IT TO A CERTAIN SERVER
# serverURL = <URL>
# dlg = wx.MessageDialog(self, "Your job will be sent to the following server: " + serverURL + "\n\nIs this okay? If there is sensitive data in your submission that you don't want on this server, please select No.", "Confirm Server Submission", wx.YES_NO | wx.ICON_EXCLAMATION | wx.CENTRE)
# if (dlg.ShowModal() == wx.ID_NO):
# self.cancelSubmit()
# return
# dlg.Destroy()
# self.ID = sendToServer(inputfile, serverURL)
# serverName = serverURL
### ======================================================================
dlg = wx.TextEntryDialog(None, "Enter a description for this submission:", "Job Description", "")
if (dlg.ShowModal() == wx.ID_OK):
desc = dlg.GetValue()
desc = desc.replace("\t", " ").replace("\n", " ").strip()
else:
desc = self.ID
goToSandbox()
# First make sure this isn't a duplicate
alreadythere = False
try:
f = open("downloadwatch", "r")
for aline in f:
if (len(aline.split("\t")) >= 2 and aline.split("\t")[0] == self.protocolcode.upper() and aline.split("\t")[1] == self.ID.strip()):
alreadythere = True
break
f.close()
except:
pass
if (not(alreadythere)):
f = open("downloadwatch", "a")
f.write(self.protocolcode.upper() + "\t" + self.ID.strip() + "\t" + str(datetime.datetime.now().strftime("%A, %B %d - %I:%M:%S %p")) + "\t" + serverName + "\t" + desc + "\n")
f.close()
dlg = wx.MessageDialog(self, "InteractiveROSETTA is now watching the server for job ID " + desc.strip() + ". You will be notified when the package is available for download.", "Listening for Download", wx.OK | wx.ICON_EXCLAMATION | wx.CENTRE)
dlg.ShowModal()
dlg.Destroy()
logInfo(self.protocolcode.upper() + " input sent to server daemon with ID " + self.ID)
if (platform.system() == "Darwin"):
self.btnSubmit.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/biotools/btnSubmit.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnSubmit.SetLabel("Submit!")
self.buttonState = "Submit!"
self.btnSubmit.SetToolTipString("Perform " + self.protocolcode)
self.seqWin.cannotDelete = False
self.parent.GoBtn.Enable()
self.enableControls(True)
self.removeMessage()
return
except Exception as e:
logInfo("Server daemon not available")
                    if (serverToggleable and self.serverOn):
os.rename(inputfile + "temp", inputfile)
logInfo(self.protocolcode + " input uploaded locally at " + inputfile)
else:
f = open("errreport", "w")
f.write(e.message.strip())
f.close()
self.recoverFromError()
return
else:
os.rename(inputfile + "temp", inputfile)
logInfo(self.protocolcode + " input uploaded locally at " + inputfile)
self.stage = 2
self.tmrSubmit.Start(1000)
### IF YOUR PROTOCOL HAS MORE THAN ONE STEP, YOU CAN IMPLEMENT THAT HERE AS
### MULTIPLE STAGES
else:
# Read the output dumped by the child process
if (os.path.isfile(outputfile)):
if (self.progress is not None):
try:
self.progress.Destroy()
except:
pass
goToSandbox()
self.tmrSubmit.Stop()
fin = open(outputfile, "r")
### PARSE THE outputfile
### ...
### ...
fin.close()
logInfo("Found " + self.protocolcode + " output at " + outputfile)
if (platform.system() == "Darwin"):
self.btnSubmit.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/biotools/btnSubmit_Finalize.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
else:
self.btnSubmit.SetLabel("Finalize!")
self.buttonState = "Finalize!"
self.btnSubmit.SetToolTipString("Accept or reject the results of this protocol")
os.remove(outputfile)
self.removeMessage()
### DO SOMETHING WITH THE OUTPUT
### e.g.) DISPLAY IT IN CONTROLS/PYMOL
### ...
### ...
### ===========================================================================
### THIS IS AN EXAMPLE THAT LOADS AN OUTPUT MODEL FOR PYMOL VIEWING
### DELETE IT LATER
self.cmd.load("output.pdb", "example_view")
defaultPyMOLView(self.cmd, "example_view")
### ===========================================================================
elif (os.path.isfile("errreport")):
self.tmrSubmit.Stop()
self.recoverFromError()
if (self.progress is not None):
try:
self.progress.Destroy()
except:
pass
elif (os.path.isfile("progress")):
# The local daemon can output its progress to keep the GUI updated about
# how far along it is, along with a message
# This is optional
# See job/__init__.py for more information
if (self.progress is None):
self.progress = wx.ProgressDialog(self.protocolcode.upper() + " Progress", "Performing " + self.protocolcode + "...", 100, style=wx.PD_CAN_ABORT | wx.PD_APP_MODAL | wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME)
fin = open("progress", "r")
data = fin.readlines()
fin.close()
# First line should be a fraction
try:
num = float(data[0].split("/")[0].strip())
den = float(data[0].split("/")[1].strip())
# Convert to a percentage
percent = int(num / den * 100.0)
if (percent > 99):
# Let's the appearance of the output file kill the progress bar
percent = 99
except:
return
try:
# The optional second line is a new message
newmsg = data[1].strip()
(keepGoing, skip) = self.progress.Update(percent, newmsg)
except:
(keepGoing, skip) = self.progress.Update(percent)
if (not(keepGoing)):
# User clicked "Cancel" on the progress bar
self.cancelSubmit()
self.progress.Destroy() | gpl-2.0 | 8,341,332,279,122,857,000 | 51.935438 | 310 | 0.546958 | false |
cypsun/FreeCAD | src/Mod/Arch/ArchSectionPlane.py | 8 | 25673 | #***************************************************************************
#* *
#* Copyright (c) 2011 *
#* Yorik van Havre <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD,WorkingPlane,math,Draft,ArchCommands,DraftVecUtils
from FreeCAD import Vector
if FreeCAD.GuiUp:
import FreeCADGui
from PySide import QtCore, QtGui
from DraftTools import translate
from pivy import coin
else:
def translate(ctxt,txt):
return txt
def makeSectionPlane(objectslist=None,name="Section"):
"""makeSectionPlane([objectslist]) : Creates a Section plane objects including the
given objects. If no object is given, the whole document will be considered."""
obj = FreeCAD.ActiveDocument.addObject("App::FeaturePython",name)
obj.Label = translate("Arch",name)
_SectionPlane(obj)
if FreeCAD.GuiUp:
_ViewProviderSectionPlane(obj.ViewObject)
if objectslist:
g = []
for o in objectslist:
if o.isDerivedFrom("Part::Feature"):
g.append(o)
elif o.isDerivedFrom("App::DocumentObjectGroup"):
g.append(o)
obj.Objects = g
return obj
def makeSectionView(section,name="View"):
"""makeSectionView(section) : Creates a Drawing view of the given Section Plane
in the active Page object (a new page will be created if none exists"""
page = None
for o in FreeCAD.ActiveDocument.Objects:
if o.isDerivedFrom("Drawing::FeaturePage"):
page = o
break
if not page:
page = FreeCAD.ActiveDocument.addObject("Drawing::FeaturePage",translate("Arch","Page"))
page.Template = Draft.getParam("template",FreeCAD.getResourceDir()+'Mod/Drawing/Templates/A3_Landscape.svg')
view = FreeCAD.ActiveDocument.addObject("Drawing::FeatureViewPython",name)
page.addObject(view)
_ArchDrawingView(view)
view.Source = section
view.Label = translate("Arch","View of")+" "+section.Name
return view
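# Example (hedged): typical use from the FreeCAD Python console, assuming an
# open document that already contains Arch/Part objects.  Values are
# illustrative only.
#
#     import Arch
#     plane = Arch.makeSectionPlane(FreeCAD.ActiveDocument.Objects)
#     plane.Placement.Base = FreeCAD.Vector(0, 0, 1000)   # raise the cut plane
#     view = Arch.makeSectionView(plane)                   # Drawing view on a page
#     FreeCAD.ActiveDocument.recompute()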
class _CommandSectionPlane:
"the Arch SectionPlane command definition"
def GetResources(self):
return {'Pixmap' : 'Arch_SectionPlane',
'Accel': "S, E",
'MenuText': QtCore.QT_TRANSLATE_NOOP("Arch_SectionPlane","Section Plane"),
'ToolTip': QtCore.QT_TRANSLATE_NOOP("Arch_SectionPlane","Creates a section plane object, including the selected objects")}
def IsActive(self):
return not FreeCAD.ActiveDocument is None
def Activated(self):
sel = FreeCADGui.Selection.getSelection()
ss = "["
for o in sel:
if len(ss) > 1:
ss += ","
ss += "FreeCAD.ActiveDocument."+o.Name
ss += "]"
FreeCAD.ActiveDocument.openTransaction(translate("Arch","Create Section Plane"))
FreeCADGui.addModule("Arch")
FreeCADGui.doCommand("section = Arch.makeSectionPlane("+ss+")")
FreeCADGui.doCommand("section.Placement = FreeCAD.DraftWorkingPlane.getPlacement()")
#FreeCADGui.doCommand("Arch.makeSectionView(section)")
FreeCAD.ActiveDocument.commitTransaction()
FreeCAD.ActiveDocument.recompute()
class _SectionPlane:
"A section plane object"
def __init__(self,obj):
obj.Proxy = self
obj.addProperty("App::PropertyPlacement","Placement","Base",translate("Arch","The placement of this object"))
obj.addProperty("Part::PropertyPartShape","Shape","Base","")
obj.addProperty("App::PropertyLinkList","Objects","Arch",translate("Arch","The objects that must be considered by this section plane. Empty means all document"))
obj.addProperty("App::PropertyBool","OnlySolids","Arch",translate("Arch","If false, non-solids will be cut too, with possible wrong results."))
obj.OnlySolids = True
self.Type = "SectionPlane"
def execute(self,obj):
import Part
if hasattr(obj.ViewObject,"DisplayLength"):
l = obj.ViewObject.DisplayLength.Value
h = obj.ViewObject.DisplayHeight.Value
elif hasattr(obj.ViewObject,"DisplaySize"):
# old objects
l = obj.ViewObject.DisplaySize.Value
h = obj.ViewObject.DisplaySize.Value
else:
l = 1
h = 1
        p = Part.makePlane(l,h,Vector(l/2,-h/2,0),Vector(0,0,-1))
# make sure the normal direction is pointing outwards, you never know what OCC will decide...
if p.normalAt(0,0).getAngle(obj.Placement.Rotation.multVec(FreeCAD.Vector(0,0,1))) > 1:
p.reverse()
p.Placement = obj.Placement
obj.Shape = p
def onChanged(self,obj,prop):
pass
def getNormal(self,obj):
return obj.Shape.Faces[0].normalAt(0,0)
def __getstate__(self):
return self.Type
def __setstate__(self,state):
if state:
self.Type = state
class _ViewProviderSectionPlane:
"A View Provider for Section Planes"
def __init__(self,vobj):
vobj.addProperty("App::PropertyLength","DisplayLength","Arch",translate("Arch","The display length of this section plane"))
vobj.addProperty("App::PropertyLength","DisplayHeight","Arch",translate("Arch","The display height of this section plane"))
vobj.addProperty("App::PropertyLength","ArrowSize","Arch",translate("Arch","The size of the arrows of this section plane"))
vobj.addProperty("App::PropertyPercent","Transparency","Base","")
vobj.addProperty("App::PropertyFloat","LineWidth","Base","")
vobj.addProperty("App::PropertyColor","LineColor","Base","")
vobj.addProperty("App::PropertyBool","CutView","Arch",translate("Arch","Show the cut in the 3D view"))
vobj.DisplayLength = 1
vobj.DisplayHeight = 1
vobj.ArrowSize = 1
vobj.Transparency = 85
vobj.LineWidth = 1
vobj.LineColor = (0.0,0.0,0.4,1.0)
vobj.CutView = False
vobj.Proxy = self
self.Object = vobj.Object
def getIcon(self):
import Arch_rc
return ":/icons/Arch_SectionPlane_Tree.svg"
def claimChildren(self):
return []
def attach(self,vobj):
self.clip = None
self.mat1 = coin.SoMaterial()
self.mat2 = coin.SoMaterial()
self.fcoords = coin.SoCoordinate3()
#fs = coin.SoType.fromName("SoBrepFaceSet").createInstance() # this causes a FreeCAD freeze for me
fs = coin.SoIndexedFaceSet()
fs.coordIndex.setValues(0,7,[0,1,2,-1,0,2,3])
self.drawstyle = coin.SoDrawStyle()
self.drawstyle.style = coin.SoDrawStyle.LINES
self.lcoords = coin.SoCoordinate3()
ls = coin.SoType.fromName("SoBrepEdgeSet").createInstance()
ls.coordIndex.setValues(0,57,[0,1,-1,2,3,4,5,-1,6,7,8,9,-1,10,11,-1,12,13,14,15,-1,16,17,18,19,-1,20,21,-1,22,23,24,25,-1,26,27,28,29,-1,30,31,-1,32,33,34,35,-1,36,37,38,39,-1,40,41,42,43,44])
sep = coin.SoSeparator()
psep = coin.SoSeparator()
fsep = coin.SoSeparator()
fsep.addChild(self.mat2)
fsep.addChild(self.fcoords)
fsep.addChild(fs)
psep.addChild(self.mat1)
psep.addChild(self.drawstyle)
psep.addChild(self.lcoords)
psep.addChild(ls)
sep.addChild(fsep)
sep.addChild(psep)
vobj.addDisplayMode(sep,"Default")
self.onChanged(vobj,"DisplayLength")
self.onChanged(vobj,"LineColor")
self.onChanged(vobj,"Transparency")
self.onChanged(vobj,"CutView")
def getDisplayModes(self,vobj):
return ["Default"]
def getDefaultDisplayMode(self):
return "Default"
def setDisplayMode(self,mode):
return mode
def updateData(self,obj,prop):
if prop in ["Placement"]:
self.onChanged(obj.ViewObject,"DisplayLength")
self.onChanged(obj.ViewObject,"CutView")
return
def onChanged(self,vobj,prop):
if prop == "LineColor":
l = vobj.LineColor
self.mat1.diffuseColor.setValue([l[0],l[1],l[2]])
self.mat2.diffuseColor.setValue([l[0],l[1],l[2]])
elif prop == "Transparency":
if hasattr(vobj,"Transparency"):
self.mat2.transparency.setValue(vobj.Transparency/100.0)
elif prop in ["DisplayLength","DisplayHeight","ArrowSize"]:
if hasattr(vobj,"DisplayLength"):
ld = vobj.DisplayLength.Value/2
hd = vobj.DisplayHeight.Value/2
elif hasattr(vobj,"DisplaySize"):
# old objects
ld = vobj.DisplaySize.Value/2
hd = vobj.DisplaySize.Value/2
else:
ld = 1
hd = 1
verts = []
fverts = []
for v in [[-ld,-hd],[ld,-hd],[ld,hd],[-ld,hd]]:
if hasattr(vobj,"ArrowSize"):
l1 = vobj.ArrowSize.Value if vobj.ArrowSize.Value > 0 else 0.1
else:
l1 = 0.1
l2 = l1/3
pl = FreeCAD.Placement(vobj.Object.Placement)
p1 = pl.multVec(Vector(v[0],v[1],0))
p2 = pl.multVec(Vector(v[0],v[1],-l1))
p3 = pl.multVec(Vector(v[0]-l2,v[1],-l1+l2))
p4 = pl.multVec(Vector(v[0]+l2,v[1],-l1+l2))
p5 = pl.multVec(Vector(v[0],v[1]-l2,-l1+l2))
p6 = pl.multVec(Vector(v[0],v[1]+l2,-l1+l2))
verts.extend([[p1.x,p1.y,p1.z],[p2.x,p2.y,p2.z]])
fverts.append([p1.x,p1.y,p1.z])
verts.extend([[p2.x,p2.y,p2.z],[p3.x,p3.y,p3.z],[p4.x,p4.y,p4.z],[p2.x,p2.y,p2.z]])
verts.extend([[p2.x,p2.y,p2.z],[p5.x,p5.y,p5.z],[p6.x,p6.y,p6.z],[p2.x,p2.y,p2.z]])
verts.extend(fverts+[fverts[0]])
self.lcoords.point.setValues(verts)
self.fcoords.point.setValues(fverts)
elif prop == "LineWidth":
self.drawstyle.lineWidth = vobj.LineWidth
elif prop == "CutView":
if hasattr(vobj,"CutView") and FreeCADGui.ActiveDocument.ActiveView:
sg = FreeCADGui.ActiveDocument.ActiveView.getSceneGraph()
if vobj.CutView:
if self.clip:
sg.removeChild(self.clip)
self.clip = None
for o in Draft.getGroupContents(vobj.Object.Objects,walls=True):
if hasattr(o.ViewObject,"Lighting"):
o.ViewObject.Lighting = "One side"
self.clip = coin.SoClipPlane()
self.clip.on.setValue(True)
norm = vobj.Object.Proxy.getNormal(vobj.Object)
mp = vobj.Object.Shape.CenterOfMass
mp = DraftVecUtils.project(mp,norm)
dist = mp.Length + 0.1 # to not clip exactly on the section object
norm = norm.negative()
plane = coin.SbPlane(coin.SbVec3f(norm.x,norm.y,norm.z),-dist)
self.clip.plane.setValue(plane)
sg.insertChild(self.clip,0)
else:
if self.clip:
sg.removeChild(self.clip)
self.clip = None
return
def __getstate__(self):
return None
def __setstate__(self,state):
return None
class _ArchDrawingView:
def __init__(self, obj):
obj.addProperty("App::PropertyLink","Source","Base","The linked object")
obj.addProperty("App::PropertyEnumeration","RenderingMode","Drawing view","The rendering mode to use")
obj.addProperty("App::PropertyBool","ShowCut","Drawing view","If cut geometry is shown or not")
obj.addProperty("App::PropertyBool","ShowFill","Drawing view","If cut geometry is filled or not")
obj.addProperty("App::PropertyFloat","LineWidth","Drawing view","The line width of the rendered objects")
obj.addProperty("App::PropertyLength","FontSize","Drawing view","The size of the texts inside this object")
obj.RenderingMode = ["Solid","Wireframe"]
obj.RenderingMode = "Wireframe"
obj.LineWidth = 0.35
obj.ShowCut = False
obj.Proxy = self
self.Type = "ArchSectionView"
obj.FontSize = 12
def execute(self, obj):
if hasattr(obj,"Source"):
if obj.Source:
if not hasattr(self,"svg"):
self.onChanged(obj,"Source")
else:
if not self.svg:
self.onChanged(obj,"Source")
if not hasattr(self,"svg"):
return ''
if not hasattr(self,"direction"):
p = FreeCAD.Placement(obj.Source.Placement)
self.direction = p.Rotation.multVec(FreeCAD.Vector(0,0,1))
linewidth = obj.LineWidth/obj.Scale
st = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Arch").GetFloat("CutLineThickness",2)
da = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Arch").GetString("archHiddenPattern","30,10")
da = da.replace(" ","")
svg = self.svg.replace('LWPlaceholder', str(linewidth) + 'px')
svg = svg.replace('SWPlaceholder', str(linewidth*st) + 'px')
svg = svg.replace('DAPlaceholder', str(da))
if hasattr(self,"spaces"):
if round(self.direction.getAngle(FreeCAD.Vector(0,0,1)),Draft.precision()) in [0,round(math.pi,Draft.precision())]:
for s in self.spaces:
svg += Draft.getSVG(s,scale=obj.Scale,fontsize=obj.FontSize.Value,direction=self.direction)
result = ''
result += '<g id="' + obj.Name + '"'
result += ' transform="'
result += 'rotate('+str(obj.Rotation)+','+str(obj.X)+','+str(obj.Y)+') '
result += 'translate('+str(obj.X)+','+str(obj.Y)+') '
result += 'scale('+str(obj.Scale)+','+str(obj.Scale)+')'
result += '">\n'
result += svg
result += '</g>\n'
# print "complete node:",result
obj.ViewResult = result
def onChanged(self, obj, prop):
if prop in ["Source","RenderingMode","ShowCut"]:
import Part, DraftGeomUtils
if hasattr(obj,"Source"):
if obj.Source:
if obj.Source.Objects:
objs = Draft.getGroupContents(obj.Source.Objects,walls=True)
objs = Draft.removeHidden(objs)
# separate spaces
self.spaces = []
os = []
for o in objs:
if Draft.getType(o) == "Space":
self.spaces.append(o)
else:
os.append(o)
objs = os
self.svg = ''
fillpattern = '<pattern id="sectionfill" patternUnits="userSpaceOnUse" patternTransform="matrix(5,0,0,5,0,0)"'
fillpattern += ' x="0" y="0" width="10" height="10">'
fillpattern += '<g>'
fillpattern += '<rect width="10" height="10" style="stroke:none; fill:#ffffff" /><path style="stroke:#000000; stroke-width:1" d="M0,0 l10,10" /></g></pattern>'
# generating SVG
if obj.RenderingMode == "Solid":
# render using the Arch Vector Renderer
import ArchVRM, WorkingPlane
wp = WorkingPlane.plane()
wp.setFromPlacement(obj.Source.Placement)
wp.inverse()
render = ArchVRM.Renderer()
render.setWorkingPlane(wp)
render.addObjects(objs)
if hasattr(obj,"ShowCut"):
render.cut(obj.Source.Shape,obj.ShowCut)
else:
render.cut(obj.Source.Shape)
self.svg += render.getViewSVG(linewidth="LWPlaceholder")
self.svg += fillpattern
self.svg += render.getSectionSVG(linewidth="SWPlaceholder",fillpattern="sectionfill")
if hasattr(obj,"ShowCut"):
if obj.ShowCut:
self.svg += render.getHiddenSVG(linewidth="LWPlaceholder")
# print render.info()
else:
# render using the Drawing module
import Drawing, Part
shapes = []
hshapes = []
sshapes = []
p = FreeCAD.Placement(obj.Source.Placement)
self.direction = p.Rotation.multVec(FreeCAD.Vector(0,0,1))
for o in objs:
if o.isDerivedFrom("Part::Feature"):
if o.Shape.isNull():
pass
#FreeCAD.Console.PrintWarning(translate("Arch","Skipping empty object: ")+o.Name)
elif o.Shape.isValid():
if hasattr(obj.Source,"OnlySolids"):
if obj.Source.OnlySolids:
shapes.extend(o.Shape.Solids)
else:
shapes.append(o.Shape)
else:
shapes.extend(o.Shape.Solids)
else:
FreeCAD.Console.PrintWarning(translate("Arch","Skipping invalid object: ")+o.Name)
cutface,cutvolume,invcutvolume = ArchCommands.getCutVolume(obj.Source.Shape.copy(),shapes)
if cutvolume:
nsh = []
for sh in shapes:
for sol in sh.Solids:
if sol.Volume < 0:
sol.reverse()
c = sol.cut(cutvolume)
s = sol.section(cutface)
try:
wires = DraftGeomUtils.findWires(s.Edges)
for w in wires:
f = Part.Face(w)
sshapes.append(f)
#s = Part.Wire(s.Edges)
#s = Part.Face(s)
except Part.OCCError:
#print "ArchDrawingView: unable to get a face"
sshapes.append(s)
nsh.extend(c.Solids)
#sshapes.append(s)
if hasattr(obj,"ShowCut"):
if obj.ShowCut:
c = sol.cut(invcutvolume)
hshapes.append(c)
shapes = nsh
if shapes:
self.shapes = shapes
self.baseshape = Part.makeCompound(shapes)
svgf = Drawing.projectToSVG(self.baseshape,self.direction)
if svgf:
svgf = svgf.replace('stroke-width="0.35"','stroke-width="LWPlaceholder"')
svgf = svgf.replace('stroke-width="1"','stroke-width="LWPlaceholder"')
svgf = svgf.replace('stroke-width:0.01','stroke-width:LWPlaceholder')
self.svg += svgf
if hshapes:
hshapes = Part.makeCompound(hshapes)
self.hiddenshape = hshapes
svgh = Drawing.projectToSVG(hshapes,self.direction)
if svgh:
svgh = svgh.replace('stroke-width="0.35"','stroke-width="LWPlaceholder"')
svgh = svgh.replace('stroke-width="1"','stroke-width="LWPlaceholder"')
svgh = svgh.replace('stroke-width:0.01','stroke-width:LWPlaceholder')
svgh = svgh.replace('fill="none"','fill="none"\nstroke-dasharray="DAPlaceholder"')
self.svg += svgh
if sshapes:
svgs = ""
if hasattr(obj,"ShowFill"):
if obj.ShowFill:
svgs += fillpattern
svgs += '<g transform="rotate(180)">\n'
for s in sshapes:
if s.Edges:
f = Draft.getSVG(s,direction=self.direction.negative(),linewidth=0,fillstyle="sectionfill",color=(0,0,0))
svgs += f
svgs += "</g>\n"
sshapes = Part.makeCompound(sshapes)
self.sectionshape = sshapes
svgs += Drawing.projectToSVG(sshapes,self.direction)
if svgs:
svgs = svgs.replace('stroke-width="0.35"','stroke-width="SWPlaceholder"')
svgs = svgs.replace('stroke-width="1"','stroke-width="SWPlaceholder"')
svgs = svgs.replace('stroke-width:0.01','stroke-width:SWPlaceholder')
svgs = svgs.replace('stroke-width="0.35 px"','stroke-width="SWPlaceholder"')
svgs = svgs.replace('stroke-width:0.35','stroke-width:SWPlaceholder')
self.svg += svgs
def __getstate__(self):
return self.Type
def __setstate__(self,state):
if state:
self.Type = state
def getDisplayModes(self,vobj):
modes=["Default"]
return modes
def setDisplayMode(self,mode):
return mode
def getDXF(self,obj):
"returns a DXF representation of the view"
if obj.RenderingMode == "Solid":
print "Unable to get DXF from Solid mode: ",obj.Label
return ""
result = []
import Drawing
if not hasattr(self,"baseshape"):
self.onChanged(obj,"Source")
if hasattr(self,"baseshape"):
if self.baseshape:
result.append(Drawing.projectToDXF(self.baseshape,self.direction))
if hasattr(self,"sectionshape"):
if self.sectionshape:
result.append(Drawing.projectToDXF(self.sectionshape,self.direction))
if hasattr(self,"hiddenshape"):
if self.hiddenshape:
result.append(Drawing.projectToDXF(self.hiddenshape,self.direction))
return result
if FreeCAD.GuiUp:
FreeCADGui.addCommand('Arch_SectionPlane',_CommandSectionPlane())
| lgpl-2.1 | 2,914,885,388,243,609,600 | 49.044834 | 200 | 0.496358 | false |
novapost/insight-reloaded | insight_reloaded/tests/test_api.py | 1 | 2660 | # -*- coding: utf-8 -*-
import json
import redis
from tornado.testing import AsyncHTTPTestCase
from insight_reloaded.api import application
from insight_reloaded import __version__ as VERSION
from insight_reloaded.insight_settings import (
REDIS_HOST, REDIS_PORT, REDIS_DB, DEFAULT_REDIS_QUEUE_KEY
)
from tornado import ioloop
class InsightApiHTTPTest(AsyncHTTPTestCase):
def get_new_ioloop(self):
return ioloop.IOLoop.instance()
def get_app(self):
return application
def tearDown(self):
# 1. Empty the status queue
r = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT,
db=REDIS_DB)
while r.lpop(DEFAULT_REDIS_QUEUE_KEY):
pass
return super(InsightApiHTTPTest, self).tearDown()
def test_api_version(self):
self.http_client.fetch(self.get_url('/'), self.stop)
response = self.wait()
body_json = json.loads(response.body)
self.assertIn('version', body_json)
self.assertIn('insight_reloaded', body_json)
self.assertEqual('insight-reloaded', body_json['name'])
self.assertEqual(VERSION, body_json['version'])
def test_api_request(self):
self.http_client.fetch(self.get_url('/') +
u'?url=http://my_file_url.com/file.pdf',
self.stop)
response = self.wait()
json_body = json.loads(response.body)
self.assertIn('insight_reloaded', json_body)
self.assertEqual(json_body['number_in_queue'], 1)
def test_api_url_missing(self):
self.http_client.fetch(self.get_url('/') + '?arg=foobar', self.stop)
response = self.wait()
self.assertEqual(response.code, 404)
def test_status(self):
# 1. Check the status
self.http_client.fetch(self.get_url('/status'), self.stop)
response = self.wait()
json_body = json.loads(response.body)
self.assertDictEqual({"insight_reloaded": "There is 0 job in the "
"'insight-reloaded' queue.",
"number_in_queue": 0}, json_body)
# 2. Add a job
self.http_client.fetch(self.get_url('/') +
'?url=http://my_file_url.com/file.pdf',
self.stop)
response = self.wait()
self.assertEqual(response.code, 200)
json_body = json.loads(response.body)
self.assertDictEqual({"insight_reloaded": "Job added to queue "
"'insight-reloaded'.",
"number_in_queue": 1}, json_body)
| bsd-3-clause | 2,022,860,773,039,093,500 | 33.545455 | 76 | 0.580075 | false |
orione7/Italorione | servers/filebox.py | 9 | 3689 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para filebox
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def test_video_exists( page_url ):
logger.info("[streamcloud.py] test_video_exists(page_url='%s')" % page_url)
data = scrapertools.cache_page( url = page_url )
if "<b>File Not Found</b>" in data:
return False,"El archivo no existe<br/>en filebox o ha sido borrado."
else:
return True,""
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[filebox.py] get_video_url(page_url='%s')" % page_url)
video_urls = []
'''
<input type="hidden" name="op" value="download2">
<input type="hidden" name="id" value="235812b1j9w1">
<input type="hidden" name="rand" value="na73zeeooqyfkndsv4uxzzpbajwi6mhbmixtogi">
<input type="hidden" name="referer" value="http://www.seriesyonkis.com/s/ngo/2/5/1/8/773">
'''
logger.info("[filebox.py] URL ")
data = scrapertools.cache_page(page_url)
import time
time.sleep(5)
codigo = scrapertools.get_match(data,'<input type="hidden" name="id" value="([^"]+)">[^<]+')
rand = scrapertools.get_match(data,'<input type="hidden" name="rand" value="([^"]+)">')
#op=download2&id=xuquejiv6xdf&rand=r6dq7hn7so2ygpnxv2zg2i3cu3sbdsunf57gtni&referer=&method_free=&method_premium=&down_direct=1
post = "op=download2&id="+codigo+"&rand="+rand+"&referer=&method_free=&method_premium=&down_direct=1"
data = scrapertools.cache_page( page_url , post=post, headers=[['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'],['Referer',page_url]] )
logger.info("data="+data)
media_url = scrapertools.get_match(data,"this.play\('([^']+)'")
video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" [filebox]",media_url])
for video_url in video_urls:
logger.info("[filebox.py] %s - %s" % (video_url[0],video_url[1]))
return video_urls
# Finds this server's videos in the given text
def find_videos(data):
encontrados = set()
devuelve = []
# http://www.filebox.com/embed-wa5p8wzh7tlq-700x385.html
patronvideos = 'filebox.com/embed-([0-9a-zA-Z]+)'
logger.info("[filebox.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[filebox]"
url = "http://www.filebox.com/"+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'filebox' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
# http://www.filebox.com/729x1eo9zrx1
patronvideos = 'filebox.com/([0-9a-zA-Z]+)'
logger.info("[filebox.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[filebox]"
url = "http://www.filebox.com/"+match
if url!="http://www.filebox.com/embed" and url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'filebox' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
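# Illustrative sketch (not part of the original connector): find_videos()
# scans arbitrary text for filebox embed/direct links. The sample string and
# the helper name below are made up for demonstration.
def _find_videos_demo():
    sample = 'see http://www.filebox.com/embed-wa5p8wzh7tlq-700x385.html'
    return find_videos(sample)  # -> [['[filebox]', 'http://www.filebox.com/wa5p8wzh7tlq', 'filebox']]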
def test():
video_urls = get_video_url("http://www.filebox.com/sstr2hlxt398")
return len(video_urls)>0
| gpl-3.0 | 4,726,176,458,500,455,000 | 38.212766 | 200 | 0.614759 | false |
drmrd/ansible | test/units/modules/network/dellos10/test_dellos10_facts.py | 42 | 4746 | # (c) 2016 Red Hat Inc.
#
# (c) 2017 Dell EMC.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from units.modules.utils import set_module_args
from .dellos10_module import TestDellos10Module, load_fixture
from ansible.modules.network.dellos10 import dellos10_facts
class TestDellos10Facts(TestDellos10Module):
module = dellos10_facts
def setUp(self):
super(TestDellos10Facts, self).setUp()
self.mock_run_command = patch(
'ansible.modules.network.dellos10.dellos10_facts.run_commands')
self.run_command = self.mock_run_command.start()
def tearDown(self):
super(TestDellos10Facts, self).tearDown()
self.mock_run_command.stop()
def load_fixtures(self, commands=None):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for item in commands:
try:
obj = json.loads(item)
command = obj['command']
except ValueError:
command = item
if '|' in command:
command = str(command).replace('|', '')
filename = str(command).replace(' ', '_')
filename = filename.replace('/', '7')
filename = filename.replace(':', '_colon_')
output.append(load_fixture(filename))
return output
self.run_command.side_effect = load_from_file
def test_dellos10_facts_gather_subset_default(self):
set_module_args(dict())
result = self.execute_module()
ansible_facts = result['ansible_facts']
self.assertIn('hardware', ansible_facts['ansible_net_gather_subset'])
self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset'])
self.assertEquals('os10', ansible_facts['ansible_net_hostname'])
self.assertIn('ethernet1/1/8', ansible_facts['ansible_net_interfaces'].keys())
self.assertEquals(7936, ansible_facts['ansible_net_memtotal_mb'])
self.assertEquals(5693, ansible_facts['ansible_net_memfree_mb'])
def test_dellos10_facts_gather_subset_config(self):
set_module_args({'gather_subset': 'config'})
result = self.execute_module()
ansible_facts = result['ansible_facts']
self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
self.assertIn('config', ansible_facts['ansible_net_gather_subset'])
self.assertEquals('os10', ansible_facts['ansible_net_hostname'])
self.assertIn('ansible_net_config', ansible_facts)
def test_dellos10_facts_gather_subset_hardware(self):
set_module_args({'gather_subset': 'hardware'})
result = self.execute_module()
ansible_facts = result['ansible_facts']
self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
self.assertIn('hardware', ansible_facts['ansible_net_gather_subset'])
self.assertEquals('x86_64', ansible_facts['ansible_net_cpu_arch'])
self.assertEquals(7936, ansible_facts['ansible_net_memtotal_mb'])
self.assertEquals(5693, ansible_facts['ansible_net_memfree_mb'])
def test_dellos10_facts_gather_subset_interfaces(self):
set_module_args({'gather_subset': 'interfaces'})
result = self.execute_module()
ansible_facts = result['ansible_facts']
self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset'])
self.assertIn('ethernet1/1/8', ansible_facts['ansible_net_interfaces'].keys())
self.assertEquals(sorted(['mgmt1/1/1', 'ethernet1/1/4', 'ethernet1/1/2', 'ethernet1/1/3', 'ethernet1/1/1']),
sorted(list(ansible_facts['ansible_net_neighbors'].keys())))
self.assertIn('ansible_net_interfaces', ansible_facts)
| gpl-3.0 | 5,236,351,527,136,101,000 | 42.145455 | 116 | 0.654657 | false |
hortonworks/hortonworks-sandbox | desktop/core/ext-py/Twisted/doc/core/examples/row_util.py | 20 | 3261 | from twisted.enterprise import row
##################################################
########## Definitions of Row Classes ############
##################################################
class KeyFactory:
"""This is a lame, but simple way to generate keys.
For real code, use the database instead."""
def __init__(self, minimum, pool):
self.min = minimum
self.pool = minimum + pool
self.current = self.min
def getNextKey(self):
next = self.current + 1
self.current = next
if self.current >= self.pool:
raise ValueError("Key factory key pool exceeded.")
return next
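# Illustrative sketch (not part of the original example): KeyFactory hands out
# sequential keys from a fixed pool; the numbers below are arbitrary.
def _key_factory_demo():
    factory = KeyFactory(1000, 50)   # yields keys 1001..1049 before the pool runs out
    first = factory.getNextKey()     # 1001
    second = factory.getNextKey()    # 1002
    return first, second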
def myRowFactory(rowClass, data, kw):
newRow = rowClass()
newRow.__dict__.update(kw)
return newRow
class RoomRow(row.RowObject):
rowColumns = [
("roomId", "int"),
("town_id", "int"),
("name", "varchar"),
("owner", "varchar"),
("posx", "int"),
("posy", "int"),
("width", "int"),
("height", "int")
]
rowKeyColumns = [("roomId","int")]
rowTableName = "testrooms"
rowFactoryMethod = [myRowFactory]
def __init__(self):
self.furniture = []
def addStuff(self, stuff):
self.furniture.append(stuff)
def moveTo(self, x, y):
self.posx = x
self.posy = y
def __repr__(self):
return "<Room #%s: %s (%s) (%s,%s)>" % (self.roomId, self.name, self.owner, self.posx, self.posy)
class FurnitureRow(row.RowObject):
rowColumns = [
("furnId", "int"),
("roomId", "int"),
("name", "varchar"),
("posx", "int"),
("posy", "int")
]
rowKeyColumns = [("furnId","int")]
rowTableName = "furniture"
rowForeignKeys = [("testrooms", [("roomId","int")], [("roomId","int")], "addStuff", 1) ]
def __repr__(self):
return "Furniture #%s: room #%s (%s) (%s,%s)" % (self.furnId, self.roomId, self.name, self.posx, self.posy)
class RugRow(row.RowObject):
rowColumns = [
("rugId", "int"),
("roomId", "int"),
("name", "varchar")
]
rowKeyColumns = [("rugId","int")]
rowTableName = "rugs"
rowFactoryMethod = [myRowFactory]
rowForeignKeys = [( "testrooms", [("roomId","int")],[("roomId","int")], "addStuff", 1) ]
def __repr__(self):
return "Rug %#s: room #%s, (%s)" % (self.rugId, self.roomId, self.name)
class LampRow(row.RowObject):
rowColumns = [
("lampId", "int"),
("furnId", "int"),
("furnName", "varchar"),
("lampName", "varchar")
]
rowKeyColumns = [("lampId","int")]
rowTableName = "lamps"
rowForeignKeys = [("furniture",
[("furnId","int"),("furnName", "varchar")], # child table columns (this table)
[("furnId","int"),("name", "varchar")], # parent table columns (the other table)
None,
1)
]
# NOTE: this has no containerMethod so children will be added to "childRows"
def __repr__(self):
return "Lamp #%s" % self.lampId
| apache-2.0 | 4,142,719,729,510,860,000 | 30.660194 | 115 | 0.482981 | false |
wtanaka/google-app-engine-django-openid | src/openid/yadis/discover.py | 10 | 4423 | # -*- test-case-name: openid.test.test_yadis_discover -*-
__all__ = ['discover', 'DiscoveryResult', 'DiscoveryFailure']
from cStringIO import StringIO
from openid import fetchers
from openid.yadis.constants import \
YADIS_HEADER_NAME, YADIS_CONTENT_TYPE, YADIS_ACCEPT_HEADER
from openid.yadis.parsehtml import MetaNotFound, findHTMLMeta
class DiscoveryFailure(Exception):
"""Raised when a YADIS protocol error occurs in the discovery process"""
identity_url = None
def __init__(self, message, http_response):
Exception.__init__(self, message)
self.http_response = http_response
class DiscoveryResult(object):
"""Contains the result of performing Yadis discovery on a URI"""
# The URI that was passed to the fetcher
request_uri = None
# The result of following redirects from the request_uri
normalized_uri = None
# The URI from which the response text was returned (set to
# None if there was no XRDS document found)
xrds_uri = None
# The content-type returned with the response_text
content_type = None
# The document returned from the xrds_uri
response_text = None
def __init__(self, request_uri):
"""Initialize the state of the object
sets all attributes to None except the request_uri
"""
self.request_uri = request_uri
def usedYadisLocation(self):
"""Was the Yadis protocol's indirection used?"""
return self.normalized_uri != self.xrds_uri
def isXRDS(self):
"""Is the response text supposed to be an XRDS document?"""
return (self.usedYadisLocation() or
self.content_type == YADIS_CONTENT_TYPE)
def discover(uri):
"""Discover services for a given URI.
@param uri: The identity URI as a well-formed http or https
URI. The well-formedness and the protocol are not checked, but
the results of this function are undefined if those properties
do not hold.
@return: DiscoveryResult object
@raises Exception: Any exception that can be raised by fetching a URL with
the given fetcher.
@raises DiscoveryFailure: When the HTTP response does not have a 200 code.
"""
result = DiscoveryResult(uri)
resp = fetchers.fetch(uri, headers={'Accept': YADIS_ACCEPT_HEADER})
if resp.status != 200:
raise DiscoveryFailure(
'HTTP Response status from identity URL host is not 200. '
'Got status %r' % (resp.status,), resp)
# Note the URL after following redirects
result.normalized_uri = resp.final_url
# Attempt to find out where to go to discover the document
# or if we already have it
result.content_type = resp.headers.get('content-type')
result.xrds_uri = whereIsYadis(resp)
if result.xrds_uri and result.usedYadisLocation():
resp = fetchers.fetch(result.xrds_uri)
if resp.status != 200:
exc = DiscoveryFailure(
'HTTP Response status from Yadis host is not 200. '
'Got status %r' % (resp.status,), resp)
exc.identity_url = result.normalized_uri
raise exc
result.content_type = resp.headers.get('content-type')
result.response_text = resp.body
return result
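# Minimal usage sketch (not part of the original module): the URI below is a
# placeholder and the helper is only defined for illustration.
def _discover_example(uri="http://example.com/"):
    result = discover(uri)
    if result.isXRDS():
        return result.response_text   # the XRDS document text, when one was found
    return result.normalized_uri      # otherwise just the URI after redirects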
def whereIsYadis(resp):
"""Given a HTTPResponse, return the location of the Yadis document.
May be the URL just retrieved, another URL, or None, if I can't
find any.
[non-blocking]
@returns: str or None
"""
# Attempt to find out where to go to discover the document
# or if we already have it
content_type = resp.headers.get('content-type')
# According to the spec, the content-type header must be an exact
# match, or else we have to look for an indirection.
if (content_type and
content_type.split(';', 1)[0].lower() == YADIS_CONTENT_TYPE):
return resp.final_url
else:
# Try the header
yadis_loc = resp.headers.get(YADIS_HEADER_NAME.lower())
if not yadis_loc:
# Parse as HTML if the header is missing.
#
# XXX: do we want to do something with content-type, like
# have a whitelist or a blacklist (for detecting that it's
# HTML)?
try:
yadis_loc = findHTMLMeta(StringIO(resp.body))
except MetaNotFound:
pass
return yadis_loc
| gpl-3.0 | 6,306,477,015,103,625,000 | 31.762963 | 78 | 0.647976 | false |
JeremyAgost/gemrb | gemrb/GUIScripts/bg2/ImportFile.py | 2 | 2502 | # GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
#character generation, import (GUICG24)
import GemRB
from GUIDefines import *  # GUI constants (IE_GUI_*, OP_OR, WINDOW_VISIBLE) used below
#import from a character sheet
ImportWindow = 0
TextAreaControl = 0
def OnLoad():
global ImportWindow, TextAreaControl
GemRB.LoadWindowPack("GUICG",640,480)
ImportWindow = GemRB.LoadWindow(20)
TextAreaControl = ImportWindow.GetControl(4)
TextAreaControl.SetText(10963)
TextAreaControl = ImportWindow.GetControl(2)
TextAreaControl.SetFlags (IE_GUI_TEXTAREA_SELECTABLE)
TextAreaControl.GetCharacters()
DoneButton = ImportWindow.GetControl(0)
DoneButton.SetText(2610)
DoneButton.SetState(IE_GUI_BUTTON_DISABLED)
DoneButton.SetFlags (IE_GUI_BUTTON_DEFAULT, OP_OR)
CancelButton = ImportWindow.GetControl(1)
CancelButton.SetText(15416)
CancelButton.SetFlags (IE_GUI_BUTTON_CANCEL, OP_OR)
DoneButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, DonePress)
CancelButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, CancelPress)
TextAreaControl.SetEvent(IE_GUI_TEXTAREA_ON_CHANGE, SelectPress)
ImportWindow.SetVisible(WINDOW_VISIBLE)
return
def SelectPress():
DoneButton = ImportWindow.GetControl(0)
DoneButton.SetState(IE_GUI_BUTTON_ENABLED)
return
def DonePress():
FileName = TextAreaControl.QueryText()
Slot = GemRB.GetVar("Slot")
GemRB.CreatePlayer(FileName, Slot| 0x8000, 1, 11) # 11 = force bg2
if ImportWindow:
ImportWindow.Unload()
# the medium portrait isn't available, so we copy the original hack
MediumPortrait = GemRB.GetPlayerPortrait (Slot, 1)[0:-1] + "M"
GemRB.SetToken("SmallPortrait", GemRB.GetPlayerPortrait (Slot, 1) )
GemRB.SetToken("LargePortrait", MediumPortrait )
GemRB.SetNextScript("CharGen7")
return
def CancelPress():
if ImportWindow:
ImportWindow.Unload()
GemRB.SetNextScript(GemRB.GetToken("NextScript"))
return
| gpl-2.0 | -2,963,437,594,236,049,400 | 31.921053 | 81 | 0.776179 | false |
m-g-90/cpyjava | setup.py | 1 | 3689 | from distutils.core import setup, Extension
from distutils.spawn import find_executable
from distutils.util import get_platform
import os
try:
import pip
deps = ""
for dep in deps.split(' '):
try:
if len(dep)>0:
pip.main(['install', '--user', dep])
except:
pass
except:
pass
sourcedir = os.path.dirname(os.path.realpath(__file__))
def findJavaFolders():
java_folders = []
try:
tmp = os.environ['JAVA_HOME']
if tmp is not None:
if isinstance(tmp, str) and len(tmp) and os.path.isdir(tmp):
java_folders.append(tmp)
except:
pass
executables = ['java','java.exe']
for executable in executables:
try:
path = find_executable(executable)
if path is not None:
java_folders.append(os.path.dirname(os.path.abspath(path)))
java_folders.append(os.path.dirname(java_folders[-1]))
except:
pass
try:
path = find_executable(executable)
if path is not None:
import subprocess
output = subprocess.run([path,'-server','-verbose:class'],stderr=subprocess.STDOUT,stdout=subprocess.PIPE,check=False).stdout
import re
path = re.match(r".*\[Loaded java\.lang\.Object from (?P<path>.*?)rt\.jar\].*",str(output)).group("path")
if path is not None:
java_folders.append(os.path.dirname(os.path.abspath(path)))
java_folders.append(os.path.dirname(java_folders[-1]))
except Exception as ex:
print("Attempt to extract jvm path failed for "+executable+": "+repr(ex))
return java_folders
def findJavaLibrary():
if get_platform().startswith("win"):
extension = '.dll'
elif get_platform().startswith("linux"):
extension = '.so'
else:
raise Exception('JVM search failed: unknown operating system.')
subpaths = [
os.path.join('jre','bin','server','jvm'),
os.path.join('jre', 'bin', 'jvm'),
os.path.join('jre', 'lib', 'jvm'),
os.path.join('bin', 'server', 'jvm'),
os.path.join('bin', 'jvm'),
os.path.join('lib', 'jvm')
]
java_folders = findJavaFolders()
for prefix in java_folders:
for mid in subpaths:
if os.path.isfile(os.path.join(prefix,mid+extension)):
return os.path.join(prefix,mid+extension)
raise Exception('JVM search failed: no jvm'+extension+' found. (Searchpath: '+(';'.join(java_folders))+')')
define_macros = [('PYJAVA_SETUP_PY', '1',),('PYJAVA_EXPORT','1'),('PYJAVA_JVM_LOADLIBRARY','1')]
try:
jvmfile = "\"\""+findJavaLibrary().replace("\\",'\\\\')+"\"\""
define_macros.append(('PYJAVA_JVM_LOCATIONHINT',jvmfile))
except:
pass
cpyjava_module = Extension('cpyjava',
define_macros = define_macros,
include_dirs = [os.path.join(sourcedir,'src')],
libraries = [],
library_dirs = [],
sources = [os.path.join(os.path.join(os.path.join(sourcedir,'src'),'pyjava'),x) for x in os.listdir(os.path.join(os.path.join(sourcedir,'src'),'pyjava')) if x.endswith(".c")])
setup (name = 'cpyjava',
version = '0.6.5',
description = 'python extension to use java objects',
author = 'Marc Greim',
author_email = '',
url = 'https://github.com/m-g-90/cpyjava',
long_description = '''
python extension to use java objects.
License: GNU Lesser General Public License v3.0
''',
ext_modules = [cpyjava_module])
| lgpl-3.0 | -4,174,608,035,547,583,000 | 31.9375 | 195 | 0.568447 | false |
danny200309/anaconda | anaconda_lib/autopep/autopep8_lib/lib2to3/pgen2/tokenize.py | 67 | 19169 | # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.
"""Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
__author__ = 'Ka-Ping Yee <[email protected]>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
from codecs import BOM_UTF8, lookup
from lib2to3.pgen2.token import *
from . import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
"generate_tokens", "untokenize"]
del token
try:
bytes
except NameError:
# Support bytes type in Python <= 2.5, so 2to3 turns itself into
# valid Python 3 code.
bytes = str
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
Binnumber = r'0[bB][01]*'
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[oO]?[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?", r"->",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = list(map(
re.compile, (Token, PseudoToken, Single3, Double3)))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
"u'''": single3prog, 'u"""': double3prog,
"b'''": single3prog, 'b"""': double3prog,
"ur'''": single3prog, 'ur"""': double3prog,
"br'''": single3prog, 'br"""': double3prog,
"R'''": single3prog, 'R"""': double3prog,
"U'''": single3prog, 'U"""': double3prog,
"B'''": single3prog, 'B"""': double3prog,
"uR'''": single3prog, 'uR"""': double3prog,
"Ur'''": single3prog, 'Ur"""': double3prog,
"UR'''": single3prog, 'UR"""': double3prog,
"bR'''": single3prog, 'bR"""': double3prog,
"Br'''": single3prog, 'Br"""': double3prog,
"BR'''": single3prog, 'BR"""': double3prog,
'r': None, 'R': None,
'u': None, 'U': None,
'b': None, 'B': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"b'''", 'b"""', "B'''", 'B"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""',):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"u'", 'u"', "U'", 'U"',
"b'", 'b"', "B'", 'B"',
"ur'", 'ur"', "Ur'", 'Ur"',
"uR'", 'uR"', "UR'", 'UR"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"', ):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
def printtoken(type, token, start, end, line): # for testing
    (srow, scol) = start
    (erow, ecol) = end
    print("%d,%d-%d,%d:\t%s\t%s" %
          (srow, scol, erow, ecol, tok_name[type], repr(token)))
def tokenize(readline, tokeneater=printtoken):
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
The first parameter, readline, must be a callable object which provides
the same interface as the readline() method of built-in file objects.
Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is
called once for each token, with five arguments, corresponding to the
tuples generated by generate_tokens().
"""
try:
tokenize_loop(readline, tokeneater)
except StopTokenizing:
pass
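# Illustrative sketch (not part of the upstream module, Python 3 idiom):
# driving tokenize() with a readline built from an in-memory string and a
# tokeneater that simply collects the 5-tuples.
def _tokenize_string_example(source="x = 1\n"):
    from io import StringIO
    collected = []
    tokenize(StringIO(source).readline, lambda *tok: collected.append(tok))
    return collected    # list of (type, string, start, end, line) tuples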
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
for token_info in generate_tokens(readline):
tokeneater(*token_info)
class Untokenizer:
def __init__(self):
self.tokens = []
self.prev_row = 1
self.prev_col = 0
def add_whitespace(self, start):
row, col = start
assert row <= self.prev_row
col_offset = col - self.prev_col
if col_offset:
self.tokens.append(" " * col_offset)
def untokenize(self, iterable):
for t in iterable:
if len(t) == 2:
self.compat(t, iterable)
break
tok_type, token, start, end, line = t
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
if tok_type in (NEWLINE, NL):
self.prev_row += 1
self.prev_col = 0
return "".join(self.tokens)
def compat(self, token, iterable):
startline = False
indents = []
toks_append = self.tokens.append
toknum, tokval = token
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum in (NEWLINE, NL):
startline = True
for tok in iterable:
toknum, tokval = tok[:2]
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read
in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present, but
disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return bytes()
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
matches = cookie_re.findall(line_string)
if not matches:
return None
encoding = _get_normal_name(matches[0])
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
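# Illustrative sketch (not part of the upstream module): detect_encoding()
# expects a readline over bytes, e.g. a file opened in binary mode. The path
# below is a placeholder.
def _detect_encoding_example(path="example.py"):
    with open(path, "rb") as source:
        encoding, first_lines = detect_encoding(source.readline)
    return encoding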
def untokenize(iterable):
"""Transform tokens back into Python source code.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
Round-trip invariant for limited input:
# Output text will tokenize the back to the input
t1 = [tok[:2] for tok in generate_tokens(f.readline)]
newcode = untokenize(t1)
readline = iter(newcode.splitlines(1)).next
t2 = [tok[:2] for tok in generate_tokens(readline)]
assert t1 == t2
"""
ut = Untokenizer()
return ut.untokenize(iterable)
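# Illustrative round-trip sketch (not part of the upstream module), mirroring
# the limited-input invariant described in the docstring above.
def _untokenize_roundtrip_example(source="a = b + 1\n"):
    from io import StringIO
    t1 = [tok[:2] for tok in generate_tokens(StringIO(source).readline)]
    newcode = untokenize(t1)
    t2 = [tok[:2] for tok in generate_tokens(StringIO(newcode).readline)]
    assert t1 == t2
    return newcode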
def generate_tokens(readline):
"""
The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
while 1: # loop over lines in stream
try:
line = readline()
except StopIteration:
line = ''
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield (COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield (NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
newline = NEWLINE
if parenlev > 0:
newline = NL
yield (newline, token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
# This yield is new; needed for better idempotency:
yield (NL, token, spos, (lnum, pos), line)
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
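# Illustrative sketch (not part of the upstream module): naming the fields of
# the 5-tuples produced by generate_tokens().
def _generate_tokens_example(source="x = 1\n"):
    from io import StringIO
    names = []
    for toknum, tokval, start, end, line in generate_tokens(StringIO(source).readline):
        names.append((tok_name[toknum], tokval, start, end))
    return names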
if __name__ == '__main__': # testing
import sys
if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
else: tokenize(sys.stdin.readline)
| gpl-3.0 | -5,711,389,601,434,276,000 | 37.338 | 87 | 0.522771 | false |
koorukuroo/networkx_for_unicode | networkx/algorithms/operators/unary.py | 11 | 1748 | """Unary operations on graphs"""
# Copyright (C) 2004-2013 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = """\n""".join(['Aric Hagberg <[email protected]>',
'Pieter Swart ([email protected])',
'Dan Schult([email protected])'])
__all__ = ['complement', 'reverse']
def complement(G, name=None):
"""Return the graph complement of G.
Parameters
----------
G : graph
A NetworkX graph
name : string
Specify name for new graph
Returns
-------
GC : A new graph.
Notes
------
Note that complement() does not create self-loops and also
does not produce parallel edges for MultiGraphs.
Graph, node, and edge data are not propagated to the new graph.
"""
if name is None:
name = "complement(%s)"%(G.name)
R = G.__class__()
R.name = name
R.add_nodes_from(G)
R.add_edges_from( ((n, n2)
for n,nbrs in G.adjacency_iter()
for n2 in G if n2 not in nbrs
if n != n2) )
return R
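# Illustrative sketch (not part of the original module): the complement of a
# 4-node path graph contains exactly the edges the path lacks.
def _complement_example():
    G = nx.path_graph(4)         # edges: (0, 1), (1, 2), (2, 3)
    GC = complement(G)
    return sorted(GC.edges())    # [(0, 2), (0, 3), (1, 3)]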
def reverse(G, copy=True):
"""Return the reverse directed graph of G.
Parameters
----------
G : directed graph
A NetworkX directed graph
copy : bool
If True, then a new graph is returned. If False, then the graph is
reversed in place.
Returns
-------
H : directed graph
The reversed G.
"""
if not G.is_directed():
raise nx.NetworkXError("Cannot reverse an undirected graph.")
else:
return G.reverse(copy=copy)
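# Illustrative sketch (not part of the original module): reversing a small
# directed graph returns a new graph with every edge flipped.
def _reverse_example():
    D = nx.DiGraph([(1, 2), (2, 3)])
    R = reverse(D)
    return sorted(R.edges())     # [(2, 1), (3, 2)]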
| bsd-3-clause | 4,400,587,694,061,278,000 | 24.705882 | 74 | 0.556636 | false |
pombredanne/cpe | cpe/comp/cpecomp.py | 2 | 10392 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of cpe package.
This module allows to store the value of the components of a CPE Name and
compare it with others.
Copyright (C) 2013 Alejandro Galindo García, Roberto Abdelkader Martínez Pérez
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
For any problems using the cpe package, or general questions and
feedback about it, please contact:
- Alejandro Galindo García: [email protected]
- Roberto Abdelkader Martínez Pérez: [email protected]
"""
import types
class CPEComponent(object):
"""
Represents a generic component of CPE Name,
compatible with the components of all versions of CPE specification.
"""
###############
# CONSTANTS #
###############
# Constants of possible versions of CPE components
#: Version 1.1 of CPE component
COMP_1_1 = "1.1"
#: Version 2.2 of CPE component
COMP_2_2 = "2.2"
#: Version 2.3 with WFN style of CPE component
COMP_2_3_WFN = "2.3_wfn"
#: Version 2.3 with URI style of CPE component
COMP_2_3_URI = "2.3_uri"
#: Version 2.3 with formatted string style of CPE component
COMP_2_3_FS = "2.3_fs"
# Attributes associated with components of all versions of CPE
#: Part attribute of CPE Name that indicates the type of system
#: associated with the product
ATT_PART = "part"
#: Vendor attribute of CPE Name that describes or identify the person or
#: organization that manufactured or created the product
ATT_VENDOR = "vendor"
#: Product attribute of CPE Name that describes or identify the most common
#: and recognizable title or name of the product
ATT_PRODUCT = "product"
#: Version attribute of CPE Name that indicates vendor-specific
#: alphanumeric strings characterizing the particular release version
#: of the product
ATT_VERSION = "version"
#: Version attribute of CPE Name that indicates vendor-specific
#: alphanumeric strings characterizing the particular update,
#: service pack, or point release of the product
ATT_UPDATE = "update"
#: Edition attribute of CPE Name that captures the edition-related terms
#: applied by the vendor to the product
ATT_EDITION = "edition"
#: Language attribute of CPE Name that defines the language supported
#: in the user interface of the product being described
ATT_LANGUAGE = "language"
# Attributes associated with components of version 2.3 of CPE
#: SW_edition attribute of version 2.3 of CPE Name that characterizes
#: how the product is tailored to a particular market or class of
#: end users
ATT_SW_EDITION = "sw_edition"
#: Target_SW attribute of version 2.3 of CPE Name that characterizes the
#: software computing environment within which the product operates
ATT_TARGET_SW = "target_sw"
#: Target_HW attribute of version 2.3 of CPE Name that characterizes the
#: instruction set architecture (e.g., x86) on which the product being
#: described or identified by the WFN operates
ATT_TARGET_HW = "target_hw"
#: Other attribute of version 2.3 of CPE Name that capture any other
#: general descriptive or identifying information which is vendor-
#: or product-specific and which does not logically fit in any other
#: attribute value
ATT_OTHER = "other"
#: List of attribute names associated with CPE Name components
#: (versions 1.1 and 2.2 of CPE specification)
CPE_COMP_KEYS = (ATT_PART,
ATT_VENDOR,
ATT_PRODUCT,
ATT_VERSION,
ATT_UPDATE,
ATT_EDITION,
ATT_LANGUAGE)
#: List of attribute names associated with CPE Name components
#: of version 2.3
CPE_COMP_KEYS_EXTENDED = (ATT_PART,
ATT_VENDOR,
ATT_PRODUCT,
ATT_VERSION,
ATT_UPDATE,
ATT_EDITION,
ATT_LANGUAGE,
ATT_SW_EDITION,
ATT_TARGET_SW,
ATT_TARGET_HW,
ATT_OTHER)
# Possible values of "part" attribute of CPE (type of system)
#: Value of part attribute associated with a hardware system
VALUE_PART_HW = "h"
#: Value of part attribute associated with an operating system
VALUE_PART_OS = "o"
#: Value of part attribute associated with an application
VALUE_PART_APP = "a"
#: Value of part attribute that indicates a CPE Name with
#: undefined type of system
VALUE_PART_UNDEFINED = "u"
#: Possible values of a type of system in CPE specification:
#: hardware, operating system, software and undefined
SYSTEM_VALUES = (VALUE_PART_HW,
VALUE_PART_OS,
VALUE_PART_APP,
VALUE_PART_UNDEFINED)
###############
# VARIABLES #
###############
#: Order of attributes of CPE Name components
ordered_comp_parts = {0: ATT_PART,
1: ATT_VENDOR,
2: ATT_PRODUCT,
3: ATT_VERSION,
4: ATT_UPDATE,
5: ATT_EDITION,
6: ATT_LANGUAGE,
7: ATT_SW_EDITION,
8: ATT_TARGET_SW,
9: ATT_TARGET_HW,
10: ATT_OTHER}
###################
# CLASS METHODS #
###################
@classmethod
def is_valid_attribute(cls, att_name):
"""
Check if input attribute name is correct.
:param string att_name: attribute name to check
:returns: True is attribute name is valid, otherwise False
:rtype: boolean
TEST: a valid attribute
>>> from .cpecomp import CPEComponent
>>> att = CPEComponent.ATT_PRODUCT
>>> CPEComponent.is_valid_attribute(att)
True
"""
return att_name in CPEComponent.CPE_COMP_KEYS_EXTENDED
####################
# OBJECT METHODS #
####################
def __contains__(self, item):
"""
Returns True if item is included in set of values of self.
:param CPEComponent item: component to find in self
:returns: True if item is included in set of self, otherwise False
:rtype: boolean
"""
from .cpecomp_undefined import CPEComponentUndefined
from .cpecomp_empty import CPEComponentEmpty
from .cpecomp_anyvalue import CPEComponentAnyValue
if ((self == item) or
isinstance(self, CPEComponentUndefined) or
isinstance(self, CPEComponentEmpty) or
isinstance(self, CPEComponentAnyValue)):
return True
return False
def __eq__(self, other):
"""
Returns True if other (first element of operation) and
self (second element of operation) are equal components,
false otherwise.
:param CPEComponent other: component to compare
:returns: True if other == self, False otherwise
:rtype: boolean
"""
len_self = len(self._standard_value)
len_other = len(other._standard_value)
if isinstance(self._standard_value, list):
# Self is version 1.1 of CPE Name
if isinstance(other._standard_value, list):
# Other is version 1.1 of CPE Name
value_self = self._standard_value
value_other = other._standard_value
# Other is higher version than to 1.1 of CPE Name
elif len_self == 1:
value_self = self._standard_value[0]
value_other = other._standard_value
else:
# The comparison between components is impossible
return False
else:
# Self is higher version than 1.1 of CPE Name
if isinstance(other._standard_value, list):
# Other is version 1.1 of CPE Name
if len_other == 1:
value_self = self._standard_value
value_other = other._standard_value[0]
else:
# The comparison between components is impossible
return False
else:
value_self = self._standard_value
value_other = other._standard_value
return ((value_self == value_other) and
(self._is_negated == other._is_negated))
def __init__(self, comp_str):
"""
Store the value of component.
:param string comp_str: value of component value
:returns: None
"""
self._is_negated = False
self._encoded_value = comp_str
self._standard_value = [comp_str]
def __ne__(self, other):
"""
Returns True if other (first element of operation) and
self (second element of operation) are not equal components,
false otherwise.
:param CPEComponent other: component to compare
:returns: True if other != self, False otherwise
:rtype: boolean
"""
return not (self == other)
def __repr__(self):
"""
Returns a unambiguous representation of CPE component.
:returns: Representation of CPE component as string
:rtype: string
"""
return "{0}()".format(self.__class__.__name__)
if __name__ == "__main__":
import doctest
doctest.testmod()
doctest.testfile('../tests/testfile_cpecomp.txt')
| lgpl-3.0 | -875,874,535,469,048,000 | 32.182109 | 79 | 0.59378 | false |
nagyistoce/koalacloud | boto/ec2/instanceinfo.py | 42 | 1912 | # Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class InstanceInfo(object):
"""
Represents an EC2 Instance status response from CloudWatch
"""
def __init__(self, connection=None, id=None, state=None):
"""
:ivar str id: The instance's EC2 ID.
:ivar str state: Specifies the current status of the instance.
"""
self.connection = connection
self.id = id
self.state = state
def __repr__(self):
return 'InstanceInfo:%s' % self.id
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'instanceId' or name == 'InstanceId':
self.id = value
elif name == 'state':
self.state = value
else:
setattr(self, name, value)
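    # Added usage sketch (not part of the original module): boto's response
    # parser drives startElement/endElement above, so a response fragment such as
    #   <instanceId>i-1a2b3c4d</instanceId><state>running</state>
    # leaves info.id == 'i-1a2b3c4d' and info.state == 'running' on an
    # InstanceInfo instance; any other element name is attached via setattr.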
| apache-2.0 | -6,542,789,492,319,992,000 | 36.490196 | 74 | 0.680439 | false |
splantefeve/systemd | src/test/test-systemd-tmpfiles.py | 21 | 5410 | #!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1+
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
import os
import sys
import socket
import subprocess
import tempfile
import pwd
import grp
try:
from systemd import id128
except ImportError:
id128 = None
EX_DATAERR = 65 # from sysexits.h
EXIT_TEST_SKIP = 77
try:
subprocess.run
except AttributeError:
sys.exit(EXIT_TEST_SKIP)
exe_with_args = sys.argv[1:]
def test_line(line, *, user, returncode=EX_DATAERR, extra={}):
args = ['--user'] if user else []
print('Running {} on {!r}'.format(' '.join(exe_with_args + args), line))
c = subprocess.run(exe_with_args + ['--create', '-'] + args,
input=line, stdout=subprocess.PIPE, universal_newlines=True,
**extra)
assert c.returncode == returncode, c
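# Reference note added for clarity (not in the original test): tmpfiles.d lines
# follow the column layout "Type Path Mode User Group Age Argument", e.g.
#   f /tmp/example 0644 - - - hello
# creates /tmp/example containing "hello"; the strings fed to test_line() below
# intentionally break that layout to exercise the error path.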
def test_invalids(*, user):
test_line('asdfa', user=user)
test_line('f "open quote', user=user)
test_line('f closed quote""', user=user)
test_line('Y /unknown/letter', user=user)
test_line('w non/absolute/path', user=user)
test_line('s', user=user) # s is for short
test_line('f!! /too/many/bangs', user=user)
test_line('f++ /too/many/plusses', user=user)
test_line('f+!+ /too/many/plusses', user=user)
test_line('f!+! /too/many/bangs', user=user)
test_line('w /unresolved/argument - - - - "%Y"', user=user)
test_line('w /unresolved/argument/sandwich - - - - "%v%Y%v"', user=user)
test_line('w /unresolved/filename/%Y - - - - "whatever"', user=user)
test_line('w /unresolved/filename/sandwich/%v%Y%v - - - - "whatever"', user=user)
test_line('w - - - - - "no file specified"', user=user)
test_line('C - - - - - "no file specified"', user=user)
test_line('C non/absolute/path - - - - -', user=user)
test_line('b - - - - - -', user=user)
test_line('b 1234 - - - - -', user=user)
test_line('c - - - - - -', user=user)
test_line('c 1234 - - - - -', user=user)
test_line('t - - -', user=user)
test_line('T - - -', user=user)
test_line('a - - -', user=user)
test_line('A - - -', user=user)
test_line('h - - -', user=user)
test_line('H - - -', user=user)
def test_uninitialized_t():
if os.getuid() == 0:
return
test_line('w /foo - - - - "specifier for --user %t"',
user=True, returncode=0, extra={'env':{}})
def test_content(line, expected, *, user, extra={}):
d = tempfile.TemporaryDirectory(prefix='test-systemd-tmpfiles.')
arg = d.name + '/arg'
spec = line.format(arg)
test_line(spec, user=user, returncode=0, extra=extra)
content = open(arg).read()
print('expect: {!r}\nactual: {!r}'.format(expected, content))
assert content == expected
def test_valid_specifiers(*, user):
test_content('f {} - - - - two words', 'two words', user=user)
if id128:
try:
test_content('f {} - - - - %m', '{}'.format(id128.get_machine().hex), user=user)
except AssertionError as e:
print(e)
print('/etc/machine-id: {!r}'.format(open('/etc/machine-id').read()))
print('/proc/cmdline: {!r}'.format(open('/proc/cmdline').read()))
print('skipping')
test_content('f {} - - - - %b', '{}'.format(id128.get_boot().hex), user=user)
test_content('f {} - - - - %H', '{}'.format(socket.gethostname()), user=user)
test_content('f {} - - - - %v', '{}'.format(os.uname().release), user=user)
test_content('f {} - - - - %U', '{}'.format(os.getuid()), user=user)
test_content('f {} - - - - %G', '{}'.format(os.getgid()), user=user)
puser = pwd.getpwuid(os.getuid())
test_content('f {} - - - - %u', '{}'.format(puser.pw_name), user=user)
pgroup = grp.getgrgid(os.getgid())
test_content('f {} - - - - %g', '{}'.format(pgroup.gr_name), user=user)
# Note that %h is the only specifier in which we look the environment,
# because we check $HOME. Should we even be doing that?
home = os.path.expanduser("~")
test_content('f {} - - - - %h', '{}'.format(home), user=user)
xdg_runtime_dir = os.getenv('XDG_RUNTIME_DIR')
if xdg_runtime_dir is not None or not user:
test_content('f {} - - - - %t',
xdg_runtime_dir if user else '/run',
user=user)
xdg_config_home = os.getenv('XDG_CONFIG_HOME')
if xdg_config_home is not None or not user:
test_content('f {} - - - - %S',
xdg_config_home if user else '/var/lib',
user=user)
xdg_cache_home = os.getenv('XDG_CACHE_HOME')
if xdg_cache_home is not None or not user:
test_content('f {} - - - - %C',
xdg_cache_home if user else '/var/cache',
user=user)
if xdg_config_home is not None or not user:
test_content('f {} - - - - %L',
xdg_config_home + '/log' if user else '/var/log',
user=user)
test_content('f {} - - - - %%', '%', user=user)
if __name__ == '__main__':
test_invalids(user=False)
test_invalids(user=True)
test_uninitialized_t()
test_valid_specifiers(user=False)
test_valid_specifiers(user=True)
| gpl-2.0 | 1,773,790,200,876,041,500 | 36.832168 | 92 | 0.569131 | false |
oliverlee/sympy | sympy/diffgeom/tests/test_hyperbolic_space.py | 32 | 2582 | '''
unit test describing the hyperbolic half-plane with the Poincare metric. This
is a basic model of hyperbolic geometry on the (positive) half-space
{(x,y) \in R^2 | y > 0}
with the Riemannian metric
ds^2 = (dx^2 + dy^2)/y^2
It has constant negative scalar curvature = -2
https://en.wikipedia.org/wiki/Poincare_half-plane_model
'''
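# Reference values restated from the assertions below (added for readability):
# for g = (dx^2 + dy^2)/y^2 the non-zero Christoffel symbols of the 2nd kind are
#   Gamma^x_{xy} = Gamma^x_{yx} = -1/y,  Gamma^y_{xx} = 1/y,  Gamma^y_{yy} = -1/y
# the Ricci tensor is diag(-1/y^2, -1/y^2), the scalar curvature is R = -2 and
# the Gauss curvature is R/2 = -1.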
from sympy import diag
from sympy.diffgeom import (twoform_to_matrix,
metric_to_Christoffel_1st, metric_to_Christoffel_2nd,
metric_to_Riemann_components, metric_to_Ricci_components)
import sympy.diffgeom.rn
from sympy.tensor.array import ImmutableDenseNDimArray
def test_H2():
TP = sympy.diffgeom.TensorProduct
R2 = sympy.diffgeom.rn.R2
y = R2.y
dy = R2.dy
dx = R2.dx
g = (TP(dx, dx) + TP(dy, dy))*y**(-2)
automat = twoform_to_matrix(g)
mat = diag(y**(-2), y**(-2))
assert mat == automat
gamma1 = metric_to_Christoffel_1st(g)
assert gamma1[0, 0, 0] == 0
assert gamma1[0, 0, 1] == -y**(-3)
assert gamma1[0, 1, 0] == -y**(-3)
assert gamma1[0, 1, 1] == 0
assert gamma1[1, 1, 1] == -y**(-3)
assert gamma1[1, 1, 0] == 0
assert gamma1[1, 0, 1] == 0
assert gamma1[1, 0, 0] == y**(-3)
gamma2 = metric_to_Christoffel_2nd(g)
assert gamma2[0, 0, 0] == 0
assert gamma2[0, 0, 1] == -y**(-1)
assert gamma2[0, 1, 0] == -y**(-1)
assert gamma2[0, 1, 1] == 0
assert gamma2[1, 1, 1] == -y**(-1)
assert gamma2[1, 1, 0] == 0
assert gamma2[1, 0, 1] == 0
assert gamma2[1, 0, 0] == y**(-1)
Rm = metric_to_Riemann_components(g)
assert Rm[0, 0, 0, 0] == 0
assert Rm[0, 0, 0, 1] == 0
assert Rm[0, 0, 1, 0] == 0
assert Rm[0, 0, 1, 1] == 0
assert Rm[0, 1, 0, 0] == 0
assert Rm[0, 1, 0, 1] == -y**(-2)
assert Rm[0, 1, 1, 0] == y**(-2)
assert Rm[0, 1, 1, 1] == 0
assert Rm[1, 0, 0, 0] == 0
assert Rm[1, 0, 0, 1] == y**(-2)
assert Rm[1, 0, 1, 0] == -y**(-2)
assert Rm[1, 0, 1, 1] == 0
assert Rm[1, 1, 0, 0] == 0
assert Rm[1, 1, 0, 1] == 0
assert Rm[1, 1, 1, 0] == 0
assert Rm[1, 1, 1, 1] == 0
Ric = metric_to_Ricci_components(g)
assert Ric[0, 0] == -y**(-2)
assert Ric[0, 1] == 0
assert Ric[1, 0] == 0
assert Ric[0, 0] == -y**(-2)
assert Ric == ImmutableDenseNDimArray([-y**(-2), 0, 0, -y**(-2)], (2, 2))
## scalar curvature is -2
#TODO - it would be nice to have index contraction built-in
R = (Ric[0, 0] + Ric[1, 1])*y**2
assert R == -2
## Gauss curvature is -1
assert R/2 == -1
| bsd-3-clause | 4,924,178,747,862,059,000 | 27.373626 | 85 | 0.538342 | false |
lichengshuang/python | others/163music/config.py | 1 | 2295 | VERSION = "1.0.0"
SESSION_EN = True # enable session (keep-alive) request mode
CELLPHONE_LOGIN_PARAM = {
"phone":"",
"password":"",
"rememberLogin":True
}
DISCOVER = ["playlist", "toplist", "artist", "album"]
DISCOVER_INFO = {
"playlist":{},
"toplist":{},
"artist":{
"top":{
"url":"http://music.163.com/weapi/artist/top?csrf_token=",
"param":{
}
},
},
"album":{}
}
COMMENTS_TYPE = ["A_PL_0","R_SO_4"] # 0 - playlist comment list, 1 - song comment list
DISCOVER_PLAYLIST_LINK = "http://music.163.com/discover/playlist/" # NetEase Cloud Music link: Discover Music - Playlists
PLAYLIST_LINK = "http://music.163.com/playlist/" # NetEase Cloud Music playlist page
TOPLIST_LINK = "http://music.163.com/discover/toplist?id=19723756"
COMMENTS_LINK = "http://music.163.com/weapi/v1/resource/comments/" # comments API link
SEARCH_SUGGEST_LINK = "http://music.163.com/weapi/search/suggest/web?csrf_token=" # search suggestion link
SEARCH_LINK = "http://music.163.com/weapi/cloudsearch/get/web?csrf_token="
COMMENTS_PARAM = { # comments POST data format
"rid": "",
"offset": "0",
"total": "true",
"limit": "20",
"csrf_token": ""
}
COMMENTS_PARAM_LIMIT_SIZE = {"min":1, "max":100} # limit bounds for the comments POST data
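# Usage sketch (an assumption, not verified against the API): a comment thread id
# is formed by joining a COMMENTS_TYPE prefix with a resource id, e.g. a song id
# of 186016 would give "R_SO_4_186016", and the encrypted COMMENTS_PARAM payload
# is POSTed to COMMENTS_LINK + thread_id.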
PLAYLIST_PARAM = { # Discover Music - Playlists POST data format
"order":"hot",
"cat":"全部",
"limit":35,
"offset":0
}
PLAYLIST_PARAM_LIMIT_SIZE = {"min":1, "max":100} # limit bounds for the playlist POST data
SEARCH_SUGGEST_PARAM = {
"s":"",
"limit":"",
"csrf_token":"",
}
SEARCH_PARAM = {
"hlposttag":"</span>",
"hlpretag":'<span class="s-fc7">',
"limit":"30",
"offset":"0",
"s":"",
"total":"true",
"type":"1",
"csrf_token":"",
}
HEADERS = {
"accept":"*/*",
"accept-encoding":"gzip, deflate",
"accept-language":"zh-cn,zh;q=0.8",
"cache-control":"max-age=0",
"connection":"keep-alive",
"host":"music.163.com",
"upgrade-insecure-requests":"1",
"user-agent":'mozilla/5.0 (windows nt 10.0; win64; x64) applewebkit/537.36 (khtmL, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
}
| apache-2.0 | -7,869,895,987,362,727,000 | 27.226667 | 135 | 0.53897 | false |
skirsdeda/cmsplugin-filer | cmsplugin_filer_file/migrations/0004_fix_table_names.py | 10 | 9724 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from cmsplugin_filer_utils.migration import rename_tables_new_to_old, rename_tables_old_to_new
class Migration(SchemaMigration):
cms_plugin_table_mapping = (
# (old_name, new_name),
('cmsplugin_filerfile', 'cmsplugin_filer_file_filerfile'),
)
needed_by = (
("cms", "0069_static_placeholder_permissions"),
)
def forwards(self, orm):
rename_tables_old_to_new(db, self.cms_plugin_table_mapping)
def backwards(self, orm):
rename_tables_new_to_old(db, self.cms_plugin_table_mapping)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'cmsplugin_filer_file.filerfile': {
'Meta': {'object_name': 'FilerFile', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['filer.File']"}),
'style': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'target_blank': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'all_files'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_filer.file_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'filer.folder': {
'Meta': {'ordering': "(u'name',)", 'unique_together': "((u'parent', u'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
}
}
complete_apps = ['cmsplugin_filer_file']
| bsd-3-clause | 4,347,537,131,040,272,400 | 76.792 | 195 | 0.559955 | false |
q1ang/seaborn | seaborn/algorithms.py | 35 | 6889 | """Algorithms to support fitting routines in seaborn plotting functions."""
from __future__ import division
import numpy as np
from scipy import stats
from .external.six.moves import range
def bootstrap(*args, **kwargs):
"""Resample one or more arrays with replacement and store aggregate values.
Positional arguments are a sequence of arrays to bootstrap along the first
axis and pass to a summary function.
Keyword arguments:
n_boot : int, default 10000
Number of iterations
axis : int, default None
Will pass axis to ``func`` as a keyword argument.
units : array, default None
Array of sampling unit IDs. When used the bootstrap resamples units
and then observations within units instead of individual
datapoints.
smooth : bool, default False
        If True, performs a smoothed bootstrap (draws samples from a kernel
        density estimate); only works for one-dimensional inputs and cannot
        be used when `units` is present.
func : callable, default np.mean
Function to call on the args that are passed in.
random_seed : int | None, default None
Seed for the random number generator; useful if you want
reproducible resamples.
Returns
-------
boot_dist: array
array of bootstrapped statistic values
"""
# Ensure list of arrays are same length
if len(np.unique(list(map(len, args)))) > 1:
raise ValueError("All input arrays must have the same length")
n = len(args[0])
# Default keyword arguments
n_boot = kwargs.get("n_boot", 10000)
func = kwargs.get("func", np.mean)
axis = kwargs.get("axis", None)
units = kwargs.get("units", None)
smooth = kwargs.get("smooth", False)
random_seed = kwargs.get("random_seed", None)
if axis is None:
func_kwargs = dict()
else:
func_kwargs = dict(axis=axis)
# Initialize the resampler
rs = np.random.RandomState(random_seed)
# Coerce to arrays
args = list(map(np.asarray, args))
if units is not None:
units = np.asarray(units)
# Do the bootstrap
if smooth:
return _smooth_bootstrap(args, n_boot, func, func_kwargs)
if units is not None:
return _structured_bootstrap(args, n_boot, units, func,
func_kwargs, rs)
boot_dist = []
for i in range(int(n_boot)):
resampler = rs.randint(0, n, n)
sample = [a.take(resampler, axis=0) for a in args]
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
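# Usage sketch (not part of the original module):
#   x = np.random.randn(200)
#   boot_medians = bootstrap(x, n_boot=1000, func=np.median, random_seed=0)
#   ci = np.percentile(boot_medians, [2.5, 97.5])   # 95% bootstrap interval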
def _structured_bootstrap(args, n_boot, units, func, func_kwargs, rs):
"""Resample units instead of datapoints."""
unique_units = np.unique(units)
n_units = len(unique_units)
args = [[a[units == unit] for unit in unique_units] for a in args]
boot_dist = []
for i in range(int(n_boot)):
resampler = rs.randint(0, n_units, n_units)
sample = [np.take(a, resampler, axis=0) for a in args]
lengths = map(len, sample[0])
resampler = [rs.randint(0, n, n) for n in lengths]
sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)]
for a in sample]
sample = list(map(np.concatenate, sample))
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def _smooth_bootstrap(args, n_boot, func, func_kwargs):
"""Bootstrap by resampling from a kernel density estimate."""
n = len(args[0])
boot_dist = []
kde = [stats.gaussian_kde(np.transpose(a)) for a in args]
for i in range(int(n_boot)):
sample = [a.resample(n).T for a in kde]
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def randomize_corrmat(a, tail="both", corrected=True, n_iter=1000,
random_seed=None, return_dist=False):
"""Test the significance of set of correlations with permutations.
By default this corrects for multiple comparisons across one side
of the matrix.
Parameters
----------
a : n_vars x n_obs array
array with variables as rows
tail : both | upper | lower
whether test should be two-tailed, or which tail to integrate over
corrected : boolean
if True reports p values with respect to the max stat distribution
n_iter : int
number of permutation iterations
random_seed : int or None
seed for RNG
return_dist : bool
if True, return n_vars x n_vars x n_iter
Returns
-------
    p_mat : array
        array of probabilities for the actual correlations from the null CDF
"""
if tail not in ["upper", "lower", "both"]:
raise ValueError("'tail' must be 'upper', 'lower', or 'both'")
rs = np.random.RandomState(random_seed)
a = np.asarray(a, np.float)
flat_a = a.ravel()
n_vars, n_obs = a.shape
# Do the permutations to establish a null distribution
null_dist = np.empty((n_vars, n_vars, n_iter))
for i_i in range(n_iter):
perm_i = np.concatenate([rs.permutation(n_obs) + (v * n_obs)
for v in range(n_vars)])
a_i = flat_a[perm_i].reshape(n_vars, n_obs)
null_dist[..., i_i] = np.corrcoef(a_i)
# Get the observed correlation values
real_corr = np.corrcoef(a)
# Figure out p values based on the permutation distribution
p_mat = np.zeros((n_vars, n_vars))
upper_tri = np.triu_indices(n_vars, 1)
if corrected:
if tail == "both":
max_dist = np.abs(null_dist[upper_tri]).max(axis=0)
elif tail == "lower":
max_dist = null_dist[upper_tri].min(axis=0)
elif tail == "upper":
max_dist = null_dist[upper_tri].max(axis=0)
cdf = lambda x: stats.percentileofscore(max_dist, x) / 100.
for i, j in zip(*upper_tri):
observed = real_corr[i, j]
if tail == "both":
p_ij = 1 - cdf(abs(observed))
elif tail == "lower":
p_ij = cdf(observed)
elif tail == "upper":
p_ij = 1 - cdf(observed)
p_mat[i, j] = p_ij
else:
for i, j in zip(*upper_tri):
null_corrs = null_dist[i, j]
cdf = lambda x: stats.percentileofscore(null_corrs, x) / 100.
observed = real_corr[i, j]
if tail == "both":
p_ij = 2 * (1 - cdf(abs(observed)))
elif tail == "lower":
p_ij = cdf(observed)
elif tail == "upper":
p_ij = 1 - cdf(observed)
p_mat[i, j] = p_ij
# Make p matrix symettrical with nans on the diagonal
p_mat += p_mat.T
p_mat[np.diag_indices(n_vars)] = np.nan
if return_dist:
return p_mat, null_dist
return p_mat
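# Usage sketch (not part of the original module):
#   a = np.random.randn(4, 50)   # 4 variables, 50 observations
#   p_mat = randomize_corrmat(a, tail="both", n_iter=500, random_seed=0)
# the upper triangle of p_mat then holds the corrected permutation p values.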
| bsd-3-clause | -4,729,349,026,308,521,000 | 32.769608 | 79 | 0.590361 | false |
ValentmSTEM/gps_v2 | tornado/test/import_test.py | 9 | 1720 | import unittest
class ImportTest(unittest.TestCase):
def test_import_everything(self):
# Some of our modules are not otherwise tested. Import them
# all (unless they have external dependencies) here to at
# least ensure that there are no syntax errors.
import tornado.auth
import tornado.autoreload
# import tornado.curl_httpclient # depends on pycurl
# import tornado.database # depends on MySQLdb
import tornado.escape
import tornado.httpclient
import tornado.httpserver
import tornado.httputil
import tornado.ioloop
import tornado.iostream
import tornado.locale
import tornado.options
import tornado.netutil
# import tornado.platform.twisted # depends on twisted
import tornado.process
import tornado.simple_httpclient
import tornado.stack_context
import tornado.template
import tornado.testing
import tornado.util
import tornado.web
import tornado.websocket
import tornado.wsgi
# for modules with dependencies, if those dependencies can be loaded,
# load them too.
def test_import_pycurl(self):
try:
import pycurl
except ImportError:
pass
else:
import tornado.curl_httpclient
def test_import_mysqldb(self):
try:
import MySQLdb
except ImportError:
pass
else:
import tornado.database
def test_import_twisted(self):
try:
import twisted
except ImportError:
pass
else:
import tornado.platform.twisted
| mit | -7,956,158,265,615,655,000 | 29.175439 | 73 | 0.623256 | false |
ronaldahmed/robot-navigation | neural-navigation-with-lstm/MARCO/Sense.py | 2 | 26222 | #!/usr/bin/env python
import os, string
from nltk.probability import ConditionalFreqDist
from nltk.tagger import BackoffTagger,DefaultTagger,UnigramTagger,tagger_accuracy
#,TaggerI,SequentialTagger,NthOrderTagger,RegexpTagger
from nltk.tagger.brill import BrillTagger, FastBrillTaggerTrainer
from nltk.tagger.brill import SymmetricProximateTokensTemplate,ProximateTokensTemplate,ProximateTokensRule
from nltk.token import Token
from nltk.tree import Tree
from nltk.stemmer.porter import PorterStemmer
from nltk.featurestructure import FeatureStructure
from nltk_contrib import pywordnet
import enchant
from Senses import Senses, Senses2, Senses3
from DirectionCorpus import printDirs,constructItemRegexp,DirectionCorpusReader,saveParse
from Options import Options
from Utility import logger, lstail
pywordnet.setCacheCapacity(100)
Lexicon = 'Directions/Lexicon2.lex'
Corpus2 = True
if Corpus2: Senses.update(Senses2)
Corpus3 = True
if Corpus3: Senses.update(Senses3)
class ProximateSensesRule(ProximateTokensRule):
PROPERTY_NAME = 'sense' # for printing.
TAG='SENSE'
def extract_property(token): # [staticmethod]
"""@return: The given token's C{SENSE} property."""
return token['SENSE']
extract_property = staticmethod(extract_property)
class ProximateStemsRule(ProximateTokensRule):
PROPERTY_NAME = 'stem' # for printing.
TAG='STEM'
def extract_property(token): # [staticmethod]
"""@return: The given token's C{STEM} property."""
return token['STEM']
extract_property = staticmethod(extract_property)
class ProximateSensesTemplate(ProximateTokensTemplate):
TAG='SENSE'
class SymmetricProximateSensesTemplate(SymmetricProximateTokensTemplate):
def __init__(self, rule_class, *boundaries):
self._ptt1 = ProximateSensesTemplate(rule_class, *boundaries)
reversed = [(-e,-s) for (s,e) in boundaries]
self._ptt2 = ProximateSensesTemplate(rule_class, *reversed)
class SurfaceSemanticsStructure(FeatureStructure):
"""
A class of C{FeatureStructure} to represent surface semantics.
"""
def __setitem__(self, name, value):
if type(name) == str:
self._features[name] = value
elif type(name) == tuple:
if len(name) == 1:
self._features[name[0]] = value
else:
self._features[name[0]][name[1:]] = value
else: raise TypeError
def _repr(self, reentrances, reentrance_ids):
if 'MEAN' in self._features: return "[MEAN="+repr(self['MEAN'])+"]"
else: return FeatureStructure._repr(self, reentrances, reentrance_ids)
def add(self,key,value):
if key in self.feature_names():
val = self[key]
if isinstance(val,list) and isinstance(value,list):
val.extend(value)
elif isinstance(val,list):
val.append(value)
elif isinstance(value,list):
value.append(val)
self[key] = value
else: self[key] = [val, value]
else:
self[key] = value
def has_key(self,key): return self._features.has_key(key)
def getindex(self,name):
if not isinstance(self._features[name],FeatureStructure):
return None
childFeatures = self._features[name]._features
if 'INDEX' in childFeatures:
return self[name]['INDEX']
for feature in childFeatures:
index = self._features[name].getindex(feature)
if index is not None: return index
if '_' in name and name[-1] in string.digits:
return int(name.split('_')[-1])
return None
def sorted_features(self):
indices = [(self.getindex(name),name) for name in self._features if name != 'INDEX']
indices.sort()
return [name for (index,name) in indices]
def copydict(self,d):
self._features.update(d)
def deepcopy(self, memo=None):
"""
@return: a new copy of this surface semantics structure.
@param memo: The memoization dictionary, which should
typically be left unspecified.
"""
# Check the memoization dictionary.
if memo is None: memo = {}
memo_copy = memo.get(id(self))
if memo_copy is not None: return memo_copy
# Create a new copy. Do this *before* we fill out its
# features, in case of cycles.
newcopy = SurfaceSemanticsStructure()
memo[id(self)] = newcopy
features = newcopy._features
# Fill out the features.
for (fname, fval) in self._features.items():
if isinstance(fval, FeatureStructure):
features[fname] = fval.deepcopy(memo)
else:
features[fname] = fval
return newcopy
def parse(cls,s):
"""
Same as FeatureStructure.parse, but a classmethod,
so it will return the subclass.
Convert a string representation of a feature structure (as
displayed by repr) into a C{FeatureStructure}. This parse
imposes the following restrictions on the string
representation:
- Feature names cannot contain any of the following:
whitespace, parentheses, quote marks, equals signs,
dashes, and square brackets.
- Only the following basic feature value are supported:
strings, integers, variables, C{None}, and unquoted
alphanumeric strings.
- For reentrant values, the first mention must specify
a reentrance identifier and a value; and any subsequent
mentions must use arrows (C{'->'}) to reference the
reentrance identifier.
"""
try:
value, position = cls._parse(s, 0, {})
except ValueError, e:
estr = ('Error parsing field structure\n\n ' +
s + '\n ' + ' '*e.args[1] + '^ ' +
'Expected %s\n' % e.args[0])
raise ValueError, estr
if position != len(s): raise ValueError()
return value
def _parse(cls, s, position=0, reentrances=None):
"""
Same as FeatureStructure._parse, but a classmethod,
so it will return the subclass.
Helper function that parses a feature structure.
@param s: The string to parse.
@param position: The position in the string to start parsing.
@param reentrances: A dictionary from reentrance ids to values.
@return: A tuple (val, pos) of the feature structure created
by parsing and the position where the parsed feature
structure ends.
"""
# A set of useful regular expressions (precompiled)
_PARSE_RE = cls._PARSE_RE
# Check that the string starts with an open bracket.
if s[position] != '[': raise ValueError('open bracket', position)
position += 1
# If it's immediately followed by a close bracket, then just
# return an empty feature structure.
match = _PARSE_RE['bracket'].match(s, position)
if match is not None: return cls(), match.end()
# Build a list of the features defined by the structure.
# Each feature has one of the three following forms:
# name = value
# name (id) = value
# name -> (target)
features = {}
while position < len(s):
# Use these variables to hold info about the feature:
name = id = target = val = None
# Find the next feature's name.
match = _PARSE_RE['name'].match(s, position)
if match is None: raise ValueError('feature name', position)
name = match.group(1)
position = match.end()
# Check for a reentrance link ("-> (target)")
match = _PARSE_RE['reentrance'].match(s, position)
if match is not None:
position = match.end()
match = _PARSE_RE['ident'].match(s, position)
if match is None: raise ValueError('identifier', position)
target = match.group(1)
position = match.end()
try: features[name] = reentrances[target]
except: raise ValueError('bound identifier', position)
# If it's not a reentrance link, it must be an assignment.
else:
match = _PARSE_RE['assign'].match(s, position)
if match is None: raise ValueError('equals sign', position)
position = match.end()
# Find the feature's id (if specified)
match = _PARSE_RE['ident'].match(s, position)
if match is not None:
id = match.group(1)
if reentrances.has_key(id):
raise ValueError('new identifier', position+1)
position = match.end()
val, position = cls._parseval(s, position, reentrances)
features[name] = val
if id is not None:
reentrances[id] = val
# Check for a close bracket
match = _PARSE_RE['bracket'].match(s, position)
if match is not None:
return cls(**features), match.end()
# Otherwise, there should be a comma
match = _PARSE_RE['comma'].match(s, position)
if match is None: raise ValueError('comma', position)
position = match.end()
# We never saw a close bracket.
raise ValueError('close bracket', position)
def _parseval(cls, s, position, reentrances):
"""
Same as FeatureStructure._parseval, but a classmethod,
so it will return the subclass.
Helper function that parses a feature value. Currently
supports: None, integers, variables, strings, nested feature
structures.
@param s: The string to parse.
@param position: The position in the string to start parsing.
@param reentrances: A dictionary from reentrance ids to values.
@return: A tuple (val, pos) of the value created by parsing
and the position where the parsed value ends.
"""
# A set of useful regular expressions (precompiled)
_PARSE_RE = cls._PARSE_RE
# End of string (error)
if position == len(s): raise ValueError('value', position)
# String value
if s[position] in "'\"":
start = position
quotemark = s[position:position+1]
position += 1
while 1:
match = _PARSE_RE['stringmarker'].search(s, position)
if not match: raise ValueError('close quote', position)
position = match.end()
if match.group() == '\\': position += 1
elif match.group() == quotemark:
return eval(s[start:position]), position
# Nested feature structure
if s[position] == '[':
return cls._parse(s, position, reentrances)
# Variable
match = _PARSE_RE['var'].match(s, position)
if match is not None:
return FeatureVariable.parse(match.group()), match.end()
# None
match = _PARSE_RE['none'].match(s, position)
if match is not None:
return None, match.end()
# Integer value
match = _PARSE_RE['int'].match(s, position)
if match is not None:
return int(match.group()), match.end()
# Alphanumeric symbol (must be checked after integer)
match = _PARSE_RE['symbol'].match(s, position)
if match is not None:
return match.group(), match.end()
# We don't know how to parse this value.
raise ValueError('value', position)
_parseval=classmethod(_parseval)
_parse=classmethod(_parse)
parse=classmethod(parse)
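    # Added illustrative sketch (hypothetical frame string, mirroring the repr
    # format this parser accepts):
    #   sss = SurfaceSemanticsStructure.parse('[TEXT="turn", MEAN="turn_V_1", INDEX=1]')
    #   sss['MEAN']  ->  'turn_V_1'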
class SSS(SurfaceSemanticsStructure):
"""Alias for SurfaceSemanticsStructure"""
def tree2frame(Dirs, index = 0, parent = ''):
"""
@return: content frame representation of the surface semantics of the parse tree.
@rtype: C{SurfaceSemanticsStructure}
@return proposition name
@rtype: C{str}
@return index
@rtype: C{int}
"""
Frame = SurfaceSemanticsStructure()
if isinstance(Dirs,Tree):
Prop = Dirs.node.capitalize()
hasSubTree = True in [isinstance(child,Tree) for child in Dirs]
else: Prop = None
if isinstance(Dirs,Tree) and hasSubTree:
for i,child in enumerate(Dirs):
value,prop,index = tree2frame(child,index+1,Dirs.node.capitalize())
filed = False # Account for children with the same names
if value and prop:
prop_name = prop
while not filed:
if not Frame.has_key(prop):
Frame[prop] = value
filed = True
else:
prop= prop_name+'_'+str(i)
elif value:
Frame1 = Frame.unify(value)
if Frame1: Frame = Frame1
else:
while not filed:
if not Frame.has_key('SubFrame'+'_'+str(index)):
Frame['SubFrame'+'_'+str(index)] = value
filed = True
elif ((isinstance(Dirs,Tree) and not hasSubTree and Dirs)
or isinstance(Dirs,Token)):
index += 1
if isinstance(Dirs,Token): token = Dirs
if isinstance(Dirs,Tree):
token = Token(TEXT=' '.join([child['TEXT'] for child in Dirs]))
parent = Dirs.node.capitalize()
Frame['TEXT'] = token['TEXT']
Frame['MEAN'] = extractSurfaceSemantics(token,parent)
Frame['INDEX']=index
return Frame,Prop,index
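# Added illustrative sketch (hypothetical tree, not from the corpus): a parse
# tree such as (Travel (Travel_v turn) (Dir left)) yields a frame whose
# 'Travel_v' and 'Dir' slots each carry TEXT, MEAN and INDEX features, together
# with the proposition name 'Travel' and the last token index consumed.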
def trees2frames(Trees):
return [tree2frame(tree)[0] for tree in Trees]
def saveFrame(frame_list,filename,directory='Directions/ContentFrames/',prefix='ContentFrame-'):
"""
@param parse_list: List of content frames
@type parse_list: C{list}
@param directory: name of the directory to save the parses into
@type parse_list: C{str}
"""
filename = prefix+filename.split('/')[-1].split('-')[-1]
fullpath = os.path.join(directory,filename)
if os.path.isfile(fullpath): os.rename(fullpath,fullpath+'~')
file = open(fullpath,'w')
file.write('\n'.join([repr(d) for d in frame_list]))
file.write('\n')
file.close()
def getPartOfSpeech(token,parent):
POS = ''
if token.has_key('SENSE') or parent: # My tags
if token.has_key('SENSE'): Sense = token['SENSE']
else: Sense = parent
if Sense.endswith('_n') or Sense in ('Dist_unit', 'Struct_type'): POS='N'
elif Sense.endswith('_v') and Sense != 'Aux_v': POS='V'
elif Sense.endswith('_p'): POS='P'
elif Sense in ('Appear','Count','Reldist','Structural','Order_adj', 'Obj_adj'): POS='ADJ'
elif Sense in ('Dir','Adv'): POS='ADV'
else: return Sense
elif token.has_key('TAG'): # Penn Treebank
if token['TAG'].startswith('NN'): POS='N'
elif token['TAG'].startswith('VB'): POS='V'
elif token['TAG'].startswith('JJ') or token['TAG'].startswith('CD'): POS='ADJ'
elif token['TAG'].startswith('RB'): POS='ADV'
elif token['TAG'].startswith('IN'): POS='P'
else: return token['TAG']
return POS
def findMissingSenses():
for k,v in Senses.items():
for pos,senseList in v.items():
for s in senseList:
try:
if pos!='see': pywordnet.getSense(k,pos,s-1)
except (KeyError,TypeError),err: # Inflected form
                    logger.error('Trying inflected form b/c of Error %s',err)
logger.error('%s',pywordnet.getSense(s[0],pos,s[1][0]-1))
except: logger.error('Cannot find %s, %s, %s', k,pos,s)
def printSurfaceSemantics(text,POS,senses):
if isinstance(senses,str): return '_'.join([text,POS,senses])
return '_'.join([text,POS,','.join([str(i) for i in senses])])
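# Example (follows directly from the join above):
#   printSurfaceSemantics('chair', 'N', [1, 2])  ->  'chair_N_1,2'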
def splitSurfaceSemantics(sss_str):
if '_' not in sss_str: return []
sss_str = sss_str.replace('[','')
sss_str = sss_str.replace(']','')
text,POS,senses = sss_str.split('_')
if '(' in senses: # Handle 'see' redirection
text,senses = senses[2:-1].split('\', ')
senses = senses.split(',')
return text,POS,senses
def parseSurfaceSemantics(sss_str):
if '_' not in sss_str: return []
text,POS,senses = splitSurfaceSemantics(sss_str)
try:
return [pywordnet.getWord(text,POS).getSenses()[int(s)-1] for s in senses]
except (IndexError,KeyError):
sense = None
for altPOS in ('N','V','ADJ','ADV'):
if altPOS == POS: continue
try:
return [pywordnet.getWord(text,POS).getSenses()[int(s)-1] for s in senses]
except (IndexError,KeyError): pass
return []
def extractSurfaceSemantics(token,parent):
global Senses
POS=getPartOfSpeech(token,parent)
tokenSenses = {}
text = token['TEXT'].lower()
default = token['TEXT'].upper()
if POS in ['N', 'V', 'ADV', 'ADJ']:
try: #Redo as test = foo while not tokenSensesword: try: foo ; except KeyError: foo = next foo
tokenSenses = Senses[text]
except KeyError:
logger.warning('extractSurfaceSemantics : Text not in tagged senses: %s', text)
try:
#logger.warning('extractSurfaceSemantics : Previously unseen word but in WordNet?: %s', text)
# stringified range of possible senses without spaces
tokenSenses = {POS : range(1,len(pywordnet.getWord(text,POS).getSenses())+1)}
except KeyError:
try:
logger.warning('extractSurfaceSemantics : Inflected version of WordNet word? %s', text)
if text.endswith('s'):
text = text[:-1]
tokenSenses = Senses[text]
else:
stemmer = PorterStemmer() # Update WordNetStemmer to NLTK 1.4 API
stemmer.stem(token)
text = token['STEM']
tokenSenses = Senses[text]
except KeyError:
text = token['TEXT'].lower()
try:
logger.warning('extractSurfaceSemantics : Misspelling / typo of WordNet word? %s', text)
spellchecker = enchant.DictWithPWL('en_US', Lexicon)
s = ''
for s in spellchecker.suggest(text):
if s in Senses:
tokenSenses = Senses[s]
break
if not tokenSenses and spellchecker.suggest(text):
s = spellchecker.suggest(text)[0]
tokenSenses = {POS : range(1,len(pywordnet.getWord(s,POS).getSenses())+1)}
if s and Options.Spellcheck:
logger.warning('extractSurfaceSemantics : Found spelling correction %s for %s', s,text)
text = s
#logger.debug('*** extractSurfaceSemantics : Implement spelling correction. *** ')
#raise KeyError
except KeyError:
logger.error('extractSurfaceSemantics : Unknown token: %s', text)
return default
# Handle experienced typos.
if 'see' in tokenSenses:
### FIXME adding to dict for typos that are other words
text = tokenSenses['see']
try:
tokenSenses = Senses[text]
except: return default
# Handle morphology variants that wordnet understands.
elif isinstance(tokenSenses, tuple):
text,tokenSenses[POS] = tokenSenses[POS]
try:
return '_'.join([text,POS,','.join([str(i) for i in tokenSenses[POS]])])
except KeyError:
#logger.warning('extractSurfaceSemantics : Expected POS %s for token %s, Got %s, Using %s',
# POS, token, tokenSenses.keys(), tokenSenses.keys()[0])
if tokenSenses.keys():
POS = token['POS'] = tokenSenses.keys()[0]
return '_'.join([text,POS,','.join([str(i) for i in tokenSenses.values()[0]])])
except Exception,e:
logger.error('extractSurfaceSemantics: %s: Could not find sense %s for token %s',
e, POS, token) #tokenSenses, text
return default
def invertConditionalFreqDist(CFDist):
iCFDist = ConditionalFreqDist()
Stemmer=PorterStemmer()
for cond in CFDist.conditions():
for val in CFDist[cond].samples():
sense = cond.split('_')[0] #Cut off any POS
for tok in val:
if type(tok) == str:
iCFDist[Stemmer.raw_stem(tok)].inc(sense,CFDist[cond].count(val))
return iCFDist
def TrainSenseTagger(Pcfg,CFDist):
logger.info("Training unigram tagger:")
SenseUnigramTagger = UnigramTagger(TAG='SENSE',TEXT='STEM')
#SenseUnigramTagger.train(taggedData)
SenseUnigramTagger._freqdist = invertConditionalFreqDist(CFDist)
SenseDefaultTagger = DefaultTagger('APPEAR', TAG='SENSE',TEXT='STEM')
backoff = BackoffTagger([SenseUnigramTagger,SenseDefaultTagger], TAG='SENSE',TEXT='STEM')
return backoff
# # Brill tagger
# templates = [
# SymmetricProximateSensesTemplate(ProximateSensesRule, (1,1)),
# SymmetricProximateSensesTemplate(ProximateSensesRule, (2,2)),
# SymmetricProximateSensesTemplate(ProximateSensesRule, (1,2)),
# SymmetricProximateSensesTemplate(ProximateSensesRule, (1,3)),
# SymmetricProximateSensesTemplate(ProximateStemsRule, (1,1)),
# SymmetricProximateSensesTemplate(ProximateStemsRule, (2,2)),
# SymmetricProximateSensesTemplate(ProximateStemsRule, (1,2)),
# SymmetricProximateSensesTemplate(ProximateStemsRule, (1,3)),
# ProximateSensesTemplate(ProximateSensesRule, (-1, -1), (1,1)),
# ProximateSensesTemplate(ProximateStemsRule, (-1, -1), (1,1)),
# ]
# trace = 3
# trainer = FastBrillTaggerTrainer(backoff, templates, trace, TAG='SENSE')
# #trainer = BrillTaggerTrainer(backoff, templates, trace, TAG='SENSE')
# b = trainer.train(trainingData, max_rules=100, min_score=2)
def readCorrFrame(parses,instructID):
CaughtError=None
CaughtErrorTxt=''
frames=[]
for frame in open('Directions/ContentFrames/ContentFrame-'+instructID).readlines():
frame = str(frame) #Escape
if not frame or frame == '\n\n':
            return [], 'EOFError', 'Empty instruction file'
try:
frames.append(SurfaceSemanticsStructure.parse(frame))
except ValueError,e:
CaughtErrorTxt = "Can't parse: " + str(e)
logger.error("%s.",CaughtErrorTxt)
if str(e).startswith("Error parsing field structure"):
CaughtError = 'EOFError'
else:
CaughtError = 'ValueError'
return frames,CaughtError,CaughtErrorTxt
def getSSS(instructID):
if not instructID.endswith('txt'): instructID += '.txt'
return readCorrFrame([],instructID)[0]
if __name__ == '__main__':
logger.initLogger('Sense',LogDir='MarcoLogs')
Directors = ['EDA','EMWC','KLS','KXP','TJS','WLH']
Maps = ['Jelly','L','Grid']
Corpus = DirectionCorpusReader(constructItemRegexp(Directors,Maps))
else: Corpus = None
def genCorrContentFrame(filename, Corpus=Corpus, TreePath='CorrFullTrees/'):
if '-' in filename: instructionID = filename.split('-')[1]
else: instructionID = filename
print '\n',instructionID
if not Corpus:
Directors = ['EDA','EMWC','KLS','KXP','TJS','WLH']
Maps = ['Jelly','L','Grid']
Corpus = DirectionCorpusReader(constructItemRegexp(Directors,Maps))
Trees=[tree['TREE'] for tree in Corpus.read(TreePath+'/FullTree-'+instructionID)]
Frames = trees2frames(Trees)
saveParse(Trees,instructionID,directory='Directions/'+TreePath)
saveFrame(Frames,instructionID)
for frame in Frames: print `frame`
#for frame in readCorrFrame('',instructionID): print `frame`
def genUncorrContentFrames(Directors):
import re
Corpus = DirectionCorpusReader(constructItemRegexp(Directors, mapversions='[01]'))
for filename in lstail('Directions/FullTrees', re.compile('^FullTree-.*.txt$')):
try:
genCorrContentFrame(filename, TreePath='FullTrees/')
except ValueError:
pass
def output_pwl(filename=Lexicon):
spellchecker = enchant.DictWithPWL('en_US',filename)
Valid = []
Unknown = []
for word in Senses:
if spellchecker.check(word): Valid.append(word)
else: Unknown.append(word)
print 'Found:', Valid
print 'Unknown:'
Unmatched = []
Matched = []
for w in Unknown:
suggestions = spellchecker.suggest(w)
match = ''
for s in suggestions:
if spellchecker.pwl.check(s):
match = s
break
print ' ', w, 'Match: "'+match+'"', suggestions
if match: Matched.append((w,match))
else: Unmatched.append(w)
Matched.sort()
Unmatched.sort()
print 'Matched:'
for M in Matched: print M
print 'Unmatched', Unmatched
WordList = Valid+Unmatched
WordList.sort()
return WordList
if __name__ == '__main__':
for filename in file('Directions/CorrFullTrees/new.txt'): genCorrContentFrame(filename[:-1])
filelist = file('Directions/CorrFullTrees/new.txt','a')
filelist.write('\n')
filelist.close()
#for filename in os.listdir('Directions/CorrFullTrees/'):
# if filename.startswith("FullTree-") and filename.endswith(".txt"): genCorrContentFrame(filename)
#pass
| mit | 3,730,315,939,619,735,600 | 39.844237 | 115 | 0.590992 | false |
sajuptpm/murano | murano/api/v1/actions.py | 1 | 3059 | # Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from webob import exc
from murano.common import policy
from murano.common import wsgi
from murano.db.services import environments as envs
from murano.db.services import sessions
from murano.db import session as db_session
from murano.common.i18n import _LI, _LE, _
from murano.services import actions
from murano.services import states
from murano.utils import verify_env
LOG = logging.getLogger(__name__)
class Controller(object):
@verify_env
def execute(self, request, environment_id, action_id, body):
policy.check("execute_action", request.context, {})
LOG.debug('Action:Execute <ActionId: {0}>'.format(action_id))
unit = db_session.get_session()
# no new session can be opened if environment has deploying status
env_status = envs.EnvironmentServices.get_status(environment_id)
if env_status in (states.EnvironmentStatus.DEPLOYING,
states.EnvironmentStatus.DELETING):
LOG.info(_LI('Could not open session for environment <EnvId: {0}>,'
                         ' environment has deploying '
'status.').format(environment_id))
raise exc.HTTPForbidden()
user_id = request.context.user
session = sessions.SessionServices.create(environment_id, user_id)
if not sessions.SessionServices.validate(session):
LOG.error(_LE('Session <SessionId {0}> '
'is invalid').format(session.id))
raise exc.HTTPForbidden()
task_id = actions.ActionServices.execute(
action_id, session, unit, request.context.auth_token, body or {})
return {'task_id': task_id}
@verify_env
def get_result(self, request, environment_id, task_id):
policy.check("execute_action", request.context, {})
LOG.debug('Action:GetResult <TaskId: {0}>'.format(task_id))
unit = db_session.get_session()
result = actions.ActionServices.get_result(environment_id, task_id,
unit)
if result is not None:
return result
msg = (_('Result for task with environment_id: {} and '
'task_id: {} was not found.')
.format(environment_id, task_id))
LOG.error(msg)
raise exc.HTTPNotFound(msg)
def create_resource():
return wsgi.Resource(Controller())
| apache-2.0 | 5,078,734,744,131,354,000 | 35.416667 | 79 | 0.645636 | false |
ceakki/ddns-server | dns/mysql.py | 1 | 2003 | __author__ = 'cristian'
import MySQLdb
import MySQLdb.cursors
class MySql:
_host = ''
_user = ''
_password = ''
_database = ''
_con = None
error = ''
last_id = 0
num_rows = 0
def __init__(self, mysql_host, mysql_user, mysql_pass, mysql_db):
self._host = mysql_host
self._user = mysql_user
self._password = mysql_pass
self._database = mysql_db
# mysql connection function
def open(self):
self.error = ''
try:
con = MySQLdb.connect(self._host, self._user, self._password, self._database,
cursorclass=MySQLdb.cursors.DictCursor)
except MySQLdb.Error, e:
try:
con = None
self.error = "MySQL Error [%d]: %s" % (e.args[0], e.args[1])
except IndexError:
con = None
self.error = "MySQL Error: %s" % str(e)
return con
# mysql query function
def query(self, query):
self.error = ''
self.last_id = 0
self.num_rows = 0
if self._con is None:
con = self.open()
if self.error:
return False
self._con = con
if self._con.open is False:
con = self.open()
if self.error:
return False
self._con = con
with self._con:
try:
curs = self._con.cursor()
curs.execute(query)
rows = curs.fetchall()
if curs.lastrowid:
self.last_id = curs.lastrowid
self.num_rows = curs.rowcount
except MySQLdb.Error, e:
try:
rows = False
self.error = "MySQL Error [%d]: %s" % (e.args[0], e.args[1])
except IndexError:
rows = False
self.error = "MySQL Error: %s" % str(e)
return rows | mit | 3,517,876,505,594,465,300 | 23.439024 | 89 | 0.458313 | false |
rlefevre1/hpp-rbprm-corba | script/tests/robot_jumpEasy_path.py | 2 | 2903 | from hpp.corbaserver.rbprm.rbprmbuilder import Builder
from hpp.gepetto import Viewer
white=[1.0,1.0,1.0,1.0]
green=[0.23,0.75,0.2,0.5]
yellow=[0.85,0.75,0.15,1]
pink=[1,0.6,1,1]
orange=[1,0.42,0,1]
brown=[0.85,0.75,0.15,0.5]
blue = [0.0, 0.0, 0.8, 1.0]
grey = [0.7,0.7,0.7,1.0]
red = [0.8,0.0,0.0,1.0]
rootJointType = 'freeflyer'
packageName = 'hpp-rbprm-corba'
meshPackageName = 'hpp-rbprm-corba'
urdfName = 'robot_test_trunk'
urdfNameRom = ['robot_test_lleg_rom','robot_test_rleg_rom']
urdfSuffix = ""
srdfSuffix = ""
rbprmBuilder = Builder ()
rbprmBuilder.loadModel(urdfName, urdfNameRom, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix)
rbprmBuilder.setJointBounds ("base_joint_xyz", [-6,6, -3, 3, 0, 2.5])
rbprmBuilder.boundSO3([-0.1,0.1,-1,1,-1,1])
rbprmBuilder.setFilter(['robot_test_lleg_rom', 'robot_test_rleg_rom'])
rbprmBuilder.setNormalFilter('robot_test_lleg_rom', [0,0,1], 0.5)
rbprmBuilder.setNormalFilter('robot_test_rleg_rom', [0,0,1], 0.5)
rbprmBuilder.client.basic.robot.setDimensionExtraConfigSpace(3)
rbprmBuilder.client.basic.robot.setExtraConfigSpaceBounds([0,0,0,0,0,0])
#~ from hpp.corbaserver.rbprm. import ProblemSolver
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
ps = ProblemSolver( rbprmBuilder )
r = Viewer (ps)
r.loadObstacleModel (packageName, "ground_jump_easy", "planning")
q_init = rbprmBuilder.getCurrentConfig ();
#q_init[(len(q_init)-3):]=[0,0,1] # set normal for init / goal config
q_init [0:3] = [-4, 1, 0.9]; rbprmBuilder.setCurrentConfig (q_init); r (q_init)
q_goal = q_init [::]
#q_goal [0:3] = [-2, 0, 0.9]; r (q_goal) # first footbridge
q_goal [0:3] = [3, 1, 0.9]; r (q_goal) # bridge
#~ ps.addPathOptimizer("GradientBased")
ps.addPathOptimizer("RandomShortcut")
#ps.client.problem.selectSteeringMethod("SteeringDynamic")
ps.selectPathPlanner("RRTdynamic")
ps.setInitialConfig (q_init)
ps.addGoalConfig (q_goal)
ps.client.problem.selectConFigurationShooter("RbprmShooter")
ps.client.problem.selectPathValidation("RbprmPathValidation",0.05)
r(q_init)
#ps.client.problem.prepareSolveStepByStep()
#i = 0
#r.displayRoadmap("rm"+str(i),0.02)
#ps.client.problem.executeOneStep() ;i = i+1; r.displayRoadmap("rm"+str(i),0.02) ; r.client.gui.removeFromGroup("rm"+str(i-1),r.sceneName) ;
#t = ps.solve ()
r.solveAndDisplay("rm",1,0.02)
#t = ps.solve ()
#r.displayRoadmap("rm",0.02)
r.displayPathMap("rmPath",0,0.02)
from hpp.gepetto import PathPlayer
pp = PathPlayer (rbprmBuilder.client.basic, r)
pp.displayPath(0,r.color.lightGreen)
pp(0)
pp.displayPath(1,blue)
r.client.gui.setVisibility("path_0_root","ALWAYS_ON_TOP")
pp.displayPath(1,black)
pp (1)
#r.client.gui.removeFromGroup("rm",r.sceneName)
r.client.gui.removeFromGroup("rmPath",r.sceneName)
r.client.gui.removeFromGroup("path_1_root",r.sceneName)
#~ pp.toFile(1, "/home/stonneau/dev/hpp/src/hpp-rbprm-corba/script/paths/stair.path")
| lgpl-3.0 | -3,690,715,483,265,578,500 | 27.742574 | 140 | 0.720978 | false |
riverma/climate | ocw-ui/backend/tests/test_processing.py | 5 | 12972 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import unittest
import datetime as dt
from webtest import TestApp
from backend.config import WORK_DIR
from backend.run_webservices import app
import backend.processing as bp
import ocw.metrics as metrics
import ocw.data_source.rcmed as rcmed
from ocw.dataset import Dataset
from ocw.evaluation import Evaluation
import numpy
test_app = TestApp(app)
class TestLocalDatasetLoad(unittest.TestCase):
def setUp(self):
self.dataset_object = {
'dataset_id': os.path.abspath('/tmp/d1.nc'),
'var_name': 'tasmax',
'lat_name': 'lat',
'lon_name': 'lon',
'time_name': 'time'
}
def test_valid_load(self):
dataset = bp._load_local_dataset_object(self.dataset_object)
self.assertEqual(dataset.variable, self.dataset_object['var_name'])
def test_default_name_assignment(self):
dataset = bp._load_local_dataset_object(self.dataset_object)
self.assertEqual(dataset.name, 'd1.nc')
def test_custom_name_assignment(self):
self.dataset_object['name'] = 'CustomName'
dataset = bp._load_local_dataset_object(self.dataset_object)
self.assertEqual(dataset.name, self.dataset_object['name'])
class TestDatasetProessingHelper(unittest.TestCase):
def test_invalid_process_dataset_objects(self):
invalid_dataset_object = {'data_source_id': 3, 'dataset_info': {}}
self.assertRaises(
ValueError,
bp._process_dataset_object,
invalid_dataset_object, 'fake parameter')
class TestRCMEDDatasetLoad(unittest.TestCase):
def setUp(self):
metadata = rcmed.get_parameters_metadata()
# Load TRMM from RCMED
dataset_dat = [m for m in metadata if m['parameter_id'] == '36'][0]
self.dataset_info = {
'dataset_id': int(dataset_dat['dataset_id']),
'parameter_id': int(dataset_dat['parameter_id'])
}
self.eval_bounds = {
'start_time': dt.datetime(1998, 02, 01),
'end_time': dt.datetime(1998, 03, 01),
'lat_min': -10,
'lat_max': 10,
'lon_min': -15,
'lon_max': 15
}
def test_valid_load(self):
dataset = bp._load_rcmed_dataset_object(self.dataset_info, self.eval_bounds)
lat_min, lat_max, lon_min, lon_max = dataset.spatial_boundaries()
start_time, end_time = dataset.time_range()
self.assertTrue(self.eval_bounds['lat_min'] <= lat_min)
self.assertTrue(self.eval_bounds['lat_max'] >= lat_max)
self.assertTrue(self.eval_bounds['lon_min'] <= lon_min)
self.assertTrue(self.eval_bounds['lon_max'] >= lon_max)
self.assertTrue(self.eval_bounds['start_time'] <= start_time)
self.assertTrue(self.eval_bounds['end_time'] >= end_time)
def test_default_name_assignment(self):
dataset = bp._load_rcmed_dataset_object(self.dataset_info, self.eval_bounds)
self.assertEquals(dataset.name, 'TRMM v.7 Monthly Precipitation')
def test_custom_name_assignment(self):
self.dataset_info['name'] = 'CustomName'
dataset = bp._load_rcmed_dataset_object(self.dataset_info, self.eval_bounds)
self.assertEquals(dataset.name, self.dataset_info['name'])
class TestMetricLoad(unittest.TestCase):
def test_get_valid_metric_options(self):
metric_map = bp._get_valid_metric_options()
bias = metric_map['Bias']()
self.assertTrue(isinstance(bias, metrics.Bias))
def test_valid_metric_load(self):
metric_objs = bp._load_metrics(['Bias'])
self.assertTrue(isinstance(metric_objs[0], metrics.Bias))
def test_invalid_metric_load(self):
self.assertRaises(ValueError, bp._load_metrics, ['AAA'])
class TestSpatialRebinHelpers(unittest.TestCase):
def test_latlon_bin_helper(self):
eval_bounds = {
'lat_min': -57.2,
'lat_max': 58.2,
'lon_min': -45.3,
'lon_max': 39.2,
}
lat_step = 1
lon_step = 1
lats = numpy.arange(eval_bounds['lat_min'], eval_bounds['lat_max'])
lons = numpy.arange(eval_bounds['lon_min'], eval_bounds['lon_max'])
new_lats, new_lons = bp._calculate_new_latlon_bins(eval_bounds, lat_step, lon_step)
self.assertTrue(numpy.array_equal(lats, new_lats))
self.assertTrue(numpy.array_equal(lons, new_lons))
class TestCalculateGridShape(unittest.TestCase):
def test_grid_shape_calculation(self):
ref_dataset = _create_fake_dataset('foo')
shape = bp._calculate_grid_shape(ref_dataset, max_cols=3)
self.assertEquals(shape, (3, 3))
class TestBalanceGridShape(unittest.TestCase):
def test_balance_grid_shape(self):
# Test column imbalance
self.assertEquals(bp._balance_grid_shape(7, 2, 6), (3, 3))
self.assertEquals(bp._balance_grid_shape(7, 2, 4), (3, 3))
self.assertEquals(bp._balance_grid_shape(10, 2, 6), (3, 4))
self.assertEquals(bp._balance_grid_shape(20, 3, 7), (4, 5))
# Test row imbalance
self.assertEquals(bp._balance_grid_shape(7, 6, 2), (3, 3))
self.assertEquals(bp._balance_grid_shape(7, 4, 2), (3, 3))
self.assertEquals(bp._balance_grid_shape(10, 6, 2), (3, 4))
self.assertEquals(bp._balance_grid_shape(20, 7, 3), (4, 5))
class TestFilePathCreation(unittest.TestCase):
def setUp(self):
self.full_evaluation = Evaluation(
_create_fake_dataset('Ref'),
[_create_fake_dataset('T1'), _create_fake_dataset('T2')],
[metrics.TemporalStdDev(), metrics.Bias(), metrics.Bias()]
)
self.unary_evaluation = Evaluation(
None,
[_create_fake_dataset('T1'), _create_fake_dataset('T2')],
[metrics.TemporalStdDev()]
)
def test_binary_metric_path_generation(self):
time_stamp = dt.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
self.assertEquals(
bp._generate_binary_eval_plot_file_path(self.full_evaluation,
0, # dataset_index
1, # metric_index
time_stamp),
'/tmp/ocw/{}/ref_compared_to_t1_bias'.format(time_stamp)
)
def test_unary_metric_path_generation_full_eval(self):
time_stamp = dt.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
self.assertEquals(
bp._generate_unary_eval_plot_file_path(self.full_evaluation,
0, # dataset_index
0, # metric_index
time_stamp),
'/tmp/ocw/{}/ref_temporalstddev'.format(time_stamp)
)
self.assertEquals(
bp._generate_unary_eval_plot_file_path(self.full_evaluation,
1, # dataset_index
0, # metric_index
time_stamp),
'/tmp/ocw/{}/t1_temporalstddev'.format(time_stamp)
)
def test_unary_metric_path_generation_partial_eval(self):
time_stamp = dt.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
self.assertEquals(
bp._generate_unary_eval_plot_file_path(self.unary_evaluation,
0, # dataset_index
0, # metric_index
time_stamp),
'/tmp/ocw/{}/t1_temporalstddev'.format(time_stamp)
)
self.assertEquals(
bp._generate_unary_eval_plot_file_path(self.unary_evaluation,
1, # dataset_index
0, # metric_index
time_stamp),
'/tmp/ocw/{}/t2_temporalstddev'.format(time_stamp)
)
class TestPlotTitleCreation(unittest.TestCase):
def setUp(self):
self.full_evaluation = Evaluation(
_create_fake_dataset('Ref'),
[_create_fake_dataset('T1'), _create_fake_dataset('T2')],
[metrics.TemporalStdDev(), metrics.Bias(), metrics.Bias()]
)
self.unary_evaluation = Evaluation(
None,
[_create_fake_dataset('T1'), _create_fake_dataset('T2')],
[metrics.TemporalStdDev()]
)
def test_binary_plot_title_generation(self):
self.assertEquals(
bp._generate_binary_eval_plot_title(self.full_evaluation, 0, 1),
'Bias of Ref compared to T1'
)
def test_unary_plot_title_generation_full_eval(self):
self.assertEqual(
bp._generate_unary_eval_plot_title(self.full_evaluation, 0, 0),
'TemporalStdDev of Ref'
)
self.assertEqual(
bp._generate_unary_eval_plot_title(self.full_evaluation, 1, 0),
'TemporalStdDev of T1'
)
def test_unary_plot_title_generation_partial_eval(self):
self.assertEquals(
bp._generate_unary_eval_plot_title(self.unary_evaluation, 0, 0),
'TemporalStdDev of T1'
)
self.assertEquals(
bp._generate_unary_eval_plot_title(self.unary_evaluation, 1, 0),
'TemporalStdDev of T2'
)
class TestRunEvaluation(unittest.TestCase):
def test_full_evaluation(self):
data = {
'reference_dataset': {
'data_source_id': 1,
'dataset_info': {
'dataset_id': os.path.abspath('/tmp/d1.nc'),
'var_name': 'tasmax',
'lat_name': 'lat',
'lon_name': 'lon',
'time_name': 'time'
}
},
'target_datasets': [
{
'data_source_id': 1,
'dataset_info': {
'dataset_id': os.path.abspath('/tmp/d2.nc'),
'var_name': 'tasmax',
'lat_name': 'lat',
'lon_name': 'lon',
'time_name': 'time'
}
}
],
'spatial_rebin_lat_step': 1,
'spatial_rebin_lon_step': 1,
'temporal_resolution': 365,
'metrics': ['Bias'],
'start_time': '1989-01-01 00:00:00',
'end_time': '1991-01-01 00:00:00',
'lat_min': -25.0,
'lat_max': 22.0,
'lon_min': -14.0,
'lon_max': 40.0,
'subregion_information': None
}
        # NOTE: Sometimes the file download will die if you use this WebTest
# call for testing. If that is the case, download the files manually with wget.
test_app.post_json('/processing/run_evaluation/', data)
result_dirs = [x for x in os.listdir(WORK_DIR)
if os.path.isdir(os.path.join(WORK_DIR, x))]
eval_dir = os.path.join(WORK_DIR, result_dirs[-1])
eval_files = [f for f in os.listdir(eval_dir)
if os.path.isfile(os.path.join(eval_dir, f))]
self.assertTrue(len(eval_files) == 1)
self.assertEquals(eval_files[0], 'd1.nc_compared_to_d2.nc_bias.png')
class TestMetricNameRetrieval(unittest.TestCase):
def test_metric_name_retrieval(self):
invalid_metrics = ['ABCMeta', 'Metric', 'UnaryMetric', 'BinaryMetric']
data = test_app.get('/processing/metrics/').json
metrics = data['metrics']
        for invalid_metric in invalid_metrics:
            self.assertTrue(invalid_metric not in metrics)
self.assertTrue(len(metrics) > 0)
self.assertTrue('Bias' in metrics)
def _create_fake_dataset(name):
lats = numpy.array(range(-10, 25, 1))
lons = numpy.array(range(-30, 40, 1))
times = numpy.array(range(8))
values = numpy.zeros((len(times), len(lats), len(lons)))
return Dataset(lats, lons, times, values, name=name)
| apache-2.0 | 4,547,437,981,451,303,000 | 38.190332 | 91 | 0.562519 | false |
Microsoft/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32comext/axscript/test/testHost.py | 10 | 7524 | import sys
import pythoncom
from win32com.axscript.server.error import Exception
from win32com.axscript import axscript
from win32com.axscript.server import axsite
from win32com.server import util, connect
import win32com.server.policy
from win32com.client.dynamic import Dispatch
from win32com.server.exception import COMException
import unittest
import win32com.test.util
verbose = "-v" in sys.argv
class MySite(axsite.AXSite):
def __init__(self, *args):
self.exception_seen = None
axsite.AXSite.__init__(self, *args)
def OnScriptError(self, error):
self.exception_seen = exc = error.GetExceptionInfo()
context, line, char = error.GetSourcePosition()
if not verbose:
return
print(" >Exception:", exc[1])
try:
st = error.GetSourceLineText()
except pythoncom.com_error:
st = None
if st is None: st = ""
text = st + "\n" + (" " * (char-1)) + "^" + "\n" + exc[2]
for line in text.splitlines():
print(" >" + line)
class MyCollection(util.Collection):
def _NewEnum(self):
return util.Collection._NewEnum(self)
class Test:
_public_methods_ = [ 'echo', 'fail' ]
_public_attrs_ = ['collection']
def __init__(self):
self.verbose = verbose
self.collection = util.wrap( MyCollection( [1,'Two',3] ))
self.last = ""
self.fail_called = 0
# self._connect_server_ = TestConnectServer(self)
def echo(self, *args):
self.last = "".join([str(s) for s in args])
if self.verbose:
for arg in args:
print(arg, end=' ')
print()
def fail(self, *args):
print("**** fail() called ***")
for arg in args:
print(arg, end=' ')
print()
self.fail_called = 1
# self._connect_server_.Broadcast(last)
#### Connections currently wont work, as there is no way for the engine to
#### know what events we support. We need typeinfo support.
IID_ITestEvents = pythoncom.MakeIID("{8EB72F90-0D44-11d1-9C4B-00AA00125A98}")
class TestConnectServer(connect.ConnectableServer):
_connect_interfaces_ = [IID_ITestEvents]
# The single public method that the client can call on us
# (ie, as a normal COM server, this exposes just this single method.
def __init__(self, object):
self.object = object
def Broadcast(self,arg):
# Simply broadcast a notification.
self._BroadcastNotify(self.NotifyDoneIt, (arg,))
def NotifyDoneIt(self, interface, arg):
interface.Invoke(1000, 0, pythoncom.DISPATCH_METHOD, 1, arg)
VBScript = """\
prop = "Property Value"
sub hello(arg1)
test.echo arg1
end sub
sub testcollection
if test.collection.Item(0) <> 1 then
test.fail("Index 0 was wrong")
end if
if test.collection.Item(1) <> "Two" then
test.fail("Index 1 was wrong")
end if
if test.collection.Item(2) <> 3 then
test.fail("Index 2 was wrong")
end if
num = 0
for each item in test.collection
num = num + 1
next
if num <> 3 then
test.fail("Collection didn't have 3 items")
end if
end sub
"""
PyScript = """\
# A unicode \xa9omment.
prop = "Property Value"
def hello(arg1):
test.echo(arg1)
def testcollection():
# test.collection[1] = "New one"
got = []
for item in test.collection:
got.append(item)
if got != [1, "Two", 3]:
test.fail("Didn't get the collection")
pass
"""
# XXX - needs py3k work! Throwing a bytes string with an extended char
# doesn't make much sense, but py2x allows it. What it gets upset with
# is a real unicode arg - which is the only thing py3k allows!
PyScript_Exc = """\
def hello(arg1):
raise RuntimeError("exc with extended \xa9har")
"""
ErrScript = """\
bad code for everyone!
"""
state_map = {
axscript.SCRIPTSTATE_UNINITIALIZED: "SCRIPTSTATE_UNINITIALIZED",
axscript.SCRIPTSTATE_INITIALIZED: "SCRIPTSTATE_INITIALIZED",
axscript.SCRIPTSTATE_STARTED: "SCRIPTSTATE_STARTED",
axscript.SCRIPTSTATE_CONNECTED: "SCRIPTSTATE_CONNECTED",
axscript.SCRIPTSTATE_DISCONNECTED: "SCRIPTSTATE_DISCONNECTED",
axscript.SCRIPTSTATE_CLOSED: "SCRIPTSTATE_CLOSED",
}
def _CheckEngineState(engine, name, state):
got = engine.engine.eScript.GetScriptState()
if got != state:
got_name = state_map.get(got, str(got))
state_name = state_map.get(state, str(state))
raise RuntimeError("Warning - engine %s has state %s, but expected %s" % (name, got_name, state_name))
class EngineTester(win32com.test.util.TestCase):
def _TestEngine(self, engineName, code, expected_exc = None):
echoer = Test()
model = {
'test' : util.wrap(echoer),
}
site = MySite(model)
engine = site._AddEngine(engineName)
try:
_CheckEngineState(site, engineName, axscript.SCRIPTSTATE_INITIALIZED)
engine.AddCode(code)
engine.Start()
_CheckEngineState(site, engineName, axscript.SCRIPTSTATE_STARTED)
self.failUnless(not echoer.fail_called, "Fail should not have been called")
# Now call into the scripts IDispatch
ob = Dispatch(engine.GetScriptDispatch())
try:
ob.hello("Goober")
self.failUnless(expected_exc is None,
"Expected %r, but no exception seen" % (expected_exc,))
except pythoncom.com_error:
if expected_exc is None:
self.fail("Unexpected failure from script code: %s" % (site.exception_seen,))
if expected_exc not in site.exception_seen[2]:
self.fail("Could not find %r in %r" % (expected_exc, site.exception_seen[2]))
return
self.assertEqual(echoer.last, "Goober")
self.assertEqual(str(ob.prop), "Property Value")
ob.testcollection()
self.failUnless(not echoer.fail_called, "Fail should not have been called")
# Now make sure my engines can evaluate stuff.
result = engine.eParse.ParseScriptText("1+1", None, None, None, 0, 0, axscript.SCRIPTTEXT_ISEXPRESSION)
self.assertEqual(result, 2)
# re-initialize to make sure it transitions back to initialized again.
engine.SetScriptState(axscript.SCRIPTSTATE_INITIALIZED)
_CheckEngineState(site, engineName, axscript.SCRIPTSTATE_INITIALIZED)
engine.Start()
_CheckEngineState(site, engineName, axscript.SCRIPTSTATE_STARTED)
# Transition back to initialized, then through connected too.
engine.SetScriptState(axscript.SCRIPTSTATE_INITIALIZED)
_CheckEngineState(site, engineName, axscript.SCRIPTSTATE_INITIALIZED)
engine.SetScriptState(axscript.SCRIPTSTATE_CONNECTED)
_CheckEngineState(site, engineName, axscript.SCRIPTSTATE_CONNECTED)
engine.SetScriptState(axscript.SCRIPTSTATE_INITIALIZED)
_CheckEngineState(site, engineName, axscript.SCRIPTSTATE_INITIALIZED)
engine.SetScriptState(axscript.SCRIPTSTATE_CONNECTED)
_CheckEngineState(site, engineName, axscript.SCRIPTSTATE_CONNECTED)
engine.SetScriptState(axscript.SCRIPTSTATE_DISCONNECTED)
_CheckEngineState(site, engineName, axscript.SCRIPTSTATE_DISCONNECTED)
finally:
engine.Close()
engine = None
site = None
def testVB(self):
self._TestEngine("VBScript", VBScript)
def testPython(self):
self._TestEngine("Python", PyScript)
def testPythonUnicodeError(self):
self._TestEngine("Python", PyScript)
def testVBExceptions(self):
self.assertRaises(pythoncom.com_error,
self._TestEngine, "VBScript", ErrScript)
def testPythonExceptions(self):
expected = "RuntimeError: exc with extended \xa9har"
self._TestEngine("Python", PyScript_Exc, expected)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -1,928,957,304,268,795,400 | 32.145374 | 109 | 0.683413 | false |
xodus7/tensorflow | tensorflow/contrib/gan/python/eval/python/sliced_wasserstein.py | 43 | 1211 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model evaluation tools for TFGAN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.eval.python import sliced_wasserstein_impl
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.eval.python.sliced_wasserstein_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
__all__ = sliced_wasserstein_impl.__all__
remove_undocumented(__name__, __all__)
| apache-2.0 | 51,862,381,028,747,340 | 42.25 | 80 | 0.71924 | false |
wiki-ai/ores | tests/scoring_systems/tests/test_celery_queue.py | 2 | 3337 | import celery
from revscoring.extractors import OfflineExtractor
from ores.score_request import ScoreRequest
from ores.scoring.models import RevIdScorer
from ores.scoring_context import ScoringContext
from ores.scoring_systems.celery_queue import CeleryQueue
from ores.task_tracker import InMemoryTaskTracker
from .util import fakewiki
def test_score():
application = celery.Celery(__name__)
CeleryQueue(
{'fakewiki': fakewiki}, application=application, timeout=15)
# Can't run the following tests because it starts a new thread and that
# will break our signal timeout strategy.
# celerytest.start_celery_worker(application, concurrency=1)
# test_scoring_system(scoring_system)
def test_celery_queue():
application = celery.Celery(__name__)
CeleryQueue(
{'fakewiki': fakewiki}, application=application, timeout=0.10)
# Can't run the following tests because it starts a new thread and that
# will break our signal timeout strategy.
# celerytest.start_celery_worker(application, concurrency=1)
# response = scoring_system.score(
# ScoreRequest("fakewiki", [1], ["fake"],
# injection_caches={1: {wait_time: 0.05}}))
# assert 1 in response.errors, str(response.errors)
# assert isinstance(response.errors[1]['fake'], errors.TimeoutError), \
# type(response.errors[1]['fake'])
def test_task():
revid = RevIdScorer(version='0.0.1')
fakewiki = ScoringContext(
'fakewiki', {'revid': revid}, OfflineExtractor())
application = celery.Celery(__name__)
scoring_system = CeleryQueue(
{'fakewiki': fakewiki}, application=application, timeout=0.10)
request = {
'context': 'fakewiki',
'rev_ids': [1234],
'model_names': ['revid'],
'precache': False,
'include_features': False,
'injection_caches': {},
'ip': None,
'model_info': None
}
actual = scoring_system._process_score_map(request, ['revid'], 1234, {'datasource.revision.id': 1234})
expected = {'revid': {'score': {'prediction': False,
'probability': {False: 0.5700000000000001, True: 0.43}}}}
assert actual == expected
def test_locking():
application = celery.Celery(__name__)
revid = RevIdScorer(version='0.0.1')
fakewiki = ScoringContext(
'fakewiki', {'revid': revid}, OfflineExtractor())
scoring_system = CeleryQueue(
{'fakewiki': fakewiki}, application=application, timeout=0., task_tracker=InMemoryTaskTracker())
request = {
'context': 'fakewiki',
'rev_ids': [1234],
'model_names': ['revid'],
'precache': False,
'include_features': False,
'injection_caches': {},
'ip': None,
'model_info': None
}
scoring_system._lock_process(['revid'], 123, ScoreRequest.from_json(request), None, 'Task ID')
assert len(list(scoring_system.task_tracker.tasks.keys())) == 1
assert list(scoring_system.task_tracker.tasks.values()) == ['Task ID']
key = list(scoring_system.task_tracker.tasks.keys())[0]
# We should not assert the exact key as it's internal but we should assert it has the important bits
assert 'fakewiki' in key
assert 'revid' in key
assert '0.0.1' in key
assert '123' in key
| mit | 36,190,594,075,345,100 | 34.88172 | 106 | 0.64579 | false |
jounex/hue | apps/search/src/search/search_controller.py | 8 | 4390 | #!/usr/bin/env python
# -- coding: utf-8 --
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.db.models import Q
from desktop.models import Document2, Document, SAMPLE_USERNAME
from libsolr.api import SolrApi
from search.conf import SOLR_URL
from search.models import Collection2
LOG = logging.getLogger(__name__)
class SearchController(object):
"""
Glue the models to the views.
"""
def __init__(self, user):
self.user = user
def get_search_collections(self):
return [d.content_object for d in Document.objects.get_docs(self.user, Document2, extra='search-dashboard').order_by('-id')]
def get_shared_search_collections(self):
# Those are the ones appearing in the menu
docs = Document.objects.filter(Q(owner=self.user) | Q(owner__username=SAMPLE_USERNAME), extra='search-dashboard')
return [d.content_object for d in docs.order_by('-id')]
def get_owner_search_collections(self):
if self.user.is_superuser:
docs = Document.objects.filter(extra='search-dashboard')
else:
docs = Document.objects.filter(extra='search-dashboard', owner=self.user)
return [d.content_object for d in docs.order_by('-id')]
def get_icon(self, name):
if name == 'Twitter':
return 'search/art/icon_twitter_48.png'
elif name == 'Yelp Reviews':
return 'search/art/icon_yelp_48.png'
elif name == 'Web Logs':
return 'search/art/icon_logs_48.png'
else:
return 'search/art/icon_search_48.png'
def delete_collections(self, collection_ids):
result = {'status': -1, 'message': ''}
try:
for doc2 in self.get_owner_search_collections():
if doc2.id in collection_ids:
doc = doc2.doc.get()
doc.delete()
doc2.delete()
result['status'] = 0
except Exception, e:
LOG.warn('Error deleting collection: %s' % e)
result['message'] = unicode(str(e), "utf8")
return result
def copy_collections(self, collection_ids):
result = {'status': -1, 'message': ''}
try:
for doc2 in self.get_shared_search_collections():
if doc2.id in collection_ids:
doc2 = Document2.objects.get(uuid=doc2.uuid)
doc = doc2.doc.get()
name = doc2.name + '-copy'
doc2 = doc2.copy(name=name, owner=self.user)
doc.copy(content_object=doc2, name=name, owner=self.user)
collection = Collection2(self.user, document=doc2)
collection.data['collection']['label'] = name
doc2.update_data({'collection': collection.data['collection']})
doc2.save()
result['status'] = 0
except Exception, e:
LOG.exception('Error copying collection')
result['message'] = unicode(str(e), "utf8")
return result
def is_collection(self, collection_name):
solr_collections = SolrApi(SOLR_URL.get(), self.user).collections()
return collection_name in solr_collections
def is_core(self, core_name):
solr_cores = SolrApi(SOLR_URL.get(), self.user).cores()
return core_name in solr_cores
def get_solr_collection(self):
return SolrApi(SOLR_URL.get(), self.user).collections()
def get_all_indexes(self, show_all=False):
indexes = []
try:
indexes = self.get_solr_collection().keys()
except:
LOG.exception('failed to get indexes')
try:
indexes += SolrApi(SOLR_URL.get(), self.user).aliases().keys()
except:
LOG.exception('failed to get index aliases')
if show_all or not indexes:
return indexes + SolrApi(SOLR_URL.get(), self.user).cores().keys()
else:
return indexes
def can_edit_index(user):
return user.is_superuser
| apache-2.0 | 51,740,912,046,425,190 | 31.043796 | 128 | 0.669248 | false |
strahlc/exaile | xlgui/widgets/playback.py | 2 | 49901 | # Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
from gi.repository import Gdk
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Pango
from xl import (
event,
formatter,
player,
providers,
settings,
xdg
)
from xl.common import clamp
from xl.nls import gettext as _
from xlgui.widgets import menu
from xlgui.guiutil import GtkTemplate
class ProgressBarFormatter(formatter.ProgressTextFormatter):
"""
A formatter for progress bars
"""
def __init__(self, player):
formatter.ProgressTextFormatter.__init__(self, '', player)
self.on_option_set('gui_option_set', settings,
'gui/progress_bar_text_format')
event.add_ui_callback(self.on_option_set, 'gui_option_set')
def on_option_set(self, event, settings, option):
"""
Updates the internal format on setting change
"""
if option == 'gui/progress_bar_text_format':
self.props.format = settings.get_option(
'gui/progress_bar_text_format',
'$current_time / $remaining_time')
class PlaybackProgressBar(Gtk.ProgressBar):
"""
Progress bar which automatically follows playback
"""
def __init__(self, player):
Gtk.ProgressBar.__init__(self)
self.__player = player
self.set_show_text(True)
self.reset()
self.formatter = ProgressBarFormatter(player)
self.__timer_id = None
self.__events = ('playback_track_start', 'playback_player_end',
'playback_toggle_pause', 'playback_error')
for e in self.__events:
event.add_ui_callback(getattr(self, 'on_%s' % e), e, self.__player)
def destroy(self):
"""
Cleanups
"""
for e in self.__events:
event.remove_callback(getattr(self, 'on_%s' % e), e, self.__player)
def reset(self):
"""
Resets the progress bar appearance
"""
self.set_fraction(0)
self.set_text(_('Not Playing'))
def __enable_timer(self):
"""
Enables the update timer
"""
if self.__timer_id is not None:
return
interval = settings.get_option('gui/progress_update_millisecs', 1000)
if interval % 1000 == 0:
self.__timer_id = GLib.timeout_add_seconds(
interval / 1000, self.on_timer)
else:
self.__timer_id = GLib.timeout_add(
interval, self.on_timer)
self.on_timer()
def __disable_timer(self):
"""
Disables the update timer
"""
if self.__timer_id is not None:
GLib.source_remove(self.__timer_id)
self.__timer_id = None
def on_timer(self):
"""
Updates progress bar appearance
"""
if self.__player.current is None:
self.__disable_timer()
self.reset()
return False
self.set_fraction(self.__player.get_progress())
self.set_text(self.formatter.format())
return True
def on_playback_track_start(self, event_type, player, track):
"""
Starts update timer
"""
self.reset()
self.__enable_timer()
def on_playback_player_end(self, event_type, player, track):
"""
Stops update timer
"""
self.__disable_timer()
self.reset()
def on_playback_toggle_pause(self, event_type, player, track):
"""
Starts or stops update timer
"""
if player.is_playing():
self.__enable_timer()
elif player.is_paused():
self.__disable_timer()
def on_playback_error(self, event_type, player, message):
"""
Stops update timer
"""
self.__disable_timer()
self.reset()
class Anchor(int):
__gtype__ = GObject.TYPE_INT
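# The loop below turns the listed names into class-level constants, i.e.
# Anchor.CENTER == 0, Anchor.NORTH == 1, ..., Anchor.EAST == 8, in the order
# they appear in the string.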
for i, a in enumerate('CENTER NORTH NORTH_WEST NORTH_EAST SOUTH SOUTH_WEST SOUTH_EAST WEST EAST'.split()):
setattr(Anchor, a, Anchor(i))
class Marker(GObject.GObject):
"""
A marker pointing to a playback position
"""
__gproperties__ = {
'anchor': (
Anchor,
'anchor position',
'The position the marker will be anchored',
Anchor.CENTER, Anchor.EAST, Anchor.SOUTH,
GObject.PARAM_READWRITE
),
'color': (
Gdk.Color,
'marker color',
'Override color of the marker',
GObject.PARAM_READWRITE
),
'label': (
GObject.TYPE_STRING,
'marker label',
'Textual description of the marker',
None,
GObject.PARAM_READWRITE
),
'position': (
GObject.TYPE_FLOAT,
'marker position',
'Relative position of the marker',
0, 1, 0,
GObject.PARAM_READWRITE
),
'state': (
Gtk.StateType,
'marker state',
'The state of the marker',
Gtk.StateType.NORMAL,
GObject.PARAM_READWRITE
)
}
__gsignals__ = {
'reached': (
GObject.SignalFlags.RUN_LAST,
None,
()
)
}
def __init__(self, position=0):
GObject.GObject.__init__(self)
self.__values = {
'anchor': Anchor.SOUTH,
'color': None,
'label': None,
'position': 0,
'state': Gtk.StateType.NORMAL
}
self.props.position = position
def __str__(self):
"""
Informal representation
"""
if self.props.label is not None:
text = '%s (%g)' % (self.props.label, self.props.position)
else:
text = '%g' % self.props.position
return text
def __lt__(self, other):
"""
Compares positions
"""
return self.props.position < other.props.position
def __gt__(self, other):
"""
Compares positions
"""
return self.props.position > other.props.position
def do_get_property(self, gproperty):
"""
Gets a GObject property
"""
try:
return self.__values[gproperty.name]
except KeyError:
            raise AttributeError('unknown property %s' % gproperty.name)
def do_set_property(self, gproperty, value):
"""
Sets a GObject property
"""
try:
self.__values[gproperty.name] = value
except KeyError:
            raise AttributeError('unknown property %s' % gproperty.name)
class MarkerManager(providers.ProviderHandler):
"""
Enables management of playback markers; namely simple
adding, removing and finding. It also takes care of
emitting signals when a marker is reached during playback.
TODO: This presumes there is only one player object present
in exaile, and that markers can only be associated with
the single player object. This class should probably be
changed to be associated with a particular player (which
requires some changes to the marker class)
"""
def __init__(self):
providers.ProviderHandler.__init__(self, 'playback-markers')
self.__events = ('playback_track_start', 'playback_track_end')
self.__timeout_id = None
for e in self.__events:
event.add_ui_callback(getattr(self, 'on_%s' % e), e)
def destroy(self):
"""
Cleanups
"""
for e in self.__events:
event.remove_callback(getattr(self, 'on_%s' % e), e)
def add_marker(self, position):
"""
Creates a new marker for a playback position
:param position: the playback position [0..1]
:type position: float
:returns: the new marker
:rtype: :class:`Marker`
"""
marker = Marker(position)
# Provider compatibility
marker.name = 'marker'
providers.register('playback-markers', marker)
return marker
def remove_marker(self, marker):
"""
Removes a playback marker
:param marker: the marker
:type marker: :class:`Marker`
"""
providers.unregister('playback-markers', marker)
def get_markers_at(self, position):
"""
Gets all markers located at a position
:param position: the mark position
:type position: float
:returns: (m1, m2, ...)
:rtype: (:class:`Marker`, ...)
* *m1*: the first marker
* *m2*: the second marker
* ...
"""
# Reproduce value modifications
position = Marker(position).props.position
markers = ()
for marker in providers.get('playback-markers'):
if marker.props.position == position:
markers += (marker,)
return markers
def on_playback_track_start(self, event, player, track):
"""
Starts marker watching
"""
if self.__timeout_id is not None:
GLib.source_remove(self.__timeout_id)
self.__timeout_id = GLib.timeout_add_seconds(1, self.on_timeout, player)
def on_playback_track_end(self, event, player, track):
"""
Stops marker watching
"""
if self.__timeout_id is not None:
GLib.source_remove(self.__timeout_id)
self.__timeout_id = None
def on_timeout(self, player):
"""
Triggers "reached" signal of markers
"""
if player.current is None:
self.__timeout_id = None
return
track_length = player.current.get_tag_raw('__length')
if track_length is None:
return True
playback_time = player.get_time()
reached_markers = (m for m in providers.get('playback-markers')
if int(m.props.position * track_length) == playback_time)
for marker in reached_markers:
marker.emit('reached')
return True
__MARKERMANAGER = MarkerManager()
add_marker = __MARKERMANAGER.add_marker
remove_marker = __MARKERMANAGER.remove_marker
get_markers_at = __MARKERMANAGER.get_markers_at
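# Illustrative usage sketch (editor's addition, not part of the original
# module): client code drives the module-level helpers above like this.
# The position, label and callback below are hypothetical values.
def _example_marker_usage():
    marker = add_marker(0.5)                   # marker at 50% of track length
    marker.props.label = 'Chorus'              # shown in seek bar tooltips
    marker.connect('reached', lambda m: None)  # called when playback hits it
    # ...later, when the marker is no longer needed:
    remove_marker(marker)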
class _SeekInternalProgressBar(PlaybackProgressBar):
__gsignals__ = {
'draw': 'override',
}
def __init__(self, player, points, marker_scale):
PlaybackProgressBar.__init__(self, player)
self._points = points
self._seeking = False
self._marker_scale = marker_scale
def do_draw(self, context):
"""
Draws markers on top of the progress bar
"""
Gtk.ProgressBar.do_draw(self, context)
if not self._points:
return
context.set_line_width(self._marker_scale / 0.9)
style = self.get_style_context()
for marker, points in self._points.iteritems():
for i, (x, y) in enumerate(points):
if i == 0:
context.move_to(x, y)
else:
context.line_to(x, y)
context.close_path()
if marker.props.state in (Gtk.StateType.PRELIGHT, Gtk.StateType.ACTIVE):
c = style.get_color(Gtk.StateType.NORMAL)
context.set_source_rgba(c.red, c.green, c.blue, c.alpha)
else:
if marker.props.color is not None:
base = marker.props.color
else:
base = style.get_color(marker.props.state)
context.set_source_rgba(
base.red / 256.0**2,
base.green / 256.0**2,
base.blue / 256.0**2,
0.7
)
context.fill_preserve()
if marker.props.state in (Gtk.StateType.PRELIGHT, Gtk.StateType.ACTIVE):
c = style.get_color(Gtk.StateType.NORMAL)
context.set_source_rgba(c.red, c.green, c.blue, c.alpha)
else:
foreground = style.get_color(marker.props.state)
context.set_source_rgba(
foreground.red / 256.0**2,
foreground.green / 256.0**2,
foreground.blue / 256.0**2,
0.7
)
context.stroke()
def on_timer(self):
"""
Prevents update while seeking
"""
if self._seeking:
return True
return PlaybackProgressBar.on_timer(self)
class SeekProgressBar(Gtk.EventBox, providers.ProviderHandler):
"""
Playback progress bar which allows for seeking
and setting positional markers
"""
__gproperties__ = {
'marker-scale': (
GObject.TYPE_FLOAT,
'marker scale',
'Scaling of markers',
0, 1, 0.7,
GObject.PARAM_READWRITE
)
}
__gsignals__ = {
'button-press-event': 'override',
'button-release-event': 'override',
'motion-notify-event': 'override',
'notify': 'override',
'key-press-event': 'override',
'key-release-event': 'override',
'scroll-event': 'override',
'marker-reached': (
GObject.SignalFlags.RUN_LAST,
GObject.TYPE_BOOLEAN,
(Marker,),
GObject.signal_accumulator_true_handled
)
}
def __init__(self, player, use_markers=True):
'''
TODO: markers aren't designed for more than one player, once
they are we can get rid of the use_markers option
'''
Gtk.EventBox.__init__(self)
points = {}
self.__player = player
self.__values = {'marker-scale': 0.7}
self._points = points
self.__progressbar = _SeekInternalProgressBar(player, points,
self.__values['marker-scale'])
self._progressbar_menu = None
if use_markers:
self._progressbar_menu = ProgressBarContextMenu(self)
self._marker_menu = MarkerContextMenu(self)
self._marker_menu.connect('deactivate',
self.on_marker_menu_deactivate)
providers.ProviderHandler.__init__(self, 'playback-markers')
self.add_events(Gdk.EventMask.BUTTON_PRESS_MASK |
Gdk.EventMask.BUTTON_RELEASE_MASK |
Gdk.EventMask.POINTER_MOTION_MASK |
Gdk.EventMask.LEAVE_NOTIFY_MASK |
Gdk.EventMask.SCROLL_MASK)
self.set_can_focus(True)
self.connect('hierarchy-changed',
self.on_hierarchy_changed)
self.connect('scroll-event', self.on_scroll_event)
self.add(self.__progressbar)
self.show_all()
def get_label(self, marker):
"""
Builds the most appropriate label
markup to describe a marker
:param marker: the marker
:type marker: :class:`Marker`
:returns: the label
:rtype: string
"""
markup = None
if self.__player.current:
length = self.__player.current.get_tag_raw('__length')
if length is not None:
length = length * marker.props.position
length = formatter.LengthTagFormatter.format_value(length)
if marker.props.label:
markup = '<b>%s</b> (%s)' % (marker.props.label, length)
else:
markup = '%s' % length
else:
if marker.props.label:
markup = '<b>%s</b> (%d%%)' % (
marker.props.label,
int(marker.props.position * 100)
)
else:
markup = '%d%%' % int(marker.props.position * 100)
return markup
def _is_marker_hit(self, marker, check_x, check_y):
"""
Checks whether a marker is hit by a point
:param marker: the marker
:type marker: :class:`Marker`
:param check_x: the x location to check
:type check_x: float
:param check_y: the y location to check
:type check_y: float
:returns: whether the marker was hit
:rtype: bool
"""
points = self._points[marker]
        min_x, min_y, max_x, max_y = self._get_bounding_box(points)
        if min_x <= check_x <= max_x and min_y <= check_y <= max_y:
return True
return False
def _get_points(self, marker, width=None, height=None):
"""
Calculates the points necessary
to represent a marker
:param marker: the marker
:type marker: :class:`Marker`
:param width: area width override
:type width: int
:param height: area height override
:type height: int
:returns: ((x1, y1), (x2, y2), ...)
:rtype: ((float, float), ...)
* *x1*: the x coordinate of the first point
* *y1*: the y coordinate of the first point
* *x2*: the x coordinate of the second point
* *y2*: the y coordinate of the second point
* ...
"""
points = ()
alloc = self.get_allocation()
width = width or alloc.width
height = height or alloc.height
position = width * marker.props.position
marker_scale = int(height * self.props.marker_scale)
# Adjustment by half of the line width
offset = self.props.marker_scale / 0.9 / 2
if marker.props.anchor == Anchor.NORTH_WEST:
points = (
(position - offset, offset),
(position + marker_scale * 0.75 - offset, offset),
(position - offset, marker_scale * 0.75 + offset)
)
elif marker.props.anchor == Anchor.NORTH:
points = (
(position - offset, marker_scale / 2 + offset),
(position + marker_scale / 2 - offset, offset),
(position - marker_scale / 2 - offset, offset)
)
elif marker.props.anchor == Anchor.NORTH_EAST:
points = (
(position - marker_scale * 0.75 - offset, offset),
(position - offset, offset),
(position - offset, marker_scale * 0.75 + offset)
)
elif marker.props.anchor == Anchor.EAST:
points = (
(position - marker_scale / 2 - offset, height / 2 + offset),
(position - offset, height / 2 - marker_scale / 2 + offset),
(position - offset, height / 2 + marker_scale / 2 + offset)
)
elif marker.props.anchor == Anchor.SOUTH_EAST:
points = (
(position - offset, height - offset),
(position - offset, height - marker_scale * 0.75 - offset),
(position - marker_scale * 0.75 - offset, height - offset)
)
elif marker.props.anchor == Anchor.SOUTH:
points = (
(position - offset, height - marker_scale / 2 - offset),
(position + marker_scale / 2 - offset, height - offset),
(position - marker_scale / 2 - offset, height - offset)
)
elif marker.props.anchor == Anchor.SOUTH_WEST:
points = (
(position - offset, height - offset),
(position + marker_scale * 0.75 - offset, height - offset),
(position - offset, height - marker_scale * 0.75 - offset)
)
elif marker.props.anchor == Anchor.WEST:
points = (
(position + marker_scale / 2 - offset, height / 2 + offset),
(position - offset, height / 2 - marker_scale / 2 + offset),
(position - offset, height / 2 + marker_scale / 2 + offset)
)
elif marker.props.anchor == Anchor.CENTER:
points = (
(position - offset, height / 2 - marker_scale / 2 + offset),
(position + marker_scale / 2 - offset, height / 2 + offset),
(position - offset, height / 2 + marker_scale / 2 + offset),
(position - marker_scale / 2 - offset, height / 2 + offset)
)
return points
def _get_bounding_box(self, points):
"""
Calculates the axis aligned bounding box
of a sequence of points
:param points: ((x1, y1), (x2, y2), ...)
:type points: ((float, float), ...)
            :returns: (min_x, min_y, max_x, max_y)
            :rtype: (float, float, float, float)

            * *min_x*: the minimum x coordinate of the box
            * *min_y*: the minimum y coordinate of the box
            * *max_x*: the maximum x coordinate of the box
            * *max_y*: the maximum y coordinate of the box
"""
xs, ys = zip(*points)
return min(xs), min(ys), max(xs), max(ys)
def seek(self, position):
"""
Seeks within the current track
"""
if self.__player.current:
self.__player.set_progress(position)
self.update_progress()
def update_progress(self):
'''
Updates the progress bar and the time with data from the player
'''
if self.__player.current:
length = self.__player.current.get_tag_raw('__length')
if length is not None:
position = float(self.__player.get_time())/length
self.__progressbar.set_fraction(position)
self.__progressbar.set_text(self.__progressbar.formatter.format(
current_time=length * position))
def do_get_property(self, gproperty):
"""
Gets a GObject property
"""
try:
return self.__values[gproperty.name]
except KeyError:
            raise AttributeError('unknown property %s' % gproperty.name)
def do_set_property(self, gproperty, value):
"""
Sets a GObject property
"""
try:
self.__values[gproperty.name] = value
except KeyError:
            raise AttributeError('unknown property %s' % gproperty.name)
def do_notify(self, gproperty):
"""
Reacts to GObject property changes
"""
if gproperty.name == 'marker-scale':
for marker in self._points:
self._points[marker] = self._get_points(marker)
self.__progressbar._marker_scale = self.__values['marker-scale']
self.__progressbar.queue_draw()
def do_size_allocate(self, allocation):
"""
Recalculates the marker points
"""
oldallocation = self.get_allocation()
Gtk.EventBox.do_size_allocate(self, allocation)
if allocation != oldallocation:
for marker in self._points:
self._points[marker] = self._get_points(marker)
def do_button_press_event(self, event):
"""
Prepares seeking
"""
event = event.button
hit_markers = []
for marker in self._points:
if self._is_marker_hit(marker, event.x, event.y):
if marker.props.state in (Gtk.StateType.NORMAL,
Gtk.StateType.PRELIGHT):
marker.props.state = Gtk.StateType.ACTIVE
hit_markers += [marker]
hit_markers.sort()
if event.button == 1:
if self.__player.current is None:
return True
length = self.__player.current.get_tag_raw('__length')
if length is None:
return True
if len(hit_markers) > 0:
self.seek(hit_markers[0].props.position)
else:
fraction = event.x / self.get_allocation().width
fraction = max(0, fraction)
fraction = min(fraction, 1)
self.__progressbar.set_fraction(fraction)
self.__progressbar.set_text(
_('Seeking: %s') % self.__progressbar.formatter.format(
current_time=length * fraction))
self.__progressbar._seeking = True
elif event.button == 3:
if len(hit_markers) > 0:
self._marker_menu.popup(event, tuple(hit_markers))
elif self._progressbar_menu is not None:
self._progressbar_menu.popup(event)
def do_button_release_event(self, event):
"""
Completes seeking
"""
event = event.button
for marker in self._points:
if marker.props.state == Gtk.StateType.ACTIVE:
marker.props.state = Gtk.StateType.PRELIGHT
if event.button == 1 and self.__progressbar._seeking:
fraction = event.x / self.get_allocation().width
fraction = max(0, fraction)
fraction = min(fraction, 1)
self.seek(fraction)
self.__progressbar._seeking = False
def do_motion_notify_event(self, event):
"""
Updates progress bar while seeking
and updates marker states on hover
"""
self.set_tooltip_markup(None)
if self.__progressbar._seeking:
press_event = Gdk.EventButton.new(Gdk.EventType.BUTTON_PRESS)
press_event.button = 1
press_event.x = event.x
press_event.y = event.y
self.emit('button-press-event', press_event)
else:
hit_markers = []
for marker in self._points:
if self._is_marker_hit(marker, event.x, event.y):
if marker.props.state == Gtk.StateType.NORMAL:
marker.props.state = Gtk.StateType.PRELIGHT
hit_markers += [marker]
else:
if marker.props.state == Gtk.StateType.PRELIGHT:
marker.props.state = Gtk.StateType.NORMAL
if len(hit_markers) > 0:
hit_markers.sort()
markup = ', '.join([self.get_label(m) for m in hit_markers])
self.set_tooltip_markup(markup)
self.trigger_tooltip_query()
def do_leave_notify_event(self, event):
"""
Resets marker states
"""
for marker in self._points:
# Leave other states intact
if marker.props.state == Gtk.StateType.PRELIGHT:
marker.props.state = Gtk.StateType.NORMAL
def do_key_press_event(self, event):
"""
Prepares seeking via keyboard interaction
* Alt+Up/Right: seek 1% forward
* Alt+Down/Left: seek 1% backward
"""
_, state = event.get_state()
if state & Gtk.StateType.INSENSITIVE:
return
if not state & Gdk.ModifierType.MOD1_MASK:
return
if event.keyval in (Gdk.KEY_Up, Gdk.KEY_Right):
direction = 1
elif event.keyval in (Gdk.KEY_Down, Gdk.KEY_Left):
direction = -1
else:
return
press_event = Gdk.Event.new(Gdk.EventType.BUTTON_PRESS)
press_event.button = 1
new_fraction = self.__progressbar.get_fraction() + 0.01 * direction
alloc = self.get_allocation()
press_event.x = alloc.width * new_fraction
press_event.y = float(alloc.y)
self.emit('button-press-event', press_event)
def do_key_release_event(self, event):
"""
Completes seeking via keyboard interaction
"""
_, state = event.get_state()
if not state & Gdk.ModifierType.MOD1_MASK:
return
if event.keyval in (Gdk.KEY_Up, Gdk.KEY_Right):
direction = 1
elif event.keyval in (Gdk.KEY_Down, Gdk.KEY_Left):
direction = -1
else:
return
release_event = Gdk.Event.new(Gdk.EventType.BUTTON_RELEASE)
release_event.button = 1
new_fraction = self.__progressbar.get_fraction() + 0.01 * direction
alloc = self.get_allocation()
release_event.x = alloc.width * new_fraction
release_event.y = float(alloc.y)
self.emit('button-release-event', release_event)
def on_scroll_event(self, widget, event):
"""
Seek on scroll as VLC does
"""
if not self.__player.current:
return True
if self.__player.current.get_tag_raw('__length') is None:
return True
progress = self.__player.get_progress()
progress_delta = 0.05 # 5% of track length
progress_delta_small = 0.005 # 0.5% of track length
if event.direction == Gdk.ScrollDirection.DOWN or \
event.direction == Gdk.ScrollDirection.LEFT:
if event.get_state() & Gdk.ModifierType.SHIFT_MASK:
new_progress = progress - progress_delta_small
else:
new_progress = progress - progress_delta
elif event.direction == Gdk.ScrollDirection.UP or \
event.direction == Gdk.ScrollDirection.RIGHT:
if event.get_state() & Gdk.ModifierType.SHIFT_MASK:
new_progress = progress + progress_delta_small
else:
new_progress = progress + progress_delta
elif event.direction == Gdk.ScrollDirection.SMOOTH:
if event.get_state() & Gdk.ModifierType.SHIFT_MASK:
new_progress = progress \
                    + progress_delta_small * (event.delta_x + event.delta_y)
else:
new_progress = progress \
                    + progress_delta * (event.delta_x + event.delta_y)
self.__player.set_progress(clamp(new_progress, 0, 1))
self.update_progress()
return True
def on_hierarchy_changed(self, widget, old_toplevel):
"""
Sets up editing cancel on toplevel focus out
"""
# Disconnect from previous toplevel.
prev_conn = getattr(self, '_SeekProgressBar__prev_focus_out_conn', None)
if prev_conn:
prev_conn[0].disconnect(prev_conn[1])
del self.__prev_focus_out_conn
# Connect to new toplevel and store the connection, but only if it's an
# actual toplevel window.
toplevel = self.get_toplevel()
if toplevel.is_toplevel():
conn = toplevel.connect('focus-out-event',
lambda w, e: self.emit('focus-out-event', e.copy()))
self.__prev_focus_out_conn = (toplevel, conn)
def on_marker_menu_deactivate(self, menu):
"""
Makes sure to reset states of
previously selected markers
"""
for marker in self._points:
marker.props.state = Gtk.StateType.NORMAL
self.__progressbar.queue_draw()
def on_marker_notify(self, marker, gproperty):
"""
Recalculates marker points on position changes
"""
if gproperty.name in ('anchor', 'position'):
self._points[marker] = self._get_points(marker)
self.__progressbar.queue_draw()
def on_provider_added(self, marker):
"""
Calculates points after marker addition
:param marker: the new marker
:type marker: :class:`Marker`
"""
notify_id = marker.connect('notify', self.on_marker_notify)
setattr(marker, '%s_notify_id' % id(self), notify_id)
self._points[marker] = self._get_points(marker)
self.__progressbar.queue_draw()
def on_provider_removed(self, marker):
"""
Removes points from internal cache
:param marker: the marker
:type marker: :class:`Marker`
"""
notify_id = getattr(marker, '%s_notify_id' % id(self))
if notify_id is not None:
marker.disconnect(notify_id)
del self._points[marker]
self.__progressbar.queue_draw()
# HACK: These methods implement the PlaybackAdapter interface (passing the
# calls to the internal progress bar, which is an actual PlaybackAdapter).
# This class only pretends to be a PlaybackAdapter because we don't want
# the mixin behavior here.
def on_playback_track_start(self, event, player, track):
self.__progressbar.on_playback_track_start(event, player, track)
def on_playback_track_end(self, event, player, track):
self.__progressbar.on_playback_track_end(event, player, track)
def on_playback_player_end(self, event, player, track):
self.__progressbar.on_playback_player_end(event, player, track)
def on_playback_toggle_pause(self, event, player, track):
self.__progressbar.on_playback_toggle_pause(event, player, track)
def on_playback_error(self, event, player, message):
self.__progressbar.on_playback_error(event, player, message)
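# Illustrative usage sketch (editor's addition, not part of the original
# file): embedding the seek bar in a container and reacting to markers.
# ``container`` and the handler body are hypothetical.
def _example_seek_bar_setup(container):
    seek_bar = SeekProgressBar(player.PLAYER)
    def on_marker_reached(bar, marker):
        # React to playback crossing the marker position
        return False  # allow other handlers to run as well
    seek_bar.connect('marker-reached', on_marker_reached)
    container.pack_start(seek_bar, True, True, 0)
    return seek_bar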
class ProgressBarContextMenu(menu.ProviderMenu):
"""
Progress bar specific context menu
"""
def __init__(self, progressbar):
"""
:param progressbar: the progress bar
:type progressbar: :class:`PlaybackProgressBar`
"""
menu.ProviderMenu.__init__(self,
'progressbar-context-menu', progressbar)
self._position = -1
def get_context(self):
"""
Retrieves the context
"""
context = {'current-position': self._position}
return context
def popup(self, event):
"""
Pops up the menu
:param event: an event
:type event: :class:`Gdk.Event`
"""
self._position = event.x / self._parent.get_allocation().width
menu.ProviderMenu.popup(self, event)
class MarkerContextMenu(menu.ProviderMenu):
"""
Marker specific context menu
"""
def __init__(self, markerbar):
"""
:param markerbar: the marker capable progress bar
:type markerbar: :class:`SeekProgressBar`
"""
menu.ProviderMenu.__init__(self,
'playback-marker-context-menu', markerbar)
self._markers = ()
self._position = -1
def regenerate_menu(self):
"""
Builds the menu, with submenu if appropriate
"""
for marker in self._markers:
label = self._parent.get_label(marker)
if label is None:
continue
markup_data = Pango.parse_markup(label, -1, '0')
label_item = Gtk.MenuItem.new_with_mnemonic(markup_data[2])
self.append(label_item)
if len(self._markers) > 1:
item_menu = Gtk.Menu()
label_item.set_submenu(item_menu)
else:
item_menu = self
label_item.set_sensitive(False)
self.append(Gtk.SeparatorMenuItem())
context = {
'current-marker': marker,
'selected-markers': self._markers,
'current-position': self._position
}
for item in self._items:
i = item.factory(self, self._parent, context)
item_menu.append(i)
self.show_all()
def popup(self, event, markers):
"""
Pops up the menu
:param event: an event
:type event: :class:`Gdk.Event`
:param markers: (m1, m2, ...)
:type markers: (:class:`Marker`, ...)
"""
self._markers = markers
self._position = event.x / self._parent.get_allocation().width
menu.ProviderMenu.popup(self, event)
class MoveMarkerMenuItem(menu.MenuItem):
"""
Menu item allowing for movement of markers
"""
def __init__(self, name, after, display_name=_('Move'),
icon_name=None):
menu.MenuItem.__init__(self, name, None, after)
self._parent = None
self._display_name = display_name
self._icon_name = icon_name
self._marker = None
self._reset_position = -1
def factory(self, menu, parent, context):
"""
Generates the menu item
"""
self._parent = parent
item = Gtk.ImageMenuItem.new_with_mnemonic(self._display_name)
if self._icon_name is not None:
item.set_image(Gtk.Image.new_from_icon_name(
self._icon_name, Gtk.IconSize.MENU))
item.connect('activate', self.on_activate, parent, context)
parent.connect('button-press-event',
self.on_parent_button_press_event)
parent.connect('motion-notify-event',
self.on_parent_motion_notify_event)
parent.connect('focus-out-event',
self.on_parent_focus_out_event)
return item
def move_begin(self, marker):
"""
Captures the current marker for movement
:param marker: the marker
:type marker: :class:`Marker`
:returns: whether a marker could be captured
:rtype: bool
"""
self.move_cancel()
if marker is not None:
self._marker = marker
self._marker.props.state = Gtk.StateType.ACTIVE
self._reset_position = marker.props.position
self._parent.props.window.set_cursor(
Gdk.Cursor.new(Gdk.CursorType.SB_H_DOUBLE_ARROW))
return True
return False
def move_update(self, position):
"""
Moves the marker
:param position: the current marker position
:type position: float
:returns: whether a marker could be moved
:rtype: bool
"""
if self._marker is not None:
self._marker.props.position = position
label = self._parent.get_label(self._marker)
self._parent.set_tooltip_markup(label)
return True
return False
def move_finish(self):
"""
Finishes movement and releases the marker
:returns: whether the movement could be finished
:rtype: bool
"""
if self._marker is not None:
self._marker.props.state = Gtk.StateType.NORMAL
self._marker = None
self._reset_position = -1
self._parent.props.window.set_cursor(None)
return True
return False
def move_cancel(self):
"""
Cancels movement and releases the marker
:returns: whether the movement could be cancelled
:rtype: bool
"""
if self._marker is not None:
self._marker.props.position = self._reset_position
self._marker.props.state = Gtk.StateType.NORMAL
self._marker = None
self._reset_position = -1
self._parent.props.window.set_cursor(None)
return True
return False
def on_activate(self, widget, parent, context):
"""
Starts movement of markers
"""
self.move_begin(context.get('current-marker', None))
def on_parent_button_press_event(self, widget, event):
"""
Finishes or cancels movement of markers
"""
if event.button == 1:
return self.move_finish()
elif event.button == 3:
return self.move_cancel()
return False
def on_parent_motion_notify_event(self, widget, event):
"""
Moves markers
"""
position = event.x / widget.get_allocation().width
return self.move_update(position)
def on_parent_focus_out_event(self, widget, event):
"""
Cancels movement of markers
"""
self.move_cancel()
class NewMarkerMenuItem(MoveMarkerMenuItem):
"""
Menu item allowing for insertion
and instant movement of a marker
"""
def __init__(self, name, after):
MoveMarkerMenuItem.__init__(self, name, after,
_('New Marker'), 'list-add')
def move_cancel(self):
"""
Cancels movement and insertion of the marker
:param parent: the parent
:type parent: :class:`SeekProgressBar`
:returns: whether the movement could be cancelled
:rtype: bool
"""
if self._marker is not None:
remove_marker(self._marker)
self._marker = None
self._reset_position = -1
self._parent.props.window.set_cursor(None)
return True
return False
def on_activate(self, widget, parent, context):
"""
Inserts a new marker and starts movement
"""
context['current-marker'] = add_marker(context['current-position'])
MoveMarkerMenuItem.on_activate(self, widget, parent, context)
# XXX: Example implementation only
# Bookmarks: "Add bookmark" (1 new marker)
# A-B-Repeat: "Repeat" (2 new markers, NW, NE)
def __create_progressbar_context_menu():
items = []
items.append(NewMarkerMenuItem('new-marker', []))
for item in items:
providers.register('progressbar-context-menu', item)
__create_progressbar_context_menu()
def __create_marker_context_menu():
items = []
def on_jumpto_item_activate(widget, name, parent, context):
#parent.seek(context['current-marker'].props.position)
position = context['current-marker'].props.position
player.PLAYER.set_progress(position)
def on_remove_item_activate(widget, name, parent, context):
providers.unregister('playback-markers', context['current-marker'])
items.append(menu.simple_menu_item('jumpto-marker',
[], _("_Jump to"), 'go-jump', on_jumpto_item_activate))
items.append(MoveMarkerMenuItem('move-marker',
[items[-1].name]))
items.append(menu.simple_menu_item('remove-marker',
[items[-1].name], _("_Remove Marker"), 'list-remove',
on_remove_item_activate))
for item in items:
providers.register('playback-marker-context-menu', item)
__create_marker_context_menu()
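# Illustrative sketch (editor's addition): other components could extend the
# marker context menu through the provider system in the same way as above.
# The item name, label, icon and callback body are hypothetical.
def _example_register_marker_menu_item():
    def on_bookmark_item_activate(widget, name, parent, context):
        context['current-marker'].props.label = 'Bookmark'
    item = menu.simple_menu_item('bookmark-marker', ['remove-marker'],
        _('Label as _Bookmark'), 'bookmark-new', on_bookmark_item_activate)
    providers.register('playback-marker-context-menu', item)
    return item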
@GtkTemplate('ui', 'widgets', 'volume_control.ui')
class VolumeControl(Gtk.Box):
"""
Encapsulates a button and a slider to
control the volume indicating the current
status via icon and tooltip
"""
__gtype_name__ = 'VolumeControl'
button, \
slider, \
button_image, \
slider_adjustment = GtkTemplate.Child.widgets(4)
def __init__(self, player):
Gtk.Box.__init__(self)
self.init_template()
self.button.add_events(Gdk.EventMask.KEY_PRESS_MASK | Gdk.EventMask.SCROLL_MASK)
self.__volume_setting = '%s/volume' % player._name
self.restore_volume = settings.get_option(self.__volume_setting, 1)
self.icon_names = ['low', 'medium', 'high']
self.__update(self.restore_volume)
event.add_ui_callback(self.on_option_set, '%s_option_set' % player._name)
def __update(self, volume):
"""
Sets the volume level indicator
"""
icon_name = 'audio-volume-muted'
tooltip = _('Muted')
if volume > 0:
i = clamp(int(round(volume * 2)), 0, len(self.icon_names) - 1)
icon_name = 'audio-volume-%s' % self.icon_names[i]
#TRANSLATORS: Volume percentage
tooltip = _('%d%%') % (volume * 100)
else:
volume = 0
if volume == 1.0:
tooltip = _('Full Volume')
if volume > 0:
self.button.set_active(False)
self.button_image.set_from_icon_name(icon_name, Gtk.IconSize.BUTTON)
self.button.set_tooltip_text(tooltip)
self.slider.set_value(volume)
self.slider.set_tooltip_text(tooltip)
@GtkTemplate.Callback
def on_scroll_event(self, widget, event):
"""
Changes the volume on scrolling
"""
page_increment = self.slider_adjustment.props.page_increment
step_increment = self.slider_adjustment.props.step_increment
value = self.slider.get_value()
if event.direction == Gdk.ScrollDirection.DOWN:
if event.get_state() & Gdk.ModifierType.SHIFT_MASK:
self.slider.set_value(value - page_increment)
else:
self.slider.set_value(value - step_increment)
return True
elif event.direction == Gdk.ScrollDirection.UP:
if event.get_state() & Gdk.ModifierType.SHIFT_MASK:
self.slider.set_value(value + page_increment)
else:
self.slider.set_value(value + step_increment)
return True
elif event.direction == Gdk.ScrollDirection.SMOOTH:
if event.get_state() & Gdk.ModifierType.SHIFT_MASK:
self.slider.set_value(value - event.delta_y * page_increment)
else:
self.slider.set_value(value - event.delta_y * step_increment)
return True
return False
@GtkTemplate.Callback
def on_button_toggled(self, button):
"""
Mutes or unmutes the volume
"""
if button.get_active():
self.restore_volume = settings.get_option(self.__volume_setting, 1)
volume = 0
else:
volume = self.restore_volume
if self.restore_volume > 0:
settings.set_option(self.__volume_setting, volume)
@GtkTemplate.Callback
def on_slider_value_changed(self, slider):
"""
Stores the preferred volume
"""
settings.set_option(self.__volume_setting, slider.get_value())
@GtkTemplate.Callback
def on_slider_key_press_event(self, slider, event):
"""
Changes the volume on key press
while the slider is focused
"""
page_increment = slider.get_adjustment().props.page_increment
step_increment = slider.get_adjustment().props.step_increment
value = slider.get_value()
if event.keyval == Gdk.KEY_Down:
slider.set_value(value - step_increment)
return True
elif event.keyval == Gdk.KEY_Page_Down:
slider.set_value(value - page_increment)
return True
elif event.keyval == Gdk.KEY_Up:
slider.set_value(value + step_increment)
return True
elif event.keyval == Gdk.KEY_Page_Up:
slider.set_value(value + page_increment)
return True
return False
def on_option_set(self, event, sender, option):
"""
Updates the volume indication
"""
if option == self.__volume_setting:
self.__update(settings.get_option(option, 1))
def playpause(player):
if player.get_state() in ('playing', 'paused'):
player.toggle_pause()
else:
from xlgui import main
page = main.get_selected_playlist()
if page:
pl = page.playlist
if len(pl) == 0:
return
try:
idx = page.view.get_selected_paths()[0][0]
except IndexError:
idx = 0
player.queue.set_current_playlist(pl)
pl.current_position = idx
player.queue.play(track=pl.current)
def PlayPauseMenuItem(name, player, after):
def factory(name, after, player):
if player.is_playing():
icon_name = 'media-playback-pause'
label = _("_Pause")
else:
icon_name = 'media-playback-start'
label = _("P_lay")
return menu.simple_menu_item(name, after, label, icon_name,
callback=lambda *args: playpause(player) )
return factory(name, after, player)
def NextMenuItem(name, player, after):
return menu.simple_menu_item(name, after, _("_Next Track"), 'media-skip-forward',
callback=lambda *args: player.queue.next() )
def PrevMenuItem(name, player, after):
return menu.simple_menu_item(name, after, _("_Previous Track"), 'media-skip-backward',
callback=lambda *args: player.queue.prev() )
def StopMenuItem(name, player, after):
return menu.simple_menu_item(name, after, _("_Stop"), 'media-playback-stop',
callback=lambda *args: player.stop() )
| gpl-2.0 | -8,296,390,029,054,638,000 | 31.679109 | 106 | 0.551933 | false |
cboling/SDNdbg | docs/old-stuff/pydzcvr/doc/neutronclient/neutron/v2_0/vpn/ipsec_site_connection.py | 1 | 7010 | # (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Swaminathan Vasudevan, Hewlett-Packard.
#
from neutronclient.common import exceptions
from neutronclient.common import utils
from neutronclient.neutron import v2_0 as neutronv20
from neutronclient.neutron.v2_0.vpn import utils as vpn_utils
from neutronclient.openstack.common.gettextutils import _
from neutronclient.openstack.common import jsonutils
def _format_peer_cidrs(ipsec_site_connection):
try:
return '\n'.join([jsonutils.dumps(cidrs) for cidrs in
ipsec_site_connection['peer_cidrs']])
except (TypeError, KeyError):
return ''
class ListIPsecSiteConnection(neutronv20.ListCommand):
"""List IPsec site connections that belong to a given tenant."""
resource = 'ipsec_site_connection'
_formatters = {'peer_cidrs': _format_peer_cidrs}
list_columns = [
'id', 'name', 'peer_address', 'peer_cidrs', 'route_mode',
'auth_mode', 'status']
pagination_support = True
sorting_support = True
class ShowIPsecSiteConnection(neutronv20.ShowCommand):
"""Show information of a given IPsec site connection."""
resource = 'ipsec_site_connection'
class CreateIPsecSiteConnection(neutronv20.CreateCommand):
"""Create an IPsec site connection."""
resource = 'ipsec_site_connection'
def add_known_arguments(self, parser):
parser.add_argument(
'--admin-state-down',
default=True, action='store_false',
help=_('Set admin state up to false.'))
parser.add_argument(
'--name',
help=_('Set friendly name for the connection.'))
parser.add_argument(
'--description',
help=_('Set a description for the connection.'))
parser.add_argument(
'--mtu',
default='1500',
help=_('MTU size for the connection, default:1500'))
parser.add_argument(
'--initiator',
default='bi-directional', choices=['bi-directional',
'response-only'],
help=_('Initiator state in lowercase, default:bi-directional'))
parser.add_argument(
'--dpd',
metavar="action=ACTION,interval=INTERVAL,timeout=TIMEOUT",
type=utils.str2dict,
help=vpn_utils.dpd_help("IPsec connection."))
parser.add_argument(
'--vpnservice-id', metavar='VPNSERVICE',
required=True,
help=_('VPN service instance ID associated with this connection.'))
parser.add_argument(
'--ikepolicy-id', metavar='IKEPOLICY',
required=True,
help=_('IKE policy ID associated with this connection.'))
parser.add_argument(
'--ipsecpolicy-id', metavar='IPSECPOLICY',
required=True,
help=_('IPsec policy ID associated with this connection.'))
parser.add_argument(
'--peer-address',
required=True,
help=_('Peer gateway public IPv4/IPv6 address or FQDN.'))
parser.add_argument(
'--peer-id',
required=True,
help=_('Peer router identity for authentication. Can be '
'IPv4/IPv6 address, e-mail address, key id, or FQDN.'))
parser.add_argument(
'--peer-cidr',
action='append', dest='peer_cidrs',
required=True,
help=_('Remote subnet(s) in CIDR format.'))
parser.add_argument(
'--psk',
required=True,
help=_('Pre-shared key string.'))
def args2body(self, parsed_args):
_vpnservice_id = neutronv20.find_resourceid_by_name_or_id(
self.get_client(), 'vpnservice',
parsed_args.vpnservice_id)
_ikepolicy_id = neutronv20.find_resourceid_by_name_or_id(
self.get_client(), 'ikepolicy',
parsed_args.ikepolicy_id)
_ipsecpolicy_id = neutronv20.find_resourceid_by_name_or_id(
self.get_client(), 'ipsecpolicy',
parsed_args.ipsecpolicy_id)
if int(parsed_args.mtu) < 68:
message = _("Invalid MTU value: MTU must be "
"greater than or equal to 68")
raise exceptions.CommandError(message)
body = {'ipsec_site_connection': {
'vpnservice_id': _vpnservice_id,
'ikepolicy_id': _ikepolicy_id,
'ipsecpolicy_id': _ipsecpolicy_id,
'peer_address': parsed_args.peer_address,
'peer_id': parsed_args.peer_id,
'mtu': parsed_args.mtu,
'initiator': parsed_args.initiator,
'psk': parsed_args.psk,
'admin_state_up': parsed_args.admin_state_down,
}, }
if parsed_args.name:
body['ipsec_site_connection'].update(
{'name': parsed_args.name}
)
if parsed_args.description:
body['ipsec_site_connection'].update(
{'description': parsed_args.description}
)
if parsed_args.tenant_id:
body['ipsec_site_connection'].update(
{'tenant_id': parsed_args.tenant_id}
)
if parsed_args.dpd:
vpn_utils.validate_dpd_dict(parsed_args.dpd)
body['ipsec_site_connection'].update({'dpd': parsed_args.dpd})
if parsed_args.peer_cidrs:
body['ipsec_site_connection'][
'peer_cidrs'] = parsed_args.peer_cidrs
return body
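# Illustrative invocation of the command above (all values are made up; the
# flags themselves are the ones declared in add_known_arguments()):
#
#   ipsec-site-connection-create --vpnservice-id myvpn \
#       --ikepolicy-id ikepolicy1 --ipsecpolicy-id ipsecpolicy1 \
#       --peer-address 172.24.4.226 --peer-id 172.24.4.226 \
#       --peer-cidr 10.1.0.0/24 --psk secret --name vpnconnection1
#
# args2body() resolves the service/policy names to IDs and builds a request
# body shaped like:
#
#   {'ipsec_site_connection': {'vpnservice_id': <id>, 'ikepolicy_id': <id>,
#                              'ipsecpolicy_id': <id>,
#                              'peer_address': '172.24.4.226',
#                              'peer_id': '172.24.4.226',
#                              'peer_cidrs': ['10.1.0.0/24'],
#                              'mtu': '1500', 'initiator': 'bi-directional',
#                              'psk': 'secret', 'admin_state_up': True,
#                              'name': 'vpnconnection1'}}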
class UpdateIPsecSiteConnection(neutronv20.UpdateCommand):
"""Update a given IPsec site connection."""
resource = 'ipsec_site_connection'
def add_known_arguments(self, parser):
parser.add_argument(
'--dpd',
metavar="action=ACTION,interval=INTERVAL,timeout=TIMEOUT",
type=utils.str2dict,
help=vpn_utils.dpd_help("IPsec connection."))
def args2body(self, parsed_args):
body = {'ipsec_site_connection': {
}, }
if parsed_args.dpd:
vpn_utils.validate_dpd_dict(parsed_args.dpd)
body['ipsec_site_connection'].update({'dpd': parsed_args.dpd})
return body
class DeleteIPsecSiteConnection(neutronv20.DeleteCommand):
"""Delete a given IPsec site connection."""
resource = 'ipsec_site_connection'
| apache-2.0 | -289,869,664,630,373,570 | 36.688172 | 79 | 0.59786 | false |
danbob123/ta | ta.py | 2 | 8802 | # -*- coding: utf-8 -*-
from __future__ import division
from functools import wraps
import numpy as np
from pandas import DataFrame, Series
from pandas.stats import moments
def series_indicator(col):
    """Decorator that lets an indicator accept a DataFrame (operating on its
    `col` column, e.g. 'close') as well as a plain Series."""
    def inner_series_indicator(f):
@wraps(f)
def wrapper(s, *args, **kwargs):
if isinstance(s, DataFrame):
s = s[col]
return f(s, *args, **kwargs)
return wrapper
return inner_series_indicator
def _wilder_sum(s, n):
    # Wilder's smoothed running sum (used by adx below).
    s = s.dropna()
nf = (n - 1) / n
ws = [np.nan]*(n - 1) + [s[n - 1] + nf*sum(s[:n - 1])]
for v in s[n:]:
ws.append(v + ws[-1]*nf)
return Series(ws, index=s.index)
@series_indicator('high')
def hhv(s, n):
return moments.rolling_max(s, n)
@series_indicator('low')
def llv(s, n):
return moments.rolling_min(s, n)
@series_indicator('close')
def ema(s, n, wilder=False):
span = n if not wilder else 2*n - 1
return moments.ewma(s, span=span)
@series_indicator('close')
def macd(s, nfast=12, nslow=26, nsig=9, percent=True):
fast, slow = ema(s, nfast), ema(s, nslow)
if percent:
macd = 100*(fast / slow - 1)
else:
macd = fast - slow
sig = ema(macd, nsig)
hist = macd - sig
return DataFrame(dict(macd=macd, signal=sig, hist=hist,
fast=fast, slow=slow))
def aroon(s, n=25):
up = 100 * moments.rolling_apply(s.high, n + 1, lambda x: x.argmax()) / n
dn = 100 * moments.rolling_apply(s.low, n + 1, lambda x: x.argmin()) / n
return DataFrame(dict(up=up, down=dn))
@series_indicator('close')
def rsi(s, n=14):
diff = s.diff()
which_dn = diff < 0
up, dn = diff, diff*0
up[which_dn], dn[which_dn] = 0, -up[which_dn]
emaup = ema(up, n, wilder=True)
emadn = ema(dn, n, wilder=True)
return 100 * emaup/(emaup + emadn)
def stoch(s, nfastk=14, nfullk=3, nfulld=3):
if not isinstance(s, DataFrame):
s = DataFrame(dict(high=s, low=s, close=s))
hmax, lmin = hhv(s, nfastk), llv(s, nfastk)
fastk = 100 * (s.close - lmin)/(hmax - lmin)
fullk = moments.rolling_mean(fastk, nfullk)
fulld = moments.rolling_mean(fullk, nfulld)
return DataFrame(dict(fastk=fastk, fullk=fullk, fulld=fulld))
@series_indicator('close')
def dtosc(s, nrsi=13, nfastk=8, nfullk=5, nfulld=3):
srsi = stoch(rsi(s, nrsi), nfastk, nfullk, nfulld)
return DataFrame(dict(fast=srsi.fullk, slow=srsi.fulld))
def atr(s, n=14):
cs = s.close.shift(1)
tr = s.high.combine(cs, max) - s.low.combine(cs, min)
return ema(tr, n, wilder=True)
def cci(s, n=20, c=0.015):
if isinstance(s, DataFrame):
s = s[['high', 'low', 'close']].mean(axis=1)
mavg = moments.rolling_mean(s, n)
mdev = moments.rolling_apply(s, n, lambda x: np.fabs(x - x.mean()).mean())
return (s - mavg)/(c * mdev)
def cmf(s, n=20):
clv = (2*s.close - s.high - s.low) / (s.high - s.low)
vol = s.volume
return moments.rolling_sum(clv*vol, n) / moments.rolling_sum(vol, n)
def force(s, n=2):
return ema(s.close.diff()*s.volume, n)
@series_indicator('close')
def kst(s, r1=10, r2=15, r3=20, r4=30, n1=10, n2=10, n3=10, n4=15, nsig=9):
rocma1 = moments.rolling_mean(s / s.shift(r1) - 1, n1)
rocma2 = moments.rolling_mean(s / s.shift(r2) - 1, n2)
rocma3 = moments.rolling_mean(s / s.shift(r3) - 1, n3)
rocma4 = moments.rolling_mean(s / s.shift(r4) - 1, n4)
kst = 100*(rocma1 + 2*rocma2 + 3*rocma3 + 4*rocma4)
sig = moments.rolling_mean(kst, nsig)
return DataFrame(dict(kst=kst, signal=sig))
def ichimoku(s, n1=9, n2=26, n3=52):
conv = (hhv(s, n1) + llv(s, n1)) / 2
base = (hhv(s, n2) + llv(s, n2)) / 2
spana = (conv + base) / 2
spanb = (hhv(s, n3) + llv(s, n3)) / 2
return DataFrame(dict(conv=conv, base=base, spana=spana.shift(n2),
spanb=spanb.shift(n2), lspan=s.close.shift(-n2)))
def ultimate(s, n1=7, n2=14, n3=28):
cs = s.close.shift(1)
bp = s.close - s.low.combine(cs, min)
tr = s.high.combine(cs, max) - s.low.combine(cs, min)
avg1 = moments.rolling_sum(bp, n1) / moments.rolling_sum(tr, n1)
avg2 = moments.rolling_sum(bp, n2) / moments.rolling_sum(tr, n2)
avg3 = moments.rolling_sum(bp, n3) / moments.rolling_sum(tr, n3)
return 100*(4*avg1 + 2*avg2 + avg3) / 7
def auto_envelope(s, nema=22, nsmooth=100, ndev=2.7):
sema = ema(s.close, nema)
mdiff = s[['high','low']].sub(sema, axis=0).abs().max(axis=1)
csize = moments.ewmstd(mdiff, nsmooth)*ndev
return DataFrame(dict(ema=sema, lenv=sema - csize, henv=sema + csize))
@series_indicator('close')
def bbands(s, n=20, ndev=2):
mavg = moments.rolling_mean(s, n)
mstd = moments.rolling_std(s, n)
hband = mavg + ndev*mstd
lband = mavg - ndev*mstd
return DataFrame(dict(ma=mavg, lband=lband, hband=hband))
def safezone(s, position, nmean=10, npen=2.0, nagg=3):
if isinstance(s, DataFrame):
s = s.low if position == 'long' else s.high
sgn = -1.0 if position == 'long' else 1.0
# Compute the average upside/downside penetration
pen = moments.rolling_apply(
sgn*s.diff(), nmean,
lambda x: x[x > 0].mean() if (x > 0).any() else 0
)
stop = s + sgn*npen*pen
return hhv(stop, nagg) if position == 'long' else llv(stop, nagg)
def sar(s, af=0.02, amax=0.2):
high, low = s.high, s.low
# Starting values
sig0, xpt0, af0 = True, high[0], af
sar = [low[0] - (high - low).std()]
for i in xrange(1, len(s)):
sig1, xpt1, af1 = sig0, xpt0, af0
lmin = min(low[i - 1], low[i])
lmax = max(high[i - 1], high[i])
if sig1:
sig0 = low[i] > sar[-1]
xpt0 = max(lmax, xpt1)
else:
sig0 = high[i] >= sar[-1]
xpt0 = min(lmin, xpt1)
if sig0 == sig1:
sari = sar[-1] + (xpt1 - sar[-1])*af1
af0 = min(amax, af1 + af)
if sig0:
af0 = af0 if xpt0 > xpt1 else af1
sari = min(sari, lmin)
else:
af0 = af0 if xpt0 < xpt1 else af1
sari = max(sari, lmax)
else:
af0 = af
sari = xpt0
sar.append(sari)
return Series(sar, index=s.index)
def adx(s, n=14):
cs = s.close.shift(1)
tr = s.high.combine(cs, max) - s.low.combine(cs, min)
trs = _wilder_sum(tr, n)
up = s.high - s.high.shift(1)
dn = s.low.shift(1) - s.low
pos = ((up > dn) & (up > 0)) * up
neg = ((dn > up) & (dn > 0)) * dn
dip = 100 * _wilder_sum(pos, n) / trs
din = 100 * _wilder_sum(neg, n) / trs
dx = 100 * np.abs((dip - din)/(dip + din))
adx = ema(dx, n, wilder=True)
return DataFrame(dict(adx=adx, dip=dip, din=din))
def chandelier(s, position, n=22, npen=3):
if position == 'long':
return hhv(s, n) - npen*atr(s, n)
else:
return llv(s, n) + npen*atr(s, n)
def vortex(s, n=14):
ss = s.shift(1)
tr = s.high.combine(ss.close, max) - s.low.combine(ss.close, min)
trn = moments.rolling_sum(tr, n)
vmp = np.abs(s.high - ss.low)
vmm = np.abs(s.low - ss.high)
vip = moments.rolling_sum(vmp, n) / trn
vin = moments.rolling_sum(vmm, n) / trn
return DataFrame(dict(vin=vin, vip=vip))
@series_indicator('close')
def gmma(s, nshort=[3, 5, 8, 10, 12, 15],
nlong=[30, 35, 40, 45, 50, 60]):
short = {str(n): ema(s, n) for n in nshort}
long = {str(n): ema(s, n) for n in nlong}
return DataFrame(short), DataFrame(long)
def zigzag(s, pct=5):
ut = 1 + pct / 100
dt = 1 - pct / 100
ld = s.index[0]
lp = s.close[ld]
tr = None
zzd, zzp = [ld], [lp]
for ix, ch, cl in zip(s.index, s.high, s.low):
# No initial trend
if tr is None:
if ch / lp > ut:
tr = 1
elif cl / lp < dt:
tr = -1
# Trend is up
elif tr == 1:
# New high
if ch > lp:
ld, lp = ix, ch
# Reversal
elif cl / lp < dt:
zzd.append(ld)
zzp.append(lp)
tr, ld, lp = -1, ix, cl
# Trend is down
else:
# New low
if cl < lp:
ld, lp = ix, cl
# Reversal
elif ch / lp > ut:
zzd.append(ld)
zzp.append(lp)
tr, ld, lp = 1, ix, ch
# Extrapolate the current trend
if zzd[-1] != s.index[-1]:
zzd.append(s.index[-1])
if tr is None:
zzp.append(s.close[zzd[-1]])
elif tr == 1:
zzp.append(s.high[zzd[-1]])
else:
zzp.append(s.low[zzd[-1]])
return Series(zzp, index=zzd)
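# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The indicator
# functions above expect either a Series or a DataFrame with 'high', 'low',
# 'close' (and, for volume-based indicators, 'volume') columns indexed by
# date; the synthetic frame below exists only for demonstration.
if __name__ == '__main__':
    import pandas as pd

    idx = pd.date_range('2014-01-01', periods=60)
    close = Series(10 + np.sin(np.arange(60) / 5.0), index=idx)
    ohlc = DataFrame({'high': close + 0.5, 'low': close - 0.5,
                      'close': close, 'volume': 1000.0}, index=idx)

    print(ema(ohlc, 10).tail())   # EMA of the close column
    print(rsi(ohlc, 14).tail())   # 14-period RSI
    print(atr(ohlc, 14).tail())   # average true range
    print(macd(ohlc).tail())      # DataFrame with macd/signal/hist columns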
| mit | 5,374,225,152,968,924,000 | 24.365994 | 78 | 0.541241 | false |
TuSimple/mxnet | example/bi-lstm-sort/rnn_model.py | 19 | 2920 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C0111,too-many-arguments,too-many-instance-attributes,too-many-locals,redefined-outer-name,fixme
# pylint: disable=superfluous-parens, no-member, invalid-name
import sys
import numpy as np
import mxnet as mx
from lstm import LSTMState, LSTMParam, lstm, bi_lstm_inference_symbol
class BiLSTMInferenceModel(object):
def __init__(self,
seq_len,
input_size,
num_hidden,
num_embed,
num_label,
arg_params,
ctx=mx.cpu(),
dropout=0.):
self.sym = bi_lstm_inference_symbol(input_size, seq_len,
num_hidden,
num_embed,
num_label,
dropout)
batch_size = 1
init_c = [('l%d_init_c'%l, (batch_size, num_hidden)) for l in range(2)]
init_h = [('l%d_init_h'%l, (batch_size, num_hidden)) for l in range(2)]
data_shape = [("data", (batch_size, seq_len, ))]
input_shapes = dict(init_c + init_h + data_shape)
        self.executor = self.sym.simple_bind(ctx=ctx, **input_shapes)
for key in self.executor.arg_dict.keys():
if key in arg_params:
arg_params[key].copyto(self.executor.arg_dict[key])
state_name = []
for i in range(2):
state_name.append("l%d_init_c" % i)
state_name.append("l%d_init_h" % i)
self.states_dict = dict(zip(state_name, self.executor.outputs[1:]))
self.input_arr = mx.nd.zeros(data_shape[0][1])
def forward(self, input_data, new_seq=False):
if new_seq == True:
for key in self.states_dict.keys():
self.executor.arg_dict[key][:] = 0.
input_data.copyto(self.executor.arg_dict["data"])
self.executor.forward()
for key in self.states_dict.keys():
self.states_dict[key].copyto(self.executor.arg_dict[key])
prob = self.executor.outputs[0].asnumpy()
return prob
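# Illustrative usage sketch (hypothetical values; arg_params would normally be
# loaded from a checkpoint produced by the accompanying training script):
#
#   _, arg_params, __ = mx.model.load_checkpoint('sort', num_epoch)
#   model = BiLSTMInferenceModel(seq_len=5, input_size=vocab_size,
#                                num_hidden=300, num_embed=512,
#                                num_label=vocab_size,
#                                arg_params=arg_params, ctx=mx.cpu())
#   data = mx.nd.array([[12, 7, 33, 2, 19]])   # shape (batch_size=1, seq_len)
#   prob = model.forward(data, new_seq=True)   # per-position label probabilities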
| apache-2.0 | 6,278,898,827,945,955,000 | 39 | 114 | 0.594178 | false |
rosswhitfield/mantid | Framework/PythonInterface/test/python/plugins/algorithms/LoadWANDSCDTest.py | 3 | 2825 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantid.simpleapi import LoadWANDSCD
import unittest
class LoadWANDTest(unittest.TestCase):
def test(self):
LoadWANDTest_ws = LoadWANDSCD('HB2C_7000.nxs.h5,HB2C_7001.nxs.h5')
self.assertTrue(LoadWANDTest_ws)
self.assertEqual(LoadWANDTest_ws.getNumDims(), 3)
self.assertEqual(LoadWANDTest_ws.getNPoints(), 1966080*2)
self.assertEqual(LoadWANDTest_ws.getSignalArray().max(), 7)
d0 = LoadWANDTest_ws.getDimension(0)
self.assertEqual(d0.name, 'y')
self.assertEqual(d0.getNBins(), 512)
self.assertEqual(d0.getMinimum(), 0.5)
self.assertEqual(d0.getMaximum(), 512.5)
d1 = LoadWANDTest_ws.getDimension(1)
self.assertEqual(d1.name, 'x')
self.assertEqual(d1.getNBins(), 3840)
self.assertEqual(d1.getMinimum(), 0.5)
self.assertEqual(d1.getMaximum(), 3840.5)
d2 = LoadWANDTest_ws.getDimension(2)
self.assertEqual(d2.name, 'scanIndex')
self.assertEqual(d2.getNBins(), 2)
self.assertEqual(d2.getMinimum(), 0.5)
self.assertEqual(d2.getMaximum(), 2.5)
self.assertEqual(LoadWANDTest_ws.getNumExperimentInfo(), 1)
self.assertEqual(LoadWANDTest_ws.getExperimentInfo(0).getInstrument().getName(), 'WAND')
run = LoadWANDTest_ws.getExperimentInfo(0).run()
s1 = run.getProperty('s1').value
self.assertEqual(len(s1), 2)
self.assertEqual(s1[0], -142.6)
self.assertEqual(s1[1], -142.5)
run_number = run.getProperty('run_number').value
self.assertEqual(len(run_number), 2)
self.assertEqual(run_number[0], 7000)
self.assertEqual(run_number[1], 7001)
monitor_count=run.getProperty('monitor_count').value
self.assertEqual(len(monitor_count), 2)
self.assertEqual(monitor_count[0], 907880)
self.assertEqual(monitor_count[1], 908651)
duration = run.getProperty('duration').value
self.assertEqual(len(duration), 2)
self.assertAlmostEqual(duration[0], 40.05, 5)
self.assertAlmostEqual(duration[1], 40.05, 5)
# test that the goniometer has been set correctly
self.assertEqual(run.getNumGoniometers(), 2)
self.assertAlmostEqual(run.getGoniometer(0).getEulerAngles('YZY')[0], -142.6) # s1 from HB2C_7000
self.assertAlmostEqual(run.getGoniometer(1).getEulerAngles('YZY')[0], -142.5) # s1 from HB2C_7001
LoadWANDTest_ws.delete()
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -1,543,049,708,522,734,000 | 40.544118 | 105 | 0.664425 | false |
lkundrak/scraperwiki | web/codewiki/migrations/0013_auto__add_codepermission.py | 1 | 13483 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CodePermission'
db.create_table('codewiki_codepermission', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('code', self.gf('django.db.models.fields.related.ForeignKey')(related_name='permissions', to=orm['codewiki.Code'])),
('can_read', self.gf('django.db.models.fields.BooleanField')(default=False)),
('can_write', self.gf('django.db.models.fields.BooleanField')(default=False)),
('permitted_object', self.gf('django.db.models.fields.related.ForeignKey')(related_name='permitted', to=orm['codewiki.Code'])),
))
db.send_create_signal('codewiki', ['CodePermission'])
def backwards(self, orm):
# Deleting model 'CodePermission'
db.delete_table('codewiki_codepermission')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'codewiki.code': {
'Meta': {'object_name': 'Code'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'first_published_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'forked_from': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['codewiki.Code']", 'null': 'True', 'blank': 'True'}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isstartup': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'istutorial': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'python'", 'max_length': '32'}),
'line_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'privacy_status': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '32'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'relations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'relations_rel_+'", 'blank': 'True', 'to': "orm['codewiki.Code']"}),
'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'ok'", 'max_length': '10', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "'Untitled'", 'max_length': '100'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'through': "orm['codewiki.UserCodeRole']", 'symmetrical': 'False'}),
'wiki_type': ('django.db.models.fields.CharField', [], {'default': "'scraper'", 'max_length': '32'})
},
'codewiki.codecommitevent': {
'Meta': {'object_name': 'CodeCommitEvent'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revision': ('django.db.models.fields.IntegerField', [], {})
},
'codewiki.codepermission': {
'Meta': {'object_name': 'CodePermission'},
'can_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'code': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'permissions'", 'to': "orm['codewiki.Code']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'permitted_object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'permitted'", 'to': "orm['codewiki.Code']"})
},
'codewiki.codesetting': {
'Meta': {'object_name': 'CodeSetting'},
'code': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'settings'", 'to': "orm['codewiki.Code']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'last_edit_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_edited': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'value': ('django.db.models.fields.TextField', [], {})
},
'codewiki.domainscrape': {
'Meta': {'object_name': 'DomainScrape'},
'bytes_scraped': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pages_scraped': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'scraper_run_event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['codewiki.ScraperRunEvent']"})
},
'codewiki.scraper': {
'Meta': {'object_name': 'Scraper', '_ormbases': ['codewiki.Code']},
'code_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['codewiki.Code']", 'unique': 'True', 'primary_key': 'True'}),
'has_geo': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_temporal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_run': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'default': "'Unknown'", 'max_length': '100', 'blank': 'True'}),
'license_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'record_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'run_interval': ('django.db.models.fields.IntegerField', [], {'default': '86400'})
},
'codewiki.scrapermetadata': {
'Meta': {'object_name': 'ScraperMetadata'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'run_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'scraper': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['codewiki.Scraper']"}),
'value': ('django.db.models.fields.TextField', [], {})
},
'codewiki.scraperrunevent': {
'Meta': {'object_name': 'ScraperRunEvent'},
'exception_message': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'first_url_scraped': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'output': ('django.db.models.fields.TextField', [], {}),
'pages_scraped': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pid': ('django.db.models.fields.IntegerField', [], {}),
'records_produced': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'run_ended': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'run_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'run_started': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'scraper': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['codewiki.Scraper']"})
},
'codewiki.usercodeediting': {
'Meta': {'object_name': 'UserCodeEditing'},
'closedsince': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'code': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['codewiki.Code']", 'null': 'True'}),
'editingsince': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'runningsince': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'twisterclientnumber': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'twisterscraperpriority': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'codewiki.usercoderole': {
'Meta': {'object_name': 'UserCodeRole'},
'code': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['codewiki.Code']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'codewiki.view': {
'Meta': {'object_name': 'View', '_ormbases': ['codewiki.Code']},
'code_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['codewiki.Code']", 'unique': 'True', 'primary_key': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'render_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['codewiki']
| agpl-3.0 | -6,763,019,195,457,940,000 | 74.747191 | 182 | 0.552844 | false |
denizs/swampdragon | tests/test_model_serializer_deserialize.py | 3 | 2539 | from swampdragon.serializers.model_serializer import ModelSerializer
from swampdragon.testing.dragon_testcase import DragonTestCase
from .models import TextModel, SDModel
from datetime import datetime
from django.db import models
# to make sure none of the ModelSerializer variables are clobbering the data
MODEL_KEYWORDS = ('data', )
# TODO: support the rest of these field names
# MODEL_KEYWORDS = ('data', 'opts', 'initial', 'base_fields', 'm2m_fields', 'related_fields', 'errors')
class KeywordModel(SDModel):
data = models.TextField()
# TODO: support the rest of these field names
# opts = models.TextField()
# initial = models.TextField()
# base_fields = models.TextField()
# m2m_fields = models.TextField()
# related_fields = models.TextField()
# errors = models.TextField()
class KeywordModelSerializer(ModelSerializer):
class Meta:
model = KeywordModel
publish_fields = MODEL_KEYWORDS
update_fields = MODEL_KEYWORDS
class DateModel(SDModel):
date = models.DateTimeField()
class DateModelSerializer(ModelSerializer):
class Meta:
model = DateModel
publish_fields = ('date')
update_fields = ('date')
class TextModelSerializer(ModelSerializer):
class Meta:
model = TextModel
publish_fields = ('text')
update_fields = ('text')
class TestModelSerializer(DragonTestCase):
def test_deserialize_model(self):
data = {'text': 'foo'}
serializer = TextModelSerializer(data)
model_instance = serializer.save()
self.assertEqual(model_instance.text, data['text'])
def test_passing_invalid_data(self):
foo = 'text'
with self.assertRaises(Exception):
TextModelSerializer(foo)
def test_ignore_non_model_fields(self):
data = {'text': 'foo', 'random_field': 'val'}
serializer = TextModelSerializer(data)
model_instance = serializer.deserialize()
self.assertEqual(model_instance.text, data['text'])
def test_deserialize_field(self):
date = datetime.now()
data = {'date': str(date)}
serializer = DateModelSerializer(data)
object = serializer.save()
self.assertEqual(object.date, date)
def test_deserialize_keyword_field(self):
data = dict(zip(MODEL_KEYWORDS, MODEL_KEYWORDS))
serializer = KeywordModelSerializer(data)
object = serializer.save()
for attr in MODEL_KEYWORDS:
self.assertEqual(getattr(object, attr), attr)
| bsd-3-clause | -6,294,718,001,304,070,000 | 30.7375 | 103 | 0.669949 | false |
highweb-project/highweb-webcl-html5spec | native_client_sdk/src/build_tools/test_projects.py | 37 | 12488 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import subprocess
import sys
import time
import build_projects
import build_version
import buildbot_common
import parse_dsc
from build_paths import OUT_DIR, SRC_DIR, SDK_SRC_DIR, SCRIPT_DIR
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
import getos
platform = getos.GetPlatform()
# TODO(binji): ugly hack -- can I get the browser in a cleaner way?
sys.path.append(os.path.join(SRC_DIR, 'chrome', 'test', 'nacl_test_injection'))
import find_chrome
browser_path = find_chrome.FindChrome(SRC_DIR, ['Debug', 'Release'])
# Fall back to using CHROME_PATH (same as in common.mk)
if not browser_path:
browser_path = os.environ.get('CHROME_PATH')
pepper_ver = str(int(build_version.ChromeMajorVersion()))
pepperdir = os.path.join(OUT_DIR, 'pepper_' + pepper_ver)
browser_tester_py = os.path.join(SRC_DIR, 'ppapi', 'native_client', 'tools',
'browser_tester', 'browser_tester.py')
ALL_CONFIGS = ['Debug', 'Release']
ALL_TOOLCHAINS = [
'newlib',
'glibc',
'pnacl',
'win',
'linux',
'mac',
'clang-newlib',
]
# Values you can filter by:
# name: The name of the test. (e.g. "pi_generator")
# config: See ALL_CONFIGS above.
# toolchain: See ALL_TOOLCHAINS above.
# platform: mac/win/linux.
#
# All keys must be matched, but any value that matches in a sequence is
# considered a match for that key. For example:
#
# {'name': ('pi_generator', 'input_event'), 'toolchain': ('newlib', 'pnacl')}
#
# Will match 8 tests:
# pi_generator.newlib_debug_test
# pi_generator.newlib_release_test
# input_event.newlib_debug_test
# input_event.newlib_release_test
# pi_generator.glibc_debug_test
# pi_generator.glibc_release_test
# input_event.glibc_debug_test
# input_event.glibc_release_test
DISABLED_TESTS = [
# TODO(binji): Disable 3D examples on linux/win/mac. See
# http://crbug.com/262379.
{'name': 'graphics_3d', 'platform': ('win', 'linux', 'mac')},
{'name': 'video_decode', 'platform': ('win', 'linux', 'mac')},
{'name': 'video_encode', 'platform': ('win', 'linux', 'mac')},
# TODO(sbc): Disable pi_generator on linux/win/mac. See
# http://crbug.com/475255.
{'name': 'pi_generator', 'platform': ('win', 'linux', 'mac')},
# media_stream_audio uses audio input devices which are not supported.
{'name': 'media_stream_audio', 'platform': ('win', 'linux', 'mac')},
# media_stream_video uses 3D and webcam which are not supported.
{'name': 'media_stream_video', 'platform': ('win', 'linux', 'mac')},
# TODO(binji): These tests timeout on the trybots because the NEXEs take
# more than 40 seconds to load (!). See http://crbug.com/280753
{'name': 'nacl_io_test', 'platform': 'win', 'toolchain': 'glibc'},
# We don't test "getting_started/part1" because it would complicate the
# example.
# TODO(binji): figure out a way to inject the testing code without
# modifying the example; maybe an extension?
{'name': 'part1'},
]
def ValidateToolchains(toolchains):
invalid_toolchains = set(toolchains) - set(ALL_TOOLCHAINS)
if invalid_toolchains:
buildbot_common.ErrorExit('Invalid toolchain(s): %s' % (
', '.join(invalid_toolchains)))
def GetServingDirForProject(desc):
dest = desc['DEST']
path = os.path.join(pepperdir, *dest.split('/'))
return os.path.join(path, desc['NAME'])
def GetRepoServingDirForProject(desc):
# This differs from GetServingDirForProject, because it returns the location
# within the Chrome repository of the project, not the "pepperdir".
return os.path.dirname(desc['FILEPATH'])
def GetExecutableDirForProject(desc, toolchain, config):
return os.path.join(GetServingDirForProject(desc), toolchain, config)
def GetBrowserTesterCommand(desc, toolchain, config):
if browser_path is None:
buildbot_common.ErrorExit('Failed to find chrome browser using FindChrome.')
args = [
sys.executable,
browser_tester_py,
'--browser_path', browser_path,
'--timeout', '30.0', # seconds
# Prevent the infobar that shows up when requesting filesystem quota.
'--browser_flag', '--unlimited-storage',
'--enable_sockets',
# Prevent installing a new copy of PNaCl.
'--browser_flag', '--disable-component-update',
]
args.extend(['--serving_dir', GetServingDirForProject(desc)])
# Fall back on the example directory in the Chromium repo, to find test.js.
args.extend(['--serving_dir', GetRepoServingDirForProject(desc)])
# If it is not found there, fall back on the dummy one (in this directory.)
args.extend(['--serving_dir', SCRIPT_DIR])
if toolchain == platform:
exe_dir = GetExecutableDirForProject(desc, toolchain, config)
ppapi_plugin = os.path.join(exe_dir, desc['NAME'])
if platform == 'win':
ppapi_plugin += '.dll'
else:
ppapi_plugin += '.so'
args.extend(['--ppapi_plugin', ppapi_plugin])
ppapi_plugin_mimetype = 'application/x-ppapi-%s' % config.lower()
args.extend(['--ppapi_plugin_mimetype', ppapi_plugin_mimetype])
if toolchain == 'pnacl':
args.extend(['--browser_flag', '--enable-pnacl'])
url = 'index.html'
url += '?tc=%s&config=%s&test=true' % (toolchain, config)
args.extend(['--url', url])
return args
def GetBrowserTesterEnv():
# browser_tester imports tools/valgrind/memcheck_analyze, which imports
# tools/valgrind/common. Well, it tries to, anyway, but instead imports
# common from PYTHONPATH first (which on the buildbots, is a
# common/__init__.py file...).
#
# Clear the PYTHONPATH so it imports the correct file.
env = dict(os.environ)
env['PYTHONPATH'] = ''
return env
def RunTestOnce(desc, toolchain, config):
args = GetBrowserTesterCommand(desc, toolchain, config)
env = GetBrowserTesterEnv()
start_time = time.time()
try:
subprocess.check_call(args, env=env)
result = True
except subprocess.CalledProcessError:
result = False
elapsed = (time.time() - start_time) * 1000
return result, elapsed
def RunTestNTimes(desc, toolchain, config, times):
total_elapsed = 0
for _ in xrange(times):
result, elapsed = RunTestOnce(desc, toolchain, config)
total_elapsed += elapsed
if result:
# Success, stop retrying.
break
return result, total_elapsed
def RunTestWithGtestOutput(desc, toolchain, config, retry_on_failure_times):
test_name = GetTestName(desc, toolchain, config)
WriteGtestHeader(test_name)
result, elapsed = RunTestNTimes(desc, toolchain, config,
retry_on_failure_times)
WriteGtestFooter(result, test_name, elapsed)
return result
def WriteGtestHeader(test_name):
print '\n[ RUN ] %s' % test_name
sys.stdout.flush()
sys.stderr.flush()
def WriteGtestFooter(success, test_name, elapsed):
sys.stdout.flush()
sys.stderr.flush()
if success:
message = '[ OK ]'
else:
message = '[ FAILED ]'
print '%s %s (%d ms)' % (message, test_name, elapsed)
def GetTestName(desc, toolchain, config):
return '%s.%s_%s_test' % (desc['NAME'], toolchain, config.lower())
def IsTestDisabled(desc, toolchain, config):
def AsList(value):
if type(value) not in (list, tuple):
return [value]
return value
def TestMatchesDisabled(test_values, disabled_test):
for key in test_values:
if key in disabled_test:
if test_values[key] not in AsList(disabled_test[key]):
return False
return True
test_values = {
'name': desc['NAME'],
'toolchain': toolchain,
'config': config,
'platform': platform
}
for disabled_test in DISABLED_TESTS:
if TestMatchesDisabled(test_values, disabled_test):
return True
return False
def WriteHorizontalBar():
print '-' * 80
def WriteBanner(message):
WriteHorizontalBar()
print message
WriteHorizontalBar()
def RunAllTestsInTree(tree, toolchains, configs, retry_on_failure_times):
tests_run = 0
total_tests = 0
failed = []
disabled = []
for _, desc in parse_dsc.GenerateProjects(tree):
desc_configs = desc.get('CONFIGS', ALL_CONFIGS)
valid_toolchains = set(toolchains) & set(desc['TOOLS'])
valid_configs = set(configs) & set(desc_configs)
for toolchain in sorted(valid_toolchains):
for config in sorted(valid_configs):
test_name = GetTestName(desc, toolchain, config)
total_tests += 1
if IsTestDisabled(desc, toolchain, config):
disabled.append(test_name)
continue
tests_run += 1
success = RunTestWithGtestOutput(desc, toolchain, config,
retry_on_failure_times)
if not success:
failed.append(test_name)
if failed:
WriteBanner('FAILED TESTS')
for test in failed:
print ' %s failed.' % test
if disabled:
WriteBanner('DISABLED TESTS')
for test in disabled:
print ' %s disabled.' % test
WriteHorizontalBar()
print 'Tests run: %d/%d (%d disabled).' % (
tests_run, total_tests, len(disabled))
print 'Tests succeeded: %d/%d.' % (tests_run - len(failed), tests_run)
  # A truthy return value (used as the process exit status) means failures.
  return len(failed) != 0
def BuildAllTestsInTree(tree, toolchains, configs):
for branch, desc in parse_dsc.GenerateProjects(tree):
desc_configs = desc.get('CONFIGS', ALL_CONFIGS)
valid_toolchains = set(toolchains) & set(desc['TOOLS'])
valid_configs = set(configs) & set(desc_configs)
for toolchain in sorted(valid_toolchains):
for config in sorted(valid_configs):
name = '%s/%s' % (branch, desc['NAME'])
build_projects.BuildProjectsBranch(pepperdir, name, deps=False,
clean=False, config=config,
args=['TOOLCHAIN=%s' % toolchain])
def GetProjectTree(include):
# Everything in src is a library, and cannot be run.
exclude = {'DEST': 'src'}
try:
return parse_dsc.LoadProjectTree(SDK_SRC_DIR, include=include,
exclude=exclude)
except parse_dsc.ValidationError as e:
buildbot_common.ErrorExit(str(e))
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-c', '--config',
help='Choose configuration to run (Debug or Release). Runs both '
'by default', action='append')
parser.add_argument('-x', '--experimental',
help='Run experimental projects', action='store_true')
parser.add_argument('-t', '--toolchain',
help='Run using toolchain. Can be passed more than once.',
action='append', default=[])
parser.add_argument('-d', '--dest',
help='Select which destinations (project types) are valid.',
action='append')
parser.add_argument('-b', '--build',
help='Build each project before testing.', action='store_true')
parser.add_argument('--retry-times',
help='Number of types to retry on failure', type=int, default=1)
parser.add_argument('projects', nargs='*')
options = parser.parse_args(args)
if not options.toolchain:
options.toolchain = ['newlib', 'glibc', 'pnacl', 'host']
if 'host' in options.toolchain:
options.toolchain.remove('host')
options.toolchain.append(platform)
print 'Adding platform: ' + platform
ValidateToolchains(options.toolchain)
include = {}
if options.toolchain:
include['TOOLS'] = options.toolchain
print 'Filter by toolchain: ' + str(options.toolchain)
if not options.experimental:
include['EXPERIMENTAL'] = False
if options.dest:
include['DEST'] = options.dest
print 'Filter by type: ' + str(options.dest)
if options.projects:
include['NAME'] = options.projects
print 'Filter by name: ' + str(options.projects)
if not options.config:
options.config = ALL_CONFIGS
project_tree = GetProjectTree(include)
if options.build:
BuildAllTestsInTree(project_tree, options.toolchain, options.config)
return RunAllTestsInTree(project_tree, options.toolchain, options.config,
options.retry_times)
if __name__ == '__main__':
script_name = os.path.basename(sys.argv[0])
try:
sys.exit(main(sys.argv[1:]))
except parse_dsc.ValidationError as e:
buildbot_common.ErrorExit('%s: %s' % (script_name, e))
except KeyboardInterrupt:
buildbot_common.ErrorExit('%s: interrupted' % script_name)
| bsd-3-clause | 109,101,123,472,828,430 | 31.185567 | 80 | 0.664958 | false |
rleigh-dundee/openmicroscopy | components/tools/OmeroWeb/omeroweb/webclient/controller/basket.py | 4 | 1856 | #!/usr/bin/env python
#
#
#
# Copyright (c) 2008-2011 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
from django.core.urlresolvers import reverse
from webclient.controller import BaseController
class BaseBasket(BaseController):
imageInBasket = None
imgSize = 0
dsSize = 0
prSize = 0
sizeOfBasket = 0
def __init__(self, conn, **kw):
BaseController.__init__(self, conn)
def load_basket(self, request):
imInBasket = list()
dsInBasket = list()
prInBasket = list()
for imgId in request.session['imageInBasket']:
imInBasket.append(imgId)
#for dsId in request.session['datasetInBasket']:
# dsInBasket.append(dsId)
if len(imInBasket) > 0:
self.imageInBasket = list(self.conn.getObjects("Image", imInBasket))
self.imgSize = len(self.imageInBasket)
#if len(dsInBasket) > 0:
# self.datasetInBasket = list(self.conn.getDatasetsWithImages(dsInBasket))
# self.dsSize = len(self.datasetInBasket)
self.sizeOfBasket = self.imgSize#+self.dsSize
| gpl-2.0 | 1,066,365,171,015,853,400 | 31.017241 | 85 | 0.678879 | false |
bixbydev/Bixby | google/gdata/marketplace/client.py | 48 | 6497 | #!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LicensingClient simplifies Google Apps Marketplace Licensing API calls.
LicensingClient extends gdata.client.GDClient to ease interaction with
the Google Apps Marketplace Licensing API. These interactions include the ability
to retrieve License information for an application in the Google Apps Marketplace.
"""
__author__ = 'Alexandre Vivien <[email protected]>'
import gdata.marketplace.data
import gdata.client
import urllib
# Feed URI template. This must end with a /
# The strings in this template are eventually replaced with the API version
# and Google Apps domain name, respectively.
LICENSE_ROOT_URL = 'http://feedserver-enterprise.googleusercontent.com'
LICENSE_FEED_TEMPLATE = '%s/license?bq=' % LICENSE_ROOT_URL
LICENSE_NOTIFICATIONS_FEED_TEMPLATE = '%s/licensenotification?bq=' % LICENSE_ROOT_URL
class LicensingClient(gdata.client.GDClient):
"""Client extension for the Google Apps Marketplace Licensing API service.
Attributes:
host: string The hostname for the Google Apps Marketplace Licensing API service.
api_version: string The version of the Google Apps Marketplace Licensing API.
"""
api_version = '1.0'
auth_service = 'apps'
auth_scopes = gdata.gauth.AUTH_SCOPES['apps']
ssl = False
def __init__(self, domain, auth_token=None, **kwargs):
"""Constructs a new client for the Google Apps Marketplace Licensing API.
Args:
domain: string The Google Apps domain with the application installed.
auth_token: (optional) gdata.gauth.OAuthToken which authorizes this client to retrieve the License information.
kwargs: The other parameters to pass to the gdata.client.GDClient constructor.
"""
gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs)
self.domain = domain
def make_license_feed_uri(self, app_id=None, params=None):
"""Creates a license feed URI for the Google Apps Marketplace Licensing API.
Using this client's Google Apps domain, create a license feed URI for a particular application
in this domain. If params are provided, append them as GET params.
Args:
app_id: string The ID of the application for which to make a license feed URI.
params: dict (optional) key -> value params to append as GET vars to the
URI. Example: params={'start': 'my-resource-id'}
Returns:
A string giving the URI for the application's license for this client's Google
Apps domain.
"""
parameters = '[appid=%s][domain=%s]' % (app_id, self.domain)
uri = LICENSE_FEED_TEMPLATE + urllib.quote_plus(parameters)
if params:
uri += '&' + urllib.urlencode(params)
return uri
MakeLicenseFeedUri = make_license_feed_uri
def make_license_notifications_feed_uri(self, app_id=None, startdatetime=None, max_results=None, params=None):
"""Creates a license notifications feed URI for the Google Apps Marketplace Licensing API.
Using this client's Google Apps domain, create a license notifications feed URI for a particular application.
If params are provided, append them as GET params.
Args:
app_id: string The ID of the application for which to make a license feed URI.
startdatetime: Start date to retrieve the License notifications.
max_results: Number of results per page. Maximum is 100.
params: dict (optional) key -> value params to append as GET vars to the
URI. Example: params={'start': 'my-resource-id'}
Returns:
A string giving the URI for the application's license notifications for this client's Google
Apps domain.
"""
parameters = '[appid=%s]' % (app_id)
if startdatetime:
parameters += '[startdatetime=%s]' % startdatetime
else:
parameters += '[startdatetime=1970-01-01T00:00:00Z]'
if max_results:
parameters += '[max-results=%s]' % max_results
else:
parameters += '[max-results=100]'
uri = LICENSE_NOTIFICATIONS_FEED_TEMPLATE + urllib.quote_plus(parameters)
if params:
uri += '&' + urllib.urlencode(params)
return uri
MakeLicenseNotificationsFeedUri = make_license_notifications_feed_uri
def get_license(self, uri=None, app_id=None, **kwargs):
"""Fetches the application's license by application ID.
Args:
uri: string The base URI of the feed from which to fetch the license.
app_id: string The string ID of the application for which to fetch the license.
kwargs: The other parameters to pass to gdata.client.GDClient.get_entry().
Returns:
A License feed object representing the license with the given
base URI and application ID.
"""
if uri is None:
uri = self.MakeLicenseFeedUri(app_id)
return self.get_feed(uri,
desired_class=gdata.marketplace.data.LicenseFeed,
**kwargs)
GetLicense = get_license
def get_license_notifications(self, uri=None, app_id=None, startdatetime=None, max_results=None, **kwargs):
"""Fetches the application's license notifications by application ID.
Args:
uri: string The base URI of the feed from which to fetch the license.
app_id: string The string ID of the application for which to fetch the license.
startdatetime: Start date to retrieve the License notifications.
max_results: Number of results per page. Maximum is 100.
kwargs: The other parameters to pass to gdata.client.GDClient.get_entry().
Returns:
A License feed object representing the license notifications with the given
base URI and application ID.
"""
if uri is None:
uri = self.MakeLicenseNotificationsFeedUri(app_id, startdatetime, max_results)
return self.get_feed(uri,
desired_class=gdata.marketplace.data.LicenseFeed,
**kwargs)
GetLicenseNotifications = get_license_notifications
| gpl-3.0 | 581,474,232,804,723,500 | 39.60625 | 117 | 0.709866 | false |
jzbontar/orange-tree | Orange/widgets/visualize/owmosaic.py | 1 | 55141 | import os
import sys
from collections import defaultdict
from functools import reduce
from itertools import product
from math import sqrt
import numpy
from PyQt4.QtCore import QPoint, Qt, QRectF
from PyQt4.QtGui import (QGraphicsRectItem, QGraphicsView, QColor,
QGraphicsScene, QPainter, QIcon, QDialog, QPen,
QVBoxLayout, QListWidget, QSizePolicy, QApplication,
QGraphicsTextItem, QBrush, QGraphicsLineItem,
QGraphicsEllipseItem)
from Orange.widgets.settings import (Setting, DomainContextHandler,
ContextSetting)
from Orange.canvas.utils import environ
from Orange.classification import Learner
from Orange.data import Table, Variable, filter, DiscreteVariable, ContinuousVariable
from Orange.data.discretization import DiscretizeTable
from Orange.data.sql.table import SqlTable, LARGE_TABLE, DEFAULT_SAMPLE_TIME
from Orange.feature.discretization import EqualFreq
from Orange.statistics.distribution import get_distribution
from Orange.widgets import gui
from Orange.widgets.settings import DomainContextHandler
from Orange.widgets.utils import getHtmlCompatibleString
from Orange.widgets.utils.colorpalette import ColorPaletteDlg, DefaultRGBColors
from Orange.widgets.utils.scaling import get_variable_values_sorted
from Orange.widgets.widget import OWWidget, Default
PEARSON = 0
CLASS_DISTRIBUTION = 1
BOTTOM = 0
LEFT = 1
TOP = 2
RIGHT = 3
# using function with same name from owtools.py
# def get_variable_values_sorted(param):
# if hasattr(param, "values"):
# return param.values
# return []
class SelectionRectangle(QGraphicsRectItem):
pass
class MosaicSceneView(QGraphicsView):
def __init__(self, widget, *args):
QGraphicsView.__init__(self, *args)
self.widget = widget
self.bMouseDown = False
self.mouseDownPosition = QPoint(0, 0)
self.tempRect = None
# mouse button was pressed
def mousePressEvent(self, ev):
QGraphicsView.mousePressEvent(self, ev)
self.mouseDownPosition = QPoint(ev.pos().x(), ev.pos().y())
self.bMouseDown = True
self.mouseMoveEvent(ev)
# mouse button was pressed and mouse is moving ######################
def mouseMoveEvent(self, ev):
QGraphicsView.mouseMoveEvent(self, ev)
if ev.button() == Qt.RightButton:
return
if not self.bMouseDown:
if self.tempRect:
self.scene().removeItem(self.tempRect)
self.tempRect = None
else:
if not self.tempRect:
self.tempRect = SelectionRectangle(None, self.scene())
rect = QRectF(min(self.mouseDownPosition.x(), ev.pos().x()),
min(self.mouseDownPosition.y(), ev.pos().y()),
max(abs(self.mouseDownPosition.x() - ev.pos().x()), 1),
max(abs(self.mouseDownPosition.y() - ev.pos().y()), 1))
self.tempRect.setRect(rect)
# mouse button was released #########################################
def mouseReleaseEvent(self, ev):
self.bMouseDown = False
if ev.button() == Qt.RightButton:
self.widget.removeLastSelection()
elif self.tempRect:
self.widget.addSelection(self.tempRect)
self.scene().removeItem(self.tempRect)
self.tempRect = None
class OWMosaicDisplay(OWWidget):
name = "Mosaic Display"
description = "Shows mosaic displays"
icon = "icons/MosaicDisplay.svg"
inputs = [("Data", Table, "setData", Default),
("Data Subset", Table, "setSubsetData")]
outputs = [("Selected Data", Table), ("Learner", Learner)]
settingsHandler = DomainContextHandler()
show_apriori_distribution_lines = Setting(False)
show_apriori_distribution_boxes = Setting(True)
use_boxes = Setting(True)
interior_coloring = Setting(0)
color_settings = Setting(None)
selected_schema_index = Setting(0)
show_subset_data_boxes = Setting(True)
remove_unused_values = Setting(True)
variable1 = ContextSetting("")
variable2 = ContextSetting("")
variable3 = ContextSetting("")
variable4 = ContextSetting("")
interior_coloring_opts = ["Pearson residuals",
"Class distribution"]
subboxesOpts = ["Expected distribution",
"Apriori distribution"]
_apriori_pen_color = QColor(255, 255, 255, 128)
_box_size = 5
_cellspace = 4
def __init__(self, parent=None):
super().__init__(self, parent)
#set default settings
self.data = None
self.unprocessed_subset_data = None
self.subset_data = None
self.names = [] # class values
self.exploreAttrPermutations = 0
self.attributeNameOffset = 30
self.attributeValueOffset = 15
self.residuals = [] # residual values if the residuals are visualized
self.aprioriDistributions = []
self.colorPalette = None
self.permutationDict = {}
self.manualAttributeValuesDict = {}
self.conditionalDict = None
self.conditionalSubsetDict = None
self.activeRule = None
self.selectionRectangle = None
self.selectionConditionsHistorically = []
self.selectionConditions = []
# color palettes for visualizing Pearson residuals
self.blue_colors = [QColor(255, 255, 255), QColor(210, 210, 255),
QColor(110, 110, 255), QColor(0, 0, 255)]
self.red_colors = [QColor(255, 255, 255), QColor(255, 200, 200),
QColor(255, 100, 100), QColor(255, 0, 0)]
self.canvas = QGraphicsScene()
self.canvas_view = MosaicSceneView(self, self.canvas, self.mainArea)
self.mainArea.layout().addWidget(self.canvas_view)
self.canvas_view.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.canvas_view.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.canvas_view.setRenderHint(QPainter.Antialiasing)
#self.canvasView.setAlignment(Qt.AlignLeft | Qt.AlignTop)
#GUI
#add controls to self.controlArea widget
#self.controlArea.setMinimumWidth(235)
box = gui.widgetBox(self.controlArea, "Variables")
for i in range(1, 5):
inbox = gui.widgetBox(box, orientation="horizontal")
combo = gui.comboBox(inbox, self, value="variable{}".format(i),
#label="Variable {}".format(i),
orientation="horizontal",
callback=self.updateGraphAndPermList,
sendSelectedValue=True, valueType=str)
butt = gui.button(inbox, self, "", callback=self.orderAttributeValues,
tooltip="Change the order of attribute values")
butt.setFixedSize(26, 24)
butt.setCheckable(1)
butt.setIcon(QIcon(os.path.join(environ.widget_install_dir,
"icons/Dlg_sort.png")))
setattr(self, "sort{}".format(i), butt)
setattr(self, "attr{}".format(i) + "Combo", combo)
# self.optimizationDlg = OWMosaicOptimization(self, self.signalManager)
# optimizationButtons = gui.widgetBox(self.GeneralTab, "Dialogs", orientation="horizontal")
# gui.button(optimizationButtons, self, "VizRank", callback=self.optimizationDlg.reshow, debuggingEnabled=0,
# tooltip="Find attribute combinations that will separate different classes as clearly as possible.")
# self.collapsableWBox = gui.collapsableWidgetBox(self.GeneralTab, "Explore Attribute Permutations", self,
# "exploreAttrPermutations",
# callback=self.permutationListToggle)
# self.permutationList = gui.listBox(self.collapsableWBox, self, callback=self.setSelectedPermutation)
#self.permutationList.hide()
box5 = gui.widgetBox(self.controlArea, "Visual Settings")
gui.comboBox(box5, self, "interior_coloring",
label="Color", orientation="horizontal",
items=self.interior_coloring_opts,
callback=self.updateGraph)
gui.checkBox(box5, self, "remove_unused_values",
"Remove unused attribute values")
gui.checkBox(box5, self, 'show_apriori_distribution_lines',
'Show apriori distribution with lines',
callback=self.updateGraph)
self.box8 = gui.widgetBox(self.controlArea, "Boxes in Cells")
self.cb_show_subset = gui.checkBox(
self.box8, self, 'show_subset_data_boxes',
'Show subset data distribution', callback=self.updateGraph)
self.cb_show_subset.setDisabled(self.subset_data is None)
cb = gui.checkBox(self.box8, self, 'use_boxes', 'Display sub-box...',
callback=self.updateGraph)
ind_box = gui.indentedBox(self.box8, sep=gui.checkButtonOffsetHint(cb))
gui.comboBox(ind_box, self, 'show_apriori_distribution_boxes',
items=self.subboxesOpts, callback=self.updateGraph)
hbox = gui.widgetBox(self.controlArea, "Colors", addSpace=1)
gui.button(hbox, self, "Set Colors", self.setColors,
tooltip="Set the color palette for class values")
#self.box6.setSizePolicy(QSizePolicy(QSizePolicy.Minimum , QSizePolicy.Fixed ))
self.controlArea.layout().addStretch(1)
# self.graphButton.clicked.connect(saveToFileCanvas)
self.icons = gui.attributeIconDict
self.resize(830, 550)
# self.VizRankLearner = MosaicTreeLearner(self.optimizationDlg)
# self.send("Learner", self.VizRankLearner)
# self.wdChildDialogs = [self.optimizationDlg] # used when running widget debugging
# self.collapsableWBox.updateControls()
dlg = self.createColorDialog()
self.colorPalette = dlg.getDiscretePalette("discPalette")
self.selectionColorPalette = [QColor(*col) for col in DefaultRGBColors]
gui.rubber(self.controlArea)
def permutationListToggle(self):
if self.exploreAttrPermutations:
self.updateGraphAndPermList()
def setSelectedPermutation(self):
newRow = self.permutationList.currentRow()
if self.permutationList.count() > 0 and self.bestPlacements and newRow < len(self.bestPlacements):
self.removeAllSelections()
val, attrList, valueOrder = self.bestPlacements[newRow]
if len(attrList) > 0: self.variable1 = attrList[0]
if len(attrList) > 1: self.variable2 = attrList[1]
if len(attrList) > 2: self.variable3 = attrList[2]
if len(attrList) > 3: self.variable4 = attrList[3]
self.updateGraph(
customValueOrderDict=dict([(attrList[i], tuple(valueOrder[i])) for i in range(len(attrList))]))
def orderAttributeValues(self):
attr = None
if self.sort1.isChecked():
attr = self.variable1
elif self.sort2.isChecked():
attr = self.variable2
elif self.sort3.isChecked():
attr = self.variable3
elif self.sort4.isChecked():
attr = self.variable4
if self.data and attr != "" and attr != "(None)":
dlg = SortAttributeValuesDlg(attr,
self.manualAttributeValuesDict.get(attr, None) or get_variable_values_sorted(
self.data.domain[attr]))
if dlg.exec_() == QDialog.Accepted:
self.manualAttributeValuesDict[attr] = [str(dlg.attributeList.item(i).text()) for i in
range(dlg.attributeList.count())]
for control in [self.sort1, self.sort2, self.sort3, self.sort4]:
control.setChecked(0)
self.updateGraph()
# initialize combo boxes with discrete attributes
def initCombos(self, data):
for combo in [self.attr1Combo, self.attr2Combo, self.attr3Combo, self.attr4Combo]:
combo.clear()
if data == None: return
self.attr2Combo.addItem("(None)")
self.attr3Combo.addItem("(None)")
self.attr4Combo.addItem("(None)")
for attr in data.domain:
if isinstance(attr, DiscreteVariable):
for combo in [self.attr1Combo, self.attr2Combo, self.attr3Combo, self.attr4Combo]:
combo.addItem(self.icons[attr], attr.name)
if self.attr1Combo.count() > 0:
self.variable1 = str(self.attr1Combo.itemText(0))
self.variable2 = str(self.attr2Combo.itemText(0 + 2 * (self.attr2Combo.count() > 2)))
self.variable3 = str(self.attr3Combo.itemText(0))
self.variable4 = str(self.attr4Combo.itemText(0))
# when we resize the widget, we have to redraw the data
def resizeEvent(self, e):
OWWidget.resizeEvent(self, e)
self.updateGraph()
def showEvent(self, ev):
OWWidget.showEvent(self, ev)
self.updateGraph()
def closeEvent(self, ce):
# self.optimizationDlg.hide()
QDialog.closeEvent(self, ce)
# ------------- SIGNALS --------------------------
# # DATA signal - receive new data and update all fields
def setData(self, data):
if type(data) == SqlTable and data.approx_len() > LARGE_TABLE:
data = data.sample_time(DEFAULT_SAMPLE_TIME)
self.closeContext()
self.data = data
self.bestPlacements = None
self.manualAttributeValuesDict = {}
self.attributeValuesDict = {}
self.information([0, 1, 2])
if not self.data:
return
if any(isinstance(attr, ContinuousVariable)
for attr in self.data.domain):
self.information(0, "Data contains continuous variables. " +
"Discretize the data to use them.")
""" TODO: check
if data.has_missing_class():
self.information(1, "Examples with missing classes were removed.")
if self.removeUnusedValues and len(data) != len(self.data):
self.information(2, "Unused attribute values were removed.")
"""
if isinstance(self.data.domain.class_var, DiscreteVariable):
self.interior_coloring = CLASS_DISTRIBUTION
self.colorPalette.set_number_of_colors(
len(self.data.domain.class_var.values))
else:
self.interior_coloring = PEARSON
self.initCombos(self.data)
self.openContext(self.data)
# if we first received subset data
# we now have to call setSubsetData to process it
if self.unprocessed_subset_data:
self.setSubsetData(self.unprocessed_subset_data)
self.unprocessed_subset_data = None
def setSubsetData(self, data):
if not self.data:
self.unprocessed_subset_data = data
self.warning(10)
else:
try:
self.subset_data = data.from_table(self.data.domain, data)
self.warning(10)
except:
self.subset_data = None
self.warning(10, data and "'Data' and 'Data Subset'" +
" do not have compatible domains." or "")
self.cb_show_subset.setDisabled(self.subset_data is None)
# this is called by OWBaseWidget after setData and setSubsetData are called.
# this way the graph is updated only once
def handleNewSignals(self):
self.updateGraphAndPermList()
# ------------------------------------------------
def setShownAttributes(self, attrList, **args):
if not attrList: return
self.variable1 = attrList[0]
if len(attrList) > 1:
self.variable2 = attrList[1]
else:
self.variable2 = "(None)"
if len(attrList) > 2:
self.variable3 = attrList[2]
else:
self.variable3 = "(None)"
if len(attrList) > 3:
self.variable4 = attrList[3]
else:
self.variable4 = "(None)"
self.attributeValuesDict = args.get("customValueOrderDict", None)
self.updateGraphAndPermList()
def getShownAttributeList(self):
attrList = [self.variable1, self.variable2, self.variable3, self.variable4]
while "(None)" in attrList: attrList.remove("(None)")
while "" in attrList: attrList.remove("")
return attrList
def updateGraphAndPermList(self, **args):
self.removeAllSelections()
# self.permutationList.clear()
if self.exploreAttrPermutations:
attrList = self.getShownAttributeList()
if not getattr(self, "bestPlacements", []) or 0 in [attr in self.bestPlacements[0][1] for attr in
attrList]: # we might have bestPlacements for a different set of attributes
self.setStatusBarText(
"Evaluating different attribute permutations. You can stop evaluation by opening VizRank dialog and pressing 'Stop optimization' button.")
self.bestPlacements = self.optimizationDlg.optimizeCurrentAttributeOrder(attrList, updateGraph=0)
self.setStatusBarText("")
if self.bestPlacements:
self.permutationList.addItems(
["%.2f - %s" % (val, attrs) for (val, attrs, order) in self.bestPlacements])
attrList, valueOrder = self.bestPlacements[0][1], self.bestPlacements[0][2]
self.attributeValuesDict = dict([(attrList[i], tuple(valueOrder[i])) for i in range(len(attrList))])
self.updateGraph(**args)
# ############################################################################
# updateGraph - gets called every time the graph has to be updated
def updateGraph(self, data=-1, subsetData=-1, attrList=-1, **args):
# do we want to erase previous diagram?
if args.get("erasePrevious", 1):
for item in list(self.canvas.items()):
if not isinstance(item, SelectionRectangle):
self.canvas.removeItem(item) # remove all canvas items, except SelectionCurves
self.names = []
if data == -1:
data = self.data
if subsetData == -1:
subsetData = self.subset_data
if attrList == -1:
attrList = [self.variable1, self.variable2, self.variable3, self.variable4]
if data == None: return
while "(None)" in attrList: attrList.remove("(None)")
while "" in attrList: attrList.remove("")
if attrList == []:
return
selectList = attrList
if type(data) == SqlTable and data.domain.class_var:
cv = data.domain.class_var # save class_var, because it gets cleared in the next line (when data is a SqlTable)
data = data[:, attrList + [data.domain.class_var]]
data.domain.class_var = cv
elif data.domain.class_var:
cv = data.domain.class_var # save class_var, because it gets cleared in the next line (when a selected attribute is the class_var)
name = data.name
data = data[:, attrList + [data.domain.class_var]]
data.domain.class_var = cv
data.name = name
else:
data = data[:, attrList]
# TODO: check what is going on with this
# data = Preprocessor_dropMissing(data)
if len(data) == 0:
self.warning(5,
"No data instances with valid values for currently visualized attributes.")
return
else:
self.warning(5)
self.aprioriDistributions = []
if self.interior_coloring == PEARSON:
self.aprioriDistributions = [get_distribution(data, attr) for attr in attrList]
if args.get("positions"):
xOff, yOff, squareSize = args.get("positions")
else:
# get the maximum width of rectangle
xOff = 50
width = 50
if len(attrList) > 1:
text = OWCanvasText(self.canvas, attrList[1], bold=1, show=0)
width = text.boundingRect().height() + 30 + 20
xOff = width
if len(attrList) == 4:
text = OWCanvasText(self.canvas, attrList[3], bold=1, show=0)
width += text.boundingRect().height() + 30 + 20
# get the maximum height of rectangle
height = 90
yOff = 40
squareSize = min(self.canvas_view.width() - width - 20, self.canvas_view.height() - height - 20)
if squareSize < 0: return # canvas is too small to draw rectangles
self.canvas_view.setSceneRect(0, 0, self.canvas_view.width(), self.canvas_view.height())
self.legend = {} # dictionary that tells us, for what attributes did we already show the legend
for attr in attrList:
self.legend[attr] = 0
self.drawnSides = dict([(0, 0), (1, 0), (2, 0), (3, 0)])
self.drawPositions = {}
if not getattr(self, "attributeValuesDict", None):
self.attributeValuesDict = self.manualAttributeValuesDict
# compute distributions
self.conditionalDict = self.getConditionalDistributions(data, attrList)
self.conditionalDict[""] = len(data)
self.conditionalSubsetDict = None
if subsetData:
self.conditionalSubsetDict = self.getConditionalDistributions(subsetData, attrList)
self.conditionalSubsetDict[""] = len(subsetData)
# draw rectangles
self.DrawData(attrList, (xOff, xOff + squareSize), (yOff, yOff + squareSize), 0, "", len(attrList), **args)
if args.get("drawLegend", 1):
self.DrawLegend(data, (xOff, xOff + squareSize), (yOff, yOff + squareSize)) # draw class legend
if args.get("drillUpdateSelection", 1):
# self.optimizationDlg.mtUpdateState()
pass
# self.canvas.update()
# create a dictionary with all possible pairs of "combination-of-attr-values" : count
## TODO: this function is used both in owmosaic and owsieve --> where to put it?
def getConditionalDistributions(self, data, attrs):
cond_dist = defaultdict(int)
all_attrs = [data.domain[a] for a in attrs]
if data.domain.class_var is not None:
all_attrs.append(data.domain.class_var)
for i in range(1, len(all_attrs)+1):
attr = all_attrs[:i]
if type(data) == SqlTable:
# make all possible pairs of attributes + class_var
attr = [a.to_sql() for a in attr]
fields = attr + ["COUNT(*)"]
query = data._sql_query(fields, group_by=attr)
with data._execute_sql_query(query) as cur:
res = cur.fetchall()
for r in res:
str_values =[a.repr_val(a.to_val(x)) for a, x in zip(all_attrs, r[:-1])]
str_values = [x if x != '?' else 'None' for x in str_values]
cond_dist['-'.join(str_values)] = r[-1]
else:
for indices in product(*(range(len(a.values)) for a in attr)):
vals = []
conditions = []
for k, ind in enumerate(indices):
vals.append(attr[k].values[ind])
fd = filter.FilterDiscrete(column=attr[k], values=[attr[k].values[ind]])
conditions.append(fd)
filt = filter.Values(conditions)
filtdata = filt(data)
cond_dist['-'.join(vals)] = len(filtdata)
return cond_dist
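# Keys of the returned dict are attribute values joined by "-" for every
# prefix of attrs (with the class value appended last when present), e.g. for
# attrs = ["sex", "age"] (hypothetical data, counts for illustration only):
#   cond_dist["male"] == 12, cond_dist["male-young"] == 5, ...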
# ############################################################################
# ############################################################################
## DRAW DATA - draw rectangles for attributes in attrList inside rect (x0,x1), (y0,y1)
def DrawData(self, attrList, x0_x1, y0_y1, side, condition, totalAttrs, used_attrs=[], used_vals=[],
attrVals="", **args):
x0, x1 = x0_x1
y0, y1 = y0_y1
if self.conditionalDict[attrVals] == 0:
self.addRect(x0, x1, y0, y1, "", used_attrs, used_vals, attrVals=attrVals)
self.DrawText(side, attrList[0], (x0, x1), (y0, y1), totalAttrs, used_attrs, used_vals,
attrVals) # store coordinates for later drawing of labels
return
attr = attrList[0]
edge = len(attrList) * self._cellspace # how much smaller rectangles do we draw
values = self.attributeValuesDict.get(attr, None) or get_variable_values_sorted(self.data.domain[attr])
if side % 2: values = values[::-1] # reverse names if necessary
if side % 2 == 0: # we are drawing on the x axis
whole = max(0, (x1 - x0) - edge * (
len(values) - 1)) # we remove the space needed for separating different attr. values
if whole == 0: edge = (x1 - x0) / float(len(values) - 1)
else: # we are drawing on the y axis
whole = max(0, (y1 - y0) - edge * (len(values) - 1))
if whole == 0: edge = (y1 - y0) / float(len(values) - 1)
if attrVals == "":
counts = [self.conditionalDict[val] for val in values]
else:
counts = [self.conditionalDict[attrVals + "-" + val] for val in values]
total = sum(counts)
# if we are visualizing the third attribute and the first attribute has the last value, we have to reverse the order in which the boxes will be drawn
# otherwise, if the last cell, nearest to the labels of the fourth attribute, is empty, we wouldn't be able to position the labels
valRange = list(range(len(values)))
if len(attrList + used_attrs) == 4 and len(used_attrs) == 2:
attr1Values = self.attributeValuesDict.get(used_attrs[0], None) or get_variable_values_sorted(
self.data.domain[used_attrs[0]])
if used_vals[0] == attr1Values[-1]:
valRange = valRange[::-1]
for i in valRange:
start = i * edge + whole * float(sum(counts[:i]) / float(total))
end = i * edge + whole * float(sum(counts[:i + 1]) / float(total))
val = values[i]
htmlVal = getHtmlCompatibleString(val)
if attrVals != "":
newAttrVals = attrVals + "-" + val
else:
newAttrVals = val
if side % 2 == 0: # if we are moving horizontally
if len(attrList) == 1:
self.addRect(x0 + start, x0 + end, y0, y1,
condition + 4 * " " + attr + ": <b>" + htmlVal + "</b><br>", used_attrs + [attr],
used_vals + [val], newAttrVals, **args)
else:
self.DrawData(attrList[1:], (x0 + start, x0 + end), (y0, y1), side + 1,
condition + 4 * " " + attr + ": <b>" + htmlVal + "</b><br>", totalAttrs,
used_attrs + [attr], used_vals + [val], newAttrVals, **args)
else:
if len(attrList) == 1:
self.addRect(x0, x1, y0 + start, y0 + end,
condition + 4 * " " + attr + ": <b> " + htmlVal + "</b><br>", used_attrs + [attr],
used_vals + [val], newAttrVals, **args)
else:
self.DrawData(attrList[1:], (x0, x1), (y0 + start, y0 + end), side + 1,
condition + 4 * " " + attr + ": <b>" + htmlVal + "</b><br>", totalAttrs,
used_attrs + [attr], used_vals + [val], newAttrVals, **args)
self.DrawText(side, attrList[0], (x0, x1), (y0, y1), totalAttrs, used_attrs, used_vals, attrVals)
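# Note: DrawData recurses once per attribute; even recursion depths (side)
# split the current rectangle along the x axis and odd depths along the y
# axis, which yields the nested mosaic layout for up to four attributes.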
######################################################################
## DRAW TEXT - draw legend for all attributes in attrList and their possible values
def DrawText(self, side, attr, x0_x1, y0_y1, totalAttrs, used_attrs, used_vals, attrVals):
x0, x1 = x0_x1
y0, y1 = y0_y1
if self.drawnSides[side]: return
# the text on the right will be drawn when we are processing visualization of the last value of the first attribute
if side == RIGHT:
attr1Values = self.attributeValuesDict.get(used_attrs[0], None) or get_variable_values_sorted(
self.data.domain[used_attrs[0]])
if used_vals[0] != attr1Values[-1]:
return
if not self.conditionalDict[attrVals]:
if side not in self.drawPositions: self.drawPositions[side] = (x0, x1, y0, y1)
return
else:
if side in self.drawPositions: (x0, x1, y0, y1) = self.drawPositions[
side] # restore the positions where we have to draw the attribute values and attribute name
self.drawnSides[side] = 1
values = self.attributeValuesDict.get(attr, None) or get_variable_values_sorted(self.data.domain[attr])
if side % 2: values = values[::-1]
width = x1 - x0 - (side % 2 == 0) * self._cellspace * (totalAttrs - side) * (len(values) - 1)
height = y1 - y0 - (side % 2 == 1) * self._cellspace * (totalAttrs - side) * (len(values) - 1)
#calculate position of first attribute
currPos = 0
if attrVals == "":
counts = [self.conditionalDict.get(val, 1) for val in values]
else:
counts = [self.conditionalDict.get(attrVals + "-" + val, 1) for val in values]
total = sum(counts)
if total == 0:
counts = [1] * len(values)
total = sum(counts)
max_ylabel_w1 = 0
max_ylabel_w2 = 0
for i in range(len(values)):
val = values[i]
perc = counts[i] / float(total)
if side == 0:
OWCanvasText(self.canvas, str(val), x0 + currPos + width * 0.5 * perc, y1 + self.attributeValueOffset,
Qt.AlignCenter, bold=0)
elif side == 1:
t = OWCanvasText(self.canvas, str(val), x0 - self.attributeValueOffset, y0 + currPos + height * 0.5 * perc,
Qt.AlignRight | Qt.AlignVCenter, bold=0)
max_ylabel_w1 = max(int(t.boundingRect().width()), max_ylabel_w1)
elif side == 2:
OWCanvasText(self.canvas, str(val), x0 + currPos + width * perc * 0.5, y0 - self.attributeValueOffset,
Qt.AlignCenter, bold=0)
else:
t = OWCanvasText(self.canvas, str(val), x1 + self.attributeValueOffset, y0 + currPos + height * 0.5 * perc,
Qt.AlignLeft | Qt.AlignVCenter, bold=0)
max_ylabel_w2 = max(int(t.boundingRect().width()), max_ylabel_w2)
if side % 2 == 0:
currPos += perc * width + self._cellspace * (totalAttrs - side)
else:
currPos += perc * height + self._cellspace * (totalAttrs - side)
if side == 0:
OWCanvasText(self.canvas, attr, x0 + (x1 - x0) / 2, y1 + self.attributeNameOffset, Qt.AlignCenter, bold=1)
elif side == 1:
OWCanvasText(self.canvas, attr, max(x0 - max_ylabel_w1 - self.attributeValueOffset, 20), y0 + (y1 - y0) / 2,
Qt.AlignRight | Qt.AlignVCenter, bold=1, vertical=True)
elif side == 2:
OWCanvasText(self.canvas, attr, x0 + (x1 - x0) / 2, y0 - self.attributeNameOffset, Qt.AlignCenter, bold=1)
else:
OWCanvasText(self.canvas, attr, min(x1+50, x1 + max_ylabel_w2 + self.attributeValueOffset), y0 + (y1 - y0) / 2,
Qt.AlignLeft | Qt.AlignVCenter, bold=1, vertical=True)
# draw a rectangle, set it to back and add it to rect list
def addRect(self, x0, x1, y0, y1, condition="", used_attrs=[], used_vals=[], attrVals="", **args):
if x0 == x1:
x1 += 1
if y0 == y1:
y1 += 1
if x1 - x0 + y1 - y0 == 2:
y1 += 1 # a rectangle with width and height 1 is not rendered at all, so in such cases we increase the size of one edge
if ("selectionDict" in args and
tuple(used_vals) in args["selectionDict"]):
d = 2
OWCanvasRectangle(self.canvas, x0 - d, y0 - d, x1 - x0 + 1 + 2 * d, y1 - y0 + 1 + 2 * d,
penColor=args["selectionDict"][tuple(used_vals)], penWidth=2, z=-100)
# if we have selected a rule that contains this combination of attr values then show a kind of selection of this rectangle
if self.activeRule and len(used_attrs) == len(self.activeRule[0]) and sum(
[v in used_attrs for v in self.activeRule[0]]) == len(self.activeRule[0]):
for vals in self.activeRule[1]:
if used_vals == [vals[self.activeRule[0].index(a)] for a in used_attrs]:
values = list(
self.attributeValuesDict.get(self.data.domain.class_var.name, [])) or get_variable_values_sorted(
self.data.domain.class_var)
counts = [self.conditionalDict[attrVals + "-" + val] for val in values]
d = 2
r = OWCanvasRectangle(self.canvas, x0 - d, y0 - d, x1 - x0 + 2 * d + 1, y1 - y0 + 2 * d + 1, z=50)
r.setPen(QPen(self.colorPalette[counts.index(max(counts))], 2, Qt.DashLine))
aprioriDist = ()
pearson = None
expected = None
outerRect = OWCanvasRectangle(self.canvas, x0, y0, x1 - x0, y1 - y0, z=30)
if not self.conditionalDict[attrVals]: return
# we have to remember which conditions were new in this update so that when we right click we can only remove the last added selections
if self.selectionRectangle != None and self.selectionRectangle.collidesWithItem(outerRect) and tuple(
used_vals) not in self.selectionConditions:
self.recentlyAdded = getattr(self, "recentlyAdded", []) + [tuple(used_vals)]
self.selectionConditions = self.selectionConditions + [tuple(used_vals)]
# show rectangle selected or not
if tuple(used_vals) in self.selectionConditions:
outerRect.setPen(QPen(Qt.black, 3, Qt.DotLine))
if self.interior_coloring == CLASS_DISTRIBUTION and (
not self.data.domain.class_var or not isinstance(self.data.domain.class_var, DiscreteVariable)):
return
# draw pearsons residuals
if self.interior_coloring == PEARSON or not self.data.domain.class_var or not isinstance(self.data.domain.class_var, DiscreteVariable):
s = sum(self.aprioriDistributions[0])
expected = s * reduce(lambda x, y: x * y,
[self.aprioriDistributions[i][used_vals[i]] / float(s) for i in range(len(used_vals))])
actual = self.conditionalDict[attrVals]
pearson = float(actual - expected) / sqrt(expected)
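# Standardized (Pearson) residual: (observed - expected) / sqrt(expected).
# The |residual| thresholds 2, 4 and 8 below only select one of the four
# colour shades; they do not change the statistic itself.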
if abs(pearson) < 2:
ind = 0
elif abs(pearson) < 4:
ind = 1
elif abs(pearson) < 8:
ind = 2
else:
ind = 3
if pearson > 0:
color = self.blue_colors[ind]
else:
color = self.red_colors[ind]
OWCanvasRectangle(self.canvas, x0, y0, x1 - x0, y1 - y0, color, color, z=-20)
# draw class distribution - actual and apriori
# we do have a discrete class
else:
clsValues = list(
self.attributeValuesDict.get(self.data.domain.class_var.name, [])) or get_variable_values_sorted(
self.data.domain.class_var)
aprioriDist = get_distribution(self.data, self.data.domain.class_var.name)
total = 0
for i in range(len(clsValues)):
val = self.conditionalDict[attrVals + "-" + clsValues[i]]
if val == 0:
continue
if i == len(clsValues) - 1:
v = y1 - y0 - total
else:
v = ((y1 - y0) * val) / self.conditionalDict[attrVals]
OWCanvasRectangle(self.canvas, x0, y0 + total, x1 - x0, v, self.colorPalette[i],
self.colorPalette[i], z=-20)
total += v
# show apriori boxes and lines
if (self.show_apriori_distribution_lines or self.use_boxes) and \
abs(x1 - x0) > self._box_size and \
abs(y1 - y0) > self._box_size:
apriori = [aprioriDist[val] / float(len(self.data))
for val in clsValues]
if self.show_apriori_distribution_boxes or \
self.data.domain.class_var.name in used_attrs:
box_counts = apriori
else:
contingencies = \
self.optimizationDlg.getContingencys(used_attrs)
box_counts = []
for clsVal in clsValues:
# compute: P(c_i) * prod (P(c_i|attr_k) / P(c_i))
# for each class value
pci = aprioriDist[clsVal] / float(sum(aprioriDist.values()))
tempVal = pci
if pci > 0:
#tempVal = 1.0 / Pci
for ua, uv in zip(used_attrs, used_vals):
tempVal *= contingencies[ua][uv] / pci
box_counts.append(tempVal)
#boxCounts.append(aprioriDist[val]/float(sum(aprioriDist.values())) * reduce(operator.mul, [contingencies[used_attrs[i]][used_vals[i]][clsVal]/float(sum(contingencies[used_attrs[i]][used_vals[i]].values())) for i in range(len(used_attrs))]))
total1 = 0
total2 = 0
if self.use_boxes:
OWCanvasLine(self.canvas, x0 + self._box_size, y0, x0 + self._box_size, y1, z=30)
for i in range(len(clsValues)):
val1 = apriori[i]
if self.show_apriori_distribution_boxes:
val2 = apriori[i]
else:
val2 = box_counts[i] / float(sum(box_counts))
if i == len(clsValues) - 1:
v1 = y1 - y0 - total1
v2 = y1 - y0 - total2
else:
v1 = (y1 - y0) * val1
v2 = (y1 - y0) * val2
x, y, w, h, xL1, yL1, xL2, yL2 = x0, y0 + total2, self._box_size, v2, x0, y0 + total1 + v1, x1, y0 + total1 + v1
if self.use_boxes:
OWCanvasRectangle(self.canvas, x, y, w, h, self.colorPalette[i], self.colorPalette[i], z=20)
if i < len(clsValues) - 1 and self.show_apriori_distribution_lines:
OWCanvasLine(self.canvas, xL1, yL1, xL2, yL2, z=10, penColor=self._apriori_pen_color)
total1 += v1
total2 += v2
# show subset distribution
if self.conditionalSubsetDict:
# show a rect around the box if subset examples belong to this box
if self.conditionalSubsetDict[attrVals]:
#counts = [self.conditionalSubsetDict[attrVals + "-" + val] for val in clsValues]
#if sum(counts) == 1: color = self.colorPalette[counts.index(1)]
#else: color = Qt.black
#OWCanvasRectangle(self.canvas, x0-2, y0-2, x1-x0+5, y1-y0+5, color, QColor(Qt.white), penWidth = 2, z=-50, penStyle = Qt.DashLine)
counts = [self.conditionalSubsetDict[attrVals + "-" + val] for val in clsValues]
if sum(counts) == 1:
OWCanvasRectangle(self.canvas, x0 - 2, y0 - 2, x1 - x0 + 5, y1 - y0 + 5,
self.colorPalette[counts.index(1)], QColor(Qt.white), penWidth=2, z=-50,
penStyle=Qt.DashLine)
if self.show_subset_data_boxes: # do we want to show exact distribution in the right edge of each cell
OWCanvasLine(self.canvas, x1 - self._box_size, y0, x1 - self._box_size, y1, z=30)
total = 0
for i in range(len(aprioriDist)):
val = self.conditionalSubsetDict[attrVals + "-" + clsValues[i]]
if not self.conditionalSubsetDict[attrVals] or val == 0: continue
if i == len(aprioriDist) - 1:
v = y1 - y0 - total
else:
v = ((y1 - y0) * val) / float(self.conditionalSubsetDict[attrVals])
OWCanvasRectangle(self.canvas, x1 - self._box_size, y0 + total, self._box_size, v,
self.colorPalette[i], self.colorPalette[i], z=15)
total += v
tooltipText = "Examples in this area have:<br>" + condition
if any(aprioriDist):
clsValues = list(
self.attributeValuesDict.get(self.data.domain.class_var.name, [])) or get_variable_values_sorted(
self.data.domain.class_var)
actual = [self.conditionalDict[attrVals + "-" + clsValues[i]] for i in range(len(aprioriDist))]
if sum(actual) > 0:
apriori = [aprioriDist[key] for key in clsValues]
aprioriText = ""
actualText = ""
text = ""
for i in range(len(clsValues)):
text += 4 * " " + "<b>%s</b>: %d / %.1f%% (Expected %.1f / %.1f%%)<br>" % (
clsValues[i], actual[i], 100.0 * actual[i] / float(sum(actual)),
(apriori[i] * sum(actual)) / float(sum(apriori)), 100.0 * apriori[i] / float(sum(apriori)))
tooltipText += "Number of examples: " + str(int(sum(actual))) + "<br> Class distribution:<br>" + text[
:-4]
elif pearson and expected:
tooltipText += "<hr>Expected number of examples: %.1f<br>Actual number of examples: %d<br>Standardized (Pearson) residual: %.1f" % (
expected, self.conditionalDict[attrVals], pearson)
outerRect.setToolTip(tooltipText)
# draw the class legend below the square
def DrawLegend(self, data, x0_x1, y0_y1):
x0, x1 = x0_x1
y0, y1 = y0_y1
if self.interior_coloring == CLASS_DISTRIBUTION and (
not data.domain.class_var or isinstance(data.domain.class_var, ContinuousVariable)):
return
if self.interior_coloring == PEARSON:
names = ["<-8", "-8:-4", "-4:-2", "-2:2", "2:4", "4:8", ">8", "Residuals:"]
colors = self.red_colors[::-1] + self.blue_colors[1:]
else:
names = (list(self.attributeValuesDict.get(data.domain.class_var.name, [])) or get_variable_values_sorted(
data.domain.class_var)) + [data.domain.class_var.name + ":"]
colors = [self.colorPalette[i] for i in range(len(data.domain.class_var.values))]
self.names = [OWCanvasText(self.canvas, name, alignment=Qt.AlignVCenter) for name in names]
totalWidth = sum([text.boundingRect().width() for text in self.names])
# compute the x position of the center of the legend
y = y1 + self.attributeNameOffset + 20
distance = 30
startX = (x0 + x1) / 2 - (totalWidth + (len(names)) * distance) / 2
self.names[-1].setPos(startX + 15, y)
self.names[-1].show()
xOffset = self.names[-1].boundingRect().width() + distance
size = 8 # 8 + 8*(self.interiorColoring == PEARSON)
for i in range(len(names) - 1):
if self.interior_coloring == PEARSON:
edgeColor = Qt.black
else:
edgeColor = colors[i]
OWCanvasRectangle(self.canvas, startX + xOffset, y - size / 2, size, size, edgeColor, colors[i])
self.names[i].setPos(startX + xOffset + 10, y)
xOffset += distance + self.names[i].boundingRect().width()
# def saveToFileCanvas(self):
# sizeDlg = OWDlgs.OWChooseImageSizeDlg(self.canvas, parent=self)
# sizeDlg.exec_()
def setColors(self):
dlg = self.createColorDialog()
if dlg.exec_():
self.color_settings = dlg.getColorSchemas()
self.selected_schema_index = dlg.selectedSchemaIndex
self.colorPalette = dlg.getDiscretePalette("discPalette")
if self.data and self.data.domain.class_var and isinstance(self.data.domain.class_var, DiscreteVariable):
self.colorPalette.set_number_of_colors(len(self.data.domain.class_var.values))
self.updateGraph()
def createColorDialog(self):
c = ColorPaletteDlg(self, "Color Palette")
c.createDiscretePalette("discPalette", "Discrete Palette",
DefaultRGBColors) #defaultColorBrewerPalette)
c.setColorSchemas(self.color_settings, self.selected_schema_index)
return c
# ########################################
# cell/example selection
def sendSelectedData(self):
# send the selected examples
self.send("Selected Data", self.getSelectedExamples())
# add a new rectangle. update the graph and see which mosaics it intersects. add those mosaics to the recentlyAdded list
def addSelection(self, rect):
self.selectionRectangle = rect
self.updateGraph(drillUpdateSelection=0)
self.sendSelectedData()
if getattr(self, "recentlyAdded", []):
self.selectionConditionsHistorically = self.selectionConditionsHistorically + [self.recentlyAdded]
self.recentlyAdded = []
# self.optimizationDlg.mtUpdateState() # we have already called this in self.updateGraph() call
self.selectionRectangle = None
# remove the mosaics that were added with the last selection rectangle
def removeLastSelection(self):
if self.selectionConditionsHistorically:
vals = self.selectionConditionsHistorically.pop()
for val in vals:
if tuple(val) in self.selectionConditions:
self.selectionConditions.remove(tuple(val))
self.updateGraph()
## self.optimizationDlg.mtUpdateState() # we have already called this in self.updateGraph() call
self.sendSelectedData()
def removeAllSelections(self):
self.selectionConditions = []
self.selectionConditionsHistorically = []
## self.optimizationDlg.mtUpdateState() # removeAllSelections is always called before updateGraph() - where mtUpdateState is called
self.sendSelectedData()
# return examples in currently selected boxes as example table or array of 0/1 values
def getSelectedExamples(self, asExampleTable=1, negate=0, selectionConditions=None, data=None, attrs=None):
if attrs == None: attrs = self.getShownAttributeList()
if data == None: data = self.data
if selectionConditions == None: selectionConditions = self.selectionConditions
if attrs == [] or not data:
return None
# TODO: look into what is going on with this
# pp = orange.Preprocessor_take()
sumIndices = numpy.zeros(len(data))
# for val in selectionConditions:
# for i, attr in enumerate(attrs):
# pp.values[data.domain[attr]] = val[i]
# indices = numpy.array(pp.selectionVector(data))
# sumIndices += indices
selectedIndices = list(numpy.where(sumIndices > 0, 1 - negate, 0 + negate))
# if asExampleTable:
# return data.selectref(selectedIndices)
# else:
# return selectedIndices
def saveSettings(self):
OWWidget.saveSettings(self)
# self.optimizationDlg.saveSettings()
class SortAttributeValuesDlg(OWWidget):
def __init__(self, attr="", valueList=[]):
super().__init__(self)
self.setLayout(QVBoxLayout())
#self.space = QWidget(self)
#self.layout = QVBoxLayout(self, 4)
#self.layout.addWidget(self.space)
box1 = gui.widgetBox(self, "Select Value Order for Attribute \"" + attr + '"', orientation="horizontal")
self.attributeList = gui.listBox(box1, self, selectionMode=QListWidget.ExtendedSelection, enableDragDrop=1)
self.attributeList.addItems(valueList)
vbox = gui.widgetBox(box1, "", orientation="vertical")
self.buttonUPAttr = gui.button(vbox, self, "", callback=self.moveAttrUP,
tooltip="Move selected attribute values up")
self.buttonDOWNAttr = gui.button(vbox, self, "", callback=self.moveAttrDOWN,
tooltip="Move selected attribute values down")
self.buttonUPAttr.setIcon(QIcon(os.path.join(environ.widget_install_dir, "icons/Dlg_up3.png")))
self.buttonUPAttr.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding))
self.buttonUPAttr.setFixedWidth(40)
self.buttonDOWNAttr.setIcon(QIcon(os.path.join(environ.widget_install_dir, "icons/Dlg_down3.png")))
self.buttonDOWNAttr.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding))
self.buttonDOWNAttr.setFixedWidth(40)
box2 = gui.widgetBox(self, 1, orientation="horizontal")
self.okButton = gui.button(box2, self, "OK", callback=self.accept)
self.cancelButton = gui.button(box2, self, "Cancel", callback=self.reject)
self.resize(300, 300)
# move selected attribute values
def moveAttrUP(self):
for i in range(1, self.attributeList.count()):
if self.attributeList.item(i).isSelected():
self.attributeList.insertItem(i - 1, self.attributeList.item(i).text())
self.attributeList.takeItem(i + 1)
self.attributeList.item(i - 1).setSelected(True)
def moveAttrDOWN(self):
for i in range(self.attributeList.count() - 2, -1, -1):
if self.attributeList.item(i).isSelected():
self.attributeList.insertItem(i + 2, self.attributeList.item(i).text())
self.attributeList.item(i + 2).setSelected(True)
self.attributeList.takeItem(i)
class OWCanvasText(QGraphicsTextItem):
def __init__(self, canvas, text="", x=0, y=0, alignment=Qt.AlignLeft | Qt.AlignTop, bold=0, font=None, z=0,
htmlText=None, tooltip=None, show=1, vertical=False):
QGraphicsTextItem.__init__(self, text, None, canvas)
if font:
self.setFont(font)
if bold:
font = self.font()
font.setBold(bold)
self.setFont(font)
if htmlText:
self.setHtml(htmlText)
self.alignment = alignment
self.vertical = vertical
if vertical:
self.setRotation(-90)
self.setPos(x, y)
self.x, self.y = x, y
self.setZValue(z)
if tooltip: self.setToolTip(tooltip)
if show:
self.show()
else:
self.hide()
def setPos(self, x, y):
self.x, self.y = x, y
rect = QGraphicsTextItem.boundingRect(self)
if self.vertical:
h, w = rect.height(), rect.width()
rect.setWidth(h)
rect.setHeight(-w)
if int(self.alignment & Qt.AlignRight):
x -= rect.width()
elif int(self.alignment & Qt.AlignHCenter):
x -= rect.width() / 2.
if int(self.alignment & Qt.AlignBottom):
y -= rect.height()
elif int(self.alignment & Qt.AlignVCenter):
y -= rect.height() / 2.
QGraphicsTextItem.setPos(self, x, y)
def OWCanvasRectangle(canvas, x=0, y=0, width=0, height=0, penColor=QColor(128, 128, 128), brushColor=None, penWidth=1, z=0,
penStyle=Qt.SolidLine, pen=None, tooltip=None, show=1):
rect = QGraphicsRectItem(x, y, width, height, None, canvas)
if brushColor: rect.setBrush(QBrush(brushColor))
if pen:
rect.setPen(pen)
else:
rect.setPen(QPen(penColor, penWidth, penStyle))
rect.setZValue(z)
if tooltip: rect.setToolTip(tooltip)
if show:
rect.show()
else:
rect.hide()
return rect
def OWCanvasLine(canvas, x1=0, y1=0, x2=0, y2=0, penWidth=2, penColor=QColor(255, 255, 255, 128), pen=None, z=0, tooltip=None, show=1):
r = QGraphicsLineItem(x1, y1, x2, y2, None, canvas)
if pen != None:
r.setPen(pen)
else:
r.setPen(QPen(penColor, penWidth))
r.setZValue(z)
if tooltip: r.setToolTip(tooltip)
if show:
r.show()
else:
r.hide()
return r
def OWCanvasEllipse(canvas, x=0, y=0, width=0, height=0, penWidth=1, startAngle=0, angles=360, penColor=Qt.black,
brushColor=None, z=0, penStyle=Qt.SolidLine, pen=None, tooltip=None, show=1):
e = QGraphicsEllipseItem(x, y, width, height, None, canvas)
e.setZValue(z)
if brushColor != None:
e.setBrush(QBrush(brushColor))
if pen != None:
e.setPen(pen)
else:
e.setPen(QPen(penColor, penWidth))
e.setStartAngle(startAngle)
e.setSpanAngle(angles * 16)
if tooltip: e.setToolTip(tooltip)
if show:
e.show()
else:
e.hide()
return e
#test widget appearance
if __name__ == "__main__":
a = QApplication(sys.argv)
ow = OWMosaicDisplay()
ow.show()
# data = orange.ExampleTable(r"e:\Development\Orange Datasets\UCI\zoo.tab")
data = Table("zoo.tab")
ow.setData(data)
ow.handleNewSignals()
# for d in ["zoo.tab", "iris.tab", "zoo.tab"]:
# data = orange.ExampleTable(r"e:\Development\Orange Datasets\UCI\\" + d)
# ow.setData(data)
# ow.handleNewSignals()
a.exec_()
| gpl-3.0 | 4,299,228,967,826,711,600 | 44.049837 | 265 | 0.568615 | false |
nitzmahone/ansible | lib/ansible/modules/cloud/docker/docker_swarm.py | 5 | 18524 | #!/usr/bin/python
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: docker_swarm
short_description: Manage Swarm cluster
version_added: "2.7"
description:
- Create a new Swarm cluster.
- Add or remove nodes or managers in an existing cluster.
options:
advertise_addr:
description:
- Externally reachable address advertised to other nodes.
- This can either be an address/port combination
in the form C(192.168.1.1:4567), or an interface followed by a
port number, like C(eth0:4567).
- If the port number is omitted,
the port number from the listen address is used.
- If C(advertise_addr) is not specified, it will be automatically
detected when possible.
listen_addr:
description:
- Listen address used for inter-manager communication.
- This can either be an address/port combination in the form
C(192.168.1.1:4567), or an interface followed by a port number,
like C(eth0:4567).
- If the port number is omitted, the default swarm listening port
is used.
default: 0.0.0.0:2377
force:
description:
- Use with state C(present) to force creating a new Swarm, even if already part of one.
- Use with state C(absent) to leave the swarm even if this node is a manager.
type: bool
default: 'no'
state:
description:
- Set to C(present), to create/update a new cluster.
- Set to C(join), to join an existing cluster.
- Set to C(absent), to leave an existing cluster.
- Set to C(remove), to remove an absent node from the cluster.
- Set to C(inspect) to display swarm information.
required: true
default: present
choices:
- present
- join
- absent
- remove
- inspect
node_id:
description:
- Swarm id of the node to remove.
- Used with I(state=remove).
join_token:
description:
- Swarm token used to join a swarm cluster.
- Used with I(state=join).
remote_addrs:
description:
- Remote address of a manager to connect to.
- Used with I(state=join).
task_history_retention_limit:
description:
- Maximum number of tasks history stored.
- Docker default value is C(5).
snapshot_interval:
description:
- Number of logs entries between snapshot.
- Docker default value is C(10000).
keep_old_snapshots:
description:
- Number of snapshots to keep beyond the current snapshot.
- Docker default value is C(0).
log_entries_for_slow_followers:
description:
- Number of log entries to keep around to sync up slow followers after a snapshot is created.
heartbeat_tick:
description:
- Amount of ticks (in seconds) between each heartbeat.
- Docker default value is C(1s).
election_tick:
description:
- Amount of ticks (in seconds) needed without a leader to trigger a new election.
- Docker default value is C(10s).
dispatcher_heartbeat_period:
description:
- The delay for an agent to send a heartbeat to the dispatcher.
- Docker default value is C(5s).
node_cert_expiry:
description:
- Automatic expiry for nodes certificates.
- Docker default value is C(3months).
name:
description:
- The name of the swarm.
labels:
description:
- User-defined key/value metadata.
signing_ca_cert:
description:
- The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format.
signing_ca_key:
description:
- The desired signing CA key for all swarm node TLS leaf certificates, in PEM format.
ca_force_rotate:
description:
- An integer whose purpose is to force swarm to generate a new signing CA certificate and key,
if none have been specified.
- Docker default value is C(0).
autolock_managers:
description:
- If set, generate a key and use it to lock data stored on the managers.
- Docker default value is C(no).
type: bool
rotate_worker_token:
description: Rotate the worker join token.
type: bool
default: 'no'
rotate_manager_token:
description: Rotate the manager join token.
type: bool
default: 'no'
extends_documentation_fragment:
- docker
requirements:
- python >= 2.7
- "docker >= 2.6.0"
- "Please note that the L(docker-py,https://pypi.org/project/docker-py/) Python
module has been superseded by L(docker,https://pypi.org/project/docker/)
(see L(here,https://github.com/docker/docker-py/issues/1310) for details).
Version 2.1.0 or newer is only available with the C(docker) module."
- Docker API >= 1.35
author:
- Thierry Bouvet (@tbouvet)
'''
EXAMPLES = '''
- name: Init a new swarm with default parameters
docker_swarm:
state: present
- name: Update swarm configuration
docker_swarm:
state: present
election_tick: 5
- name: Add nodes
docker_swarm:
state: join
advertise_addr: 192.168.1.2
join_token: SWMTKN-1--xxxxx
remote_addrs: [ '192.168.1.1:2377' ]
- name: Leave swarm for a node
docker_swarm:
state: absent
- name: Remove a swarm manager
docker_swarm:
state: absent
force: true
- name: Remove node from swarm
docker_swarm:
state: remove
node_id: mynode
- name: Inspect swarm
docker_swarm:
state: inspect
register: swarm_info
'''
RETURN = '''
swarm_facts:
description: Information about the swarm.
returned: success
type: complex
contains:
JoinTokens:
description: Tokens to connect to the Swarm.
returned: success
type: complex
contains:
Worker:
description: Token to create a new I(worker) node
returned: success
type: str
example: SWMTKN-1--xxxxx
Manager:
description: Token to create a new I(manager) node
returned: success
type: str
example: SWMTKN-1--xxxxx
actions:
description: Provides the actions done on the swarm.
returned: when action failed.
type: list
example: "['This cluster is already a swarm cluster']"
'''
import json
from time import sleep
try:
from docker.errors import APIError
except ImportError:
# missing docker-py handled in ansible.module_utils.docker_common
pass
from ansible.module_utils.docker_common import AnsibleDockerClient, DockerBaseClass
from ansible.module_utils._text import to_native
class TaskParameters(DockerBaseClass):
def __init__(self, client):
super(TaskParameters, self).__init__()
self.state = None
self.advertise_addr = None
self.listen_addr = None
self.force_new_cluster = None
self.remote_addrs = None
self.join_token = None
# Spec
self.snapshot_interval = None
self.task_history_retention_limit = None
self.keep_old_snapshots = None
self.log_entries_for_slow_followers = None
self.heartbeat_tick = None
self.election_tick = None
self.dispatcher_heartbeat_period = None
self.node_cert_expiry = None
self.external_cas = None
self.name = None
self.labels = None
self.log_driver = None
self.signing_ca_cert = None
self.signing_ca_key = None
self.ca_force_rotate = None
self.autolock_managers = None
self.rotate_worker_token = None
self.rotate_manager_token = None
for key, value in client.module.params.items():
setattr(self, key, value)
self.update_parameters(client)
def update_parameters(self, client):
self.spec = client.create_swarm_spec(
snapshot_interval=self.snapshot_interval,
task_history_retention_limit=self.task_history_retention_limit,
keep_old_snapshots=self.keep_old_snapshots,
log_entries_for_slow_followers=self.log_entries_for_slow_followers,
heartbeat_tick=self.heartbeat_tick,
election_tick=self.election_tick,
dispatcher_heartbeat_period=self.dispatcher_heartbeat_period,
node_cert_expiry=self.node_cert_expiry,
name=self.name,
labels=self.labels,
signing_ca_cert=self.signing_ca_cert,
signing_ca_key=self.signing_ca_key,
ca_force_rotate=self.ca_force_rotate,
autolock_managers=self.autolock_managers,
log_driver=self.log_driver
)
class SwarmManager(DockerBaseClass):
def __init__(self, client, results):
super(SwarmManager, self).__init__()
self.client = client
self.results = results
self.check_mode = self.client.check_mode
self.parameters = TaskParameters(client)
def __call__(self):
choice_map = {
"present": self.init_swarm,
"join": self.join,
"absent": self.leave,
"remove": self.remove,
"inspect": self.inspect_swarm
}
choice_map.get(self.parameters.state)()
def __isSwarmManager(self):
try:
data = self.client.inspect_swarm()
json_str = json.dumps(data, ensure_ascii=False)
self.swarm_info = json.loads(json_str)
return True
except APIError:
return False
def inspect_swarm(self):
try:
data = self.client.inspect_swarm()
json_str = json.dumps(data, ensure_ascii=False)
self.swarm_info = json.loads(json_str)
self.results['changed'] = False
self.results['swarm_facts'] = self.swarm_info
except APIError:
return
def init_swarm(self):
if self.__isSwarmManager():
self.__update_swarm()
return
try:
self.client.init_swarm(
advertise_addr=self.parameters.advertise_addr, listen_addr=self.parameters.listen_addr,
force_new_cluster=self.parameters.force_new_cluster, swarm_spec=self.parameters.spec)
except APIError as exc:
self.client.fail(msg="Can not create a new Swarm Cluster: %s" % to_native(exc))
self.__isSwarmManager()
self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info['ID']))
self.results['changed'] = True
self.results['swarm_facts'] = {u'JoinTokens': self.swarm_info['JoinTokens']}
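# __update_spec backfills every option the user left unset from the swarm's
# current spec, so the comparison in __update_swarm only reports a change
# when an explicitly requested value actually differs.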
def __update_spec(self, spec):
if (self.parameters.node_cert_expiry is None):
self.parameters.node_cert_expiry = spec['CAConfig']['NodeCertExpiry']
if (self.parameters.dispatcher_heartbeat_period is None):
self.parameters.dispatcher_heartbeat_period = spec['Dispatcher']['HeartbeatPeriod']
if (self.parameters.snapshot_interval is None):
self.parameters.snapshot_interval = spec['Raft']['SnapshotInterval']
if (self.parameters.keep_old_snapshots is None):
self.parameters.keep_old_snapshots = spec['Raft']['KeepOldSnapshots']
if (self.parameters.heartbeat_tick is None):
self.parameters.heartbeat_tick = spec['Raft']['HeartbeatTick']
if (self.parameters.log_entries_for_slow_followers is None):
self.parameters.log_entries_for_slow_followers = spec['Raft']['LogEntriesForSlowFollowers']
if (self.parameters.election_tick is None):
self.parameters.election_tick = spec['Raft']['ElectionTick']
if (self.parameters.task_history_retention_limit is None):
self.parameters.task_history_retention_limit = spec['Orchestration']['TaskHistoryRetentionLimit']
if (self.parameters.autolock_managers is None):
self.parameters.autolock_managers = spec['EncryptionConfig']['AutoLockManagers']
if (self.parameters.name is None):
self.parameters.name = spec['Name']
if (self.parameters.labels is None):
self.parameters.labels = spec['Labels']
if 'LogDriver' in spec['TaskDefaults']:
self.parameters.log_driver = spec['TaskDefaults']['LogDriver']
self.parameters.update_parameters(self.client)
return self.parameters.spec
def __update_swarm(self):
try:
self.inspect_swarm()
version = self.swarm_info['Version']['Index']
spec = self.swarm_info['Spec']
new_spec = self.__update_spec(spec)
del spec['TaskDefaults']
if spec == new_spec:
self.results['actions'].append("No modification")
self.results['changed'] = False
return
self.client.update_swarm(
version=version, swarm_spec=new_spec, rotate_worker_token=self.parameters.rotate_worker_token,
rotate_manager_token=self.parameters.rotate_manager_token)
except APIError as exc:
self.client.fail(msg="Can not update a Swarm Cluster: %s" % to_native(exc))
return
self.inspect_swarm()
self.results['actions'].append("Swarm cluster updated")
self.results['changed'] = True
def __isSwarmNode(self):
info = self.client.info()
if info:
json_str = json.dumps(info, ensure_ascii=False)
self.swarm_info = json.loads(json_str)
if self.swarm_info['Swarm']['NodeID']:
return True
return False
def join(self):
if self.__isSwarmNode():
self.results['actions'].append("This node is already part of a swarm.")
return
try:
self.client.join_swarm(
remote_addrs=self.parameters.remote_addrs, join_token=self.parameters.join_token, listen_addr=self.parameters.listen_addr,
advertise_addr=self.parameters.advertise_addr)
except APIError as exc:
self.client.fail(msg="Can not join the Swarm Cluster: %s" % to_native(exc))
self.results['actions'].append("New node is added to swarm cluster")
self.results['changed'] = True
def leave(self):
if not(self.__isSwarmNode()):
self.results['actions'].append("This node is not part of a swarm.")
return
try:
self.client.leave_swarm(force=self.parameters.force)
except APIError as exc:
self.client.fail(msg="This node can not leave the Swarm Cluster: %s" % to_native(exc))
self.results['actions'].append("Node has leaved the swarm cluster")
self.results['changed'] = True
def __get_node_info(self):
try:
node_info = self.client.inspect_node(node_id=self.parameters.node_id)
except APIError as exc:
raise exc
json_str = json.dumps(node_info, ensure_ascii=False)
node_info = json.loads(json_str)
return node_info
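# Polls the node state up to 5 times, 5 seconds apart, and reports whether it
# reached the 'down' state; remove() refuses to remove a node that never does.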
def __check_node_is_down(self):
for _x in range(0, 5):
node_info = self.__get_node_info()
if node_info['Status']['State'] == 'down':
return True
sleep(5)
return False
def remove(self):
if not(self.__isSwarmManager()):
self.client.fail(msg="This node is not a manager.")
try:
status_down = self.__check_node_is_down()
except APIError:
return
if not(status_down):
self.client.fail(msg="Can not remove the node. The status node is ready and not down.")
try:
self.client.remove_node(node_id=self.parameters.node_id, force=self.parameters.force)
except APIError as exc:
self.client.fail(msg="Can not remove the node from the Swarm Cluster: %s" % to_native(exc))
self.results['actions'].append("Node is removed from swarm cluster.")
self.results['changed'] = True
def main():
argument_spec = dict(
advertise_addr=dict(type='str'),
state=dict(type='str', choices=['present', 'join', 'absent', 'remove', 'inspect'], default='present'),
force=dict(type='bool', default=False),
listen_addr=dict(type='str', default='0.0.0.0:2377'),
remote_addrs=dict(type='list', elements='str'),
join_token=dict(type='str'),
snapshot_interval=dict(type='int'),
task_history_retention_limit=dict(type='int'),
keep_old_snapshots=dict(type='int'),
log_entries_for_slow_followers=dict(type='int'),
heartbeat_tick=dict(type='int'),
election_tick=dict(type='int'),
dispatcher_heartbeat_period=dict(type='int'),
node_cert_expiry=dict(type='int'),
name=dict(type='str'),
labels=dict(type='dict'),
signing_ca_cert=dict(type='str'),
signing_ca_key=dict(type='str'),
ca_force_rotate=dict(type='int'),
autolock_managers=dict(type='bool'),
node_id=dict(type='str'),
rotate_worker_token=dict(type='bool', default=False),
rotate_manager_token=dict(type='bool', default=False)
)
required_if = [
('state', 'join', ['advertise_addr', 'remote_addrs', 'join_token']),
('state', 'remove', ['node_id'])
]
client = AnsibleDockerClient(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=required_if,
min_docker_version='2.6.0',
min_docker_api_version='1.35',
)
results = dict(
changed=False,
result='',
actions=[]
)
SwarmManager(client, results)()
client.module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 | -3,565,024,230,800,380,400 | 34.418738 | 138 | 0.60149 | false |
AlexMooney/pairsTournament | strategies/brianStrategies.py | 1 | 4218 | from __future__ import division
from math import ceil
from copy import copy
import numpy
class FoldLowWithHigh:
def __init__(self, fold, hand):
''' Strategy that folds when 'fold' is available if 'hand' or higher is in hand'''
self.fold = fold
self.hand = hand
def play(self, info):
# get best fold as tuple (playerIndex, card)
best = info.bestFold(self.player)
self.discards = info.discards
deck = info.deck
hand = tuple(self.player.stack)
# get current hand
stack = self.player.stack
if best[1] <= self.fold and max(stack) >= self.hand:
return best
return "Hit"
class pickoff:
'''
takes low cards.
'''
def __init__(self, high = 10, burn = 5):
self.cards = tuple(range(1, high + 1))
self.burn = burn
def play(self, info):
best = info.bestFold(self.player)
if best[1] <= 2:
return best
elif best[1] <= 4 and (max(self.player.stack) == 10 or (max(self.player.stack) >= 8 and len(self.player.stack) >= 3)):
return best
        elif (10-max(info.inStacks())) > best[1] and sum(info.inPoints())/info.noPlayers > 6:
return best
else:
return "booger"
class simpleExp:
'''
takes low cards.
'''
def __init__(self, high = 10, burn = 5):
self.cards = tuple(range(1, high + 1))
self.burn = burn
def play(self, info):
self.discards = info.discards
deck = info.deck
hand = tuple(self.player.stack)
best = info.bestFold(self.player)
if best[1] <= 2:
return best
elif best[1] <= sum([self._p_deal(c, deck) * c for c in hand]) and (11-sum(info.inPoints()))/info.noPlayers > best[1] :
return best
else:
return "booger"
def _p_deal(self, c, deck):
return deck.count(c) / len(deck)
class simpleExp2:
'''
takes low cards.
'''
def __init__(self, high = 10, burn = 5):
self.cards = tuple(range(1, high + 1))
self.burn = burn
def play(self, info):
self.discards = info.discards
deck = info.deck
hand = tuple(self.player.stack)
best = info.bestFold(self.player)
if best[1] <= 2:
return best
elif best[1] <= sum([self._p_deal(c, deck) * c for c in hand]):
return best
elif sum(info.inPoints())/info.noPlayers > 6 and best[1] < 6:
return best
else:
return "booger"
def _p_deal(self, c, deck):
return deck.count(c) / len(deck)
class noPeek:
'''
strategy not requiring deck
'''
def play(self, info):
self.discards = info.discards
deck = info.deck
hand = tuple(self.player.stack)
best = info.bestFold(self.player)
if (sum(hand) > 15 or max(hand)>7) and best[1] < 5:
return best
elif (sum(hand) > 16 or max(hand)>9) and best[1] < 6:
return best
elif sum([1 if sum(p.points)>4 and len(p.stack)>0 and max(p.stack) > (11 - sum(p.points)) else 0 for p in info.players]) > 2:
return best
else:
return "booger"
class trad:
'''
takes low cards.
'''
def play(self, info):
deck = info.deck
hand = tuple(self.player.stack)
best = info.bestFold(self.player)
points = tuple(self.player.points)
opp = [pl for pl in info.players if pl != self.player]
if sum(points) + max(hand) < 11:
return "booger"
elif sum([self._p_deal(c, deck) * c for c in hand]) > best[1] + 1 and sum([1 if sum([self._p_deal(c, deck) * c for c in hand]) > sum([self._p_deal(c, deck) * c for c in p.stack]) else 0 for p in opp] ) >= (info.noPlayers - 1):
return best
elif sum([self._p_deal(c, deck) * (c + sum(points) >=11) for c in hand]) > 1 - numpy.product([ 1 - sum([self._p_deal(c, deck) * (c + sum(p.points) >=11) for c in p.stack]) for p in opp] ):
return best
else:
return "booger"
def _p_deal(self, c, deck):
return deck.count(c) / len(deck)
| mit | -3,768,044,802,287,331,300 | 31.198473 | 235 | 0.537459 | false |
ChristineLaMuse/mozillians | vendor-local/lib/python/rest_framework/utils/formatting.py | 18 | 1955 | """
Utility functions to return a formatted name and description for a given view.
"""
from __future__ import unicode_literals
from django.utils.html import escape
from django.utils.safestring import mark_safe
from rest_framework.compat import apply_markdown
from rest_framework.settings import api_settings
from textwrap import dedent
import re
def remove_trailing_string(content, trailing):
"""
Strip trailing component `trailing` from `content` if it exists.
Used when generating names from view classes.
"""
if content.endswith(trailing) and content != trailing:
return content[:-len(trailing)]
return content
def dedent(content):
"""
Remove leading indent from a block of text.
Used when generating descriptions from docstrings.
Note that python's `textwrap.dedent` doesn't quite cut it,
as it fails to dedent multiline docstrings that include
unindented text on the initial line.
"""
whitespace_counts = [len(line) - len(line.lstrip(' '))
for line in content.splitlines()[1:] if line.lstrip()]
# unindent the content if needed
if whitespace_counts:
whitespace_pattern = '^' + (' ' * min(whitespace_counts))
content = re.sub(re.compile(whitespace_pattern, re.MULTILINE), '', content)
return content.strip()
def camelcase_to_spaces(content):
"""
Translate 'CamelCaseNames' to 'Camel Case Names'.
Used when generating names from view classes.
"""
camelcase_boundry = '(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))'
content = re.sub(camelcase_boundry, ' \\1', content).strip()
return ' '.join(content.split('_')).title()
def markup_description(description):
"""
Apply HTML markup to the given description.
"""
if apply_markdown:
description = apply_markdown(description)
else:
description = escape(description).replace('\n', '<br />')
return mark_safe(description)
| bsd-3-clause | -1,574,175,183,076,498,700 | 31.583333 | 83 | 0.672634 | false |
krvss/django-social-auth | social_auth/backends/amazon.py | 2 | 2835 | import base64
from urllib2 import Request, HTTPError
from urllib import urlencode
from django.utils import simplejson
from social_auth.backends import BaseOAuth2, OAuthBackend
from social_auth.utils import dsa_urlopen
from social_auth.exceptions import AuthTokenError
class AmazonBackend(OAuthBackend):
"""Amazon OAuth2 authentication backend"""
name = 'amazon'
# Default extra data to store
EXTRA_DATA = [
('user_id', 'user_id'),
('postal_code', 'postal_code')
]
ID_KEY = 'user_id'
def get_user_details(self, response):
"""Return user details from amazon account"""
name = response.get('name') or ''
first_name = ''
last_name = ''
if name and ' ' in name:
first_name, last_name = response.get('name').split(' ', 1)
else:
first_name = name
return {'username': name,
'email': response.get('email'),
'fullname': name,
'first_name': first_name,
'last_name': last_name}
class AmazonAuth(BaseOAuth2):
"""Amazon OAuth2 support"""
REDIRECT_STATE = False
AUTH_BACKEND = AmazonBackend
AUTHORIZATION_URL = 'http://www.amazon.com/ap/oa'
ACCESS_TOKEN_URL = 'https://api.amazon.com/auth/o2/token'
SETTINGS_KEY_NAME = 'AMAZON_APP_ID'
SETTINGS_SECRET_NAME = 'AMAZON_API_SECRET'
SCOPE_VAR_NAME = 'AMAZON_EXTENDED_PERMISSIONS'
DEFAULT_SCOPE = ['profile']
@classmethod
def refresh_token(cls, token, redirect_uri):
data = cls.refresh_token_params(token)
data['redirect_uri'] = redirect_uri
request = Request(cls.ACCESS_TOKEN_URL,
data=urlencode(data),
headers=cls.auth_headers())
return cls.process_refresh_token_response(dsa_urlopen(request).read())
def user_data(self, access_token, *args, **kwargs):
"""Grab user profile information from amazon."""
url = 'https://www.amazon.com/ap/user/profile?access_token=%s' % \
access_token
try:
response = simplejson.load(dsa_urlopen(Request(url)))
except ValueError:
return None
except HTTPError:
raise AuthTokenError(self)
else:
if 'Profile' in response:
response = {
'user_id': response['Profile']['CustomerId'],
'name': response['Profile']['Name'],
'email': response['Profile']['PrimaryEmail']
}
return response
@classmethod
def auth_headers(cls):
return {
'Authorization': 'Basic %s' % base64.urlsafe_b64encode(
'%s:%s' % cls.get_key_and_secret()
)
}
BACKENDS = {
'amazon': AmazonAuth
}
| bsd-3-clause | -2,360,941,574,021,773,300 | 31.215909 | 78 | 0.573898 | false |
the-zebulan/CodeWars | tests/kyu_8_tests/test_is_your_period_late.py | 1 | 1475 | import unittest
from datetime import date
from katas.kyu_8.is_your_period_late import period_is_late
class PeriodIsLateTestCase(unittest.TestCase):
def test_true_1(self):
self.assertTrue(period_is_late(
date(2016, 6, 13), date(2016, 7, 16), 28
))
def test_true_2(self):
self.assertTrue(period_is_late(
date(2016, 7, 12), date(2016, 8, 10), 28
))
def test_true_3(self):
self.assertTrue(period_is_late(
date(2016, 7, 1), date(2016, 8, 1), 30
))
def test_true_4(self):
self.assertTrue(period_is_late(
date(2016, 1, 1), date(2016, 2, 1), 30
))
def test_false_1(self):
self.assertFalse(period_is_late(
date(2016, 6, 13), date(2016, 7, 16), 35
))
def test_false_2(self):
self.assertFalse(period_is_late(
date(2016, 6, 13), date(2016, 7, 16), 35
))
def test_false_3(self):
self.assertFalse(period_is_late(
date(2016, 6, 13), date(2016, 6, 29), 28
))
def test_false_4(self):
self.assertFalse(period_is_late(
date(2016, 7, 12), date(2016, 8, 9), 28
))
def test_false_5(self):
self.assertFalse(period_is_late(
date(2016, 6, 1), date(2016, 6, 30), 30
))
def test_false_6(self):
self.assertFalse(period_is_late(
date(2016, 1, 1), date(2016, 1, 31), 30
))
| mit | 3,006,762,715,689,809,000 | 25.339286 | 58 | 0.534915 | false |
tkcroat/Augerquant | Development/Auger_under_development.py | 1 | 20665 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 6 10:16:40 2016
@author: tkc
"""
#%% Load modules
import matplotlib.pyplot as plt
import re, os, glob, sys, csv # already run with functions
import pandas as pd
import numpy as np
import scipy
import scipy.stats
from matplotlib.backends.backend_pdf import PdfPages # Only needed for plotting
from collections import defaultdict
from math import factorial # used by Savgol matrix
from PIL import Image, ImageDraw, ImageFont # needed for jpg creation
import datetime
import tkinter as tk
import statsmodels.formula.api as smf # has OLS module with convenient outliers tests
#%%
row=AESsumm.iloc[0]
test= compsummary(Smdifcomp, Peaks, Peaksexcl)
test.to_csv('test.csv',index=False)
myarr=np.asarray(Auger.S7D72)
detect_peaks(myarr, mph=None, mpd=1, threshold=-200, edge='rising',
kpsh=False, valley=True, show=True, ax=None)
def peakshiftcorr(Smdifpeakslog):
''' Measure correlations between peak shifts after removal of weak/absent peaks '''
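    # Hedged sketch (added, not in the original): 'Filenumber', 'Areanumber', 'PeakID' and
    # 'Shift' are assumed column names for Smdifpeakslog and may differ in practice.
    # Pivot so each peak's shift is its own column, drop mostly-absent peaks, then correlate.
    shifts=Smdifpeakslog.pivot_table(index=['Filenumber','Areanumber'], columns='PeakID', values='Shift')
    shifts=shifts.dropna(axis=1, thresh=int(0.5*len(shifts))) # drop weak/absent peaks (mostly NaN)
    return shifts.corr() # pairwise correlation matrix of peak shifts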
def packagequant():
''' Write backfit, integ, smdiff, AESquantparams to single Excel file '''
# Use statsmodels package for outlier detection
comp1=pd.read_csv('Smdifcomp.csv', encoding='cp437')
comp2=pd.read_csv('SmdifcompavgFe.csv', encoding='cp437')
# Find a way to do linear fits with least squares but with fixed intercept (through origin)
# web suggestion sez linalg.lstsq but that didn't work initially
# slope,intercept=np.polyfit(data1, data2, 1) numpy version
A=np.vstack([xdata, np.ones(len(xdata))]).T # x needs to be a column vector for linalg.lstsq. Not sure why these arrays need stacking
slope, _, _, _ = np.linalg.lstsq(A, ydata) # fixes intercept to zero?
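# Added hedged note (not in the original): stacking the ones column into A above gives
# lstsq a free intercept as well, so it does NOT force the fit through the origin.
# Dropping the ones column fixes the intercept at zero (slope-only model y = m*x):
A0=np.asarray(xdata).reshape(-1,1) # design matrix with only the x column
slope0, res0, rank0, sv0 = np.linalg.lstsq(A0, np.asarray(ydata)) # slope0[0] is the through-origin slope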
fig, axes = plt.subplots(nrows=1, ncols=1)
Augerfile.plot(x='Energy',y='Backfit1', color='r',ax=axes)
Augerfile.plot(x='Energy',y='Counts1', ax=axes)
print(slope1, errslope1, slope2, errslope2)
diff=comparelinfits(lowfitparams,upperfitparams, thresh=0.4)
# TESTING of linear peak fits
# params are Augerfile, lowfitparams, upperfitparams, fitbounds (4 points)
# setting up data range for fits
ptlist = [i for i in range(fitbounds[0],fitbounds[1])]
ptlist2 = [i for i in range(fitbounds[2],fitbounds[3])]
fullpts=ptlist+ptlist2
# slice the dataframe
Augerslice=Augerfile[Augerfile.index.isin(ptlist)]
Augerslice=Augerfile[Augerfile.index.isin(fullpts)]
# plot dataframe slice
fig, axes = plt.subplots(nrows=1, ncols=1)
Augerslice.plot.scatter(x='Energy',y='Counts1', ax=axes)
Augerslice.plot.scatter(x='Energy',y='Backfit1', ax=axes)
# plot dataframe slice
fig, axes = plt.subplots(nrows=1, ncols=1)
Compresult.plot.scatter(x='Sampl',y='Samplavg', ax=axes)
# Plot lower line
xdata=Augerslice.Energy
xdata=xdata.loc[fitbounds[0]:fitbounds[1]+1]
ydata=lowfitparams[0]*xdata+lowfitparams[2]
plt.plot(xdata, ydata, color='b')
# Plot w/ 2 stdev different slope
ymax=(lowfitparams[0]+lowfitparams[1])*xdata+lowfitparams[2]
plt.plot(xdata, ymax, color='r')
ymin=(lowfitparams[0]-lowfitparams[1])*xdata+lowfitparams[2]
plt.plot(xdata, ymin, color='r')
# Test fits with linregress
xdata=Augerslice.Energy
ydata=Augerslice.Counts1
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(xdata, ydata)
# NUMPY LINEAR FITS
z, residuals, rank, singular_values, rcond = np.polyfit(xdata, ydata, 1, full=True)
(slope,intercept), cov = np.polyfit(xdata, ydata, 1, cov=True)
sd_slope=np.sqrt(cov[0,0]) # variance of slope is element 0,0 in covariance matrix
sd_intercept=np.sqrt(cov[1,1]) # same for standard deviation of intercept (sqrt of variance)
# Plot upper line
xdata=Augerslice.Energy
xdata2=xdata.loc[fitbounds[0]:fitbounds[1]+1]
ydata2=lowfitparams[0]*xdata+lowfitparams[2]
plt.plot(xdata, ydata2, color='b', ax=axes)
ymax2=(lowfitparams[0]+lowfitparams[1])*xdata+lowfitparams[2]
plt.plot(xdata, ymax, color='r', ax=axes)
ymin2=(lowfitparams[0]+lowfitparams[1])*xdata+lowfitparams[2]
plt.plot(xdata, ymin2, color='r', ax=axes)
# saving slice of data (pass to origin)
Augerslice.to_csv('testfits.csv',index=False)
smooth_width=7
x1 = np.linspace(-3,3,smooth_width)
norm = np.sum(np.exp(-x1**2)) * (x1[1]-x1[0]) # ad hoc normalization
y1 = (4*x1**2 - 2) * np.exp(-x1**2) / smooth_width *8 #norm*(x1[1]-x1[0])
xdata=lowerfitpeak['Energy']
ydata=lowerfitpeak['Counts1'] # Counts1, Counts2 or whatever
regression= smf.ols("data ~ x", data=dict(data=ydata, x=xdata)).fit()
outliers=regression.outlier_test()
intercept,slope=regression.params
colnames=['Resid','Pval','Bonf']
outliers.columns=colnames # rename columns
xvals=outliers.index.values
outliers.plot.scatter(x=xvals, y='Resid') # same indices as input data
plt.scatter(outliers.index, outliers.Resid)
def findcombofiles(spelist):
'''Find repeated spe files to combine via averaging..
also ensure x,y,z stage match '''
# get list of basenames that have >1 associated spe
spelist['Basename']='' # add basename column
for index, row in spelist.iterrows():
fname=spelist.loc[index]['Filename']
basename=fname.split('.')[0]
spelist=spelist.set_value(index,'Basename',basename)
basenamelist=spelist.Basename.unique()
basenamelist=np.ndarray.tolist(basenamelist) # list of unique base names
combinelist=[]
for i, bname in enumerate(basenamelist):
match=spelist[spelist['Basename']==bname]
if len(match)>1: # only combinable if multiple matching spes with same basename
# ensure they're at same x, y,z positions
xvals=match.X.unique()
if len(xvals)!=1:
print(len(match),' spe files with basename ', bname,' but ', len(xvals), ' unique X stage positions!... not combined')
continue
firstfile=match.Filenumber.min() # assumes consecutive file numbering w/ Autotool
lastfile=match.Filenumber.max()
combinelist.append([bname, firstfile, lastfile])
return combinelist # list with basename, firstfile, lastfile
from operator import itemgetter
from itertools import groupby
def autocombinespe(AugerParamLog, movefiles=False):
''' automatic method of finding combinable measurements (same X,Y, areas, basename) and combine all areas separately
via averaging
naming convention of basename.1.csv, basename.2.csv, basename.3.csv etc to basename.13.csv
if str(AugerParamLog.loc[index]['Comments'])=='nan':
AugerParamLog=AugerParamLog.set_value(index,'Comments','')
'''
if 'Basename' not in AugerParamLog: # in case basename not already added
AugerParamLog['Basename']=''
for index, row in AugerParamLog.iterrows():
fname=AugerParamLog.loc[index]['Filename']
basename=fname.split('.')[0]
AugerParamLog=AugerParamLog.set_value(index,'Basename',basename)
for index, row in AugerParamLog.iterrows(): # avoids nan problems with avg string filter
if str(AugerParamLog.loc[index]['Comments'])=='nan':
AugerParamLog=AugerParamLog.set_value(index,'Comments','')
# generate list of combinable files (same basename)
basenamelist=AugerParamLog.Basename.unique()
basenamelist=np.ndarray.tolist(basenamelist) # list of unique base names
for i, bname in enumerate(basenamelist): # loop through each combinable basename
logmatches=AugerParamLog[AugerParamLog['Basename']==bname]
excludemask=logmatches['Comments'].str.contains('avg', case=False, na=False)
logmatches=logmatches.loc[~excludemask] # fails if all are nan
# Exclude files already produced by averaging (avg in comments)
if len(logmatches)>1: # only combinable if multiple matching spes with same basename
xvals=logmatches.X.unique() # ensure they're all at same x and y positions
xvals=np.ndarray.tolist(xvals) # most likely len=1[ but possibly different
for j, xval in enumerate(xvals):
xmatches=logmatches[logmatches['X']==xval]
if len(xmatches)==1: # single file and thus not combinable
continue
# ensure all have same number of areas (true for combinable spectra made with Autotool loops)
xmatches=xmatches[(xmatches['Areas']>=1)] # convenient way to drop .sem and .map
if len(xmatches.Areas.unique())!=1:
                    print('Different # of areas for spectra', bname, str(xmatches.Filenumber.min()),' to ', str(xmatches.Filenumber.max()))
continue
# TODO fix this check if filenumbers are consecutive (true for Autotool)
'''
filenums=xmatches.Filenumber.unique()
filenums=np.ndarray.tolist(filenums)
for key, group in groupby(enumerate(filenums), lambda x: x[0]-x[1]):
print(group)
mygroup = map(itemgetter(1), group)
print (map(itemgetter(1),group))
'''
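                # Hedged sketch (added, not in the original) of the consecutive-filenumber
                # check the TODO above describes; it only warns, so behavior is unchanged
                filenums=sorted(np.ndarray.tolist(xmatches.Filenumber.unique()))
                if filenums!=list(range(int(filenums[0]), int(filenums[0])+len(filenums))):
                    print('Warning: filenumbers for basename ', bname, ' are not consecutive')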
# now ready to combine this set of files
firstnum=xmatches.Filenumber.min() # assumes consecutive file numbering w/ Autotool
lastnum=xmatches.Filenumber.max()
csvname=bname+str(firstnum)+str(lastnum)+'.csv' # string for combined file based on first-last filenumber
if not os.path.isfile(csvname): # skip combined file creation if it already exists (however log is still regenerated)
avgcombinespe(xmatches,csvname) # combines above files and makes new averaged csv
print('Average-combined Auger spectra ', csvname, ' created.')
# make new entry for Augerparamlog
avgentry = avglogentry(xmatches) # create new logbook entry (series) for averaged spectrum (mostly from firstfile's info)
# append new Series entry to end of AugerParamLog
AugerParamLog=AugerParamLog.append(avgentry, ignore_index=True)
else:
print(csvname,' already exists')
if movefiles==True: # Moves all csv subfiles (those combined) into /sub directory
#TODO change this so that single AugerParamLog at top level remains... subs can be filtered out by "sub' in path
AugerParamLog=movespes(filelist, AugerParamLog) # shuffle csv sub files to subfolder and split Paramslog accordingly
return AugerParamLog
logmatches.to_csv('logmatches.csv',index=False)
def integpeaks(Augerfile, Backfitparams, areanum, Elemdata, Shifts, logmatch):
''' Background fit for each direct peak, shift is list of energy shifts of negpeak (same order as Eledata (opens source spectrum as Augerfile,
fits peak backgrounds above and below using Elemdata, saves background to source csv (overwrites existing fits), also saves linear fit params to logdataframe with position/amplitude/etc;
desired elements out of data range are skipped (in prior findindices function)
'''
#create Smdifpeaks dataframe for temp storage of each peak's params
Backfitparams=Backfitparams.dropna(subset=['Rval1']) # skip Gaussian fit if background fit fails
AugerFileName=logmatch.Filename #
# Create temp df to hold and pass linear fit data
mycols=['Filenumber', 'Filename', 'Filepath', 'Sample', 'Comments', 'Areanumber', 'Element', 'Integcounts',
'Backcounts', 'Significance', 'Xc', 'Width', 'Peakarea', 'Y0','Rsquared','Numchannels']
Integresults=pd.DataFrame(columns=mycols) # empty df for all integ results for elems in this spe file
peakname='Peaks'+str(areanum) # this is counts - background (but only calculated in vicinity of known elemental peaks)
backfitname='Backfit'+str(areanum)
# all fit regions modify fit region boundaries for this spectrum based on smooth-differentiated peak (2nd deriv, Savgol (poly=2, pts=11))
# global shifts from smdifpeaks and local shift based on smoothed 2nd derivative
# already incorporated into Elemdata values (lower1,2 and upper1,2 fully adjusted)
# loop through and fit all peaks for each element in this spatial area
for i, (elem, fittype, integpeak, lower1, lower2, upper1, upper2, kfact, integwidth, siglevel) in enumerate(Elemdata):
# linear fit below this elem's peak (shifts and adjustments already made)
Integresult=pd.DataFrame(index=np.arange(0,1),columns=mycols) # blank df row for this element
if i in Backfitparams.index: # skips integ calc if backfit is n/a (but does put n/a entries in datalog)
fitregion=Augerfile[lower1:upper2+1]
if fitregion.empty==True: # skip if no data present (already should be skipped in Elemdata)
continue
# addgauss if save of gaussian peak fit in Augerfile is desired
# Probably could skip Gaussian fitting entirely if peak is weak (check smdiff)
fitregion, fitparams, rsquared, ier = fitgauss(fitregion, areanum, integwidth, elem, AugerFileName, addgauss=True)
addgauss=True # maybe pass this arg from elsewhere
if addgauss==True and ier in [1,2,3,4]: # copy gaussian fit over to csv file if successful
gaussname="Gauss"+str(areanum)
if gaussname not in Augerfile.dtypes.index: # add col if not already present
Augerfile[gaussname]='' # add col for gaussian fit
# Copy gaussian fit to Augerfile... fitregion only modified in new Gauss peak fit column
Augerfile.loc[fitregion.index,fitregion.columns]=fitregion
# if gaussian fit is successful set center integration channel to index nearest xc
# ier flag of 1,2,3,4 if fit succeeds but rsquared threshold is better
if rsquared!='n/a': # skip integcounts calc but do put 'n/a' entries in df
if rsquared>0.4:
xc=fitparams[0] # center of gaussian fit
center=int(round(xc,0))
tempdf=fitregion[fitregion['Energy']==center]
try:
centerindex=tempdf[peakname].idxmax() # corresponding index # of peak maximum
except:
print('Gaussian fit center out of data range for ', elem, ' in ', AugerFileName)
centerindex=integpeak # backup method of finding center of integration region
else: # indication of poor Gaussian fit R2<0.4 (use prior knowledge of peak position)
print('Failed gaussian fit for ', elem, ' in ', AugerFileName)
# set center integration channel to value passed by integpeak
# this is ideal energy value but adjusted by shift found using smooth-diff quant method
centerindex=integpeak # already stores index number of central peak (ideal - sm-diff shift value)
# Still do the counts integration for poor gaussian fits
# perform integration over peak center channel + integwidth on either side
Augerpeak=Augerfile[centerindex-integwidth:centerindex+integwidth+1]
integcounts=Augerpeak[peakname].sum() # get counts sum
backgroundcnts=Augerpeak[backfitname].sum() # sum counts over identical width in background fit
# Used for peak significance i.e. typically 2 sigma of background integration over identical width
# full integ width is 1.2*FWHM but integwidth here is closest integer half-width
# Write fit params from tuple over to Integresult df
Integresult.iloc[0]['Integcounts']=integcounts
Integresult.iloc[0]['Backcounts']=backgroundcnts
Integresult.iloc[0]['Significance']=round(integcounts/(np.sqrt(backgroundcnts)),3)
# TODO add 2/sqrt(n) calc of associated percent error (also can calculate later)
Integresult.iloc[0]['Numchannels']=integwidth
Integresult.iloc[0]['Rsquared']=rsquared
Integresult.iloc[0]['Element']=elem
# These will be n/a if fit fails
Integresult.iloc[0]['Xc']=fitparams[0]
Integresult.iloc[0]['Width']=fitparams[1]
Integresult.iloc[0]['Peakarea']=fitparams[2]
Integresult.iloc[0]['Y0']=fitparams[3]
Integresults=Integresults.append(Integresult, ignore_index=True) # add row to list with valid
# end of loop through each element
# assign params that are common to all areas/all peaks into rows of df (copied from original log)
for index,row in Integresults.iterrows():
        Integresults.loc[index,'Filenumber']=logmatch.Filenumber
        Integresults.loc[index,'Filename']=logmatch.Filename
        Integresults.loc[index,'Filepath']=logmatch.FilePath
        Integresults.loc[index,'Sample']=logmatch.Sample
        Integresults.loc[index,'Comments']=logmatch.Comments
        Integresults.loc[index,'Areanumber']=areanum
Integresults=Integresults[mycols] # put back in original order
return Augerfile, Integresults # df with direct peak fitting info for all areas/ all elements
Augerfile, smcountname, evbreaks
def packagequant():
''' Write backfit, integ, smdiff, AESquantparams to single Excel file '''
# Use statsmodels package for outlier detection
comp1=pd.read_csv('Smdifcomp.csv', encoding='cp437')
comp2=pd.read_csv('SmdifcompavgFe.csv', encoding='cp437')
# Find a way to do linear fits with least squares but with fixed intercept (through origin)
# web suggestion sez linalg.lstsq but that didn't work initially
# slope,intercept=np.polyfit(data1, data2, 1) numpy version
A=np.vstack([xdata, np.ones(len(xdata))]).T # x needs to be a column vector for linalg.lstsq. Not sure why these arrays need stacking
slope, _, _, _ = np.linalg.lstsq(A, ydata) # fixes intercept to zero?
def checklog(filelist, AugerParamLog):
''' Checks the user Auger parameter log matches the actual data file list from directory
prints out filenumbers that have a problem to console'''
spe=[] # list of filenumbers of spe files in directory
semmap=[] # list of filenumbers for sem or map files in directory
for i, name in enumerate(filelist): # deals with 3 cases (spe, sem or map)
match=re.search(r'\d+.csv',name) # spe files converted to csv
if match:
tempstring=match.group(0)
num=int(tempstring.split('.')[0])
spe.append(num) # add to spe files in dir list
spelist=AugerParamLog[(AugerParamLog['Areas']>=1)]
loglist=spelist.Filename.unique()
loglist=np.ndarray.tolist(loglist)
missingdata=[i for i in loglist if i not in spe] # list comprehension for missing data file (but present in excel logbook)
missingentry=[i for i in spe if i not in loglist] # list comprehension for data file with missing log entry (could also convert to sets and compare)
for i, val in enumerate(missingdata):
print ('Data file number ', val, ' present in Auger params log but missing from directory')
for i, val in enumerate(missingentry):
print ('Data file number ', val, ' present in directory but missing from Auger params log')
# check for duplicate entries in logbook
myset=set([x for x in loglist if loglist.count(x) > 1])
for i in myset:
print('Duplicate entry for file number', i, ' in Auger params log.')
return
def plotdffits(df, colname, nparray):
''' Plot a dataframe slice on top and a numpy array (i.e its derivative) on the bottom '''
    fig, axes = plt.subplots(nrows=2, ncols=1) # axes is a 1D array of two subplots
    df.plot.scatter(x='Energy', y=colname, ax=axes[0], color='r')
    df.plot(x='Energy', y=colname, ax=axes[0], color='r')
    xvals=np.arange(df.Energy.min(),df.Energy.max(),1)
    axes[1].plot(xvals[:len(nparray)], nparray[:len(xvals)]) # numpy array (e.g. derivative) on the lower plot
# MORE PLOTTING STUFF UNDER DEVELOPMENT
def addternary(df, elemlist):
''' Working from df with AES basis and at.%
S, Mg+Ca, Fe '''
# TODO get ternary calculation working from df with at.%
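    # Hedged sketch (added, not in the original): assumes elemlist groups the at.% columns
    # for each ternary apex, e.g. [['S'], ['Mg', 'Ca'], ['Fe']], and that columns are named
    # '%'+element+'1' -- both are guesses at this df's conventions, not confirmed here.
    ternary=pd.DataFrame(index=df.index)
    for i, group in enumerate(elemlist):
        ternary['+'.join(group)]=sum([df['%'+el+'1'] for el in group])
    ternary=ternary.div(ternary.sum(axis=1), axis=0) # renormalize so each row sums to 1
    return ternary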
| mit | 6,603,829,211,800,989,000 | 52.381579 | 190 | 0.664312 | false |
ChristosChristofidis/h2o-3 | h2o-py/tests/testdir_algos/kmeans/pyunit_parametersKmeans.py | 1 | 1034 | import sys
sys.path.insert(1, "../../../")
import h2o
def parametersKmeans(ip,port):
# Connect to a pre-existing cluster
h2o.init(ip,port) # connect to localhost:54321
#Log.info("Getting data...")
iris = h2o.import_frame(path=h2o.locate("smalldata/iris/iris.csv"))
#Log.info("Create and and duplicate...")
iris_km = h2o.kmeans(x=iris[0:4], k=3, seed=1234)
parameters = iris_km._model_json['parameters']
param_dict = {}
for p in range(len(parameters)):
param_dict[parameters[p]['label']] = parameters[p]['actual_value']
iris_km_again = h2o.kmeans(x=iris[0:4], **param_dict)
#Log.info("wss")
  wss = sorted(iris_km.withinss())
  wss_again = sorted(iris_km_again.withinss())
assert wss == wss_again, "expected wss to be equal"
#Log.info("centers")
centers = iris_km.centers()
centers_again = iris_km_again.centers()
assert centers == centers_again, "expected centers to be the same"
if __name__ == "__main__":
h2o.run_test(sys.argv, parametersKmeans)
| apache-2.0 | -588,064,947,878,889,500 | 31.3125 | 74 | 0.636364 | false |
tripzero/snetcam | snetcam/imageresource.py | 1 | 3821 | import cv2
import numpy as np
import json
import base64
from collections import deque
import trollius as asyncio
import sys, os, traceback
from multiprocessing import Process, Queue, Pool
is_py2 = sys.version[0] == '2'
if is_py2:
from Queue import Empty
else:
from queue import Empty
def MINS(mins):
return mins * 60
def dataToImage(data, base64Encode = False):
if base64Encode:
data = base64.b64decode(data)
img = np.frombuffer(data, dtype='uint8')
img = cv2.imdecode(img, cv2.IMREAD_COLOR)
return img
def imgToData(frame, base64Encode = False):
success, data = cv2.imencode('.jpg', frame)
assert success
data = np.getbuffer(data)
if base64Encode:
data = base64.b64encode(data)
return data
class DeQueue(object):
def __init__(self, size=None):
self.maxSize = size
self.queue = Queue()
def put(self, data, block=True, timeout=None):
try:
if self.maxSize and self.qsize() >= self.maxSize:
'pop off'
self.queue.get(False)
except Empty:
pass
return self.queue.put(data, block, timeout)
def get(self, block=True, timeout=None):
return self.queue.get(block, timeout)
def get_nowait(self):
return self.queue.get(False)
def qsize(self):
return self.queue.qsize()
class MultiprocessImageResource():
pollRate = 0.001
debug = False
def __init__(self, name, processes=1, maxQueueSize=100, args=None):
try:
self.pool = []
self.resultQueue = DeQueue(maxQueueSize)
self.dataQueue = DeQueue(maxQueueSize)
self.debugQueue = Queue()
self.name = name
self.variables = {}
print ("processes for {} = {}".format(name, processes))
for i in range(processes):
if args:
p = Process(target=self.process, args=args)
else:
p = Process(target=self.process)
self.pool.append(p)
p.start()
asyncio.get_event_loop().create_task(self.poll())
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stdout)
def __del__(self):
for process in self.pool:
process.terminate()
process.join()
def setValue(self, name, value):
self.variables[name] = value
def debugOut(self, msg):
#if self.debug:
self.debugQueue.put(msg)
def handleImg(self, data):
self.dataQueue.put(data)
@asyncio.coroutine
def poll(self):
print("poll task started")
while True:
try:
msg = self.debugQueue.get_nowait()
print(msg)
except Empty:
pass
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print("poll exception")
traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stdout)
try:
result = self.resultQueue.get_nowait()
self.hasResult(result)
except Empty:
pass
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print("poll exception")
traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stdout)
finally:
yield asyncio.From(asyncio.sleep(self.pollRate))
"""
Should be overriden in subclass. For example:
def process(self):
while True:
imgData = self.dataQueue.get()
img = dataToImage(imgData)
# Do processing...
self.resultQueue.put(result)
"""
def process(self):
print ("base process() is being called. You should have overridden this")
assert False
"""
Should be overriden in subclass. This will be called when a result is processed from the
result queue.
"""
def hasResult(self, result):
print("you need to implement 'hasResult' in your subclass")
assert False
| lgpl-2.1 | 8,368,115,691,261,882,000 | 23.183544 | 91 | 0.676786 | false |
trondhindenes/ansible | lib/ansible/modules/network/avi/avi_cloud.py | 20 | 10708 | #!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_cloud
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of Cloud Avi RESTful Object
description:
- This module is used to configure Cloud object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
apic_configuration:
description:
- Apicconfiguration settings for cloud.
apic_mode:
description:
- Boolean flag to set apic_mode.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
aws_configuration:
description:
- Awsconfiguration settings for cloud.
azure_configuration:
description:
- Field introduced in 17.2.1.
version_added: "2.5"
cloudstack_configuration:
description:
- Cloudstackconfiguration settings for cloud.
custom_tags:
description:
- Custom tags for all avi created resources in the cloud infrastructure.
- Field introduced in 17.1.5.
version_added: "2.5"
dhcp_enabled:
description:
- Select the ip address management scheme.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
dns_provider_ref:
description:
- Dns profile for the cloud.
- It is a reference to an object of type ipamdnsproviderprofile.
docker_configuration:
description:
- Dockerconfiguration settings for cloud.
east_west_dns_provider_ref:
description:
- Dns profile for east-west services.
- It is a reference to an object of type ipamdnsproviderprofile.
east_west_ipam_provider_ref:
description:
- Ipam profile for east-west services.
- Warning - please use virtual subnets in this ipam profile that do not conflict with the underlay networks or any overlay networks in the cluster.
- For example in aws and gcp, 169.254.0.0/16 is used for storing instance metadata.
- Hence, it should not be used in this profile.
- It is a reference to an object of type ipamdnsproviderprofile.
enable_vip_static_routes:
description:
- Use static routes for vip side network resolution during virtualservice placement.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
ipam_provider_ref:
description:
- Ipam profile for the cloud.
- It is a reference to an object of type ipamdnsproviderprofile.
license_tier:
description:
- Specifies the default license tier which would be used by new se groups.
- This field by default inherits the value from system configuration.
- Enum options - ENTERPRISE_16, ENTERPRISE_18.
- Field introduced in 17.2.5.
version_added: "2.5"
license_type:
description:
- If no license type is specified then default license enforcement for the cloud type is chosen.
- The default mappings are container cloud is max ses, openstack and vmware is cores and linux it is sockets.
- Enum options - LIC_BACKEND_SERVERS, LIC_SOCKETS, LIC_CORES, LIC_HOSTS, LIC_SE_BANDWIDTH.
linuxserver_configuration:
description:
- Linuxserverconfiguration settings for cloud.
mesos_configuration:
description:
- Mesosconfiguration settings for cloud.
mtu:
description:
- Mtu setting for the cloud.
- Default value when not specified in API or module is interpreted by Avi Controller as 1500.
- Units(BYTES).
name:
description:
- Name of the object.
required: true
nsx_configuration:
description:
- Configuration parameters for nsx manager.
- Field introduced in 17.1.1.
obj_name_prefix:
description:
- Default prefix for all automatically created objects in this cloud.
- This prefix can be overridden by the se-group template.
openstack_configuration:
description:
- Openstackconfiguration settings for cloud.
oshiftk8s_configuration:
description:
- Oshiftk8sconfiguration settings for cloud.
prefer_static_routes:
description:
- Prefer static routes over interface routes during virtualservice placement.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
proxy_configuration:
description:
- Proxyconfiguration settings for cloud.
rancher_configuration:
description:
- Rancherconfiguration settings for cloud.
state_based_dns_registration:
description:
- Dns records for vips are added/deleted based on the operational state of the vips.
- Field introduced in 17.1.12.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
version_added: "2.5"
type: bool
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
vca_configuration:
description:
- Vcloudairconfiguration settings for cloud.
vcenter_configuration:
description:
- Vcenterconfiguration settings for cloud.
vtype:
description:
- Cloud type.
- Enum options - CLOUD_NONE, CLOUD_VCENTER, CLOUD_OPENSTACK, CLOUD_AWS, CLOUD_VCA, CLOUD_APIC, CLOUD_MESOS, CLOUD_LINUXSERVER, CLOUD_DOCKER_UCP,
- CLOUD_RANCHER, CLOUD_OSHIFT_K8S, CLOUD_AZURE.
- Default value when not specified in API or module is interpreted by Avi Controller as CLOUD_NONE.
required: true
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create a VMWare cloud with write access mode
avi_cloud:
username: '{{ username }}'
controller: '{{ controller }}'
password: '{{ password }}'
apic_mode: false
dhcp_enabled: true
enable_vip_static_routes: false
license_type: LIC_CORES
mtu: 1500
name: VCenter Cloud
prefer_static_routes: false
tenant_ref: admin
vcenter_configuration:
datacenter_ref: /api/vimgrdcruntime/datacenter-2-10.10.20.100
management_network: /api/vimgrnwruntime/dvportgroup-103-10.10.20.100
password: password
privilege: WRITE_ACCESS
username: user
vcenter_url: 10.10.20.100
vtype: CLOUD_VCENTER
"""
RETURN = '''
obj:
description: Cloud (api/cloud) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
apic_configuration=dict(type='dict',),
apic_mode=dict(type='bool',),
aws_configuration=dict(type='dict',),
azure_configuration=dict(type='dict',),
cloudstack_configuration=dict(type='dict',),
custom_tags=dict(type='list',),
dhcp_enabled=dict(type='bool',),
dns_provider_ref=dict(type='str',),
docker_configuration=dict(type='dict',),
east_west_dns_provider_ref=dict(type='str',),
east_west_ipam_provider_ref=dict(type='str',),
enable_vip_static_routes=dict(type='bool',),
ipam_provider_ref=dict(type='str',),
license_tier=dict(type='str',),
license_type=dict(type='str',),
linuxserver_configuration=dict(type='dict',),
mesos_configuration=dict(type='dict',),
mtu=dict(type='int',),
name=dict(type='str', required=True),
nsx_configuration=dict(type='dict',),
obj_name_prefix=dict(type='str',),
openstack_configuration=dict(type='dict',),
oshiftk8s_configuration=dict(type='dict',),
prefer_static_routes=dict(type='bool',),
proxy_configuration=dict(type='dict',),
rancher_configuration=dict(type='dict',),
state_based_dns_registration=dict(type='bool',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
vca_configuration=dict(type='dict',),
vcenter_configuration=dict(type='dict',),
vtype=dict(type='str', required=True),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'cloud',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 | -7,858,649,419,432,722,000 | 37.65704 | 159 | 0.624113 | false |
robinparadise/videoquizmaker | public/external/html5-lint.orig/html5check.py | 1 | 5799 | #!/usr/bin/python
# Copyright (c) 2007-2008 Mozilla Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# Several "try" blocks for python2/3 differences (@secretrobotron)
try:
import httplib
except:
import http.client as httplib
import os
import sys
import re
try:
import urlparse
except:
import urllib.parse as urlparse
import string
import gzip
try:
from BytesIO import BytesIO
except:
from io import BytesIO
try:
maketrans = str.maketrans
except:
maketrans = string.maketrans
extPat = re.compile(r'^.*\.([A-Za-z]+)$')
extDict = {
"html" : "text/html",
"htm" : "text/html",
"xhtml" : "application/xhtml+xml",
"xht" : "application/xhtml+xml",
"xml" : "application/xml",
}
argv = sys.argv[1:]
forceXml = 0
forceHtml = 0
gnu = 0
errorsOnly = 0
encoding = None
fileName = None
contentType = None
inputHandle = None
service = 'https://validator.mozillalabs.com/'
for arg in argv:
if '--help' == arg:
print('-h : force text/html')
print('-x : force application/xhtml+xml')
print('-g : GNU output')
print('-e : errors only (no info or warnings)')
print('--encoding=foo : declare encoding foo')
print('--service=url : the address of the HTML5 validator')
print('One file argument allowed. Leave out to read from stdin.')
sys.exit(0)
elif arg.startswith("--encoding="):
encoding = arg[11:]
elif arg.startswith("--service="):
service = arg[10:]
elif arg.startswith("--"):
sys.stderr.write('Unknown argument %s.\n' % arg)
sys.exit(2)
elif arg.startswith("-"):
for c in arg[1:]:
if 'x' == c:
forceXml = 1
elif 'h' == c:
forceHtml = 1
elif 'g' == c:
gnu = 1
elif 'e' == c:
errorsOnly = 1
else:
sys.stderr.write('Unknown argument %s.\n' % arg)
sys.exit(3)
else:
if fileName:
sys.stderr.write('Cannot have more than one input file.\n')
sys.exit(1)
fileName = arg
if forceXml and forceHtml:
sys.stderr.write('Cannot force HTML and XHTML at the same time.\n')
sys.exit(2)
if forceXml:
contentType = 'application/xhtml+xml'
elif forceHtml:
contentType = 'text/html'
elif fileName:
m = extPat.match(fileName)
if m:
ext = m.group(1)
ext = ext.translate(maketrans(string.ascii_uppercase, string.ascii_lowercase))
if ext in extDict:
contentType = extDict[ext]
else:
sys.stderr.write('Unable to guess Content-Type from file name. Please force the type.\n')
sys.exit(3)
else:
sys.stderr.write('Could not extract a filename extension. Please force the type.\n')
sys.exit(6)
else:
sys.stderr.write('Need to force HTML or XHTML when reading from stdin.\n')
sys.exit(4)
if encoding:
contentType = '%s; charset=%s' % (contentType, encoding)
if fileName:
inputHandle = open(fileName, "rb")
else:
inputHandle = sys.stdin
data = inputHandle.read()
buf = BytesIO()
gzipper = gzip.GzipFile(fileobj=buf, mode='wb')
gzipper.write(data)
gzipper.close()
gzippeddata = buf.getvalue()
buf.close()
connection = None
response = None
status = 302
redirectCount = 0
url = service
if gnu:
url = url + '?out=gnu'
else:
url = url + '?out=text'
if errorsOnly:
url = url + '&level=error'
while (status == 302 or status == 301 or status == 307) and redirectCount < 10:
if redirectCount > 0:
url = response.getheader('Location')
parsed = urlparse.urlsplit(url)
if redirectCount > 0:
connection.close() # previous connection
print('Redirecting to %s' % url)
print('Please press enter to continue or type "stop" followed by enter to stop.')
if raw_input() != "":
sys.exit(0)
connection = httplib.HTTPSConnection(parsed[1])
connection.connect()
connection.putrequest("POST", "%s?%s" % (parsed[2], parsed[3]), skip_accept_encoding=1)
connection.putheader("Accept-Encoding", 'gzip')
connection.putheader("Content-Type", contentType)
connection.putheader("Content-Encoding", 'gzip')
connection.putheader("Content-Length", len(gzippeddata))
connection.endheaders()
connection.send(gzippeddata)
response = connection.getresponse()
status = response.status
redirectCount += 1
if status != 200:
sys.stderr.write('%s %s\n' % (status, response.reason))
sys.exit(5)
if response.getheader('Content-Encoding', 'identity').lower() == 'gzip':
response = gzip.GzipFile(fileobj=BytesIO(response.read()))
if fileName and gnu:
quotedName = '"%s"' % fileName.replace('"', '\\042')
for line in response:
sys.stdout.write(quotedName)
sys.stdout.write(line)
else:
output = response.read()
# python2/3 difference in output's type
if not isinstance(output, str):
output = output.decode('utf-8')
sys.stdout.write(output)
connection.close()
| mit | 6,954,196,385,093,068,000 | 27.150485 | 95 | 0.6796 | false |