| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 5 to 92 | stringlengths 4 to 232 | stringclasses 19 values | stringlengths 4 to 7 | stringlengths 721 to 1.04M | stringclasses 15 values | int64 -9,223,277,421,539,062,000 to 9,223,102,107B | float64 6.51 to 99.9 | int64 15 to 997 | float64 0.25 to 0.97 | bool 1 class |
rpatterson/test-har | test_har/tests/test_requests.py | 1 | 3214 |
"""
Test using HAR files in Python tests against the requests library.
"""
import json
import requests
import requests_mock
from test_har import requests_har as test_har
from test_har import tests
class HARDogfoodRequestsTests(tests.HARDogfoodTestCase, test_har.HARTestCase):
"""
Test using HAR files in Python tests against the requests library.
"""
RESPONSE_TYPE = requests.Response
def setUp(self):
"""
Start the mocker, mock the example HAR response, and register cleanup.
"""
super(HARDogfoodRequestsTests, self).setUp()
self.mocker = requests_mock.Mocker()
self.mocker.start()
self.addCleanup(self.mocker.stop)
self.headers = test_har.array_to_dict(
self.entry["response"]["headers"])
self.headers['Content-Type'] = self.entry[
"response"]["content"]["mimeType"]
# Insert a key into the response
# about which HAR response makes no assertion
content = dict(
self.entry["response"]["content"]["text"],
email='[email protected]')
self.mocker.post(
self.entry["request"]["url"],
status_code=self.entry["response"]["status"],
reason=self.entry["response"]["statusText"],
headers=self.headers,
text=json.dumps(content))
def test_non_json(self):
"""
Mock the requests library non-JSON response.
"""
self.entry["response"]["content"]["mimeType"] = "text/html"
self.entry["response"]["content"]["text"] = (
'<html><body>Foo HTML body</body></html>')
self.mocker.post(
self.entry["request"]["url"],
status_code=self.entry["response"]["status"],
reason=self.entry["response"]["statusText"],
headers=dict(self.headers, **{'Content-Type': self.entry[
"response"]["content"]["mimeType"]}),
text=self.entry["response"]["content"]["text"])
super(HARDogfoodRequestsTests, self).test_non_json()
def test_missing_content_type(self):
"""
Fail when the response is missing the content/MIME type.
"""
self.headers.pop('Content-Type')
self.mocker.post(
self.entry["request"]["url"],
status_code=self.entry["response"]["status"],
reason=self.entry["response"]["statusText"],
headers=self.headers,
text=json.dumps(self.entry["response"]["content"]["text"]))
with self.assertRaises(AssertionError) as har_failures:
self.assertHAR(self.example)
self.assertIn(
'content/mimeType', har_failures.exception.args[0],
'Assertion exception missing MIME type detail')
# BBB Python 2.7 str vs unicode compat
with self.assertRaises(AssertionError) as expected:
self.assertIn(
'Content-Type', self.headers,
'Missing response content type')
self.assertEqual(
har_failures.exception.args[0]['content/mimeType'].args,
expected.exception.args,
'Wrong missing response MIME type failure assertion')
| gpl-3.0 | 4,915,884,664,070,429,000 | 35.11236 | 78 | 0.59552 | false |
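For orientation, here is a minimal, self-contained sketch of the `requests_mock` pattern the test case above builds on; the endpoint URL and payload are placeholders invented for illustration, not values from the HAR fixtures.

```python
import json

import requests
import requests_mock

url = "http://example.com/api/users"                          # placeholder endpoint
payload = {"username": "test", "email": "test@example.com"}   # placeholder body

with requests_mock.Mocker() as mocker:
    # Register a canned POST response, mirroring what setUp() does above.
    mocker.post(
        url,
        status_code=201,
        reason="Created",
        headers={"Content-Type": "application/json"},
        text=json.dumps(payload),
    )
    response = requests.post(url, json={"username": "test"})
    assert response.status_code == 201
    assert response.json()["email"] == "test@example.com"
```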
pavlov99/jsonapi | jsonapi/utils.py | 1 | 2220 |
""" JSON:API utils."""
class _classproperty(property):
""" Implement property behaviour for classes.
class A():
@_classproperty
@classmethod
def name(cls):
return cls.__name__
"""
def __get__(self, obj, type_):
return self.fget.__get__(None, type_)()
def _cached(f):
""" Decorator that makes a method cached."""
attr_name = '_cached_' + f.__name__
def wrapper(obj, *args, **kwargs):
if not hasattr(obj, attr_name):
setattr(obj, attr_name, f(obj, *args, **kwargs))
return getattr(obj, attr_name)
return wrapper
classproperty = lambda f: _classproperty(classmethod(f))
cached_property = lambda f: property(_cached(f))
cached_classproperty = lambda f: classproperty(_cached(f))
class Choices(object):
""" Choices."""
def __init__(self, *choices):
self._choices = []
self._choice_dict = {}
for choice in choices:
if isinstance(choice, (list, tuple)):
if len(choice) == 2:
choice = (choice[0], choice[1], choice[1])
elif len(choice) != 3:
raise ValueError(
"Choices can't handle a list/tuple of length {0}, only\
2 or 3".format(choice))
else:
choice = (choice, choice, choice)
self._choices.append((choice[0], choice[2]))
self._choice_dict[choice[1]] = choice[0]
def __getattr__(self, attname):
try:
return self._choice_dict[attname]
except KeyError:
raise AttributeError(attname)
def __iter__(self):
return iter(self._choices)
def __getitem__(self, index):
return self._choices[index]
def __delitem__(self, index):
del self._choices[index]
def __setitem__(self, index, value):
self._choices[index] = value
def __repr__(self):
return "{0}({1})".format(
self.__class__.__name__,
self._choices
)
def __len__(self):
return len(self._choices)
def __contains__(self, element):
return element in self._choice_dict.values()
| mit | -3,830,283,769,636,155,400 | 23.94382 | 79 | 0.530631 | false |
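A short usage sketch of the `Choices` helper defined in `jsonapi/utils.py` above; the status values are invented for illustration and assume the module is importable as `jsonapi.utils`.

```python
from jsonapi.utils import Choices

# Each 3-tuple is (stored value, attribute name, human-readable label).
STATUSES = Choices(
    (0, 'DRAFT', 'Draft'),
    (1, 'PUBLISHED', 'Published'),
)

assert STATUSES.DRAFT == 0                                  # attribute access via __getattr__
assert list(STATUSES) == [(0, 'Draft'), (1, 'Published')]   # (value, label) pairs
assert 1 in STATUSES                                        # __contains__ checks stored values
```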
NicWayand/xray | xarray/plot/utils.py | 1 | 6442 |
import pkg_resources
import numpy as np
import pandas as pd
from ..core.pycompat import basestring
def _load_default_cmap(fname='default_colormap.csv'):
"""
Returns viridis color map
"""
from matplotlib.colors import LinearSegmentedColormap
# Not sure what the first arg here should be
f = pkg_resources.resource_stream(__name__, fname)
cm_data = pd.read_csv(f, header=None).values
return LinearSegmentedColormap.from_list('viridis', cm_data)
def _determine_extend(calc_data, vmin, vmax):
extend_min = calc_data.min() < vmin
extend_max = calc_data.max() > vmax
if extend_min and extend_max:
extend = 'both'
elif extend_min:
extend = 'min'
elif extend_max:
extend = 'max'
else:
extend = 'neither'
return extend
def _build_discrete_cmap(cmap, levels, extend, filled):
"""
Build a discrete colormap and normalization of the data.
"""
import matplotlib as mpl
if not filled:
# non-filled contour plots
extend = 'max'
if extend == 'both':
ext_n = 2
elif extend in ['min', 'max']:
ext_n = 1
else:
ext_n = 0
n_colors = len(levels) + ext_n - 1
pal = _color_palette(cmap, n_colors)
new_cmap, cnorm = mpl.colors.from_levels_and_colors(
levels, pal, extend=extend)
# copy the old cmap name, for easier testing
new_cmap.name = getattr(cmap, 'name', cmap)
return new_cmap, cnorm
def _color_palette(cmap, n_colors):
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
colors_i = np.linspace(0, 1., n_colors)
if isinstance(cmap, (list, tuple)):
# we have a list of colors
try:
# first try to turn it into a palette with seaborn
from seaborn.apionly import color_palette
pal = color_palette(cmap, n_colors=n_colors)
except ImportError:
# if that fails, use matplotlib
# in this case, is there any difference between mpl and seaborn?
cmap = ListedColormap(cmap, N=n_colors)
pal = cmap(colors_i)
elif isinstance(cmap, basestring):
# we have some sort of named palette
try:
# first try to turn it into a palette with seaborn
from seaborn.apionly import color_palette
pal = color_palette(cmap, n_colors=n_colors)
except (ImportError, ValueError):
# ValueError is raised when seaborn doesn't like a colormap
# (e.g. jet). If that fails, use matplotlib
try:
# is this a matplotlib cmap?
cmap = plt.get_cmap(cmap)
except ValueError:
# or maybe we just got a single color as a string
cmap = ListedColormap([cmap], N=n_colors)
pal = cmap(colors_i)
else:
# cmap better be a LinearSegmentedColormap (e.g. viridis)
pal = cmap(colors_i)
return pal
def _determine_cmap_params(plot_data, vmin=None, vmax=None, cmap=None,
center=None, robust=False, extend=None,
levels=None, filled=True, cnorm=None):
"""
Use some heuristics to set good defaults for colorbar and range.
Adapted from Seaborn:
https://github.com/mwaskom/seaborn/blob/v0.6/seaborn/matrix.py#L158
Parameters
==========
plot_data: Numpy array
Doesn't handle xarray objects
Returns
=======
cmap_params : dict
Use depends on the type of the plotting function
"""
ROBUST_PERCENTILE = 2.0
import matplotlib as mpl
calc_data = np.ravel(plot_data[~pd.isnull(plot_data)])
# Setting center=False prevents a divergent cmap
possibly_divergent = center is not False
# Set center to 0 so math below makes sense but remember its state
center_is_none = False
if center is None:
center = 0
center_is_none = True
# Setting both vmin and vmax prevents a divergent cmap
if (vmin is not None) and (vmax is not None):
possibly_divergent = False
# vlim might be computed below
vlim = None
if vmin is None:
if robust:
vmin = np.percentile(calc_data, ROBUST_PERCENTILE)
else:
vmin = calc_data.min()
elif possibly_divergent:
vlim = abs(vmin - center)
if vmax is None:
if robust:
vmax = np.percentile(calc_data, 100 - ROBUST_PERCENTILE)
else:
vmax = calc_data.max()
elif possibly_divergent:
vlim = abs(vmax - center)
if possibly_divergent:
# kwargs not specific about divergent or not: infer defaults from data
divergent = ((vmin < 0) and (vmax > 0)) or not center_is_none
else:
divergent = False
# A divergent map should be symmetric around the center value
if divergent:
if vlim is None:
vlim = max(abs(vmin - center), abs(vmax - center))
vmin, vmax = -vlim, vlim
# Now add in the centering value and set the limits
vmin += center
vmax += center
# Choose default colormaps if not provided
if cmap is None:
if divergent:
cmap = "RdBu_r"
else:
cmap = "viridis"
# Allow viridis before matplotlib 1.5
if cmap == "viridis":
cmap = _load_default_cmap()
# Handle discrete levels
if levels is not None:
if isinstance(levels, int):
ticker = mpl.ticker.MaxNLocator(levels)
levels = ticker.tick_values(vmin, vmax)
vmin, vmax = levels[0], levels[-1]
if extend is None:
extend = _determine_extend(calc_data, vmin, vmax)
if levels is not None:
cmap, cnorm = _build_discrete_cmap(cmap, levels, extend, filled)
return dict(vmin=vmin, vmax=vmax, cmap=cmap, extend=extend,
levels=levels, norm=cnorm)
def _infer_xy_labels(darray, x, y):
"""
Determine x and y labels. For use in _plot2d
darray must be a 2 dimensional data array.
"""
if x is None and y is None:
if darray.ndim != 2:
raise ValueError('DataArray must be 2d')
y, x = darray.dims
elif x is None or y is None:
raise ValueError('cannot supply only one of x and y')
elif any(k not in darray.coords for k in (x, y)):
raise ValueError('x and y must be coordinate variables')
return x, y
| apache-2.0 | 5,193,884,461,763,979,000 | 28.686636 | 78 | 0.603695 | false |
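To illustrate the `robust=True` branch of `_determine_cmap_params` above, here is a tiny stand-alone NumPy sketch of the percentile clipping and the symmetric limits used for divergent data; it is a simplified re-statement, not the xarray code itself.

```python
import numpy as np

ROBUST_PERCENTILE = 2.0
plot_data = np.random.randn(1000)                     # placeholder data
calc_data = np.ravel(plot_data[~np.isnan(plot_data)])

# robust=True: ignore the extreme 2% on each tail when picking color limits
vmin = np.percentile(calc_data, ROBUST_PERCENTILE)
vmax = np.percentile(calc_data, 100 - ROBUST_PERCENTILE)

# for divergent data centered on 0, make the limits symmetric around the center
vlim = max(abs(vmin), abs(vmax))
vmin, vmax = -vlim, vlim
print(vmin, vmax)
```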
xozzo/pyfootball | setup.py | 1 | 1257 |
from setuptools import setup, find_packages
import os
if os.path.exists('README.rst'):
readme_path = 'README.rst'
else:
readme_path = 'README.md'
setup(
name='pyfootball',
version='1.0.1',
description='A client library for the football-data.org REST API',
long_description=open(readme_path).read(),
url='https://github.com/xozzo/pyfootball',
author='Timothy Ng',
author_email='[email protected]',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5'
],
keywords='api wrapper client library football data',
packages=find_packages(exclude=['contrib', 'docs', 'tests', 'venv']),
install_requires=['requests'],
test_suite='tests',
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev]
extras_require={
'dev': ['sphinx', 'sphinx-autobuild']
}
)
| mit | -1,856,567,441,525,745,200 | 27.568182 | 73 | 0.6428 | false |
frankk00/realtor | oauth_provider/oauth.py | 1 | 23473 |
"""
The MIT License
Copyright (c) 2007 Leah Culver
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
import cgi
import urllib
import time
import random
import urlparse
import hmac
import binascii
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class OAuthError(RuntimeError):
"""Generic exception class."""
    def __init__(self, message='OAuth error occurred.'):
self.message = message
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s, safe='~')
def _utf8_str(s):
"""Convert unicode to utf-8."""
if isinstance(s, unicode):
return s.encode("utf-8")
else:
return str(s)
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class OAuthConsumer(object):
"""Consumer of OAuth authentication.
OAuthConsumer is a data type that represents the identity of the Consumer
via its shared secret with the Service Provider.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
class OAuthToken(object):
"""OAuthToken is a data type that represents an End User via either an access
or request token.
key -- the token
secret -- the token secret
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
def from_string(s):
""" Returns a token from something like:
oauth_token_secret=xxx&oauth_token=xxx
"""
params = cgi.parse_qs(s, keep_blank_values=False)
key = params['oauth_token'][0]
secret = params['oauth_token_secret'][0]
token = OAuthToken(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
from_string = staticmethod(from_string)
def __str__(self):
return self.to_string()
class OAuthRequest(object):
"""OAuthRequest represents the request and can be serialized.
OAuth parameters:
- oauth_consumer_key
- oauth_token
- oauth_signature_method
- oauth_signature
- oauth_timestamp
- oauth_nonce
- oauth_version
- oauth_verifier
... any additional parameters, as defined by the Service Provider.
"""
parameters = None # OAuth parameters.
http_method = HTTP_METHOD
http_url = None
version = VERSION
def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
self.http_method = http_method
self.http_url = http_url
self.parameters = parameters or {}
def set_parameter(self, parameter, value):
self.parameters[parameter] = value
def get_parameter(self, parameter):
try:
return self.parameters[parameter]
except:
raise OAuthError('Parameter not found: %s' % parameter)
def _get_timestamp_nonce(self):
return self.get_parameter('oauth_timestamp'), self.get_parameter(
'oauth_nonce')
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
parameters = {}
for k, v in self.parameters.iteritems():
# Ignore oauth parameters.
if k.find('oauth_') < 0:
parameters[k] = v
return parameters
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
auth_header = 'OAuth realm="%s"' % realm
# Add the oauth parameters.
if self.parameters:
for k, v in self.parameters.iteritems():
if k[:6] == 'oauth_':
auth_header += ', %s="%s"' % (k, escape(str(v)))
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) \
for k, v in self.parameters.iteritems()])
def to_url(self):
"""Serialize as a URL for a GET request."""
return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
params = self.parameters
try:
# Exclude the signature if it exists.
del params['oauth_signature']
except:
pass
# Escape key values before sorting.
key_values = [(escape(_utf8_str(k)), escape(_utf8_str(v))) \
for k,v in params.items()]
# Sort lexicographically, first after key, then after value.
key_values.sort()
# Combine key value pairs into a string.
return '&'.join(['%s=%s' % (k, v) for k, v in key_values])
def get_normalized_http_method(self):
"""Uppercases the http method."""
return self.http_method.upper()
def get_normalized_http_url(self):
"""Parses the URL and rebuilds it to be scheme://host/path."""
parts = urlparse.urlparse(self.http_url)
scheme, netloc, path = parts[:3]
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
return '%s://%s%s' % (scheme, netloc, path)
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of build_signature."""
# Set the signature method.
self.set_parameter('oauth_signature_method',
signature_method.get_name())
# Set the signature.
self.set_parameter('oauth_signature',
self.build_signature(signature_method, consumer, token))
def build_signature(self, signature_method, consumer, token):
"""Calls the build signature method within the signature method."""
return signature_method.build_signature(self, consumer, token)
def from_request(http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = OAuthRequest._split_header(auth_header)
parameters.update(header_params)
except:
raise OAuthError('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = OAuthRequest._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = OAuthRequest._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return OAuthRequest(http_method, http_url, parameters)
return None
from_request = staticmethod(from_request)
def from_consumer_and_token(oauth_consumer, token=None,
callback=None, verifier=None, http_method=HTTP_METHOD,
http_url=None, parameters=None):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': oauth_consumer.key,
'oauth_timestamp': generate_timestamp(),
'oauth_nonce': generate_nonce(),
'oauth_version': OAuthRequest.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.callback:
parameters['oauth_callback'] = token.callback
# 1.0a support for verifier.
if verifier:
parameters['oauth_verifier'] = verifier
elif callback:
# 1.0a support for callback in the request token request.
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_consumer_and_token = staticmethod(from_consumer_and_token)
def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD,
http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_token_and_callback = staticmethod(from_token_and_callback)
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
_split_header = staticmethod(_split_header)
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = cgi.parse_qs(param_str, keep_blank_values=False)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
_split_url_string = staticmethod(_split_url_string)
class OAuthServer(object):
"""A worker to check the validity of a request against a data store."""
timestamp_threshold = 300 # In seconds, five minutes.
version = VERSION
signature_methods = None
data_store = None
def __init__(self, data_store=None, signature_methods=None):
self.data_store = data_store
self.signature_methods = signature_methods or {}
def set_data_store(self, data_store):
self.data_store = data_store
def get_data_store(self):
return self.data_store
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.get_name()] = signature_method
return self.signature_methods
def fetch_request_token(self, oauth_request):
"""Processes a request_token request and returns the
request token on success.
"""
try:
# Get the request token for authorization.
token = self._get_token(oauth_request, 'request')
except OAuthError:
# No token required for the initial token request.
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
try:
callback = self.get_callback(oauth_request)
except OAuthError:
callback = None # 1.0, no callback specified.
self._check_signature(oauth_request, consumer, None)
# Fetch a new token.
token = self.data_store.fetch_request_token(consumer, callback)
return token
def fetch_access_token(self, oauth_request):
logger.warning("!!! IN OAuthServer.fetch_access_token OAuth Params: %s"%oauth_request.parameters)
"""Processes an access_token request and returns the
access token on success.
"""
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
try:
verifier = self._get_verifier(oauth_request)
except OAuthError:
verifier = None
# Get the request token.
token = self._get_token(oauth_request, 'request')
self._check_signature(oauth_request, consumer, token)
new_token = self.data_store.fetch_access_token(consumer, token, verifier)
return new_token
def verify_request(self, oauth_request):
"""Verifies an api call and checks all the parameters."""
# -> consumer and token
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
# Get the access token.
token = self._get_token(oauth_request, 'access')
self._check_signature(oauth_request, consumer, token)
parameters = oauth_request.get_nonoauth_parameters()
return consumer, token, parameters
def authorize_token(self, token, user):
"""Authorize a request token."""
return self.data_store.authorize_request_token(token, user)
def get_callback(self, oauth_request):
"""Get the callback URL."""
return oauth_request.get_parameter('oauth_callback')
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def _get_version(self, oauth_request):
"""Verify the correct version request for this server."""
try:
version = oauth_request.get_parameter('oauth_version')
except:
version = VERSION
if version and version != self.version:
raise OAuthError('OAuth version %s not supported.' % str(version))
return version
def _get_signature_method(self, oauth_request):
"""Figure out the signature with some defaults."""
try:
signature_method = oauth_request.get_parameter(
'oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# Get the signature method object.
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
raise OAuthError('Signature method %s not supported try one of the '
'following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_consumer(self, oauth_request):
consumer_key = oauth_request.get_parameter('oauth_consumer_key')
consumer = self.data_store.lookup_consumer(consumer_key)
if not consumer:
raise OAuthError('Invalid consumer.')
return consumer
def _get_token(self, oauth_request, token_type='access'):
"""Try to find the token for the provided request token key."""
token_field = oauth_request.get_parameter('oauth_token')
token = self.data_store.lookup_token(token_type, token_field)
if not token:
raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
return token
def _get_verifier(self, oauth_request):
return oauth_request.get_parameter('oauth_verifier')
def _check_signature(self, oauth_request, consumer, token):
timestamp, nonce = oauth_request._get_timestamp_nonce()
self._check_timestamp(timestamp)
self._check_nonce(consumer, token, nonce)
signature_method = self._get_signature_method(oauth_request)
try:
signature = oauth_request.get_parameter('oauth_signature')
except:
raise OAuthError('Missing signature.')
# Validate the signature.
valid_sig = signature_method.check_signature(oauth_request, consumer,
token, signature)
if not valid_sig:
key, base = signature_method.build_signature_base_string(
oauth_request, consumer, token)
logging.error("key: %s",key)
logging.error("base: %s",base)
raise OAuthError('Invalid signature. Expected signature base '
'string: %s' % base)
built = signature_method.build_signature(oauth_request, consumer, token)
def _check_timestamp(self, timestamp):
"""Verify that timestamp is recentish."""
timestamp = int(timestamp)
now = int(time.time())
lapsed = abs(now - timestamp)
if lapsed > self.timestamp_threshold:
raise OAuthError('Expired timestamp: given %d and now %s has a '
'greater difference than threshold %d' %
(timestamp, now, self.timestamp_threshold))
def _check_nonce(self, consumer, token, nonce):
"""Verify that the nonce is uniqueish."""
nonce = self.data_store.lookup_nonce(consumer, token, nonce)
if nonce:
raise OAuthError('Nonce already used: %s' % str(nonce))
class OAuthClient(object):
"""OAuthClient is a worker to attempt to execute a request."""
consumer = None
token = None
def __init__(self, oauth_consumer, oauth_token):
self.consumer = oauth_consumer
self.token = oauth_token
def get_consumer(self):
return self.consumer
def get_token(self):
return self.token
def fetch_request_token(self, oauth_request):
"""-> OAuthToken."""
raise NotImplementedError
def fetch_access_token(self, oauth_request):
"""-> OAuthToken."""
raise NotImplementedError
def access_resource(self, oauth_request):
"""-> Some protected resource."""
raise NotImplementedError
class OAuthDataStore(object):
"""A database abstraction used to lookup consumers and tokens."""
def lookup_consumer(self, key):
"""-> OAuthConsumer."""
raise NotImplementedError
def lookup_token(self, oauth_consumer, token_type, token_token):
"""-> OAuthToken."""
raise NotImplementedError
def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
"""-> OAuthToken."""
raise NotImplementedError
def fetch_request_token(self, oauth_consumer, oauth_callback):
"""-> OAuthToken."""
raise NotImplementedError
def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier):
"""-> OAuthToken."""
raise NotImplementedError
def authorize_request_token(self, oauth_token, user):
"""-> OAuthToken."""
raise NotImplementedError
class OAuthSignatureMethod(object):
"""A strategy class that implements a signature method."""
def get_name(self):
"""-> str."""
raise NotImplementedError
def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
"""-> str key, str raw."""
raise NotImplementedError
def build_signature(self, oauth_request, oauth_consumer, oauth_token):
"""-> str."""
raise NotImplementedError
def check_signature(self, oauth_request, consumer, token, signature):
built = self.build_signature(oauth_request, consumer, token)
logging.info("Built signature: %s"%(built))
return built == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
def get_name(self):
return 'HMAC-SHA1'
def build_signature_base_string(self, oauth_request, consumer, token):
sig = (
escape(oauth_request.get_normalized_http_method()),
escape(oauth_request.get_normalized_http_url()),
escape(oauth_request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def build_signature(self, oauth_request, consumer, token):
"""Builds the base signature string."""
key, raw = self.build_signature_base_string(oauth_request, consumer,
token)
# HMAC object.
try:
import hashlib # 2.5
hashed = hmac.new(key, raw, hashlib.sha1)
except:
import sha # Deprecated
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
def get_name(self):
return 'PLAINTEXT'
def build_signature_base_string(self, oauth_request, consumer, token):
"""Concatenates the consumer key and secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def build_signature(self, oauth_request, consumer, token):
key, raw = self.build_signature_base_string(oauth_request, consumer,
token)
        return key
| bsd-3-clause | -6,451,250,116,917,315,000 | 34.35241 | 105 | 0.615388 | false |
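As a usage illustration of the classes above, a hedged sketch of building and signing a request with HMAC-SHA1; the consumer/token credentials and URL are placeholders, and the import assumes the module lives at `oauth_provider.oauth` as in the path shown.

```python
from oauth_provider.oauth import (OAuthConsumer, OAuthToken, OAuthRequest,
                                  OAuthSignatureMethod_HMAC_SHA1)

consumer = OAuthConsumer(key='consumer-key', secret='consumer-secret')
token = OAuthToken(key='access-token', secret='access-token-secret')

request = OAuthRequest.from_consumer_and_token(
    consumer,
    token=token,
    http_method='GET',
    http_url='http://example.com/api/resource',
    parameters={'format': 'json'},
)

# Adds the oauth_signature_method and oauth_signature parameters in place.
request.sign_request(OAuthSignatureMethod_HMAC_SHA1(), consumer, token)

headers = request.to_header(realm='http://example.com/')  # Authorization header dict
signed_url = request.to_url()                             # or a signed GET URL
```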
fsschneider/DeepOBS | deepobs/tensorflow/datasets/two_d.py | 1 | 4790 |
# -*- coding: utf-8 -*-
"""2D DeepOBS dataset."""
import numpy as np
import tensorflow as tf
from . import dataset
class two_d(dataset.DataSet):
"""DeepOBS data set class to create two dimensional stochastic testproblems.
This toy data set consists of a fixed number (``train_size``) of iid draws
from two scalar zero-mean normal distributions with standard deviation
specified by the ``noise_level``.
Args:
batch_size (int): The mini-batch size to use. Note that, if ``batch_size``
is not a divider of the dataset size (``1000`` for train and test) the
remainder is dropped in each epoch (after shuffling).
train_size (int): Size of the training data set. This will also be used as
the train_eval and test set size. Defaults to ``1000``.
noise_level (float): Standard deviation of the data points around the mean.
The data points are drawn from a Gaussian distribution. Defaults to
``1.0``.
Attributes:
batch: A tuple ``(x, y)`` of tensors with random x and y that can be used to
create a noisy two dimensional testproblem. Executing these
tensors raises a ``tf.errors.OutOfRangeError`` after one epoch.
train_init_op: A tensorflow operation initializing the dataset for the
training phase.
train_eval_init_op: A tensorflow operation initializing the testproblem for
evaluating on training data.
test_init_op: A tensorflow operation initializing the testproblem for
evaluating on test data.
phase: A string-value tf.Variable that is set to "train", "train_eval" or
"test", depending on the current phase. This can be used by testproblems
to adapt their behavior to this phase.
"""
def __init__(self, batch_size, train_size=10000, noise_level=1.0):
"""Creates a new 2D instance.
Args:
batch_size (int): The mini-batch size to use. Note that, if ``batch_size``
is not a divider of the dataset size (1k for train and test) the
remainder is dropped in each epoch (after shuffling).
train_size (int): Size of the training data set. This will also be used as
the train_eval and test set size. Defaults to ``1000``.
noise_level (float): Standard deviation of the data points around the mean.
The data points are drawn from a Gaussian distribution. Defaults to
``1.0``.
"""
self._name = "two_d"
self._train_size = train_size
self._noise_level = noise_level
super(two_d, self).__init__(batch_size)
def _make_dataset(self, data_x, data_y, shuffle=True):
"""Creates a 2D data set (helper used by ``.make_*_datset`` below).
Args:
data_x (np.array): Numpy array containing the ``X`` values of the
data points.
data_y (np.array): Numpy array containing the ``y`` values of the
data points.
shuffle (bool): Switch to turn on or off shuffling of the data set.
Defaults to ``True``.
Returns:
A tf.data.Dataset yielding batches of 2D data.
"""
with tf.name_scope(self._name):
with tf.device('/cpu:0'):
data = tf.data.Dataset.from_tensor_slices((data_x, data_y))
if shuffle:
data = data.shuffle(buffer_size=20000)
data = data.batch(self._batch_size, drop_remainder=True)
data = data.prefetch(buffer_size=4)
return data
def _make_train_dataset(self):
"""Creates the 2D training dataset.
Returns:
A tf.data.Dataset instance with batches of training data.
"""
# Draw data from a random generator with a fixed seed to always get the
# same data.
rng = np.random.RandomState(42)
data_x = rng.normal(0.0, self._noise_level, self._train_size)
data_y = rng.normal(0.0, self._noise_level, self._train_size)
data_x = np.float32(data_x)
data_y = np.float32(data_y)
return self._make_dataset(data_x, data_y, shuffle=True)
def _make_train_eval_dataset(self):
"""Creates the 2D train eval dataset.
Returns:
A tf.data.Dataset instance with batches of training eval data.
"""
return self._train_dataset.take(self._train_size // self._batch_size)
def _make_test_dataset(self):
"""Creates the 2D test dataset.
Returns:
A tf.data.Dataset instance with batches of test data.
"""
# recovers the deterministic 2D function using zeros
data_x, data_y = np.zeros(self._train_size), np.zeros(self._train_size)
data_x = np.float32(data_x)
data_y = np.float32(data_y)
return self._make_dataset(data_x, data_y, shuffle=False)
| mit | -1,289,691,187,316,199,000 | 40.652174 | 81 | 0.632359 | false |
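A hedged sketch of consuming this data set in a TF1-style session; `batch` and `train_init_op` are the attributes promised by the class docstring above (they come from the `dataset.DataSet` base class, which is not shown here).

```python
import tensorflow as tf

from deepobs.tensorflow.datasets.two_d import two_d

data_set = two_d(batch_size=128, train_size=10000, noise_level=1.0)

with tf.Session() as sess:
    sess.run(data_set.train_init_op)          # switch to the training phase
    x_batch, y_batch = sess.run(data_set.batch)
    print(x_batch.shape, y_batch.shape)       # each (128,), one scalar draw per sample
```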
foursquare/pants | contrib/go/src/python/pants/contrib/go/tasks/go_test.py | 1 | 2117 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import filter
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.contrib.go.tasks.go_workspace_task import GoWorkspaceTask
class GoTest(GoWorkspaceTask):
"""Runs `go test` on Go packages.
To run a library's tests, GoTest only requires a Go workspace to be initialized
(see GoWorkspaceTask) with links to necessary source files. It does not require
GoCompile to first compile the library to be tested -- in fact, GoTest will ignore
any binaries in "$GOPATH/pkg/", because Go test files (which live in the package
they are testing) are ignored in normal compilation, so Go test must compile everything
from scratch.
"""
@classmethod
def register_options(cls, register):
super(GoTest, cls).register_options(register)
register('--build-and-test-flags', default='',
fingerprint=True,
help='Flags to pass in to `go test` tool.')
@classmethod
def supports_passthru_args(cls):
return True
def execute(self):
# Only executes the tests from the package specified by the target roots, so
# we don't run the tests for _all_ dependencies of said package.
targets = filter(self.is_local_src, self.context.target_roots)
for target in targets:
self.ensure_workspace(target)
self._go_test(target)
def _go_test(self, target):
args = (self.get_options().build_and_test_flags.split()
+ [target.import_path]
+ self.get_passthru_args())
result, go_cmd = self.go_dist.execute_go_cmd('test', gopath=self.get_gopath(target), args=args,
workunit_factory=self.context.new_workunit,
workunit_labels=[WorkUnitLabel.TEST])
if result != 0:
raise TaskError('{} failed with exit code {}'.format(go_cmd, result))
| apache-2.0 | -9,153,807,366,505,908,000 | 38.943396 | 99 | 0.683042 | false |
bodylabs/blmath | blmath/geometry/transform/correspondence.py | 1 | 2095 |
# FIXME -- move back to core
def apply_correspondence(correspondence_src, correspondence_dst, vertices):
"""
Apply a correspondence defined between two vertex sets to a new set.
Identifies a correspondence between `correspondence_src` and
`correspondence_dst` then applies that correspondence to `vertices`.
That is, `correspondence_src` is to `correspondence_dst` as `vertices` is
to [ return value ].
`correspondence_src` and `vertices` must have the same topology. The return
value will have the same topology as `correspondence_dst`. Arguments can
be passed as `chumpy` or `numpy` arrays.
The most common usecase here is establishing a relationship between an
alignment and a pointcloud or set of landmarks. The pointcloud or landmarks
can then be moved automatically as the alignment is adjusted (e.g. fit to a
different mesh, reposed, etc).
Args:
correspondence_src: The source vertices for the correspondence
correspondence_dst: The destination vertices for the correspondence
vertices: The vertices to map using the defined correspondence
Returns:
the mapped version of `vertices`
Example usage
-------------
>>> transformed_scan_vertices = apply_correspondence(
... correspondence_src=alignment.v,
... correspondence_dst=scan.v,
... vertices=reposed_alignment.v
... )
>>> transformed_scan = Mesh(v=transformed_scan_vertices, vc=scan.vc)
"""
import chumpy as ch
from bodylabs.mesh.landmarking.transformed_lm import TransformedCoeffs
from bodylabs.mesh.landmarking.transformed_lm import TransformedLms
ch_desired = any([
isinstance(correspondence_src, ch.Ch),
isinstance(correspondence_dst, ch.Ch),
isinstance(vertices, ch.Ch),
])
coeffs = TransformedCoeffs(
src_v=correspondence_src, dst_v=correspondence_dst)
transformed_vertices = TransformedLms(
transformed_coeffs=coeffs, src_v=vertices)
return transformed_vertices if ch_desired else transformed_vertices.r
| bsd-2-clause | -4,415,321,806,514,047,000 | 36.410714 | 79 | 0.705967 | false |
gerrit-review/gerrit | tools/js/bowerutil.py | 1 | 1488 |
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def hash_bower_component(hash_obj, path):
"""Hash the contents of a bower component directory.
This is a stable hash of a directory downloaded with `bower install`, minus
the .bower.json file, which is autogenerated each time by bower. Used in lieu
of hashing a zipfile of the contents, since zipfiles are difficult to hash in
a stable manner.
Args:
hash_obj: an open hash object, e.g. hashlib.sha1().
path: path to the directory to hash.
Returns:
The passed-in hash_obj.
"""
if not os.path.isdir(path):
raise ValueError('Not a directory: %s' % path)
path = os.path.abspath(path)
for root, dirs, files in os.walk(path):
dirs.sort()
for f in sorted(files):
if f == '.bower.json':
continue
p = os.path.join(root, f)
hash_obj.update(p[len(path)+1:])
hash_obj.update(open(p).read())
return hash_obj
| apache-2.0 | -1,230,258,163,134,067,000 | 31.347826 | 79 | 0.704301 | false |
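For illustration, a small sketch of calling the helper above on a hypothetical downloaded component directory (the path is a placeholder):

```python
import hashlib

# Hash a bower component directory deterministically, skipping .bower.json.
digest = hash_bower_component(hashlib.sha1(), 'bower_components/polymer').hexdigest()
print(digest)
```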
lepinkainen/pyfibot | pyfibot/modules/module_geoip.py | 1 | 1389 |
from __future__ import unicode_literals, print_function, division
import pygeoip
import os.path
import sys
import socket
try:
from modules.module_usertrack import get_table
user_track_available = True
except ImportError:
user_track_available = False
# http://dev.maxmind.com/geoip/legacy/geolite/
DATAFILE = os.path.join(sys.path[0], "GeoIP.dat")
# STANDARD = reload from disk
# MEMORY_CACHE = load to memory
# MMAP_CACHE = memory using mmap
gi4 = pygeoip.GeoIP(DATAFILE, pygeoip.MEMORY_CACHE)
def command_geoip(bot, user, channel, args):
"""Determine the user's country based on host or nick, if module_usertrack is used."""
if not args:
return bot.say(channel, "usage: .geoip HOST/NICK")
host = args
nick = None
if user_track_available:
table = get_table(bot, channel)
user = table.find_one(nick=args)
if user:
nick = user["nick"]
host = user["host"]
try:
country = gi4.country_name_by_name(host)
except socket.gaierror:
country = None
if country:
if nick:
return bot.say(channel, "%s (%s) is in %s" % (nick, host, country))
return bot.say(channel, "%s is in %s" % (host, country))
if nick:
return bot.say(channel, "Host not found for %s (%s)" % (nick, host))
return bot.say(channel, "Host not found for %s" % host)
| bsd-3-clause | -4,394,593,471,870,656,500 | 26.78 | 90 | 0.636429 | false |
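A minimal stand-alone sketch of the `pygeoip` lookup that `.geoip` wraps; the hostname is a placeholder and `GeoIP.dat` is assumed to be present, as in the module above.

```python
import pygeoip

gi = pygeoip.GeoIP('GeoIP.dat', pygeoip.MEMORY_CACHE)
country = gi.country_name_by_name('example.com')  # resolves the host, then looks it up
print(country or 'Host not found')
```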
llou/panopticon | panopticon/core/database.py | 1 | 7145 |
# database.py is part of Panopticon.
# Panopticon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Panopticon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Panopticon. If not, see <http://www.gnu.org/licenses/>.
from contextlib import contextmanager
from paramiko import RSAKey as pRSAKey, DSSKey
from sqlalchemy import create_engine, Column, DateTime, String, Integer, Text, Boolean
from sqlalchemy.orm import sessionmaker, relationship, backref
from sqlalchemy.sql import not_
from sqlalchemy.schema import ForeignKey
from sqlalchemy.pool import NullPool
from sqlalchemy.ext.declarative import declarative_base
from panopticon.core.util.database import key_value_property
Base = declarative_base()
class Value(Base):
__tablename__ = "values"
id = Column(Integer(), primary_key=True)
name = Column(String(1000))
value = Column(String(1000), nullable=True)
parent_id = Column(Integer, ForeignKey("values.id"), nullable=True)
values = relationship("Value", backref=backref('parent', remote_side=[id],
cascade="all"))
type = Column(String(20))
def __init__(self, name, _type, value="", parent_id=None):
self.name = name
self.type = _type
self.value = value
self.parent_id = parent_id
@property
def root(self):
return self.id == self.parent
class Service(Base):
__tablename__ = "services"
name = Column(String(50), primary_key=True)
class Computer(Base):
__tablename__ = "computers"
__table_args__ = {'sqlite_autoincrement':True}
name = Column(String(255), primary_key=True)
key_name = Column(String(100), ForeignKey('keys.name', onupdate="CASCADE"))
active = Column(Boolean(), default=True)
key = relationship("Key", backref=backref('computers'))
logs = relationship("Log", backref="computer", order_by="Log.time")
def __init__(self, name, key_name="", active=True):
self.name = name
self.active = active
self.key_name = key_name
class Log(Base):
__tablename__ = "logs"
id = Column('id', Integer, primary_key=True)
time = Column(DateTime())
level = Column(String(10))
message = Column(Text())
computer_name = Column(String(255), ForeignKey('computers.name',
ondelete="CASCADE", onupdate="CASCADE"), index=True)
service_name = Column(String(255), ForeignKey('services.name',
ondelete="CASCADE", onupdate="CASCADE"), index=True)
role_name = Column(String(255), index=True)
action_name = Column(String(255), index=True)
def __init__(self, time, level, message, computer_name="",
service_name="", role_name="", action_name=""):
self.time = time
self.level = level
self.message = message
self.computer_name = computer_name
class FileTrack(Base):
__tablename__ = "filetracks"
uid = Column("uid", String(32), primary_key=True)
_computer_name = Column("computer_name", String(255),ForeignKey('computers.name'))
_path = Column("path", Text())
modification_time = Column("modification_time", DateTime())
md5 = Column("md5", String(32))
def __init__(self, computer_name, path, modification_time, md5=""):
self.computer_name = computer_name
self.path = path
self.modification_time = modification_time
self.md5 = md5
self.update_uid()
@property
def computer_name(self):
return self._computer_name
@computer_name.setter
def computer_name(self, value):
self._computer_name = value
self.update_uid()
@property
def path(self):
return self._path
@path.setter
def path(self, value):
self._path = value
self.update_uid()
def update_uid(self):
if self.computer_name and self.path:
self.uid = "%s:%s" % (self.computer_name, self.path)
else:
self.uid = ""
class Key(Base):
__tablename__ = "keys"
name = Column(String(100), primary_key=True)
algorithm = Column(String(20))
v1 = Column(String(2048))
v2 = Column(String(2048))
v3 = Column(String(2048))
v4 = Column(String(2048))
key_class = None
key_vals = []
__mapper_args__ = {'polymorphic_on' : algorithm}
@classmethod
def build_from_paramiko_key(cls, name, p_key):
if isinstance(p_key, pRSAKey):
return RSAKey(name, p_key.e, p_key.n)
elif isinstance(p_key, DSSKey):
return DSAKey(name, p_key.p, p_key.q, p_key.g, p_key.y)
else:
raise Exception("Not valid key")
def __init__(self, name, algorithm, v1, v2, v3, v4):
self.name = name
self.algorithm = algorithm
self.v1 = v1
self.v2 = v2
self.v3 = v3
self.v4 = v4
def get_paramiko_key(self):
vals = [ getattr(self, x) for x in self.key_vals ]
return self.key_class(vals=vals)
class RSAKey(Key):
__mapper_args__ = {'polymorphic_identity':'rsa'}
key_class = pRSAKey
key_vals = [ 'e', 'n' ]
def __init__(self, name, e, n):
self.name = name
self.algorithm = "rsa"
self.e = e
self.n = n
e = key_value_property("v1")
n = key_value_property("v2")
class DSAKey(Key):
__mapper_args__ = {'polymorphic_identity':'dsa'}
key_class = DSSKey
key_vals = [ 'p', 'q', 'g', 'y' ]
def __init__(self, name, p, q, g, y):
self.name = name
self.algorithm = "dsa"
self.p = p
self.q = q
self.g = g
self.y = y
p = key_value_property("v1")
q = key_value_property("v2")
g = key_value_property("v3")
y = key_value_property("v4")
class PanopticonDB(object):
def __init__(self, panopticon, engine=None):
self.panopticon = panopticon
self.engine = engine if engine is not None else create_engine(panopticon.db_url, poolclass=NullPool)
Base.metadata.create_all(self.engine)
self.Session = sessionmaker(bind=self.engine)
self.sync()
@contextmanager
def get_session(self):
session = self.Session()
yield session
session.commit()
session.close()
def purge(self,sure=False):
if sure:
Base.metadata.drop_all(self.engine)
Base.metadata.create_all(self.engine)
def sync(self):
computer_names = [ x[0] for x in self.panopticon.computers ]
with self.get_session() as session:
session.execute(Computer.__table__.update().where(Computer.name.in_(computer_names)).values(active=True))
            session.execute(Computer.__table__.update().where(not_(Computer.name.in_(computer_names))).values(active=False))  # deactivate computers no longer present
| gpl-3.0 | 3,336,946,915,647,172,000 | 31.775229 | 123 | 0.626312 | false |
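A hedged sketch of exercising the ORM layer above against an in-memory SQLite engine; the panopticon object is a stub providing only the attributes `PanopticonDB` actually touches, and the computer name is a placeholder.

```python
from sqlalchemy import create_engine

class StubPanopticon(object):
    db_url = 'sqlite://'                 # unused here because an engine is passed in
    computers = [('fish-rig-01', None)]  # (name, ...) tuples; only the name is read

db = PanopticonDB(StubPanopticon(), engine=create_engine('sqlite://'))

with db.get_session() as session:
    session.add(Computer('fish-rig-01'))

with db.get_session() as session:
    print(session.query(Computer).count())  # -> 1
```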
i-namekawa/TopSideMonitor | plotting.py | 1 | 37323 |
import os, sys, time
from glob import glob
import cv2
from pylab import *
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.backends.backend_pdf import PdfPages
matplotlib.rcParams['figure.facecolor'] = 'w'
from scipy.signal import argrelextrema
import scipy.stats as stats
import scipy.io as sio
from scipy import signal
from xlwt import Workbook
# specify these in mm to match your behavior chamber.
CHMAMBER_LENGTH=235
WATER_HIGHT=40
# quick plot should also show xy_within and location_one_third etc
# summary PDF: handle exception when a pickle file missing some fish in other pickle file
## these three taken from http://stackoverflow.com/a/18420730/566035
def strided_sliding_std_dev(data, radius=5):
windowed = rolling_window(data, (2*radius, 2*radius))
shape = windowed.shape
windowed = windowed.reshape(shape[0], shape[1], -1)
return windowed.std(axis=-1)
def rolling_window(a, window):
"""Takes a numpy array *a* and a sequence of (or single) *window* lengths
and returns a view of *a* that represents a moving window."""
if not hasattr(window, '__iter__'):
return rolling_window_lastaxis(a, window)
for i, win in enumerate(window):
if win > 1:
a = a.swapaxes(i, -1)
a = rolling_window_lastaxis(a, win)
a = a.swapaxes(-2, i)
return a
def rolling_window_lastaxis(a, window):
"""Directly taken from Erik Rigtorp's post to numpy-discussion.
<http://www.mail-archive.com/[email protected]/msg29450.html>"""
if window < 1:
raise ValueError, "`window` must be at least 1."
if window > a.shape[-1]:
raise ValueError, "`window` is too long."
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
## stealing ends here... //
def filterheadxy(headx,heady,thrs_denom=10):
b, a = signal.butter(8, 0.125)
dhy = np.abs(np.hstack((0, np.diff(heady,1))))
thrs = np.nanstd(dhy)/thrs_denom
ind2remove = dhy>thrs
headx[ind2remove] = np.nan
heady[ind2remove] = np.nan
headx = interp_nan(headx)
heady = interp_nan(heady)
headx = signal.filtfilt(b, a, headx, padlen=150)
heady = signal.filtfilt(b, a, heady, padlen=150)
return headx,heady
def smoothRad(theta, thrs=np.pi/4*3):
jumps = (np.diff(theta) > thrs).nonzero()[0]
print 'jumps.size', jumps.size
while jumps.size:
# print '%d/%d' % (jumps[0], theta.size)
theta[jumps+1] -= np.pi
jumps = (np.diff(theta) > thrs).nonzero()[0]
return theta
def datadct2array(data, key1, key2):
# put these in a MATLAB CELL
trialN = len(data[key1][key2])
matchedUSnameP = np.zeros((trialN,), dtype=np.object)
fnameP = np.zeros((trialN,), dtype=np.object)
# others to append to a list
eventsP = []
speed3DP = []
movingSTDP = []
d2inflowP = []
xP, yP, zP = [], [], []
XP, YP, ZP = [], [], []
ringpixelsP = []
peaks_withinP = []
swimdir_withinP = []
xy_withinP = []
location_one_thirdP = []
dtheta_shapeP = []
dtheta_velP = []
turns_shapeP = []
turns_velP = []
for n, dct in enumerate(data[key1][key2]):
# MATLAB CELL
matchedUSnameP[n] = dct['matchedUSname']
fnameP[n] = dct['fname']
# 2D array
eventsP.append([ele if type(ele) is not list else ele[0] for ele in dct['events']])
speed3DP.append(dct['speed3D'])
movingSTDP.append(dct['movingSTD'])
d2inflowP.append(dct['d2inflow'])
xP.append(dct['x'])
yP.append(dct['y'])
zP.append(dct['z'])
XP.append(dct['X'])
YP.append(dct['Y'])
ZP.append(dct['Z'])
ringpixelsP.append(dct['ringpixels'])
peaks_withinP.append(dct['peaks_within'])
swimdir_withinP.append(dct['swimdir_within'])
xy_withinP.append(dct['xy_within'])
location_one_thirdP.append(dct['location_one_third'])
dtheta_shapeP.append(dct['dtheta_shape'])
dtheta_velP.append(dct['dtheta_vel'])
turns_shapeP.append(dct['turns_shape'])
turns_velP.append(dct['turns_vel'])
TVroi = np.array(dct['TVroi'])
SVroi = np.array(dct['SVroi'])
return matchedUSnameP, fnameP, np.array(eventsP), np.array(speed3DP), np.array(d2inflowP), \
np.array(xP), np.array(yP), np.array(zP), np.array(XP), np.array(YP), np.array(ZP), \
np.array(ringpixelsP), np.array(peaks_withinP), np.array(swimdir_withinP), \
np.array(xy_withinP), np.array(dtheta_shapeP), np.array(dtheta_velP), \
np.array(turns_shapeP), np.array(turns_velP), TVroi, SVroi
def pickle2mat(fp, data=None):
# fp : full path to pickle file
# data : option to provide data to skip np.load(fp)
if not data:
data = np.load(fp)
for key1 in data.keys():
for key2 in data[key1].keys():
matchedUSname, fname, events, speed3D, d2inflow, x, y, z, X, Y, Z, \
ringpixels, peaks_within, swimdir_within, xy_within, dtheta_shape, dtheta_vel, \
turns_shape, turns_vel, TVroi, SVroi = datadct2array(data, key1, key2)
datadict = {
'matchedUSname' : matchedUSname,
'fname' : fname,
'events' : events,
'speed3D' : speed3D,
'd2inflow' : d2inflow,
'x' : x,
'y' : y,
'z' : z,
'X' : X,
'Y' : Y,
'Z' : Z,
'ringpixels' : ringpixels,
'peaks_within' : peaks_within,
'swimdir_within' : swimdir_within,
'xy_within' : xy_within,
'dtheta_shape' : dtheta_shape,
'dtheta_vel' : dtheta_vel,
'turns_shape' : turns_shape,
'turns_vel' : turns_vel,
'TVroi' : TVroi,
'SVroi' : SVroi,
}
outfp = '%s_%s_%s.mat' % (fp[:-7],key1,key2)
sio.savemat(outfp, datadict, oned_as='row', do_compression=True)
def interp_nan(x):
'''
    Replace nan by interpolation
http://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array
'''
ok = -np.isnan(x)
if (ok == False).all():
return x
else:
xp = ok.ravel().nonzero()[0]
fp = x[ok]
_x = np.isnan(x).ravel().nonzero()[0]
x[-ok] = np.interp(_x, xp, fp)
return x
def polytest(x,y,rx,ry,rw,rh,rang):
points=cv2.ellipse2Poly(
(rx,ry),
axes=(rw/2,rh/2),
angle=rang,
arcStart=0,
arcEnd=360,
delta=3
)
return cv2.pointPolygonTest(np.array(points), (x,y), measureDist=1)
def depthCorrection(z,x,TVx1,TVx2,SVy1,SVy2,SVy3):
z0 = z - SVy1
x0 = x - TVx1
mid = (SVy2-SVy1)/2
adj = (z0 - mid) / (SVy2-SVy1) * (SVy2-SVy3) * (1-(x0)/float(TVx2-TVx1))
return z0 + adj + SVy1 # back to abs coord
def putNp2xls(array, ws):
for r, row in enumerate(array):
for c, val in enumerate(row):
ws.write(r, c, val)
def drawLines(mi, ma, events, fps=30.0):
CS, USs, preRange = events
plot([CS-preRange, CS-preRange], [mi,ma], '--c') # 2 min prior odor
plot([CS , CS ], [mi,ma], '--g', linewidth=2) # CS onset
if USs:
if len(USs) > 3:
colors = 'r' * len(USs)
else:
colors = [_ for _ in ['r','b','c'][:len(USs)]]
for c,us in zip(colors, USs):
plot([us, us],[mi,ma], linestyle='--', color=c, linewidth=2) # US onset
plot([USs[0]+preRange/2,USs[0]+preRange/2], [mi,ma], linestyle='--', color=c, linewidth=2) # end of US window
xtck = np.arange(0, max(CS+preRange, max(USs)), 0.5*60*fps) # every 0.5 min tick
else:
xtck = np.arange(0, CS+preRange, 0.5*60*fps) # every 0.5 min tick
xticks(xtck, xtck/fps/60)
gca().xaxis.set_minor_locator(MultipleLocator(5*fps)) # 5 s minor ticks
def approachevents(x,y,z, ringpolyTVArray, ringpolySVArray, fishlength=134, thrs=None):
'''
    fishlength: some old scripts may call this with fishlength
thrs: multitrack GUI provides this by ringAppearochLevel spin control.
can be an numpy array (to track water level change etc)
'''
smoothedz = np.convolve(np.hanning(10)/np.hanning(10).sum(), z, 'same')
peaks = argrelextrema(smoothedz, np.less)[0] # less because 0 is top in image.
# now filter peaks by height.
ringLevel = ringpolySVArray[:,1]
if thrs is None:
thrs = ringLevel+fishlength/2
if type(thrs) == int: # can be numpy array or int
thrs = ringLevel.mean() + thrs
peaks = peaks[ z[peaks] < thrs ]
else: # numpy array should be ready to use
peaks = peaks[ z[peaks] < thrs[peaks] ]
# now filter out by TVringCenter
peaks_within = get_withinring(ringpolyTVArray, peaks, x, y)
return smoothedz, peaks_within
def get_withinring(ringpolyTVArray, timepoints, x, y):
rx = ringpolyTVArray[:,0].astype(np.int)
ry = ringpolyTVArray[:,1].astype(np.int)
rw = ringpolyTVArray[:,2].astype(np.int)
rh = ringpolyTVArray[:,3].astype(np.int)
rang = ringpolyTVArray[:,4].astype(np.int)
# poly test
peaks_within = []
for p in timepoints:
points=cv2.ellipse2Poly(
(rx[p],ry[p]),
axes=(rw[p]/2,rh[p]/2),
angle=rang[p],
arcStart=0,
arcEnd=360,
delta=3
)
inout = cv2.pointPolygonTest(np.array(points), (x[p],y[p]), measureDist=1)
if inout > 0:
peaks_within.append(p)
return peaks_within
def location_ring(x,y,ringpolyTVArray):
rx = ringpolyTVArray[:,0].astype(np.int)
ry = ringpolyTVArray[:,1].astype(np.int)
rw = ringpolyTVArray[:,2].astype(np.int)
rh = ringpolyTVArray[:,3].astype(np.int)
d2ringcenter = np.sqrt((x-rx)**2 + (y-ry)**2)
# filter by radius 20% buffer in case the ring moves around
indices = (d2ringcenter < 1.2*max(rw.max(), rh.max())).nonzero()[0]
xy_within = get_withinring(ringpolyTVArray, indices, x, y)
return xy_within
def swimdir_analysis(x,y,z,ringpolyTVArray,ringpolySVArray,TVx1,TVy1,TVx2,TVy2,fps=30.0):
# smoothing
# z = np.convolve(np.hanning(16)/np.hanning(16).sum(), z, 'same')
# two cameras have different zoom settings. So, distance per pixel is different. But, for
# swim direction, it does not matter how much x,y are compressed relative to z.
# ring z level from SV
rz = ringpolySVArray[:,1].astype(np.int)
# ring all other params from TV
rx = ringpolyTVArray[:,0].astype(np.int)
ry = ringpolyTVArray[:,1].astype(np.int)
rw = ringpolyTVArray[:,2].astype(np.int)
rh = ringpolyTVArray[:,3].astype(np.int)
rang = ringpolyTVArray[:,4].astype(np.int)
speed3D = np.sqrt( np.diff(x)**2 + np.diff(y)**2 + np.diff(z)**2 )
speed3D = np.hstack(([0], speed3D))
# line in 3D http://tutorial.math.lamar.edu/Classes/CalcIII/EqnsOfLines.aspx
# x-x0 y-y0 z-z0
# ---- = ---- = ----
# a b c
# solve them for z = rz. x0,y0,z0 are tvx, tvy, svy
# x = (a * (rz-z)) / c + x0
dt = 3 # define slope as diff between current and dt frame before
a = np.hstack( (np.ones(dt), x[dt:]-x[:-dt]) )
b = np.hstack( (np.ones(dt), y[dt:]-y[:-dt]) )
c = np.hstack( (np.ones(dt), z[dt:]-z[:-dt]) )
c[c==0] = np.nan # avoid zero division
water_x = (a * (rz-z) / c) + x
water_y = (b * (rz-z) / c) + y
upwards = c<-2/30.0*fps # not accurate when c is small or negative
xok = (TVx1 < water_x) & (water_x < TVx2)
yok = (TVy1 < water_y) & (water_y < TVy2)
    filtered = upwards & xok & yok  # & ~np.isinf(water_x) & ~np.isinf(water_y)
    water_x[~filtered] = np.nan
    water_y[~filtered] = np.nan
# figure()
# ax = subplot(111)
# ax.imshow(npData['TVbg'], cmap=cm.gray) # clip out from TVx1,TVy1
# ax.plot(x-TVx1, y-TVy1, 'c')
# ax.plot(water_x-TVx1, water_y-TVy1, 'r.')
# xlim([0, TVx2-TVx1]); ylim([TVy2-TVy1, 0])
# draw(); show()
SwimDir = []
for n in filtered.nonzero()[0]:
inout = polytest(water_x[n],water_y[n],rx[n],ry[n],rw[n],rh[n],rang[n])
SwimDir.append((n, inout, speed3D[n])) # inout>0 are inside
return SwimDir, water_x, water_y
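# Worked example of the line projection above (hypothetical numbers): with the fish at
# (x, y, z) = (100, 100, 300), displacement over dt frames (a, b, c) = (3, 0, -6) and
# ring level rz = 270, the extrapolated crossing of the ring plane is
#   water_x = a*(rz - z)/c + x = 3*(-30)/(-6) + 100 = 115,   water_y = 100,
# i.e. the fish would reach the ring depth 15 px ahead of its current top-view position;
# such frames are kept only if c indicates upward motion and the point stays inside the
# top-view ROI.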
def plot_eachTr(events, x, y, z, inflowpos, ringpixels, peaks_within, swimdir_within=None,
pp=None, _title=None, fps=30.0, inmm=False):
CS, USs, preRange = events
# preRange = 3600 2 min prior and 1 min after CS. +900 for 0.5 min
if USs:
xmin, xmax = CS-preRange-10*fps, USs[0]+preRange/2+10*fps
else:
xmin, xmax = CS-preRange-10*fps, CS+preRange/2+(23+10)*fps
fig = figure(figsize=(12,8), facecolor='w')
subplot(511) # Swimming speed
speed3D = np.sqrt( np.diff(x)**2 + np.diff(y)**2 + np.diff(z)**2 )
drawLines(np.nanmin(speed3D), np.nanmax(speed3D), events, fps) # go behind
plot(speed3D)
movingSTD = np.append( np.zeros(fps*10), strided_sliding_std_dev(speed3D, fps*10) )
plot(movingSTD, linewidth=2)
plot(np.ones_like(speed3D) * speed3D.std()*6, '-.', color='gray')
ylim([-5, speed3D[xmin:xmax].max()])
xlim([xmin,xmax]); title(_title)
if inmm:
ylabel('Speed 3D (mm),\n6SD thr');
else:
ylabel('Speed 3D, 6SD thr');
ax = subplot(512) # z level
drawLines(z.min(), z.max(), events)
plot(z, 'b')
pkx = peaks_within.nonzero()[0]
if inmm:
plot(pkx, peaks_within[pkx]*z[xmin:xmax].max()*0.97, 'mo')
if swimdir_within is not None:
___x = swimdir_within.nonzero()[0]
plot(___x, swimdir_within[___x]*z[xmin:xmax].max()*0.96, 'g+')
ylim([z[xmin:xmax].min()*0.95, z[xmin:xmax].max()])
xlim([xmin,xmax]); ylabel('Z (mm)')
else:
plot(pkx, peaks_within[pkx]*z[xmin:xmax].min()*0.97, 'mo')
if swimdir_within is not None:
___x = swimdir_within.nonzero()[0]
plot(___x, swimdir_within[___x]*z[xmin:xmax].min()*0.96, 'g+')
ylim([z[xmin:xmax].min()*0.95, z[xmin:xmax].max()])
ax.invert_yaxis(); xlim([xmin,xmax]); ylabel('z')
subplot(513) # x
drawLines(x.min(), x.max(), events)
plot(x, 'b')
plot(y, 'g')
xlim([xmin,xmax]); ylabel('x,y')
subplot(514) # Distance to the inflow tube
xin, yin, zin = inflowpos
d2inflow = np.sqrt((x-xin) ** 2 + (y-yin) ** 2 + (z-zin) ** 2 )
drawLines(d2inflow.min(), d2inflow.max(), events)
plot(d2inflow)
ylim([d2inflow[xmin:xmax].min(), d2inflow[xmin:xmax].max()])
xlim([xmin,xmax]); ylabel('distance to\ninflow tube')
subplot(515) # ringpixels: it seems i never considered TV x,y for this
rpmax, rpmin = np.nanmax(ringpixels[xmin:xmax]), np.nanmin(ringpixels[xmin:xmax])
drawLines(rpmin, rpmax, events)
plot(ringpixels)
plot(pkx, peaks_within[pkx]*rpmax*1.06, 'mo')
if swimdir_within is not None:
plot(___x, swimdir_within[___x]*rpmax*1.15, 'g+')
ylim([-100, rpmax*1.2])
xlim([xmin,xmax]); ylabel('ringpixels')
tight_layout()
if pp:
fig.savefig(pp, format='pdf')
rng = np.arange(CS-preRange, CS+preRange, dtype=np.int)
return speed3D[rng], movingSTD[rng], d2inflow[rng], ringpixels[rng]
def plot_turnrates(events, dthetasum_shape,dthetasum_vel,turns_shape,turns_vel,
pp=None, _title=None, thrs=np.pi/4*(133.33333333333334/120), fps=30.0):
CS, USs, preRange = events
# preRange = 3600 2 min prior and 1 min after CS. +900 for 0.5 min
if USs:
xmin, xmax = CS-preRange-10*fps, USs[0]+preRange/2+10*fps
else:
xmin, xmax = CS-preRange-10*fps, CS+preRange/2+(23+10)*fps
fig = figure(figsize=(12,8), facecolor='w')
subplot(211)
drawLines(dthetasum_shape.min(), dthetasum_shape.max(), events)
plot(np.ones_like(dthetasum_shape)*thrs,'gray',linestyle='--')
plot(-np.ones_like(dthetasum_shape)*thrs,'gray',linestyle='--')
plot(dthetasum_shape)
dmax = dthetasum_shape[xmin:xmax].max()
plot(turns_shape, (0.5+dmax)*np.ones_like(turns_shape), 'o')
temp = np.zeros_like(dthetasum_shape)
temp[turns_shape] = 1
shape_cumsum = np.cumsum(temp)
shape_cumsum -= shape_cumsum[xmin]
plot( shape_cumsum / shape_cumsum[xmax] * (dmax-dthetasum_shape.min()) + dthetasum_shape.min())
xlim([xmin,xmax]); ylabel('Shape based'); title('Orientation change per 4 frames: ' + _title)
ylim([dthetasum_shape[xmin:xmax].min()-1, dmax+1])
subplot(212)
drawLines(dthetasum_vel.min(), dthetasum_vel.max(), events)
plot(np.ones_like(dthetasum_vel)*thrs,'gray',linestyle='--')
plot(-np.ones_like(dthetasum_vel)*thrs,'gray',linestyle='--')
plot(dthetasum_vel)
dmax = dthetasum_vel[xmin:xmax].max()
plot(turns_vel, (0.5+dmax)*np.ones_like(turns_vel), 'o')
temp = np.zeros_like(dthetasum_vel)
temp[turns_vel] = 1
vel_cumsum = np.cumsum(temp)
vel_cumsum -= vel_cumsum[xmin]
    plot( vel_cumsum / vel_cumsum[xmax] * (dmax-dthetasum_vel.min()) + dthetasum_vel.min())
ylim([dthetasum_vel[xmin:xmax].min()-1, dmax+1])
xlim([xmin,xmax]); ylabel('Velocity based')
tight_layout()
if pp:
fig.savefig(pp, format='pdf')
def trajectory(x, y, z, rng, ax, _xlim=[0,640], _ylim=[480,480+300], _zlim=[150,340],
color='b', fps=30.0, ringpolygon=None):
ax.plot(x[rng],y[rng],z[rng], color=color)
ax.view_init(azim=-75, elev=-180+15)
if ringpolygon:
rx, ry, rz = ringpolygon
ax.plot(rx, ry, rz, color='gray')
ax.set_xlim(_xlim[0],_xlim[1])
ax.set_ylim(_ylim[0],_ylim[1])
ax.set_zlim(_zlim[0],_zlim[1])
title(("(%2.1f min to %2.1f min)" % (rng[0]/fps/60.0,(rng[-1]+1)/60.0/fps)))
draw()
def plotTrajectory(x, y, z, events, _xlim=None, _ylim=None, _zlim=None, fps=30.0, pp=None, ringpolygon=None):
CS, USs, preRange = events
rng1 = np.arange(CS-preRange, CS-preRange/2, dtype=int)
rng2 = np.arange(CS-preRange/2, CS, dtype=int)
if USs:
rng3 = np.arange(CS, min(USs), dtype=int)
rng4 = np.arange(min(USs), min(USs)+preRange/2, dtype=int)
combined = np.hstack((rng1,rng2,rng3,rng4))
else:
combined = np.hstack((rng1,rng2))
if _xlim is None:
_xlim = map( int, ( x[combined].min(), x[combined].max() ) )
if _ylim is None:
_ylim = map( int, ( y[combined].min(), y[combined].max() ) )
if _zlim is None:
_zlim = map( int, ( z[combined].min(), z[combined].max() ) )
if ringpolygon:
_zlim[0] = min( _zlim[0], int(ringpolygon[2][0]) )
fig3D = plt.figure(figsize=(12,8), facecolor='w')
ax = fig3D.add_subplot(221, projection='3d'); trajectory(x,y,z,rng1,ax,_xlim,_ylim,_zlim,'c',fps,ringpolygon)
ax = fig3D.add_subplot(222, projection='3d'); trajectory(x,y,z,rng2,ax,_xlim,_ylim,_zlim,'c',fps,ringpolygon)
if USs:
ax = fig3D.add_subplot(223, projection='3d'); trajectory(x,y,z,rng3,ax,_xlim,_ylim,_zlim,'g',fps,ringpolygon)
ax = fig3D.add_subplot(224, projection='3d'); trajectory(x,y,z,rng4,ax,_xlim,_ylim,_zlim,'r',fps,ringpolygon)
tight_layout()
if pp:
fig3D.savefig(pp, format='pdf')
def add2DataAndPlot(fp, fish, data, createPDF):
if createPDF:
pp = PdfPages(fp[:-7]+'_'+fish+'.pdf')
else:
pp = None
params = np.load(fp)
fname = os.path.basename(fp).split('.')[0] + '.avi'
dirname = os.path.dirname(fp)
preRange = params[(fname, 'mog')]['preRange']
fps = params[(fname, 'mog')]['fps']
TVx1 = params[(fname, fish)]['TVx1']
TVy1 = params[(fname, fish)]['TVy1']
TVx2 = params[(fname, fish)]['TVx2']
TVy2 = params[(fname, fish)]['TVy2']
SVx1 = params[(fname, fish)]['SVx1']
SVx2 = params[(fname, fish)]['SVx2']
SVx3 = params[(fname, fish)]['SVx3']
SVy1 = params[(fname, fish)]['SVy1']
SVy2 = params[(fname, fish)]['SVy2']
SVy3 = params[(fname, fish)]['SVy3']
ringAppearochLevel = params[(fname, fish)]['ringAppearochLevel']
_npz = os.path.join(dirname, os.path.join('%s_%s.npz' % (fname[:-4], fish)))
# if os.path.exists(_npz):
npData = np.load(_npz)
tvx = npData['TVtracking'][:,0] # x with nan
tvy = npData['TVtracking'][:,1] # y
headx = npData['TVtracking'][:,3] # headx
heady = npData['TVtracking'][:,4] # heady
svy = npData['SVtracking'][:,1] # z
InflowTubeTVArray = npData['InflowTubeTVArray']
InflowTubeSVArray = npData['InflowTubeSVArray']
inflowpos = InflowTubeTVArray[:,0], InflowTubeTVArray[:,1], InflowTubeSVArray[:,1]
ringpixels = npData['ringpixel']
ringpolyTVArray = npData['ringpolyTVArray']
ringpolySVArray = npData['ringpolySVArray']
TVbg = npData['TVbg']
print os.path.basename(_npz), 'loaded.'
x,y,z = map(interp_nan, [tvx,tvy,svy])
# z level correction by depth (x)
z = depthCorrection(z,x,TVx1,TVx2,SVy1,SVy2,SVy3)
smoothedz, peaks_within = approachevents(x, y, z,
ringpolyTVArray, ringpolySVArray, thrs=ringAppearochLevel)
# convert to numpy array from list
temp = np.zeros_like(x)
temp[peaks_within] = 1
peaks_within = temp
# normalize to mm
longaxis = float(max((TVx2-TVx1), (TVy2-TVy1))) # before rotation H is applied they are orthogonal
waterlevel = float(SVy2-SVy1)
X = (x-TVx1) / longaxis * CHMAMBER_LENGTH
Y = (TVy2-y) / longaxis * CHMAMBER_LENGTH
Z = (SVy2-z) / waterlevel * WATER_HIGHT # bottom of chamber = 0, higher more positive
inflowpos_mm = ((inflowpos[0]-TVx1) / longaxis * CHMAMBER_LENGTH,
(TVy2-inflowpos[1]) / longaxis * CHMAMBER_LENGTH,
(SVy2-inflowpos[2]) / waterlevel * WATER_HIGHT )
# do the swim direction analysis here
swimdir, water_x, water_y = swimdir_analysis(x,y,z,
ringpolyTVArray,ringpolySVArray,TVx1,TVy1,TVx2,TVy2,fps)
# all of swimdir are within ROI (frame#, inout, speed) but not necessary within ring
sdir = np.array(swimdir)
withinRing = sdir[:,1]>0 # inout>0 are inside ring
temp = np.zeros_like(x)
temp[ sdir[withinRing,0].astype(int) ] = 1
swimdir_within = temp
# location_ring
xy_within = location_ring(x,y, ringpolyTVArray)
temp = np.zeros_like(x)
temp[xy_within] = 1
xy_within = temp
# location_one_third
if (TVx2-TVx1) > (TVy2-TVy1):
if np.abs(np.arange(TVx1, longaxis+TVx1, longaxis/3) + longaxis/6 - inflowpos[0].mean()).argmin() == 2:
location_one_third = x-TVx1 > longaxis/3*2
else:
location_one_third = x < longaxis/3
else:
if np.abs(np.arange(TVy1, longaxis+TVy1, longaxis/3) + longaxis/6 - inflowpos[1].mean()).argmin() == 2:
location_one_third = y-TVy1 > longaxis/3*2
else:
location_one_third = y < longaxis/3
# turn rate analysis (shape based)
heady, headx = map(interp_nan, [heady, headx])
headx, heady = filterheadxy(headx, heady)
dy = heady - y
dx = headx - x
theta_shape = np.arctan2(dy, dx)
# velocity based
cx, cy = filterheadxy(x.copy(), y.copy()) # centroid x,y
vx = np.append(0, np.diff(cx))
vy = np.append(0, np.diff(cy))
theta_vel = np.arctan2(vy, vx)
# prepare ringpolygon for trajectory plot
rx, ry, rw, rh, rang = ringpolyTVArray.mean(axis=0).astype(int) # use mm ver above
rz = ringpolySVArray.mean(axis=0)[1].astype(int)
RX = (rx-TVx1) / longaxis * CHMAMBER_LENGTH
RY = (TVy2-ry) / longaxis * CHMAMBER_LENGTH
RW = rw / longaxis * CHMAMBER_LENGTH / 2
RH = rh / longaxis * CHMAMBER_LENGTH / 2
RZ = (SVy2-rz) / waterlevel * WATER_HIGHT
points = cv2.ellipse2Poly(
(RX.astype(int),RY.astype(int)),
axes=(RW.astype(int),RH.astype(int)),
angle=rang,
arcStart=0,
arcEnd=360,
delta=3
)
ringpolygon = [points[:,0], points[:,1], np.ones(points.shape[0]) * RZ]
eventTypeKeys = params[(fname, fish)]['EventData'].keys()
CSs = [_ for _ in eventTypeKeys if _.startswith('CS')]
USs = [_ for _ in eventTypeKeys if _.startswith('US')]
# print CSs, USs
# events
for CS in CSs:
CS_Timings = params[(fname, fish)]['EventData'][CS]
CS_Timings.sort()
# initialize when needed
if CS not in data[fish].keys():
data[fish][CS] = []
# now look around for US after it within preRange
for t in CS_Timings:
tr = len(data[fish][CS])+1
rng = np.arange(t-preRange, t+preRange, dtype=np.int)
matchedUSname = None
for us in USs:
us_Timings = params[(fname, fish)]['EventData'][us]
matched = [_ for _ in us_Timings if t-preRange < _ < t+preRange]
if matched:
events = [t, matched, preRange] # ex. CS+
matchedUSname = us
break
else:
continue
_title = '(%s, %s) trial#%02d %s (%s)' % (CS, matchedUSname[0], tr, fname, fish)
print _title, events
_speed3D, _movingSTD, _d2inflow, _ringpixels = plot_eachTr(events, X, Y, Z, inflowpos_mm,
ringpixels, peaks_within, swimdir_within, pp, _title, fps, inmm=True)
# 3d trajectory
_xlim = (0, CHMAMBER_LENGTH)
_zlim = (RZ.max(),0)
plotTrajectory(X, Y, Z, events, _xlim=_xlim, _zlim=_zlim, fps=fps, pp=pp, ringpolygon=ringpolygon)
# turn rate analysis
# shape based
theta_shape[rng] = smoothRad(theta_shape[rng].copy(), thrs=np.pi/2)
dtheta_shape = np.append(0, np.diff(theta_shape)) # full length
kernel = np.ones(4)
dthetasum_shape = np.convolve(dtheta_shape, kernel, 'same')
# 4 frames = 1000/30.0*4 = 133.3 ms
thrs = (np.pi / 2) * (133.33333333333334/120) # Braubach et al 2009 90 degree in 120 ms
peaks_shape = argrelextrema(abs(dthetasum_shape), np.greater)[0]
turns_shape = peaks_shape[ (abs(dthetasum_shape[peaks_shape]) > thrs).nonzero()[0] ]
# velocity based
theta_vel[rng] = smoothRad(theta_vel[rng].copy(), thrs=np.pi/2)
dtheta_vel = np.append(0, np.diff(theta_vel))
dthetasum_vel = np.convolve(dtheta_vel, kernel, 'same')
peaks_vel = argrelextrema(abs(dthetasum_vel), np.greater)[0]
turns_vel = peaks_vel[ (abs(dthetasum_vel[peaks_vel]) > thrs).nonzero()[0] ]
plot_turnrates(events, dthetasum_shape, dthetasum_vel, turns_shape, turns_vel, pp, _title, fps=fps)
_temp = np.zeros_like(dtheta_shape)
_temp[turns_shape] = 1
turns_shape_array = _temp
_temp = np.zeros_like(dtheta_vel)
_temp[turns_vel] = 1
turns_vel_array = _temp
# plot swim direction analysis
fig = figure(figsize=(12,8), facecolor='w')
ax1 = subplot(211)
ax1.imshow(TVbg, cmap=cm.gray) # TVbg is clip out of ROI
ax1.plot(x[rng]-TVx1, y[rng]-TVy1, 'gray')
ax1.plot(water_x[t-preRange:t]-TVx1, water_y[t-preRange:t]-TVy1, 'c.')
if matched:
ax1.plot( water_x[t:matched[0]]-TVx1,
water_y[t:matched[0]]-TVy1, 'g.')
ax1.plot( water_x[matched[0]:matched[0]+preRange/4]-TVx1,
water_y[matched[0]:matched[0]+preRange/4]-TVy1, 'r.')
xlim([0, TVx2-TVx1]); ylim([TVy2-TVy1, 0])
title(_title)
ax2 = subplot(212)
ax2.plot( swimdir_within )
ax2.plot( peaks_within*1.15-0.1, 'mo' )
if matched:
xmin, xmax = t-preRange-10*fps, matched[0]+preRange/4
else:
xmin, xmax = t-preRange-10*fps, t+preRange/2+10*fps
gzcs = np.cumsum(swimdir_within)
gzcs -= gzcs[xmin]
ax2.plot( gzcs/gzcs[xmax] )
drawLines(0,1.2, events)
ylim([0,1.2])
xlim([xmin, xmax])
ylabel('|: SwimDirection\no: approach events')
data[fish][CS].append( {
'fname' : fname,
'x': x[rng], 'y': y[rng], 'z': z[rng],
'X': X[rng], 'Y': Y[rng], 'Z': Z[rng], # calibrate space (mm)
'speed3D': _speed3D, # calibrate space (mm)
'movingSTD' : _movingSTD, # calibrate space (mm)
'd2inflow': _d2inflow, # calibrate space (mm)
'ringpixels': _ringpixels,
'peaks_within': peaks_within[rng],
'xy_within': xy_within[rng],
'location_one_third' : location_one_third[rng],
'swimdir_within' : swimdir_within[rng],
'dtheta_shape': dtheta_shape[rng],
'dtheta_vel': dtheta_vel[rng],
'turns_shape': turns_shape_array[rng], # already +/- preRange
'turns_vel': turns_vel_array[rng],
'events' : events,
'matchedUSname' : matchedUSname,
'TVroi' : (TVx1,TVy1,TVx2,TVy2),
'SVroi' : (SVx1,SVy1,SVx2,SVy2),
} )
if pp:
fig.savefig(pp, format='pdf')
close('all') # release memory ASAP!
if pp:
pp.close()
def getPDFs(pickle_files, fishnames=None, createPDF=True):
# type checking args
if type(pickle_files) is str:
pickle_files = [pickle_files]
# convert to a list or set of fish names
if type(fishnames) is str:
fishnames = [fishnames]
elif not fishnames:
fishnames = set()
# re-organize trials into a dict "data"
data = {}
# figure out trial number (sometime many trials in one files) for each fish
# go through all pickle_files and use timestamps of file to sort events.
timestamps = []
for fp in pickle_files:
# collect ctime of pickled files
fname = os.path.basename(fp).split('.')[0] + '.avi'
timestamps.append( time.strptime(fname, "%b-%d-%Y_%H_%M_%S.avi") )
# look into the pickle and collect fish analyzed
params = np.load(fp) # loading pickled file!
if type(fishnames) is set:
for fish in [fs for fl,fs in params.keys() if fl == fname and fs != 'mog']:
fishnames.add(fish)
timestamps = sorted(range(len(timestamps)), key=timestamps.__getitem__)
# For each fish, go thru all pickled files
for fish in fishnames:
data[fish] = {}
# now go thru the sorted
for ind in timestamps:
fp = pickle_files[ind]
print 'processing #%d\n%s' % (ind, fp)
add2DataAndPlot(fp, fish, data, createPDF)
return data
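# Sketch of the returned structure (inferred from add2DataAndPlot above):
#   data[fish][CSname] -> list of per-trial dicts holding the traces aligned to
#   +/- preRange frames around each CS onset ('speed3D', 'd2inflow', 'ringpixels',
#   'peaks_within', 'turns_shape', 'turns_vel', ...), plus 'events', 'matchedUSname'
#   and the TV/SV ROIs.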
def plotTrials(data, fish, CSname, key, step, offset=0, pp=None):
fig = figure(figsize=(12,8), facecolor='w')
ax1 = fig.add_subplot(121) # raw trace
ax2 = fig.add_subplot(222) # learning curve
ax3 = fig.add_subplot(224) # bar plot
preP, postP, postP2 = [], [], []
longestUS = 0
for n, measurement in enumerate(data[fish][CSname]):
tr = n+1
CS, USs, preRange = measurement['events']
subplot(ax1)
mi = -step*(tr-1)
ma = mi + step
drawLines(mi, ma, (preRange, [preRange+(USs[0]-CS)], preRange))
longestUS = max([us-CS+preRange*3/2 for us in USs]+[longestUS])
# 'measurement[key]': vector around the CS timing (+/-) preRange. i.e., preRange is the center
ax1.plot(measurement[key]-step*(tr-1)+offset)
title(CSname+': '+key) # cf. preRange = 3600 frames
pre = measurement[key][:preRange].mean()+offset # 2 min window
post = measurement[key][preRange:preRange+(USs[0]-CS)].mean()+offset # 23 s window
post2 = measurement[key][preRange+(USs[0]-CS):preRange*3/2+(USs[0]-CS)].mean()+offset # 1 min window after US
preP.append(pre)
postP.append(post)
postP2.append(post2)
ax3.plot([1, 2, 3], [pre, post, post2],'o-')
ax1.set_xlim([0,longestUS])
ax1.axis('off')
subplot(ax2)
x = range(1, tr+1)
y = np.diff((preP,postP), axis=0).ravel()
ax2.plot( x, y, 'ko-', linewidth=2 )
ax2.plot( x, np.zeros_like(x), '-.', linewidth=1, color='gray' )
# grid()
slope, intercept, rvalue, pval, stderr = stats.stats.linregress(x,y)
title('slope = zero? p-value = %f' % pval)
ax2.set_xlabel("Trial#")
ax2.set_xlim([0.5,tr+0.5])
ax2.set_ylabel('CS - pre')
subplot(ax3)
ax3.bar([0.6, 1.6, 2.6], [np.nanmean(preP), np.nanmean(postP), np.nanmean(postP2)], facecolor='none')
t, pval = stats.ttest_rel(postP, preP)
title('paired t p-value = %f' % pval)
ax3.set_xticks([1,2,3])
ax3.set_xticklabels(['pre', CSname, measurement['matchedUSname']])
ax3.set_xlim([0.5,3.5])
ax3.set_ylabel('Raw mean values')
tight_layout(2, h_pad=1, w_pad=1)
if pp:
fig.savefig(pp, format='pdf')
close('all')
return np.vstack((preP, postP, postP2))
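# Note: plotTrials returns a (3, n_trials) array (row 0: pre-CS mean, row 1: CS-to-US
# mean, row 2: post-US mean of `key`), which getSummary below writes to one Excel
# sheet per measure via putNp2xls.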
def getSummary(data, dirname=None):
for fish in data.keys():
for CSname in data[fish].keys():
if dirname:
pp = PdfPages(os.path.join(dirname, '%s_for_%s.pdf' % (CSname,fish)))
print 'generating %s_for_%s.pdf' % (CSname,fish)
book = Workbook()
sheet1 = book.add_sheet('speed3D')
avgs = plotTrials(data, fish, CSname, 'speed3D', 30, pp=pp)
putNp2xls(avgs, sheet1)
sheet2 = book.add_sheet('d2inflow')
avgs = plotTrials(data, fish, CSname, 'd2inflow', 200, pp=pp)
putNp2xls(avgs, sheet2)
# sheet3 = book.add_sheet('smoothedz')
sheet3 = book.add_sheet('Z')
# avgs = plotTrials(data, fish, CSname, 'smoothedz', 100, pp=pp)
avgs = plotTrials(data, fish, CSname, 'Z', 30, pp=pp)
putNp2xls(avgs, sheet3)
sheet4 = book.add_sheet('ringpixels')
avgs = plotTrials(data, fish, CSname, 'ringpixels', 1200, pp=pp)
putNp2xls(avgs, sheet4)
sheet5 = book.add_sheet('peaks_within')
avgs = plotTrials(data, fish, CSname, 'peaks_within', 1.5, pp=pp)
putNp2xls(avgs, sheet5)
sheet6 = book.add_sheet('swimdir_within')
avgs = plotTrials(data, fish, CSname, 'swimdir_within', 1.5, pp=pp)
putNp2xls(avgs, sheet6)
sheet7 = book.add_sheet('xy_within')
avgs = plotTrials(data, fish, CSname, 'xy_within', 1.5, pp=pp)
putNp2xls(avgs, sheet7)
sheet8 = book.add_sheet('turns_shape')
avgs = plotTrials(data, fish, CSname, 'turns_shape', 1.5, pp=pp)
putNp2xls(avgs, sheet8)
sheet9 = book.add_sheet('turns_vel')
avgs = plotTrials(data, fish, CSname, 'turns_vel', 1.5, pp=pp)
putNp2xls(avgs, sheet9)
if dirname:
pp.close()
book.save(os.path.join(dirname, '%s_for_%s.xls' % (CSname,fish)))
close('all')
else:
show()
def add2Pickles(dirname, pickle_files):
# dirname : folder to look for pickle files
# pickle_files : output, a list to be concatenated.
pattern = os.path.join(dirname, '*.pickle')
temp = [_ for _ in glob(pattern) if not _.endswith('- Copy.pickle') and
not os.path.basename(_).startswith('Summary')]
pickle_files += temp
if __name__ == '__main__':
pickle_files = []
# small test data
# add2Pickles('R:/Data/itoiori/behav/adult whitlock/conditioning/NeuroD/Aug4/test', pickle_files)
# outputdir = 'R:/Data/itoiori/behav/adult whitlock/conditioning/NeuroD/Aug4/test'
# show me what you got
for pf in pickle_files:
print pf
fp = os.path.join(outputdir, 'Summary.pickle')
createPDF = True # useful when plotting etc code updated
if 1: # refresh analysis
data = getPDFs(pickle_files, createPDF=createPDF)
import cPickle as pickle
with open(os.path.join(outputdir, 'Summary.pickle'), 'wb') as f:
pickle.dump(data, f)
else: # or reuse previous
data = np.load(fp)
getSummary(data, outputdir)
pickle2mat(fp, data)
| bsd-3-clause | 4,991,329,245,887,673,000 | 36.435306 | 124 | 0.567291 | false |
jkunimune15/Map-Projections | src/zupplemental/compose_maps.py | 1 | 5115 | #compose_maps.py
#make ALL the maps
import math
from generate_borders import generate_borders
from generate_graticule import generate_graticule, generate_backdrop
from generate_indicatrices import generate_indicatrices
from generate_orthodromes import generate_orthodromes
from generate_shape import plot_shapes
from generate_labels import generate_topographical_labels, label_shapes, label_points
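# Editor's note (sketch of how this script is used, inferred from the code): each
# compose_* function prints SVG <g> fragments to stdout, presumably to be redirected
# into a map template; the transform matrix(1,0,0,-1,180,90) maps (longitude, latitude)
# in degrees onto SVG coordinates with the y axis flipped.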
def compose_landmasses():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="land">')
plot_shapes('ne_50m_land', trim_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="water">')
plot_shapes('ne_50m_lakes', max_rank=4)
print('\t\t</g>')
print('\t</g>')
def compose_graticule():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="graticule">')
generate_graticule(5, 1, include_tropics=True, adjust_poles=True)
print('\t\t</g>')
print('\t</g>')
def compose_graticule2():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="graticule">')
generate_graticule(15, .25, include_tropics=True, adjust_poles=True, double_dateline=True)
print('\t\t</g>')
print('\t</g>')
def compose_compound():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="land">')
plot_shapes('ne_50m_land', trim_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="river">')
plot_shapes('ne_50m_rivers_lake_centerlines', max_rank=4)
print('\t\t</g>')
print('\t\t<g class="lakes">')
plot_shapes('ne_50m_lakes', max_rank=4)
print('\t\t</g>')
print('\t\t<g class="graticule">')
generate_graticule(15, 1, include_tropics=True, adjust_poles=True)
print('\t\t</g>')
print('\t</g>')
def compose_indicatrices():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="land">')
plot_shapes('ne_50m_land', trim_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="lakes">')
plot_shapes('ne_50m_lakes', max_rank=4)
print('\t\t</g>')
print('\t\t<g class="tissot">')
generate_indicatrices(15, math.radians(3.75), resolution=180, adjust_poles=True)
print('\t\t</g>')
print('\t</g>')
def compose_indicatrices2(ctr_meridian):
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="water">')
generate_backdrop(.5, ctr_meridian=ctr_meridian)
print('\t\t</g>')
print('\t\t<g class="land">')
plot_shapes('ne_110m_land', flesh_out_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="lakes">')
plot_shapes('ne_110m_lakes')
print('\t\t</g>')
print('\t\t<g class="graticule">')
generate_graticule(10, .5, double_dateline=(ctr_meridian==0))
print('\t\t</g>')
print('\t\t<g class="tissot">')
generate_indicatrices(30, 500/6371, ctr_meridian=ctr_meridian, adjust_poles=True, resolution=120, side_res=5, pole_res=120)
print('\t\t</g>')
print('\t</g>')
def compose_political():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="country">')
generate_borders('ne_50m', trim_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="lakes">')
plot_shapes('ne_50m_lakes', max_rank=4)
print('\t\t</g>')
print('\t</g>')
label_shapes('ne_50m_admin_0_countries', "pol")
def compose_orthodromes():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="lines">')
generate_orthodromes()
print('\t\t</g>')
print('\t</g>')
def compose_everything():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="country">')
generate_borders('ne_10m', trim_antarctica=True, borders_only=False)
print('\t\t<g class="border">')
generate_borders('ne_10m', trim_antarctica=True, borders_only=True)
print('\t\t</g>')
print('\t\t</g>')
print('\t\t<g class="sovereign">')
plot_shapes('ne_10m_admin_0_map_units')
print('\t\t</g>')
print('\t\t<g class="admin">')
plot_shapes('ne_10m_admin_1_states_provinces_lines', filter_field='adm0_a3',
filter_vals=['RUS','CAN','CHN','USA','BRA','AUS','IND','ARG','KAZ'])
print('\t\t</g>')
print('\t\t<g class="dispute">')
plot_shapes('ne_10m_admin_0_boundary_lines_disputed_areas')
print('\t\t</g>')
print('\t\t<g class="coastline">')
plot_shapes('ne_10m_coastline', trim_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="river">')
plot_shapes('ne_10m_rivers_lake_centerlines', max_rank=5)
print('\t\t</g>')
print('\t\t<g class="lake">')
plot_shapes('ne_10m_lakes', max_rank=4)
print('\t\t</g>')
print('\t\t<g class="graticule">')
generate_graticule(5, 1, include_tropics=True, adjust_poles=True)
plot_shapes('ne_10m_geographic_lines', clazz="dateline", filter_field='name', filter_vals=["International Date Line"])
print('\t\t</g>')
print('\t</g>')
generate_topographical_labels('ne_50m', max_rank=2, text_size=4)
label_shapes('ne_10m_lakes', "sea", max_rank=1, text_size=1)
label_shapes('ne_10m_admin_0_countries', "pol", text_size=4)
label_points('cities_capital', "cap", text_size=1)
label_points('cities_other', "cit", text_size=0)
if __name__ == '__main__':
# compose_landmasses()
# compose_graticule()
# compose_compound()
# compose_indicatrices()
# compose_indicatrices2(-0)
# compose_political()
# compose_orthodromes()
compose_everything()
| mit | -1,186,010,880,348,605,000 | 32.874172 | 124 | 0.657869 | false |
francisco-dlp/hyperspy | hyperspy/drawing/utils.py | 1 | 57321 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import copy
import itertools
import textwrap
from traits import trait_base
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backend_bases import key_press_handler
import warnings
import numpy as np
from distutils.version import LooseVersion
import logging
import hyperspy as hs
_logger = logging.getLogger(__name__)
def contrast_stretching(data, saturated_pixels):
"""Calculate bounds that leaves out a given percentage of the data.
Parameters
----------
data: numpy array
saturated_pixels: scalar, None
The percentage of pixels that are left out of the bounds. For example,
the low and high bounds of a value of 1 are the 0.5% and 99.5%
percentiles. It must be in the [0, 100] range. If None, set the value
to 0.
Returns
-------
vmin, vmax: scalar
The low and high bounds
Raises
------
ValueError if the value of `saturated_pixels` is out of the valid range.
"""
# Sanity check
if saturated_pixels is None:
saturated_pixels = 0
if not 0 <= saturated_pixels <= 100:
raise ValueError(
"saturated_pixels must be a scalar in the range[0, 100]")
vmin = np.nanpercentile(data, saturated_pixels / 2.)
vmax = np.nanpercentile(data, 100 - saturated_pixels / 2.)
return vmin, vmax
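# Usage sketch (hypothetical data):
#   data = np.random.randn(256, 256)
#   vmin, vmax = contrast_stretching(data, saturated_pixels=1)  # clip 0.5% per tail
#   plt.imshow(data, vmin=vmin, vmax=vmax)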
MPL_DIVERGING_COLORMAPS = [
"BrBG",
"bwr",
"coolwarm",
"PiYG",
"PRGn",
"PuOr",
"RdBu",
"RdGy",
"RdYIBu",
"RdYIGn",
"seismic",
"Spectral", ]
# Add reversed colormaps
MPL_DIVERGING_COLORMAPS += [cmap + "_r" for cmap in MPL_DIVERGING_COLORMAPS]
def centre_colormap_values(vmin, vmax):
"""Calculate vmin and vmax to set the colormap midpoint to zero.
Parameters
----------
vmin, vmax : scalar
The range of data to display.
Returns
-------
cvmin, cvmax : scalar
The values to obtain a centre colormap.
"""
absmax = max(abs(vmin), abs(vmax))
return -absmax, absmax
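# Usage sketch: centre a diverging colormap about zero before plotting (with `data`
# any 2D array):
#   vmin, vmax = centre_colormap_values(-3.0, 5.0)   # -> (-5.0, 5.0)
#   plt.imshow(data, cmap='RdBu', vmin=vmin, vmax=vmax)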
def create_figure(window_title=None,
_on_figure_window_close=None,
disable_xyscale_keys=False,
**kwargs):
"""Create a matplotlib figure.
This function adds the possibility to execute another function
when the figure is closed and to easily set the window title. Any
keyword argument is passed to the plt.figure function
Parameters
----------
window_title : string
_on_figure_window_close : function
disable_xyscale_keys : bool, disable the `k`, `l` and `L` shortcuts which
toggle the x or y axis between linear and log scale.
Returns
-------
fig : plt.figure
"""
fig = plt.figure(**kwargs)
if window_title is not None:
# remove non-alphanumeric characters to prevent file saving problems
# This is a workaround for:
# https://github.com/matplotlib/matplotlib/issues/9056
reserved_characters = r'<>"/\|?*'
for c in reserved_characters:
window_title = window_title.replace(c, '')
window_title = window_title.replace('\n', ' ')
window_title = window_title.replace(':', ' -')
fig.canvas.set_window_title(window_title)
if disable_xyscale_keys and hasattr(fig.canvas, 'toolbar'):
# hack the `key_press_handler` to disable the `k`, `l`, `L` shortcuts
manager = fig.canvas.manager
fig.canvas.mpl_disconnect(manager.key_press_handler_id)
manager.key_press_handler_id = manager.canvas.mpl_connect(
'key_press_event',
lambda event: key_press_handler_custom(event, manager.canvas))
if _on_figure_window_close is not None:
on_figure_window_close(fig, _on_figure_window_close)
return fig
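# Usage sketch:
#   fig = create_figure(window_title='My signal: plot', disable_xyscale_keys=True)
# Reserved characters and ':' in the title are sanitised before being passed to
# matplotlib, so the window title above becomes 'My signal - plot'.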
def key_press_handler_custom(event, canvas):
if event.key not in ['k', 'l', 'L']:
key_press_handler(event, canvas, canvas.manager.toolbar)
def on_figure_window_close(figure, function):
"""Connects a close figure signal to a given function.
Parameters
----------
figure : mpl figure instance
function : function
"""
def function_wrapper(evt):
function()
figure.canvas.mpl_connect('close_event', function_wrapper)
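# Usage sketch: register a clean-up callback that runs when the figure window is closed.
#   fig = plt.figure()
#   on_figure_window_close(fig, lambda: print('figure closed'))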
def plot_RGB_map(im_list, normalization='single', dont_plot=False):
"""Plot 2 or 3 maps in RGB.
Parameters
----------
im_list : list of Signal2D instances
normalization : {'single', 'global'}
dont_plot : bool
Returns
-------
array: RGB matrix
"""
# from widgets import cursors
height, width = im_list[0].data.shape[:2]
rgb = np.zeros((height, width, 3))
rgb[:, :, 0] = im_list[0].data.squeeze()
rgb[:, :, 1] = im_list[1].data.squeeze()
if len(im_list) == 3:
rgb[:, :, 2] = im_list[2].data.squeeze()
if normalization == 'single':
for i in range(len(im_list)):
rgb[:, :, i] /= rgb[:, :, i].max()
elif normalization == 'global':
rgb /= rgb.max()
rgb = rgb.clip(0, rgb.max())
if not dont_plot:
figure = plt.figure()
ax = figure.add_subplot(111)
ax.frameon = False
ax.set_axis_off()
ax.imshow(rgb, interpolation='nearest')
# cursors.set_mpl_ax(ax)
figure.canvas.draw_idle()
else:
return rgb
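# Usage sketch (hypothetical Signal2D instances im_r, im_g, im_b):
#   rgb = plot_RGB_map([im_r, im_g, im_b], normalization='single', dont_plot=True)
#   rgb.shape   # (height, width, 3), each channel scaled by its own maximum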
def subplot_parameters(fig):
"""Returns a list of the subplot parameters of a mpl figure.
Parameters
----------
fig : mpl figure
Returns
-------
tuple : (left, bottom, right, top, wspace, hspace)
"""
wspace = fig.subplotpars.wspace
hspace = fig.subplotpars.hspace
left = fig.subplotpars.left
right = fig.subplotpars.right
top = fig.subplotpars.top
bottom = fig.subplotpars.bottom
return left, bottom, right, top, wspace, hspace
class ColorCycle:
_color_cycle = [mpl.colors.colorConverter.to_rgba(color) for color
in ('b', 'g', 'r', 'c', 'm', 'y', 'k')]
def __init__(self):
self.color_cycle = copy.copy(self._color_cycle)
def __call__(self):
if not self.color_cycle:
self.color_cycle = copy.copy(self._color_cycle)
return self.color_cycle.pop(0)
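# Usage sketch: ColorCycle loops over the default colour list and restarts when exhausted.
#   cycle = ColorCycle()
#   colors = [cycle() for _ in range(10)]   # RGBA tuples, wrapping after 'b g r c m y k'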
def plot_signals(signal_list, sync=True, navigator="auto",
navigator_list=None, **kwargs):
"""Plot several signals at the same time.
Parameters
----------
signal_list : list of BaseSignal instances
If sync is set to True, the signals must have the
same navigation shape, but not necessarily the same signal shape.
sync : True or False, default "True"
If True: the signals will share navigation, all the signals
must have the same navigation shape for this to work, but not
necessarily the same signal shape.
navigator : {"auto", None, "spectrum", "slider", BaseSignal}, default "auto"
See signal.plot docstring for full description
navigator_list : {List of navigator arguments, None}, default None
Set different navigator options for the signals. Must use valid
navigator arguments: "auto", None, "spectrum", "slider", or a
hyperspy Signal. The list must have the same size as signal_list.
If None, the argument specified in navigator will be used.
**kwargs
Any extra keyword arguments are passed to each signal `plot` method.
Example
-------
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> hs.plot.plot_signals([s_cl, s_ll])
Specifying the navigator:
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> hs.plot.plot_signals([s_cl, s_ll], navigator="slider")
Specifying the navigator for each signal:
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> s_edx = hs.load("edx.dm3")
>>> s_adf = hs.load("adf.dm3")
>>> hs.plot.plot_signals(
[s_cl, s_ll, s_edx], navigator_list=["slider",None,s_adf])
"""
import hyperspy.signal
if navigator_list:
if not (len(signal_list) == len(navigator_list)):
raise ValueError(
"signal_list and navigator_list must"
" have the same size")
if sync:
axes_manager_list = []
for signal in signal_list:
axes_manager_list.append(signal.axes_manager)
if not navigator_list:
navigator_list = []
if navigator is None:
navigator_list.extend([None] * len(signal_list))
elif isinstance(navigator, hyperspy.signal.BaseSignal):
navigator_list.append(navigator)
navigator_list.extend([None] * (len(signal_list) - 1))
elif navigator == "slider":
navigator_list.append("slider")
navigator_list.extend([None] * (len(signal_list) - 1))
elif navigator == "spectrum":
navigator_list.extend(["spectrum"] * len(signal_list))
elif navigator == "auto":
navigator_list.extend(["auto"] * len(signal_list))
else:
raise ValueError(
"navigator must be one of \"spectrum\",\"auto\","
" \"slider\", None, a Signal instance")
# Check to see if the spectra have the same navigational shapes
temp_shape_first = axes_manager_list[0].navigation_shape
for i, axes_manager in enumerate(axes_manager_list):
temp_shape = axes_manager.navigation_shape
if not (temp_shape_first == temp_shape):
raise ValueError(
"The spectra does not have the same navigation shape")
axes_manager_list[i] = axes_manager.deepcopy()
if i > 0:
for axis0, axisn in zip(axes_manager_list[0].navigation_axes,
axes_manager_list[i].navigation_axes):
axes_manager_list[i]._axes[axisn.index_in_array] = axis0
del axes_manager
for signal, navigator, axes_manager in zip(signal_list,
navigator_list,
axes_manager_list):
signal.plot(axes_manager=axes_manager,
navigator=navigator,
**kwargs)
# If sync is False
else:
if not navigator_list:
navigator_list = []
navigator_list.extend([navigator] * len(signal_list))
for signal, navigator in zip(signal_list, navigator_list):
signal.plot(navigator=navigator,
**kwargs)
def _make_heatmap_subplot(spectra):
from hyperspy._signals.signal2d import Signal2D
im = Signal2D(spectra.data, axes=spectra.axes_manager._get_axes_dicts())
im.metadata.General.title = spectra.metadata.General.title
im.plot()
return im._plot.signal_plot.ax
def set_xaxis_lims(mpl_ax, hs_axis):
"""
Set the matplotlib axis limits to match that of a HyperSpy axis
Parameters
----------
mpl_ax : :class:`matplotlib.axis.Axis`
The ``matplotlib`` axis to change
hs_axis : :class:`~hyperspy.axes.DataAxis`
The data axis that contains the values that control the scaling
"""
x_axis_lower_lim = hs_axis.axis[0]
x_axis_upper_lim = hs_axis.axis[-1]
mpl_ax.set_xlim(x_axis_lower_lim, x_axis_upper_lim)
def _make_overlap_plot(spectra, ax, color="blue", line_style='-'):
if isinstance(color, str):
color = [color] * len(spectra)
if isinstance(line_style, str):
line_style = [line_style] * len(spectra)
for spectrum_index, (spectrum, color, line_style) in enumerate(
zip(spectra, color, line_style)):
x_axis = spectrum.axes_manager.signal_axes[0]
spectrum = _transpose_if_required(spectrum, 1)
ax.plot(x_axis.axis, spectrum.data, color=color, ls=line_style)
set_xaxis_lims(ax, x_axis)
_set_spectrum_xlabel(spectra if isinstance(spectra, hs.signals.BaseSignal)
else spectra[-1], ax)
ax.set_ylabel('Intensity')
ax.autoscale(tight=True)
def _make_cascade_subplot(
spectra, ax, color="blue", line_style='-', padding=1):
max_value = 0
for spectrum in spectra:
spectrum_yrange = (np.nanmax(spectrum.data) -
np.nanmin(spectrum.data))
if spectrum_yrange > max_value:
max_value = spectrum_yrange
if isinstance(color, str):
color = [color] * len(spectra)
if isinstance(line_style, str):
line_style = [line_style] * len(spectra)
for spectrum_index, (spectrum, color, line_style) in enumerate(
zip(spectra, color, line_style)):
x_axis = spectrum.axes_manager.signal_axes[0]
spectrum = _transpose_if_required(spectrum, 1)
data_to_plot = ((spectrum.data - spectrum.data.min()) /
float(max_value) + spectrum_index * padding)
ax.plot(x_axis.axis, data_to_plot, color=color, ls=line_style)
set_xaxis_lims(ax, x_axis)
_set_spectrum_xlabel(spectra if isinstance(spectra, hs.signals.BaseSignal)
else spectra[-1], ax)
ax.set_yticks([])
ax.autoscale(tight=True)
def _plot_spectrum(spectrum, ax, color="blue", line_style='-'):
x_axis = spectrum.axes_manager.signal_axes[0]
ax.plot(x_axis.axis, spectrum.data, color=color, ls=line_style)
set_xaxis_lims(ax, x_axis)
def _set_spectrum_xlabel(spectrum, ax):
x_axis = spectrum.axes_manager.signal_axes[0]
ax.set_xlabel("%s (%s)" % (x_axis.name, x_axis.units))
def _transpose_if_required(signal, expected_dimension):
# EDS profiles or maps have signal dimension = 0 and navigation dimension
# 1 or 2. For convenience transpose the signal if possible
if (signal.axes_manager.signal_dimension == 0 and
signal.axes_manager.navigation_dimension == expected_dimension):
return signal.T
else:
return signal
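# Sketch: an EDS map is typically a BaseSignal with signal_dimension 0 and
# navigation_dimension 2; _transpose_if_required(eds_map, 2) then behaves like
# eds_map.T so the data can be plotted as an image, and is a no-op otherwise.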
def plot_images(images,
cmap=None,
no_nans=False,
per_row=3,
label='auto',
labelwrap=30,
suptitle=None,
suptitle_fontsize=18,
colorbar='multi',
centre_colormap="auto",
saturated_pixels=0,
scalebar=None,
scalebar_color='white',
axes_decor='all',
padding=None,
tight_layout=False,
aspect='auto',
min_asp=0.1,
namefrac_thresh=0.4,
fig=None,
vmin=None,
vmax=None,
*args,
**kwargs):
"""Plot multiple images as sub-images in one figure.
Extra keyword arguments are passed to `matplotlib.figure`.
Parameters
----------
images : list of Signal2D or BaseSignal
`images` should be a list of Signals to plot. For `BaseSignal` with
navigation dimensions 2 and signal dimension 0, the signal will be
tranposed to form a `Signal2D`.
Multi-dimensional images will have each plane plotted as a separate
image.
If any signal shape is not suitable, a ValueError will be raised.
cmap : matplotlib colormap, list, or ``'mpl_colors'``, *optional*
The colormap used for the images, by default read from ``pyplot``.
A list of colormaps can also be provided, and the images will
cycle through them. Optionally, the value ``'mpl_colors'`` will
cause the cmap to loop through the default ``matplotlib``
colors (to match with the default output of the
:py:func:`~.drawing.utils.plot_spectra` method.
Note: if using more than one colormap, using the ``'single'``
option for ``colorbar`` is disallowed.
no_nans : bool, optional
If True, set nans to zero for plotting.
per_row : int, optional
The number of plots in each row
label : None, str, or list of str, optional
Control the title labeling of the plotted images.
If None, no titles will be shown.
If 'auto' (default), function will try to determine suitable titles
using Signal2D titles, falling back to the 'titles' option if no good
short titles are detected.
Works best if all images to be plotted have the same beginning
to their titles.
If 'titles', the title from each image's metadata.General.title
will be used.
If any other single str, images will be labeled in sequence using
that str as a prefix.
If a list of str, the list elements will be used to determine the
labels (repeated, if necessary).
labelwrap : int, optional
integer specifying the number of characters that will be used on
one line
If the function returns an unexpected blank figure, lower this
value to reduce overlap of the labels between each figure
suptitle : str, optional
Title to use at the top of the figure. If called with label='auto',
this parameter will override the automatically determined title.
suptitle_fontsize : int, optional
Font size to use for super title at top of figure
colorbar : {'multi', None, 'single'}
Controls the type of colorbars that are plotted.
If None, no colorbar is plotted.
If 'multi' (default), individual colorbars are plotted for each
(non-RGB) image
If 'single', all (non-RGB) images are plotted on the same scale,
and one colorbar is shown for all
centre_colormap : {"auto", True, False}
If True the centre of the color scheme is set to zero. This is
specially useful when using diverging color schemes. If "auto"
(default), diverging color schemes are automatically centred.
saturated_pixels: None, scalar or list of scalar, optional, default: 0
        If list of scalar, the length should match the number of images to
        show; if an entry is None, the value 0 is used for that image.
The percentage of pixels that are left out of the bounds. For
example, the low and high bounds of a value of 1 are the 0.5% and
99.5% percentiles. It must be in the [0, 100] range.
scalebar : {None, 'all', list of ints}, optional
If None (or False), no scalebars will be added to the images.
If 'all', scalebars will be added to all images.
If list of ints, scalebars will be added to each image specified.
scalebar_color : str, optional
A valid MPL color string; will be used as the scalebar color
axes_decor : {'all', 'ticks', 'off', None}, optional
Controls how the axes are displayed on each image; default is 'all'
If 'all', both ticks and axis labels will be shown
If 'ticks', no axis labels will be shown, but ticks/labels will
If 'off', all decorations and frame will be disabled
If None, no axis decorations will be shown, but ticks/frame will
padding : None or dict, optional
This parameter controls the spacing between images.
If None, default options will be used
Otherwise, supply a dictionary with the spacing options as
keywords and desired values as values
Values should be supplied as used in pyplot.subplots_adjust(),
and can be:
'left', 'bottom', 'right', 'top', 'wspace' (width),
and 'hspace' (height)
tight_layout : bool, optional
If true, hyperspy will attempt to improve image placement in
figure using matplotlib's tight_layout
If false, repositioning images inside the figure will be left as
an exercise for the user.
aspect : str or numeric, optional
If 'auto', aspect ratio is auto determined, subject to min_asp.
If 'square', image will be forced onto square display.
If 'equal', aspect ratio of 1 will be enforced.
If float (or int/long), given value will be used.
min_asp : float, optional
Minimum aspect ratio to be used when plotting images
namefrac_thresh : float, optional
Threshold to use for auto-labeling. This parameter controls how
much of the titles must be the same for the auto-shortening of
labels to activate. Can vary from 0 to 1. Smaller values
encourage shortening of titles by auto-labeling, while larger
        values will require more overlap in titles before activating the
auto-label code.
fig : mpl figure, optional
If set, the images will be plotted to an existing MPL figure
vmin, vmax : scalar or list of scalar, optional, default: None
If list of scalar, the length should match the number of images to
show.
A list of scalar is not compatible with a single colorbar.
See vmin, vmax of matplotlib.imshow() for more details.
*args, **kwargs, optional
Additional arguments passed to matplotlib.imshow()
Returns
-------
axes_list : list
a list of subplot axes that hold the images
See Also
--------
plot_spectra : Plotting of multiple spectra
plot_signals : Plotting of multiple signals
plot_histograms : Compare signal histograms
Notes
-----
`interpolation` is a useful parameter to provide as a keyword
argument to control how the space between pixels is interpolated. A
value of ``'nearest'`` will cause no interpolation between pixels.
`tight_layout` is known to be quite brittle, so an option is provided
to disable it. Turn this option off if output is not as expected,
or try adjusting `label`, `labelwrap`, or `per_row`
"""
def __check_single_colorbar(cbar):
if cbar == 'single':
raise ValueError('Cannot use a single colorbar with multiple '
'colormaps. Please check for compatible '
'arguments.')
from hyperspy.drawing.widgets import ScaleBar
from hyperspy.misc import rgb_tools
from hyperspy.signal import BaseSignal
# Check that we have a hyperspy signal
im = [images] if not isinstance(images, (list, tuple)) else images
for image in im:
if not isinstance(image, BaseSignal):
raise ValueError("`images` must be a list of image signals or a "
"multi-dimensional signal."
" " + repr(type(images)) + " was given.")
# For list of EDS maps, transpose the BaseSignal
if isinstance(images, (list, tuple)):
images = [_transpose_if_required(image, 2) for image in images]
# If input is >= 1D signal (e.g. for multi-dimensional plotting),
# copy it and put it in a list so labeling works out as (x,y) when plotting
if isinstance(images,
BaseSignal) and images.axes_manager.navigation_dimension > 0:
images = [images._deepcopy_with_new_data(images.data)]
n = 0
for i, sig in enumerate(images):
if sig.axes_manager.signal_dimension != 2:
raise ValueError("This method only plots signals that are images. "
"The signal dimension must be equal to 2. "
"The signal at position " + repr(i) +
" was " + repr(sig) + ".")
# increment n by the navigation size, or by 1 if the navigation size is
# <= 0
n += (sig.axes_manager.navigation_size
if sig.axes_manager.navigation_size > 0
else 1)
# If no cmap given, get default colormap from pyplot:
if cmap is None:
cmap = [plt.get_cmap().name]
elif cmap == 'mpl_colors':
for n_color, c in enumerate(mpl.rcParams['axes.prop_cycle']):
make_cmap(colors=['#000000', c['color']],
name='mpl{}'.format(n_color))
cmap = ['mpl{}'.format(i) for i in
range(len(mpl.rcParams['axes.prop_cycle']))]
__check_single_colorbar(colorbar)
# cmap is list, tuple, or something else iterable (but not string):
elif hasattr(cmap, '__iter__') and not isinstance(cmap, str):
try:
cmap = [c.name for c in cmap] # convert colormap to string
except AttributeError:
cmap = [c for c in cmap] # c should be string if not colormap
__check_single_colorbar(colorbar)
elif isinstance(cmap, mpl.colors.Colormap):
cmap = [cmap.name] # convert single colormap to list with string
elif isinstance(cmap, str):
cmap = [cmap] # cmap is single string, so make it a list
else:
# Didn't understand cmap input, so raise error
raise ValueError('The provided cmap value was not understood. Please '
'check input values.')
# If any of the cmaps given are diverging, and auto-centering, set the
# appropriate flag:
if centre_colormap == "auto":
centre_colormaps = []
for c in cmap:
if c in MPL_DIVERGING_COLORMAPS:
centre_colormaps.append(True)
else:
centre_colormaps.append(False)
# if it was True, just convert to list
elif centre_colormap:
centre_colormaps = [True]
# likewise for false
elif not centre_colormap:
centre_colormaps = [False]
# finally, convert lists to cycle generators for adaptive length:
centre_colormaps = itertools.cycle(centre_colormaps)
cmap = itertools.cycle(cmap)
def _check_arg(arg, default_value, arg_name):
if isinstance(arg, list):
if len(arg) != n:
_logger.warning('The provided {} values are ignored because the '
'length of the list does not match the number of '
'images'.format(arg_name))
arg = [default_value] * n
else:
arg = [arg] * n
return arg
vmin = _check_arg(vmin, None, 'vmin')
vmax = _check_arg(vmax, None, 'vmax')
saturated_pixels = _check_arg(saturated_pixels, 0, 'saturated_pixels')
# Sort out the labeling:
div_num = 0
all_match = False
shared_titles = False
user_labels = False
if label is None:
pass
elif label == 'auto':
# Use some heuristics to try to get base string of similar titles
label_list = [x.metadata.General.title for x in images]
# Find the shortest common string between the image titles
# and pull that out as the base title for the sequence of images
# array in which to store arrays
res = np.zeros((len(label_list), len(label_list[0]) + 1))
res[:, 0] = 1
# j iterates the strings
for j in range(len(label_list)):
# i iterates length of substring test
for i in range(1, len(label_list[0]) + 1):
# stores whether or not characters in title match
res[j, i] = label_list[0][:i] in label_list[j]
# sum up the results (1 is True, 0 is False) and create
# a substring based on the minimum value (this will be
# the "smallest common string" between all the titles
if res.all():
basename = label_list[0]
div_num = len(label_list[0])
all_match = True
else:
div_num = int(min(np.sum(res, 1)))
basename = label_list[0][:div_num - 1]
all_match = False
# trim off any '(' or ' ' characters at end of basename
if div_num > 1:
while True:
if basename[len(basename) - 1] == '(':
basename = basename[:-1]
elif basename[len(basename) - 1] == ' ':
basename = basename[:-1]
else:
break
# namefrac is ratio of length of basename to the image name
# if it is high (e.g. over 0.5), we can assume that all images
# share the same base
if len(label_list[0]) > 0:
namefrac = float(len(basename)) / len(label_list[0])
else:
# If label_list[0] is empty, it means there was probably no
# title set originally, so nothing to share
namefrac = 0
if namefrac > namefrac_thresh:
# there was a significant overlap of label beginnings
shared_titles = True
# only use new suptitle if one isn't specified already
if suptitle is None:
suptitle = basename
else:
# there was not much overlap, so default back to 'titles' mode
shared_titles = False
label = 'titles'
div_num = 0
elif label == 'titles':
# Set label_list to each image's pre-defined title
label_list = [x.metadata.General.title for x in images]
elif isinstance(label, str):
# Set label_list to an indexed list, based off of label
label_list = [label + " " + repr(num) for num in range(n)]
elif isinstance(label, list) and all(
isinstance(x, str) for x in label):
label_list = label
user_labels = True
# If list of labels is longer than the number of images, just use the
# first n elements
if len(label_list) > n:
del label_list[n:]
if len(label_list) < n:
label_list *= (n // len(label_list)) + 1
del label_list[n:]
else:
raise ValueError("Did not understand input of labels.")
# Determine appropriate number of images per row
rows = int(np.ceil(n / float(per_row)))
if n < per_row:
per_row = n
# Set overall figure size and define figure (if not pre-existing)
if fig is None:
k = max(plt.rcParams['figure.figsize']) / max(per_row, rows)
f = plt.figure(figsize=(tuple(k * i for i in (per_row, rows))))
else:
f = fig
# Initialize list to hold subplot axes
axes_list = []
# Initialize list of rgb tags
isrgb = [False] * len(images)
# Check to see if there are any rgb images in list
# and tag them using the isrgb list
for i, img in enumerate(images):
if rgb_tools.is_rgbx(img.data):
isrgb[i] = True
# Determine how many non-rgb Images there are
non_rgb = list(itertools.compress(images, [not j for j in isrgb]))
if len(non_rgb) == 0 and colorbar is not None:
colorbar = None
warnings.warn("Sorry, colorbar is not implemented for RGB images.")
# Find global min and max values of all the non-rgb images for use with
# 'single' scalebar
if colorbar == 'single':
# get a g_saturated_pixels from saturated_pixels
if isinstance(saturated_pixels, list):
g_saturated_pixels = min(np.array([v for v in saturated_pixels]))
else:
g_saturated_pixels = saturated_pixels
# estimate a g_vmin and g_max from saturated_pixels
g_vmin, g_vmax = contrast_stretching(np.concatenate(
[i.data.flatten() for i in non_rgb]), g_saturated_pixels)
# if vmin and vmax are provided, override g_min and g_max
if isinstance(vmin, list):
            _logger.warning('vmin has to be a scalar to be compatible with a '
                            'single colorbar')
else:
g_vmin = vmin if vmin is not None else g_vmin
if isinstance(vmax, list):
            _logger.warning('vmax has to be a scalar to be compatible with a '
                            'single colorbar')
else:
g_vmax = vmax if vmax is not None else g_vmax
if next(centre_colormaps):
g_vmin, g_vmax = centre_colormap_values(g_vmin, g_vmax)
# Check if we need to add a scalebar for some of the images
if isinstance(scalebar, list) and all(isinstance(x, int)
for x in scalebar):
scalelist = True
else:
scalelist = False
idx = 0
ax_im_list = [0] * len(isrgb)
# Replot: create a list to store references to the images
replot_ims = []
# Loop through each image, adding subplot for each one
for i, ims in enumerate(images):
# Get handles for the signal axes and axes_manager
axes_manager = ims.axes_manager
if axes_manager.navigation_dimension > 0:
ims = ims._deepcopy_with_new_data(ims.data)
for j, im in enumerate(ims):
ax = f.add_subplot(rows, per_row, idx + 1)
axes_list.append(ax)
data = im.data
centre = next(centre_colormaps) # get next value for centreing
# Enable RGB plotting
if rgb_tools.is_rgbx(data):
data = rgb_tools.rgbx2regular_array(data, plot_friendly=True)
l_vmin, l_vmax = None, None
else:
data = im.data
# Find min and max for contrast
l_vmin, l_vmax = contrast_stretching(
data, saturated_pixels[idx])
l_vmin = vmin[idx] if vmin[idx] is not None else l_vmin
l_vmax = vmax[idx] if vmax[idx] is not None else l_vmax
if centre:
l_vmin, l_vmax = centre_colormap_values(l_vmin, l_vmax)
# Remove NaNs (if requested)
if no_nans:
data = np.nan_to_num(data)
# Get handles for the signal axes and axes_manager
axes_manager = im.axes_manager
axes = axes_manager.signal_axes
# Set dimensions of images
xaxis = axes[0]
yaxis = axes[1]
extent = (
xaxis.low_value,
xaxis.high_value,
yaxis.high_value,
yaxis.low_value,
)
if not isinstance(aspect, (int, float)) and aspect not in [
'auto', 'square', 'equal']:
_logger.warning("Did not understand aspect ratio input. "
"Using 'auto' as default.")
aspect = 'auto'
if aspect == 'auto':
if float(yaxis.size) / xaxis.size < min_asp:
factor = min_asp * float(xaxis.size) / yaxis.size
elif float(yaxis.size) / xaxis.size > min_asp ** -1:
factor = min_asp ** -1 * float(xaxis.size) / yaxis.size
else:
factor = 1
asp = np.abs(factor * float(xaxis.scale) / yaxis.scale)
elif aspect == 'square':
asp = abs(extent[1] - extent[0]) / abs(extent[3] - extent[2])
elif aspect == 'equal':
asp = 1
elif isinstance(aspect, (int, float)):
asp = aspect
if 'interpolation' not in kwargs.keys():
kwargs['interpolation'] = 'nearest'
# Get colormap for this image:
cm = next(cmap)
# Plot image data, using vmin and vmax to set bounds,
# or allowing them to be set automatically if using individual
# colorbars
if colorbar == 'single' and not isrgb[i]:
axes_im = ax.imshow(data,
cmap=cm,
extent=extent,
vmin=g_vmin, vmax=g_vmax,
aspect=asp,
*args, **kwargs)
ax_im_list[i] = axes_im
else:
axes_im = ax.imshow(data,
cmap=cm,
extent=extent,
vmin=l_vmin,
vmax=l_vmax,
aspect=asp,
*args, **kwargs)
ax_im_list[i] = axes_im
            # If any axis trait is undefined, skip the axis labels:
if isinstance(xaxis.units, trait_base._Undefined) or \
isinstance(yaxis.units, trait_base._Undefined) or \
isinstance(xaxis.name, trait_base._Undefined) or \
isinstance(yaxis.name, trait_base._Undefined):
if axes_decor == 'all':
_logger.warning(
'Axes labels were requested, but one '
'or both of the '
'axes units and/or name are undefined. '
'Axes decorations have been set to '
'\'ticks\' instead.')
axes_decor = 'ticks'
# If all traits are defined, set labels as appropriate:
else:
ax.set_xlabel(axes[0].name + " axis (" + axes[0].units + ")")
ax.set_ylabel(axes[1].name + " axis (" + axes[1].units + ")")
if label:
if all_match:
title = ''
elif shared_titles:
title = label_list[i][div_num - 1:]
else:
if len(ims) == n:
# This is true if we are plotting just 1
# multi-dimensional Signal2D
title = label_list[idx]
elif user_labels:
title = label_list[idx]
else:
title = label_list[i]
if ims.axes_manager.navigation_size > 1 and not user_labels:
title += " %s" % str(ims.axes_manager.indices)
ax.set_title(textwrap.fill(title, labelwrap))
# Set axes decorations based on user input
set_axes_decor(ax, axes_decor)
# If using independent colorbars, add them
if colorbar == 'multi' and not isrgb[i]:
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.05)
plt.colorbar(axes_im, cax=cax)
# Add scalebars as necessary
if (scalelist and idx in scalebar) or scalebar == 'all':
ax.scalebar = ScaleBar(
ax=ax,
units=axes[0].units,
color=scalebar_color,
)
# Replot: store references to the images
replot_ims.append(im)
idx += 1
# If using a single colorbar, add it, and do tight_layout, ensuring that
# a colorbar is only added based off of non-rgb Images:
if colorbar == 'single':
foundim = None
for i in range(len(isrgb)):
if (not isrgb[i]) and foundim is None:
foundim = i
if foundim is not None:
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([0.9, 0.1, 0.03, 0.8])
f.colorbar(ax_im_list[foundim], cax=cbar_ax)
if tight_layout:
# tight_layout, leaving room for the colorbar
plt.tight_layout(rect=[0, 0, 0.9, 1])
elif tight_layout:
plt.tight_layout()
elif tight_layout:
plt.tight_layout()
# Set top bounds for shared titles and add suptitle
if suptitle:
f.subplots_adjust(top=0.85)
f.suptitle(suptitle, fontsize=suptitle_fontsize)
# If we want to plot scalebars, loop through the list of axes and add them
if scalebar is None or scalebar is False:
# Do nothing if no scalebars are called for
pass
elif scalebar == 'all':
# scalebars were taken care of in the plotting loop
pass
elif scalelist:
# scalebars were taken care of in the plotting loop
pass
else:
raise ValueError("Did not understand scalebar input. Must be None, "
"\'all\', or list of ints.")
# Adjust subplot spacing according to user's specification
if padding is not None:
plt.subplots_adjust(**padding)
# Replot: connect function
def on_dblclick(event):
# On the event of a double click, replot the selected subplot
if not event.inaxes:
return
if not event.dblclick:
return
subplots = [axi for axi in f.axes if isinstance(axi, mpl.axes.Subplot)]
inx = list(subplots).index(event.inaxes)
im = replot_ims[inx]
# Use some of the info in the subplot
cm = subplots[inx].images[0].get_cmap()
clim = subplots[inx].images[0].get_clim()
sbar = False
if (scalelist and inx in scalebar) or scalebar == 'all':
sbar = True
im.plot(colorbar=bool(colorbar),
vmin=clim[0],
vmax=clim[1],
no_nans=no_nans,
aspect=asp,
scalebar=sbar,
scalebar_color=scalebar_color,
cmap=cm)
f.canvas.mpl_connect('button_press_event', on_dblclick)
return axes_list
def set_axes_decor(ax, axes_decor):
if axes_decor == 'off':
ax.axis('off')
elif axes_decor == 'ticks':
ax.set_xlabel('')
ax.set_ylabel('')
elif axes_decor == 'all':
pass
elif axes_decor is None:
ax.set_xlabel('')
ax.set_ylabel('')
ax.set_xticklabels([])
ax.set_yticklabels([])
def make_cmap(colors, name='my_colormap', position=None,
bit=False, register=True):
"""
Create a matplotlib colormap with customized colors, optionally registering
it with matplotlib for simplified use.
Adapted from Chris Slocum's code at:
https://github.com/CSlocumWX/custom_colormap/blob/master/custom_colormaps.py
and used under the terms of that code's BSD-3 license
Parameters
----------
colors : iterable
list of either tuples containing rgb values, or html strings
Colors should be arranged so that the first color is the lowest
value for the colorbar and the last is the highest.
name : str
name of colormap to use when registering with matplotlib
position : None or iterable
list containing the values (from [0,1]) that dictate the position
of each color within the colormap. If None (default), the colors
will be equally-spaced within the colorbar.
bit : boolean
True if RGB colors are given in 8-bit [0 to 255] or False if given
in arithmetic basis [0 to 1] (default)
register : boolean
switch to control whether or not to register the custom colormap
with matplotlib in order to enable use by just the name string
"""
def _html_color_to_rgb(color_string):
""" convert #RRGGBB to an (R, G, B) tuple """
color_string = color_string.strip()
if color_string[0] == '#':
color_string = color_string[1:]
if len(color_string) != 6:
raise ValueError(
"input #{} is not in #RRGGBB format".format(color_string))
r, g, b = color_string[:2], color_string[2:4], color_string[4:]
r, g, b = [int(n, 16) / 255 for n in (r, g, b)]
return r, g, b
bit_rgb = np.linspace(0, 1, 256)
if position is None:
position = np.linspace(0, 1, len(colors))
else:
if len(position) != len(colors):
raise ValueError("position length must be the same as colors")
elif position[0] != 0 or position[-1] != 1:
raise ValueError("position must start with 0 and end with 1")
cdict = {'red': [], 'green': [], 'blue': []}
for pos, color in zip(position, colors):
if isinstance(color, str):
color = _html_color_to_rgb(color)
elif bit:
color = (bit_rgb[color[0]],
bit_rgb[color[1]],
bit_rgb[color[2]])
cdict['red'].append((pos, color[0], color[0]))
cdict['green'].append((pos, color[1], color[1]))
cdict['blue'].append((pos, color[2], color[2]))
cmap = mpl.colors.LinearSegmentedColormap(name, cdict, 256)
if register:
mpl.cm.register_cmap(name, cmap)
return cmap
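# A minimal usage sketch for `make_cmap`; the color values, map names and
# positions below are illustrative assumptions rather than recommended choices.
def _example_make_cmap():
    # Two-colour map from HTML strings, registered so it can later be passed
    # to plotting calls simply as cmap='example_blue_red'.
    cmap = make_cmap(['#0000ff', '#ff0000'], name='example_blue_red')
    # Three-colour diverging map from 8-bit RGB tuples with explicit stops,
    # returned without registering it with matplotlib.
    cmap_rgb = make_cmap([(0, 0, 128), (255, 255, 255), (128, 0, 0)],
                         name='example_diverging',
                         position=[0, 0.5, 1],
                         bit=True,
                         register=False)
    return cmap, cmap_rgb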
def plot_spectra(
spectra,
style='overlap',
color=None,
line_style=None,
padding=1.,
legend=None,
legend_picking=True,
legend_loc='upper right',
fig=None,
ax=None,
**kwargs):
"""Plot several spectra in the same figure.
Extra keyword arguments are passed to `matplotlib.figure`.
Parameters
----------
spectra : list of Signal1D or BaseSignal
        Ordered list of spectra to plot. If `style` is "cascade" or
        "mosaic" the spectra can have different sizes and axes. A `BaseSignal`
        with navigation dimension 1 and signal dimension 0 will be
        transposed to form a `Signal1D`.
style : {'overlap', 'cascade', 'mosaic', 'heatmap'}
The style of the plot.
color : matplotlib color or a list of them or `None`
Sets the color of the lines of the plots (no action on 'heatmap').
        If a list whose length is less than the number of spectra to plot,
        the colors will be cycled. If `None`, use the default matplotlib
        color cycle.
line_style: matplotlib line style or a list of them or `None`
Sets the line style of the plots (no action on 'heatmap').
        The main line styles are '-','--','steps','-.',':'.
        If a list whose length is less than the number of
        spectra to plot, line_style will be cycled.
        If `None`, use continuous lines, eg: ('-','--','steps','-.',':')
    padding : float, optional, default 1.
        Option for "cascade". 1 guarantees that there is no overlapping.
However, in many cases a value between 0 and 1 can produce a tighter
plot without overlapping. Negative values have the same effect but
reverse the order of the spectra without reversing the order of the
colors.
legend: None or list of str or 'auto'
        If a list of strings, the legend for "cascade" or the title for
        "mosaic" is displayed. If 'auto', the title of each spectrum
        (metadata.General.title) is used.
legend_picking: bool
        If True, a spectrum can be toggled on and off by clicking on
        the legended line.
legend_loc : str or int
This parameter controls where the legend is placed on the figure;
see the pyplot.legend docstring for valid values
fig : matplotlib figure or None
If None, a default figure will be created. Specifying fig will
not work for the 'heatmap' style.
ax : matplotlib ax (subplot) or None
If None, a default ax will be created. Will not work for 'mosaic'
or 'heatmap' style.
**kwargs
remaining keyword arguments are passed to matplotlib.figure() or
matplotlib.subplots(). Has no effect on 'heatmap' style.
Example
-------
>>> s = hs.load("some_spectra")
>>> hs.plot.plot_spectra(s, style='cascade', color='red', padding=0.5)
To save the plot as a png-file
>>> hs.plot.plot_spectra(s).figure.savefig("test.png")
Returns
-------
ax: matplotlib axes or list of matplotlib axes
An array is returned when `style` is "mosaic".
"""
import hyperspy.signal
def _reverse_legend(ax_, legend_loc_):
"""
        Reverse the ordering of a matplotlib legend (to be more consistent
        with the default ordering of plots in the 'cascade' and 'overlap'
        styles).
Parameters
----------
ax_: matplotlib axes
legend_loc_: str or int
This parameter controls where the legend is placed on the
figure; see the pyplot.legend docstring for valid values
"""
l = ax_.get_legend()
labels = [lb.get_text() for lb in list(l.get_texts())]
handles = l.legendHandles
ax_.legend(handles[::-1], labels[::-1], loc=legend_loc_)
    # Before v1.3 default would read the value from preferences.
if style == "default":
style = "overlap"
if color is not None:
if isinstance(color, str):
color = itertools.cycle([color])
elif hasattr(color, "__iter__"):
color = itertools.cycle(color)
else:
raise ValueError("Color must be None, a valid matplotlib color "
"string or a list of valid matplotlib colors.")
else:
if LooseVersion(mpl.__version__) >= "1.5.3":
color = itertools.cycle(
plt.rcParams['axes.prop_cycle'].by_key()["color"])
else:
color = itertools.cycle(plt.rcParams['axes.color_cycle'])
if line_style is not None:
if isinstance(line_style, str):
line_style = itertools.cycle([line_style])
elif hasattr(line_style, "__iter__"):
line_style = itertools.cycle(line_style)
else:
raise ValueError("line_style must be None, a valid matplotlib"
" line_style string or a list of valid matplotlib"
" line_style.")
else:
line_style = ['-'] * len(spectra)
if legend is not None:
if isinstance(legend, str):
if legend == 'auto':
legend = [spec.metadata.General.title for spec in spectra]
else:
raise ValueError("legend must be None, 'auto' or a list of"
" string")
elif hasattr(legend, "__iter__"):
legend = itertools.cycle(legend)
if style == 'overlap':
if fig is None:
fig = plt.figure(**kwargs)
if ax is None:
ax = fig.add_subplot(111)
_make_overlap_plot(spectra,
ax,
color=color,
line_style=line_style,)
if legend is not None:
ax.legend(legend, loc=legend_loc)
_reverse_legend(ax, legend_loc)
if legend_picking is True:
animate_legend(fig=fig, ax=ax)
elif style == 'cascade':
if fig is None:
fig = plt.figure(**kwargs)
if ax is None:
ax = fig.add_subplot(111)
_make_cascade_subplot(spectra,
ax,
color=color,
line_style=line_style,
padding=padding)
if legend is not None:
plt.legend(legend, loc=legend_loc)
_reverse_legend(ax, legend_loc)
elif style == 'mosaic':
default_fsize = plt.rcParams["figure.figsize"]
figsize = (default_fsize[0], default_fsize[1] * len(spectra))
fig, subplots = plt.subplots(
len(spectra), 1, figsize=figsize, **kwargs)
if legend is None:
legend = [legend] * len(spectra)
for spectrum, ax, color, line_style, legend in zip(
spectra, subplots, color, line_style, legend):
spectrum = _transpose_if_required(spectrum, 1)
_plot_spectrum(spectrum, ax, color=color, line_style=line_style)
ax.set_ylabel('Intensity')
if legend is not None:
ax.set_title(legend)
if not isinstance(spectra, hyperspy.signal.BaseSignal):
_set_spectrum_xlabel(spectrum, ax)
if isinstance(spectra, hyperspy.signal.BaseSignal):
_set_spectrum_xlabel(spectrum, ax)
fig.tight_layout()
elif style == 'heatmap':
if not isinstance(spectra, hyperspy.signal.BaseSignal):
import hyperspy.utils
spectra = [_transpose_if_required(spectrum, 1) for spectrum in
spectra]
spectra = hyperspy.utils.stack(spectra)
with spectra.unfolded():
ax = _make_heatmap_subplot(spectra)
ax.set_ylabel('Spectra')
ax = ax if style != "mosaic" else subplots
return ax
def animate_legend(fig=None, ax=None):
"""Animate the legend of a figure.
    A spectrum can be toggled on and off by clicking on the legended line.
Parameters
----------
fig: None | matplotlib.figure
If None pick the current figure using "plt.gcf"
ax: None | matplotlib.axes
If None pick the current axes using "plt.gca".
Note
----
Code inspired from legend_picking.py in the matplotlib gallery
"""
if fig is None:
fig = plt.gcf()
if ax is None:
ax = plt.gca()
lines = ax.lines[::-1]
lined = dict()
leg = ax.get_legend()
for legline, origline in zip(leg.get_lines(), lines):
legline.set_picker(5) # 5 pts tolerance
lined[legline] = origline
def onpick(event):
# on the pick event, find the orig line corresponding to the
# legend proxy line, and toggle the visibility
legline = event.artist
if legline.axes == ax:
origline = lined[legline]
vis = not origline.get_visible()
origline.set_visible(vis)
# Change the alpha on the line in the legend so we can see what lines
# have been toggled
if vis:
legline.set_alpha(1.0)
else:
legline.set_alpha(0.2)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('pick_event', onpick)
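# A minimal sketch of driving `animate_legend` by hand after plotting with
# legend picking disabled; the random spectra are assumed placeholder data.
def _example_animate_legend():
    import numpy as np
    from hyperspy.signals import Signal1D
    spectra = [Signal1D(np.random.rand(100)) for _ in range(3)]
    ax = plot_spectra(spectra, style='overlap', legend='auto',
                      legend_picking=False)
    # Re-enable click-to-toggle behaviour on the freshly created figure.
    animate_legend(fig=ax.figure, ax=ax)
    return ax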
def plot_histograms(signal_list,
bins='freedman',
range_bins=None,
color=None,
line_style=None,
legend='auto',
fig=None,
**kwargs):
"""Plot the histogram of every signal in the list in the same figure.
This function creates a histogram for each signal and plot the list with
the `utils.plot.plot_spectra` function.
Parameters
----------
signal_list : iterable
Ordered spectra list to plot. If `style` is "cascade" or "mosaic"
the spectra can have different size and axes.
bins : int or list or str, optional
If bins is a string, then it must be one of:
'knuth' : use Knuth's rule to determine bins
'scotts' : use Scott's rule to determine bins
        'freedman' : use the Freedman-Diaconis rule to determine bins
'blocks' : use bayesian blocks for dynamic bin widths
range_bins : tuple or None, optional.
the minimum and maximum range for the histogram. If not specified,
it will be (x.min(), x.max())
color : valid matplotlib color or a list of them or `None`, optional.
        Sets the color of the lines of the plots. If a list whose length is
        less than the number of spectra to plot, the colors will be cycled.
        If `None`, use the default matplotlib color cycle.
line_style: valid matplotlib line style or a list of them or `None`,
optional.
        The main line styles are '-','--','steps','-.',':'.
        If a list whose length is less than the number of
        spectra to plot, line_style will be cycled.
        If `None`, use continuous lines, eg: ('-','--','steps','-.',':')
legend: None or list of str or 'auto', optional.
        Display a legend. If 'auto', the title of each spectrum
        (metadata.General.title) is used.
legend_picking: bool, optional.
        If True, a spectrum can be toggled on and off by clicking on
        the legended line.
fig : matplotlib figure or None, optional.
If None, a default figure will be created.
**kwargs
        other keyword arguments (weights and density) are described in
        np.histogram().
Example
-------
Histograms of two random chi-square distributions
>>> img = hs.signals.Signal2D(np.random.chisquare(1,[10,10,100]))
>>> img2 = hs.signals.Signal2D(np.random.chisquare(2,[10,10,100]))
>>> hs.plot.plot_histograms([img,img2],legend=['hist1','hist2'])
Returns
-------
ax: matplotlib axes or list of matplotlib axes
An array is returned when `style` is "mosaic".
"""
hists = []
for obj in signal_list:
hists.append(obj.get_histogram(bins=bins,
range_bins=range_bins, **kwargs))
if line_style is None:
line_style = 'steps'
return plot_spectra(hists, style='overlap', color=color,
line_style=line_style, legend=legend, fig=fig)
| gpl-3.0 | 685,807,880,072,156,300 | 36.986083 | 82 | 0.581131 | false |
rg3915/django-experience | djexperience/settings.py | 1 | 3763 | import os
from decouple import config, Csv
from dj_database_url import parse as dburl
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', default=[], cast=Csv())
# Application definition
INSTALLED_APPS = [
# my apps
'djexperience.core',
# default django apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
    # third-party apps
'django_extensions',
'bootstrapform',
'widget_tweaks',
'daterange_filter',
'django_activeurl',
'import_export',
'django_tables2',
# my apps
'djexperience.bookstore',
'djexperience.company',
'djexperience.crm',
'djexperience.myemail',
'djexperience.product',
'djexperience.selling',
'djexperience.service',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djexperience.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djexperience.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
default_dburl = 'sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3')
DATABASES = {
'default': config('DATABASE_URL', default=default_dburl, cast=dburl),
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = config('DEFAULT_FROM_EMAIL')
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
USE_THOUSAND_SEPARATOR = True
DECIMAL_SEPARATOR = ','
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
LOGIN_URL = '/admin/login/'
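# A minimal sketch of the .env file python-decouple reads for the settings
# above; every value is an assumed placeholder, not a real credential.
#
#   SECRET_KEY=change-me
#   DEBUG=True
#   ALLOWED_HOSTS=127.0.0.1,.localhost
#   DATABASE_URL=postgres://user:password@localhost:5432/djexperience
#   EMAIL_HOST_USER=someone@gmail.com
#   EMAIL_HOST_PASSWORD=app-password
#   DEFAULT_FROM_EMAIL=someone@gmail.com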
| mit | -8,715,243,961,084,812,000 | 25.687943 | 91 | 0.687749 | false |
linegpe/FYS3150 | Project4/expect_random_T1.py | 1 | 3161 | import numpy as np
import matplotlib.pyplot as plt
data1 = np.loadtxt("expect_random_T1.00.dat")
data2 = np.loadtxt("expect_ordered_T1.00.dat")
data3 = np.loadtxt("expect_random2_T2.40.dat")
data4 = np.loadtxt("expect_ordered2_T2.40.dat")
values1 = data1[0::1]
values2 = data2[0::1]
values3 = data3[0::1]
values4 = data4[0::1]
N1 = len(values1)
x1 = np.linspace(0,N1,N1)
N2 = len(values3)
x2 = np.linspace(0,N2,N2)
figure1 = plt.figure()
labels = figure1.add_subplot(111)
# Turn off axis lines and ticks of the big subplot
labels.spines['top'].set_color('none')
labels.spines['bottom'].set_color('none')
labels.spines['left'].set_color('none')
labels.spines['right'].set_color('none')
labels.tick_params(labelcolor='w', top='off', bottom='off', left='off', right='off')
plt.xlabel("Number of Monte Carlo cycles",fontsize=15)
plt.ylabel("Mean energy per spin",fontsize=15)
#figure1.yaxis.set_ticks_position(right)
#figure1.ylabel.set_ticks_position('left')
#figure1.yaxis.tick_right()
fig1 = figure1.add_subplot(211)
fig1.plot(x1,values1[:,0],label="Random initial spins, T=1")
fig1.plot(x1,values2[:,0],label="Ordered initial spins, T=1")
fig1.tick_params(axis='x', labelsize=15) #HOW TO PUT THIS ON THE RIGHT SIDE?
fig1.tick_params(axis='y', labelsize=15)
fig1.yaxis.tick_right()
#plt.ylabel(r"$\langle E\rangle /L^2$",fontsize=17)
#plt.xlabel("Number of Monte Carlo cycles",fontsize=15)
plt.legend()
plt.axis([0,N1,-3,0])
#plt.show()
fig2 = figure1.add_subplot(212)
fig2.plot(x2,values3[:,0],label="Random initial spins, T=2.4")
fig2.plot(x2,values4[:,0],label="Ordered initial spins, T=2.4")
fig2.tick_params(axis='x', labelsize=15)
fig2.tick_params(axis='y', labelsize=15)
fig2.yaxis.tick_right()
#plt.ylabel(r"$\langle E\rangle /L^2$",fontsize=15)
#plt.xlabel("Number of Monte Carlo cycles",fontsize=15)
plt.legend()
plt.axis([0,50000,-2,-0.4])
plt.show()
figure2 = plt.figure()
labels = figure2.add_subplot(111)
labels.spines['top'].set_color('none')
labels.spines['bottom'].set_color('none')
labels.spines['left'].set_color('none')
labels.spines['right'].set_color('none')
labels.tick_params(labelcolor='w', top='off', bottom='off', left='off', right='off')
plt.xlabel("Number of Monte Carlo cycles",fontsize=15)
plt.ylabel("Absolute magnetization per spin",fontsize=15)
fig1 = figure2.add_subplot(211)
fig1.plot(x1,values1[:,1],label="Random initial spins, T=1")
fig1.plot(x1,values2[:,1],label="Ordered initial spins, T=1")
fig1.tick_params(axis='x', labelsize=15)
fig1.tick_params(axis='y', labelsize=15)
fig1.yaxis.tick_right()
#fig2.ylabel(r"$abs(\langle M \rangle /L^2)$",fontsize=15)
#fig2.xlabel("Number of Monte Carlo cycles",fontsize=15)
plt.legend()
plt.axis([0,N1,0.2,1.6])
#plt.show()
fig2 = figure2.add_subplot(212)
fig2.plot(x2,values3[:,1],label="Random initial spins, T=2.4")
fig2.plot(x2,values4[:,1],label="Ordered initial spins, T=2.4")
fig2.tick_params(axis='x', labelsize=15)
fig2.tick_params(axis='y', labelsize=15)
fig2.yaxis.tick_right()
#plt.ylabel(r"$abs(\langle M\rangle / L^2)$",fontsize=15)
#plt.xlabel("Number of Monte Carlo cycles",fontsize=15)
plt.legend()
#plt.axis([0,8e6,-0.1,1.4])
plt.show() | gpl-3.0 | -8,583,681,277,502,563,000 | 28.830189 | 84 | 0.708637 | false |
shoyer/xray | xarray/backends/locks.py | 1 | 5397 | import multiprocessing
import threading
import weakref
from typing import Any, MutableMapping
try:
from dask.utils import SerializableLock
except ImportError:
# no need to worry about serializing the lock
SerializableLock = threading.Lock
try:
from dask.distributed import Lock as DistributedLock
except ImportError:
DistributedLock = None
# Locks used by multiple backends.
# Neither HDF5 nor the netCDF-C library are thread-safe.
HDF5_LOCK = SerializableLock()
NETCDFC_LOCK = SerializableLock()
_FILE_LOCKS = weakref.WeakValueDictionary() # type: MutableMapping[Any, threading.Lock] # noqa
def _get_threaded_lock(key):
try:
lock = _FILE_LOCKS[key]
except KeyError:
lock = _FILE_LOCKS[key] = threading.Lock()
return lock
def _get_multiprocessing_lock(key):
# TODO: make use of the key -- maybe use locket.py?
# https://github.com/mwilliamson/locket.py
del key # unused
return multiprocessing.Lock()
_LOCK_MAKERS = {
None: _get_threaded_lock,
'threaded': _get_threaded_lock,
'multiprocessing': _get_multiprocessing_lock,
'distributed': DistributedLock,
}
def _get_lock_maker(scheduler=None):
"""Returns an appropriate function for creating resource locks.
Parameters
----------
scheduler : str or None
Dask scheduler being used.
See Also
--------
dask.utils.get_scheduler_lock
"""
return _LOCK_MAKERS[scheduler]
def _get_scheduler(get=None, collection=None):
"""Determine the dask scheduler that is being used.
None is returned if no dask scheduler is active.
See also
--------
dask.base.get_scheduler
"""
try:
# dask 0.18.1 and later
from dask.base import get_scheduler
actual_get = get_scheduler(get, collection)
except ImportError:
try:
from dask.utils import effective_get
actual_get = effective_get(get, collection)
except ImportError:
return None
try:
from dask.distributed import Client
if isinstance(actual_get.__self__, Client):
return 'distributed'
except (ImportError, AttributeError):
try:
import dask.multiprocessing
if actual_get == dask.multiprocessing.get:
return 'multiprocessing'
else:
return 'threaded'
except ImportError:
return 'threaded'
def get_write_lock(key):
"""Get a scheduler appropriate lock for writing to the given resource.
Parameters
----------
key : str
Name of the resource for which to acquire a lock. Typically a filename.
Returns
-------
Lock object that can be used like a threading.Lock object.
"""
scheduler = _get_scheduler()
lock_maker = _get_lock_maker(scheduler)
return lock_maker(key)
def acquire(lock, blocking=True):
"""Acquire a lock, possibly in a non-blocking fashion.
Includes backwards compatibility hacks for old versions of Python, dask
and dask-distributed.
"""
if blocking:
# no arguments needed
return lock.acquire()
elif DistributedLock is not None and isinstance(lock, DistributedLock):
# distributed.Lock doesn't support the blocking argument yet:
# https://github.com/dask/distributed/pull/2412
return lock.acquire(timeout=0)
else:
# "blocking" keyword argument not supported for:
# - threading.Lock on Python 2.
# - dask.SerializableLock with dask v1.0.0 or earlier.
# - multiprocessing.Lock calls the argument "block" instead.
return lock.acquire(blocking)
class CombinedLock:
"""A combination of multiple locks.
Like a locked door, a CombinedLock is locked if any of its constituent
locks are locked.
"""
def __init__(self, locks):
self.locks = tuple(set(locks)) # remove duplicates
def acquire(self, blocking=True):
return all(acquire(lock, blocking=blocking) for lock in self.locks)
def release(self):
for lock in self.locks:
lock.release()
def __enter__(self):
for lock in self.locks:
lock.__enter__()
def __exit__(self, *args):
for lock in self.locks:
lock.__exit__(*args)
def locked(self):
return any(lock.locked for lock in self.locks)
def __repr__(self):
return "CombinedLock(%r)" % list(self.locks)
class DummyLock:
"""DummyLock provides the lock API without any actual locking."""
def acquire(self, blocking=True):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
def locked(self):
return False
def combine_locks(locks):
"""Combine a sequence of locks into a single lock."""
all_locks = []
for lock in locks:
if isinstance(lock, CombinedLock):
all_locks.extend(lock.locks)
elif lock is not None:
all_locks.append(lock)
num_locks = len(all_locks)
if num_locks > 1:
return CombinedLock(all_locks)
elif num_locks == 1:
return all_locks[0]
else:
return DummyLock()
def ensure_lock(lock):
"""Ensure that the given object is a lock."""
if lock is None or lock is False:
return DummyLock()
return lock
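# A minimal sketch of combining the module-level locks for a backend that
# writes HDF5 through the netCDF-C library; purely illustrative.
def _example_combined_lock():
    lock = combine_locks([HDF5_LOCK, NETCDFC_LOCK, None])  # None is dropped
    with lock:
        pass  # CombinedLock acquires and releases both underlying locks
    return ensure_lock(lock)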
| apache-2.0 | -7,480,206,169,277,232,000 | 24.578199 | 96 | 0.631277 | false |
brahle/eval2 | scripts/haski/actions/reviewaction.py | 1 | 1578 | #!/usr/bin/env python3.2
# Copyright 2011 Bruno Rahle
#
# This file is part of Evaluator.
#
# Evaluator is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Evaluator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Evaluator. If not, see
# <http://www.gnu.org/licenses/>.
from actions.baseaction import BaseHaskiAction
import argparse
class ReviewAction(BaseHaskiAction):
"""This class is the class that does linting work.
"""
RB_ID_STR = 'reviewboard id'
def __call__(self, params):
"""Fetches the desired revision and then sends it to reviewboard.
"""
commit = self.get_commit(params)
if not params.skip_lint:
commit.lint(params)
rb_id = commit.review()
if params.revision != 'HEAD':
if self.RB_ID_STR not in commit.message.fields:
print('[WARNING] Please edit the message to incorporate',
'`ReviewBoardID` field.')
else:
commit.message.set_field(self.RB_ID_STR, rb_id)
commit.amend()
def main():
pass
if __name__ == '__main__':
main()
| agpl-3.0 | -8,740,087,895,962,332,000 | 31.204082 | 73 | 0.665399 | false |
pelodelfuego/word2vec-toolbox | toolbox/mlLib/conceptPairFeature.py | 1 | 4358 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import __init__
import numpy as np
from scipy.weave import inline
from sklearn.ensemble import RandomForestClassifier
import cpLib.concept as cp
import utils.skUtils as sku
# PROJECTION
def projCosSim(c1, c2):
v1 = c1.vect
v2 = c2.vect
dimCount = len(v1)
arr = np.zeros(dimCount, 'f')
code = """
for(int i = 0; i < dimCount; i++) {
float norm_v1 = 0.0;
float norm_v2 = 0.0;
float dot_pdt = 0.0;
for(int j = 0; j < dimCount; j++) {
if(i != j) {
dot_pdt += v1[j] * v2[j];
norm_v1 += v1[j] * v1[j];
norm_v2 += v2[j] * v2[j];
}
}
norm_v1 = sqrtf(norm_v1);
norm_v2 = sqrtf(norm_v2);
arr[i] = dot_pdt / norm_v1 / norm_v2;
}
return_val = 1;
"""
inline(code, ['v1', 'v2', 'dimCount', 'arr'], headers = ['<math.h>'], compiler = 'gcc')
return arr
def projEuclDist(c1, c2):
v1 = c1.vect
v2 = c2.vect
dimCount = len(v1)
arr = np.zeros(dimCount, 'f')
code = """
for(int i = 0; i < dimCount; i++) {
float dist = 0.0;
for(int j = 0; j < dimCount; j++) {
if(i != j) {
dist += pow(v1[j] - v2[j], 2);
}
}
arr[i] = sqrt(dist);
}
return_val = 1;
"""
inline(code, ['v1', 'v2', 'dimCount', 'arr'], headers = ['<math.h>'], compiler = 'gcc')
return arr
def projManaDist(c1, c2):
v1 = c1.vect
v2 = c2.vect
dimCount = len(v1)
arr = np.zeros(dimCount, 'f')
code = """
for(int i = 0; i < dimCount; i++) {
float dist = 0.0;
for(int j = 0; j < dimCount; j++) {
if(i != j) {
                dist += fabs(v1[j] - v2[j]);
}
}
arr[i] = dist;
}
return_val = 1;
"""
inline(code, ['v1', 'v2', 'dimCount', 'arr'], headers = ['<math.h>'], compiler = 'gcc')
return arr
# COMMUTATIVE FEATURE
def subCarth(conceptPair):
return conceptPair[2].vect - conceptPair[0].vect
def subPolar(conceptPair):
return conceptPair[2].polarVect() - conceptPair[0].polarVect()
def subAngular(conceptPair):
return conceptPair[2].angularVect() - conceptPair[0].angularVect()
def concatCarth(conceptPair):
return np.concatenate((conceptPair[0].vect, conceptPair[2].vect))
def concatPolar(conceptPair):
return np.concatenate((conceptPair[0].polarVect(), conceptPair[2].polarVect()))
def concatAngular(conceptPair):
return np.concatenate((conceptPair[0].angularVect(), conceptPair[2].angularVect()))
# NON COMMUTATIVE FEATURE
# PROJECTION SIMILARITY
def pCosSim(conceptPair):
return projCosSim(conceptPair[0], conceptPair[2])
def pEuclDist(conceptPair):
return projEuclDist(conceptPair[0], conceptPair[2])
def pManaDist(conceptPair):
return projManaDist(conceptPair[0], conceptPair[2])
# PROJECTION DISSIMILARITY
def _projectionDissimarilty(projectionMetric, globalMetric, conceptPair):
projectedFeature = projectionMetric(conceptPair[0], conceptPair[2])
globalFeature = globalMetric(conceptPair[0], conceptPair[2])
return np.array([(globalFeature - v) for v in projectedFeature])
def pdCosSim(conceptPair):
return _projectionDissimarilty(projCosSim, cp.cosSim, conceptPair)
def pdEuclDist(conceptPair):
return _projectionDissimarilty(projEuclDist, cp.euclDist, conceptPair)
def pdManaDist(conceptPair):
return _projectionDissimarilty(projManaDist, cp.manaDist, conceptPair)
# CLF
class ConceptPairClf(object):
def __init__(self, clf, featureExtractionFct):
self.clf = clf
self.featureExtractionFct = featureExtractionFct
def fit(self, X, y):
self.clf.fit([self.featureExtractionFct(x) for x in X], y)
self.classes_ = self.clf.classes_
def predict(self, X):
return self.clf.predict([self.featureExtractionFct(x) for x in X])
def predict_proba(self, X):
return self.clf.predict_proba([self.featureExtractionFct(x) for x in X])
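# A minimal sketch of wiring ConceptPairClf to one of the feature functions
# above; `trainPairs`, `trainLabels` and `testPairs` are assumed placeholders
# for (concept, relation, concept) tuples and their labels.
def _example_concept_pair_clf(trainPairs, trainLabels, testPairs):
    clf = ConceptPairClf(RandomForestClassifier(n_estimators=100), subCarth)
    clf.fit(trainPairs, trainLabels)
    return clf.predict(testPairs)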
| gpl-3.0 | -1,004,693,826,859,406,600 | 26.2375 | 91 | 0.562184 | false |
skodapetr/lbvs-environment | scripts/libs/core.py | 1 | 1664 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import csv
import os
import logging
import gzip
__license__ = "X11"
def init_logging():
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s [%(levelname)s] - %(message)s',
datefmt='%H:%M:%S')
def create_directory(path):
if not os.path.exists(path) and not path == "":
os.makedirs(path)
def create_parent_directory(path):
parent_directory = os.path.dirname(path)
if not os.path.exists(parent_directory) and not parent_directory == "":
os.makedirs(parent_directory)
def read_json(path):
if path.endswith(".gz"):
with gzip.open(path, "rt") as stream:
return json.load(stream)
else:
with open(path, "r") as stream:
return json.load(stream)
def write_json(path, object_to_write):
create_parent_directory(path)
if path.endswith(".gz"):
with gzip.open(path, "wt") as stream:
json.dump(object_to_write, stream, indent=2)
else:
with open(path, "w") as stream:
json.dump(object_to_write, stream, indent=2)
def read_csv_as_object(path):
"""
Read CSV lines as objects.
"""
results = []
with open(path) as stream:
reader = csv.reader(stream, delimiter=",", quotechar='"')
header = next(reader)
for row in reader:
new_object = {}
for index in range(0, len(row)):
new_object[header[index]] = row[index]
results.append(new_object)
return results
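# A minimal sketch of round-tripping JSON with the helpers above; the path is
# an assumed placeholder.
def _example_json_roundtrip(path="output/example.json.gz"):
    write_json(path, {"molecules": [], "version": 1})
    return read_json(path)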
if __name__ == "__main__":
raise Exception("This module can be used only as a library!")
| mit | 453,216,932,470,209,800 | 23.470588 | 75 | 0.590144 | false |
kaniblu/hangul-utils | hangul_utils/unicode.py | 1 | 8775 | __all__ = ["split_syllable_char", "split_syllables",
"join_jamos", "join_jamos_char",
"CHAR_INITIALS", "CHAR_MEDIALS", "CHAR_FINALS"]
import itertools
INITIAL = 0x001
MEDIAL = 0x010
FINAL = 0x100
CHAR_LISTS = {
INITIAL: list(map(chr, [
0x3131, 0x3132, 0x3134, 0x3137, 0x3138, 0x3139,
0x3141, 0x3142, 0x3143, 0x3145, 0x3146, 0x3147,
0x3148, 0x3149, 0x314a, 0x314b, 0x314c, 0x314d,
0x314e
])),
MEDIAL: list(map(chr, [
0x314f, 0x3150, 0x3151, 0x3152, 0x3153, 0x3154,
0x3155, 0x3156, 0x3157, 0x3158, 0x3159, 0x315a,
0x315b, 0x315c, 0x315d, 0x315e, 0x315f, 0x3160,
0x3161, 0x3162, 0x3163
])),
FINAL: list(map(chr, [
0x3131, 0x3132, 0x3133, 0x3134, 0x3135, 0x3136,
0x3137, 0x3139, 0x313a, 0x313b, 0x313c, 0x313d,
0x313e, 0x313f, 0x3140, 0x3141, 0x3142, 0x3144,
0x3145, 0x3146, 0x3147, 0x3148, 0x314a, 0x314b,
0x314c, 0x314d, 0x314e
]))
}
CHAR_INITIALS = CHAR_LISTS[INITIAL]
CHAR_MEDIALS = CHAR_LISTS[MEDIAL]
CHAR_FINALS = CHAR_LISTS[FINAL]
CHAR_SETS = {k: set(v) for k, v in CHAR_LISTS.items()}
CHARSET = set(itertools.chain(*CHAR_SETS.values()))
CHAR_INDICES = {k: {c: i for i, c in enumerate(v)}
for k, v in CHAR_LISTS.items()}
def is_hangul_syllable(c):
return 0xac00 <= ord(c) <= 0xd7a3 # Hangul Syllables
def is_hangul_jamo(c):
return 0x1100 <= ord(c) <= 0x11ff # Hangul Jamo
def is_hangul_compat_jamo(c):
return 0x3130 <= ord(c) <= 0x318f # Hangul Compatibility Jamo
def is_hangul_jamo_exta(c):
return 0xa960 <= ord(c) <= 0xa97f # Hangul Jamo Extended-A
def is_hangul_jamo_extb(c):
return 0xd7b0 <= ord(c) <= 0xd7ff # Hangul Jamo Extended-B
def is_hangul(c):
return (is_hangul_syllable(c) or
is_hangul_jamo(c) or
is_hangul_compat_jamo(c) or
is_hangul_jamo_exta(c) or
is_hangul_jamo_extb(c))
def is_supported_hangul(c):
return is_hangul_syllable(c) or is_hangul_compat_jamo(c)
def check_hangul(c, jamo_only=False):
if not ((jamo_only or is_hangul_compat_jamo(c)) or is_supported_hangul(c)):
raise ValueError(f"'{c}' is not a supported hangul character. "
f"'Hangul Syllables' (0xac00 ~ 0xd7a3) and "
f"'Hangul Compatibility Jamos' (0x3130 ~ 0x318f) are "
f"supported at the moment.")
def get_jamo_type(c):
check_hangul(c)
assert is_hangul_compat_jamo(c), f"not a jamo: {ord(c):x}"
return sum(t for t, s in CHAR_SETS.items() if c in s)
def split_syllable_char(c):
"""
Splits a given korean syllable into its components. Each component is
represented by Unicode in 'Hangul Compatibility Jamo' range.
Arguments:
c: A Korean character.
Returns:
A triple (initial, medial, final) of Hangul Compatibility Jamos.
If no jamo corresponds to a position, `None` is returned there.
Example:
>>> split_syllable_char("안")
("ㅇ", "ㅏ", "ㄴ")
>>> split_syllable_char("고")
("ㄱ", "ㅗ", None)
>>> split_syllable_char("ㅗ")
(None, "ㅗ", None)
>>> split_syllable_char("ㅇ")
("ㅇ", None, None)
"""
check_hangul(c)
if len(c) != 1:
raise ValueError("Input string must have exactly one character.")
init, med, final = None, None, None
if is_hangul_syllable(c):
offset = ord(c) - 0xac00
x = (offset - offset % 28) // 28
init, med, final = x // 21, x % 21, offset % 28
if not final:
final = None
else:
final -= 1
else:
pos = get_jamo_type(c)
if pos & INITIAL == INITIAL:
pos = INITIAL
elif pos & MEDIAL == MEDIAL:
pos = MEDIAL
elif pos & FINAL == FINAL:
pos = FINAL
idx = CHAR_INDICES[pos][c]
if pos == INITIAL:
init = idx
elif pos == MEDIAL:
med = idx
else:
final = idx
return tuple(CHAR_LISTS[pos][idx] if idx is not None else None
for pos, idx in
zip([INITIAL, MEDIAL, FINAL], [init, med, final]))
def split_syllables(s, ignore_err=True, pad=None):
"""
Performs syllable-split on a string.
Arguments:
s (str): A string (possibly mixed with non-Hangul characters).
ignore_err (bool): If set False, it ensures that all characters in
the string are Hangul-splittable and throws a ValueError otherwise.
(default: True)
pad (str): Pad empty jamo positions (initial, medial, or final) with
`pad` character. This is useful for cases where fixed-length
strings are needed. (default: None)
Returns:
Hangul-split string
Example:
>>> split_syllables("안녕하세요")
"ㅇㅏㄴㄴㅕㅇㅎㅏㅅㅔㅇㅛ"
>>> split_syllables("안녕하세요~~", ignore_err=False)
ValueError: encountered an unsupported character: ~ (0x7e)
>>> split_syllables("안녕하세요ㅛ", pad="x")
'ㅇㅏㄴㄴㅕㅇㅎㅏxㅅㅔxㅇㅛxxㅛx'
"""
def try_split(c):
try:
return split_syllable_char(c)
except ValueError:
if ignore_err:
return (c,)
raise ValueError(f"encountered an unsupported character: "
f"{c} (0x{ord(c):x})")
s = map(try_split, s)
if pad is not None:
tuples = map(lambda x: tuple(pad if y is None else y for y in x), s)
else:
tuples = map(lambda x: filter(None, x), s)
return "".join(itertools.chain(*tuples))
def join_jamos_char(init, med, final=None):
"""
Combines jamos into a single syllable.
Arguments:
init (str): Initial jao.
med (str): Medial jamo.
final (str): Final jamo. If not supplied, the final syllable is made
without the final. (default: None)
Returns:
A Korean syllable.
"""
chars = (init, med, final)
for c in filter(None, chars):
check_hangul(c, jamo_only=True)
idx = tuple(CHAR_INDICES[pos][c] if c is not None else c
for pos, c in zip((INITIAL, MEDIAL, FINAL), chars))
init_idx, med_idx, final_idx = idx
# final index must be shifted once as
# final index with 0 points to syllables without final
final_idx = 0 if final_idx is None else final_idx + 1
return chr(0xac00 + 28 * 21 * init_idx + 28 * med_idx + final_idx)
def join_jamos(s, ignore_err=True):
"""
Combines a sequence of jamos to produce a sequence of syllables.
Arguments:
s (str): A string (possible mixed with non-jamo characters).
ignore_err (bool): If set False, it will ensure that all characters
will be consumed for the making of syllables. It will throw a
ValueError when it fails to do so. (default: True)
Returns:
A string
Example:
>>> join_jamos("ㅇㅏㄴㄴㅕㅇㅎㅏㅅㅔㅇㅛ")
"안녕하세요"
>>> join_jamos("ㅇㅏㄴㄴㄴㅕㅇㅎㅏㅅㅔㅇㅛ")
"안ㄴ녕하세요"
"""
last_t = 0
queue = []
new_string = ""
def flush(n=0):
new_queue = []
while len(queue) > n:
new_queue.append(queue.pop())
if len(new_queue) == 1:
if not ignore_err:
raise ValueError(f"invalid jamo character: {new_queue[0]}")
result = new_queue[0]
elif len(new_queue) >= 2:
try:
result = join_jamos_char(*new_queue)
except (ValueError, KeyError):
# Invalid jamo combination
if not ignore_err:
raise ValueError(f"invalid jamo characters: {new_queue}")
result = "".join(new_queue)
else:
result = None
return result
for c in s:
if c not in CHARSET:
if queue:
new_c = flush() + c
else:
new_c = c
last_t = 0
else:
t = get_jamo_type(c)
new_c = None
if t & FINAL == FINAL:
if not (last_t == MEDIAL):
new_c = flush()
elif t == INITIAL:
new_c = flush()
elif t == MEDIAL:
if last_t & INITIAL == INITIAL:
new_c = flush(1)
else:
new_c = flush()
last_t = t
queue.insert(0, c)
if new_c:
new_string += new_c
if queue:
new_string += flush()
return new_string
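# A minimal sketch of a split/join round trip; the sample string is an
# arbitrary assumption mixing Hangul syllables with ASCII text.
def _example_split_join_roundtrip(text="한글 text"):
    jamos = split_syllables(text)
    return join_jamos(jamos) == text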
| gpl-3.0 | 3,778,202,042,101,361,700 | 29.820789 | 79 | 0.551227 | false |
drnextgis/QGIS | python/plugins/processing/core/parameters.py | 1 | 55397 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Parameters.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
from builtins import range
from builtins import object
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import sys
import os
import math
from inspect import isclass
from copy import deepcopy
import numbers
from qgis.utils import iface
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import (QgsRasterLayer, QgsVectorLayer, QgsMapLayer, QgsCoordinateReferenceSystem,
QgsExpressionContext, QgsExpressionContextUtils, QgsExpression, QgsExpressionContextScope)
from processing.tools.vector import resolveFieldIndex, features
from processing.tools import dataobjects
from processing.core.outputs import OutputNumber, OutputRaster, OutputVector
from processing.tools.dataobjects import getObject
def parseBool(s):
if s is None or s == str(None).lower():
return None
return str(s).lower() == str(True).lower()
def _splitParameterOptions(line):
tokens = line.split('=', 1)
if tokens[1].lower().strip().startswith('optional'):
isOptional = True
definition = tokens[1].strip()[len('optional') + 1:]
else:
isOptional = False
definition = tokens[1]
return isOptional, tokens[0], definition
def _createDescriptiveName(s):
return s.replace('_', ' ')
def _expressionContext():
context = QgsExpressionContext()
context.appendScope(QgsExpressionContextUtils.globalScope())
context.appendScope(QgsExpressionContextUtils.projectScope())
if iface.mapCanvas():
context.appendScope(QgsExpressionContextUtils.mapSettingsScope(iface.mapCanvas().mapSettings()))
processingScope = QgsExpressionContextScope()
extent = iface.mapCanvas().fullExtent()
processingScope.setVariable('fullextent_minx', extent.xMinimum())
processingScope.setVariable('fullextent_miny', extent.yMinimum())
processingScope.setVariable('fullextent_maxx', extent.xMaximum())
processingScope.setVariable('fullextent_maxy', extent.yMaximum())
context.appendScope(processingScope)
return context
def _resolveLayers(value):
layers = dataobjects.getAllLayers()
if value:
inputlayers = value.split(';')
for i, inputlayer in enumerate(inputlayers):
for layer in layers:
if layer.name() == inputlayer:
inputlayers[i] = layer.source()
break
return ";".join(inputlayers)
class Parameter(object):
"""
Base class for all parameters that a geoalgorithm might
take as input.
"""
default_metadata = {}
def __init__(self, name='', description='', default=None, optional=False,
metadata={}):
self.name = name
self.description = description
self.default = default
self.value = default
self.isAdvanced = False
# A hidden parameter can be used to set a hard-coded value.
# It can be used as any other parameter, but it will not be
# shown to the user
self.hidden = False
self.optional = parseBool(optional)
# TODO: make deep copy and deep update
self.metadata = deepcopy(self.default_metadata)
self.metadata.update(deepcopy(metadata))
def setValue(self, obj):
"""
Sets the value of the parameter.
Returns true if the value passed is correct for the type
of parameter.
"""
if obj is None:
if not self.optional:
return False
self.value = None
return True
self.value = str(obj)
return True
def setDefaultValue(self):
"""
Sets the value of the parameter to the default one
Returns true if the default value is correct for the type
of parameter.
"""
return self.setValue(self.default)
def __str__(self):
return u'{} <{}>'.format(self.name, self.__class__.__name__)
def getValueAsCommandLineParameter(self):
"""
Returns the value of this parameter as it should have been
entered in the console if calling an algorithm using the
Processing.runalg() method.
"""
return str(self.value)
def typeName(self):
return self.__class__.__name__.replace('Parameter', '').lower()
def todict(self):
o = deepcopy(self.__dict__)
del o['metadata']
return o
def tr(self, string, context=''):
if context == '':
context = 'Parameter'
return QCoreApplication.translate(context, string)
def wrapper(self, dialog, row=0, col=0):
wrapper = self.metadata.get('widget_wrapper', None)
# wrapper metadata should be a class path
if isinstance(wrapper, str):
tokens = wrapper.split('.')
mod = __import__('.'.join(tokens[:-1]), fromlist=[tokens[-1]])
wrapper = getattr(mod, tokens[-1])
# or directly a class object
if isclass(wrapper):
wrapper = wrapper(self, dialog, row, col)
# or a wrapper instance
return wrapper
def evaluate(self, alg):
pass
def evaluateForModeler(self, value, model):
return value
class ParameterBoolean(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.BooleanWidgetWrapper'
}
def __init__(self, name='', description='', default=None, optional=False, metadata={}):
Parameter.__init__(self, name, description, parseBool(default), optional, metadata)
def setValue(self, value):
if value is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(value, str):
self.value = str(value).lower() == str(True).lower()
else:
self.value = bool(value)
return True
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'boolean '
return '##' + self.name + '=' + param_type + str(self.default)
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("boolean"):
descName = _createDescriptiveName(name)
default = definition.strip()[len('boolean') + 1:]
if default:
param = ParameterBoolean(name, descName, default)
else:
param = ParameterBoolean(name, descName)
param.optional = isOptional
return param
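# A minimal sketch of how the script-code helpers above round trip; the
# parameter name and description are assumed examples.
def _example_boolean_script_code():
    param = ParameterBoolean('DISSOLVE', 'Dissolve result', True)
    line = param.getAsScriptCode()            # '##DISSOLVE=boolean True'
    parsed = ParameterBoolean.fromScriptCode(line.lstrip('#'))
    return parsed is not None and parsed.default is True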
class ParameterCrs(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.CrsWidgetWrapper'
}
def __init__(self, name='', description='', default=None, optional=False, metadata={}):
'''The value is a string that uniquely identifies the
coordinate reference system. Typically it is the auth id of the CRS
(if the authority is EPSG) or proj4 string of the CRS (in case
of other authorities or user defined projections).'''
Parameter.__init__(self, name, description, default, optional, metadata)
def setValue(self, value):
if not bool(value):
if not self.optional:
return False
self.value = None
return True
if isinstance(value, QgsCoordinateReferenceSystem):
self.value = value.authid()
return True
if isinstance(value, QgsMapLayer):
self.value = value.crs().authid()
return True
try:
layer = dataobjects.getObjectFromUri(value)
if layer is not None:
self.value = layer.crs().authid()
return True
except:
pass
# TODO: check it is a valid authid
self.value = value
return True
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'crs '
return '##' + self.name + '=' + param_type + str(self.default)
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("crs"):
descName = _createDescriptiveName(name)
default = definition.strip()[len('crs') + 1:]
if default:
return ParameterCrs(name, descName, default, isOptional)
else:
return ParameterCrs(name, descName, None, isOptional)
class ParameterDataObject(Parameter):
def getValueAsCommandLineParameter(self):
if self.value is None:
return str(None)
else:
s = dataobjects.normalizeLayerSource(str(self.value))
s = '"%s"' % s
return s
def evaluate(self, alg):
self.value = _resolveLayers(self.value)
class ParameterExtent(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.ExtentWidgetWrapper'
}
USE_MIN_COVERING_EXTENT = 'USE_MIN_COVERING_EXTENT'
def __init__(self, name='', description='', default=None, optional=True):
Parameter.__init__(self, name, description, default, optional)
# The value is a string in the form "xmin, xmax, ymin, ymax"
def setValue(self, value):
if not value:
if not self.optional:
return False
self.value = None
return True
if isinstance(value, QgsMapLayer):
rect = value.extent()
self.value = '{},{},{},{}'.format(
rect.xMinimum(), rect.xMaximum(), rect.yMinimum(), rect.yMaximum())
return True
try:
layer = dataobjects.getObjectFromUri(value)
if layer is not None:
rect = layer.extent()
self.value = '{},{},{},{}'.format(
rect.xMinimum(), rect.xMaximum(), rect.yMinimum(), rect.yMaximum())
return True
except:
pass
tokens = str(value).split(',')
if len(tokens) != 4:
return False
try:
float(tokens[0])
float(tokens[1])
float(tokens[2])
float(tokens[3])
self.value = value
return True
except:
return False
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'extent'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("extent"):
descName = _createDescriptiveName(name)
default = definition.strip()[len('extent') + 1:] or None
return ParameterExtent(name, descName, default, isOptional)
def evaluate(self, alg):
if self.optional and not bool(self.value):
self.value = self.getMinCoveringExtent(alg)
def getMinCoveringExtent(self, alg):
first = True
found = False
for param in alg.parameters:
if param.value:
if isinstance(param, (ParameterRaster, ParameterVector)):
if isinstance(param.value, (QgsRasterLayer,
QgsVectorLayer)):
layer = param.value
else:
layer = dataobjects.getObject(param.value)
if layer:
found = True
self.addToRegion(layer, first)
first = False
elif isinstance(param, ParameterMultipleInput):
layers = param.value.split(';')
for layername in layers:
layer = dataobjects.getObject(layername)
if layer:
found = True
self.addToRegion(layer, first)
first = False
if found:
return '{},{},{},{}'.format(
self.xmin, self.xmax, self.ymin, self.ymax)
else:
return None
def addToRegion(self, layer, first):
if first:
self.xmin = layer.extent().xMinimum()
self.xmax = layer.extent().xMaximum()
self.ymin = layer.extent().yMinimum()
self.ymax = layer.extent().yMaximum()
else:
self.xmin = min(self.xmin, layer.extent().xMinimum())
self.xmax = max(self.xmax, layer.extent().xMaximum())
self.ymin = min(self.ymin, layer.extent().yMinimum())
self.ymax = max(self.ymax, layer.extent().yMaximum())
class ParameterPoint(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.PointWidgetWrapper'
}
def __init__(self, name='', description='', default=None, optional=False):
Parameter.__init__(self, name, description, default, optional)
# The value is a string in the form "x, y"
def setValue(self, text):
if text is None:
if not self.optional:
return False
self.value = None
return True
tokens = str(text).split(',')
if len(tokens) != 2:
return False
try:
float(tokens[0])
float(tokens[1])
self.value = text
return True
except:
return False
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'point'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("point"):
descName = _createDescriptiveName(name)
default = definition.strip()[len('point') + 1:] or None
return ParameterPoint(name, descName, default, isOptional)
class ParameterFile(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.FileWidgetWrapper'
}
def __init__(self, name='', description='', isFolder=False, optional=True, ext=None):
Parameter.__init__(self, name, description, None, parseBool(optional))
self.ext = ext
self.isFolder = parseBool(isFolder)
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"'
def setValue(self, obj):
if obj is None or obj.strip() == '':
if not self.optional:
return False
self.value = None if obj is None else obj.strip()
return True
if self.ext is not None and obj != '' and not obj.endswith(self.ext):
return False
self.value = str(obj)
return True
def typeName(self):
if self.isFolder:
return 'directory'
else:
return 'file'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
if self.isFolder:
param_type += 'folder'
else:
param_type += 'file'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("file") or definition.startswith("folder"):
descName = _createDescriptiveName(name)
return ParameterFile(name, descName, definition.startswith("folder"), isOptional)
class ParameterFixedTable(Parameter):
def __init__(self, name='', description='', numRows=3,
cols=['value'], fixedNumOfRows=False, optional=False):
Parameter.__init__(self, name, description, None, optional)
self.cols = cols
if isinstance(cols, str):
self.cols = self.cols.split(";")
self.numRows = int(numRows)
self.fixedNumOfRows = parseBool(fixedNumOfRows)
def setValue(self, obj):
if obj is None:
if not self.optional:
return False
self.value = None
return True
# TODO: check that it contains a correct number of elements
if isinstance(obj, str):
self.value = obj
else:
self.value = ParameterFixedTable.tableToString(obj)
return True
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"'
@staticmethod
def tableToString(table):
tablestring = ''
for i in range(len(table)):
for j in range(len(table[0])):
tablestring = tablestring + table[i][j] + ','
tablestring = tablestring[:-1]
return tablestring
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("point"):
descName = _createDescriptiveName(name)
default = definition.strip()[len('point') + 1:] or None
return ParameterPoint(name, descName, default, isOptional)
class ParameterMultipleInput(ParameterDataObject):
"""A parameter representing several data objects.
Its value is a string with substrings separated by semicolons,
each of which represents the data source location of each element.
"""
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.MultipleInputWidgetWrapper'
}
exported = None
def __init__(self, name='', description='', datatype=-1, optional=False):
ParameterDataObject.__init__(self, name, description, None, optional)
self.datatype = int(float(datatype))
self.exported = None
self.minNumInputs = 0
""" Set minimum required number of inputs for parameter
By default minimal number of inputs is set to 1
@type _minNumInputs: numeric type or None
@param _minNumInputs: required minimum number of inputs for parameter. \
If user will pass None as parameter, we will use default minimal number of inputs (1)
@return: result, if the minimum number of inputs were set.
"""
def setMinNumInputs(self, _minNumInputs):
if _minNumInputs is None:
self.minNumInputs = 0
return True
if _minNumInputs < 1 and not self.optional:
# don't allow to set negative or null number of inputs if parameter isn't optional
return False
self.minNumInputs = int(_minNumInputs)
return True
""" Get minimum required number of inputs for parameter
@return: minimum number of inputs required for this parameter
@see: setMinNumInputs()
"""
def getMinNumInputs(self):
return self.minNumInputs
def setValue(self, obj):
self.exported = None
if obj is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(obj, list):
if len(obj) == 0:
if self.optional:
self.value = None
return True
else:
return False
# prevent setting value if we didn't provide required minimal number of inputs
elif len(obj) < self.minNumInputs:
return False
self.value = ";".join([self.getAsString(lay) for lay in obj])
return True
else:
self.value = str(obj)
return True
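    # Illustrative sketch (not part of the original class): setValue() turns a
    # list of layers into the semicolon-separated string described in the
    # class docstring; `layer_a` and `layer_b` are hypothetical loaded layers,
    # and the resulting paths are invented for the example.
    #
    #     param = ParameterMultipleInput('INPUTS', 'Input layers',
    #                                    datatype=dataobjects.TYPE_VECTOR_ANY)
    #     param.setValue([layer_a, layer_b])   # -> True
    #     param.value                          # -> '/data/a.shp;/data/b.shp'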
def getSafeExportedLayers(self):
"""
Returns not the value entered by the user, but a string with
semicolon-separated filenames which contains the data of the
selected layers, but saved in a standard format (currently
shapefiles for vector layers and GeoTiff for raster) so that
they can be opened by most external applications.
If there is a selection and QGIS is configured to use just the
selection, it exports the layer even if it is already in a
suitable format.
Works only if the layer represented by the parameter value is
currently loaded in QGIS. Otherwise, it will not perform any
export and return the current value string.
If the current value represents a layer in a suitable format,
it does no export at all and returns that value.
Currently, it works just for vector layer. In the case of
raster layers, it returns the parameter value.
The layers are exported just the first time the method is
called. The method can be called several times and it will
always return the same string, performing the export only the
first time.
"""
if self.exported:
return self.exported
self.exported = self.value
layers = self.value.split(';')
if layers is None or len(layers) == 0:
return self.value
if self.datatype == dataobjects.TYPE_RASTER:
for layerfile in layers:
layer = dataobjects.getObjectFromUri(layerfile, False)
if layer:
filename = dataobjects.exportRasterLayer(layer)
self.exported = self.exported.replace(layerfile, filename)
return self.exported
elif self.datatype == dataobjects.TYPE_FILE:
return self.value
else:
for layerfile in layers:
layer = dataobjects.getObjectFromUri(layerfile, False)
if layer:
filename = dataobjects.exportVectorLayer(layer)
self.exported = self.exported.replace(layerfile, filename)
return self.exported
def getAsString(self, value):
if self.datatype == dataobjects.TYPE_RASTER:
if isinstance(value, QgsRasterLayer):
return str(value.dataProvider().dataSourceUri())
else:
s = str(value)
layers = dataobjects.getRasterLayers()
for layer in layers:
if layer.name() == s:
return str(layer.dataProvider().dataSourceUri())
return s
if self.datatype == dataobjects.TYPE_FILE:
return str(value)
else:
if isinstance(value, QgsVectorLayer):
return str(value.source())
else:
s = str(value)
layers = dataobjects.getVectorLayers([self.datatype])
for layer in layers:
if layer.name() == s:
return str(layer.source())
return s
def getFileFilter(self):
if self.datatype == dataobjects.TYPE_RASTER:
exts = dataobjects.getSupportedOutputRasterLayerExtensions()
elif self.datatype == dataobjects.TYPE_FILE:
return self.tr('All files (*.*)', 'ParameterMultipleInput')
else:
exts = dataobjects.getSupportedOutputVectorLayerExtensions()
for i in range(len(exts)):
exts[i] = self.tr('%s files(*.%s)', 'ParameterMultipleInput') % (exts[i].upper(), exts[i].lower())
return ';;'.join(exts)
def dataType(self):
if self.datatype == dataobjects.TYPE_VECTOR_POINT:
return 'points'
elif self.datatype == dataobjects.TYPE_VECTOR_LINE:
return 'lines'
elif self.datatype == dataobjects.TYPE_VECTOR_POLYGON:
return 'polygons'
elif self.datatype == dataobjects.TYPE_RASTER:
return 'rasters'
elif self.datatype == dataobjects.TYPE_FILE:
return 'files'
else:
return 'any vectors'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
if self.datatype == dataobjects.TYPE_RASTER:
param_type += 'multiple raster'
        elif self.datatype == dataobjects.TYPE_FILE:
param_type += 'multiple file'
else:
param_type += 'multiple vector'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip() == 'multiple raster':
return ParameterMultipleInput(name, descName,
dataobjects.TYPE_RASTER, isOptional)
elif definition.lower().strip() == 'multiple vector':
            return ParameterMultipleInput(name, descName,
dataobjects.TYPE_VECTOR_ANY, isOptional)
def evaluate(self, alg):
self.value = _resolveLayers(self.value)
class ParameterNumber(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.NumberWidgetWrapper'
}
def __init__(self, name='', description='', minValue=None, maxValue=None,
default=None, optional=False):
Parameter.__init__(self, name, description, default, optional)
if default is not None:
try:
self.default = int(str(default))
self.isInteger = True
except ValueError:
self.default = float(default)
self.isInteger = False
else:
self.isInteger = False
if minValue is not None:
self.min = int(float(minValue)) if self.isInteger else float(minValue)
else:
self.min = None
if maxValue is not None:
self.max = int(float(maxValue)) if self.isInteger else float(maxValue)
else:
self.max = None
self.value = self.default
def setValue(self, n):
if n is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(n, str):
try:
v = self._evaluate(n)
self.value = float(v)
if self.isInteger:
self.value = int(math.floor(self.value))
return True
except:
return False
else:
try:
if float(n) - int(float(n)) == 0:
value = int(float(n))
else:
value = float(n)
if self.min is not None:
if value < self.min:
return False
if self.max is not None:
if value > self.max:
return False
self.value = value
return True
            except:
                return False
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'number'
code = '##' + self.name + '=' + param_type
if self.default:
code += str(self.default)
return code
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('number'):
default = definition.strip()[len('number'):] or None
if default == 'None':
default = None
return ParameterNumber(name, descName, default=default, optional=isOptional)
def _evaluate(self, value):
exp = QgsExpression(value)
if exp.hasParserError():
raise ValueError(self.tr("Error in parameter expression: ") + exp.parserErrorString())
result = exp.evaluate(_expressionContext())
if exp.hasEvalError():
raise ValueError("Error evaluating parameter expression: " + exp.evalErrorString())
if self.isInteger:
return math.floor(result)
else:
return result
def evaluate(self, alg):
if isinstance(self.value, str) and bool(self.value):
self.value = self._evaluate(self.value)
def _layerVariables(self, element, alg=None):
variables = {}
layer = getObject(element.value)
if layer is not None:
name = element.name if alg is None else "%s_%s" % (alg.name, element.name)
variables['@%s_minx' % name] = layer.extent().xMinimum()
variables['@%s_miny' % name] = layer.extent().yMinimum()
            variables['@%s_maxx' % name] = layer.extent().xMaximum()
variables['@%s_maxy' % name] = layer.extent().yMaximum()
if isinstance(element, (ParameterRaster, OutputRaster)):
stats = layer.dataProvider().bandStatistics(1)
variables['@%s_avg' % name] = stats.mean
variables['@%s_stddev' % name] = stats.stdDev
variables['@%s_min' % name] = stats.minimumValue
variables['@%s_max' % name] = stats.maximumValue
return variables
def evaluateForModeler(self, value, model):
if isinstance(value, numbers.Number):
return value
variables = {}
for param in model.parameters:
if isinstance(param, ParameterNumber):
variables["@" + param.name] = param.value
if isinstance(param, (ParameterRaster, ParameterVector)):
variables.update(self._layerVariables(param))
for alg in list(model.algs.values()):
for out in alg.algorithm.outputs:
if isinstance(out, OutputNumber):
variables["@%s_%s" % (alg.name, out.name)] = out.value
if isinstance(out, (OutputRaster, OutputVector)):
variables.update(self._layerVariables(out, alg))
for k, v in list(variables.items()):
value = value.replace(k, str(v))
return value
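    # Illustrative sketch (not part of the original class): evaluateForModeler()
    # performs plain text substitution, so a hypothetical value such as
    #
    #     '@cellsize * 2 + @dem_minx'
    #
    # gets '@cellsize' replaced by the model's ParameterNumber value and
    # '@dem_minx' by the extent variable built in _layerVariables() for a
    # layer parameter named 'dem'; the substituted string is then evaluated
    # like any other number expression.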
def expressionContext(self):
return _expressionContext()
def getValueAsCommandLineParameter(self):
if self.value is None:
return str(None)
if isinstance(self.value, str):
            return '"%s"' % self.value
return str(self.value)
class ParameterRange(Parameter):
def __init__(self, name='', description='', default=None, optional=False):
Parameter.__init__(self, name, description, default, optional)
if default is not None:
values = default.split(',')
try:
int(values[0])
int(values[1])
self.isInteger = True
except:
self.isInteger = False
else:
self.isInteger = False
def setValue(self, text):
if text is None:
if not self.optional:
return False
self.value = None
return True
tokens = text.split(',')
if len(tokens) != 2:
return False
try:
float(tokens[0])
float(tokens[1])
self.value = text
return True
except:
return False
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"' if self.value is not None else str(None)
class ParameterRaster(ParameterDataObject):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.RasterWidgetWrapper'
}
def __init__(self, name='', description='', optional=False, showSublayersDialog=True):
ParameterDataObject.__init__(self, name, description, None, optional)
self.showSublayersDialog = parseBool(showSublayersDialog)
self.exported = None
def getSafeExportedLayer(self):
"""Returns not the value entered by the user, but a string with
a filename which contains the data of this layer, but saved in
a standard format (currently always a geotiff file) so that it
can be opened by most external applications.
Works only if the layer represented by the parameter value is
currently loaded in QGIS. Otherwise, it will not perform any
export and return the current value string.
If the current value represents a layer in a suitable format,
it does not export at all and returns that value.
The layer is exported just the first time the method is called.
The method can be called several times and it will always
return the same file, performing the export only the first
time.
"""
if self.exported:
return self.exported
layer = dataobjects.getObjectFromUri(self.value, False)
if layer:
self.exported = dataobjects.exportRasterLayer(layer)
else:
self.exported = self.value
return self.exported
def setValue(self, obj):
self.exported = None
if obj is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(obj, QgsRasterLayer):
self.value = str(obj.dataProvider().dataSourceUri())
return True
else:
self.value = str(obj)
return True
def getFileFilter(self):
exts = dataobjects.getSupportedOutputRasterLayerExtensions()
for i in range(len(exts)):
exts[i] = self.tr('%s files(*.%s)', 'ParameterRaster') % (exts[i].upper(), exts[i].lower())
return ';;'.join(exts)
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'raster'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('raster'):
return ParameterRaster(name, descName, optional=isOptional)
class ParameterSelection(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.SelectionWidgetWrapper'
}
def __init__(self, name='', description='', options=[], default=None, isSource=False,
multiple=False, optional=False):
Parameter.__init__(self, name, description, default, optional)
self.multiple = multiple
isSource = parseBool(isSource)
self.options = options
if isSource:
self.options = []
layer = QgsVectorLayer(options[0], "layer", "ogr")
if layer.isValid():
try:
index = resolveFieldIndex(layer, options[1])
feats = features(layer)
for feature in feats:
self.options.append(str(feature.attributes()[index]))
except ValueError:
pass
elif isinstance(self.options, str):
self.options = self.options.split(";")
if default is not None:
try:
self.default = int(default)
except:
self.default = 0
self.value = self.default
def setValue(self, value):
if value is None:
if not self.optional:
return False
self.value = 0
return True
if isinstance(value, list):
if not self.multiple:
return False
values = []
for v in value:
try:
n = int(v)
values.append(n)
except:
return False
if not self.optional and len(values) == 0:
return False
self.value = values
return True
else:
try:
n = int(value)
self.value = n
return True
except:
return False
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('selectionfromfile'):
options = definition.strip()[len('selectionfromfile '):].split(';')
return ParameterSelection(name, descName, options, isSource=True, optional=isOptional)
elif definition.lower().strip().startswith('selection'):
options = definition.strip()[len('selection '):].split(';')
return ParameterSelection(name, descName, options, optional=isOptional)
elif definition.lower().strip().startswith('multipleselectionfromfile'):
options = definition.strip()[len('multipleselectionfromfile '):].split(';')
return ParameterSelection(name, descName, options, isSource=True,
multiple=True, optional=isOptional)
elif definition.lower().strip().startswith('multipleselection'):
options = definition.strip()[len('multipleselection '):].split(';')
return ParameterSelection(name, descName, options, multiple=True, optional=isOptional)
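    # Illustrative sketch (not part of the original class): hypothetical script
    # lines and the parameters they map to.
    #
    #     ParameterSelection.fromScriptCode('##method=selection nearest;bilinear;cubic')
    #         # -> options ['nearest', 'bilinear', 'cubic'], single choice
    #     ParameterSelection.fromScriptCode('##bands=multipleselection 1;2;3')
    #         # -> same options handling, but multiple=True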
class ParameterEvaluationException(Exception):
def __init__(self, param, msg):
        Exception.__init__(self, msg)
self.param = param
class ParameterString(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.StringWidgetWrapper'
}
NEWLINE = '\n'
ESCAPED_NEWLINE = '\\n'
def __init__(self, name='', description='', default=None, multiline=False,
optional=False, evaluateExpressions=False):
Parameter.__init__(self, name, description, default, optional)
self.multiline = parseBool(multiline)
self.evaluateExpressions = parseBool(evaluateExpressions)
def setValue(self, obj):
if not bool(obj):
if not self.optional:
return False
self.value = None
return True
self.value = str(obj).replace(
ParameterString.ESCAPED_NEWLINE,
ParameterString.NEWLINE
)
return True
def getValueAsCommandLineParameter(self):
return ('"' + str(self.value.replace(ParameterString.NEWLINE,
ParameterString.ESCAPED_NEWLINE)) + '"'
if self.value is not None else str(None))
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'string'
        return '##' + self.name + '=' + param_type + (self.default or '')
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('string'):
default = definition.strip()[len('string') + 1:]
if default:
return ParameterString(name, descName, default, optional=isOptional)
else:
return ParameterString(name, descName, optional=isOptional)
elif definition.lower().strip().startswith('longstring'):
default = definition.strip()[len('longstring') + 1:]
if default:
return ParameterString(name, descName, default, multiline=True, optional=isOptional)
else:
return ParameterString(name, descName, multiline=True, optional=isOptional)
def evaluate(self, alg):
if isinstance(self.value, str) and bool(self.value) and self.evaluateExpressions:
exp = QgsExpression(self.value)
if exp.hasParserError():
raise ValueError(self.tr("Error in parameter expression: ") + exp.parserErrorString())
result = exp.evaluate(_expressionContext())
if exp.hasEvalError():
raise ValueError("Error evaluating parameter expression: " + exp.evalErrorString())
self.value = result
def expressionContext(self):
return _expressionContext()
class ParameterExpression(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.ExpressionWidgetWrapper'
}
NEWLINE = '\n'
ESCAPED_NEWLINE = '\\n'
def __init__(self, name='', description='', default=None, optional=False, parent_layer=None):
Parameter.__init__(self, name, description, default, optional)
self.parent_layer = parent_layer
def setValue(self, obj):
if not bool(obj):
if not self.optional:
return False
self.value = None
return True
self.value = str(obj).replace(
ParameterString.ESCAPED_NEWLINE,
ParameterString.NEWLINE
)
return True
def getValueAsCommandLineParameter(self):
return ('"' + str(self.value.replace(ParameterExpression.NEWLINE,
ParameterExpression.ESCAPED_NEWLINE)) + '"'
if self.value is not None else str(None))
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'expression'
        return '##' + self.name + '=' + param_type + (self.default or '')
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.lower().strip().startswith('expression'):
descName = _createDescriptiveName(name)
default = definition.strip()[len('expression') + 1:]
if default:
return ParameterExpression(name, descName, default, optional=isOptional)
else:
return ParameterExpression(name, descName, optional=isOptional)
class ParameterTable(ParameterDataObject):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.TableWidgetWrapper'
}
def __init__(self, name='', description='', optional=False):
ParameterDataObject.__init__(self, name, description, None, optional)
self.exported = None
def setValue(self, obj):
self.exported = None
if obj is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(obj, QgsVectorLayer):
source = str(obj.source())
self.value = source
return True
else:
self.value = str(obj)
layers = dataobjects.getTables()
for layer in layers:
if layer.name() == self.value or layer.source() == self.value:
source = str(layer.source())
self.value = source
return True
val = str(obj)
self.value = val
return os.path.exists(self.value)
def getSafeExportedTable(self):
"""Returns not the value entered by the user, but a string with
a filename which contains the data of this table, but saved in
a standard format (currently always a DBF file) so that it can
be opened by most external applications.
Works only if the table represented by the parameter value is
currently loaded in QGIS. Otherwise, it will not perform any
export and return the current value string.
If the current value represents a table in a suitable format,
it does not export at all and returns that value.
The table is exported just the first time the method is called.
The method can be called several times and it will always
return the same file, performing the export only the first
time.
"""
if self.exported:
return self.exported
table = dataobjects.getObjectFromUri(self.value, False)
if table:
self.exported = dataobjects.exportTable(table)
else:
self.exported = self.value
return self.exported
def getFileFilter(self):
exts = ['csv', 'dbf']
for i in range(len(exts)):
exts[i] = self.tr('%s files(*.%s)', 'ParameterTable') % (exts[i].upper(), exts[i].lower())
return ';;'.join(exts)
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'table'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('table'):
return ParameterTable(name, descName, isOptional)
class ParameterTableField(Parameter):
"""A parameter representing a table field.
Its value is a string that represents the name of the field.
"""
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.TableFieldWidgetWrapper'
}
DATA_TYPE_NUMBER = 0
DATA_TYPE_STRING = 1
DATA_TYPE_DATETIME = 2
DATA_TYPE_ANY = -1
def __init__(self, name='', description='', parent=None, datatype=-1,
optional=False, multiple=False):
Parameter.__init__(self, name, description, None, optional)
self.parent = parent
self.multiple = multiple
self.datatype = int(datatype)
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"' if self.value is not None else str(None)
def setValue(self, value):
if not bool(value):
if not self.optional:
return False
self.value = None
return True
if isinstance(value, list):
if not self.multiple and len(value) > 1:
return False
self.value = ";".join(value)
return True
else:
self.value = str(value)
return True
def __str__(self):
return self.name + ' <' + self.__module__.split('.')[-1] + ' from ' \
+ self.parent + '>'
def dataType(self):
if self.datatype == self.DATA_TYPE_NUMBER:
return 'numeric'
elif self.datatype == self.DATA_TYPE_STRING:
return 'string'
elif self.datatype == self.DATA_TYPE_DATETIME:
return 'datetime'
else:
return 'any'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'field'
return '##' + self.name + '=' + param_type + self.parent
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('field'):
if definition.lower().strip().startswith('field number'):
parent = definition.strip()[len('field number') + 1:]
datatype = ParameterTableField.DATA_TYPE_NUMBER
elif definition.lower().strip().startswith('field string'):
parent = definition.strip()[len('field string') + 1:]
datatype = ParameterTableField.DATA_TYPE_STRING
elif definition.lower().strip().startswith('field datetime'):
parent = definition.strip()[len('field datetime') + 1:]
datatype = ParameterTableField.DATA_TYPE_DATETIME
else:
parent = definition.strip()[len('field') + 1:]
datatype = ParameterTableField.DATA_TYPE_ANY
return ParameterTableField(name, descName, parent, datatype, isOptional)
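    # Illustrative sketch (not part of the original class): the script syntax
    # encodes both the field type and the parent layer parameter. With the
    # hypothetical names below,
    #
    #     ParameterTableField.fromScriptCode('##pop_field=field number INPUT_LAYER')
    #
    # yields datatype DATA_TYPE_NUMBER and parent 'INPUT_LAYER', while
    # '##any_field=field INPUT_LAYER' falls through to DATA_TYPE_ANY.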
class ParameterVector(ParameterDataObject):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.VectorWidgetWrapper'
}
def __init__(self, name='', description='', datatype=[-1],
optional=False):
ParameterDataObject.__init__(self, name, description, None, optional)
if isinstance(datatype, int):
datatype = [datatype]
elif isinstance(datatype, str):
datatype = [int(t) for t in datatype.split(',')]
self.datatype = datatype
self.exported = None
self.allowOnlyOpenedLayers = False
def setValue(self, obj):
self.exported = None
if obj is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(obj, QgsVectorLayer):
self.value = str(obj.source())
return True
else:
self.value = str(obj)
return True
def getSafeExportedLayer(self):
"""Returns not the value entered by the user, but a string with
a filename which contains the data of this layer, but saved in
a standard format (currently always a shapefile) so that it can
be opened by most external applications.
If there is a selection and QGIS is configured to use just the
selection, if exports the layer even if it is already in a
suitable format.
Works only if the layer represented by the parameter value is
currently loaded in QGIS. Otherwise, it will not perform any
export and return the current value string.
If the current value represents a layer in a suitable format,
it does not export at all and returns that value.
The layer is exported just the first time the method is called.
The method can be called several times and it will always
return the same file, performing the export only the first
time.
"""
if self.exported:
return self.exported
layer = dataobjects.getObjectFromUri(self.value, False)
if layer:
self.exported = dataobjects.exportVectorLayer(layer)
else:
self.exported = self.value
return self.exported
def getFileFilter(self):
exts = dataobjects.getSupportedOutputVectorLayerExtensions()
for i in range(len(exts)):
exts[i] = self.tr('%s files(*.%s)', 'ParameterVector') % (exts[i].upper(), exts[i].lower())
return ';;'.join(exts)
def dataType(self):
return dataobjects.vectorDataType(self)
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'vector'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip() == 'vector':
return ParameterVector(name, descName,
[dataobjects.TYPE_VECTOR_ANY], isOptional)
elif definition.lower().strip() == 'vector point':
return ParameterVector(name, descName,
[dataobjects.TYPE_VECTOR_POINT], isOptional)
elif definition.lower().strip() == 'vector line':
return ParameterVector(name, descName,
[dataobjects.TYPE_VECTOR_LINE], isOptional)
elif definition.lower().strip() == 'vector polygon':
return ParameterVector(name, descName,
[dataobjects.TYPE_VECTOR_POLYGON], isOptional)
class ParameterGeometryPredicate(Parameter):
predicates = ('intersects',
'contains',
'disjoint',
'equals',
'touches',
'overlaps',
'within',
'crosses')
def __init__(self, name='', description='', left=None, right=None,
optional=False, enabledPredicates=None):
Parameter.__init__(self, name, description, None, optional)
self.left = left
self.right = right
self.value = None
self.enabledPredicates = enabledPredicates
if self.enabledPredicates is None:
self.enabledPredicates = self.predicates
def getValueAsCommandLineParameter(self):
return str(self.value)
def setValue(self, value):
if value is None:
if not self.optional:
return False
self.value = None
return True
elif len(value) == 0 and not self.optional:
return False
if isinstance(value, str):
self.value = value.split(';') # relates to ModelerAlgorithm.resolveValue
else:
self.value = value
return True
paramClasses = [c for c in list(sys.modules[__name__].__dict__.values()) if isclass(c) and issubclass(c, Parameter)]
def getParameterFromString(s):
# Try the parameter definitions used in description files
if '|' in s and (s.startswith("Parameter") or s.startswith("*Parameter")):
isAdvanced = False
if s.startswith("*"):
s = s[1:]
isAdvanced = True
tokens = s.split("|")
params = [t if str(t) != str(None) else None for t in tokens[1:]]
try:
clazz = getattr(sys.modules[__name__], tokens[0])
param = clazz(*params)
param.isAdvanced = isAdvanced
return param
except:
return None
else: # try script syntax
for paramClass in paramClasses:
try:
param = paramClass.fromScriptCode(s)
if param is not None:
return param
except AttributeError:
pass
except:
return None
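# Illustrative sketch (not part of the original module): the two syntaxes
# accepted above, with hypothetical names and values.
#
#     # description-file syntax: 'ClassName|arg1|arg2|...'; a leading '*'
#     # marks the parameter as advanced
#     getParameterFromString('ParameterNumber|CELLSIZE|Cell size|None|None|25.0')
#
#     # script syntax: '##name=definition', dispatched to fromScriptCode()
#     getParameterFromString('##buffer_distance=number 500')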
| gpl-2.0 | 6,668,968,050,453,547,000 | 34.083597 | 116 | 0.573605 | false |
andrewk1/Climb-Bot | climb-bot.py | 1 | 3083 | import praw
import requests
import json
import time
import re
# Function iterates over each submission title and checks if the title contains route syntax that indicates the post is about a route
def parse_titles(bot, subreddit):
start_time = time.time()
for submission in subreddit.stream.submissions():
if (submission.created_utc < start_time):
continue
title = submission.title
# regex matches sequence of capitalized words followed by climb grade notation (V or 5.)
route_regex = '([A-Z][a-z]+(?=\s[A-Z])(?:\s[A-Z][a-z]+)+) [( ]?(5.[0-9][0-9]?[A-Za-z]|[Vv][0-9][0-9]?)'
        route_name = re.search(route_regex, title)
        if route_name is None:
            continue
        print route_name.group(0)
        comment = make_get_request(route_name.group(0))
if comment != 'NA':
submission.reply(comment)
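# Illustrative sketch (not part of the original script): for a hypothetical
# title such as "Finally sent Midnight Lightning V8 after two seasons!", the
# regex above matches "Midnight Lightning V8" -- a capitalized route name
# followed by a V-scale or 5.xx grade -- and that matched text is what gets
# passed to make_get_request().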
# Call custom google search engine API to parse the formulated title and gather theCrag's metadata for the route
def make_get_request(route):
key = 'key=***'
cx = 'cx=***'
query= 'q='+route
    google_url = 'https://www.googleapis.com/customsearch/v1?' + key + '&' + cx + '&' + query
response = requests.get(google_url)
parsed_response= json.loads(response.text)
return form_post(parsed_response)
# Extract data from google's JSON response and form a post
def form_post(parsed_response):
# Check if Google search received a hit
    if 'items' not in parsed_response or parsed_response['searchInformation']['totalResults'] == '0':
        return 'NA'
title = parsed_response['items'][0]['title']
print title
breadcrumb = parsed_response['items'][0]['pagemap']['breadcrumb']
    # Build up region string (later breadcrumb entries are prepended)
    region_string = ''
    for i, crumb in enumerate(breadcrumb):
        region = crumb['title']
        if i > 0:
            region_string = region + ', ' + region_string
        else:
            region_string = region
metatags = parsed_response['items'][0]['pagemap']['metatags']
country = breadcrumb[0]['title']
latitude = metatags[0]['place:location:latitude']
longitude = metatags[0]['place:location:longitude']
google_pin = 'https://www.google.com/maps/@?api=1&map_action=map&basemap=satellite&zoom=19¢er=' + latitude + ',' + longitude
link = metatags[0]['og:url']
if (' in ' in title):
title = title[:title.index(' in ')]
# Truncate values to 3rd decimal place
lat_decimal = latitude.index('.')
latitude = latitude[:lat_decimal+4]
long_decimal = longitude.index('.')
longitude = longitude[:long_decimal+4]
# Format comment response
return 'I found a route! [' + title + '](' + link + ') in ' + region_string + '\n\nGPS Location: [' + latitude + ', ' + longitude + ']('+google_pin+')' + '\n\n ' + '\n\n^^^I ^^^am ^^^a ^^^bot ^^^| ^^^Data ^^^from ^^^[theCrag.com](https://www.thecrag.com/) ^^^| ^^^Feedback ^^^welcome ^^^at ^^^[r/climbBot](https://www.reddit.com/r/climbBot/)'
if __name__ == "__main__":
bot = praw.Reddit(
user_agent='climb-bot posts additional information on climbing routes it finds, created by /u/Akondrich, email: [email protected]',
client_id='***',
client_secret='***',
username='climb-bot',
password='***')
subreddit = bot.subreddit('climbBot')
parse_titles(bot, subreddit)
| mit | -518,196,464,358,046,460 | 38.525641 | 343 | 0.67337 | false |
UXE/local-edx | cms/envs/common.py | 1 | 24184 | # -*- coding: utf-8 -*-
"""
This is the common settings file, intended to set sane defaults. If you have a
piece of configuration that's dependent on a set of feature flags being set,
then create a function that returns the calculated value based on the value of
FEATURES[...]. Modules that extend this one can change the feature
configuration in an environment specific config file and re-calculate those
values.
We should make a method that calls all these config methods so that you just
make one call at the end of your site-specific dev file to reset all the
dependent variables (like INSTALLED_APPS) for you.
Longer TODO:
1. Right now our treatment of static content in general and in particular
course-specific static content is haphazard.
2. We should have a more disciplined approach to feature flagging, even if it
just means that we stick them in a dict called FEATURES.
3. We need to handle configuration for multiple courses. This could be as
multiple sites, but we do need a way to map their data assets.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-import, unused-wildcard-import
import imp
import os
import sys
import lms.envs.common
# Although this module itself may not use these imported variables, other dependent modules may.
from lms.envs.common import (
USE_TZ, TECH_SUPPORT_EMAIL, PLATFORM_NAME, BUGS_EMAIL, DOC_STORE_CONFIG, ALL_LANGUAGES, WIKI_ENABLED, MODULESTORE,
update_module_store_settings, ASSET_IGNORE_REGEX
)
from path import path
from warnings import simplefilter
from lms.lib.xblock.mixin import LmsBlockMixin
from dealer.git import git
from xmodule.modulestore.edit_info import EditInfoMixin
############################ FEATURE CONFIGURATION #############################
FEATURES = {
'USE_DJANGO_PIPELINE': True,
'GITHUB_PUSH': False,
# for consistency in user-experience, keep the value of the following 3 settings
# in sync with the ones in lms/envs/common.py
'ENABLE_DISCUSSION_SERVICE': True,
'ENABLE_TEXTBOOK': True,
'ENABLE_STUDENT_NOTES': True,
'AUTH_USE_CERTIFICATES': False,
# email address for studio staff (eg to request course creation)
'STUDIO_REQUEST_EMAIL': '',
# Segment.io - must explicitly turn it on for production
'SEGMENT_IO': False,
# Enable URL that shows information about the status of various services
'ENABLE_SERVICE_STATUS': False,
# Don't autoplay videos for course authors
'AUTOPLAY_VIDEOS': False,
# If set to True, new Studio users won't be able to author courses unless
# edX has explicitly added them to the course creator group.
'ENABLE_CREATOR_GROUP': False,
# whether to use password policy enforcement or not
'ENFORCE_PASSWORD_POLICY': False,
# If set to True, Studio won't restrict the set of advanced components
# to just those pre-approved by edX
'ALLOW_ALL_ADVANCED_COMPONENTS': False,
# Turn off account locking if failed login attempts exceeds a limit
'ENABLE_MAX_FAILED_LOGIN_ATTEMPTS': False,
# Allow editing of short description in course settings in cms
'EDITABLE_SHORT_DESCRIPTION': True,
# Hide any Personally Identifiable Information from application logs
'SQUELCH_PII_IN_LOGS': False,
# Toggles the embargo functionality, which enable embargoing for particular courses
'EMBARGO': False,
# Toggles the embargo site functionality, which enable embargoing for the whole site
'SITE_EMBARGOED': False,
# Turn on/off Microsites feature
'USE_MICROSITES': False,
# Allow creating courses with non-ascii characters in the course id
'ALLOW_UNICODE_COURSE_ID': False,
# Prevent concurrent logins per user
'PREVENT_CONCURRENT_LOGINS': False,
# Turn off Advanced Security by default
'ADVANCED_SECURITY': False,
# Modulestore to use for new courses
'DEFAULT_STORE_FOR_NEW_COURSE': None,
}
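# Illustrative sketch (not part of the original settings): the pattern the
# module docstring describes -- wrapping a value derived from FEATURES in a
# function so environment-specific files can flip a flag and recompute it.
# The function name and return value here are hypothetical.
#
#     def discussion_related_apps():
#         if FEATURES['ENABLE_DISCUSSION_SERVICE']:
#             return ('django_comment_common',)
#         return ()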
ENABLE_JASMINE = False
############################# SET PATH INFORMATION #############################
PROJECT_ROOT = path(__file__).abspath().dirname().dirname() # /edx-platform/cms
REPO_ROOT = PROJECT_ROOT.dirname()
COMMON_ROOT = REPO_ROOT / "common"
LMS_ROOT = REPO_ROOT / "lms"
ENV_ROOT = REPO_ROOT.dirname() # virtualenv dir /edx-platform is in
GITHUB_REPO_ROOT = ENV_ROOT / "data"
sys.path.append(REPO_ROOT)
sys.path.append(PROJECT_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'lib')
# For geolocation ip database
GEOIP_PATH = REPO_ROOT / "common/static/data/geoip/GeoIP.dat"
GEOIPV6_PATH = REPO_ROOT / "common/static/data/geoip/GeoIPv6.dat"
############################# WEB CONFIGURATION #############################
# This is where we stick our compiled template files.
import tempfile
MAKO_MODULE_DIR = os.path.join(tempfile.gettempdir(), 'mako_cms')
MAKO_TEMPLATES = {}
MAKO_TEMPLATES['main'] = [
PROJECT_ROOT / 'templates',
COMMON_ROOT / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_js' / 'templates',
]
for namespace, template_dirs in lms.envs.common.MAKO_TEMPLATES.iteritems():
MAKO_TEMPLATES['lms.' + namespace] = template_dirs
TEMPLATE_DIRS = MAKO_TEMPLATES['main']
EDX_ROOT_URL = ''
LOGIN_REDIRECT_URL = EDX_ROOT_URL + '/signin'
LOGIN_URL = EDX_ROOT_URL + '/signin'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.i18n',
'django.contrib.auth.context_processors.auth', # this is required for admin
'django.core.context_processors.csrf',
'dealer.contrib.django.staff.context_processor', # access git revision
'contentstore.context_processors.doc_url',
)
# use the ratelimit backend to prevent brute force attacks
AUTHENTICATION_BACKENDS = (
'ratelimitbackend.backends.RateLimitModelBackend',
)
LMS_BASE = None
# These are standard regexes for pulling out info like course_ids, usage_ids, etc.
# They are used so that URLs with deprecated-format strings still work.
from lms.envs.common import (
COURSE_KEY_PATTERN, COURSE_ID_PATTERN, USAGE_KEY_PATTERN, ASSET_KEY_PATTERN
)
#################### CAPA External Code Evaluation #############################
XQUEUE_INTERFACE = {
'url': 'http://localhost:8888',
'django_auth': {'username': 'local',
'password': 'local'},
'basic_auth': None,
}
################################# Deprecation warnings #####################
# Ignore deprecation warnings (so we don't clutter Jenkins builds/production)
simplefilter('ignore')
################################# Middleware ###################################
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'staticfiles.finders.FileSystemFinder',
'staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'request_cache.middleware.RequestCache',
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'method_override.middleware.MethodOverrideMiddleware',
# Instead of AuthenticationMiddleware, we use a cache-backed version
'cache_toolbox.middleware.CacheBackedAuthenticationMiddleware',
'student.middleware.UserStandingMiddleware',
'contentserver.middleware.StaticContentServer',
'crum.CurrentRequestUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'track.middleware.TrackMiddleware',
# Allows us to dark-launch particular languages
'dark_lang.middleware.DarkLangMiddleware',
'embargo.middleware.EmbargoMiddleware',
# Detects user-requested locale from 'accept-language' header in http request
'django.middleware.locale.LocaleMiddleware',
'django.middleware.transaction.TransactionMiddleware',
# needs to run after locale middleware (or anything that modifies the request context)
'edxmako.middleware.MakoMiddleware',
# catches any uncaught RateLimitExceptions and returns a 403 instead of a 500
'ratelimitbackend.middleware.RateLimitMiddleware',
# for expiring inactive sessions
'session_inactivity_timeout.middleware.SessionInactivityTimeout',
# use Django built in clickjacking protection
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Clickjacking protection can be enabled by setting this to 'DENY'
X_FRAME_OPTIONS = 'ALLOW'
############# XBlock Configuration ##########
# Import after sys.path fixup
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore import prefer_xmodules
from xmodule.x_module import XModuleMixin
# This should be moved into an XBlock Runtime/Application object
# once the responsibility of XBlock creation is moved out of modulestore - cpennington
XBLOCK_MIXINS = (LmsBlockMixin, InheritanceMixin, XModuleMixin, EditInfoMixin)
# Allow any XBlock in Studio
# You should also enable the ALLOW_ALL_ADVANCED_COMPONENTS feature flag, so that
# xblocks can be added via advanced settings
XBLOCK_SELECT_FUNCTION = prefer_xmodules
############################ Modulestore Configuration ################################
MODULESTORE_BRANCH = 'draft-preferred'
############################ DJANGO_BUILTINS ################################
# Change DEBUG/TEMPLATE_DEBUG in your environment settings files, not here
DEBUG = False
TEMPLATE_DEBUG = False
SESSION_COOKIE_SECURE = False
# Site info
SITE_ID = 1
SITE_NAME = "localhost:8001"
HTTPS = 'on'
ROOT_URLCONF = 'cms.urls'
# Email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
EMAIL_USE_TLS = False
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
DEFAULT_FROM_EMAIL = '[email protected]'
DEFAULT_FEEDBACK_EMAIL = '[email protected]'
SERVER_EMAIL = '[email protected]'
ADMINS = ()
MANAGERS = ADMINS
# Static content
STATIC_URL = '/static/' + git.revision + "/"
STATIC_ROOT = ENV_ROOT / "staticfiles" / git.revision
STATICFILES_DIRS = [
COMMON_ROOT / "static",
PROJECT_ROOT / "static",
LMS_ROOT / "static",
# This is how you would use the textbook images locally
# ("book", ENV_ROOT / "book_images"),
]
# Locale/Internationalization
TIME_ZONE = 'America/New_York' # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
LANGUAGE_CODE = 'en' # http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGES_BIDI = lms.envs.common.LANGUAGES_BIDI
LANGUAGES = lms.envs.common.LANGUAGES
LANGUAGE_DICT = dict(LANGUAGES)
USE_I18N = True
USE_L10N = True
# Localization strings (e.g. django.po) are under this directory
LOCALE_PATHS = (REPO_ROOT + '/conf/locale',) # edx-platform/conf/locale/
# Messages
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
##### EMBARGO #####
EMBARGO_SITE_REDIRECT_URL = None
############################### Pipeline #######################################
STATICFILES_STORAGE = 'cms.lib.django_require.staticstorage.OptimizedCachedRequireJsStorage'
from rooted_paths import rooted_glob
PIPELINE_CSS = {
'style-vendor': {
'source_filenames': [
'css/vendor/normalize.css',
'css/vendor/font-awesome.css',
'css/vendor/html5-input-polyfills/number-polyfill.css',
'js/vendor/CodeMirror/codemirror.css',
'css/vendor/ui-lightness/jquery-ui-1.8.22.custom.css',
'css/vendor/jquery.qtip.min.css',
'js/vendor/markitup/skins/simple/style.css',
'js/vendor/markitup/sets/wiki/style.css',
],
'output_filename': 'css/cms-style-vendor.css',
},
'style-vendor-tinymce-content': {
'source_filenames': [
'css/tinymce-studio-content-fonts.css',
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/content.min.css',
'css/tinymce-studio-content.css'
],
'output_filename': 'css/cms-style-vendor-tinymce-content.css',
},
'style-vendor-tinymce-skin': {
'source_filenames': [
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/skin.min.css'
],
'output_filename': 'css/cms-style-vendor-tinymce-skin.css',
},
'style-app': {
'source_filenames': [
'sass/style-app.css',
],
'output_filename': 'css/cms-style-app.css',
},
'style-app-extend1': {
'source_filenames': [
'sass/style-app-extend1.css',
],
'output_filename': 'css/cms-style-app-extend1.css',
},
'style-app-rtl': {
'source_filenames': [
'sass/style-app-rtl.css',
],
'output_filename': 'css/cms-style-app-rtl.css',
},
'style-app-extend1-rtl': {
'source_filenames': [
'sass/style-app-extend1-rtl.css',
],
'output_filename': 'css/cms-style-app-extend1-rtl.css',
},
'style-xmodule': {
'source_filenames': [
'sass/style-xmodule.css',
],
'output_filename': 'css/cms-style-xmodule.css',
},
'style-xmodule-rtl': {
'source_filenames': [
'sass/style-xmodule-rtl.css',
],
'output_filename': 'css/cms-style-xmodule-rtl.css',
},
'style-xmodule-annotations': {
'source_filenames': [
'css/vendor/ova/annotator.css',
'css/vendor/ova/edx-annotator.css',
'css/vendor/ova/video-js.min.css',
'css/vendor/ova/rangeslider.css',
'css/vendor/ova/share-annotator.css',
'css/vendor/ova/richText-annotator.css',
'css/vendor/ova/tags-annotator.css',
'css/vendor/ova/flagging-annotator.css',
'css/vendor/ova/diacritic-annotator.css',
'css/vendor/ova/grouping-annotator.css',
'css/vendor/ova/ova.css',
'js/vendor/ova/catch/css/main.css'
],
'output_filename': 'css/cms-style-xmodule-annotations.css',
},
}
# test_order: Determines the position of this chunk of javascript on
# the jasmine test page
PIPELINE_JS = {
'module-js': {
'source_filenames': (
rooted_glob(COMMON_ROOT / 'static/', 'xmodule/descriptors/js/*.js') +
rooted_glob(COMMON_ROOT / 'static/', 'xmodule/modules/js/*.js') +
rooted_glob(COMMON_ROOT / 'static/', 'coffee/src/discussion/*.js')
),
'output_filename': 'js/cms-modules.js',
'test_order': 1
},
}
PIPELINE_COMPILERS = (
'pipeline.compilers.coffee.CoffeeScriptCompiler',
)
PIPELINE_CSS_COMPRESSOR = None
PIPELINE_JS_COMPRESSOR = None
STATICFILES_IGNORE_PATTERNS = (
    "*.py",
    "*.pyc",
# it would be nice if we could do, for example, "**/*.scss",
# but these strings get passed down to the `fnmatch` module,
# which doesn't support that. :(
# http://docs.python.org/2/library/fnmatch.html
"sass/*.scss",
"sass/*/*.scss",
"sass/*/*/*.scss",
"sass/*/*/*/*.scss",
"coffee/*.coffee",
"coffee/*/*.coffee",
"coffee/*/*/*.coffee",
"coffee/*/*/*/*.coffee",
# Symlinks used by js-test-tool
"xmodule_js",
"common_static",
)
PIPELINE_YUI_BINARY = 'yui-compressor'
################################# DJANGO-REQUIRE ###############################
# The baseUrl to pass to the r.js optimizer, relative to STATIC_ROOT.
REQUIRE_BASE_URL = "./"
# The name of a build profile to use for your project, relative to REQUIRE_BASE_URL.
# A sensible value would be 'app.build.js'. Leave blank to use the built-in default build profile.
# Set to False to disable running the default profile (e.g. if only using it to build Standalone
# Modules)
REQUIRE_BUILD_PROFILE = "build.js"
# The name of the require.js script used by your project, relative to REQUIRE_BASE_URL.
REQUIRE_JS = "js/vendor/require.js"
# A dictionary of standalone modules to build with almond.js.
REQUIRE_STANDALONE_MODULES = {}
# Whether to run django-require in debug mode.
REQUIRE_DEBUG = False
# A tuple of files to exclude from the compilation result of r.js.
REQUIRE_EXCLUDE = ("build.txt",)
# The execution environment in which to run r.js: auto, node or rhino.
# auto will autodetect the environment and make use of node if available and rhino if not.
# It can also be a path to a custom class that subclasses require.environments.Environment and defines some "args" function that returns a list with the command arguments to execute.
REQUIRE_ENVIRONMENT = "node"
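# Illustrative sketch (not part of the original settings), assuming the
# interface described in the comment above: a custom environment would look
# roughly like
#
#     class CustomNodeEnvironment(require.environments.Environment):
#         def args(self):
#             return ['node', '--max-old-space-size=4096']
#
# with REQUIRE_ENVIRONMENT set to the dotted path of that class.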
################################# CELERY ######################################
# Message configuration
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_MESSAGE_COMPRESSION = 'gzip'
# Results configuration
CELERY_IGNORE_RESULT = False
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
# Events configuration
CELERY_TRACK_STARTED = True
CELERY_SEND_EVENTS = True
CELERY_SEND_TASK_SENT_EVENT = True
# Exchange configuration
CELERY_DEFAULT_EXCHANGE = 'edx.core'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
# Queues configuration
HIGH_PRIORITY_QUEUE = 'edx.core.high'
DEFAULT_PRIORITY_QUEUE = 'edx.core.default'
LOW_PRIORITY_QUEUE = 'edx.core.low'
CELERY_QUEUE_HA_POLICY = 'all'
CELERY_CREATE_MISSING_QUEUES = True
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {}
}
############################## Video ##########################################
YOUTUBE = {
# YouTube JavaScript API
'API': 'www.youtube.com/iframe_api',
# URL to test YouTube availability
'TEST_URL': 'gdata.youtube.com/feeds/api/videos/',
# Current youtube api for requesting transcripts.
# For example: http://video.google.com/timedtext?lang=en&v=j_jEn79vS3g.
'TEXT_API': {
'url': 'video.google.com/timedtext',
'params': {
'lang': 'en',
'v': 'set_youtube_id_of_11_symbols_here',
},
},
}
############################ APPS #####################################
INSTALLED_APPS = (
# Standard apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'djcelery',
'south',
'method_override',
# Database-backed configuration
'config_models',
# Monitor the status of services
'service_status',
# Testing
'django_nose',
# For CMS
'contentstore',
'course_creators',
'student', # misleading name due to sharing with lms
'openedx.core.djangoapps.course_groups', # not used in cms (yet), but tests run
# Tracking
'track',
'eventtracking.django',
# Monitoring
'datadog',
# For asset pipelining
'edxmako',
'pipeline',
'staticfiles',
'static_replace',
'require',
# comment common
'django_comment_common',
# for course creator table
'django.contrib.admin',
# for managing course modes
'course_modes',
# Dark-launching languages
'dark_lang',
# Student identity reverification
'reverification',
# User preferences
'openedx.core.djangoapps.user_api',
'django_openid_auth',
'embargo',
# Monitoring signals
'monitoring',
# Course action state
'course_action_state',
# Additional problem types
'edx_jsme', # Molecular Structure
)
################# EDX MARKETING SITE ##################################
EDXMKTG_COOKIE_NAME = 'edxloggedin'
MKTG_URLS = {}
MKTG_URL_LINK_MAP = {
}
COURSES_WITH_UNSAFE_CODE = []
############################## EVENT TRACKING #################################
TRACK_MAX_EVENT = 50000
TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'track.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking'
}
}
}
# We're already logging events, and we don't want to capture user
# names/passwords. Heartbeat events are likely not interesting.
TRACKING_IGNORE_URL_PATTERNS = [r'^/event', r'^/login', r'^/heartbeat']
EVENT_TRACKING_ENABLED = True
EVENT_TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'eventtracking.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking',
'max_event_size': TRACK_MAX_EVENT,
}
}
}
EVENT_TRACKING_PROCESSORS = [
{
'ENGINE': 'track.shim.LegacyFieldMappingProcessor'
}
]
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = None
PASSWORD_MAX_LENGTH = None
PASSWORD_COMPLEXITY = {}
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = None
PASSWORD_DICTIONARY = []
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = 5
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = 15 * 60
### Apps only installed in some instances
OPTIONAL_APPS = (
'mentoring',
# edx-ora2
'submissions',
'openassessment',
'openassessment.assessment',
'openassessment.fileupload',
'openassessment.workflow',
'openassessment.xblock',
# edxval
'edxval'
)
for app_name in OPTIONAL_APPS:
# First attempt to only find the module rather than actually importing it,
# to avoid circular references - only try to import if it can't be found
# by find_module, which doesn't work with import hooks
try:
imp.find_module(app_name)
except ImportError:
try:
__import__(app_name)
except ImportError:
continue
INSTALLED_APPS += (app_name,)
### ADVANCED_SECURITY_CONFIG
# Empty by default
ADVANCED_SECURITY_CONFIG = {}
### External auth usage -- prefixes for ENROLLMENT_DOMAIN
SHIBBOLETH_DOMAIN_PREFIX = 'shib:'
OPENID_DOMAIN_PREFIX = 'openid:'
### Size of chunks into which asset uploads will be divided
UPLOAD_CHUNK_SIZE_IN_MB = 10
### Max size of asset uploads to GridFS
MAX_ASSET_UPLOAD_FILE_SIZE_IN_MB = 10
# FAQ url to direct users to if they upload
# a file that exceeds the above size
MAX_ASSET_UPLOAD_FILE_SIZE_URL = ""
################ ADVANCED_COMPONENT_TYPES ###############
ADVANCED_COMPONENT_TYPES = [
'annotatable',
'textannotation', # module for annotating text (with annotation table)
'videoannotation', # module for annotating video (with annotation table)
'imageannotation', # module for annotating image (with annotation table)
'word_cloud',
'graphical_slider_tool',
'lti',
# XBlocks from pmitros repos are prototypes. They should not be used
# except for edX Learning Sciences experiments on edge.edx.org without
# further work to make them robust, maintainable, finalize data formats,
# etc.
'concept', # Concept mapper. See https://github.com/pmitros/ConceptXBlock
'done', # Lets students mark things as done. See https://github.com/pmitros/DoneXBlock
'audio', # Embed an audio file. See https://github.com/pmitros/AudioXBlock
'recommender', # Crowdsourced recommender. Prototype by dli&pmitros. Intended for roll-out in one place in one course.
'profile', # Prototype user profile XBlock. Used to test XBlock parameter passing. See https://github.com/pmitros/ProfileXBlock
'split_test',
'combinedopenended',
'peergrading',
'notes',
]
# Adding components to this list will disable the creation of new problems for
# those components in Studio. Existing problems will keep working and can still
# be edited in Studio.
DEPRECATED_ADVANCED_COMPONENT_TYPES = []
# Specify xblocks that should be treated as advanced problems. Each entry is a tuple
# specifying the xblock name and an optional YAML template to be used.
ADVANCED_PROBLEM_TYPES = [
{
'component': 'openassessment',
'boilerplate_name': None,
}
]
| agpl-3.0 | 1,861,303,884,916,963,600 | 30.530639 | 182 | 0.663248 | false |
yuxng/Deep_ISM | ISM/lib/setup.py | 1 | 6351 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
from os.path import join as pjoin
import numpy as np
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
def find_in_path(name, path):
"Find a file in a search path"
#adapted fom http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
def locate_cuda():
"""Locate the CUDA environment on the system
Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
and values giving the absolute path to each directory.
Starts by looking for the CUDAHOME env variable. If not found, everything
is based on finding 'nvcc' in the PATH.
"""
# first check if the CUDAHOME env variable is in use
if 'CUDAHOME' in os.environ:
home = os.environ['CUDAHOME']
nvcc = pjoin(home, 'bin', 'nvcc')
else:
# otherwise, search the PATH for NVCC
default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
if nvcc is None:
raise EnvironmentError('The nvcc binary could not be '
'located in your $PATH. Either add it to your path, or set $CUDAHOME')
home = os.path.dirname(os.path.dirname(nvcc))
cudaconfig = {'home':home, 'nvcc':nvcc,
'include': pjoin(home, 'include'),
'lib64': pjoin(home, 'lib64')}
for k, v in cudaconfig.iteritems():
if not os.path.exists(v):
raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
return cudaconfig
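# Illustrative sketch (not part of the original script): with a default
# install under /usr/local/cuda, locate_cuda() returns roughly
#
#     {'home': '/usr/local/cuda',
#      'nvcc': '/usr/local/cuda/bin/nvcc',
#      'include': '/usr/local/cuda/include',
#      'lib64': '/usr/local/cuda/lib64'}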
CUDA = locate_cuda()
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
"""inject deep into distutils to customize how the dispatch
to gcc/nvcc works.
If you subclass UnixCCompiler, it's not trivial to get your subclass
injected in, and still have the right customizations (i.e.
distutils.sysconfig.customize_compiler) run on it. So instead of going
the OO route, I have this. Note, it's kindof like a wierd functional
subclassing going on."""
# tell the compiler it can processes .cu
self.src_extensions.append('.cu')
# save references to the default compiler_so and _comple methods
default_compiler_so = self.compiler_so
super = self._compile
# now redefine the _compile method. This gets executed for each
# object but distutils doesn't have the ability to change compilers
# based on source extension: we add it.
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
if os.path.splitext(src)[1] == '.cu':
# use the cuda for .cu files
self.set_executable('compiler_so', CUDA['nvcc'])
# use only a subset of the extra_postargs, which are 1-1 translated
# from the extra_compile_args in the Extension class
postargs = extra_postargs['nvcc']
else:
postargs = extra_postargs['gcc']
super(obj, src, ext, cc_args, postargs, pp_opts)
# reset the default compiler_so, which we might have changed for cuda
self.compiler_so = default_compiler_so
# inject our redefined _compile method into the class
self._compile = _compile
# run the customize_compiler
class custom_build_ext(build_ext):
def build_extensions(self):
customize_compiler_for_nvcc(self.compiler)
build_ext.build_extensions(self)
ext_modules = [
Extension(
"utils.cython_bbox",
["utils/bbox.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
),
Extension(
"utils.cython_nms",
["utils/nms.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
),
Extension(
"nms.cpu_nms",
["nms/cpu_nms.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
include_dirs = [numpy_include]
),
Extension('nms.gpu_nms',
['nms/nms_kernel.cu', 'nms/gpu_nms.pyx'],
library_dirs=[CUDA['lib64']],
libraries=['cudart'],
language='c++',
runtime_library_dirs=[CUDA['lib64']],
# this syntax is specific to this build system
# we're only going to use certain compiler args with nvcc and not with gcc
# the implementation of this trick is in customize_compiler() below
extra_compile_args={'gcc': ["-Wno-unused-function"],
'nvcc': ['-arch=sm_35',
'--ptxas-options=-v',
'-c',
'--compiler-options',
"'-fPIC'"]},
include_dirs = [numpy_include, CUDA['include']]
),
Extension('normals.gpu_normals',
['normals/compute_normals.cu', 'normals/gpu_normals.pyx'],
library_dirs=[CUDA['lib64']],
libraries=['cudart'],
language='c++',
runtime_library_dirs=[CUDA['lib64']],
# this syntax is specific to this build system
# we're only going to use certain compiler args with nvcc and not with gcc
# the implementation of this trick is in customize_compiler() below
extra_compile_args={'gcc': ["-Wno-unused-function"],
'nvcc': ['-arch=sm_35',
'--ptxas-options=-v',
'-c',
'--compiler-options',
"'-fPIC'"]},
include_dirs = [numpy_include, CUDA['include'], '/usr/local/include/eigen3']
)
]
setup(
name='fast_rcnn',
ext_modules=ext_modules,
# inject our custom trigger
cmdclass={'build_ext': custom_build_ext},
)
| mit | -73,697,840,488,066,960 | 37.490909 | 91 | 0.587781 | false |
gamechanger/kafka-python | kafka/protocol/admin.py | 1 | 1182 | from .struct import Struct
from .types import Array, Bytes, Int16, Schema, String
class ListGroupsResponse(Struct):
SCHEMA = Schema(
('error_code', Int16),
('groups', Array(
('group', String('utf-8')),
('protocol_type', String('utf-8'))))
)
class ListGroupsRequest(Struct):
API_KEY = 16
API_VERSION = 0
RESPONSE_TYPE = ListGroupsResponse
SCHEMA = Schema()
class DescribeGroupsResponse(Struct):
SCHEMA = Schema(
('groups', Array(
('error_code', Int16),
('group', String('utf-8')),
('state', String('utf-8')),
('protocol_type', String('utf-8')),
('protocol', String('utf-8')),
('members', Array(
('member_id', String('utf-8')),
('client_id', String('utf-8')),
('client_host', String('utf-8')),
('member_metadata', Bytes),
('member_assignment', Bytes)))))
)
class DescribeGroupsRequest(Struct):
API_KEY = 15
API_VERSION = 0
RESPONSE_TYPE = DescribeGroupsResponse
SCHEMA = Schema(
('groups', Array(String('utf-8')))
)
| apache-2.0 | -8,683,488,429,018,159,000 | 25.863636 | 54 | 0.526227 | false |
UTSA-ICS/keystone-SID | keystone/tests/test_auth.py | 1 | 44678 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import uuid
import mock
from keystone import assignment
from keystone import auth
from keystone.common import authorization
from keystone.common import environment
from keystone import config
from keystone import exception
from keystone.openstack.common import timeutils
from keystone import tests
from keystone.tests import default_fixtures
from keystone import token
from keystone import trust
CONF = config.CONF
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
HOST_URL = 'http://keystone:5001'
def _build_user_auth(token=None, user_id=None, username=None,
password=None, tenant_id=None, tenant_name=None,
trust_id=None):
"""Build auth dictionary.
It will create an auth dictionary based on all the arguments
that it receives.
"""
auth_json = {}
if token is not None:
auth_json['token'] = token
if username or password:
auth_json['passwordCredentials'] = {}
if username is not None:
auth_json['passwordCredentials']['username'] = username
if user_id is not None:
auth_json['passwordCredentials']['userId'] = user_id
if password is not None:
auth_json['passwordCredentials']['password'] = password
if tenant_name is not None:
auth_json['tenantName'] = tenant_name
if tenant_id is not None:
auth_json['tenantId'] = tenant_id
if trust_id is not None:
auth_json['trust_id'] = trust_id
return auth_json
class AuthTest(tests.TestCase):
def setUp(self):
super(AuthTest, self).setUp()
self.load_backends()
self.load_fixtures(default_fixtures)
# need to register the token provider first because auth controller
# depends on it
token.provider.Manager()
self.context_with_remote_user = {'environment':
{'REMOTE_USER': 'FOO',
'AUTH_TYPE': 'Negotiate'}}
self.empty_context = {'environment': {}}
self.controller = token.controllers.Auth()
#This call sets up, among other things, the call to popen
#that will be used to run the CMS command. These tests were
#passing only due to the global nature of the call. If the
#tests in this file are run alone, API calls return unauthorized.
environment.use_eventlet(monkeypatch_thread=False)
def assertEqualTokens(self, a, b):
"""Assert that two tokens are equal.
Compare two tokens except for their ids. This also truncates
the time in the comparison.
"""
def normalize(token):
token['access']['token']['id'] = 'dummy'
del token['access']['token']['expires']
del token['access']['token']['issued_at']
return token
self.assertCloseEnoughForGovernmentWork(
timeutils.parse_isotime(a['access']['token']['expires']),
timeutils.parse_isotime(b['access']['token']['expires']))
self.assertCloseEnoughForGovernmentWork(
timeutils.parse_isotime(a['access']['token']['issued_at']),
timeutils.parse_isotime(b['access']['token']['issued_at']))
return self.assertDictEqual(normalize(a), normalize(b))
class AuthBadRequests(AuthTest):
def setUp(self):
super(AuthBadRequests, self).setUp()
def test_no_external_auth(self):
"""Verify that _authenticate_external() raises exception if N/A."""
self.assertRaises(
token.controllers.ExternalAuthNotApplicable,
self.controller._authenticate_external,
{}, {})
def test_no_token_in_auth(self):
"""Verify that _authenticate_token() raises exception if no token."""
self.assertRaises(
exception.ValidationError,
self.controller._authenticate_token,
None, {})
def test_no_credentials_in_auth(self):
"""Verify that _authenticate_local() raises exception if no creds."""
self.assertRaises(
exception.ValidationError,
self.controller._authenticate_local,
None, {})
def test_authenticate_blank_request_body(self):
"""Verify sending empty json dict raises the right exception."""
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
{}, {})
def test_authenticate_blank_auth(self):
"""Verify sending blank 'auth' raises the right exception."""
body_dict = _build_user_auth()
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_invalid_auth_content(self):
"""Verify sending invalid 'auth' raises the right exception."""
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
{}, {'auth': 'abcd'})
def test_authenticate_user_id_too_large(self):
"""Verify sending large 'userId' raises the right exception."""
body_dict = _build_user_auth(user_id='0' * 65, username='FOO',
password='foo2')
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_username_too_large(self):
"""Verify sending large 'username' raises the right exception."""
body_dict = _build_user_auth(username='0' * 65, password='foo2')
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_tenant_id_too_large(self):
"""Verify sending large 'tenantId' raises the right exception."""
body_dict = _build_user_auth(username='FOO', password='foo2',
tenant_id='0' * 65)
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_tenant_name_too_large(self):
"""Verify sending large 'tenantName' raises the right exception."""
body_dict = _build_user_auth(username='FOO', password='foo2',
tenant_name='0' * 65)
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_token_too_large(self):
"""Verify sending large 'token' raises the right exception."""
body_dict = _build_user_auth(token={'id': '0' * 8193})
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_password_too_large(self):
"""Verify sending large 'password' raises the right exception."""
length = CONF.identity.max_password_length + 1
body_dict = _build_user_auth(username='FOO', password='0' * length)
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
class AuthWithToken(AuthTest):
def setUp(self):
super(AuthWithToken, self).setUp()
def test_unscoped_token(self):
"""Verify getting an unscoped token with password creds."""
body_dict = _build_user_auth(username='FOO',
password='foo2')
unscoped_token = self.controller.authenticate({}, body_dict)
self.assertNotIn('tenant', unscoped_token['access']['token'])
def test_auth_invalid_token(self):
"""Verify exception is raised if invalid token."""
body_dict = _build_user_auth(token={"id": uuid.uuid4().hex})
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
{}, body_dict)
def test_auth_bad_formatted_token(self):
"""Verify exception is raised if invalid token."""
body_dict = _build_user_auth(token={})
self.assertRaises(
exception.ValidationError,
self.controller.authenticate,
{}, body_dict)
def test_auth_unscoped_token_no_project(self):
"""Verify getting an unscoped token with an unscoped token."""
body_dict = _build_user_auth(
username='FOO',
password='foo2')
unscoped_token = self.controller.authenticate({}, body_dict)
body_dict = _build_user_auth(
token=unscoped_token["access"]["token"])
unscoped_token_2 = self.controller.authenticate({}, body_dict)
self.assertEqualTokens(unscoped_token, unscoped_token_2)
def test_auth_unscoped_token_project(self):
"""Verify getting a token in a tenant with an unscoped token."""
# Add a role in so we can check we get this back
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'],
self.tenant_bar['id'],
self.role_member['id'])
# Get an unscoped tenant
body_dict = _build_user_auth(
username='FOO',
password='foo2')
unscoped_token = self.controller.authenticate({}, body_dict)
# Get a token on BAR tenant using the unscoped tenant
body_dict = _build_user_auth(
token=unscoped_token["access"]["token"],
tenant_name="BAR")
scoped_token = self.controller.authenticate({}, body_dict)
tenant = scoped_token["access"]["token"]["tenant"]
roles = scoped_token["access"]["metadata"]["roles"]
self.assertEqual(self.tenant_bar['id'], tenant["id"])
self.assertEqual(self.role_member['id'], roles[0])
def test_auth_token_project_group_role(self):
"""Verify getting a token in a tenant with group roles."""
# Add a v2 style role in so we can check we get this back
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'],
self.tenant_bar['id'],
self.role_member['id'])
# Now create a group role for this user as well
domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assignment_api.create_domain(domain1['id'], domain1)
new_group = {'id': uuid.uuid4().hex, 'domain_id': domain1['id'],
'name': uuid.uuid4().hex}
self.identity_api.create_group(new_group['id'], new_group)
self.identity_api.add_user_to_group(self.user_foo['id'],
new_group['id'])
self.assignment_api.create_grant(
group_id=new_group['id'],
project_id=self.tenant_bar['id'],
role_id=self.role_admin['id'])
# Get a scoped token for the tenant
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name="BAR")
scoped_token = self.controller.authenticate({}, body_dict)
tenant = scoped_token["access"]["token"]["tenant"]
roles = scoped_token["access"]["metadata"]["roles"]
self.assertEqual(self.tenant_bar['id'], tenant["id"])
self.assertIn(self.role_member['id'], roles)
self.assertIn(self.role_admin['id'], roles)
def test_auth_token_cross_domain_group_and_project(self):
"""Verify getting a token in cross domain group/project roles."""
# create domain, project and group and grant roles to user
domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assignment_api.create_domain(domain1['id'], domain1)
project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': domain1['id']}
self.assignment_api.create_project(project1['id'], project1)
role_foo_domain1 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
self.assignment_api.create_role(role_foo_domain1['id'],
role_foo_domain1)
role_group_domain1 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
self.assignment_api.create_role(role_group_domain1['id'],
role_group_domain1)
self.assignment_api.add_user_to_project(project1['id'],
self.user_foo['id'])
new_group = {'id': uuid.uuid4().hex, 'domain_id': domain1['id'],
'name': uuid.uuid4().hex}
self.identity_api.create_group(new_group['id'], new_group)
self.identity_api.add_user_to_group(self.user_foo['id'],
new_group['id'])
self.assignment_api.create_grant(
user_id=self.user_foo['id'],
project_id=project1['id'],
role_id=self.role_member['id'])
self.assignment_api.create_grant(
group_id=new_group['id'],
project_id=project1['id'],
role_id=self.role_admin['id'])
self.assignment_api.create_grant(
user_id=self.user_foo['id'],
domain_id=domain1['id'],
role_id=role_foo_domain1['id'])
self.assignment_api.create_grant(
group_id=new_group['id'],
domain_id=domain1['id'],
role_id=role_group_domain1['id'])
# Get a scoped token for the tenant
body_dict = _build_user_auth(
username=self.user_foo['name'],
password=self.user_foo['password'],
tenant_name=project1['name'])
scoped_token = self.controller.authenticate({}, body_dict)
tenant = scoped_token["access"]["token"]["tenant"]
roles = scoped_token["access"]["metadata"]["roles"]
self.assertEqual(project1['id'], tenant["id"])
self.assertIn(self.role_member['id'], roles)
self.assertIn(self.role_admin['id'], roles)
self.assertNotIn(role_foo_domain1['id'], roles)
self.assertNotIn(role_group_domain1['id'], roles)
def test_belongs_to_no_tenant(self):
r = self.controller.authenticate(
{},
auth={
'passwordCredentials': {
'username': self.user_foo['name'],
'password': self.user_foo['password']
}
})
unscoped_token_id = r['access']['token']['id']
self.assertRaises(
exception.Unauthorized,
self.controller.validate_token,
dict(is_admin=True, query_string={'belongsTo': 'BAR'}),
token_id=unscoped_token_id)
def test_belongs_to(self):
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name="BAR")
scoped_token = self.controller.authenticate({}, body_dict)
scoped_token_id = scoped_token['access']['token']['id']
self.assertRaises(
exception.Unauthorized,
self.controller.validate_token,
dict(is_admin=True, query_string={'belongsTo': 'me'}),
token_id=scoped_token_id)
self.assertRaises(
exception.Unauthorized,
self.controller.validate_token,
dict(is_admin=True, query_string={'belongsTo': 'BAR'}),
token_id=scoped_token_id)
def test_token_auth_with_binding(self):
self.config_fixture.config(group='token', bind=['kerberos'])
body_dict = _build_user_auth()
unscoped_token = self.controller.authenticate(
self.context_with_remote_user, body_dict)
# the token should have bind information in it
bind = unscoped_token['access']['token']['bind']
self.assertEqual('FOO', bind['kerberos'])
body_dict = _build_user_auth(
token=unscoped_token['access']['token'],
tenant_name='BAR')
# using unscoped token without remote user context fails
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
self.empty_context, body_dict)
# using token with remote user context succeeds
scoped_token = self.controller.authenticate(
self.context_with_remote_user, body_dict)
# the bind information should be carried over from the original token
bind = scoped_token['access']['token']['bind']
self.assertEqual('FOO', bind['kerberos'])
def test_deleting_role_revokes_token(self):
role_controller = assignment.controllers.Role()
project1 = {'id': 'Project1', 'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID}
self.assignment_api.create_project(project1['id'], project1)
role_one = {'id': 'role_one', 'name': uuid.uuid4().hex}
self.assignment_api.create_role(role_one['id'], role_one)
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'], project1['id'], role_one['id'])
no_context = {}
# Get a scoped token for the tenant
body_dict = _build_user_auth(
username=self.user_foo['name'],
password=self.user_foo['password'],
tenant_name=project1['name'])
token = self.controller.authenticate(no_context, body_dict)
# Ensure it is valid
token_id = token['access']['token']['id']
self.controller.validate_token(
dict(is_admin=True, query_string={}),
token_id=token_id)
# Delete the role, which should invalidate the token
role_controller.delete_role(
dict(is_admin=True, query_string={}), role_one['id'])
# Check the token is now invalid
self.assertRaises(
exception.TokenNotFound,
self.controller.validate_token,
dict(is_admin=True, query_string={}),
token_id=token_id)
class AuthWithPasswordCredentials(AuthTest):
def setUp(self):
super(AuthWithPasswordCredentials, self).setUp()
def test_auth_invalid_user(self):
"""Verify exception is raised if invalid user."""
body_dict = _build_user_auth(
username=uuid.uuid4().hex,
password=uuid.uuid4().hex)
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
{}, body_dict)
def test_auth_valid_user_invalid_password(self):
"""Verify exception is raised if invalid password."""
body_dict = _build_user_auth(
username="FOO",
password=uuid.uuid4().hex)
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
{}, body_dict)
def test_auth_empty_password(self):
"""Verify exception is raised if empty password."""
body_dict = _build_user_auth(
username="FOO",
password="")
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
{}, body_dict)
def test_auth_no_password(self):
"""Verify exception is raised if empty password."""
body_dict = _build_user_auth(username="FOO")
self.assertRaises(
exception.ValidationError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_blank_password_credentials(self):
"""Sending empty dict as passwordCredentials raises a 400 error."""
body_dict = {'passwordCredentials': {}, 'tenantName': 'demo'}
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_no_username(self):
"""Verify skipping username raises the right exception."""
body_dict = _build_user_auth(password="pass",
tenant_name="demo")
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
{}, body_dict)
def test_bind_without_remote_user(self):
self.config_fixture.config(group='token', bind=['kerberos'])
body_dict = _build_user_auth(username='FOO', password='foo2',
tenant_name='BAR')
token = self.controller.authenticate({}, body_dict)
self.assertNotIn('bind', token['access']['token'])
def test_change_default_domain_id(self):
# If the default_domain_id config option is not the default then the
# user in auth data is from the new default domain.
# 1) Create a new domain.
new_domain_id = uuid.uuid4().hex
new_domain = {
'description': uuid.uuid4().hex,
'enabled': True,
'id': new_domain_id,
'name': uuid.uuid4().hex,
}
self.assignment_api.create_domain(new_domain_id, new_domain)
# 2) Create user "foo" in new domain with different password than
# default-domain foo.
new_user_id = uuid.uuid4().hex
new_user_password = uuid.uuid4().hex
new_user = {
'id': new_user_id,
'name': self.user_foo['name'],
'domain_id': new_domain_id,
'password': new_user_password,
'email': '[email protected]',
}
self.identity_api.create_user(new_user_id, new_user)
# 3) Update the default_domain_id config option to the new domain
self.config_fixture.config(group='identity',
default_domain_id=new_domain_id)
# 4) Authenticate as "foo" using the password in the new domain.
body_dict = _build_user_auth(
username=self.user_foo['name'],
password=new_user_password)
# The test is successful if this doesn't raise, so no need to assert.
self.controller.authenticate({}, body_dict)
class AuthWithRemoteUser(AuthTest):
def setUp(self):
super(AuthWithRemoteUser, self).setUp()
def test_unscoped_remote_authn(self):
"""Verify getting an unscoped token with external authn."""
body_dict = _build_user_auth(
username='FOO',
password='foo2')
local_token = self.controller.authenticate(
{}, body_dict)
body_dict = _build_user_auth()
remote_token = self.controller.authenticate(
self.context_with_remote_user, body_dict)
self.assertEqualTokens(local_token, remote_token)
def test_unscoped_remote_authn_jsonless(self):
"""Verify that external auth with invalid request fails."""
self.assertRaises(
exception.ValidationError,
self.controller.authenticate,
{'REMOTE_USER': 'FOO'},
None)
def test_scoped_remote_authn(self):
"""Verify getting a token with external authn."""
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name='BAR')
local_token = self.controller.authenticate(
{}, body_dict)
body_dict = _build_user_auth(
tenant_name='BAR')
remote_token = self.controller.authenticate(
self.context_with_remote_user, body_dict)
self.assertEqualTokens(local_token, remote_token)
def test_scoped_nometa_remote_authn(self):
"""Verify getting a token with external authn and no metadata."""
body_dict = _build_user_auth(
username='TWO',
password='two2',
tenant_name='BAZ')
local_token = self.controller.authenticate(
{}, body_dict)
body_dict = _build_user_auth(tenant_name='BAZ')
remote_token = self.controller.authenticate(
{'environment': {'REMOTE_USER': 'TWO'}}, body_dict)
self.assertEqualTokens(local_token, remote_token)
def test_scoped_remote_authn_invalid_user(self):
"""Verify that external auth with invalid user fails."""
body_dict = _build_user_auth(tenant_name="BAR")
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
{'environment': {'REMOTE_USER': uuid.uuid4().hex}},
body_dict)
def test_bind_with_kerberos(self):
self.config_fixture.config(group='token', bind=['kerberos'])
body_dict = _build_user_auth(tenant_name="BAR")
token = self.controller.authenticate(self.context_with_remote_user,
body_dict)
self.assertEqual('FOO', token['access']['token']['bind']['kerberos'])
def test_bind_without_config_opt(self):
self.config_fixture.config(group='token', bind=['x509'])
body_dict = _build_user_auth(tenant_name='BAR')
token = self.controller.authenticate(self.context_with_remote_user,
body_dict)
self.assertNotIn('bind', token['access']['token'])
class AuthWithTrust(AuthTest):
def setUp(self):
super(AuthWithTrust, self).setUp()
trust.Manager()
self.trust_controller = trust.controllers.TrustV3()
self.auth_v3_controller = auth.controllers.Auth()
self.trustor = self.user_foo
self.trustee = self.user_two
self.assigned_roles = [self.role_member['id'],
self.role_browser['id']]
for assigned_role in self.assigned_roles:
self.assignment_api.add_role_to_user_and_project(
self.trustor['id'], self.tenant_bar['id'], assigned_role)
self.sample_data = {'trustor_user_id': self.trustor['id'],
'trustee_user_id': self.trustee['id'],
'project_id': self.tenant_bar['id'],
'impersonation': True,
'roles': [{'id': self.role_browser['id']},
{'name': self.role_member['name']}]}
expires_at = timeutils.strtime(timeutils.utcnow() +
datetime.timedelta(minutes=10),
fmt=TIME_FORMAT)
self.create_trust(expires_at=expires_at)
def config_overrides(self):
super(AuthWithTrust, self).config_overrides()
self.config_fixture.config(group='trust', enabled=True)
def _create_auth_context(self, token_id):
token_ref = self.token_api.get_token(token_id)
auth_context = authorization.token_to_auth_context(
token_ref['token_data'])
return {'environment': {authorization.AUTH_CONTEXT_ENV: auth_context},
'token_id': token_id,
'host_url': HOST_URL}
def create_trust(self, expires_at=None, impersonation=True):
username = self.trustor['name']
password = 'foo2'
body_dict = _build_user_auth(username=username, password=password)
self.unscoped_token = self.controller.authenticate({}, body_dict)
context = self._create_auth_context(
self.unscoped_token['access']['token']['id'])
trust_data = copy.deepcopy(self.sample_data)
trust_data['expires_at'] = expires_at
trust_data['impersonation'] = impersonation
self.new_trust = self.trust_controller.create_trust(
context, trust=trust_data)['trust']
def build_v2_token_request(self, username, password):
body_dict = _build_user_auth(username=username, password=password)
self.unscoped_token = self.controller.authenticate({}, body_dict)
unscoped_token_id = self.unscoped_token['access']['token']['id']
request_body = _build_user_auth(token={'id': unscoped_token_id},
trust_id=self.new_trust['id'],
tenant_id=self.tenant_bar['id'])
return request_body
def test_create_trust_bad_data_fails(self):
context = self._create_auth_context(
self.unscoped_token['access']['token']['id'])
bad_sample_data = {'trustor_user_id': self.trustor['id'],
'project_id': self.tenant_bar['id'],
'roles': [{'id': self.role_browser['id']}]}
self.assertRaises(exception.ValidationError,
self.trust_controller.create_trust,
context, trust=bad_sample_data)
def test_create_trust_no_roles(self):
context = {'token_id': self.unscoped_token['access']['token']['id']}
self.sample_data['roles'] = []
self.assertRaises(exception.Forbidden,
self.trust_controller.create_trust,
context, trust=self.sample_data)
def test_create_trust(self):
self.assertEqual(self.trustor['id'], self.new_trust['trustor_user_id'])
self.assertEqual(self.trustee['id'], self.new_trust['trustee_user_id'])
role_ids = [self.role_browser['id'], self.role_member['id']]
self.assertTrue(timeutils.parse_strtime(self.new_trust['expires_at'],
fmt=TIME_FORMAT))
self.assertIn('%s/v3/OS-TRUST/' % HOST_URL,
self.new_trust['links']['self'])
self.assertIn('%s/v3/OS-TRUST/' % HOST_URL,
self.new_trust['roles_links']['self'])
for role in self.new_trust['roles']:
self.assertIn(role['id'], role_ids)
def test_create_trust_expires_bad(self):
self.assertRaises(exception.ValidationTimeStampError,
self.create_trust,
expires_at="bad")
self.assertRaises(exception.ValidationTimeStampError,
self.create_trust,
expires_at="")
self.assertRaises(exception.ValidationTimeStampError,
self.create_trust,
expires_at="Z")
def test_get_trust(self):
context = {'token_id': self.unscoped_token['access']['token']['id'],
'host_url': HOST_URL}
trust = self.trust_controller.get_trust(context,
self.new_trust['id'])['trust']
self.assertEqual(self.trustor['id'], trust['trustor_user_id'])
self.assertEqual(self.trustee['id'], trust['trustee_user_id'])
role_ids = [self.role_browser['id'], self.role_member['id']]
for role in self.new_trust['roles']:
self.assertIn(role['id'], role_ids)
def test_create_trust_no_impersonation(self):
self.create_trust(expires_at=None, impersonation=False)
self.assertEqual(self.trustor['id'], self.new_trust['trustor_user_id'])
self.assertEqual(self.trustee['id'], self.new_trust['trustee_user_id'])
self.assertIs(self.new_trust['impersonation'], False)
auth_response = self.fetch_v2_token_from_trust()
token_user = auth_response['access']['user']
self.assertEqual(token_user['id'], self.new_trust['trustee_user_id'])
# TODO(ayoung): Endpoints
def test_create_trust_impersonation(self):
self.create_trust(expires_at=None)
self.assertEqual(self.trustor['id'], self.new_trust['trustor_user_id'])
self.assertEqual(self.trustee['id'], self.new_trust['trustee_user_id'])
self.assertIs(self.new_trust['impersonation'], True)
auth_response = self.fetch_v2_token_from_trust()
token_user = auth_response['access']['user']
self.assertEqual(token_user['id'], self.new_trust['trustor_user_id'])
def test_token_from_trust_wrong_user_fails(self):
request_body = self.build_v2_token_request('FOO', 'foo2')
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, {}, request_body)
def fetch_v2_token_from_trust(self):
request_body = self.build_v2_token_request('TWO', 'two2')
auth_response = self.controller.authenticate({}, request_body)
return auth_response
def fetch_v3_token_from_trust(self):
v3_password_data = {
'identity': {
"methods": ["password"],
"password": {
"user": {
"id": self.trustee["id"],
"password": self.trustee["password"]}}
},
'scope': {
'project': {
'id': self.tenant_baz['id']}}}
auth_response = (self.auth_v3_controller.authenticate_for_token
({'environment': {},
'query_string': {}},
v3_password_data))
token = auth_response.headers['X-Subject-Token']
v3_req_with_trust = {
"identity": {
"methods": ["token"],
"token": {"id": token}},
"scope": {
"OS-TRUST:trust": {"id": self.new_trust['id']}}}
token_auth_response = (self.auth_v3_controller.authenticate_for_token
({'environment': {},
'query_string': {}},
v3_req_with_trust))
return token_auth_response
def test_create_v3_token_from_trust(self):
auth_response = self.fetch_v3_token_from_trust()
trust_token_user = auth_response.json['token']['user']
self.assertEqual(self.trustor['id'], trust_token_user['id'])
trust_token_trust = auth_response.json['token']['OS-TRUST:trust']
self.assertEqual(trust_token_trust['id'], self.new_trust['id'])
self.assertEqual(self.trustor['id'],
trust_token_trust['trustor_user']['id'])
self.assertEqual(self.trustee['id'],
trust_token_trust['trustee_user']['id'])
trust_token_roles = auth_response.json['token']['roles']
self.assertEqual(2, len(trust_token_roles))
def test_v3_trust_token_get_token_fails(self):
auth_response = self.fetch_v3_token_from_trust()
trust_token = auth_response.headers['X-Subject-Token']
v3_token_data = {'identity': {
'methods': ['token'],
'token': {'id': trust_token}
}}
self.assertRaises(
exception.Forbidden,
self.auth_v3_controller.authenticate_for_token,
{'environment': {},
'query_string': {}}, v3_token_data)
def test_token_from_trust(self):
auth_response = self.fetch_v2_token_from_trust()
self.assertIsNotNone(auth_response)
self.assertEqual(2,
len(auth_response['access']['metadata']['roles']),
"user_foo has three roles, but the token should"
" only get the two roles specified in the trust.")
def assert_token_count_for_trust(self, expected_value):
tokens = self.trust_controller.token_api._list_tokens(
self.trustee['id'], trust_id=self.new_trust['id'])
token_count = len(tokens)
self.assertEqual(expected_value, token_count)
def test_delete_tokens_for_user_invalidates_tokens_from_trust(self):
self.assert_token_count_for_trust(0)
self.fetch_v2_token_from_trust()
self.assert_token_count_for_trust(1)
self.token_api.delete_tokens_for_user(self.trustee['id'])
self.assert_token_count_for_trust(0)
def test_token_from_trust_cant_get_another_token(self):
auth_response = self.fetch_v2_token_from_trust()
trust_token_id = auth_response['access']['token']['id']
request_body = _build_user_auth(token={'id': trust_token_id},
tenant_id=self.tenant_bar['id'])
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, {}, request_body)
def test_delete_trust_revokes_token(self):
context = self._create_auth_context(
self.unscoped_token['access']['token']['id'])
self.fetch_v2_token_from_trust()
trust_id = self.new_trust['id']
tokens = self.token_api._list_tokens(self.trustor['id'],
trust_id=trust_id)
self.assertEqual(1, len(tokens))
self.trust_controller.delete_trust(context, trust_id=trust_id)
tokens = self.token_api._list_tokens(self.trustor['id'],
trust_id=trust_id)
self.assertEqual(0, len(tokens))
def test_token_from_trust_with_no_role_fails(self):
for assigned_role in self.assigned_roles:
self.assignment_api.remove_role_from_user_and_project(
self.trustor['id'], self.tenant_bar['id'], assigned_role)
request_body = self.build_v2_token_request('TWO', 'two2')
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, {}, request_body)
def test_expired_trust_get_token_fails(self):
expiry = "1999-02-18T10:10:00Z"
self.create_trust(expiry)
request_body = self.build_v2_token_request('TWO', 'two2')
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, {}, request_body)
def test_token_from_trust_with_wrong_role_fails(self):
self.assignment_api.add_role_to_user_and_project(
self.trustor['id'],
self.tenant_bar['id'],
self.role_other['id'])
for assigned_role in self.assigned_roles:
self.assignment_api.remove_role_from_user_and_project(
self.trustor['id'], self.tenant_bar['id'], assigned_role)
request_body = self.build_v2_token_request('TWO', 'two2')
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, {}, request_body)
class TokenExpirationTest(AuthTest):
@mock.patch.object(timeutils, 'utcnow')
def _maintain_token_expiration(self, mock_utcnow):
"""Token expiration should be maintained after re-auth & validation."""
now = datetime.datetime.utcnow()
mock_utcnow.return_value = now
r = self.controller.authenticate(
{},
auth={
'passwordCredentials': {
'username': self.user_foo['name'],
'password': self.user_foo['password']
}
})
unscoped_token_id = r['access']['token']['id']
original_expiration = r['access']['token']['expires']
mock_utcnow.return_value = now + datetime.timedelta(seconds=1)
r = self.controller.validate_token(
dict(is_admin=True, query_string={}),
token_id=unscoped_token_id)
self.assertEqual(original_expiration, r['access']['token']['expires'])
mock_utcnow.return_value = now + datetime.timedelta(seconds=2)
r = self.controller.authenticate(
{},
auth={
'token': {
'id': unscoped_token_id,
},
'tenantId': self.tenant_bar['id'],
})
scoped_token_id = r['access']['token']['id']
self.assertEqual(original_expiration, r['access']['token']['expires'])
mock_utcnow.return_value = now + datetime.timedelta(seconds=3)
r = self.controller.validate_token(
dict(is_admin=True, query_string={}),
token_id=scoped_token_id)
self.assertEqual(original_expiration, r['access']['token']['expires'])
def test_maintain_uuid_token_expiration(self):
self.config_fixture.config(group='signing', token_format='UUID')
self._maintain_token_expiration()
class AuthCatalog(tests.SQLDriverOverrides, AuthTest):
"""Tests for the catalog provided in the auth response."""
def config_files(self):
config_files = super(AuthCatalog, self).config_files()
# We need to use a backend that supports disabled endpoints, like the
# SQL backend.
config_files.append(tests.dirs.tests_conf('backend_sql.conf'))
return config_files
def _create_endpoints(self):
def create_endpoint(service_id, region, **kwargs):
id_ = uuid.uuid4().hex
ref = {
'id': id_,
'interface': 'public',
'region': region,
'service_id': service_id,
'url': 'http://localhost/%s' % uuid.uuid4().hex,
}
ref.update(kwargs)
self.catalog_api.create_endpoint(id_, ref)
return ref
# Create a service for use with the endpoints.
def create_service(**kwargs):
id_ = uuid.uuid4().hex
ref = {
'id': id_,
'name': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
}
ref.update(kwargs)
self.catalog_api.create_service(id_, ref)
return ref
enabled_service_ref = create_service(enabled=True)
disabled_service_ref = create_service(enabled=False)
region = uuid.uuid4().hex
# Create endpoints
enabled_endpoint_ref = create_endpoint(
enabled_service_ref['id'], region)
create_endpoint(
enabled_service_ref['id'], region, enabled=False,
interface='internal')
create_endpoint(
disabled_service_ref['id'], region)
return enabled_endpoint_ref
def test_auth_catalog_disabled_endpoint(self):
"""On authenticate, get a catalog that excludes disabled endpoints."""
endpoint_ref = self._create_endpoints()
# Authenticate
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name="BAR")
token = self.controller.authenticate({}, body_dict)
# Check the catalog
self.assertEqual(1, len(token['access']['serviceCatalog']))
endpoint = token['access']['serviceCatalog'][0]['endpoints'][0]
self.assertEqual(
1, len(token['access']['serviceCatalog'][0]['endpoints']))
exp_endpoint = {
'id': endpoint_ref['id'],
'publicURL': endpoint_ref['url'],
'region': endpoint_ref['region'],
}
self.assertEqual(exp_endpoint, endpoint)
def test_validate_catalog_disabled_endpoint(self):
"""On validate, get back a catalog that excludes disabled endpoints."""
endpoint_ref = self._create_endpoints()
# Authenticate
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name="BAR")
token = self.controller.authenticate({}, body_dict)
# Validate
token_id = token['access']['token']['id']
validate_ref = self.controller.validate_token(
dict(is_admin=True, query_string={}),
token_id=token_id)
# Check the catalog
self.assertEqual(1, len(token['access']['serviceCatalog']))
endpoint = validate_ref['access']['serviceCatalog'][0]['endpoints'][0]
self.assertEqual(
1, len(token['access']['serviceCatalog'][0]['endpoints']))
exp_endpoint = {
'id': endpoint_ref['id'],
'publicURL': endpoint_ref['url'],
'region': endpoint_ref['region'],
}
self.assertEqual(exp_endpoint, endpoint)
class NonDefaultAuthTest(tests.TestCase):
def test_add_non_default_auth_method(self):
self.config_fixture.config(group='auth',
methods=['password', 'token', 'custom'])
config.setup_authentication()
self.assertTrue(hasattr(CONF.auth, 'custom'))
| apache-2.0 | 159,340,296,657,682,560 | 39.839122 | 79 | 0.575406 | false |
olysonek/tuned | tests/unit/profiles/test_profile.py | 1 | 1691 | import unittest2
import tuned.profiles
import collections
class MockProfile(tuned.profiles.profile.Profile):
def _create_unit(self, name, config):
return (name, config)
class ProfileTestCase(unittest2.TestCase):
def test_init(self):
MockProfile("test", {})
def test_create_units(self):
profile = MockProfile("test", {
"main": { "anything": 10 },
"network" : { "type": "net", "devices": "*" },
"storage" : { "type": "disk" },
})
self.assertIs(type(profile.units), collections.OrderedDict)
self.assertEqual(len(profile.units), 2)
self.assertListEqual(sorted([name_config for name_config in profile.units]), sorted(["network", "storage"]))
def test_create_units_empty(self):
profile = MockProfile("test", {"main":{}})
self.assertIs(type(profile.units), collections.OrderedDict)
self.assertEqual(len(profile.units), 0)
def test_sets_name(self):
profile1 = MockProfile("test_one", {})
profile2 = MockProfile("test_two", {})
self.assertEqual(profile1.name, "test_one")
self.assertEqual(profile2.name, "test_two")
def test_change_name(self):
profile = MockProfile("oldname", {})
self.assertEqual(profile.name, "oldname")
profile.name = "newname"
self.assertEqual(profile.name, "newname")
def test_sets_options(self):
profile = MockProfile("test", {
"main": { "anything": 10 },
"network" : { "type": "net", "devices": "*" },
})
self.assertIs(type(profile.options), dict)
self.assertEqual(profile.options["anything"], 10)
def test_sets_options_empty(self):
profile = MockProfile("test", {
"storage" : { "type": "disk" },
})
self.assertIs(type(profile.options), dict)
self.assertEqual(len(profile.options), 0)
| gpl-2.0 | -6,405,430,942,689,261,000 | 28.155172 | 110 | 0.678297 | false |
kritak/textdungeon | Internal/pricerandomtester.py | 1 | 1114 | """testing random frequency of items based on price for item.
a cheap item is more common, a expensive item is very rare"""
import random
d = {"healing":50,
"berserk":60,
"clever":100,
"swiftness":100,
"might":100,
"awesomeness":500,
}
# reverse d
dr = [[1/b,a] for [a,b] in d.items()] # list of [price, drinkname]
dr.sort() # sort this list by price
pricelist1 = [a for [a,b] in dr] # list of price only
drinklist = [b for [a,b] in dr] # list of drinkname only
pricelist2 = [] # list of added up prices
kprice = 0
for p in pricelist1:
kprice += p
pricelist2.append(kprice)
print(pricelist1, pricelist2)
result = {}
print("calculating please wait...")
for x in range(10000):
y = random.random()*(pricelist2[-1]) # 1 to maxprice
for p in pricelist2:
if y < p:
drinkname = drinklist[pricelist2.index(p)]
if drinkname in result:
result[drinkname] += 1
else:
result[drinkname] = 1
break
print(result)
| gpl-2.0 | -7,916,032,930,120,072,000 | 24.906977 | 66 | 0.561939 | false |
SPARLab/BikeMaps | mapApp/views/__init__.py | 1 | 1138 | from .about import about, contact
from .alerts import alertUsers, postAlertPolygon, readAlertPoint
from .disclaimer import disclaimer
from .edit import editHazards, editShape, updateHazard
from .index import index
from .postPoint import (postHazard, postIncident, postNearmiss,
postNewInfrastructure, postTheft)
from .pushNotification import pushNotification
from .recentReports import recentReports
from .restApi import (AlertAreaDetail, AlertAreaList, APNSDeviceDetail,
APNSDeviceList, CollisionList, FilteredHazardList,
FilteredTheftList, GCMDeviceDetail, GCMDeviceList,
HazardList, IncidentList, NearmissList, OfficialList,
TheftList, TinyCollisionList, TinyHazardList,
TinyNearMissList, TinyNewInfrastructureList,
TinyTheftList, UserDetail, UserList, XHRCollisionInfo,
XHRHazardInfo, XHRNearMissInfo, XHRNewInfrastructureInfo,
XHRTheftInfo)
from .termsAndConditions import termsAndConditions
from .vis import vis
| mit | -6,684,587,237,019,043,000 | 54.9 | 79 | 0.695079 | false |
unt-libraries/django-name | name/api/serializers.py | 1 | 6208 | """Serializers for the Name App Models.
This module leverages the Django Rest Framework's Serializer
components to build JSON representations of the models defined
in this app.
These JSON representations are designed to be backwards compatible
with the API documented in previous versions.
For documentation regarding the Django Rest Framework Serializers go
to http://www.django-rest-framework.org/api-guide/serializers/
"""
from rest_framework import serializers
from .. import models
class IdentifierSerializer(serializers.ModelSerializer):
"""Serializer for the Identifier Model.
The following fields have been renamed for backwards compatibility
with previous versions of the API.
label -> identifier.type
href -> identifier.value
"""
label = serializers.StringRelatedField(source='type')
href = serializers.CharField(source='value')
class Meta:
model = models.Identifier
fields = ('label', 'href')
class NoteSerializer(serializers.ModelSerializer):
"""Serializer for the Note Model."""
type = serializers.SerializerMethodField()
class Meta:
model = models.Note
fields = ('note', 'type')
def get_type(self, obj):
"""Sets the type field.
Returns the Note Type label, instead of the Note Type ID, which
is the default behavior.
"""
return obj.get_note_type_label().lower()
class VariantSerializer(serializers.ModelSerializer):
"""Serializer for the Variant Model."""
type = serializers.SerializerMethodField()
class Meta:
model = models.Variant
fields = ('variant', 'type')
def get_type(self, obj):
"""Sets the type field.
Returns the Variant Type label, instead of the Variant Type ID,
which is the default behavior.
"""
return obj.get_variant_type_label().lower()
class NameSerializer(serializers.ModelSerializer):
"""Serializer for the Name Model.
This serializes the the Name model to include detailed information
about the object, including the related Variants, Notes, and
Identifiers.
The following fields have been renamed for backwards compatibility
with previous versions of the API.
authoritative_name -> name.name
begin_date -> name.begin
end_date -> name.end
The identifier field is the absolute url to the name detail
page for the model instance.
"""
authoritative_name = serializers.CharField(source='name')
begin_date = serializers.CharField(source='begin')
name_type = serializers.SerializerMethodField()
end_date = serializers.CharField(source='end')
links = IdentifierSerializer(many=True, source='identifier_set')
notes = NoteSerializer(many=True, source='note_set')
variants = VariantSerializer(many=True, source='variant_set')
identifier = serializers.HyperlinkedIdentityField(
view_name='name:detail', lookup_field='name_id')
class Meta:
model = models.Name
fields = ('authoritative_name', 'name_type', 'begin_date', 'end_date',
'identifier', 'links', 'notes', 'variants',)
def get_name_type(self, obj):
"""Sets the name_type field.
Returns the Name Type label, instead of the Name Type ID, which
is the default behavior.
"""
return obj.get_name_type_label().lower()
class NameSearchSerializer(serializers.ModelSerializer):
"""Name Model Serializer for the Name search/autocompletion
endpoint.
The following fields have been renamed for backwards compatibility
with previous versions of the API.
begin_date -> name.begin
type -> name.get_name_type_label()
label -> Formats name.name and name.disambiguation.
The URL field is the absolute url to the name detail page for
the model instance.
"""
begin_date = serializers.CharField(source='begin')
type = serializers.SerializerMethodField()
label = serializers.SerializerMethodField()
URL = serializers.HyperlinkedIdentityField(
view_name='name:detail', lookup_field='name_id')
class Meta:
model = models.Name
fields = ('id', 'name', 'label', 'type', 'begin_date',
'disambiguation', 'URL')
def get_type(self, obj):
"""Sets the type field.
Returns the Name Type label, instead of the Name Type ID, which
is the default behavior.
"""
return obj.get_name_type_label().lower()
def get_label(self, obj):
"""Sets the label field.
Returns a string in the form of
"<name.name> (<name.disambiguation>)"
"""
if obj.disambiguation:
return '{0} ({1})'.format(obj.name, obj.disambiguation)
return obj.name
class LocationSerializer(serializers.ModelSerializer):
"""Serailizer for the Locations Model.
This includes the related Name via the belong_to_name field. The
belong_to_name field uses the NameSerializer to nest the related
Name model.
"""
belong_to_name = NameSerializer()
class Meta:
model = models.Location
fields = '__all__'
class NameStatisticsMonthSerializer(serializers.Serializer):
"""Serializer for the NameStatisticsMonth object."""
total = serializers.IntegerField()
total_to_date = serializers.IntegerField()
month = serializers.DateTimeField()
class NameStatisticsTypeSerializer(serializers.Serializer):
"""Serializer for the NameStatisticsType object.
This serializer utilizes the NameStatisticsTypeMonth to serialize
the NameStatisticsMonth instances that the object instance contains.
"""
running_total = serializers.IntegerField()
stats = NameStatisticsMonthSerializer(many=True)
class NameStatisticsSerializer(serializers.Serializer):
"""Serializer for the NameStatistics object.
This serializer utilizes the NameStatisticsTypeSerializer to
serialize the NameStatisticsType instances that the object instance
contains.
"""
created = NameStatisticsTypeSerializer()
modified = NameStatisticsTypeSerializer()
name_type_totals = serializers.DictField()
| bsd-3-clause | -1,052,281,697,192,771,800 | 31.502618 | 78 | 0.6875 | false |
mithron/opendatahack | web/main.py | 1 | 1805 | from datetime import datetime
import json
import os
from urlparse import urlparse
from pymongo.connection import Connection
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
MONGO_URL = "" # found with $>heroku config
we_live = True
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/list/", MainHandler),
(r"/([0-9]+)/", SchoolHandler)
]
settings = dict(
autoescape=None,
)
tornado.web.Application.__init__(self, handlers, **settings)
if we_live:
self.con = Connection(MONGO_URL)
self.database = self.con[urlparse(MONGO_URL).path[1:]]
else:
self.con = Connection('localhost', 27017)
self.database = self.con["moscow"]
class BaseHandler(tornado.web.RequestHandler):
@property
def db(self):
return self.application.database
class SchoolHandler(BaseHandler):
def get(self, inn=None):
if inn:
suppliers = list(self.db["suppliers"].find({'inn': int(inn)}, fields={"_id": False}))
self.write(json.dumps(suppliers, ensure_ascii=False, encoding='utf8'))
else:
self.write("[]")
class MainHandler(BaseHandler):
def get(self):
schools = list(self.db["suppliers"].find(fields={"full_name": True, "inn": True, "_id": False}))
self.write(json.dumps(schools, ensure_ascii=False, encoding='utf8'))
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(int(os.environ.get("PORT", 8888)))
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main() | mit | 941,218,873,183,261,400 | 26.363636 | 104 | 0.628255 | false |
aptomar/apt-file-format | test/testAptofile.py | 1 | 23249 | ################################################################
# #
# testAptofile.py #
# Copyright (c) 2013 Aptomar AS, All Rights Reserved #
# #
# Author: Jarle Bauck Hamar: <[email protected]> #
# Date: 2013-05-23 #
# #
################################################################
import unittest
import sys
import json
sys.path.append('../src')
from aptofile import Aptofile
import jsonschema
class TestManifest(unittest.TestCase):
def setUp(self):
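        """Load the manifest header fixture and the Aptofile JSON schema."""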
with open('tests/header.json') as fid:
self.inst = json.load(fid)
self.schema = Aptofile.SCHEMA
def validate(self):
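        """Return True if the header instance validates against the schema, else False."""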
try:
jsonschema.validate(self.inst, self.schema, Aptofile.VALIDATOR,
format_checker = jsonschema.FormatChecker())
except jsonschema.ValidationError:
return False
return True
def test_schema_validates(self):
Aptofile.VALIDATOR.check_schema(Aptofile.SCHEMA)
def test_valid_manifest_header(self):
self.assertTrue(self.validate())
def test_manifest_missing_date(self):
del self.inst["date"]
self.assertFalse(self.validate())
def test_manifest_missing_description(self):
del self.inst["description"]
self.assertFalse(self.validate())
def test_manifest_missing_version(self):
del self.inst["manifest_version"]
self.assertFalse(self.validate())
def test_manifest_missing_generator(self):
del self.inst["generator"]
self.assertFalse(self.validate())
def test_manifest_bad_date(self):
self.inst["date"] = "tomorrow"
self.assertFalse(self.validate())
def test_manifest_disallow_additional_properties(self):
self.inst["extra"] = "large"
self.assertFalse(self.validate())
class TestAsset(unittest.TestCase):
def testCreateAsset(self):
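        """Build a complete asset: layer1 gets geometry, style and resource files
        written into the archive, layer2 reuses those archived files plus an external
        URL, and each layer is placed in a group. The manifest must validate both in
        memory and after writing to disk."""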
f = 'tests/asset.apt'
with Aptofile.create(f,'asset') as af:
af.setDescription("This is a description of the asset.")
af.setGenerator("aptfile.py", "Aptomar AS")
af.addLayer('layer1', name='layer1-name',
geometry_data=[('tests/asset/layers/layer1.dbf',
'file:/layers/layer1.dbf'),
('tests/asset/layers/layer1.shp',
'layers/layer1.shp'),
('tests/asset/layers/layer1.shx',
'layers/layer1.shx')])
af.addFile2Layer(('tests/asset/styles/layer1.xml',
'styles/layer1.xml'), 'layer1', 'style')
af.addFile2Layer(('tests/asset/resource1.png','resource1.png'),
'layer1', 'resources')
af.addFile2Layer(('tests/asset/resource2.png','resource2.png'),
'layer1', 'resources')
af.addLayer('layer2',name='layer2-name')
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.dbf', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shx', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('styles/layer1.xml','layer2',
'style', writeFile=False)
af.addFile2Layer('resource1.png','layer2','resources', writeFile=False)
af.addFile2Layer('resource2.png','layer2','resources', writeFile=False)
af.addFile2Layer('http://very-big-file.com/','layer2','resources', writeFile=True)
af.addGroup('group1','group1-name',['layer1'])
af.addGroup('group2','group2-name',['layer2'])
#Validate before write:
self.assertTrue(af.validate())
#Validate after write and open
self.assertTrue(Aptofile.validateFile(f))
def testAssetMissingFile(self):
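        """After the in-memory check passes, register a 'resource3.png' entry that is
        never written into the archive, so validation of the written file must fail."""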
f = 'tests/asset_missing_file.apt'
with Aptofile.create(f,'asset') as af:
af.setDescription("This is a description of the asset.")
af.setGenerator("aptfile.py", "Aptomar AS")
af.addLayer('layer1', name='layer1-name',
geometry_data=[('tests/asset/layers/layer1.dbf',
'layers/layer1.dbf'),
('tests/asset/layers/layer1.shp',
'layers/layer1.shp'),
('tests/asset/layers/layer1.shx',
'layers/layer1.shx')])
af.addFile2Layer(('tests/asset/styles/layer1.xml',
'styles/layer1.xml'), 'layer1', 'style')
af.addFile2Layer(('tests/asset/resource1.png','resource1.png'),
'layer1', 'resources')
af.addFile2Layer(('tests/asset/resource2.png','resource2.png'),
'layer1', 'resources')
af.addLayer('layer2',name='layer2-name')
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.dbf', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shx', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('styles/layer1.xml','layer2',
'style', writeFile=False)
af.addFile2Layer('resource1.png','layer2','resources', writeFile=False)
af.addFile2Layer('resource2.png','layer2','resources', writeFile=False)
af.addFile2Layer('http://very-big-file.com/','layer2','resources', writeFile=True)
af.addGroup('group1','group1-name',['layer1'])
af.addGroup('group2','group2-name',['layer2'])
#Validate before write:
self.assertTrue(af.validate())
af.addFile2Layer('resource3.png','layer2','resources', writeFile=False)
#Validate after write and open
self.assertFalse(Aptofile.validateFile(f))
def testAssetIncorrectLayerInGroup(self):
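        """group2 references the undefined 'layer3', so validation must fail both
        before and after writing."""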
f = 'tests/asset_incorrect_layer_in_group.apt'
with Aptofile.create(f,'asset') as af:
af.setDescription("This is a description of the asset.")
af.setGenerator("aptfile.py", "Aptomar AS")
af.addLayer('layer1', name='layer1-name',
geometry_data=[('tests/asset/layers/layer1.dbf',
'layers/layer1.dbf'),
('tests/asset/layers/layer1.shp',
'layers/layer1.shp'),
('tests/asset/layers/layer1.shx',
'layers/layer1.shx')])
af.addFile2Layer(('tests/asset/styles/layer1.xml',
'styles/layer1.xml'), 'layer1', 'style')
af.addFile2Layer(('tests/asset/resource1.png','resource1.png'),
'layer1', 'resources')
af.addFile2Layer(('tests/asset/resource2.png','resource2.png'),
'layer1', 'resources')
af.addLayer('layer2',name='layer2-name')
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.dbf', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shx', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('styles/layer1.xml','layer2',
'style', writeFile=False)
af.addFile2Layer('resource1.png','layer2','resources', writeFile=False)
af.addFile2Layer('resource2.png','layer2','resources', writeFile=False)
af.addFile2Layer('http://very-big-file.com/','layer2','resources', writeFile=True)
af.addGroup('group1','group1-name',['layer1'])
af.addGroup('group2','group2-name',['layer3'])
#Validate before write:
self.assertFalse(af.validate())
#Validate after write and open
self.assertFalse(Aptofile.validateFile(f))
def testAssetMissingStyle(self):
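        """After the in-memory check passes, remove layer1's style entry from the
        manifest, so the written file must fail validation."""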
f = 'tests/asset_missing_style.apt'
with Aptofile.create(f,'asset') as af:
af.setDescription("This is a description of the asset.")
af.setGenerator("aptfile.py", "Aptomar AS")
af.addLayer('layer1', name='layer1-name',
geometry_data=[('tests/asset/layers/layer1.dbf',
'layers/layer1.dbf'),
('tests/asset/layers/layer1.shp',
'layers/layer1.shp'),
('tests/asset/layers/layer1.shx',
'layers/layer1.shx')])
af.addFile2Layer(('tests/asset/styles/layer1.xml',
'styles/layer1.xml'), 'layer1', 'style')
af.addFile2Layer(('tests/asset/resource1.png','resource1.png'),
'layer1', 'resources')
af.addFile2Layer(('tests/asset/resource2.png','resource2.png'),
'layer1', 'resources')
af.addLayer('layer2',name='layer2-name')
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.dbf', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shx', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('styles/layer1.xml','layer2',
'style', writeFile=False)
af.addFile2Layer('resource1.png','layer2','resources', writeFile=False)
af.addFile2Layer('resource2.png','layer2','resources', writeFile=False)
af.addFile2Layer('http://very-big-file.com/','layer2','resources', writeFile=True)
af.addGroup('group1','group1-name',['layer1'])
af.addGroup('group2','group2-name',['layer2'])
#Validate before write:
self.assertTrue(af.validate())
del af.manifest['asset']['layers']['layer1']['style']
#Validate after write and open
self.assertFalse(Aptofile.validateFile(f))
def testAssetIncorrectDataType(self):
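        """After the in-memory check passes, replace layer1's style 'data' list with a
        bare string, so the written file must fail schema validation."""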
f = 'tests/asset_incorrect_data_type.apt'
with Aptofile.create(f,'asset') as af:
af.setDescription("This is a description of the asset.")
af.setGenerator("aptfile.py", "Aptomar AS")
af.addLayer('layer1', name='layer1-name',
geometry_data=[('tests/asset/layers/layer1.dbf',
'layers/layer1.dbf'),
('tests/asset/layers/layer1.shp',
'layers/layer1.shp'),
('tests/asset/layers/layer1.shx',
'layers/layer1.shx')])
af.addFile2Layer(('tests/asset/styles/layer1.xml',
'styles/layer1.xml'), 'layer1', 'style')
af.addFile2Layer(('tests/asset/resource1.png','resource1.png'),
'layer1', 'resources')
af.addFile2Layer(('tests/asset/resource2.png','resource2.png'),
'layer1', 'resources')
af.addLayer('layer2',name='layer2-name')
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.dbf', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shx', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('styles/layer1.xml','layer2',
'style', writeFile=False)
af.addFile2Layer('resource1.png','layer2','resources', writeFile=False)
af.addFile2Layer('resource2.png','layer2','resources', writeFile=False)
af.addFile2Layer('http://very-big-file.com/','layer2','resources', writeFile=True)
af.addGroup('group1','group1-name',['layer1'])
af.addGroup('group2','group2-name',['layer2'])
#Validate before write:
self.assertTrue(af.validate())
d=af.manifest['asset']['layers']['layer1']['style']['data'].pop()
af.manifest['asset']['layers']['layer1']['style']['data'] = d
#Validate after write and open
self.assertFalse(Aptofile.validateFile(f))
class TestImage(unittest.TestCase):
def testImage(self):
f = 'tests/image.apt'
with Aptofile.create(f,'image') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the image')
af.setImageName('The image name')
af.setImageDescription('An image of something')
af.setImageGeoreference( 10.4344, 63.4181, 150.60)
af.setImageBounds(['data:,bounds as a string'])
af.addImageFile(('tests/image/image.jpg','image.jpg'))
self.assertTrue(af.validate())
self.assertTrue(Aptofile.validateFile(f))
def testImageMissingDate(self):
f = 'tests/image_missing_date.apt'
with Aptofile.create(f,'image') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the image')
af.setImageName('The image name')
af.setImageDescription('An image of something')
af.setImageGeoreference( 10.4344, 63.4181, 150.60)
af.setImageBounds(['data:,bounds as a string'])
af.addImageFile(('tests/image/image.jpg','image.jpg'))
self.assertTrue(af.validate())
del af.manifest['image']['created']
self.assertFalse(Aptofile.validateFile(f))
def testImageIncorrectDate(self):
        f = 'tests/image_incorrect_date.apt'
with Aptofile.create(f,'image') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the image')
af.setImageName('The image name')
af.setImageDescription('An image of something')
af.setImageGeoreference( 10.4344, 63.4181, 150.60)
af.setImageBounds(['data:,bounds as a string'])
af.addImageFile(('tests/image/image.jpg','image.jpg'))
self.assertTrue(af.validate())
af.manifest['image']['created'] = '23.05.13'
af.validate()
self.assertFalse(Aptofile.validateFile(f))
def testImageMissingFileAndGenerator(self):
f = 'tests/image_missing_file_and_generator.apt'
with Aptofile.create(f,'image') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the image')
af.setImageName('The image name')
af.setImageDescription('An image of something')
af.setImageGeoreference( 10.4344, 63.4181, 150.60)
af.setImageBounds(['data:,bounds as a string'])
af.manifest['image']['data']=['image.jpg']
del af.manifest['generator']
self.assertFalse(af.validate())
self.assertFalse(Aptofile.validateFile(f))
def testImageMissingGenerator(self):
f = 'tests/image_missing_generator.apt'
with Aptofile.create(f,'image') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the image')
af.setImageName('The image name')
af.setImageDescription('An image of something')
af.setImageGeoreference( 10.4344, 63.4181, 150.60)
af.setImageBounds(['data:,bounds as a string'])
af.addImageFile(('tests/image/image.jpg','image.jpg'))
self.assertTrue(af.validate())
del af.manifest['generator']
self.assertFalse(Aptofile.validateFile(f))
class TestVideo(unittest.TestCase):
def testVideo(self):
f = 'tests/video.apt'
with Aptofile.create(f,'video') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the video')
af.setVideoName('The video name')
af.setVideoDescription('A video of something')
af.setVideoGeoreference( 10.4344, 63.4181, 150.60)
af.addVideoFile(('tests/video/video.avi','video.avi'))
self.assertTrue(af.validate())
self.assertTrue(Aptofile.validateFile(f))
def testVideoMissingFile(self):
f = 'tests/video_missing_file.apt'
with Aptofile.create(f,'video') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the video')
af.setVideoName('The video name')
af.setVideoDescription('A video of something')
af.setVideoGeoreference( 10.4344, 63.4181, 150.60)
self.assertFalse(af.validate())
self.assertFalse(Aptofile.validateFile(f))
def testVideoFileNotFound(self):
f = 'tests/video_file_not_found.apt'
with Aptofile.create(f,'video') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the video')
af.setVideoName('The video name')
af.setVideoDescription('A video of something')
af.setVideoGeoreference( 10.4344, 63.4181, 150.60)
af.manifest['video']['data']=['video.avi']
self.assertFalse(af.validate())
self.assertFalse(Aptofile.validateFile(f))
def testVideoMissingName(self):
f = 'tests/video_missing_name.apt'
with Aptofile.create(f,'video') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the video')
af.setVideoName('The video name')
af.setVideoDescription('A video of something')
af.setVideoGeoreference( 10.4344, 63.4181, 150.60)
af.addVideoFile(('tests/video/video.avi','video.avi'))
self.assertTrue(af.validate())
del af.manifest['video']['name']
self.assertFalse(Aptofile.validateFile(f))
class TestPoint(unittest.TestCase):
def testPoint(self):
f = 'tests/point.apt'
with Aptofile.create(f,'point') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the point.')
af.setPointName('The Point')
af.setPointDescription('This is a description of a point.')
af.setPointType('boat')
af.setPointGeometry('data:data_describing_the_point')
self.assertTrue(af.validate())
self.assertTrue(Aptofile.validateFile(f))
def testPointInvalidType(self):
f = 'tests/point_invalid_type.apt'
with Aptofile.create(f,'point') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the point.')
af.setPointName('The Point')
af.setPointDescription('This is a description of a point.')
af.setPointType('boat')
af.setPointGeometry('data:data_describing_the_point')
self.assertTrue(af.validate())
af.manifest['point']['object-type'] = 'UFO'
self.assertFalse(Aptofile.validateFile(f))
def testRoute(self):
f = 'tests/route.apt'
with Aptofile.create(f,'route') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the route.')
af.setRouteName('The Route')
af.setRouteDescription('This is a description of the route.')
af.setRouteGeometry('data:data_describing_the_route')
self.assertTrue(af.validate())
self.assertTrue(Aptofile.validateFile(f))
def testRouteMissingGeometry(self):
        f = 'tests/route_missing_geometry.apt'
with Aptofile.create(f,'route') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the route.')
af.setRouteName('The Route')
af.setRouteDescription('This is a description of the route.')
af.setRouteGeometry('data:data_describing_the_route')
self.assertTrue(af.validate())
del af.manifest['route']['geometry']
self.assertFalse(Aptofile.validateFile(f))
class TestArea(unittest.TestCase):
def testArea(self):
f = 'tests/area.apt'
with Aptofile.create(f,'area') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the area.')
af.setAreaName('The Point')
af.setAreaDescription('This is a description of the area.')
af.setAreaGeometry('data:data_describing_the_area')
self.assertTrue(af.validate())
self.assertTrue(Aptofile.validateFile(f))
def testAreaMissingAreaDescription(self):
f = 'tests/area_missing_area_desc.apt'
with Aptofile.create(f,'area') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the area.')
af.setAreaName('The Point')
af.setAreaDescription('This is a description of a area.')
af.setAreaGeometry('data:data_describing_the_area')
self.assertTrue(af.validate())
del af.manifest['area']['description']
self.assertFalse(Aptofile.validateFile(f))
if __name__=='__main__':
unittest.main()
| bsd-3-clause | -8,490,564,460,999,139,000 | 46.543967 | 94 | 0.555422 | false |
CloudBoltSoftware/cloudbolt-forge | ui_extensions/veeam_admin_extension/restore_backup.py | 1 | 1717 | import requests
import time
from xml.dom import minidom
from common.methods import set_progress
from xui.veeam.veeam_admin import VeeamManager
def run(server, *args, **kwargs):
set_progress(f"Starting Veeam Backup restoration... ")
veeam = VeeamManager()
server_ci = veeam.get_connection_info()
url = f'http://{server_ci.ip}:9399/api/vmRestorePoints/' + \
kwargs.get('restore_point_href') + '?action=restore'
session_id = veeam.get_veeam_server_session_id()
header = {"X-RestSvcSessionId": session_id}
response = requests.post(url=url, headers=header)
task = minidom.parseString(response.content.decode('utf-8'))
items = task.getElementsByTagName('Task')[0].attributes.items()
restoration_url = [item for item in items if item[0] == 'Href'][0][-1]
def check_state():
response = requests.get(restoration_url, headers=header)
dom = minidom.parseString(response.content.decode('utf-8'))
state = dom.getElementsByTagName('State')[0]
child = state.firstChild
return child
    # Wait until the restoration has completed.
while check_state().data == 'Running':
# wait
set_progress("Waiting for restoration to complete...")
time.sleep(10)
if check_state().data == 'Finished':
set_progress("Server restoration completed successfully")
return "SUCCESS", "Server restoration completed successfully", ""
else:
set_progress("Server restoration didn't complete successfully")
return "FAILURE", "", "Server restoration didn't complete successfully"
| apache-2.0 | 4,563,522,579,595,640,300 | 38.022727 | 83 | 0.630169 | false |
ArchiveTeam/spuf-grab | pipeline.py | 1 | 11245 | # encoding=utf8
import datetime
from distutils.version import StrictVersion
import hashlib
import os.path
import random
from seesaw.config import realize, NumberConfigValue
from seesaw.externalprocess import ExternalProcess
from seesaw.item import ItemInterpolation, ItemValue
from seesaw.task import SimpleTask, LimitConcurrent
from seesaw.tracker import GetItemFromTracker, PrepareStatsForTracker, \
UploadWithTracker, SendDoneToTracker
import shutil
import socket
import subprocess
import sys
import time
import string
import seesaw
from seesaw.externalprocess import WgetDownload
from seesaw.pipeline import Pipeline
from seesaw.project import Project
from seesaw.util import find_executable
# check the seesaw version
if StrictVersion(seesaw.__version__) < StrictVersion("0.8.5"):
raise Exception("This pipeline needs seesaw version 0.8.5 or higher.")
###########################################################################
# Find a useful Wget+Lua executable.
#
# WGET_LUA will be set to the first path that
# 1. does not crash with --version, and
# 2. prints the required version string
WGET_LUA = find_executable(
"Wget+Lua",
["GNU Wget 1.14.lua.20130523-9a5c", "GNU Wget 1.14.lua.20160530-955376b"],
[
"./wget-lua",
"./wget-lua-warrior",
"./wget-lua-local",
"../wget-lua",
"../../wget-lua",
"/home/warrior/wget-lua",
"/usr/bin/wget-lua"
]
)
if not WGET_LUA:
raise Exception("No usable Wget+Lua found.")
###########################################################################
# The version number of this pipeline definition.
#
# Update this each time you make a non-cosmetic change.
# It will be added to the WARC files and reported to the tracker.
VERSION = "20170615.01"
USER_AGENT = 'ArchiveTeam'
TRACKER_ID = 'spuf'
TRACKER_HOST = 'tracker.archiveteam.org'
###########################################################################
# This section defines project-specific tasks.
#
# Simple tasks (tasks that do not need any concurrency) are based on the
# SimpleTask class and have a process(item) method that is called for
# each item.
class CheckIP(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "CheckIP")
self._counter = 0
def process(self, item):
# NEW for 2014! Check if we are behind firewall/proxy
if self._counter <= 0:
item.log_output('Checking IP address.')
ip_set = set()
ip_set.add(socket.gethostbyname('twitter.com'))
ip_set.add(socket.gethostbyname('facebook.com'))
ip_set.add(socket.gethostbyname('youtube.com'))
ip_set.add(socket.gethostbyname('microsoft.com'))
ip_set.add(socket.gethostbyname('icanhas.cheezburger.com'))
ip_set.add(socket.gethostbyname('archiveteam.org'))
if len(ip_set) != 6:
item.log_output('Got IP addresses: {0}'.format(ip_set))
item.log_output(
'Are you behind a firewall/proxy? That is a big no-no!')
raise Exception(
'Are you behind a firewall/proxy? That is a big no-no!')
# Check only occasionally
if self._counter <= 0:
self._counter = 10
else:
self._counter -= 1
class PrepareDirectories(SimpleTask):
def __init__(self, warc_prefix):
SimpleTask.__init__(self, "PrepareDirectories")
self.warc_prefix = warc_prefix
def process(self, item):
item_name = item["item_name"]
escaped_item_name = item_name.replace(':', '_').replace('/', '_').replace('~', '_')
dirname = "/".join((item["data_dir"], escaped_item_name))
if os.path.isdir(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
item["item_dir"] = dirname
item["warc_file_base"] = "%s-%s-%s" % (self.warc_prefix, escaped_item_name,
time.strftime("%Y%m%d-%H%M%S"))
open("%(item_dir)s/%(warc_file_base)s.warc.gz" % item, "w").close()
class MoveFiles(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "MoveFiles")
def process(self, item):
# NEW for 2014! Check if wget was compiled with zlib support
if os.path.exists("%(item_dir)s/%(warc_file_base)s.warc" % item):
raise Exception('Please compile wget with zlib support!')
os.rename("%(item_dir)s/%(warc_file_base)s.warc.gz" % item,
"%(data_dir)s/%(warc_file_base)s.warc.gz" % item)
shutil.rmtree("%(item_dir)s" % item)
def get_hash(filename):
with open(filename, 'rb') as in_file:
return hashlib.sha1(in_file.read()).hexdigest()
CWD = os.getcwd()
PIPELINE_SHA1 = get_hash(os.path.join(CWD, 'pipeline.py'))
LUA_SHA1 = get_hash(os.path.join(CWD, 'spuf.lua'))
def stats_id_function(item):
# NEW for 2014! Some accountability hashes and stats.
d = {
'pipeline_hash': PIPELINE_SHA1,
'lua_hash': LUA_SHA1,
'python_version': sys.version,
}
return d
class WgetArgs(object):
def realize(self, item):
wget_args = [
WGET_LUA,
"-U", USER_AGENT,
"-nv",
"--load-cookies", "cookies.txt",
#"--no-cookies",
"--lua-script", "spuf.lua",
"-o", ItemInterpolation("%(item_dir)s/wget.log"),
"--no-check-certificate",
"--output-document", ItemInterpolation("%(item_dir)s/wget.tmp"),
"--truncate-output",
"-e", "robots=off",
"--rotate-dns",
"--recursive", "--level=inf",
"--no-parent",
"--page-requisites",
"--timeout", "30",
"--tries", "inf",
"--domains", "steampowered.com",
"--span-hosts",
"--waitretry", "30",
"--warc-file", ItemInterpolation("%(item_dir)s/%(warc_file_base)s"),
"--warc-header", "operator: Archive Team",
"--warc-header", "steam-users-forum-dld-script-version: " + VERSION,
"--warc-header", ItemInterpolation("steam-users-forum-item: %(item_name)s"),
]
item_name = item['item_name']
assert ':' in item_name
item_type, item_value = item_name.split(':', 1)
item['item_type'] = item_type
item['item_value'] = item_value
tries = 0
        while tries < 10:
            tries += 1  # count the attempt so the while/else below can give up
if os.path.isfile('login.php?do=login'):
os.remove('login.php?do=login')
os.system("wget --save-cookies cookies.txt --user-agent 'ArchiveTeam' --keep-session-cookies --post-data 'vb_login_username=archiveTeam&cookieuser=1&vb_login_password=&s=&securitytoken=guest&do=login&vb_login_md5password=9aa65d84012ee50e456c4e6916089636&vb_login_md5password_utf=9aa65d84012ee50e456c4e6916089636' --referer http://forums.steampowered.com/forums/ http://forums.steampowered.com/forums/login.php?do=login")
if not os.path.isfile('login.php?do=login'):
continue
with open('login.php?do=login') as f:
if 'alt="Forum Database Error"' in f.read():
continue
break
else:
raise Exception('Could not log in.')
wget_args.append('http://forums.steampowered.com/forums/showthread.php')
if item_type == 'threads':
start, stop = item_value.split('-')
for i in range(int(start), int(stop)+1):
wget_args.extend(['--warc-header', 'steam-users-forum-thread: {i}'.format(i=i)])
wget_args.append('http://forums.steampowered.com/forums/showthread.php?t={i}'.format(i=i))
elif item_type == 'forums':
start, stop = item_value.split('-')
for i in range(int(start), int(stop)+1):
wget_args.extend(['--warc-header', 'steam-users-forum-forum: {i}'.format(i=i)])
wget_args.append('http://forums.steampowered.com/forums/forumdisplay.php?f={i}&daysprune=-1'.format(i=i))
wget_args.append('http://forums.steampowered.com/forums/forumdisplay.php?f={i}'.format(i=i))
elif item_type == 'members':
start, stop = item_value.split('-')
for i in range(int(start), int(stop)+1):
wget_args.extend(['--warc-header', 'steam-users-forum-member: {i}'.format(i=i)])
wget_args.append('http://forums.steampowered.com/forums/member.php?u={i}'.format(i=i))
else:
raise Exception('Unknown item')
if 'bind_address' in globals():
wget_args.extend(['--bind-address', globals()['bind_address']])
print('')
print('*** Wget will bind address at {0} ***'.format(
globals()['bind_address']))
print('')
return realize(wget_args, item)
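# Illustrative sketch (not part of the original pipeline): the URL expansion
# performed above for a 'threads' item, extracted into a standalone helper.
# The 'threads:<start>-<stop>' item-name format is taken from realize() above.
def _example_thread_urls(item_name='threads:100-102'):
    item_type, item_value = item_name.split(':', 1)
    start, stop = item_value.split('-')
    return ['http://forums.steampowered.com/forums/showthread.php?t={i}'.format(i=i)
            for i in range(int(start), int(stop) + 1)]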
###########################################################################
# Initialize the project.
#
# This will be shown in the warrior management panel. The logo should not
# be too big. The deadline is optional.
project = Project(
title = "Steam Users' Forum",
project_html = """
<img class="project-logo" alt="Steam Logo" src="http://archiveteam.org/images/thumb/4/48/Steam_Icon_2014.png/100px-Steam_Icon_2014.png" />
<h2>Steam Users' Forum <span class="links"><a href="http://forums.steampowered.com/forums">Website</a> · <a href="http://tracker.archiveteam.org/spuf/">Leaderboard</a></span></h2>
<p>Getting killed June 5th.</p>
""",
utc_deadline = datetime.datetime(2017, 6, 4, 23, 59, 0)
)
pipeline = Pipeline(
CheckIP(),
GetItemFromTracker("http://%s/%s" % (TRACKER_HOST, TRACKER_ID), downloader,
VERSION),
PrepareDirectories(warc_prefix="spuf"),
WgetDownload(
WgetArgs(),
max_tries=2,
accept_on_exit_code=[0, 4, 8],
env={
"item_dir": ItemValue("item_dir"),
"item_value": ItemValue("item_value"),
"item_type": ItemValue("item_type"),
"warc_file_base": ItemValue("warc_file_base"),
}
),
PrepareStatsForTracker(
defaults={"downloader": downloader, "version": VERSION},
file_groups={
"data": [
ItemInterpolation("%(item_dir)s/%(warc_file_base)s.warc.gz")
]
},
id_function=stats_id_function,
),
MoveFiles(),
LimitConcurrent(NumberConfigValue(min=1, max=4, default="1",
name="shared:rsync_threads", title="Rsync threads",
description="The maximum number of concurrent uploads."),
UploadWithTracker(
"http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
downloader=downloader,
version=VERSION,
files=[
ItemInterpolation("%(data_dir)s/%(warc_file_base)s.warc.gz")
],
rsync_target_source_path=ItemInterpolation("%(data_dir)s/"),
rsync_extra_args=[
"--recursive",
"--partial",
"--partial-dir", ".rsync-tmp",
]
),
),
SendDoneToTracker(
tracker_url="http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
stats=ItemValue("stats")
)
)
| unlicense | -1,336,016,031,714,418,700 | 35.868852 | 432 | 0.574033 | false |
geggo/pyface | pyface/tree/api.py | 1 | 1198 | #------------------------------------------------------------------------------
# Copyright (c) 2005-2011, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
from __future__ import absolute_import
from .node_event import NodeEvent
from .node_monitor import NodeMonitor
from .node_manager import NodeManager
from .node_tree import NodeTree
from .node_tree_model import NodeTreeModel
from .node_type import NodeType
from .trait_dict_node_type import TraitDictNodeType
from .trait_list_node_type import TraitListNodeType
from .tree_model import TreeModel
from traits.etsconfig.api import ETSConfig
if ETSConfig.toolkit == 'wx':
# Tree has not yet been ported to qt
from .tree import Tree
del ETSConfig
| bsd-3-clause | 3,699,413,282,578,178,600 | 36.4375 | 79 | 0.682805 | false |
Encrylize/flask-blogger | app/utils/helpers.py | 1 | 1218 | from urllib.parse import urljoin, urlparse
from flask import request
def get_or_create(model, **kwargs):
"""
Gets or creates an instance of model.
Args:
model: SQLAlchemy model
**kwargs: Model properties
Returns:
An instance of model and True if it was created, False if it was not.
"""
instance = model.query.filter_by(**kwargs).first()
if instance:
return instance, False
else:
instance = model(**kwargs)
return instance, True
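# Illustrative sketch (not part of the original module): typical use of
# get_or_create with a hypothetical Flask-SQLAlchemy model ``Tag`` and a
# session ``db.session``; neither name is defined in this module.
def _example_get_or_create_usage(db, Tag):
    tag, created = get_or_create(Tag, name='flask')
    if created:
        db.session.add(tag)
        db.session.commit()
    return tag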
def is_safe_url(target):
"""
Checks if a URL is safe.
Args:
target: The URL to check
Returns:
True if the URL is safe, False if it is not.
"""
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http',
'https') and ref_url.netloc == test_url.netloc
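# Illustrative sketch (not part of the original module): expected behaviour
# under an active request context whose host_url is 'http://example.com/';
# the concrete host and paths are assumptions for the example only.
def _example_is_safe_url_behaviour():
    assert is_safe_url('/posts/1')                        # relative -> safe
    assert is_safe_url('http://example.com/posts/1')      # same host -> safe
    assert not is_safe_url('http://evil.example.org/')    # foreign host -> unsafe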
def get_redirect_target():
"""
Gets a safe redirect target.
Returns:
The first safe redirect target.
"""
for target in request.args.get('next'), request.referrer:
if not target:
continue
elif is_safe_url(target):
return target
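# Illustrative sketch (not part of the original module): the usual redirect
# pattern after a form post. ``redirect`` and ``url_for`` are standard Flask
# helpers and 'index' is an assumed endpoint name, not defined here.
def _example_redirect_after_post():
    from flask import redirect, url_for
    target = get_redirect_target()
    return redirect(target or url_for('index'))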
| mit | 7,296,754,981,301,055,000 | 20 | 77 | 0.591954 | false |
Azure/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_12_01/aio/operations/_images_operations.py | 1 | 29335 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ImagesOperations:
"""ImagesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
image_name: str,
parameters: "_models.Image",
**kwargs: Any
) -> "_models.Image":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'Image')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Image', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Image', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
image_name: str,
parameters: "_models.Image",
**kwargs: Any
) -> AsyncLROPoller["_models.Image"]:
"""Create or update an image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param image_name: The name of the image.
:type image_name: str
:param parameters: Parameters supplied to the Create Image operation.
:type parameters: ~azure.mgmt.compute.v2019_12_01.models.Image
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Image or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2019_12_01.models.Image]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
image_name=image_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Image', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore
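    # Illustrative sketch (not part of the generated code): typical async LRO
    # usage, assuming an authenticated aio ComputeManagementClient named
    # ``client`` and an Image model ``image_params`` built by the caller:
    #
    #     poller = await client.images.begin_create_or_update(
    #         "my-resource-group", "my-image", image_params)
    #     image = await poller.result()  # waits for the operation to finish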
async def _update_initial(
self,
resource_group_name: str,
image_name: str,
parameters: "_models.ImageUpdate",
**kwargs: Any
) -> "_models.Image":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ImageUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Image', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Image', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
image_name: str,
parameters: "_models.ImageUpdate",
**kwargs: Any
) -> AsyncLROPoller["_models.Image"]:
"""Update an image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param image_name: The name of the image.
:type image_name: str
:param parameters: Parameters supplied to the Update Image operation.
:type parameters: ~azure.mgmt.compute.v2019_12_01.models.ImageUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Image or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2019_12_01.models.Image]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
image_name=image_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Image', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
image_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
image_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an Image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param image_name: The name of the image.
:type image_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
image_name=image_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore
async def get(
self,
resource_group_name: str,
image_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.Image":
"""Gets an image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param image_name: The name of the image.
:type image_name: str
:param expand: The expand expression to apply on the operation.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Image, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2019_12_01.models.Image
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Image', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore
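    # Illustrative sketch (not part of the generated code): a direct, non-LRO
    # call, assuming the same authenticated async ``client`` as above:
    #
    #     image = await client.images.get("my-resource-group", "my-image")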
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ImageListResult"]:
"""Gets the list of images under a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ImageListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_12_01.models.ImageListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ImageListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images'} # type: ignore
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.ImageListResult"]:
"""Gets the list of Images in the subscription. Use nextLink property in the response to get the
next page of Images. Do this till nextLink is null to fetch all the Images.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ImageListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_12_01.models.ImageListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ImageListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/images'} # type: ignore
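    # Illustrative sketch (not part of the generated code): both list methods
    # return AsyncItemPaged, so results are consumed with ``async for``, e.g.
    #
    #     async for image in client.images.list_by_resource_group("my-rg"):
    #         print(image.name)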
| mit | -7,266,428,311,589,017,000 | 47.407591 | 181 | 0.634907 | false |
pradyu1993/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 1 | 34415 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# License: BSD style
import numpy as np
from scipy import linalg, optimize, rand
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import array2d, check_random_state
from ..utils import deprecated
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
if hasattr(linalg, 'solve_triangular'):
# only in scipy since 0.9
solve_triangular = linalg.solve_triangular
else:
# slower, but works
def solve_triangular(x, y, lower=True):
return linalg.solve(x, y)
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
"""
X = array2d(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) / 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij.astype(np.int)
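# Illustrative sketch (not part of the original module): a tiny check of
# l1_cross_distances on a 3-sample design; the values follow directly from the
# definition above, D[k] = |X[ij[k, 0]] - X[ij[k, 1]]| componentwise.
def _example_l1_cross_distances():
    X = np.array([[0., 0.], [1., 2.], [3., 1.]])
    D, ij = l1_cross_distances(X)
    # pairs (0, 1), (0, 2), (1, 2) -> D rows [1, 2], [3, 1], [2, 1]
    return D, ij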
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
        the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
`theta_`: array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
`reduced_likelihood_function_value_`: array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
    The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
# Run input checks
self._check_params()
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) with the observations of the
scalar output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X = array2d(X)
y = np.asarray(y).ravel()[:, np.newaxis]
# Check shapes of DOE & observations
n_samples_X, n_features = X.shape
n_samples_y = y.shape[0]
if n_samples_X != n_samples_y:
raise ValueError("X and y must have the same number of rows.")
else:
n_samples = n_samples_X
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if np.min(np.sum(D, axis=1)) == 0. \
and self.corr != correlation.pure_nugget:
raise Exception("Multiple input features cannot have the same"
" value")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
+ "likely something is going wrong with the "
+ "regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
+ "n_samples=%d must be greater than the "
+ "regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
+ "autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
+ "Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
+ "Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
+ "Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
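    # Editorial usage sketch (not part of the original file): a typical call
    # into fit() through the public estimator API of this scikit-learn era.
    # The data and hyperparameter values below are illustrative assumptions.
    #
    #   >>> import numpy as np
    #   >>> X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
    #   >>> y = (X * np.sin(X)).ravel()
    #   >>> gp = GaussianProcess(corr='squared_exponential',
    #   ...                      theta0=1e-1, thetaL=1e-3, thetaU=1.)
    #   >>> gp = gp.fit(X, y)                        # doctest: +SKIP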
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
            Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
            evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like
An array with shape (n_eval, ) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) with the Mean Squared Error at x.
"""
# Check input shapes
X = array2d(X)
n_eval, n_features_X = X.shape
n_samples, n_features = self.X.shape
# Run input checks
self._check_params(n_samples)
if n_features_X != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
+ "should match the sample size used for fit() "
+ "which is %d.") % (n_features_X, n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
+ "at instanciation. Need to recompute "
+ "autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T)
else:
# Ordinary Kriging
u = np.zeros(y.shape)
MSE = self.sigma2 * (1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
                for k in range(max(1, n_eval // batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
                for k in range(max(1, n_eval // batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
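    # Editorial usage sketch (assumes `gp` was fitted as in the sketch after
    # fit() above): prediction with and without the MSE, and with batching.
    #
    #   >>> x = np.atleast_2d(np.linspace(0, 10, 100)).T
    #   >>> y_pred, mse = gp.predict(x, eval_MSE=True)      # doctest: +SKIP
    #   >>> sigma = np.sqrt(mse)                            # doctest: +SKIP
    #   >>> y_only = gp.predict(x, batch_size=25)           # doctest: +SKIP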
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if np.min(np.sum(D, axis=1)) == 0. \
and self.corr != correlation.pure_nugget:
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
+ "of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
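    # Editorial note on the quantity computed above (restated from the code
    # itself, not taken from the original comments): with R the correlation
    # matrix, C its lower Cholesky factor and rho the generalized
    # least-squares residuals,
    #
    #   sigma2 = (rho ** 2).sum(axis=0) / n_samples
    #   detR   = prod(diag(C)) ** (2. / n_samples)      # = det(R) ** (1/n)
    #   value  = - sigma2.sum() * detR
    #
    # so maximizing `value` over theta is the usual concentrated (profile)
    # likelihood criterion: minimize sigma2 * det(R) ** (1/n).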
@deprecated("to be removed in 0.14, access ``self.theta_`` etc. directly "
" after fit.")
def arg_max_reduced_likelihood_function(self):
return self._arg_max_reduced_likelihood_function()
@property
@deprecated('``theta`` is deprecated and will be removed in 0.14, '
'please use ``theta_`` instead.')
def theta(self):
return self.theta_
@property
@deprecated("``reduced_likelihood_function_value`` is deprecated and will"
"be removed in 0.14, please use "
"``reduced_likelihood_function_value_`` instead.")
def reduced_likelihood_function_value(self):
return self.reduced_likelihood_function_value_
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
            The BLUP parameters associated with the optimal theta.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print "The chosen optimizer is: " + str(self.optimizer)
if self.random_start > 1:
print str(self.random_start) + " random starts are required."
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(theta=10.
** log10t)[0]
constraints = []
for i in range(self.theta0.size):
                # bind the loop variable as a default argument so each
                # constraint keeps its own index i
                constraints.append(lambda log10t, i=i:
                                   log10t[i] - np.log10(self.thetaL[0, i]))
                constraints.append(lambda log10t, i=i:
                                   np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = np.log10(self.thetaL) \
+ rand(self.theta0.size).reshape(self.theta0.shape) \
* np.log10(self.thetaU / self.thetaL)
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0), constraints, iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_minus_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
optimal_rlf_value = - optimal_minus_rlf_value
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print "%s completed" % (5 * percent_completed)
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
            # Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = array2d(self.theta0.min())
self.thetaL = array2d(self.thetaL.min())
self.thetaU = array2d(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print "Proceeding along dimension %d..." % (i + 1)
self.theta0 = array2d(theta_iso)
self.thetaL = array2d(thetaL[0, i])
self.thetaU = array2d(thetaU[0, i])
def corr_cut(t, d):
return corr(array2d(np.hstack([
optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i + 1)::]])), d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError(("This optimizer ('%s') is not "
+ "implemented yet. Please contribute!")
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
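    # Editorial note (illustrative, not from the original file): the path
    # taken above is selected by the constructor arguments, e.g.
    #
    #   >>> gp = GaussianProcess(theta0=[1e-1, 1e-1],
    #   ...                      thetaL=[1e-3, 1e-3], thetaU=[1., 1.],
    #   ...                      optimizer='Welch', random_start=5)
    #
    # 'Welch' first fits an isotropic model and then refines one component
    # of theta at a time via fmin_cobyla, as implemented above.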
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError(("regr should be one of %s or callable, "
+ "%s was given.")
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = array2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError(("corr should be one of %s or callable, "
+ "%s was given.")
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
+ "'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = array2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = array2d(self.thetaL)
self.thetaU = array2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
+ "same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
+ "thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
+ "neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
        if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if not self.optimizer in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
| bsd-3-clause | -5,101,911,511,660,186,000 | 37.366778 | 79 | 0.555165 | false |
bdh1011/wau | venv/lib/python2.7/site-packages/pandas/core/internals.py | 1 | 151884 | import copy
import itertools
import re
import operator
from datetime import datetime, timedelta
from collections import defaultdict
import numpy as np
from pandas.core.base import PandasObject
from pandas.core.common import (_possibly_downcast_to_dtype, isnull,
_NS_DTYPE, _TD_DTYPE, ABCSeries, is_list_like,
ABCSparseSeries, _infer_dtype_from_scalar,
is_null_datelike_scalar, _maybe_promote,
is_timedelta64_dtype, is_datetime64_dtype,
array_equivalent, _maybe_convert_string_to_object,
is_categorical)
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import maybe_convert_indices, length_of_indexer
from pandas.core.categorical import Categorical, maybe_to_categorical
import pandas.core.common as com
from pandas.sparse.array import _maybe_to_sparse, SparseArray
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.computation.expressions as expressions
from pandas.util.decorators import cache_readonly
from pandas.tslib import Timestamp, Timedelta
from pandas import compat
from pandas.compat import range, map, zip, u
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
from pandas.lib import BlockPlacement
class Block(PandasObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
_can_hold_na = False
_downcast_dtype = None
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
_holder = None
def __init__(self, values, placement, ndim=None, fastpath=False):
if ndim is None:
ndim = values.ndim
elif values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
self.ndim = ndim
self.mgr_locs = placement
self.values = values
if len(self.mgr_locs) != len(self.values):
raise ValueError('Wrong number of items passed %d,'
' placement implies %d' % (
len(self.values), len(self.mgr_locs)))
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
def is_datelike(self):
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
def is_categorical_astype(self, dtype):
"""
        validate that the dtype we are casting to is categorical-compatible;
        return True if the target dtype is categorical, False otherwise
"""
if com.is_categorical_dtype(dtype):
if dtype == com.CategoricalDtype():
return True
# this is a pd.Categorical, but is not
            # a valid type for astyping
raise TypeError("invalid type {0} for astype".format(dtype))
return False
def to_dense(self):
return self.values.view()
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return self.dtype
def make_block_same_class(self, values, placement, copy=False, fastpath=True,
**kwargs):
"""
Wrap given values in a block of same type as self.
`kwargs` are used in SparseBlock override.
"""
if copy:
values = values.copy()
return make_block(values, placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not isinstance(new_mgr_locs, BlockPlacement):
new_mgr_locs = BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
def __unicode__(self):
# don't want to print out all of the items here
name = com.pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = '%s: %s dtype: %s' % (
name, len(self), self.dtype)
else:
shape = ' x '.join([com.pprint_thing(s) for s in self.shape])
result = '%s: %s, %s, dtype: %s' % (
name, com.pprint_thing(self.mgr_locs.indexer), shape,
self.dtype)
return result
def __len__(self):
return len(self.values)
def __getstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def reshape_nd(self, labels, shape, ref_items):
"""
Parameters
----------
labels : list of new axis labels
shape : new shape
ref_items : new ref_items
return a new block that is transformed to a nd block
"""
return _block2d_to_blocknd(
values=self.get_values().T,
placement=self.mgr_locs,
shape=shape,
labels=labels,
ref_items=ref_items)
def getitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __getitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if isinstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
return "%s:%s" % (self.dtype, self._ftype)
def merge(self, other):
return _merge_blocks([self, other])
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
if fill_value is None:
fill_value = self.fill_value
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)
def get(self, item):
loc = self.items.get_loc(item)
return self.values[loc]
def iget(self, i):
return self.values[i]
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def apply(self, func, **kwargs):
""" apply the function to my values; return a block if we are not one """
result = func(self.values, **kwargs)
if not isinstance(result, Block):
result = make_block(values=_block_shape(result), placement=self.mgr_locs,)
return result
def fillna(self, value, limit=None, inplace=False, downcast=None):
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
mask = isnull(self.values)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
mask[mask.cumsum(self.ndim-1) > limit] = False
value = self._try_fill(value)
blocks = self.putmask(mask, value, inplace=inplace)
return self._maybe_downcast(blocks, downcast)
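    # Editorial sketch (not part of the original file): Block.fillna is
    # normally reached through the public API; the frame below is an assumed
    # example.
    #
    #   >>> import numpy as np, pandas as pd
    #   >>> df = pd.DataFrame({'a': [1.0, np.nan, 3.0]})
    #   >>> df.fillna(0)    # dispatched to Block.fillna by the BlockManager
    #   ...                 # doctest: +SKIP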
def _maybe_downcast(self, blocks, downcast=None):
# no need to downcast our float
# unless indicated
if downcast is None and self.is_float:
return blocks
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
result_blocks = []
for b in blocks:
result_blocks.extend(b.downcast(downcast))
return result_blocks
def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return [self]
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = _possibly_downcast_to_dtype(values, dtypes)
return [make_block(nv, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
# ndim > 1
if dtypes is None:
return [self]
if not (dtypes == 'infer' or isinstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# item-by-item
# this is expensive as it splits the blocks items-by-item
blocks = []
for i, rl in enumerate(self.mgr_locs):
if dtypes == 'infer':
dtype = 'infer'
else:
raise AssertionError("dtypes as dict is not supported yet")
dtype = dtypes.get(item, self._downcast_dtype)
if dtype is None:
nv = _block_shape(values[i], ndim=self.ndim)
else:
nv = _possibly_downcast_to_dtype(values[i], dtype)
nv = _block_shape(nv, ndim=self.ndim)
blocks.append(make_block(nv,
ndim=self.ndim, fastpath=True,
placement=[rl]))
return blocks
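    # Editorial sketch (assumed example): downcasting is what lets a filled
    # float block fall back to integers when every value is integral, e.g.
    #
    #   >>> pd.Series([1.0, np.nan, 3.0]).fillna(2, downcast='infer')
    #   ...                                           # doctest: +SKIP
    #   # expected to come back with an int64 dtype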
def astype(self, dtype, copy=False, raise_on_error=True, values=None, **kwargs):
return self._astype(dtype, copy=copy, raise_on_error=raise_on_error,
values=values, **kwargs)
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None, **kwargs):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
# may need to convert to categorical
# this is only called for non-categoricals
if self.is_categorical_astype(dtype):
return make_block(Categorical(self.values, **kwargs),
ndim=self.ndim,
placement=self.mgr_locs)
# astype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if copy:
return self.copy()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the copy here
if values is None:
# _astype_nansafe works fine with 1-d only
values = com._astype_nansafe(self.values.ravel(), dtype, copy=True)
values = values.reshape(self.values.shape)
newb = make_block(values,
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True, dtype=dtype, klass=klass)
except:
if raise_on_error is True:
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError("cannot set astype for copy = [%s] for dtype "
"(%s [%s]) with smaller itemsize that current "
"(%s [%s])" % (copy, self.dtype.name,
self.itemsize, newb.dtype.name,
newb.itemsize))
return newb
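    # Editorial sketch (assumed example): _astype backs the public astype();
    # the categorical special case above corresponds to calls such as
    #
    #   >>> s = pd.Series(['a', 'b', 'a'])
    #   >>> s.astype('category')    # ends up backed by a CategoricalBlock
    #   ...                         # doctest: +SKIP
    #   >>> s.astype(object)        # plain dtype conversion  # doctest: +SKIP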
def convert(self, copy=True, **kwargs):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we are not an ObjectBlock here! """
return [self.copy()] if copy else [self]
def _can_hold_element(self, value):
raise NotImplementedError()
def _try_cast(self, value):
raise NotImplementedError()
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type,
        we may have roundtripped through object in the meantime """
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if isinstance(dtype, compat.string_types) and dtype == 'infer':
return result
if not isinstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if isnull(result).all():
return result.astype(np.bool_)
else:
result = result.astype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.astype(np.object_)
return result
# may need to change the dtype here
return _possibly_downcast_to_dtype(result, dtype)
def _try_operate(self, values):
""" return a version to operate on as the input """
return values
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def _try_coerce_and_cast_result(self, result, dtype=None):
result = self._try_coerce_result(result)
result = self._try_cast_result(result, dtype=dtype)
return result
def _try_fill(self, value):
return value
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
if not self.is_object and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
# block actions ####
def copy(self, deep=True):
values = self.values
if deep:
values = values.copy()
return make_block(values, ndim=self.ndim,
klass=self.__class__, fastpath=True,
placement=self.mgr_locs)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
""" replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility."""
mask = com.mask_missing(self.values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
if not mask.any():
if inplace:
return [self]
return [self.copy()]
return self.putmask(mask, value, inplace=inplace)
def setitem(self, indexer, value):
""" set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce args
values, value = self._try_coerce_args(self.values, value)
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = com._maybe_promote(arr_value.dtype)
values = values.astype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
l = len(values)
# length checking
# boolean with truth values == len of the value is ok too
if isinstance(indexer, (np.ndarray, list)):
if is_list_like(value) and len(indexer) != len(value):
if not (isinstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
len(indexer[indexer]) == len(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different length than the value")
# slice
elif isinstance(indexer, slice):
if is_list_like(value) and l:
if len(value) != length_of_indexer(indexer, values):
raise ValueError("cannot set using a slice indexer with a "
"different length than the value")
try:
def _is_scalar_indexer(indexer):
# return True if we are all scalar indexers
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return all([ np.isscalar(idx) for idx in indexer ])
return False
def _is_empty_indexer(indexer):
# return a boolean if we have an empty indexer
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False
# empty indexers
# 8669 (empty)
if _is_empty_indexer(indexer):
pass
# setting a single element for each dim and with a rhs that could be say a list
# GH 6043
elif _is_scalar_indexer(indexer):
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif len(arr_value.shape) and arr_value.shape[0] == values.shape[0] and np.prod(arr_value.shape) == np.prod(values.shape):
values[indexer] = value
values = values.astype(arr_value.dtype)
# set
else:
values[indexer] = value
# coerce and try to infer the dtypes of the result
if np.isscalar(value):
dtype, _ = _infer_dtype_from_scalar(value)
else:
dtype = 'infer'
values = self._try_coerce_and_cast_result(values, dtype)
block = make_block(transf(values),
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True)
# may have to soft convert_objects here
if block.is_object and not self.is_object:
block = block.convert(convert_numeric=False)
return block
except (ValueError, TypeError) as detail:
raise
except Exception as detail:
pass
return [self]
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
# may need to align the new
if hasattr(new, 'reindex_axis'):
new = new.values.T
# may need to align the mask
if hasattr(mask, 'reindex_axis'):
mask = mask.values.T
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isnull(new) and not self.is_object:
new = self.fill_value
if self._can_hold_element(new):
new = self._try_cast(new)
# pseudo-broadcast
if isinstance(new, np.ndarray) and new.ndim == self.ndim - 1:
new = np.repeat(new, self.shape[-1]).reshape(self.shape)
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
# need to go column by column
new_blocks = []
if self.ndim > 1:
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.any():
n = new[i] if isinstance(
new, np.ndarray) else np.array(new)
# type of the new block
dtype, _ = com._maybe_promote(n.dtype)
                        # we need to explicitly astype here to make a copy
n = n.astype(dtype)
nv = _putmask_smart(v, m, n)
else:
nv = v if inplace else v.copy()
# Put back the dimension that was taken from it and make
# a block out of the result.
block = make_block(values=nv[np.newaxis],
placement=[ref_loc],
fastpath=True)
new_blocks.append(block)
else:
nv = _putmask_smart(new_values, mask, new)
new_blocks.append(make_block(values=nv,
placement=self.mgr_locs,
fastpath=True))
return new_blocks
if inplace:
return [self]
return [make_block(new_values,
placement=self.mgr_locs, fastpath=True)]
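    # Editorial sketch (assumed example): masked assignment on a frame is one
    # public path that typically reaches putmask, upcasting the block when
    # the new value cannot be held by the current dtype.
    #
    #   >>> df = pd.DataFrame({'a': [1, 2, 3]})      # int64 block
    #   >>> df[df > 1] = np.nan                      # doctest: +SKIP
    #   >>> df.dtypes                                # doctest: +SKIP
    #   # 'a' expected to be upcast to float64 so it can hold NaN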
def interpolate(self, method='pad', axis=0, index=None,
values=None, inplace=False, limit=None,
fill_value=None, coerce=False, downcast=None, **kwargs):
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.copy()
# a fill na type method
try:
m = com._clean_fill_method(method)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(method=m,
axis=axis,
inplace=inplace,
limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast)
# try an interp method
try:
m = com._clean_interp_method(method, **kwargs)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(method=m,
index=index,
values=values,
axis=axis,
limit=limit,
fill_value=fill_value,
inplace=inplace,
downcast=downcast,
**kwargs)
raise ValueError("invalid method '{0}' to interpolate.".format(method))
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
downcast=None):
""" fillna but using the interpolate machinery """
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
fill_value = self._try_fill(fill_value)
values = self.values if inplace else self.values.copy()
values = self._try_operate(values)
values = com.interpolate_2d(values,
method=method,
axis=axis,
limit=limit,
fill_value=fill_value,
dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [make_block(values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
inplace=False, downcast=None, **kwargs):
""" interpolate using scipy wrappers """
data = self.values if inplace else self.values.copy()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.astype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ('krogh', 'piecewise_polynomial', 'pchip'):
if not index.is_monotonic:
raise ValueError("{0} interpolation requires that the "
"index be monotonic.".format(method))
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
# i.e. not an arg to com.interpolate_1d
return com.interpolate_1d(index, x, method=method, limit=limit,
fill_value=fill_value,
bounds_error=False, **kwargs)
# interp each column independently
interp_values = np.apply_along_axis(func, axis, data)
blocks = [make_block(interp_values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = self.fill_value
new_values = com.take_nd(self.get_values(), indexer, axis=axis,
allow_fill=False)
else:
fill_value = fill_tuple[0]
new_values = com.take_nd(self.get_values(), indexer, axis=axis,
allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
if axis == 0:
slc = lib.indexer_as_slice(indexer)
if slc is not None:
new_mgr_locs = self.mgr_locs[slc]
else:
new_mgr_locs = self.mgr_locs[indexer]
else:
new_mgr_locs = self.mgr_locs
if new_values.dtype != self.dtype:
return make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
def get_values(self, dtype=None):
return self.values
def diff(self, n, axis=1):
""" return block for the diff of the values """
new_values = com.diff(self.values, n, axis=axis)
return [make_block(values=new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def shift(self, periods, axis=0):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, com._ensure_platform_int(periods), axis=axis)
axis_indexer = [ slice(None) ] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None,periods)
else:
axis_indexer[axis] = slice(periods,None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
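    # Editorial sketch (assumed example): the upcast above is why shifting an
    # integer column returns floats, so the vacated positions can hold NaN.
    #
    #   >>> pd.Series([1, 2, 3]).shift(1)            # doctest: +SKIP
    #   # 0    NaN
    #   # 1      1
    #   # 2      2
    #   # dtype: float64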
def eval(self, func, other, raise_on_error=True, try_cast=False):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
Returns
-------
a new block, the result of the func
"""
values = self.values
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim:
is_transposed = True
else:
if values.shape == other.shape[::-1]:
is_transposed = True
elif values.shape[0] == other.shape[-1]:
is_transposed = True
else:
                    # this is a broadcast error here
raise ValueError("cannot broadcast shape [%s] with block "
"values [%s]" % (values.T.shape,
other.shape))
transf = (lambda x: x.T) if is_transposed else (lambda x: x)
# coerce/transpose the args if needed
values, other = self._try_coerce_args(transf(values), other)
# get the result, may need to transpose the other
def get_result(other):
return self._try_coerce_result(func(values, other))
# error handler if we have an issue operating with the function
def handle_error():
if raise_on_error:
raise TypeError('Could not operate %s with block values %s'
% (repr(other), str(detail)))
else:
# return the values
result = np.empty(values.shape, dtype='O')
result.fill(np.nan)
return result
# get the result
try:
result = get_result(other)
# if we have an invalid shape/broadcast error
# GH4576, so raise instead of allowing to pass through
except ValueError as detail:
raise
except Exception as detail:
result = handle_error()
# technically a broadcast error in numpy can 'work' by returning a
# boolean False
if not isinstance(result, np.ndarray):
if not isinstance(result, np.ndarray):
# differentiate between an invalid ndarray-ndarray comparison
# and an invalid type comparison
if isinstance(values, np.ndarray) and is_list_like(other):
raise ValueError('Invalid broadcasting comparison [%s] '
'with block values' % repr(other))
raise TypeError('Could not compare [%s] with block values'
% repr(other))
# transpose if needed
result = transf(result)
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return [make_block(result, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
def where(self, other, cond, align=True, raise_on_error=True,
try_cast=False):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
Returns
-------
a new block(s), the result of the func
"""
values = self.values
# see if we can align other
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim or values.shape == other.shape[::-1]:
# if its symmetric are ok, no reshaping needed (GH 7506)
if (values.shape[0] == np.array(values.shape)).all():
pass
            # pseudo broadcast (it's a 2d vs 1d, say, and where needs it in a
# specific direction)
elif (other.ndim >= 1 and values.ndim - 1 == other.ndim and
values.shape[0] != other.shape[0]):
other = _block_shape(other).T
else:
values = values.T
is_transposed = True
# see if we can align cond
if not hasattr(cond, 'shape'):
raise ValueError(
"where must have a condition that is ndarray like")
if hasattr(cond, 'reindex_axis'):
cond = cond.values
# may need to undo transpose of values
if hasattr(values, 'ndim'):
if values.ndim != cond.ndim or values.shape == cond.shape[::-1]:
values = values.T
is_transposed = not is_transposed
other = _maybe_convert_string_to_object(other)
# our where function
def func(c, v, o):
if c.ravel().all():
return v
v, o = self._try_coerce_args(v, o)
try:
return self._try_coerce_result(
expressions.where(c, v, o, raise_on_error=True)
)
except Exception as detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values '
'[%s]' % (repr(o), str(detail)))
else:
# return the values
result = np.empty(v.shape, dtype='float64')
result.fill(np.nan)
return result
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
result = func(cond, values, other)
if self._can_hold_na or self.ndim == 1:
if not isinstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
if is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return make_block(result,
ndim=self.ndim, placement=self.mgr_locs)
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].all() for i in range(cond.shape[0])],
dtype=bool)
result_blocks = []
for m in [mask, ~mask]:
if m.any():
r = self._try_cast_result(
result.take(m.nonzero()[0], axis=axis))
result_blocks.append(make_block(r.T,
placement=self.mgr_locs[m]))
return result_blocks
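    # Editorial sketch (assumed example): Block.where backs DataFrame.where
    # and DataFrame.mask at the public level.
    #
    #   >>> df = pd.DataFrame({'a': [1.0, -2.0, 3.0]})
    #   >>> df.where(df > 0, other=0.0)              # doctest: +SKIP
    #   # negative entries replaced by 0.0, dtype preserved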
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
return array_equivalent(self.values, other.values)
class NonConsolidatableMixIn(object):
""" hold methods for the nonconsolidatable blocks """
_can_consolidate = False
_verify_integrity = False
_validate_ndim = False
_holder = None
def __init__(self, values, placement,
ndim=None, fastpath=False,):
# Placement must be converted to BlockPlacement via property setter
# before ndim logic, because placement may be a slice which doesn't
# have a length.
self.mgr_locs = placement
# kludgetastic
if ndim is None:
if len(self.mgr_locs) != 1:
ndim = 1
else:
ndim = 2
self.ndim = ndim
if not isinstance(values, self._holder):
raise TypeError("values must be {0}".format(self._holder.__name__))
self.values = values
def get_values(self, dtype=None):
""" need to to_dense myself (and always return a ndim sized object) """
values = self.values.to_dense()
if values.ndim == self.ndim - 1:
values = values.reshape((1,) + values.shape)
return values
def iget(self, col):
if self.ndim == 2 and isinstance(col, tuple):
col, loc = col
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
else:
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values
def should_store(self, value):
return isinstance(value, self._holder)
def set(self, locs, values, check=False):
assert locs.tolist() == [0]
self.values = values
def get(self, item):
if self.ndim == 1:
loc = self.items.get_loc(item)
return self.values[loc]
else:
return self.values
def _slice(self, slicer):
""" return a slice of my values (but densify first) """
return self.get_values()[slicer]
def _try_cast_result(self, result, dtype=None):
return result
class NumericBlock(Block):
__slots__ = ()
is_numeric = True
_can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
left, right = self.values, other.values
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
_downcast_dtype = 'int64'
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, (np.floating, np.integer)) and not issubclass(
tipo, (np.datetime64, np.timedelta64))
return isinstance(element, (float, int, np.float_, np.int_)) and not isinstance(
element, (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64))
def _try_cast(self, element):
try:
return float(element)
except: # pragma: no cover
return element
def to_native_types(self, slicer=None, na_rep='', float_format=None, decimal='.',
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
formatter = None
if float_format and decimal != '.':
formatter = lambda v : (float_format % v).replace('.',decimal,1)
elif decimal != '.':
formatter = lambda v : ('%g' % v).replace('.',decimal,1)
elif float_format:
formatter = lambda v : float_format % v
if formatter is None and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
if formatter:
imask = (~mask).ravel()
values.flat[imask] = np.array(
[formatter(val) for val in values.ravel()[imask]])
return values
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return (issubclass(value.dtype.type, np.floating) and
value.dtype == self.dtype)
class ComplexBlock(FloatOrComplexBlock):
__slots__ = ()
is_complex = True
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, (np.floating, np.integer, np.complexfloating))
return (isinstance(element, (float, int, complex, np.float_, np.int_)) and
not isinstance(bool, np.bool_))
def _try_cast(self, element):
try:
return complex(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
__slots__ = ()
is_integer = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, np.integer) and not issubclass(tipo, (np.datetime64, np.timedelta64))
return com.is_integer(element)
def _try_cast(self, element):
try:
return int(element)
except: # pragma: no cover
return element
def should_store(self, value):
return com.is_integer_dtype(value) and value.dtype == self.dtype
class TimeDeltaBlock(IntBlock):
__slots__ = ()
is_timedelta = True
_can_hold_na = True
is_numeric = False
@property
def fill_value(self):
return tslib.iNaT
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
value = tslib.iNaT
elif isinstance(value, Timedelta):
value = value.value
elif isinstance(value, np.timedelta64):
pass
elif com.is_integer(value):
# coerce to seconds of timedelta
value = np.timedelta64(int(value * 1e9))
elif isinstance(value, timedelta):
value = np.timedelta64(value)
return value
def _try_coerce_args(self, values, other):
""" Coerce values and other to float64, with null values converted to
NaN. values is always ndarray-like, other may not be """
def masker(v):
mask = isnull(v)
v = v.astype('float64')
v[mask] = np.nan
return v
values = masker(values)
if is_null_datelike_scalar(other):
other = np.nan
elif isinstance(other, (np.timedelta64, Timedelta, timedelta)):
other = _coerce_scalar_to_timedelta_type(other, unit='s', box=False).item()
if other == tslib.iNaT:
other = np.nan
elif lib.isscalar(other):
other = np.float64(other)
else:
other = masker(other)
return values, other
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_result(self, result):
""" reverse of try_coerce_args / try_operate """
if isinstance(result, np.ndarray):
mask = isnull(result)
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('m8[ns]')
result[mask] = tslib.iNaT
elif isinstance(result, np.integer):
result = lib.Timedelta(result)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.timedelta64)
def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
rvalues = np.empty(values.shape, dtype=object)
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (~mask).ravel()
#### FIXME ####
# should use the core.format.Timedelta64Formatter here
# to figure what format to pass to the Timedelta
# e.g. to not show the decimals say
rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all')
for val in values.ravel()[imask]],
dtype=object)
return rvalues
def get_values(self, dtype=None):
# return object dtypes as Timedelta
if dtype == object:
return lib.map_infer(self.values.ravel(), lib.Timedelta
).reshape(self.values.shape)
return self.values
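# Editorial sketch (assumed example): timedelta data is held in a
# TimeDeltaBlock, with missing values stored as iNaT and surfaced as NaT.
#
#   >>> s = pd.Series(pd.to_timedelta(['1 days', None, '3 days']))
#   >>> s.isnull()                                   # doctest: +SKIP
#   >>> s.fillna(pd.Timedelta('0 days'))             # doctest: +SKIP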
class BoolBlock(NumericBlock):
__slots__ = ()
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, np.integer)
return isinstance(element, (int, bool))
def _try_cast(self, element):
try:
return bool(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
to_replace_values = np.atleast_1d(to_replace)
if not np.can_cast(to_replace_values, bool):
return self
return super(BoolBlock, self).replace(to_replace, value,
inplace=inplace, filter=filter,
regex=regex)
class ObjectBlock(Block):
__slots__ = ()
is_object = True
_can_hold_na = True
def __init__(self, values, ndim=2, fastpath=False,
placement=None):
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object)
super(ObjectBlock, self).__init__(values, ndim=ndim,
fastpath=fastpath,
placement=placement)
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type
object
"""
return lib.is_bool_array(self.values.ravel())
def convert(self, convert_dates=True, convert_numeric=True, convert_timedeltas=True,
copy=True, by_item=True):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
# attempt to create new type blocks
blocks = []
if by_item and not self._is_single_block:
for i, rl in enumerate(self.mgr_locs):
values = self.iget(i)
values = com._possibly_convert_objects(
values.ravel(), convert_dates=convert_dates,
convert_numeric=convert_numeric,
convert_timedeltas=convert_timedeltas,
).reshape(values.shape)
values = _block_shape(values, ndim=self.ndim)
newb = make_block(values,
ndim=self.ndim, placement=[rl])
blocks.append(newb)
else:
values = com._possibly_convert_objects(
self.values.ravel(), convert_dates=convert_dates,
convert_numeric=convert_numeric
).reshape(self.values.shape)
blocks.append(make_block(values,
ndim=self.ndim, placement=self.mgr_locs))
return blocks
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
# GH6026
if check:
try:
if (self.values[locs] == values).all():
return
except:
pass
try:
self.values[locs] = values
        except ValueError:
# broadcasting error
# see GH6171
new_shape = list(values.shape)
new_shape[0] = len(self.items)
self.values = np.empty(tuple(new_shape),dtype=self.dtype)
self.values.fill(np.nan)
self.values[locs] = values
def _maybe_downcast(self, blocks, downcast=None):
if downcast is not None:
return blocks
# split and convert the blocks
result_blocks = []
for blk in blocks:
result_blocks.extend(blk.convert(convert_dates=True,
convert_numeric=False))
return result_blocks
def _can_hold_element(self, element):
return True
def _try_cast(self, element):
return element
def should_store(self, value):
return not (issubclass(value.dtype.type,
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_)) or com.is_categorical_dtype(value))
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
blk = [self]
to_rep_is_list = com.is_list_like(to_replace)
value_is_list = com.is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
if not either_list and com.is_re(to_replace):
blk[0], = blk[0]._replace_single(to_replace, value,
inplace=inplace, filter=filter,
regex=True)
elif not (either_list or regex):
blk = super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter, regex=regex)
elif both_lists:
for to_rep, v in zip(to_replace, value):
blk[0], = blk[0]._replace_single(to_rep, v, inplace=inplace,
filter=filter, regex=regex)
elif to_rep_is_list and regex:
for to_rep in to_replace:
blk[0], = blk[0]._replace_single(to_rep, value,
inplace=inplace,
filter=filter, regex=regex)
else:
blk[0], = blk[0]._replace_single(to_replace, value,
inplace=inplace, filter=filter,
regex=regex)
return blk
def _replace_single(self, to_replace, value, inplace=False, filter=None,
regex=False):
# to_replace is regex compilable
to_rep_re = regex and com.is_re_compilable(to_replace)
# regex is regex compilable
regex_re = com.is_re_compilable(regex)
# only one will survive
if to_rep_re and regex_re:
raise AssertionError('only one of to_replace and regex can be '
'regex compilable')
# if regex was passed as something that can be a regex (rather than a
# boolean)
if regex_re:
to_replace = regex
regex = regex_re or to_rep_re
# try to get the pattern attribute (compiled re) or it's a string
try:
pattern = to_replace.pattern
except AttributeError:
pattern = to_replace
# if the pattern is not empty and to_replace is either a string or a
# regex
if regex and pattern:
rx = re.compile(to_replace)
else:
# if the thing to replace is not a string or compiled regex call
# the superclass method -> to_replace is some kind of object
result = super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter,
regex=regex)
if not isinstance(result, list):
result = [result]
return result
new_values = self.values if inplace else self.values.copy()
# deal with replacing values with objects (strings) that match but
# whose replacement is not a string (numeric, nan, object)
if isnull(value) or not isinstance(value, compat.string_types):
def re_replacer(s):
try:
return value if rx.search(s) is not None else s
except TypeError:
return s
else:
# value is guaranteed to be a string here, s can be either a string
            # or null; if it's null it gets returned as-is
def re_replacer(s):
try:
return rx.sub(value, s)
except TypeError:
return s
f = np.vectorize(re_replacer, otypes=[self.dtype])
if filter is None:
filt = slice(None)
else:
filt = self.mgr_locs.isin(filter).nonzero()[0]
new_values[filt] = f(new_values[filt])
return [self if inplace else
make_block(new_values,
fastpath=True, placement=self.mgr_locs)]
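# Editorial sketch (assumed example): the regex machinery above serves
# DataFrame.replace(..., regex=True) on object-dtype columns.
#
#   >>> df = pd.DataFrame({'a': ['foo', 'bar', 'baz']})
#   >>> df.replace(r'^ba.$', 'qux', regex=True)      # doctest: +SKIP
#   # 'bar' and 'baz' become 'qux'; 'foo' is left untouched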
class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
__slots__ = ()
is_categorical = True
_can_hold_na = True
_holder = Categorical
def __init__(self, values, placement,
fastpath=False, **kwargs):
# coerce to categorical if we can
super(CategoricalBlock, self).__init__(maybe_to_categorical(values),
fastpath=True, placement=placement,
**kwargs)
@property
def is_view(self):
""" I am never a view """
return False
def to_dense(self):
return self.values.to_dense().view()
@property
def shape(self):
return (len(self.mgr_locs), len(self.values))
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return np.object_
def _slice(self, slicer):
""" return a slice of my values """
# slice the category
# return same dims as we currently have
return self.values._slice(slicer)
def fillna(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
values = self.values if inplace else self.values.copy()
return [self.make_block_same_class(values=values.fillna(value=value,
limit=limit),
placement=self.mgr_locs)]
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = self.values if inplace else self.values.copy()
return self.make_block_same_class(values=values.fillna(fill_value=fill_value,
method=method,
limit=limit),
placement=self.mgr_locs)
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = None
else:
fill_value = fill_tuple[0]
# axis doesn't matter; we are really a single-dim object
        # but are passed the axis depending on the calling routine
        # if it's REALLY axis 0, then this will be a reindex and not a take
new_values = self.values.take_nd(indexer, fill_value=fill_value)
# if we are a 1-dim object, then always place at 0
if self.ndim == 1:
new_mgr_locs = [0]
else:
if new_mgr_locs is None:
new_mgr_locs = self.mgr_locs
return self.make_block_same_class(new_values, new_mgr_locs)
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
new_values[mask] = new
return [self.make_block_same_class(values=new_values, placement=self.mgr_locs)]
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
if self.is_categorical_astype(dtype):
values = self.values
else:
values = np.asarray(self.values).astype(dtype, copy=False)
if copy:
values = values.copy()
return make_block(values,
ndim=self.ndim,
placement=self.mgr_locs)
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
# Categorical is always one dimension
values = values[slicer]
mask = isnull(values)
values = np.array(values, dtype='object')
values[mask] = na_rep
# we are expected to return a 2-d ndarray
        return values.reshape(1, len(values))
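# Illustrative sketch (not part of the original source): a categorical column
# lives in its own non-consolidatable CategoricalBlock, one block per column,
# alongside whatever consolidated blocks the other columns form. The private
# ``_data`` accessor below is an assumption tied to this vintage of pandas:
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({'a': pd.Categorical(['x', 'y', 'x']), 'b': [1, 2, 3]})
#   >>> sorted(type(b).__name__ for b in df._data.blocks)
#   ['CategoricalBlock', 'IntBlock']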
class DatetimeBlock(Block):
__slots__ = ()
is_datetime = True
_can_hold_na = True
def __init__(self, values, placement,
fastpath=False, **kwargs):
if values.dtype != _NS_DTYPE:
values = tslib.cast_to_nanoseconds(values)
super(DatetimeBlock, self).__init__(values,
fastpath=True, placement=placement,
**kwargs)
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return element.dtype == _NS_DTYPE or element.dtype == np.int64
return (com.is_integer(element) or
isinstance(element, datetime) or
isnull(element))
def _try_cast(self, element):
try:
return int(element)
except:
return element
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_args(self, values, other):
""" Coerce values and other to dtype 'i8'. NaN and NaT convert to
the smallest i8, and will correctly round-trip to NaT if converted
back in _try_coerce_result. values is always ndarray-like, other
may not be """
values = values.view('i8')
if is_null_datelike_scalar(other):
other = tslib.iNaT
elif isinstance(other, datetime):
other = lib.Timestamp(other).asm8.view('i8')
elif hasattr(other, 'dtype') and com.is_integer_dtype(other):
other = other.view('i8')
else:
other = np.array(other, dtype='i8')
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if isinstance(result, np.ndarray):
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('M8[ns]')
elif isinstance(result, (np.integer, np.datetime64)):
result = lib.Timestamp(result)
return result
@property
def fill_value(self):
return tslib.iNaT
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
value = tslib.iNaT
return value
def fillna(self, value, limit=None,
inplace=False, downcast=None):
# straight putmask here
values = self.values if inplace else self.values.copy()
mask = isnull(self.values)
value = self._try_fill(value)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
            mask[mask.cumsum(self.ndim - 1) > limit] = False
np.putmask(values, mask, value)
return [self if inplace else
make_block(values,
fastpath=True, placement=self.mgr_locs)]
def to_native_types(self, slicer=None, na_rep=None, date_format=None,
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
from pandas.core.format import _get_format_datetime64_from_values
format = _get_format_datetime64_from_values(values, date_format)
result = tslib.format_array_from_datetime(values.view('i8').ravel(),
tz=None,
format=format,
na_rep=na_rep).reshape(values.shape)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.datetime64)
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
if values.dtype != _NS_DTYPE:
# Workaround for numpy 1.6 bug
values = tslib.cast_to_nanoseconds(values)
self.values[locs] = values
def get_values(self, dtype=None):
# return object dtype as Timestamps
if dtype == object:
return lib.map_infer(self.values.ravel(), lib.Timestamp)\
.reshape(self.values.shape)
return self.values
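# Illustrative sketch (not part of the original source): datetime data is
# always held as datetime64[ns]; missing values travel through the iNaT
# sentinel used by _try_coerce_args/_try_coerce_result above. The ``_data``
# accessor is again an internal, version-specific assumption:
#
#   >>> import pandas as pd
#   >>> s = pd.Series(pd.to_datetime(['2014-01-01', None]))
#   >>> s.dtype
#   dtype('<M8[ns]')
#   >>> type(s._data.blocks[0]).__name__
#   'DatetimeBlock'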
class SparseBlock(NonConsolidatableMixIn, Block):
""" implement as a list of sparse arrays of the same dtype """
__slots__ = ()
is_sparse = True
is_numeric = True
_can_hold_na = True
_ftype = 'sparse'
_holder = SparseArray
@property
def shape(self):
return (len(self.mgr_locs), self.sp_index.length)
@property
def itemsize(self):
return self.dtype.itemsize
@property
def fill_value(self):
#return np.nan
return self.values.fill_value
@fill_value.setter
def fill_value(self, v):
# we may need to upcast our fill to match our dtype
if issubclass(self.dtype.type, np.floating):
v = float(v)
self.values.fill_value = v
@property
def sp_values(self):
return self.values.sp_values
@sp_values.setter
def sp_values(self, v):
# reset the sparse values
self.values = SparseArray(v, sparse_index=self.sp_index,
kind=self.kind, dtype=v.dtype,
fill_value=self.values.fill_value,
copy=False)
@property
def sp_index(self):
return self.values.sp_index
@property
def kind(self):
return self.values.kind
def __len__(self):
try:
return self.sp_index.length
except:
return 0
def copy(self, deep=True):
return self.make_block_same_class(values=self.values,
sparse_index=self.sp_index,
kind=self.kind, copy=deep,
placement=self.mgr_locs)
def make_block_same_class(self, values, placement,
sparse_index=None, kind=None, dtype=None,
fill_value=None, copy=False, fastpath=True):
""" return a new block """
if dtype is None:
dtype = self.dtype
if fill_value is None:
fill_value = self.values.fill_value
# if not isinstance(values, SparseArray) and values.ndim != self.ndim:
# raise ValueError("ndim mismatch")
if values.ndim == 2:
nitems = values.shape[0]
if nitems == 0:
# kludgy, but SparseBlocks cannot handle slices, where the
# output is 0-item, so let's convert it to a dense block: it
# won't take space since there's 0 items, plus it will preserve
# the dtype.
return make_block(np.empty(values.shape, dtype=dtype),
placement, fastpath=True,)
elif nitems > 1:
raise ValueError("Only 1-item 2d sparse blocks are supported")
else:
values = values.reshape(values.shape[1])
new_values = SparseArray(values, sparse_index=sparse_index,
kind=kind or self.kind, dtype=dtype,
fill_value=fill_value, copy=copy)
return make_block(new_values, ndim=self.ndim,
fastpath=fastpath, placement=placement)
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = com.interpolate_2d(
self.values.to_dense(), method, axis, limit, fill_value)
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
def fillna(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
if issubclass(self.dtype.type, np.floating):
value = float(value)
values = self.values if inplace else self.values.copy()
return [self.make_block_same_class(values=values.get_values(value),
fill_value=value,
placement=self.mgr_locs)]
def shift(self, periods, axis=0):
""" shift the block by periods """
N = len(self.values.T)
indexer = np.zeros(N, dtype=int)
if periods > 0:
indexer[periods:] = np.arange(N - periods)
else:
indexer[:periods] = np.arange(-periods, N)
new_values = self.values.to_dense().take(indexer)
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(new_values)
if periods > 0:
new_values[:periods] = fill_value
else:
new_values[periods:] = fill_value
return [self.make_block_same_class(new_values, placement=self.mgr_locs)]
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
# taking on the 0th axis always here
if fill_value is None:
fill_value = self.fill_value
return self.make_block_same_class(self.values.take(indexer),
fill_value=fill_value,
placement=self.mgr_locs)
def sparse_reindex(self, new_index):
""" sparse reindex and return a new block
current reindex only works for float64 dtype! """
values = self.values
values = values.sp_index.to_int_index().reindex(
values.sp_values.astype('float64'), values.fill_value, new_index)
return self.make_block_same_class(values, sparse_index=new_index,
placement=self.mgr_locs)
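# Illustrative sketch (not part of the original source): a SparseSeries keeps
# its data in a single SparseBlock whose values are a SparseArray, which is
# why the block is flagged non-consolidatable. Sketched against this vintage
# of pandas (``to_sparse`` and ``_data`` are assumptions about that API):
#
#   >>> import numpy as np
#   >>> import pandas as pd
#   >>> ss = pd.Series([1.0, np.nan, 3.0]).to_sparse()
#   >>> blk = ss._data.blocks[0]
#   >>> type(blk).__name__, type(blk.values).__name__
#   ('SparseBlock', 'SparseArray')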
def make_block(values, placement, klass=None, ndim=None,
dtype=None, fastpath=False):
if klass is None:
dtype = dtype or values.dtype
vtype = dtype.type
if isinstance(values, SparseArray):
klass = SparseBlock
elif issubclass(vtype, np.floating):
klass = FloatBlock
elif (issubclass(vtype, np.integer) and
issubclass(vtype, np.timedelta64)):
klass = TimeDeltaBlock
elif (issubclass(vtype, np.integer) and
not issubclass(vtype, np.datetime64)):
klass = IntBlock
elif dtype == np.bool_:
klass = BoolBlock
elif issubclass(vtype, np.datetime64):
klass = DatetimeBlock
elif issubclass(vtype, np.complexfloating):
klass = ComplexBlock
elif is_categorical(values):
klass = CategoricalBlock
else:
klass = ObjectBlock
return klass(values, ndim=ndim, fastpath=fastpath,
placement=placement)
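# Illustrative sketch (not part of the original source): when no klass is
# passed, make_block picks the Block subclass purely from the values' dtype.
# A minimal sketch, run inside this module so make_block is in scope:
#
#   >>> import numpy as np
#   >>> blk = make_block(np.arange(6.0).reshape(2, 3), placement=slice(0, 2))
#   >>> type(blk).__name__
#   'FloatBlock'
#   >>> type(make_block(np.array([[True, False]]), placement=[0])).__name__
#   'BoolBlock'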
# TODO: flexible with index=None and/or items=None
class BlockManager(PandasObject):
"""
Core internal data structure to implement DataFrame
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
copy(deep=True)
get_dtype_counts
get_ftype_counts
get_dtypes
get_ftypes
apply(func, axes, block_filter_fn)
get_bool_data
get_numeric_data
get_slice(slice_like, axis)
get(label)
iget(loc)
get_scalar(label_tup)
take(indexer, axis)
reindex_axis(new_labels, axis)
reindex_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated',
'_is_consolidated', '_blknos', '_blklocs']
def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = tuple(blocks)
for block in blocks:
if block.is_sparse:
if len(block.mgr_locs) != 1:
raise AssertionError("Sparse block refers to multiple items")
else:
if self.ndim != block.ndim:
raise AssertionError(('Number of Block dimensions (%d) must '
'equal number of axes (%d)')
% (block.ndim, self.ndim))
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
self._rebuild_blknos_and_blklocs()
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [_ensure_index([])] + [
_ensure_index(a) for a in self.axes[1:]
]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes)
def __nonzero__(self):
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self):
return len(self.axes)
def set_axis(self, axis, new_labels):
new_labels = _ensure_index(new_labels)
old_len = len(self.axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError('Length mismatch: Expected axis has %d elements, '
'new values have %d elements' % (old_len, new_len))
self.axes[axis] = new_labels
def rename_axis(self, mapper, axis, copy=True):
"""
Rename one of axes.
Parameters
----------
mapper : unary callable
axis : int
copy : boolean, default True
"""
obj = self.copy(deep=copy)
obj.set_axis(axis, _transform_index(self.axes[axis], mapper))
return obj
def add_prefix(self, prefix):
f = (str(prefix) + '%s').__mod__
return self.rename_axis(f, axis=0)
def add_suffix(self, suffix):
f = ('%s' + str(suffix)).__mod__
return self.rename_axis(f, axis=0)
@property
def _is_single_block(self):
if self.ndim == 1:
return True
if len(self.blocks) != 1:
return False
blk = self.blocks[0]
return (blk.mgr_locs.is_slice_like and
blk.mgr_locs.as_slice == slice(0, len(self), 1))
def _rebuild_blknos_and_blklocs(self):
"""
Update mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.int64)
new_blklocs = np.empty(self.shape[0], dtype=np.int64)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(len(rl))
if (new_blknos == -1).any():
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
# make items read only for now
def _get_items(self):
return self.axes[0]
items = property(fget=_get_items)
def _get_counts(self, f):
""" return a dict of the counts of the function in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
v = f(b)
counts[v] = counts.get(v, 0) + b.shape[0]
return counts
def get_dtype_counts(self):
return self._get_counts(lambda b: b.dtype.name)
def get_ftype_counts(self):
return self._get_counts(lambda b: b.ftype)
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return com.take_1d(dtypes, self._blknos, allow_fill=False)
def get_ftypes(self):
ftypes = np.array([blk.ftype for blk in self.blocks])
return com.take_1d(ftypes, self._blknos, allow_fill=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = [ax for ax in self.axes]
extra_state = {
'0.14.1': {
'axes': axes_array,
'blocks': [dict(values=b.values,
mgr_locs=b.mgr_locs.indexer)
for b in self.blocks]
}
}
# First three elements of the state are to maintain forward
# compatibility with 0.13.1.
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs):
# numpy < 1.7 pickle compat
if values.dtype == 'M8[us]':
values = values.astype('M8[ns]')
return make_block(values, placement=mgr_locs)
if (isinstance(state, tuple) and len(state) >= 4
and '0.14.1' in state[3]):
state = state[3]['0.14.1']
self.axes = [_ensure_index(ax) for ax in state['axes']]
self.blocks = tuple(
unpickle_block(b['values'], b['mgr_locs'])
for b in state['blocks'])
else:
# discard anything after 3rd, support beta pickling format for a
# little while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [_ensure_index(ax) for ax in ax_arrays]
if len(bitems) == 1 and self.axes[0].equals(bitems[0]):
# This is a workaround for pre-0.14.1 pickles that didn't
# support unpickling multi-block frames/panels with non-unique
# columns/items, because given a manager with items ["a", "b",
# "a"] there's no way of knowing which block's "a" is where.
#
# Single-block case can be supported under the assumption that
# block items corresponded to manager items 1-to-1.
all_mgr_locs = [slice(0, len(bitems[0]))]
else:
all_mgr_locs = [self.axes[0].get_indexer(blk_items)
for blk_items in bitems]
self.blocks = tuple(
unpickle_block(values, mgr_locs)
for values, mgr_locs in zip(bvalues, all_mgr_locs))
self._post_setstate()
def _post_setstate(self):
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __len__(self):
return len(self.items)
def __unicode__(self):
output = com.pprint_thing(self.__class__.__name__)
for i, ax in enumerate(self.axes):
if i == 0:
output += u('\nItems: %s') % ax
else:
output += u('\nAxis %d: %s') % (i, ax)
for block in self.blocks:
output += u('\n%s') % com.pprint_thing(block)
return output
def _verify_integrity(self):
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if not block.is_sparse and block.shape[1:] != mgr_shape[1:]:
construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError('Number of manager items must equal union of '
'block items\n# manager items: {0}, # '
'tot_items: {1}'.format(len(self.items),
tot_items))
def apply(self, f, axes=None, filter=None, do_integrity_check=False, **kwargs):
"""
iterate over the blocks, collect and create a new block manager
Parameters
----------
f : the callable or function name to operate on at the block level
axes : optional (if not supplied, use self.axes)
filter : list, if supplied, only call the block if the filter is in
the block
do_integrity_check : boolean, default False. Do the block manager integrity check
Returns
-------
Block Manager (new object)
"""
result_blocks = []
# filter kwarg is used in replace-* family of methods
if filter is not None:
filter_locs = set(self.items.get_indexer_for(filter))
if len(filter_locs) == len(self.items):
# All items are included, as if there were no filtering
filter = None
else:
kwargs['filter'] = filter_locs
if f == 'where' and kwargs.get('align', True):
align_copy = True
align_keys = ['other', 'cond']
elif f == 'putmask' and kwargs.get('align', True):
align_copy = False
align_keys = ['new', 'mask']
elif f == 'eval':
align_copy = False
align_keys = ['other']
elif f == 'fillna':
# fillna internally does putmask, maybe it's better to do this
# at mgr, not block level?
align_copy = False
align_keys = ['value']
else:
align_keys = []
aligned_args = dict((k, kwargs[k]) for k in align_keys
if hasattr(kwargs[k], 'reindex_axis'))
for b in self.blocks:
if filter is not None:
if not b.mgr_locs.isin(filter_locs).any():
result_blocks.append(b)
continue
if aligned_args:
b_items = self.items[b.mgr_locs.indexer]
for k, obj in aligned_args.items():
axis = getattr(obj, '_info_axis_number', 0)
kwargs[k] = obj.reindex_axis(b_items, axis=axis,
copy=align_copy)
applied = getattr(b, f)(**kwargs)
if isinstance(applied, list):
result_blocks.extend(applied)
else:
result_blocks.append(applied)
if len(result_blocks) == 0:
return self.make_empty(axes or self.axes)
bm = self.__class__(result_blocks, axes or self.axes,
do_integrity_check=do_integrity_check)
bm._consolidate_inplace()
return bm
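    # Illustrative sketch (not part of the original source): the thin wrappers
    # below (fillna, astype, replace, ...) all funnel through apply(), which
    # calls the named method on every block and rebuilds a manager from the
    # results. A minimal sketch against this vintage of pandas (``_data`` is
    # an internal accessor):
    #
    #   >>> import numpy as np
    #   >>> import pandas as pd
    #   >>> mgr = pd.DataFrame({'a': [1.0, np.nan]})._data
    #   >>> filled = mgr.fillna(value=0.0)  # same as mgr.apply('fillna', value=0.0)
    #   >>> filled.blocks[0].values.tolist()
    #   [[1.0, 0.0]]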
def isnull(self, **kwargs):
return self.apply('apply', **kwargs)
def where(self, **kwargs):
return self.apply('where', **kwargs)
def eval(self, **kwargs):
return self.apply('eval', **kwargs)
def setitem(self, **kwargs):
return self.apply('setitem', **kwargs)
def putmask(self, **kwargs):
return self.apply('putmask', **kwargs)
def diff(self, **kwargs):
return self.apply('diff', **kwargs)
def interpolate(self, **kwargs):
return self.apply('interpolate', **kwargs)
def shift(self, **kwargs):
return self.apply('shift', **kwargs)
def fillna(self, **kwargs):
return self.apply('fillna', **kwargs)
def downcast(self, **kwargs):
return self.apply('downcast', **kwargs)
def astype(self, dtype, **kwargs):
return self.apply('astype', dtype=dtype, **kwargs)
def convert(self, **kwargs):
return self.apply('convert', **kwargs)
def replace(self, **kwargs):
return self.apply('replace', **kwargs)
def replace_list(self, src_list, dest_list, inplace=False, regex=False):
""" do a list replace """
# figure out our mask a-priori to avoid repeated replacements
values = self.as_matrix()
def comp(s):
if isnull(s):
return isnull(values)
return _possibly_compare(values, getattr(s, 'asm8', s),
operator.eq)
masks = [comp(s) for i, s in enumerate(src_list)]
result_blocks = []
for blk in self.blocks:
# its possible to get multiple result blocks here
# replace ALWAYS will return a list
rb = [blk if inplace else blk.copy()]
for i, (s, d) in enumerate(zip(src_list, dest_list)):
new_rb = []
for b in rb:
if b.dtype == np.object_:
result = b.replace(s, d, inplace=inplace,
regex=regex)
if isinstance(result, list):
new_rb.extend(result)
else:
new_rb.append(result)
else:
# get our mask for this element, sized to this
# particular block
m = masks[i][b.mgr_locs.indexer]
if m.any():
new_rb.extend(b.putmask(m, d, inplace=True))
else:
new_rb.append(b)
rb = new_rb
result_blocks.extend(rb)
bm = self.__class__(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
def reshape_nd(self, axes, **kwargs):
""" a 2d-nd reshape operation on a BlockManager """
return self.apply('reshape_nd', axes=axes, **kwargs)
def is_consolidated(self):
"""
        Return True if the blocks are consolidated, i.e. no two blocks share
        the same ftype
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self):
ftypes = [blk.ftype for blk in self.blocks]
self._is_consolidated = len(ftypes) == len(set(ftypes))
self._known_consolidated = True
@property
def is_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return len(self.blocks) > 1
@property
def is_numeric_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return all([block.is_numeric for block in self.blocks])
@property
def is_datelike_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return any([block.is_datelike for block in self.blocks])
@property
def is_view(self):
""" return a boolean if we are a single block and are a view """
if len(self.blocks) == 1:
return self.blocks[0].is_view
# It is technically possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
# but then we have the case of possibly some blocks being a view
# and some blocks not. setting in theory is possible on the non-view
# blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
# complicated
return False
def get_bool_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], copy)
def get_numeric_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_numeric], copy)
def combine(self, blocks, copy=True):
""" return a new manager with the blocks """
if len(blocks) == 0:
return self.make_empty()
# FIXME: optimization potential
indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))
inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
new_items = self.items.take(indexer)
new_blocks = []
for b in blocks:
b = b.copy(deep=copy)
b.mgr_locs = com.take_1d(inv_indexer, b.mgr_locs.as_array, axis=0,
allow_fill=False)
new_blocks.append(b)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(new_blocks, new_axes, do_integrity_check=False)
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(slobj)
else:
slicer = [slice(None)] * (axis + 1)
slicer[axis] = slobj
slicer = tuple(slicer)
new_blocks = [blk.getitem_block(slicer) for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
bm = self.__class__(new_blocks, new_axes, do_integrity_check=False,
fastpath=True)
bm._consolidate_inplace()
return bm
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return len(self.blocks)
def copy(self, deep=True):
"""
Make deep or shallow copy of BlockManager
Parameters
----------
        deep : boolean or string, default True
If False, return shallow copy (do not copy data)
If 'all', copy data and a deep copy of the index
Returns
-------
copy : BlockManager
"""
# this preserves the notion of view copying of axes
if deep:
if deep == 'all':
copy = lambda ax: ax.copy(deep=True)
else:
copy = lambda ax: ax.view()
new_axes = [ copy(ax) for ax in self.axes]
else:
new_axes = list(self.axes)
return self.apply('copy', axes=new_axes, deep=deep,
do_integrity_check=False)
def as_matrix(self, items=None):
if len(self.blocks) == 0:
return np.empty(self.shape, dtype=float)
if items is not None:
mgr = self.reindex_axis(items, axis=0)
else:
mgr = self
if self._is_single_block or not self.is_mixed_type:
return mgr.blocks[0].get_values()
else:
return mgr._interleave()
def _interleave(self):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
result = np.empty(self.shape, dtype=dtype)
if result.shape[0] == 0:
# Workaround for numpy 1.7 bug:
#
# >>> a = np.empty((0,10))
# >>> a[slice(0,0)]
# array([], shape=(0, 10), dtype=float64)
# >>> a[[]]
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# IndexError: index 0 is out of bounds for axis 0 with size 0
return result
itemmask = np.zeros(self.shape[0])
for blk in self.blocks:
rl = blk.mgr_locs
result[rl.indexer] = blk.get_values(dtype)
itemmask[rl.indexer] = 1
if not itemmask.all():
raise AssertionError('Some items were not contained in blocks')
return result
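    # Illustrative sketch (not part of the original source): with mixed dtypes
    # as_matrix()/_interleave() materialise one ndarray using the common dtype
    # computed by _interleaved_dtype (object in this case):
    #
    #   >>> import pandas as pd
    #   >>> pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})._data.as_matrix().dtype
    #   dtype('O')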
def xs(self, key, axis=1, copy=True, takeable=False):
if axis < 1:
raise AssertionError('Can only take xs across axis >= 1, got %d'
% axis)
# take by position
if takeable:
loc = key
else:
loc = self.axes[axis].get_loc(key)
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = loc
slicer = tuple(slicer)
new_axes = list(self.axes)
# could be an array indexer!
if isinstance(loc, (slice, np.ndarray)):
new_axes[axis] = new_axes[axis][loc]
else:
new_axes.pop(axis)
new_blocks = []
if len(self.blocks) > 1:
# we must copy here as we are mixed type
for blk in self.blocks:
newb = make_block(values=blk.values[slicer],
klass=blk.__class__, fastpath=True,
placement=blk.mgr_locs)
new_blocks.append(newb)
elif len(self.blocks) == 1:
block = self.blocks[0]
vals = block.values[slicer]
if copy:
vals = vals.copy()
new_blocks = [make_block(values=vals, placement=block.mgr_locs,
klass=block.__class__, fastpath=True,)]
return self.__class__(new_blocks, new_axes)
def fast_xs(self, loc):
"""
get a cross sectional for a given location in the
items ; handle dups
        return the result; it *could* be a view in the case of a
single block
"""
if len(self.blocks) == 1:
return self.blocks[0].values[:, loc]
items = self.items
# non-unique (GH4726)
if not items.is_unique:
result = self._interleave()
if self.ndim == 2:
result = result.T
return result[loc]
# unique
dtype = _interleaved_dtype(self.blocks)
n = len(items)
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
# Such assignment may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
for i, rl in enumerate(blk.mgr_locs):
result[rl] = blk._try_coerce_result(blk.iget((i, loc)))
return result
def consolidate(self):
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
bm = self.__class__(self.blocks, self.axes)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
def _consolidate_inplace(self):
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
def get(self, item, fastpath=True):
"""
Return values for selected item (ndarray or BlockManager).
"""
if self.items.is_unique:
if not isnull(item):
loc = self.items.get_loc(item)
else:
indexer = np.arange(len(self.items))[isnull(self.items)]
# allow a single nan location indexer
if not np.isscalar(indexer):
if len(indexer) == 1:
loc = indexer.item()
else:
raise ValueError("cannot label index with a null key")
return self.iget(loc, fastpath=fastpath)
else:
if isnull(item):
raise ValueError("cannot label index with a null key")
indexer = self.items.get_indexer_for([item])
return self.reindex_indexer(new_axis=self.items[indexer],
indexer=indexer, axis=0, allow_dups=True)
def iget(self, i, fastpath=True):
"""
Return the data as a SingleBlockManager if fastpath=True and possible
Otherwise return as a ndarray
"""
block = self.blocks[self._blknos[i]]
values = block.iget(self._blklocs[i])
if not fastpath or block.is_sparse or values.ndim != 1:
return values
        # fastpath shortcut for selecting a single dim from a 2-dim BM
return SingleBlockManager([ block.make_block_same_class(values,
placement=slice(0, len(values)),
ndim=1,
fastpath=True) ],
self.axes[1])
def get_scalar(self, tup):
"""
Retrieve single item
"""
full_loc = list(ax.get_loc(x)
for ax, x in zip(self.axes, tup))
blk = self.blocks[self._blknos[full_loc[0]]]
full_loc[0] = self._blklocs[full_loc[0]]
# FIXME: this may return non-upcasted types?
return blk.values[tuple(full_loc)]
def delete(self, item):
"""
Delete selected item (items if non-unique) in-place.
"""
indexer = self.items.get_loc(item)
is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
is_deleted[indexer] = True
ref_loc_offset = -is_deleted.cumsum()
is_blk_deleted = [False] * len(self.blocks)
if isinstance(indexer, int):
affected_start = indexer
else:
affected_start = is_deleted.nonzero()[0][0]
for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]):
blk = self.blocks[blkno]
bml = blk.mgr_locs
blk_del = is_deleted[bml.indexer].nonzero()[0]
if len(blk_del) == len(bml):
is_blk_deleted[blkno] = True
continue
elif len(blk_del) != 0:
blk.delete(blk_del)
bml = blk.mgr_locs
blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer])
# FIXME: use Index.delete as soon as it uses fastpath=True
self.axes[0] = self.items[~is_deleted]
self.blocks = tuple(b for blkno, b in enumerate(self.blocks)
if not is_blk_deleted[blkno])
self._shape = None
self._rebuild_blknos_and_blklocs()
def set(self, item, value, check=False):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
if check, then validate that we are not setting the same data in-place
"""
# FIXME: refactor, clearly separate broadcasting & zip-like assignment
# can prob also fix the various if tests for sparse/categorical
value_is_sparse = isinstance(value, SparseArray)
value_is_cat = is_categorical(value)
value_is_nonconsolidatable = value_is_sparse or value_is_cat
if value_is_sparse:
# sparse
assert self.ndim == 2
def value_getitem(placement):
return value
elif value_is_cat:
# categorical
def value_getitem(placement):
return value
else:
if value.ndim == self.ndim - 1:
value = value.reshape((1,) + value.shape)
def value_getitem(placement):
return value
else:
def value_getitem(placement):
return value[placement.indexer]
if value.shape[1:] != self.shape[1:]:
raise AssertionError('Shape of new values must be compatible '
'with manager shape')
try:
loc = self.items.get_loc(item)
except KeyError:
# This item wasn't present, just insert at end
self.insert(len(self.items), item, value)
return
if isinstance(loc, int):
loc = [loc]
blknos = self._blknos[loc]
blklocs = self._blklocs[loc].copy()
unfit_mgr_locs = []
unfit_val_locs = []
removed_blknos = []
for blkno, val_locs in _get_blkno_placements(blknos, len(self.blocks),
group=True):
blk = self.blocks[blkno]
blk_locs = blklocs[val_locs.indexer]
if blk.should_store(value):
blk.set(blk_locs, value_getitem(val_locs), check=check)
else:
unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
unfit_val_locs.append(val_locs)
# If all block items are unfit, schedule the block for removal.
if len(val_locs) == len(blk.mgr_locs):
removed_blknos.append(blkno)
else:
self._blklocs[blk.mgr_locs.indexer] = -1
blk.delete(blk_locs)
self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk))
if len(removed_blknos):
# Remove blocks & update blknos accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
new_blknos = np.empty(self.nblocks, dtype=np.int64)
new_blknos.fill(-1)
new_blknos[~is_deleted] = np.arange(self.nblocks -
len(removed_blknos))
self._blknos = com.take_1d(new_blknos, self._blknos, axis=0,
allow_fill=False)
self.blocks = tuple(blk for i, blk in enumerate(self.blocks)
if i not in set(removed_blknos))
if unfit_val_locs:
unfit_mgr_locs = np.concatenate(unfit_mgr_locs)
unfit_count = len(unfit_mgr_locs)
new_blocks = []
if value_is_nonconsolidatable:
# This code (ab-)uses the fact that sparse blocks contain only
# one item.
new_blocks.extend(
make_block(values=value.copy(), ndim=self.ndim,
placement=slice(mgr_loc, mgr_loc + 1))
for mgr_loc in unfit_mgr_locs)
self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) +
len(self.blocks))
self._blklocs[unfit_mgr_locs] = 0
else:
# unfit_val_locs contains BlockPlacement objects
unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])
new_blocks.append(
make_block(values=value_getitem(unfit_val_items),
ndim=self.ndim, placement=unfit_mgr_locs))
self._blknos[unfit_mgr_locs] = len(self.blocks)
self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
def insert(self, loc, item, value, allow_duplicates=False):
"""
Insert item at selected position.
Parameters
----------
loc : int
item : hashable
value : array_like
allow_duplicates: bool
If False, trying to insert non-unique item will raise
"""
if not allow_duplicates and item in self.items:
# Should this be a different kind of error??
raise ValueError('cannot insert %s, already exists' % item)
if not isinstance(loc, int):
raise TypeError("loc must be int")
block = make_block(values=value,
ndim=self.ndim,
placement=slice(loc, loc+1))
for blkno, count in _fast_count_smallints(self._blknos[loc:]):
blk = self.blocks[blkno]
if count == len(blk.mgr_locs):
blk.mgr_locs = blk.mgr_locs.add(1)
else:
new_mgr_locs = blk.mgr_locs.as_array.copy()
new_mgr_locs[new_mgr_locs >= loc] += 1
blk.mgr_locs = new_mgr_locs
if loc == self._blklocs.shape[0]:
# np.append is a lot faster (at least in numpy 1.7.1), let's use it
# if we can.
self._blklocs = np.append(self._blklocs, 0)
self._blknos = np.append(self._blknos, len(self.blocks))
else:
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, len(self.blocks))
self.axes[0] = self.items.insert(loc, item)
self.blocks += (block,)
self._shape = None
self._known_consolidated = False
if len(self.blocks) > 100:
self._consolidate_inplace()
def reindex_axis(self, new_index, axis, method=None, limit=None,
fill_value=None, copy=True):
"""
Conform block manager to new index.
"""
new_index = _ensure_index(new_index)
new_index, indexer = self.axes[axis].reindex(
new_index, method=method, limit=limit)
return self.reindex_indexer(new_index, indexer, axis=axis,
fill_value=fill_value, copy=copy)
def reindex_indexer(self, new_axis, indexer, axis, fill_value=None,
allow_dups=False, copy=True):
"""
Parameters
----------
new_axis : Index
indexer : ndarray of int64 or None
axis : int
fill_value : object
allow_dups : bool
pandas-indexer with -1's only.
"""
if indexer is None:
if new_axis is self.axes[axis] and not copy:
return self
result = self.copy(deep=copy)
result.axes = list(self.axes)
result.axes[axis] = new_axis
return result
self._consolidate_inplace()
# some axes don't allow reindexing with dups
if not allow_dups:
self.axes[axis]._can_reindex(indexer)
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(
indexer, fill_tuple=(fill_value,))
else:
new_blocks = [blk.take_nd(indexer, axis=axis,
fill_tuple=(fill_value if fill_value is not None else
blk.fill_value,))
for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axis
return self.__class__(new_blocks, new_axes)
def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
"""
Slice/take blocks along axis=0.
Overloaded for SingleBlock
Returns
-------
new_blocks : list of Block
"""
allow_fill = fill_tuple is not None
sl_type, slobj, sllen = _preprocess_slice_or_indexer(
slice_or_indexer, self.shape[0], allow_fill=allow_fill)
if self._is_single_block:
blk = self.blocks[0]
if sl_type in ('slice', 'mask'):
return [blk.getitem_block(slobj,
new_mgr_locs=slice(0, sllen))]
elif not allow_fill or self.ndim == 1:
if allow_fill and fill_tuple[0] is None:
_, fill_value = com._maybe_promote(blk.dtype)
fill_tuple = (fill_value,)
return [blk.take_nd(slobj, axis=0,
new_mgr_locs=slice(0, sllen),
fill_tuple=fill_tuple)]
if sl_type in ('slice', 'mask'):
blknos = self._blknos[slobj]
blklocs = self._blklocs[slobj]
else:
blknos = com.take_1d(self._blknos, slobj, fill_value=-1,
allow_fill=allow_fill)
blklocs = com.take_1d(self._blklocs, slobj, fill_value=-1,
allow_fill=allow_fill)
# When filling blknos, make sure blknos is updated before appending to
# blocks list, that way new blkno is exactly len(blocks).
#
# FIXME: mgr_groupby_blknos must return mgr_locs in ascending order,
# pytables serialization will break otherwise.
blocks = []
for blkno, mgr_locs in _get_blkno_placements(blknos, len(self.blocks),
group=True):
if blkno == -1:
# If we've got here, fill_tuple was not None.
fill_value = fill_tuple[0]
blocks.append(self._make_na_block(
placement=mgr_locs, fill_value=fill_value))
else:
blk = self.blocks[blkno]
# Otherwise, slicing along items axis is necessary.
if not blk._can_consolidate:
# A non-consolidatable block, it's easy, because there's only one item
# and each mgr loc is a copy of that single item.
for mgr_loc in mgr_locs:
newblk = blk.copy(deep=True)
newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
blocks.append(newblk)
else:
blocks.append(blk.take_nd(
blklocs[mgr_locs.indexer], axis=0,
new_mgr_locs=mgr_locs, fill_tuple=None))
return blocks
def _make_na_block(self, placement, fill_value=None):
# TODO: infer dtypes other than float64 from fill_value
if fill_value is None:
fill_value = np.nan
block_shape = list(self.shape)
block_shape[0] = len(placement)
dtype, fill_value = com._infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
return make_block(block_values, placement=placement)
def take(self, indexer, axis=1, verify=True, convert=True):
"""
Take items along any axis.
"""
self._consolidate_inplace()
indexer = np.arange(indexer.start, indexer.stop, indexer.step,
dtype='int64') if isinstance(indexer, slice) \
else np.asanyarray(indexer, dtype='int64')
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).any():
raise Exception('Indices must be nonzero and less than '
'the axis length')
new_labels = self.axes[axis].take(indexer)
return self.reindex_indexer(new_axis=new_labels, indexer=indexer,
axis=axis, allow_dups=True)
def merge(self, other, lsuffix='', rsuffix=''):
if not self._is_indexed_like(other):
raise AssertionError('Must have same axes to merge managers')
l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix,
right=other.items, rsuffix=rsuffix)
new_items = _concat_indexes([l, r])
new_blocks = [blk.copy(deep=False)
for blk in self.blocks]
offset = self.shape[0]
for blk in other.blocks:
blk = blk.copy(deep=False)
blk.mgr_locs = blk.mgr_locs.add(offset)
new_blocks.append(blk)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(_consolidate(new_blocks), new_axes)
def _is_indexed_like(self, other):
"""
Check all axes except items
"""
if self.ndim != other.ndim:
raise AssertionError(('Number of dimensions must agree '
'got %d and %d') % (self.ndim, other.ndim))
for ax, oax in zip(self.axes[1:], other.axes[1:]):
if not ax.equals(oax):
return False
return True
def equals(self, other):
self_axes, other_axes = self.axes, other.axes
if len(self_axes) != len(other_axes):
return False
        if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
return False
self._consolidate_inplace()
other._consolidate_inplace()
if len(self.blocks) != len(other.blocks):
return False
# canonicalize block order, using a tuple combining the type
# name and then mgr_locs because there might be unconsolidated
# blocks (say, Categorical) which can only be distinguished by
# the iteration order
def canonicalize(block):
return (block.dtype.name, block.mgr_locs.as_array.tolist())
self_blocks = sorted(self.blocks, key=canonicalize)
other_blocks = sorted(other.blocks, key=canonicalize)
return all(block.equals(oblock) for block, oblock in
zip(self_blocks, other_blocks))
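# Illustrative sketch (not part of the original source): equals() first
# compares the axes, then the consolidated blocks in a canonical order, so a
# manager always compares equal to a copy of itself regardless of how its
# blocks happen to be laid out (``_data`` is an internal accessor):
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({'a': [1, 2], 'b': [1.5, 2.5]})
#   >>> df._data.equals(df.copy()._data)
#   True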
class SingleBlockManager(BlockManager):
""" manage a single block with """
ndim = 1
_is_consolidated = True
_known_consolidated = True
__slots__ = ()
def __init__(self, block, axis, do_integrity_check=False, fastpath=False):
if isinstance(axis, list):
if len(axis) != 1:
raise ValueError(
"cannot create SingleBlockManager with more than 1 axis")
axis = axis[0]
# passed from constructor, single block, single axis
if fastpath:
self.axes = [axis]
if isinstance(block, list):
# empty block
if len(block) == 0:
block = [np.array([])]
elif len(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
else:
self.axes = [_ensure_index(axis)]
# create the block here
if isinstance(block, list):
# provide consolidation to the interleaved_dtype
if len(block) > 1:
dtype = _interleaved_dtype(block)
block = [b.astype(dtype) for b in block]
block = _consolidate(block)
if len(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
if not isinstance(block, Block):
block = make_block(block,
placement=slice(0, len(axis)),
ndim=1, fastpath=True)
self.blocks = [block]
def _post_setstate(self):
pass
@property
def _block(self):
return self.blocks[0]
@property
def _values(self):
return self._block.values
def reindex(self, new_axis, indexer=None, method=None, fill_value=None,
limit=None, copy=True):
# if we are the same and don't copy, just return
if self.index.equals(new_axis):
if copy:
return self.copy(deep=True)
else:
return self
values = self._block.get_values()
if indexer is None:
indexer = self.items.get_indexer_for(new_axis)
if fill_value is None:
# FIXME: is fill_value used correctly in sparse blocks?
if not self._block.is_sparse:
fill_value = self._block.fill_value
else:
fill_value = np.nan
new_values = com.take_1d(values, indexer,
fill_value=fill_value)
# fill if needed
if method is not None or limit is not None:
new_values = com.interpolate_2d(new_values, method=method,
limit=limit, fill_value=fill_value)
if self._block.is_sparse:
make_block = self._block.make_block_same_class
block = make_block(new_values, copy=copy,
placement=slice(0, len(new_axis)))
mgr = SingleBlockManager(block, new_axis)
mgr._consolidate_inplace()
return mgr
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
return self.__class__(self._block._slice(slobj),
self.index[slobj], fastpath=True)
@property
def index(self):
return self.axes[0]
def convert(self, **kwargs):
""" convert the whole block as one """
kwargs['by_item'] = False
return self.apply('convert', **kwargs)
@property
def dtype(self):
return self._values.dtype
@property
def array_dtype(self):
return self._block.array_dtype
@property
def ftype(self):
return self._block.ftype
def get_dtype_counts(self):
return {self.dtype.name: 1}
def get_ftype_counts(self):
return {self.ftype: 1}
def get_dtypes(self):
return np.array([self._block.dtype])
def get_ftypes(self):
return np.array([self._block.ftype])
@property
def values(self):
return self._values.view()
def get_values(self):
""" return a dense type view """
        return np.array(self._block.to_dense(), copy=False)
@property
def itemsize(self):
return self._values.itemsize
@property
def _can_hold_na(self):
return self._block._can_hold_na
def is_consolidated(self):
return True
def _consolidate_check(self):
pass
def _consolidate_inplace(self):
pass
def delete(self, item):
"""
Delete single item from SingleBlockManager.
Ensures that self.blocks doesn't become empty.
"""
loc = self.items.get_loc(item)
self._block.delete(loc)
self.axes[0] = self.axes[0].delete(loc)
def fast_xs(self, loc):
"""
fast path for getting a cross-section
return a view of the data
"""
return self._block.values[loc]
def construction_error(tot_items, block_shape, axes, e=None):
""" raise a helpful message about our construction """
passed = tuple(map(int, [tot_items] + list(block_shape)))
implied = tuple(map(int, [len(ax) for ax in axes]))
if passed == implied and e is not None:
raise e
raise ValueError("Shape of passed values is {0}, indices imply {1}".format(
        passed, implied))
def create_block_manager_from_blocks(blocks, axes):
try:
if len(blocks) == 1 and not isinstance(blocks[0], Block):
# if blocks[0] is of length 0, return empty blocks
if not len(blocks[0]):
blocks = []
else:
# It's OK if a single block is passed as values, its placement is
# basically "all items", but if there're many, don't bother
# converting, it's an error anyway.
blocks = [make_block(values=blocks[0],
placement=slice(0, len(axes[0])))]
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError) as e:
blocks = [getattr(b, 'values', b) for b in blocks]
tot_items = sum(b.shape[0] for b in blocks)
construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(arrays, names, axes):
try:
blocks = form_blocks(arrays, names, axes)
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError) as e:
construction_error(len(arrays), arrays[0].shape, axes, e)
def form_blocks(arrays, names, axes):
# put "leftover" items in float bucket, where else?
# generalize?
float_items = []
complex_items = []
int_items = []
bool_items = []
object_items = []
sparse_items = []
datetime_items = []
cat_items = []
extra_locs = []
names_idx = Index(names)
if names_idx.equals(axes[0]):
names_indexer = np.arange(len(names_idx))
else:
assert names_idx.intersection(axes[0]).is_unique
names_indexer = names_idx.get_indexer_for(axes[0])
for i, name_idx in enumerate(names_indexer):
if name_idx == -1:
extra_locs.append(i)
continue
k = names[name_idx]
v = arrays[name_idx]
if isinstance(v, (SparseArray, ABCSparseSeries)):
sparse_items.append((i, k, v))
elif issubclass(v.dtype.type, np.floating):
float_items.append((i, k, v))
elif issubclass(v.dtype.type, np.complexfloating):
complex_items.append((i, k, v))
elif issubclass(v.dtype.type, np.datetime64):
if v.dtype != _NS_DTYPE:
v = tslib.cast_to_nanoseconds(v)
if hasattr(v, 'tz') and v.tz is not None:
object_items.append((i, k, v))
else:
datetime_items.append((i, k, v))
elif issubclass(v.dtype.type, np.integer):
if v.dtype == np.uint64:
# HACK #2355 definite overflow
if (v > 2 ** 63 - 1).any():
object_items.append((i, k, v))
continue
int_items.append((i, k, v))
elif v.dtype == np.bool_:
bool_items.append((i, k, v))
elif is_categorical(v):
cat_items.append((i, k, v))
else:
object_items.append((i, k, v))
blocks = []
if len(float_items):
float_blocks = _multi_blockify(float_items)
blocks.extend(float_blocks)
if len(complex_items):
complex_blocks = _simple_blockify(
complex_items, np.complex128)
blocks.extend(complex_blocks)
if len(int_items):
int_blocks = _multi_blockify(int_items)
blocks.extend(int_blocks)
if len(datetime_items):
datetime_blocks = _simple_blockify(
datetime_items, _NS_DTYPE)
blocks.extend(datetime_blocks)
if len(bool_items):
bool_blocks = _simple_blockify(
bool_items, np.bool_)
blocks.extend(bool_blocks)
if len(object_items) > 0:
object_blocks = _simple_blockify(
object_items, np.object_)
blocks.extend(object_blocks)
if len(sparse_items) > 0:
sparse_blocks = _sparse_blockify(sparse_items)
blocks.extend(sparse_blocks)
if len(cat_items) > 0:
cat_blocks = [ make_block(array,
klass=CategoricalBlock,
fastpath=True,
placement=[i]
) for i, names, array in cat_items ]
blocks.extend(cat_blocks)
if len(extra_locs):
shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:])
# empty items -> dtype object
block_values = np.empty(shape, dtype=object)
block_values.fill(np.nan)
na_block = make_block(block_values, placement=extra_locs)
blocks.append(na_block)
return blocks
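# Illustrative sketch (not part of the original source): form_blocks groups
# the input columns by dtype, so two float columns share a single 2 x N
# FloatBlock while the int column gets its own IntBlock (``_data`` is an
# internal, version-specific accessor):
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({'x': [1.0, 2.0], 'y': [3.0, 4.0], 'z': [1, 2]})
#   >>> sorted((type(b).__name__, b.shape[0]) for b in df._data.blocks)
#   [('FloatBlock', 2), ('IntBlock', 1)]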
def _simple_blockify(tuples, dtype):
""" return a single array of a block that has a single dtype; if dtype is
not None, coerce to this dtype
"""
values, placement = _stack_arrays(tuples, dtype)
# CHECK DTYPE?
if dtype is not None and values.dtype != dtype: # pragma: no cover
values = values.astype(dtype)
block = make_block(values, placement=placement)
return [block]
def _multi_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes """
# group by dtype
grouper = itertools.groupby(tuples, lambda x: x[2].dtype)
new_blocks = []
for dtype, tup_block in grouper:
values, placement = _stack_arrays(
list(tup_block), dtype)
block = make_block(values, placement=placement)
new_blocks.append(block)
return new_blocks
def _sparse_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes (and
are sparse)
"""
new_blocks = []
for i, names, array in tuples:
array = _maybe_to_sparse(array)
block = make_block(
array, klass=SparseBlock, fastpath=True,
placement=[i])
new_blocks.append(block)
return new_blocks
def _stack_arrays(tuples, dtype):
# fml
def _asarray_compat(x):
if isinstance(x, ABCSeries):
return x.values
else:
return np.asarray(x)
def _shape_compat(x):
if isinstance(x, ABCSeries):
return len(x),
else:
return x.shape
placement, names, arrays = zip(*tuples)
first = arrays[0]
shape = (len(arrays),) + _shape_compat(first)
stacked = np.empty(shape, dtype=dtype)
for i, arr in enumerate(arrays):
stacked[i] = _asarray_compat(arr)
return stacked, placement
def _interleaved_dtype(blocks):
if not len(blocks):
return None
counts = defaultdict(lambda: [])
for x in blocks:
counts[type(x)].append(x)
def _lcd_dtype(l):
""" find the lowest dtype that can accomodate the given types """
m = l[0].dtype
for x in l[1:]:
if x.dtype.itemsize > m.itemsize:
m = x.dtype
return m
have_int = len(counts[IntBlock]) > 0
have_bool = len(counts[BoolBlock]) > 0
have_object = len(counts[ObjectBlock]) > 0
have_float = len(counts[FloatBlock]) > 0
have_complex = len(counts[ComplexBlock]) > 0
have_dt64 = len(counts[DatetimeBlock]) > 0
have_td64 = len(counts[TimeDeltaBlock]) > 0
have_cat = len(counts[CategoricalBlock]) > 0
have_sparse = len(counts[SparseBlock]) > 0
have_numeric = have_float or have_complex or have_int
has_non_numeric = have_dt64 or have_td64 or have_cat
if (have_object or
(have_bool and (have_numeric or have_dt64 or have_td64)) or
(have_numeric and has_non_numeric) or
have_cat or
have_dt64 or
have_td64):
return np.dtype(object)
elif have_bool:
return np.dtype(bool)
elif have_int and not have_float and not have_complex:
# if we are mixing unsigned and signed, then return
# the next biggest int type (if we can)
lcd = _lcd_dtype(counts[IntBlock])
kinds = set([i.dtype.kind for i in counts[IntBlock]])
if len(kinds) == 1:
return lcd
if lcd == 'uint64' or lcd == 'int64':
return np.dtype('int64')
        # return the next biggest itemsize if unsigned
if lcd.kind == 'u':
return np.dtype('int%s' % (lcd.itemsize * 8 * 2))
return lcd
elif have_complex:
return np.dtype('c16')
else:
return _lcd_dtype(counts[FloatBlock] + counts[SparseBlock])
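# Illustrative sketch (not part of the original source): two of the upcasting
# rules above, observed through the dtype of a mixed frame's .values:
#
#   >>> import pandas as pd
#   >>> pd.DataFrame({'a': [1, 2], 'b': [1.5, 2.5]}).values.dtype
#   dtype('float64')
#   >>> pd.DataFrame({'a': [1, 2], 'b': [True, False]}).values.dtype
#   dtype('O')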
def _consolidate(blocks):
"""
Merge blocks having same dtype, exclude non-consolidating blocks
"""
# sort by _can_consolidate, dtype
gkey = lambda x: x._consolidate_key
grouper = itertools.groupby(sorted(blocks, key=gkey), gkey)
new_blocks = []
for (_can_consolidate, dtype), group_blocks in grouper:
merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype,
_can_consolidate=_can_consolidate)
if isinstance(merged_blocks, list):
new_blocks.extend(merged_blocks)
else:
new_blocks.append(merged_blocks)
return new_blocks
def _merge_blocks(blocks, dtype=None, _can_consolidate=True):
if len(blocks) == 1:
return blocks[0]
if _can_consolidate:
if dtype is None:
if len(set([b.dtype for b in blocks])) != 1:
raise AssertionError("_merge_blocks are invalid!")
dtype = blocks[0].dtype
# FIXME: optimization potential in case all mgrs contain slices and
# combination of those slices is a slice, too.
new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
new_values = _vstack([b.values for b in blocks], dtype)
argsort = np.argsort(new_mgr_locs)
new_values = new_values[argsort]
new_mgr_locs = new_mgr_locs[argsort]
return make_block(new_values,
fastpath=True, placement=new_mgr_locs)
# no merge
return blocks
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim <= ndim:
if shape is None:
shape = values.shape
values = values.reshape(tuple((1,) + shape))
return values
def _vstack(to_stack, dtype):
# work around NumPy 1.6 bug
if dtype == _NS_DTYPE or dtype == _TD_DTYPE:
new_values = np.vstack([x.view('i8') for x in to_stack])
return new_values.view(dtype)
else:
return np.vstack(to_stack)
def _possibly_compare(a, b, op):
res = op(a, b)
is_a_array = isinstance(a, np.ndarray)
is_b_array = isinstance(b, np.ndarray)
if np.isscalar(res) and (is_a_array or is_b_array):
type_names = [type(a).__name__, type(b).__name__]
if is_a_array:
type_names[0] = 'ndarray(dtype=%s)' % a.dtype
if is_b_array:
type_names[1] = 'ndarray(dtype=%s)' % b.dtype
raise TypeError("Cannot compare types %r and %r" % tuple(type_names))
return res
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
def _block2d_to_blocknd(values, placement, shape, labels, ref_items):
""" pivot to the labels shape """
from pandas.core.internals import make_block
panel_shape = (len(placement),) + shape
# TODO: lexsort depth needs to be 2!!
# Create observation selection vector using major and minor
# labels, for converting to panel format.
selector = _factor_indexer(shape[1:], labels)
mask = np.zeros(np.prod(shape), dtype=bool)
mask.put(selector, True)
if mask.all():
pvalues = np.empty(panel_shape, dtype=values.dtype)
else:
dtype, fill_value = _maybe_promote(values.dtype)
pvalues = np.empty(panel_shape, dtype=dtype)
pvalues.fill(fill_value)
values = values
for i in range(len(placement)):
pvalues[i].flat[mask] = values[:, i]
return make_block(pvalues, placement=placement)
def _factor_indexer(shape, labels):
"""
given a tuple of shape and a list of Categorical labels, return the
expanded label indexer
"""
mult = np.array(shape)[::-1].cumprod()[::-1]
return com._ensure_platform_int(
np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T)
def _get_blkno_placements(blknos, blk_count, group=True):
"""
Parameters
----------
blknos : array of int64
blk_count : int
group : bool
Returns
-------
iterator
yield (BlockPlacement, blkno)
"""
blknos = com._ensure_int64(blknos)
# FIXME: blk_count is unused, but it may avoid the use of dicts in cython
for blkno, indexer in lib.get_blkno_indexers(blknos, group):
yield blkno, BlockPlacement(indexer)
def items_overlap_with_suffix(left, lsuffix, right, rsuffix):
"""
If two indices overlap, add suffixes to overlapping entries.
If corresponding suffix is empty, the entry is simply converted to string.
"""
to_rename = left.intersection(right)
if len(to_rename) == 0:
return left, right
else:
if not lsuffix and not rsuffix:
raise ValueError('columns overlap but no suffix specified: %s' %
to_rename)
def lrenamer(x):
if x in to_rename:
return '%s%s' % (x, lsuffix)
return x
def rrenamer(x):
if x in to_rename:
return '%s%s' % (x, rsuffix)
return x
return (_transform_index(left, lrenamer),
_transform_index(right, rrenamer))
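# Illustrative sketch (added): overlapping entries get the given suffixes,
# non-overlapping entries are left untouched. The Index values and suffixes
# below are assumptions for the demo only.
def _demo_items_overlap_with_suffix():
    left = Index(['a', 'b'])
    right = Index(['b', 'c'])
    # returns (Index(['a', 'b_x']), Index(['b_y', 'c']))
    return items_overlap_with_suffix(left, '_x', right, '_y')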
def _transform_index(index, func):
"""
Apply function to all values found in index.
This includes transforming multiindex entries separately.
"""
if isinstance(index, MultiIndex):
items = [tuple(func(y) for y in x) for x in index]
return MultiIndex.from_tuples(items, names=index.names)
else:
items = [func(x) for x in index]
return Index(items, name=index.name)
def _putmask_smart(v, m, n):
"""
Return a new block, try to preserve dtype if possible.
Parameters
----------
v : `values`, updated in-place (array like)
m : `mask`, applies to both sides (array like)
n : `new values` either scalar or an array like aligned with `values`
"""
# n should be the length of the mask or a scalar here
if not is_list_like(n):
n = np.array([n] * len(m))
elif isinstance(n, np.ndarray) and n.ndim == 0: # numpy scalar
n = np.repeat(np.array(n, ndmin=1), len(m))
    # see if we are only masking values that, if put there,
# will work in the current dtype
try:
nn = n[m]
nn_at = nn.astype(v.dtype)
comp = (nn == nn_at)
if is_list_like(comp) and comp.all():
nv = v.copy()
nv[m] = nn_at
return nv
except (ValueError, IndexError, TypeError):
pass
# change the dtype
dtype, _ = com._maybe_promote(n.dtype)
nv = v.astype(dtype)
try:
nv[m] = n[m]
except ValueError:
idx, = np.where(np.squeeze(m))
for mask_index, new_val in zip(idx, n[m]):
nv[mask_index] = new_val
return nv
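# Illustrative sketch (added): _putmask_smart keeps the original dtype when
# the new values fit it, and upcasts otherwise. The arrays below are
# assumptions for the demo only.
def _demo_putmask_smart():
    v = np.array([1, 2, 3, 4])                 # integer values
    m = np.array([False, True, False, True])   # mask
    kept = _putmask_smart(v, m, np.array([10, 20, 30, 40]))        # stays integer
    upcast = _putmask_smart(v, m, np.array([0.5, 1.5, 2.5, 3.5]))  # becomes float64
    return kept, upcast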
def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy):
"""
Concatenate block managers into one.
Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
"""
concat_plan = combine_concat_plans([get_mgr_concatenation_plan(mgr, indexers)
for mgr, indexers in mgrs_indexers],
concat_axis)
blocks = [make_block(concatenate_join_units(join_units, concat_axis,
copy=copy),
placement=placement)
for placement, join_units in concat_plan]
return BlockManager(blocks, axes)
def get_empty_dtype_and_na(join_units):
"""
Return dtype and N/A values to use when concatenating specified units.
Returned N/A value may be None which means there was no casting involved.
Returns
-------
dtype
na
"""
if len(join_units) == 1:
blk = join_units[0].block
if blk is None:
return np.float64, np.nan
has_none_blocks = False
dtypes = [None] * len(join_units)
for i, unit in enumerate(join_units):
if unit.block is None:
has_none_blocks = True
else:
dtypes[i] = unit.dtype
# dtypes = set()
upcast_classes = set()
null_upcast_classes = set()
for dtype, unit in zip(dtypes, join_units):
if dtype is None:
continue
if com.is_categorical_dtype(dtype):
upcast_cls = 'category'
elif issubclass(dtype.type, np.bool_):
upcast_cls = 'bool'
elif issubclass(dtype.type, np.object_):
upcast_cls = 'object'
elif is_datetime64_dtype(dtype):
upcast_cls = 'datetime'
elif is_timedelta64_dtype(dtype):
upcast_cls = 'timedelta'
else:
upcast_cls = 'float'
# Null blocks should not influence upcast class selection, unless there
# are only null blocks, when same upcasting rules must be applied to
# null upcast classes.
if unit.is_null:
null_upcast_classes.add(upcast_cls)
else:
upcast_classes.add(upcast_cls)
if not upcast_classes:
upcast_classes = null_upcast_classes
# create the result
if 'object' in upcast_classes:
return np.dtype(np.object_), np.nan
elif 'bool' in upcast_classes:
if has_none_blocks:
return np.dtype(np.object_), np.nan
else:
return np.dtype(np.bool_), None
elif 'category' in upcast_classes:
return com.CategoricalDtype(), np.nan
elif 'float' in upcast_classes:
return np.dtype(np.float64), np.nan
elif 'datetime' in upcast_classes:
return np.dtype('M8[ns]'), tslib.iNaT
elif 'timedelta' in upcast_classes:
return np.dtype('m8[ns]'), tslib.iNaT
else: # pragma
raise AssertionError("invalid dtype determination in get_concat_dtype")
def concatenate_join_units(join_units, concat_axis, copy):
"""
Concatenate values from several join units along selected axis.
"""
if concat_axis == 0 and len(join_units) > 1:
# Concatenating join units along ax0 is handled in _merge_blocks.
raise AssertionError("Concatenating join units along axis0")
empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units)
to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype,
upcasted_na=upcasted_na)
for ju in join_units]
if len(to_concat) == 1:
# Only one block, nothing to concatenate.
concat_values = to_concat[0]
if copy and concat_values.base is not None:
concat_values = concat_values.copy()
else:
concat_values = com._concat_compat(to_concat, axis=concat_axis)
return concat_values
def get_mgr_concatenation_plan(mgr, indexers):
"""
Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
    # Calculate post-reindex shape, save for item axis which will be separate
# for each block anyway.
mgr_shape = list(mgr.shape)
for ax, indexer in indexers.items():
mgr_shape[ax] = len(indexer)
mgr_shape = tuple(mgr_shape)
if 0 in indexers:
ax0_indexer = indexers.pop(0)
blknos = com.take_1d(mgr._blknos, ax0_indexer, fill_value=-1)
blklocs = com.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1)
else:
if mgr._is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
ax0_indexer = None
blknos = mgr._blknos
blklocs = mgr._blklocs
plan = []
for blkno, placements in _get_blkno_placements(blknos, len(mgr.blocks),
group=False):
assert placements.is_slice_like
join_unit_indexers = indexers.copy()
shape = list(mgr_shape)
shape[0] = len(placements)
shape = tuple(shape)
if blkno == -1:
unit = JoinUnit(None, shape)
else:
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexing = (
len(placements) == len(blk.mgr_locs) and
# Fastpath detection of join unit not needing to reindex its
# block: no ax0 reindexing took place and block placement was
# sequential before.
((ax0_indexer is None
and blk.mgr_locs.is_slice_like
and blk.mgr_locs.as_slice.step == 1) or
# Slow-ish detection: all indexer locs are sequential (and
# length match is checked above).
(np.diff(ax0_blk_indexer) == 1).all()))
# Omit indexer if no item reindexing is required.
if unit_no_ax0_reindexing:
join_unit_indexers.pop(0, None)
else:
join_unit_indexers[0] = ax0_blk_indexer
unit = JoinUnit(blk, shape, join_unit_indexers)
plan.append((placements, unit))
return plan
def combine_concat_plans(plans, concat_axis):
"""
Combine multiple concatenation plans into one.
existing_plan is updated in-place.
"""
if len(plans) == 1:
for p in plans[0]:
yield p[0], [p[1]]
elif concat_axis == 0:
offset = 0
for plan in plans:
last_plc = None
for plc, unit in plan:
yield plc.add(offset), [unit]
last_plc = plc
if last_plc is not None:
offset += last_plc.as_slice.stop
else:
num_ended = [0]
def _next_or_none(seq):
retval = next(seq, None)
if retval is None:
num_ended[0] += 1
return retval
plans = list(map(iter, plans))
next_items = list(map(_next_or_none, plans))
while num_ended[0] != len(next_items):
if num_ended[0] > 0:
raise ValueError("Plan shapes are not aligned")
placements, units = zip(*next_items)
lengths = list(map(len, placements))
min_len, max_len = min(lengths), max(lengths)
if min_len == max_len:
yield placements[0], units
next_items[:] = map(_next_or_none, plans)
else:
yielded_placement = None
yielded_units = [None] * len(next_items)
for i, (plc, unit) in enumerate(next_items):
yielded_units[i] = unit
if len(plc) > min_len:
# trim_join_unit updates unit in place, so only
# placement needs to be sliced to skip min_len.
next_items[i] = (plc[min_len:],
trim_join_unit(unit, min_len))
else:
yielded_placement = plc
next_items[i] = _next_or_none(plans[i])
yield yielded_placement, yielded_units
def trim_join_unit(join_unit, length):
"""
Reduce join_unit's shape along item axis to length.
Extra items that didn't fit are returned as a separate block.
"""
if 0 not in join_unit.indexers:
extra_indexers = join_unit.indexers
if join_unit.block is None:
extra_block = None
else:
extra_block = join_unit.block.getitem_block(slice(length, None))
join_unit.block = join_unit.block.getitem_block(slice(length))
else:
extra_block = join_unit.block
extra_indexers = copy.copy(join_unit.indexers)
extra_indexers[0] = extra_indexers[0][length:]
join_unit.indexers[0] = join_unit.indexers[0][:length]
extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
join_unit.shape = (length,) + join_unit.shape[1:]
return JoinUnit(block=extra_block, indexers=extra_indexers,
shape=extra_shape)
class JoinUnit(object):
def __init__(self, block, shape, indexers={}):
# Passing shape explicitly is required for cases when block is None.
self.block = block
self.indexers = indexers
self.shape = shape
def __repr__(self):
return '%s(%r, %s)' % (self.__class__.__name__,
self.block, self.indexers)
@cache_readonly
def needs_filling(self):
for indexer in self.indexers.values():
# FIXME: cache results of indexer == -1 checks.
if (indexer == -1).any():
return True
return False
@cache_readonly
def dtype(self):
if self.block is None:
raise AssertionError("Block is None, no dtype")
if not self.needs_filling:
return self.block.dtype
else:
return com._get_dtype(com._maybe_promote(self.block.dtype,
self.block.fill_value)[0])
@cache_readonly
def is_null(self):
if self.block is None:
return True
if not self.block._can_hold_na:
return False
        # Usually it's enough to check only a small fraction of values to see
        # if a block is NOT null; chunking should help in such cases. The
        # value 1000 was chosen rather arbitrarily.
values_flat = self.block.values.ravel()
total_len = values_flat.shape[0]
chunk_len = max(total_len // 40, 1000)
for i in range(0, total_len, chunk_len):
if not isnull(values_flat[i: i + chunk_len]).all():
return False
return True
@cache_readonly
def needs_block_conversion(self):
""" we might need to convert the joined values to a suitable block repr """
block = self.block
return block is not None and (block.is_sparse or block.is_categorical)
def get_reindexed_values(self, empty_dtype, upcasted_na):
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
values = self.block.get_values()
else:
fill_value = upcasted_na
            if self.is_null and not getattr(self.block, 'is_categorical', None):
missing_arr = np.empty(self.shape, dtype=empty_dtype)
if np.prod(self.shape):
# NumPy 1.6 workaround: this statement gets strange if all
# blocks are of same dtype and some of them are empty:
                    # empty ones are considered "null" so they must be filled,
# but no dtype upcasting happens and the dtype may not
# allow NaNs.
#
# In general, no one should get hurt when one tries to put
# incorrect values into empty array, but numpy 1.6 is
# strict about that.
missing_arr.fill(fill_value)
return missing_arr
if not self.indexers:
if self.block.is_categorical:
# preserve the categoricals for validation in _concat_compat
return self.block.values
elif self.block.is_sparse:
# preserve the sparse array for validation in _concat_compat
return self.block.values
if self.block.is_bool:
# External code requested filling/upcasting, bool values must
# be upcasted to object to avoid being upcasted to numeric.
values = self.block.astype(np.object_).values
else:
# No dtype upcasting is done here, it will be performed during
# concatenation itself.
values = self.block.get_values()
if not self.indexers:
# If there's no indexing to be done, we want to signal outside
# code that this array must be copied explicitly. This is done
# by returning a view and checking `retval.base`.
values = values.view()
else:
for ax, indexer in self.indexers.items():
values = com.take_nd(values, indexer, axis=ax,
fill_value=fill_value)
return values
def _fast_count_smallints(arr):
"""Faster version of set(arr) for sequences of small numbers."""
if len(arr) == 0:
# Handle empty arr case separately: numpy 1.6 chokes on that.
return np.empty((0, 2), dtype=arr.dtype)
else:
counts = np.bincount(arr.astype(np.int_))
nz = counts.nonzero()[0]
return np.c_[nz, counts[nz]]
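# Illustrative sketch (added): the result pairs each distinct value with its
# count. The input array below is an assumption for the demo only.
def _demo_fast_count_smallints():
    arr = np.array([1, 3, 1, 5])
    # returns array([[1, 2], [3, 1], [5, 1]])
    return _fast_count_smallints(arr)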
def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill):
if isinstance(slice_or_indexer, slice):
return 'slice', slice_or_indexer, lib.slice_len(slice_or_indexer,
length)
elif (isinstance(slice_or_indexer, np.ndarray) and
slice_or_indexer.dtype == np.bool_):
return 'mask', slice_or_indexer, slice_or_indexer.sum()
else:
indexer = np.asanyarray(slice_or_indexer, dtype=np.int64)
if not allow_fill:
indexer = maybe_convert_indices(indexer, length)
return 'fancy', indexer, len(indexer)
| mit | 1,726,036,148,876,224,800 | 32.963327 | 134 | 0.537957 | false |
ludbb/secp256k1-py | tests/test_schnorr.py | 1 | 1732 | import pytest
import secp256k1
def test_schnorr_simple():
if not secp256k1.HAS_SCHNORR:
pytest.skip('secp256k1_schnorr not enabled, skipping')
return
inst = secp256k1.PrivateKey()
raw_sig = inst.schnorr_sign(b'hello')
assert inst.pubkey.schnorr_verify(b'hello', raw_sig)
key2 = secp256k1.PrivateKey()
assert not key2.pubkey.schnorr_verify(b'hello', raw_sig)
blank = secp256k1.PublicKey()
pubkey = blank.schnorr_recover(b'hello', raw_sig)
pub = secp256k1.PublicKey(pubkey)
assert pub.serialize() == inst.pubkey.serialize()
def test_schnorr_partial():
if not secp256k1.HAS_SCHNORR:
pytest.skip('secp256k1_schnorr not enabled, skipping')
return
signer1 = secp256k1.PrivateKey()
pubnonce1, privnonce1 = signer1.schnorr_generate_nonce_pair(b'hello')
signer2 = secp256k1.PrivateKey()
pubnonce2, privnonce2 = signer2.schnorr_generate_nonce_pair(b'hello')
# First test partial signatures with only two signers.
partial1 = signer1.schnorr_partial_sign(b'hello', privnonce1, pubnonce2)
partial2 = signer2.schnorr_partial_sign(b'hello', privnonce2, pubnonce1)
blank = secp256k1.PublicKey(flags=secp256k1.NO_FLAGS)
sig = blank.schnorr_partial_combine([partial1, partial2])
# Recover the public key from the combined signature.
pubkey = secp256k1.PublicKey().schnorr_recover(b'hello', sig)
assert blank.public_key is None
# Check that the combined public keys from signer1 and signer2
# match the recovered public key.
blank.combine(
[signer1.pubkey.public_key, signer2.pubkey.public_key])
assert blank.public_key
assert secp256k1.PublicKey(pubkey).serialize() == blank.serialize()
| mit | 3,662,207,017,736,819,700 | 35.083333 | 76 | 0.711894 | false |
Youwotma/portia | slybot/slybot/pageactions.py | 1 | 1528 | import json
import re
LUA_SOURCE = """
function main(splash)
assert(splash:go(splash.args.url))
assert(splash:runjs(splash.args.js_source))
assert(splash:wait_for_resume(splash.args.slybot_actions_source))
splash:set_result_content_type("text/html")
return splash.html()
end
"""
JS_SOURCE = """
function main(splash) {
var events = (%s);
try{
__slybot__performEvents(events, function(){
splash.resume();
});
}catch(e){
splash.error(e);
}
}
"""
def filter_for_url(url):
def _filter(page_action):
accept = page_action.get('accept')
reject = page_action.get('reject')
if reject and re.search(reject, url):
return False
if accept and not re.search(accept, url):
return False
return True
return _filter
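# Illustrative sketch (added): a page action only survives the filter when
# its 'accept'/'reject' patterns match the url. The patterns and urls below
# are assumptions for the demo only.
def _demo_filter_for_url():
    action = {'accept': r'example\.com', 'reject': r'/login'}
    keep = filter_for_url('http://example.com/products')
    drop = filter_for_url('http://example.com/login')
    return keep(action), drop(action)  # (True, False)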
class PageActionsMiddleware(object):
def process_request(self, request, spider):
splash_options = request.meta.get('splash', None)
if not splash_options: # Already processed or JS disabled
return
splash_args = splash_options.get('args', {})
events = spider.page_actions
url = splash_args['url']
events = filter(filter_for_url(url), events)
if len(events):
splash_options['endpoint'] = 'execute'
splash_args.update({
"lua_source": LUA_SOURCE,
"slybot_actions_source": (JS_SOURCE % json.dumps(events)),
})
__all__ = ['PageActionsMiddleware']
| bsd-3-clause | -9,078,013,978,702,002,000 | 26.781818 | 74 | 0.590314 | false |
Erotemic/utool | utool/util_decor.py | 1 | 35459 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from six.moves import builtins
import inspect
import textwrap
import six
import sys
import functools
import os
from utool import util_print
from utool import util_time
from utool import util_iter
from utool import util_dbg
from utool import util_arg
from utool import util_type
from utool import util_inject
from utool._internal import meta_util_six
(print, rrr, profile) = util_inject.inject2(__name__, '[decor]')
if util_type.HAVE_NUMPY:
import numpy as np
# Commandline to toggle certain convenience decorators
SIG_PRESERVE = util_arg.get_argflag('--sigpreserve')
#SIG_PRESERVE = not util_arg.SAFE or util_arg.get_argflag('--sigpreserve')
ONEX_REPORT_INPUT = '--onex-report-input' in sys.argv
#IGNORE_TRACEBACK = '--smalltb' in sys.argv or '--ignoretb' in sys.argv
# FIXME: duplicated in _internal/py2_syntax_funcs
IGNORE_TRACEBACK = not ('--nosmalltb' in sys.argv or '--noignoretb' in sys.argv)
#if util_arg.STRICT:
# IGNORE_TRACEBACK = False
# do not ignore traceback when profiling
PROFILING = hasattr(builtins, 'profile')
UNIQUE_NUMPY = True
NOINDENT_DECOR = False
#os.environ.get('UTOOL_AUTOGEN_SPHINX_RUNNING', 'OFF')
#def composed(*decs):
# """ combines multiple decorators """
# def deco(f):
# for dec in reversed(decs):
# f = dec(f)
# return f
# return deco
def test_ignore_exec_traceback():
r"""
CommandLine:
python -m utool.util_decor --test-test_ignore_exec_traceback
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_decor import * # NOQA
>>> result = test_ignore_exec_traceback()
>>> print(result)
"""
import utool as ut
@ut.indent_func
def foobar():
print('foobar')
        raise AssertionError('This error is expected')
try:
print('printing foobar')
foobar()
except AssertionError as ex:
#import sys
#exc_type, exc_value, exc_traceback = sys.exc_info()
#print(exc_traceback)
# TODO: ensure decorators are not printed in stack trace
ut.printex(ex, 'There is no error. This is a test', tb=True)
if six.PY2:
# Use version that has special python2 only syntax.
# can not include it here for that reason
from utool._internal import py2_syntax_funcs
ignores_exc_tb = py2_syntax_funcs.ignores_exc_tb
else:
def ignores_exc_tb(*args, **kwargs):
"""
PYTHON 3 VERSION
        ignore_exc_tb decorates a function and removes both itself
and the function from any exception traceback that occurs.
This is useful to decorate other trivial decorators
which are polluting your stacktrace.
if IGNORE_TRACEBACK is False then this decorator does nothing
(and it should do nothing in production code!)
References:
https://github.com/jcrocholl/pep8/issues/34 # NOQA
http://legacy.python.org/dev/peps/pep-3109/
"""
outer_wrapper = kwargs.get('outer_wrapper', True)
def ignores_exc_tb_closure(func):
# HACK JUST TURN THIS OFF
return func
if not IGNORE_TRACEBACK:
                # if the global enforces that we should not ignore any tracebacks
                # then just return the original function without any modification
return func
#@wraps(func)
def wrp_noexectb(*args, **kwargs):
try:
#import utool
#if utool.DEBUG:
# print('[IN IGNORETB] args=%r' % (args,))
# print('[IN IGNORETB] kwargs=%r' % (kwargs,))
return func(*args, **kwargs)
except Exception:
# PYTHON 3.3 NEW METHODS
exc_type, exc_value, exc_traceback = sys.exc_info()
# Code to remove this decorator from traceback
# Remove two levels to remove this one as well
exc_type, exc_value, exc_traceback = sys.exc_info()
try:
exc_traceback = exc_traceback.tb_next
exc_traceback = exc_traceback.tb_next
except Exception:
pass
ex = exc_type(exc_value)
ex.__traceback__ = exc_traceback
raise ex
if outer_wrapper:
wrp_noexectb = preserve_sig(wrp_noexectb, func)
return wrp_noexectb
if len(args) == 1:
# called with one arg means its a function call
func = args[0]
return ignores_exc_tb_closure(func)
else:
# called with no args means kwargs as specified
return ignores_exc_tb_closure
# NEW PYTHON 2.7/3 VERSION
#def ignores_exc_tb(*args, **kwargs):
# """
# ignore_exc_tb decorates a function and remove both itself
# and the function from any exception traceback that occurs.
# This is useful to decorate other trivial decorators
# which are polluting your stacktrace.
# if IGNORE_TRACEBACK is False then this decorator does nothing
# (and it should do nothing in production code!)
# References:
# https://github.com/jcrocholl/pep8/issues/34 # NOQA
# http://legacy.python.org/dev/peps/pep-3109/
# """
# outer_wrapper = kwargs.get('outer_wrapper', True)
# def ignores_exc_tb_closure(func):
# if not IGNORE_TRACEBACK:
# # if the global enforces that we should not ignore anytracebacks
# # then just return the original function without any modifcation
# return func
# if six.PY2:
# #python2_func = """
# common_wrp_noexcept_tb = """
# def wrp_noexectb(*args, **kwargs):
# try:
# return func(*args, **kwargs)
# except Exception:
# exc_type, exc_value, exc_traceback = sys.exc_info()
# # Code to remove this decorator from traceback
# # Remove two levels to remove this one as well
# exc_type, exc_value, exc_traceback = sys.exc_info()
# try:
# exc_traceback = exc_traceback.tb_next
# exc_traceback = exc_traceback.tb_next
# except Exception:
# pass
# """
# if six.PY2:
# python2_reraise = """
# raise exc_type, exc_value, exc_traceback
# """
# six_reraise = python2_reraise
# elif six.PY3:
# python3_reraise = """
# ex = exc_type(exc_value)
# ex.__traceback__ = exc_traceback
# raise ex
# """
# six_reraise = python3_reraise
# wrp_noexcept_tb_codeblock = common_wrp_noexcept_tb + six_reraise
# globals_ = globals()
# locals_ = locals()
# six.exec_(wrp_noexcept_tb_codeblock, globals_, locals_)
# wrp_noexectb = locals_['wrp_noexectb']
# if outer_wrapper:
# wrp_noexectb = preserve_sig(wrp_noexectb, func)
# return wrp_noexectb
# if len(args) == 1:
# # called with one arg means its a function call
# func = args[0]
# return ignores_exc_tb_closure(func)
# else:
# # called with no args means kwargs as specified
# return ignores_exc_tb_closure
def on_exception_report_input(func_=None, force=False, keys=None):
"""
If an error is thrown in the scope of this function's stack frame then the
decorated function name and the arguments passed to it will be printed to
the utool print function.
"""
def _closure_onexceptreport(func):
if not ONEX_REPORT_INPUT and not force:
return func
@ignores_exc_tb(outer_wrapper=False)
#@wraps(func)
def wrp_onexceptreport(*args, **kwargs):
try:
#import utool
#if utool.DEBUG:
# print('[IN EXCPRPT] args=%r' % (args,))
# print('[IN EXCPRPT] kwargs=%r' % (kwargs,))
return func(*args, **kwargs)
except Exception as ex:
from utool import util_str
print('ERROR occured! Reporting input to function')
if keys is not None:
from utool import util_inspect
from utool import util_list
from utool import util_dict
argspec = util_inspect.get_func_argspec(func)
in_kwargs_flags = [key in kwargs for key in keys]
kwarg_keys = util_list.compress(keys, in_kwargs_flags)
kwarg_vals = [kwargs.get(key) for key in kwarg_keys]
flags = util_list.not_list(in_kwargs_flags)
arg_keys = util_list.compress(keys, flags)
arg_idxs = [argspec.args.index(key) for key in arg_keys]
num_nodefault = len(argspec.args) - len(argspec.defaults)
default_vals = (([None] * (num_nodefault)) +
list(argspec.defaults))
args_ = list(args) + default_vals[len(args) + 1:]
arg_vals = util_list.take(args_, arg_idxs)
requested_dict = dict(util_list.flatten(
[zip(kwarg_keys, kwarg_vals), zip(arg_keys, arg_vals)]))
print('input dict = ' + util_str.repr4(
util_dict.dict_subset(requested_dict, keys)))
# (print out specific keys only)
pass
arg_strs = ', '.join([repr(util_str.truncate_str(str(arg)))
for arg in args])
kwarg_strs = ', '.join([
util_str.truncate_str('%s=%r' % (key, val))
for key, val in six.iteritems(kwargs)])
msg = ('\nERROR: funcname=%r,\n * args=%s,\n * kwargs=%r\n' % (
meta_util_six.get_funcname(func), arg_strs, kwarg_strs))
msg += ' * len(args) = %r\n' % len(args)
msg += ' * len(kwargs) = %r\n' % len(kwargs)
util_dbg.printex(ex, msg, pad_stdout=True)
raise
wrp_onexceptreport = preserve_sig(wrp_onexceptreport, func)
return wrp_onexceptreport
if func_ is None:
return _closure_onexceptreport
else:
return _closure_onexceptreport(func_)
def debug_function_exceptions(func):
def _wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as ex:
import utool as ut
ut.printex(ex)
import inspect # NOQA
trace = inspect.trace()
locals_ = trace[-1][0].f_locals
print('-- <TRACE LOCALS> --')
for level, t in enumerate(trace[1:]):
frame = t[0]
locals_ = frame.f_locals
local_repr_dict = {key: ut.trunc_repr(val)
for key, val in locals_.items()}
print('LOCALS LEVEL %d' % (level,))
print(ut.repr3(local_repr_dict, strvals=True, nl=1))
print('-- </TRACE LOCALS> --')
#import utool
#utool.embed()
raise
return _wrapper
#class DebugContext(object):
# def __enter__():
# pass
# def __exit__(self, exc_type, exc_value, exc_traceback):
# pass
def _indent_decor(lbl):
"""
does the actual work of indent_func
"""
def closure_indent(func):
if util_arg.TRACE:
@ignores_exc_tb(outer_wrapper=False)
#@wraps(func)
def wrp_indent(*args, **kwargs):
with util_print.Indenter(lbl):
print(' ...trace[in]')
ret = func(*args, **kwargs)
print(' ...trace[out]')
return ret
else:
@ignores_exc_tb(outer_wrapper=False)
#@wraps(func)
def wrp_indent(*args, **kwargs):
with util_print.Indenter(lbl):
ret = func(*args, **kwargs)
return ret
wrp_indent_ = ignores_exc_tb(wrp_indent)
wrp_indent_ = preserve_sig(wrp_indent, func)
return wrp_indent_
return closure_indent
def indent_func(input_):
"""
Takes either no arguments or an alias label
"""
if isinstance(input_, six.string_types):
# A label was specified
lbl = input_
return _indent_decor(lbl)
elif isinstance(input_, (bool, tuple)):
# Allow individually turning of of this decorator
func = input_
return func
else:
# Use the function name as the label
func = input_
lbl = '[' + meta_util_six.get_funcname(func) + ']'
return _indent_decor(lbl)(func)
def tracefunc_xml(func):
"""
Causes output of function to be printed in an XML style block
"""
funcname = meta_util_six.get_funcname(func)
def wrp_tracefunc2(*args, **kwargs):
verbose = kwargs.get('verbose', True)
if verbose:
print('<%s>' % (funcname,))
with util_print.Indenter(' '):
ret = func(*args, **kwargs)
if verbose:
print('</%s>' % (funcname,))
return ret
wrp_tracefunc2_ = ignores_exc_tb(wrp_tracefunc2)
wrp_tracefunc2_ = preserve_sig(wrp_tracefunc2_, func)
return wrp_tracefunc2_
#----------
def accepts_scalar_input(func):
"""
    DEPRECATE in favor of accepts_scalar_input2
    only accepts one input as a vector
accepts_scalar_input is a decorator which expects to be used on class
methods. It lets the user pass either a vector or a scalar to a function,
as long as the function treats everything like a vector. Input and output
is sanitized to the user expected format on return.
Args:
func (func):
Returns:
func: wrp_asi
CommandLine:
python -m utool.util_decor --test-accepts_scalar_input
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_decor import * # NOQA
>>> @accepts_scalar_input
... def foobar(self, list_):
... return [x + 1 for x in list_]
>>> self = None # dummy self because this decorator is for classes
>>> assert 2 == foobar(self, 1)
>>> assert [2, 3] == foobar(self, [1, 2])
"""
#@on_exception_report_input
@ignores_exc_tb(outer_wrapper=False)
#@wraps(func)
def wrp_asi(self, input_, *args, **kwargs):
#if HAVE_PANDAS:
# if isinstance(input_, (pd.DataFrame, pd.Series)):
# input_ = input_.values
if util_iter.isiterable(input_):
# If input is already iterable do default behavior
return func(self, input_, *args, **kwargs)
else:
# If input is scalar, wrap input, execute, and unpack result
#ret = func(self, (input_,), *args, **kwargs)
ret = func(self, [input_], *args, **kwargs)
if ret is not None:
return ret[0]
wrp_asi = preserve_sig(wrp_asi, func)
return wrp_asi
def accepts_scalar_input2(argx_list=[0], outer_wrapper=True):
r"""
FIXME: change to better name. Complete implementation.
used in IBEIS setters
accepts_scalar_input2 is a decorator which expects to be used on class
methods. It lets the user pass either a vector or a scalar to a function,
as long as the function treats everything like a vector. Input and output
is sanitized to the user expected format on return.
Args:
argx_list (list): indexes of args that could be passed in as scalars to
code that operates on lists. Ensures that decorated function gets
the argument as an iterable.
"""
assert isinstance(argx_list, (list, tuple)), (
'accepts_scalar_input2 must be called with argument positions')
def closure_asi2(func):
#@on_exception_report_input
@ignores_exc_tb(outer_wrapper=False)
def wrp_asi2(self, *args, **kwargs):
# Hack in case wrapping a function with varargs
argx_list_ = [argx for argx in argx_list if argx < len(args)]
__assert_param_consistency(args, argx_list_)
if all([util_iter.isiterable(args[ix]) for ix in argx_list_]):
# If input is already iterable do default behavior
return func(self, *args, **kwargs)
else:
# If input is scalar, wrap input, execute, and unpack result
args_wrapped = [(arg,) if ix in argx_list_ else arg
for ix, arg in enumerate(args)]
ret = func(self, *args_wrapped, **kwargs)
if ret is not None:
return ret[0]
if outer_wrapper:
wrp_asi2 = on_exception_report_input(preserve_sig(wrp_asi2, func))
return wrp_asi2
return closure_asi2
def __assert_param_consistency(args, argx_list_):
"""
debugging function for accepts_scalar_input2
checks to make sure all the iterable inputs are of the same length
"""
if util_arg.NO_ASSERTS:
return
if len(argx_list_) == 0:
return True
argx_flags = [util_iter.isiterable(args[argx]) for argx in argx_list_]
try:
assert all([argx_flags[0] == flag for flag in argx_flags]), (
'invalid mixing of iterable and scalar inputs')
except AssertionError as ex:
print('!!! ASSERTION ERROR IN UTIL_DECOR !!!')
for argx in argx_list_:
print('[util_decor] args[%d] = %r' % (argx, args[argx]))
raise ex
def accepts_scalar_input_vector_output(func):
"""
    DEPRECATE IN FAVOR OF accepts_scalar_input2
accepts_scalar_input_vector_output
Notes:
Input: Excpeted Output 1to1 Expected Output 1toM
scalar : 1 x [X]
n element list : [1, 2, 3] [x, y, z] [[X], [Y], [Z]]
1 element list : [1] [x] [[X]]
0 element list : [] [] []
        There seems to be no real issue here. I believe the thing that tripped
        me up was when using SQL and getting multiple columns that returned the
        values inside of an N-tuple, whereas when you get one column you get
        one element inside of a 1-tuple; no, that still makes sense. There was
        something where you couldn't unpack it because it was already
        empty...
"""
@ignores_exc_tb(outer_wrapper=False)
#@wraps(func)
def wrp_asivo(self, input_, *args, **kwargs):
#import utool
#if utool.DEBUG:
# print('[IN SIVO] args=%r' % (args,))
# print('[IN SIVO] kwargs=%r' % (kwargs,))
if util_iter.isiterable(input_):
# If input is already iterable do default behavior
return func(self, input_, *args, **kwargs)
else:
# If input is scalar, wrap input, execute, and unpack result
result = func(self, (input_,), *args, **kwargs)
# The output length could be 0 on a scalar input
if len(result) == 0:
return []
else:
assert len(result) == 1, 'error in asivo'
return result[0]
return wrp_asivo
# TODO: Rename to listget_1to1 1toM etc...
getter_1to1 = accepts_scalar_input
getter_1toM = accepts_scalar_input_vector_output
#----------
def accepts_numpy(func):
""" Allows the first input to be a numpy array and get result in numpy form """
#@ignores_exc_tb
#@wraps(func)
def wrp_accepts_numpy(self, input_, *args, **kwargs):
if not (util_type.HAVE_NUMPY and isinstance(input_, np.ndarray)):
# If the input is not numpy, just call the function
return func(self, input_, *args, **kwargs)
else:
# TODO: use a variant of util_list.unflat_unique_rowid_map
            # If the input is a numpy array, return the output with the same
            # shape as the input
if UNIQUE_NUMPY:
# Remove redundant input (because we are passing it to SQL)
input_list, inverse_unique = np.unique(input_, return_inverse=True)
else:
input_list = input_.flatten()
# Call the function in list format
# TODO: is this necessary?
input_list = input_list.tolist()
output_list = func(self, input_list, *args, **kwargs)
# Put the output back into numpy
if UNIQUE_NUMPY:
# Reconstruct redundant queries
output_arr = np.array(output_list)[inverse_unique]
output_shape = tuple(list(input_.shape) + list(output_arr.shape[1:]))
return np.array(output_arr).reshape(output_shape)
else:
return np.array(output_list).reshape(input_.shape)
wrp_accepts_numpy = preserve_sig(wrp_accepts_numpy, func)
return wrp_accepts_numpy
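# Illustrative sketch (added): a minimal class method wrapped with
# accepts_numpy, assuming numpy is available (np is only bound when
# util_type.HAVE_NUMPY is True). The class and values are demo assumptions.
def _demo_accepts_numpy():
    class _DemoTable(object):
        @accepts_numpy
        def double_all(self, input_list):
            return [x * 2 for x in input_list]
    arr = np.array([[1, 2], [2, 3]])
    # returns array([[2, 4], [4, 6]]) with the same shape as the input
    return _DemoTable().double_all(arr)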
def memoize_nonzero(func):
"""
Memoization decorator for functions taking a nonzero number of arguments.
References:
http://code.activestate.com/recipes/578231-fastest-memoization-decorator
"""
class _memorizer(dict):
def __init__(self, func):
self.func = func
def __call__(self, *args):
return self[args]
def __missing__(self, key):
ret = self[key] = self.func(*key)
return ret
return _memorizer(func)
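# Illustrative sketch (added): repeated calls with the same hashable
# arguments hit the cache, so the wrapped function body runs only once.
def _demo_memoize_nonzero():
    calls = []
    @memoize_nonzero
    def add(a, b):
        calls.append((a, b))
        return a + b
    assert add(1, 2) == 3
    assert add(1, 2) == 3  # second call is served from the cache
    assert len(calls) == 1
    return calls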
def memoize_single(func):
""" Memoization decorator for a function taking a single argument
References:
http://code.activestate.com/recipes/578231-fastest-memoization-decorator
"""
class memodict_single(dict):
def __missing__(self, key):
ret = self[key] = func(key)
return ret
return memodict_single().__getitem__
def memoize_zero(func):
""" Memoization decorator for a function taking no arguments """
wrp_memoize_single = memoize_single(func)
def wrp_memoize_zero():
return wrp_memoize_single(None)
return wrp_memoize_zero
def memoize(func):
"""
simple memoization decorator
References:
https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
Args:
func (function): live python function
Returns:
func:
CommandLine:
python -m utool.util_decor memoize
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_decor import * # NOQA
>>> import utool as ut
>>> closure = {'a': 'b', 'c': 'd'}
>>> incr = [0]
>>> def foo(key):
>>> value = closure[key]
>>> incr[0] += 1
>>> return value
>>> foo_memo = memoize(foo)
>>> assert foo('a') == 'b' and foo('c') == 'd'
>>> assert incr[0] == 2
>>> print('Call memoized version')
>>> assert foo_memo('a') == 'b' and foo_memo('c') == 'd'
>>> assert incr[0] == 4
>>> assert foo_memo('a') == 'b' and foo_memo('c') == 'd'
>>> print('Counter should no longer increase')
>>> assert incr[0] == 4
>>> print('Closure changes result without memoization')
>>> closure = {'a': 0, 'c': 1}
>>> assert foo('a') == 0 and foo('c') == 1
>>> assert incr[0] == 6
>>> assert foo_memo('a') == 'b' and foo_memo('c') == 'd'
"""
cache = func._util_decor_memoize_cache = {}
# @functools.wraps(func)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
memoizer = preserve_sig(memoizer, func)
memoizer.cache = cache
return memoizer
def interested(func):
@indent_func
#@ignores_exc_tb
#@wraps(func)
def wrp_interested(*args, **kwargs):
sys.stdout.write('#\n')
sys.stdout.write('#\n')
sys.stdout.write(
'<!INTERESTED>: ' + meta_util_six.get_funcname(func) + '\n')
print('INTERESTING... ' + (' ' * 30) + ' <----')
return func(*args, **kwargs)
return wrp_interested
def tracefunc(func):
lbl = '[trace.' + meta_util_six.get_funcname(func) + ']'
def wrp_tracefunc(*args, **kwargs):
print(lbl + ' +--- ENTER ---')
with util_print.Indenter(lbl + ' |'):
ret = func(*args, **kwargs)
print(lbl + ' L___ EXIT ____')
return ret
return wrp_tracefunc
def show_return_value(func):
from utool.util_str import func_str
#@wraps(func)
def wrp_show_return_value(*args, **kwargs):
ret = func(*args, **kwargs)
#print('%s(*%r, **%r) returns %r' % (meta_util_six.get_funcname(func), args, kwargs, rv))
print(func_str(func, args, kwargs) + ' -> ret=%r' % (ret,))
return ret
return wrp_show_return_value
def time_func(func):
#@wraps(func)
def wrp_time(*args, **kwargs):
with util_time.Timer(meta_util_six.get_funcname(func)):
return func(*args, **kwargs)
wrp_time = preserve_sig(wrp_time, func)
return wrp_time
#def rename_func(newname):
# import utool as ut
# return ut.partial(ut.set_funcname, newname=newname)
#class copy_argspec(object):
# """
# copy_argspec is a signature modifying decorator.
# Specifically, it copies the signature from `source_func` to the wrapper, and
# the wrapper will call the original function (which should be using *args,
# **kwds). The argspec, docstring, and default values are copied from
# src_func, and __module__ and __dict__ from tgt_func.
# .. References
# http://stackoverflow.com/questions/18625510/how-can-i-programmatically-change-the-argspec-of-a-function-not-in-a-python-de
# """
# def __init__(self, src_func):
# self.argspec = inspect.getargspec(src_func)
# self.src_doc = src_func.__doc__
# self.src_defaults = src_func.func_defaults
# def __call__(self, tgt_func):
# try:
# tgt_argspec = inspect.getargspec(tgt_func)
# need_self = False
# if len(tgt_argspec) > 0 and len(tgt_argspec[0]) > 0 and tgt_argspec[0][0] == 'self':
# need_self = True
# name = tgt_func.__name__
# argspec = self.argspec
# if len(argspec) > 0 and len(argspec[0]) > 0 and argspec[0][0] == 'self':
# need_self = False
# if need_self:
# newargspec = (['self'] + argspec[0],) + argspec[1:]
# else:
# newargspec = argspec
# signature = inspect.formatargspec(formatvalue=lambda val: "",
# *newargspec)[1:-1]
# new_func = (
# 'def _wrapper_({signature}):\n'
# ' return {tgt_func}({signature})'
# ).format(signature=signature, tgt_func='tgt_func')
# evaldict = {'tgt_func' : tgt_func}
# exec new_func in evaldict
# wrapped = evaldict['_wrapper_']
# wrapped.__name__ = name
# wrapped.__doc__ = self.src_doc
# wrapped.func_defaults = self.src_defaults
# wrapped.__module__ = tgt_func.__module__
# wrapped.__dict__ = tgt_func.__dict__
# return wrapped
# except Exception as ex:
# util_dbg.printex(ex, 'error wrapping: %r' % (tgt_func,))
# raise
def lazyfunc(func):
"""
Returns a memcached version of a function
"""
closuremem_ = [{}]
def wrapper(*args, **kwargs):
mem = closuremem_[0]
key = (repr(args), repr(kwargs))
try:
return mem[key]
except KeyError:
mem[key] = func(*args, **kwargs)
return mem[key]
return wrapper
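# Illustrative sketch (added): lazyfunc caches on the repr of the arguments,
# so the decorated function body runs once per distinct call signature.
def _demo_lazyfunc():
    counter = [0]
    @lazyfunc
    def slow_square(x):
        counter[0] += 1
        return x * x
    assert slow_square(3) == 9
    assert slow_square(3) == 9  # served from the closure cache
    assert counter[0] == 1
    return counter[0]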
def apply_docstr(docstr_func):
"""
    Changes the docstr of one function to that of another
"""
def docstr_applier(func):
#docstr = meta_util_six.get_funcdoc(docstr_func)
#meta_util_six.set_funcdoc(func, docstr)
if isinstance(docstr_func, six.string_types):
olddoc = meta_util_six.get_funcdoc(func)
if olddoc is None:
olddoc = ''
newdoc = olddoc + docstr_func
meta_util_six.set_funcdoc(func, newdoc)
return func
else:
preserved_func = preserve_sig(func, docstr_func)
return preserved_func
return docstr_applier
def preserve_sig(wrapper, orig_func, force=False):
"""
Decorates a wrapper function.
    It seems impossible to preserve signatures in python 2 without eval
(Maybe another option is to write to a temporary module?)
Args:
wrapper: the function wrapping orig_func to change the signature of
orig_func: the original function to take the signature from
References:
http://emptysqua.re/blog/copying-a-python-functions-signature/
https://code.google.com/p/micheles/source/browse/decorator/src/decorator.py
TODO:
checkout funcsigs
https://funcsigs.readthedocs.org/en/latest/
CommandLine:
python -m utool.util_decor --test-preserve_sig
Example:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> #ut.rrrr(False)
>>> def myfunction(self, listinput_, arg1, *args, **kwargs):
>>> " just a test function "
>>> return [x + 1 for x in listinput_]
>>> #orig_func = ut.take
>>> orig_func = myfunction
>>> wrapper = ut.accepts_scalar_input2([0])(orig_func)
>>> _wrp_preserve1 = ut.preserve_sig(wrapper, orig_func, True)
>>> _wrp_preserve2 = ut.preserve_sig(wrapper, orig_func, False)
>>> print('_wrp_preserve2 = %r' % (_wrp_preserve1,))
>>> print('_wrp_preserve2 = %r' % (_wrp_preserve2,))
>>> #print('source _wrp_preserve1 = %s' % (ut.get_func_sourcecode(_wrp_preserve1),))
>>> #print('source _wrp_preserve2 = %s' % (ut.get_func_sourcecode(_wrp_preserve2)),)
>>> result = str(_wrp_preserve1)
>>> print(result)
"""
#if True:
# import functools
# return functools.wraps(orig_func)(wrapper)
from utool._internal import meta_util_six
from utool import util_str
from utool import util_inspect
if wrapper is orig_func:
# nothing to do
return orig_func
orig_docstr = meta_util_six.get_funcdoc(orig_func)
orig_docstr = '' if orig_docstr is None else orig_docstr
orig_argspec = util_inspect.get_func_argspec(orig_func)
wrap_name = meta_util_six.get_funccode(wrapper).co_name
orig_name = meta_util_six.get_funcname(orig_func)
# At the very least preserve info in a dictionary
_utinfo = {}
_utinfo['orig_func'] = orig_func
_utinfo['wrap_name'] = wrap_name
_utinfo['orig_name'] = orig_name
_utinfo['orig_argspec'] = orig_argspec
if hasattr(wrapper, '_utinfo'):
parent_wrapper_utinfo = wrapper._utinfo
_utinfo['parent_wrapper_utinfo'] = parent_wrapper_utinfo
if hasattr(orig_func, '_utinfo'):
parent_orig_utinfo = orig_func._utinfo
_utinfo['parent_orig_utinfo'] = parent_orig_utinfo
# environment variable is set if you are building documentation
# preserve sig if building docs
building_docs = os.environ.get('UTOOL_AUTOGEN_SPHINX_RUNNING', 'OFF') == 'ON'
if force or SIG_PRESERVE or building_docs:
# PRESERVES ALL SIGNATURES WITH EXECS
src_fmt = r'''
def _wrp_preserve{defsig}:
""" {orig_docstr} """
try:
return wrapper{callsig}
except Exception as ex:
import utool as ut
msg = ('Failure in signature preserving wrapper:\n')
ut.printex(ex, msg)
raise
'''
# Put wrapped function into a scope
globals_ = {'wrapper': wrapper}
locals_ = {}
# argspec is :ArgSpec(args=['bar', 'baz'], varargs=None, keywords=None,
# defaults=(True,))
# get orig functions argspec
# get functions signature
# Get function call signature (no defaults)
# Define an exec function
argspec = inspect.getargspec(orig_func)
(args, varargs, varkw, defaults) = argspec
defsig = inspect.formatargspec(*argspec)
callsig = inspect.formatargspec(*argspec[0:3])
# TODO:
# ut.func_defsig
# ut.func_callsig
src_fmtdict = dict(defsig=defsig, callsig=callsig, orig_docstr=orig_docstr)
src = textwrap.dedent(src_fmt).format(**src_fmtdict)
# Define the new function on the fly
# (I wish there was a non exec / eval way to do this)
#print(src)
code = compile(src, '<string>', 'exec')
six.exec_(code, globals_, locals_)
#six.exec_(src, globals_, locals_)
        # Use functools.update_wrapper to complete preservation
_wrp_preserve = functools.update_wrapper(locals_['_wrp_preserve'], orig_func)
# Keep debug info
_utinfo['src'] = src
# Set an internal sig variable that we may use
#_wrp_preserve.__sig__ = defsig
else:
# PRESERVES SOME SIGNATURES NO EXEC
# signature preservation is turned off. just preserve the name.
# Does not use any exec or eval statments.
_wrp_preserve = functools.update_wrapper(wrapper, orig_func)
# Just do something to preserve signature
DEBUG_WRAPPED_DOCSTRING = False
if DEBUG_WRAPPED_DOCSTRING:
new_docstr_fmtstr = util_str.codeblock(
'''
Wrapped function {wrap_name}({orig_name})
orig_argspec = {orig_argspec}
orig_docstr = {orig_docstr}
'''
)
else:
new_docstr_fmtstr = util_str.codeblock(
'''
{orig_docstr}
'''
)
new_docstr = new_docstr_fmtstr.format(
wrap_name=wrap_name, orig_name=orig_name, orig_docstr=orig_docstr,
orig_argspec=orig_argspec)
meta_util_six.set_funcdoc(_wrp_preserve, new_docstr)
_wrp_preserve._utinfo = _utinfo
return _wrp_preserve
def dummy_args_decor(*args, **kwargs):
def dummy_args_closure(func):
return func
return dummy_args_closure
class classproperty(property):
"""
Decorates a method turning it into a classattribute
References:
https://stackoverflow.com/questions/1697501/python-staticmethod-with-property
"""
def __get__(self, cls, owner):
return classmethod(self.fget).__get__(None, owner)()
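# Illustrative sketch (added): classproperty exposes a computed class-level
# attribute without instantiating the class. The demo class is an assumption.
class _DemoVersioned(object):
    _version = (1, 2, 3)
    @classproperty
    def version_string(cls):
        return '.'.join(str(part) for part in cls._version)
# _DemoVersioned.version_string evaluates to '1.2.3'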
if __name__ == '__main__':
"""
CommandLine:
python -c "import utool, utool.util_decor; utool.doctest_funcs(utool.util_decor)"
python -m utool.util_decor
python -m utool.util_decor --allexamples
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| apache-2.0 | 495,489,549,317,991,400 | 35.745078 | 127 | 0.564145 | false |
jimmycallin/master-thesis | architectures/nn_discourse_parser/nets/data_reader.py | 1 | 6857 | import json
import codecs
class DRelation(object):
"""Implicit discourse relation object
The object is created from the CoNLL-json formatted data.
The format can be a bit clunky to get certain information.
So convenient methods should be implemented here mostly to be used
by the feature functions
"""
def __init__(self, relation_dict, parse):
self.relation_dict = relation_dict
self.parse = parse
self._arg_tokens = {}
self._arg_tokens[1] = None
self._arg_tokens[2] = None
self._arg_words = {}
self._arg_words[1] = None
self._arg_words[2] = None
self._arg_tree = {}
self._arg_tree[1] = None
self._arg_tree[2] = None
self._arg1_tree = None
self._arg1_tree_token_indices = None
self._arg2_tree = None
self._arg2_tree_token_indices = None
@property
def senses(self):
return self.relation_dict['Sense']
def arg_words(self, arg_pos):
"""Returns a list of Word objects"""
assert(arg_pos == 1 or arg_pos == 2)
if self._arg_words[arg_pos] is None:
key = 'Arg%s' % arg_pos
word_list = self.relation_dict[key]['TokenList']
self._arg_words[arg_pos] = [Word(x, self.parse[self.doc_id]) for x in word_list]
return self._arg_words[arg_pos]
def arg_tree(self, arg_pos):
"""Extract the tree for the argument
One tree only. Truncated as needed
Returns:
1) tree string
2) token indices (not address tuples) of that tree.
"""
assert(arg_pos == 1 or arg_pos == 2)
if self._arg_tree[arg_pos] is None:
trees, sentence_indices = self.arg_trees(arg_pos)
if arg_pos == 1:
tree = trees[-1]
sentence_index = sentence_indices[-1]
elif arg_pos == 2:
tree = trees[0]
sentence_index = sentence_indices[0]
key = 'Arg%s' % arg_pos
token_indices = [x[4] for x in self.relation_dict[key]['TokenList'] if x[3] == sentence_index]
self._arg_tree[arg_pos] = (tree, token_indices)
return self._arg_tree[arg_pos]
def arg_dtree_rule_list(self, arg_pos):
"""Returns a list of arcs in the dependency tree(s) for the arg """
assert(arg_pos == 1 or arg_pos == 2)
token_list = self.arg_token_addresses(arg_pos)
sentence_indices = set([x[3] for x in token_list])
sentence_index_to_dependency_tree = {}
for sentence_index in sentence_indices:
dependencies = \
self.parse[self.doc_id]['sentences'][sentence_index]['dependencies']
index_to_dependency = {}
# a dependency looks like this [u'prep', u'reported-8', u'In-1']
for dep in dependencies:
rel_type = dep[0]
head, _ = dep[1].rsplit('-', 1)
dependent, index = dep[2].rsplit('-', 1)
index_to_dependency[int(index)] = [rel_type, head, dependent]
sentence_index_to_dependency_tree[sentence_index] = index_to_dependency
rule_list = []
for token_address in token_list:
_, _, _, sentence_index, token_index = token_address
dtree = sentence_index_to_dependency_tree[sentence_index]
if token_index in dtree:
rule_list.append('_'.join(dtree[token_index]))
return rule_list
def arg_token_addresses(self, arg_pos):
assert(arg_pos == 1 or arg_pos == 2)
key = 'Arg%s' % arg_pos
return self.relation_dict[key]['TokenList']
@property
def doc_id(self):
return self.relation_dict['DocID']
@property
def relation_id(self):
return self.relation_dict['ID']
@property
def relation_type(self):
return self.relation_dict['Type']
@property
def doc_relation_id(self):
return '%s_%s' % (self.doc_id, self.relation_id)
def arg_tokens(self, arg_pos):
"""Returns a list of raw tokens"""
assert(arg_pos == 1 or arg_pos == 2)
if self._arg_tokens[arg_pos] is None:
key = 'Arg%s' % arg_pos
token_list = self.relation_dict[key]['TokenList']
self._arg_tokens[arg_pos] = [self.parse[self.doc_id]['sentences'][x[3]]['words'][x[4]][0] for x in token_list]
return self._arg_tokens[arg_pos]
def arg_trees(self, arg_pos):
key = 'Arg%s' % arg_pos
token_list = self.relation_dict[key]['TokenList']
sentence_indices = set([x[3] for x in token_list])
return [self.parse[self.doc_id]['sentences'][x]['parsetree'] for x in sentence_indices], list(sentence_indices)
def __repr__(self):
return self.relation_dict.__repr__()
def __str__(self):
return self.relation_dict.__str__()
class Word(object):
"""Word class wrapper
[u"'ve",
{u'CharacterOffsetBegin':2449,
u'CharacterOffsetEnd':2452,
u'Linkers':[u'arg2_15006',u'arg1_15008'],
u'PartOfSpeech':u'VBP'}]
"""
def __init__(self, word_address, parse):
self.word_address = word_address
self.word_token, self.word_info = parse['sentences'][word_address[3]]['words'][word_address[4]]
@property
def pos(self):
return self.word_info['PartOfSpeech']
@property
def lemma(self):
return self.word_info['Lemma']
@property
def sentence_index(self):
return self.word_address[3]
def extract_implicit_relations(data_folder, label_function=None):
#parse_file = '%s/pdtb-parses-plus.json' % data_folder
#parse_file = '%s/pdtb-parses.json' % data_folder
parse_file = '%s/parses.json' % data_folder
parse = json.load(codecs.open(parse_file, encoding='utf8'))
#relation_file = '%s/pdtb-data-plus.json' % data_folder
#relation_file = '%s/pdtb-data.json' % data_folder
relation_file = '%s/relations.json' % data_folder
relation_dicts = [json.loads(x) for x in open(relation_file)]
relations = [DRelation(x, parse) for x in relation_dicts if x['Type'] == 'Implicit']
if label_function is not None:
relations = [x for x in relations if label_function.label(x) is not None]
return relations
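# Illustrative usage sketch (added): 'conll15st-train' is an assumed folder
# that must contain parses.json and relations.json in the shared-task format.
def _demo_extract_implicit_relations(data_folder='conll15st-train'):
    relations = extract_implicit_relations(data_folder)
    for relation in relations[:3]:
        print('%s %s' % (relation.doc_relation_id, relation.senses))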
def extract_non_explicit_relations(data_folder, label_function=None):
parse_file = '%s/pdtb-parses.json' % data_folder
parse = json.load(codecs.open(parse_file, encoding='utf8'))
relation_file = '%s/pdtb-data.json' % data_folder
relation_dicts = [json.loads(x) for x in open(relation_file)]
relations = [DRelation(x, parse) for x in relation_dicts if x['Type'] != 'Explicit']
if label_function is not None:
relations = [x for x in relations if label_function.label(x) is not None]
return relations
| mit | 4,789,138,021,986,704,000 | 35.473404 | 122 | 0.589616 | false |
m0re4u/LeRoT-SCLP | lerot/tests/test_utils.py | 1 | 4440 | # This file is part of Lerot.
#
# Lerot is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lerot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lerot. If not, see <http://www.gnu.org/licenses/>.
import unittest
import cStringIO
import numpy as np
import lerot.query as query
import lerot.utils as utils
class TestUtils(unittest.TestCase):
def setUp(self):
pass
def testSplitArgStr(self):
split = utils.split_arg_str("--a 10 --b foo --c \"--d bar --e 42\"")
self.assertEqual(split, ["--a", "10", "--b", "foo", "--c",
"--d bar --e 42"], "wrong split (1): %s" % ", ".join(split))
split = utils.split_arg_str("\"--a\" 10 --b foo --c --d bar --e 42")
self.assertEqual(split, ["--a", "10", "--b", "foo", "--c", "--d",
"bar", "--e", "42"], "wrong split (2): %s" % ", ".join(split))
split = utils.split_arg_str("\"--a\"\" 10\"--b foo --c --d bar --e 42")
self.assertEqual(split, ["--a", " 10", "--b", "foo", "--c", "--d",
"bar", "--e", "42"], "wrong split (2): %s" % ", ".join(split))
def testRank(self):
scores = [2.1, 2.9, 2.3, 2.3, 5.5]
self.assertIn(utils.rank(scores, ties="random"),
[[0, 3, 1, 2, 4], [0, 3, 2, 1, 4]])
self.assertIn(utils.rank(scores, reverse=True, ties="random"),
[[4, 1, 3, 2, 0], [4, 1, 2, 3, 0]])
self.assertEqual(utils.rank(scores, reverse=True, ties="first"),
[4, 1, 2, 3, 0])
self.assertEqual(utils.rank(scores, reverse=True, ties="last"),
[4, 1, 3, 2, 0])
scores = [2.1, 2.9, 2.3, 2.3, 5.5, 2.9]
self.assertIn(utils.rank(scores, ties="random"),
[[0, 4, 2, 1, 5, 3],
[0, 3, 2, 1, 5, 4],
[0, 4, 1, 2, 5, 3],
[0, 3, 1, 2, 5, 4]])
self.assertIn(utils.rank(scores, reverse=True, ties="random"),
[[5, 1, 3, 4, 0, 2],
[5, 2, 3, 4, 0, 1],
[5, 1, 4, 3, 0, 2],
[5, 2, 4, 3, 0, 1]])
self.assertEqual(utils.rank(scores, reverse=True, ties="first"),
[5, 1, 3, 4, 0, 2])
self.assertEqual(utils.rank(scores, reverse=True, ties="last"),
[5, 2, 4, 3, 0, 1])
def test_create_ranking_vector(self):
feature_count = 5
# Create queries to test with
test_queries = """
1 qid:373 1:0.080000 2:0.500000 3:0.500000 4:0.500000 5:0.160000
0 qid:373 1:0.070000 2:0.180000 3:0.000000 4:0.250000 5:0.080000
0 qid:373 1:0.150000 2:0.016000 3:0.250000 4:0.250000 5:0.150000
0 qid:373 1:0.100000 2:0.250000 3:0.500000 4:0.750000 5:0.130000
0 qid:373 1:0.050000 2:0.080000 3:0.250000 4:0.250000 5:0.060000
0 qid:373 1:0.050000 2:1.000000 3:0.250000 4:0.250000 5:0.160000
"""
hard_gamma = [1, 0.63092975357, 0.5, 0.43067655807, 0.38685280723,
0.3562071871]
hard_ranking_vector = [0.27938574, 1.11639191, 1.02610328, 1.29150486,
0.42166665]
query_fh = cStringIO.StringIO(test_queries)
this_query = query.Queries(query_fh, feature_count)['373']
query_fh.close()
fake_ranking = sorted(this_query.get_docids())
# gamma, ranking_vector = utils.create_ranking_vector(
ranking_vector = utils.create_ranking_vector(
this_query, fake_ranking)
# self.assertEqual(len(gamma), len(hard_gamma))
self.assertEqual(feature_count, len(ranking_vector))
# for i in xrange(0, len(gamma)):
# self.assertAlmostEqual(gamma[i], hard_gamma[i])
for j in xrange(0, feature_count):
self.assertAlmostEqual(ranking_vector[j], hard_ranking_vector[j])
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 290,573,641,075,573,600 | 43.848485 | 79 | 0.537838 | false |
rwl/openpowersystem | cdpsm/iec61970/core/voltage_level.py | 1 | 2591 | #------------------------------------------------------------------------------
# Copyright (C) 2009 Richard Lincoln
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation; version 2 dated June, 1991.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANDABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#------------------------------------------------------------------------------
""" A collection of equipment at one common system voltage forming a switchgear. The equipment typically consist of breakers, busbars, instrumentation, control, regulation and protection devices as well as assemblies of all these.
"""
# <<< imports
# @generated
from cdpsm.iec61970.core.equipment_container import EquipmentContainer
from cdpsm.iec61970.core.base_voltage import BaseVoltage
from cdpsm.iec61970.core.substation import Substation
from cdpsm.iec61970.domain import Voltage
from google.appengine.ext import db
# >>> imports
class VoltageLevel(EquipmentContainer):
""" A collection of equipment at one common system voltage forming a switchgear. The equipment typically consist of breakers, busbars, instrumentation, control, regulation and protection devices as well as assemblies of all these.
"""
# <<< voltage_level.attributes
# @generated
# The bus bar's low voltage limit
low_voltage_limit = Voltage
# The bus bar's high voltage limit
high_voltage_limit = Voltage
# >>> voltage_level.attributes
# <<< voltage_level.references
# @generated
# The base voltage used for all equipment within the VoltageLevel.
base_voltage = db.ReferenceProperty(BaseVoltage,
collection_name="voltage_level")
# Virtual property. The association is used in the naming hierarchy.
pass # bays
# The association is used in the naming hierarchy.
substation = db.ReferenceProperty(Substation,
collection_name="voltage_levels")
# >>> voltage_level.references
# <<< voltage_level.operations
# @generated
# >>> voltage_level.operations
# EOF -------------------------------------------------------------------------
| agpl-3.0 | -1,730,218,190,851,964,200 | 38.861538 | 235 | 0.677345 | false |
camilonova/sentry | src/sentry/utils/runner.py | 1 | 11831 | #!/usr/bin/env python
"""
sentry.utils.runner
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from logan.runner import run_app, configure_app
import base64
import os
import pkg_resources
import warnings
USE_GEVENT = os.environ.get('USE_GEVENT')
KEY_LENGTH = 40
CONFIG_TEMPLATE = """
# This file is just Python, with a touch of Django which means you
# you can inherit and tweak settings to your hearts content.
from sentry.conf.server import *
import os.path
CONF_ROOT = os.path.dirname(__file__)
DATABASES = {
'default': {
# You can swap out the engine for MySQL easily by changing this value
# to ``django.db.backends.mysql`` or to PostgreSQL with
# ``django.db.backends.postgresql_psycopg2``
# If you change this, you'll also need to install the appropriate python
# package: psycopg2 (Postgres) or mysql-python
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(CONF_ROOT, 'sentry.db'),
'USER': 'postgres',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# You should not change this setting after your database has been created
# unless you have altered all schemas first
SENTRY_USE_BIG_INTS = True
# If you're expecting any kind of real traffic on Sentry, we highly recommend
# configuring the CACHES and Redis settings
###########
## Redis ##
###########
# Generic Redis configuration used as defaults for various things including:
# Buffers, Quotas, TSDB
SENTRY_REDIS_OPTIONS = {
'hosts': {
0: {
'host': '127.0.0.1',
'port': 6379,
}
}
}
###########
## Cache ##
###########
# If you wish to use memcached, install the dependencies and adjust the config
# as shown:
#
# pip install python-memcached
#
# CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'LOCATION': ['127.0.0.1:11211'],
# }
# }
#
# SENTRY_CACHE = 'sentry.cache.django.DjangoCache'
SENTRY_CACHE = 'sentry.cache.redis.RedisCache'
###########
## Queue ##
###########
# See http://sentry.readthedocs.org/en/latest/queue/index.html for more
# information on configuring your queue broker and workers. Sentry relies
# on a Python framework called Celery to manage queues.
CELERY_ALWAYS_EAGER = False
BROKER_URL = 'redis://localhost:6379'
#################
## Rate Limits ##
#################
SENTRY_RATELIMITER = 'sentry.ratelimits.redis.RedisRateLimiter'
####################
## Update Buffers ##
####################
# Buffers (combined with queueing) act as an intermediate layer between the
# database and the storage API. They will greatly improve efficiency on large
# numbers of the same events being sent to the API in a short amount of time.
# (read: if you send any kind of real data to Sentry, you should enable buffers)
SENTRY_BUFFER = 'sentry.buffer.redis.RedisBuffer'
############
## Quotas ##
############
# Quotas allow you to rate limit individual projects or the Sentry install as
# a whole.
SENTRY_QUOTAS = 'sentry.quotas.redis.RedisQuota'
##########
## TSDB ##
##########
# The TSDB is used for building charts as well as making things like per-rate
# alerts possible.
SENTRY_TSDB = 'sentry.tsdb.redis.RedisTSDB'
##################
## File storage ##
##################
# Any Django storage backend is compatible with Sentry. For more solutions see
# the django-storages package: https://django-storages.readthedocs.org/en/latest/
SENTRY_FILESTORE = 'django.core.files.storage.FileSystemStorage'
SENTRY_FILESTORE_OPTIONS = {
'location': '/tmp/sentry-files',
}
################
## Web Server ##
################
# You MUST configure the absolute URI root for Sentry:
SENTRY_URL_PREFIX = 'http://sentry.example.com' # No trailing slash!
# If you're using a reverse proxy, you should enable the X-Forwarded-Proto
# and X-Forwarded-Host headers, and uncomment the following settings
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# USE_X_FORWARDED_HOST = True
SENTRY_WEB_HOST = '0.0.0.0'
SENTRY_WEB_PORT = 9000
SENTRY_WEB_OPTIONS = {
'workers': 3, # the number of gunicorn workers
'limit_request_line': 0, # required for raven-js
'secure_scheme_headers': {'X-FORWARDED-PROTO': 'https'},
}
#################
## Mail Server ##
#################
# For more information check Django's documentation:
# https://docs.djangoproject.com/en/1.3/topics/email/?from=olddocs#e-mail-backends
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_HOST_PASSWORD = ''
EMAIL_HOST_USER = ''
EMAIL_PORT = 25
EMAIL_USE_TLS = False
# The email address to send on behalf of
SERVER_EMAIL = 'root@localhost'
# If you're using mailgun for inbound mail, set your API key and configure a
# route to forward to /api/hooks/mailgun/inbound/
MAILGUN_API_KEY = ''
###########
## etc. ##
###########
# If this file ever becomes compromised, it's important to regenerate your SECRET_KEY
# Changing this value will result in all current sessions being invalidated
SECRET_KEY = %(default_key)r
# http://twitter.com/apps/new
# It's important that you input a callback URL, even if it's useless. We have no idea why; consult Twitter.
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
# http://developers.facebook.com/setup/
FACEBOOK_APP_ID = ''
FACEBOOK_API_SECRET = ''
# http://code.google.com/apis/accounts/docs/OAuth2.html#Registering
GOOGLE_OAUTH2_CLIENT_ID = ''
GOOGLE_OAUTH2_CLIENT_SECRET = ''
# https://github.com/settings/applications/new
GITHUB_APP_ID = ''
GITHUB_API_SECRET = ''
# https://trello.com/1/appKey/generate
TRELLO_API_KEY = ''
TRELLO_API_SECRET = ''
# https://confluence.atlassian.com/display/BITBUCKET/OAuth+Consumers
BITBUCKET_CONSUMER_KEY = ''
BITBUCKET_CONSUMER_SECRET = ''
"""
def generate_settings():
"""
This command is run when ``default_path`` doesn't exist, or ``init`` is
run and returns a string representing the default data to put into their
settings file.
"""
output = CONFIG_TEMPLATE % dict(
default_key=base64.b64encode(os.urandom(KEY_LENGTH)),
)
return output
def install_plugins(settings):
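    # Discover add-ons exposed via setuptools entry points: 'sentry.apps' entries
    # are appended to INSTALLED_APPS, 'sentry.plugins' entries are registered as plugins.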
from sentry.plugins import register
# entry_points={
# 'sentry.plugins': [
# 'phabricator = sentry_phabricator.plugins:PhabricatorPlugin'
# ],
# },
installed_apps = list(settings.INSTALLED_APPS)
for ep in pkg_resources.iter_entry_points('sentry.apps'):
try:
plugin = ep.load()
except Exception:
import sys
import traceback
sys.stderr.write("Failed to load app %r:\n%s\n" % (ep.name, traceback.format_exc()))
else:
installed_apps.append(ep.module_name)
settings.INSTALLED_APPS = tuple(installed_apps)
for ep in pkg_resources.iter_entry_points('sentry.plugins'):
try:
plugin = ep.load()
except Exception:
import sys
import traceback
sys.stderr.write("Failed to load plugin %r:\n%s\n" % (ep.name, traceback.format_exc()))
else:
register(plugin)
def initialize_receivers():
# force signal registration
import sentry.receivers # NOQA
def initialize_gevent():
from gevent import monkey
monkey.patch_all()
try:
import psycopg2 # NOQA
except ImportError:
pass
else:
from sentry.utils.gevent import make_psycopg_green
make_psycopg_green()
def initialize_app(config):
from django.utils import timezone
from sentry.app import env
if USE_GEVENT:
from django.db import connections
connections['default'].allow_thread_sharing = True
env.data['config'] = config.get('config_path')
env.data['start_date'] = timezone.now()
settings = config['settings']
install_plugins(settings)
skip_migration_if_applied(
settings, 'kombu.contrib.django', 'djkombu_queue')
skip_migration_if_applied(
settings, 'social_auth', 'social_auth_association')
apply_legacy_settings(config)
    # Common setups often don't configure themselves correctly for production
    # environments, so let's try to provide a bit more guidance
if settings.CELERY_ALWAYS_EAGER and not settings.DEBUG:
warnings.warn('Sentry is configured to run asynchronous tasks in-process. '
'This is not recommended within production environments. '
'See http://sentry.readthedocs.org/en/latest/queue/index.html for more information.')
initialize_receivers()
def apply_legacy_settings(config):
settings = config['settings']
# SENTRY_USE_QUEUE used to determine if Celery was eager or not
if hasattr(settings, 'SENTRY_USE_QUEUE'):
warnings.warn('SENTRY_USE_QUEUE is deprecated. Please use CELERY_ALWAYS_EAGER instead. '
'See http://sentry.readthedocs.org/en/latest/queue/index.html for more information.', DeprecationWarning)
settings.CELERY_ALWAYS_EAGER = (not settings.SENTRY_USE_QUEUE)
if settings.SENTRY_URL_PREFIX in ('', 'http://sentry.example.com'):
# Maybe also point to a piece of documentation for more information?
# This directly coincides with users getting the awkward
# `ALLOWED_HOSTS` exception.
print('')
print('\033[91m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\033[0m')
print('\033[91m!! SENTRY_URL_PREFIX is not configured !!\033[0m')
print('\033[91m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\033[0m')
print('')
# Set `ALLOWED_HOSTS` to the catch-all so it works
settings.ALLOWED_HOSTS = ['*']
# Set ALLOWED_HOSTS if it's not already available
if not settings.ALLOWED_HOSTS:
from urlparse import urlparse
urlbits = urlparse(settings.SENTRY_URL_PREFIX)
if urlbits.hostname:
settings.ALLOWED_HOSTS = (urlbits.hostname,)
if not settings.SERVER_EMAIL and hasattr(settings, 'SENTRY_SERVER_EMAIL'):
warnings.warn('SENTRY_SERVER_EMAIL is deprecated. Please use SERVER_EMAIL instead.', DeprecationWarning)
settings.SERVER_EMAIL = settings.SENTRY_SERVER_EMAIL
def skip_migration_if_applied(settings, app_name, table_name,
name='0001_initial'):
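    # If the target table already exists, replace the migration's forwards()
    # with a no-op so South does not try to re-create it.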
from south.migration import Migrations
from sentry.utils.db import table_exists
import types
migration = Migrations(app_name)[name]
def skip_if_table_exists(original):
def wrapped(self):
# TODO: look into why we're having to return some ridiculous
# lambda
if table_exists(table_name):
return lambda x=None: None
return original()
wrapped.__name__ = original.__name__
return wrapped
migration.forwards = types.MethodType(
skip_if_table_exists(migration.forwards), migration)
def configure(config_path=None):
configure_app(
project='sentry',
config_path=config_path,
default_config_path='~/.sentry/sentry.conf.py',
default_settings='sentry.conf.server',
settings_initializer=generate_settings,
settings_envvar='SENTRY_CONF',
initializer=initialize_app,
)
def main():
if USE_GEVENT:
print("Configuring Sentry with gevent bindings")
initialize_gevent()
run_app(
project='sentry',
default_config_path='~/.sentry/sentry.conf.py',
default_settings='sentry.conf.server',
settings_initializer=generate_settings,
settings_envvar='SENTRY_CONF',
initializer=initialize_app,
)
if __name__ == '__main__':
main()
| bsd-3-clause | 2,167,006,475,195,505,400 | 28.284653 | 127 | 0.646944 | false |
jetskijoe/SickGear | tests/name_parser_tests.py | 1 | 28709 | from __future__ import print_function
import datetime
import os.path
import test_lib as test
import sys
import unittest
sys.path.insert(1, os.path.abspath('..'))
sys.path.insert(1, os.path.abspath('../lib'))
from sickbeard.name_parser import parser
import sickbeard
sickbeard.SYS_ENCODING = 'UTF-8'
DEBUG = VERBOSE = False
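# Each key of simple_test_cases names a parser regex category; each value maps a
# release name to the ParseResult expected from parsing it (a falsy value means
# the name is expected to raise InvalidNameException).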
simple_test_cases = {
'standard': {
'Mr.Show.Name.S01E02.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Mr Show Name', 1, [2], 'Source.Quality.Etc', 'Group'),
'Show.Name.S01E02': parser.ParseResult(None, 'Show Name', 1, [2]),
'Show Name - S01E02 - My Ep Name': parser.ParseResult(None, 'Show Name', 1, [2], 'My Ep Name'),
'Show.1.0.Name.S01.E03.My.Ep.Name-Group':
parser.ParseResult(None, 'Show 1.0 Name', 1, [3], 'My.Ep.Name', 'Group'),
'Show.Name.S01E02E03.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', 1, [2, 3], 'Source.Quality.Etc', 'Group'),
'Mr. Show Name - S01E02-03 - My Ep Name': parser.ParseResult(None, 'Mr. Show Name', 1, [2, 3], 'My Ep Name'),
'Show.Name.S01.E02.E03': parser.ParseResult(None, 'Show Name', 1, [2, 3]),
'Show.Name-0.2010.S01E02.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name-0 2010', 1, [2], 'Source.Quality.Etc', 'Group'),
'S01E02 Ep Name': parser.ParseResult(None, None, 1, [2], 'Ep Name'),
'Show Name - S06E01 - 2009-12-20 - Ep Name':
parser.ParseResult(None, 'Show Name', 6, [1], '2009-12-20 - Ep Name'),
'Show Name - S06E01 - -30-': parser.ParseResult(None, 'Show Name', 6, [1], '30-'),
'Show-Name-S06E01-720p': parser.ParseResult(None, 'Show-Name', 6, [1], '720p'),
'Show-Name-S06E01-1080i': parser.ParseResult(None, 'Show-Name', 6, [1], '1080i'),
'Show.Name.S06E01.Other.WEB-DL': parser.ParseResult(None, 'Show Name', 6, [1], 'Other.WEB-DL'),
'Show.Name.S06E01 Some-Stuff Here': parser.ParseResult(None, 'Show Name', 6, [1], 'Some-Stuff Here'),
'Show.Name.S01E15-11001001': parser.ParseResult(None, 'Show Name', 1, [15], None),
'Show.Name.S01E02.Source.Quality.Etc-Group - [stuff]':
parser.ParseResult(None, 'Show Name', 1, [2], 'Source.Quality.Etc', 'Group'),
},
'fov': {
'Show_Name.1x02.Source_Quality_Etc-Group':
parser.ParseResult(None, 'Show Name', 1, [2], 'Source_Quality_Etc', 'Group'),
'Show Name 1x02': parser.ParseResult(None, 'Show Name', 1, [2]),
'Show Name 1x02 x264 Test': parser.ParseResult(None, 'Show Name', 1, [2], 'x264 Test'),
'Show Name - 1x02 - My Ep Name': parser.ParseResult(None, 'Show Name', 1, [2], 'My Ep Name'),
'Show_Name.1x02x03x04.Source_Quality_Etc-Group':
parser.ParseResult(None, 'Show Name', 1, [2, 3, 4], 'Source_Quality_Etc', 'Group'),
'Show Name - 1x02-03-04 - My Ep Name': parser.ParseResult(None, 'Show Name', 1, [2, 3, 4], 'My Ep Name'),
'1x02 Ep Name': parser.ParseResult(None, None, 1, [2], 'Ep Name'),
'Show-Name-1x02-720p': parser.ParseResult(None, 'Show-Name', 1, [2], '720p'),
'Show-Name-1x02-1080i': parser.ParseResult(None, 'Show-Name', 1, [2], '1080i'),
'Show Name [05x12] Ep Name': parser.ParseResult(None, 'Show Name', 5, [12], 'Ep Name'),
'Show.Name.1x02.WEB-DL': parser.ParseResult(None, 'Show Name', 1, [2], 'WEB-DL'),
},
'standard_repeat': {
'Show.Name.S01E02.S01E03.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', 1, [2, 3], 'Source.Quality.Etc', 'Group'),
'Show.Name.S01E02.S01E03': parser.ParseResult(None, 'Show Name', 1, [2, 3]),
'Show Name - S01E02 - S01E03 - S01E04 - Ep Name':
parser.ParseResult(None, 'Show Name', 1, [2, 3, 4], 'Ep Name'),
'Show.Name.S01E02.S01E03.WEB-DL': parser.ParseResult(None, 'Show Name', 1, [2, 3], 'WEB-DL'),
},
'fov_repeat': {
'Show.Name.1x02.1x03.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', 1, [2, 3], 'Source.Quality.Etc', 'Group'),
'Show.Name.1x02.1x03': parser.ParseResult(None, 'Show Name', 1, [2, 3]),
'Show Name - 1x02 - 1x03 - 1x04 - Ep Name': parser.ParseResult(None, 'Show Name', 1, [2, 3, 4], 'Ep Name'),
'Show.Name.1x02.1x03.WEB-DL': parser.ParseResult(None, 'Show Name', 1, [2, 3], 'WEB-DL'),
},
'bare': {
'Show.Name.102.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', 1, [2], 'Source.Quality.Etc', 'Group'),
'show.name.2010.123.source.quality.etc-group':
parser.ParseResult(None, 'show name 2010', 1, [23], 'source.quality.etc', 'group'),
'show.name.2010.222.123.source.quality.etc-group':
parser.ParseResult(None, 'show name 2010.222', 1, [23], 'source.quality.etc', 'group'),
'Show.Name.102': parser.ParseResult(None, 'Show Name', 1, [2]),
'the.event.401.hdtv-lol': parser.ParseResult(None, 'the event', 4, [1], 'hdtv', 'lol'),
# 'show.name.2010.special.hdtv-blah': None,
},
'stupid': {
'tpz-abc102': parser.ParseResult(None, None, 1, [2], None, 'tpz'),
'tpz-abc.102': parser.ParseResult(None, None, 1, [2], None, 'tpz'),
},
'no_season': {
'Show Name - 01 - Ep Name': parser.ParseResult(None, 'Show Name', None, [1], 'Ep Name'),
'01 - Ep Name': parser.ParseResult(None, None, None, [1], 'Ep Name'),
'Show Name - 01 - Ep Name - WEB-DL': parser.ParseResult(None, 'Show Name', None, [1], 'Ep Name - WEB-DL'),
'Show.Name.2015.04.19.Ep.Name.Part.2.PROPER.PDTV.x264-GROUP':
parser.ParseResult(None, 'Show Name', release_group='GROUP', extra_info='Ep.Name.Part.2.PROPER.PDTV.x264',
air_date=datetime.date(2015, 4, 19)),
},
'no_season_general': {
'Show.Name.E23.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', None, [23], 'Source.Quality.Etc', 'Group'),
'Show Name - Episode 01 - Ep Name': parser.ParseResult(None, 'Show Name', None, [1], 'Ep Name'),
'Show.Name.Part.3.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', 1, [3], 'Source.Quality.Etc', 'Group'),
'Show.Name.Part.1.and.Part.2.Blah-Group': parser.ParseResult(None, 'Show Name', 1, [1, 2], 'Blah', 'Group'),
'Show.Name.Part.IV.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', None, [4], 'Source.Quality.Etc', 'Group'),
'Deconstructed.E07.1080i.HDTV.DD5.1.MPEG2-TrollHD':
parser.ParseResult(None, 'Deconstructed', None, [7], '1080i.HDTV.DD5.1.MPEG2', 'TrollHD'),
'Show.Name.E23.WEB-DL': parser.ParseResult(None, 'Show Name', None, [23], 'WEB-DL'),
},
'no_season_multi_ep': {
'Show.Name.E23-24.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', None, [23, 24], 'Source.Quality.Etc', 'Group'),
'Show Name - Episode 01-02 - Ep Name': parser.ParseResult(None, 'Show Name', None, [1, 2], 'Ep Name'),
'Show.Name.E23-24.WEB-DL': parser.ParseResult(None, 'Show Name', None, [23, 24], 'WEB-DL'),
},
'season_only': {
'Show.Name.S02.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', 2, [], 'Source.Quality.Etc', 'Group'),
'Show Name Season 2': parser.ParseResult(None, 'Show Name', 2),
'Season 02': parser.ParseResult(None, None, 2),
},
'scene_date_format': {
'Show.Name.2010.11.23.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', None, [], 'Source.Quality.Etc', 'Group', datetime.date(2010, 11, 23)),
'Show Name - 2010.11.23': parser.ParseResult(None, 'Show Name', air_date=datetime.date(2010, 11, 23)),
'Show.Name.2010.23.11.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', None, [], 'Source.Quality.Etc', 'Group', datetime.date(2010, 11, 23)),
'Show Name - 2010-11-23 - Ep Name':
parser.ParseResult(None, 'Show Name', extra_info='Ep Name', air_date=datetime.date(2010, 11, 23)),
'2010-11-23 - Ep Name': parser.ParseResult(None, extra_info='Ep Name', air_date=datetime.date(2010, 11, 23)),
'Show.Name.2010.11.23.WEB-DL':
parser.ParseResult(None, 'Show Name', None, [], 'WEB-DL', None, datetime.date(2010, 11, 23)),
},
'uk_date_format': {
'Show.Name.23.11.2010.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', None, [], 'Source.Quality.Etc', 'Group', datetime.date(2010, 11, 23)),
'Show Name - 23.11.2010': parser.ParseResult(None, 'Show Name', air_date=datetime.date(2010, 11, 23)),
'Show.Name.11.23.2010.Source.Quality.Etc-Group':
parser.ParseResult(None, 'Show Name', None, [], 'Source.Quality.Etc', 'Group', datetime.date(2010, 11, 23)),
'Show Name - 23-11-2010 - Ep Name':
parser.ParseResult(None, 'Show Name', extra_info='Ep Name', air_date=datetime.date(2010, 11, 23)),
'23-11-2010 - Ep Name': parser.ParseResult(None, extra_info='Ep Name', air_date=datetime.date(2010, 11, 23)),
'Show.Name.23.11.2010.WEB-DL':
parser.ParseResult(None, 'Show Name', None, [], 'WEB-DL', None, datetime.date(2010, 11, 23)),
},
'anime_ultimate': {
'[Tsuki] Bleach - 301 [1280x720][61D1D4EE]':
parser.ParseResult(None, 'Bleach', None, [], '1280x720', 'Tsuki', None, [301]),
'[Tsuki] Fairy Tail - 70 [1280x720][C4807111]':
parser.ParseResult(None, 'Fairy Tail', None, [], '1280x720', 'Tsuki', None, [70]),
'[SGKK] Bleach 312v2 [720p MKV]':
parser.ParseResult(None, 'Bleach', None, [], '720p MKV', 'SGKK', None, [312]),
'[BSS-Anon] Tengen Toppa Gurren Lagann - 22-23 [1280x720][h264][6039D9AF]':
parser.ParseResult(None, 'Tengen Toppa Gurren Lagann', None, [], '1280x720', 'BSS-Anon', None, [22, 23]),
'[SJSUBS]_Naruto_Shippuden_-_02_[480p AAC]':
parser.ParseResult(None, 'Naruto Shippuden', None, [], '480p AAC', 'SJSUBS', None, [2]),
'[SFW-Chihiro] Dance in the Vampire Bund - 12 [1920x1080 Blu-ray FLAC][2F6DBC66].mkv':
parser.ParseResult(
None, 'Dance in the Vampire Bund', None, [], '1920x1080 Blu-ray FLAC', 'SFW-Chihiro', None, [12]),
'[SHiN-gx] Hanasaku Iroha - 01 [1280x720 h.264 AAC][BDC36683]':
parser.ParseResult(None, 'Hanasaku Iroha', None, [], '1280x720 h.264 AAC', 'SHiN-gx', None, [1]),
'[SFW-Chihiro] Dance in the Vampire Bund - 02 [1920x1080 Blu-ray FLAC][C1FA0A09]':
parser.ParseResult(
None, 'Dance in the Vampire Bund', None, [], '1920x1080 Blu-ray FLAC', 'SFW-Chihiro', None, [2]),
'[HorribleSubs] No. 6 - 11 [720p]':
parser.ParseResult(None, 'No. 6', None, [], '720p', 'HorribleSubs', None, [11]),
'[HorribleSubs] D Gray-Man - 312 (480p) [F501C9BE]':
parser.ParseResult(None, 'D Gray-Man', None, [], '480p', 'HorribleSubs', None, [312]),
'[SGKK] Tengen Toppa Gurren Lagann - 45-46 (720p h264) [F501C9BE]':
parser.ParseResult(None, 'Tengen Toppa Gurren Lagann', None, [], '720p h264', 'SGKK', None, [45, 46]),
'[Stratos-Subs]_Infinite_Stratos_-_12_(1280x720_H.264_AAC)_[379759DB]':
parser.ParseResult(None, 'Infinite Stratos', None, [], '1280x720_H.264_AAC', 'Stratos-Subs', None, [12]),
'[ShinBunBu-Subs] Bleach - 02-03 (CX 1280x720 x264 AAC)':
parser.ParseResult(None, 'Bleach', None, [], 'CX 1280x720 x264 AAC', 'ShinBunBu-Subs', None, [2, 3]),
'[Doki] Hanasaku Iroha - 03 (848x480 h264 AAC) [CB1AA73B]':
parser.ParseResult(None, 'Hanasaku Iroha', None, [], '848x480 h264 AAC', 'Doki', None, [3]),
'[UTW]_Fractal_-_01_[h264-720p][96D3F1BF]':
parser.ParseResult(None, 'Fractal', None, [], 'h264-720p', 'UTW', None, [1]),
'[a-s]_inuyasha_-_028_rs2_[BFDDF9F2]':
parser.ParseResult(None, 'inuyasha', None, [], 'BFDDF9F2', 'a-s', None, [28]),
'[HorribleSubs] Fairy Tail S2 - 37 [1080p]':
parser.ParseResult(None, 'Fairy Tail S2', None, [], '1080p', 'HorribleSubs', None, [37]),
'[HorribleSubs] Sword Art Online II - 23 [720p]':
parser.ParseResult(None, 'Sword Art Online II', None, [], '720p', 'HorribleSubs', None, [23]),
},
'anime_standard': {
'[Cthuko] Shirobako - 05v2 [720p H264 AAC][80C9B09B]':
parser.ParseResult(None, 'Shirobako', None, [], '720p H264 AAC', 'Cthuko', None, [5]),
'[Ayako]_Minami-ke_Okaeri_-_01v2_[1024x576 H264+AAC][B1912CD8]':
parser.ParseResult(None, 'Minami-ke Okaeri', None, [], '1024x576 H264+AAC', 'Ayako', None, [1]),
'Show.Name.123-11001001': parser.ParseResult(None, 'Show Name', None, [], None, None, None, [123]),
},
'anime_ep_name': {
'[TzaTziki]_One_Piece_279_Chopper_Man_1_[720p][8AE5F25D]':
parser.ParseResult(None, 'One Piece', None, [], '720p', 'TzaTziki', None, [279]),
"[ACX]Wolf's_Rain_-_04_-_Scars_in_the_Wasteland_[octavarium]_[82B7E357]":
parser.ParseResult(None, "Wolf's Rain", None, [], 'octavarium', 'ACX', None, [4]),
'[ACX]Black Lagoon - 02v2 - Mangrove Heaven [SaintDeath] [7481F875]':
parser.ParseResult(None, 'Black Lagoon', None, [], 'SaintDeath', 'ACX', None, [2]),
},
'anime_standard_round': {
'[SGKK] Bleach - 312v2 (1280x720 h264 AAC) [F501C9BE]':
parser.ParseResult(None, 'Bleach', None, [], '1280x720 h264 AAC', 'SGKK', None, [312]),
},
'anime_slash': {
'[SGKK] Bleach 312v1 [720p/MKV]': parser.ParseResult(None, 'Bleach', None, [], '720p', 'SGKK', None, [312]),
'[SGKK] Bleach 312 [480p/MKV]': parser.ParseResult(None, 'Bleach', None, [], '480p', 'SGKK', None, [312])
},
'anime_standard_codec': {
'[Ayako]_Infinite_Stratos_-_IS_-_07_[H264][720p][EB7838FC]':
parser.ParseResult(None, 'Infinite Stratos', None, [], '720p', 'Ayako', None, [7]),
'[Ayako] Infinite Stratos - IS - 07v2 [H264][720p][44419534]':
parser.ParseResult(None, 'Infinite Stratos', None, [], '720p', 'Ayako', None, [7]),
'[Ayako-Shikkaku] Oniichan no Koto Nanka Zenzen Suki Janain Dakara ne - 10 [LQ][h264][720p] [8853B21C]':
parser.ParseResult(None, 'Oniichan no Koto Nanka Zenzen Suki Janain Dakara ne', None, [],
'720p', 'Ayako-Shikkaku', None, [10]),
'[Tsuki] Fairy Tail - 72 [XviD][C4807111]':
parser.ParseResult(None, 'Fairy Tail', None, [], 'C4807111', 'Tsuki', None, [72]),
'Bubblegum Crisis Tokyo 2040 - 25 [aX] [F4E2E558]':
parser.ParseResult(None, 'Bubblegum Crisis Tokyo 2040', None, [], 'aX', None, None, [25]),
},
'anime_and_normal': {
'Bleach - s02e03 - 012 - Name & Name': parser.ParseResult(None, 'Bleach', 2, [3], None, None, None, [12]),
'Bleach - s02e03e04 - 012-013 - Name & Name':
parser.ParseResult(None, 'Bleach', 2, [3, 4], None, None, None, [12, 13]),
'Bleach - s16e03-04 - 313-314': parser.ParseResult(None, 'Bleach', 16, [3, 4], None, None, None, [313, 314]),
'Blue Submarine No. 6 s16e03e04 313-314':
parser.ParseResult(None, 'Blue Submarine No. 6', 16, [3, 4], None, None, None, [313, 314]),
'Bleach.s16e03-04.313-314': parser.ParseResult(None, 'Bleach', 16, [3, 4], None, None, None, [313, 314]),
'.hack roots s01e01 001.mkv': parser.ParseResult(None, 'hack roots', 1, [1], None, None, None, [1]),
'.hack sign s01e01 001.mkv': parser.ParseResult(None, 'hack sign', 1, [1], None, None, None, [1])
},
'anime_and_normal_reverse': {
'Bleach - 012 - s02e03 - Name & Name': parser.ParseResult(None, 'Bleach', 2, [3], None, None, None, [12]),
'Blue Submarine No. 6 - 012-013 - s02e03e04 - Name & Name':
parser.ParseResult(None, 'Blue Submarine No. 6', 2, [3, 4], None, None, None, [12, 13]),
'07-GHOST - 012-013 - s02e03e04 - Name & Name':
parser.ParseResult(None, '07-GHOST', 2, [3, 4], None, None, None, [12, 13]),
'3x3 Eyes - 012-013 - s02e03-04 - Name & Name':
parser.ParseResult(None, '3x3 Eyes', 2, [3, 4], None, None, None, [12, 13]),
},
'anime_and_normal_front': {
'165.Naruto Shippuuden.s08e014':
parser.ParseResult(None, 'Naruto Shippuuden', 8, [14], None, None, None, [165]),
'165-166.Naruto Shippuuden.s08e014e015':
parser.ParseResult(None, 'Naruto Shippuuden', 8, [14, 15], None, None, None, [165, 166]),
'165-166.07-GHOST.s08e014-015': parser.ParseResult(None, '07-GHOST', 8, [14, 15], None, None, None, [165, 166]),
'165-166.3x3 Eyes.S08E014E015': parser.ParseResult(None, '3x3 Eyes', 8, [14, 15], None, None, None, [165, 166]),
},
'anime_bare': {
'One Piece 102': parser.ParseResult(None, 'One Piece', None, [], None, None, None, [102]),
'bleach - 010': parser.ParseResult(None, 'bleach', None, [], None, None, None, [10]),
'Naruto Shippuden - 314v2': parser.ParseResult(None, 'Naruto Shippuden', None, [], None, None, None, [314]),
'Blue Submarine No. 6 104-105':
parser.ParseResult(None, 'Blue Submarine No. 6', None, [], None, None, None, [104, 105]),
'Samurai X: Trust & Betrayal (OVA) 001-002':
parser.ParseResult(None, 'Samurai X: Trust & Betrayal (OVA)', None, [], None, None, None, [1, 2]),
"[ACX]_Wolf's_Spirit_001.mkv": parser.ParseResult(None, "Wolf's Spirit", None, [], None, 'ACX', None, [1])
}
}
combination_test_cases = [
('/test/path/to/Season 02/03 - Ep Name.avi',
parser.ParseResult(None, None, 2, [3], 'Ep Name'),
['no_season', 'season_only']),
('Show.Name.S02.Source.Quality.Etc-Group/tpz-sn203.avi',
parser.ParseResult(None, 'Show Name', 2, [3], 'Source.Quality.Etc', 'Group'),
['stupid', 'season_only']),
('MythBusters.S08E16.720p.HDTV.x264-aAF/aaf-mb.s08e16.720p.mkv',
parser.ParseResult(None, 'MythBusters', 8, [16], '720p.HDTV.x264', 'aAF'),
['standard']),
('/home/drop/storage/TV/Terminator The Sarah Connor Chronicles' +
'/Season 2/S02E06 The Tower is Tall, But the Fall is Short.mkv',
parser.ParseResult(None, None, 2, [6], 'The Tower is Tall, But the Fall is Short'),
['standard']),
(r'/Test/TV/Jimmy Fallon/Season 2/Jimmy Fallon - 2010-12-15 - blah.avi',
parser.ParseResult(None, 'Jimmy Fallon', extra_info='blah', air_date=datetime.date(2010, 12, 15)),
['scene_date_format']),
(r'/X/30 Rock/Season 4/30 Rock - 4x22 -.avi',
parser.ParseResult(None, '30 Rock', 4, [22]),
['fov']),
('Season 2\\Show Name - 03-04 - Ep Name.ext',
parser.ParseResult(None, 'Show Name', 2, [3, 4], extra_info='Ep Name'),
['no_season', 'season_only']),
('Season 02\\03-04-05 - Ep Name.ext',
parser.ParseResult(None, None, 2, [3, 4, 5], extra_info='Ep Name'),
['no_season', 'season_only']),
]
unicode_test_cases = [
(u'The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
parser.ParseResult(
u'The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
u'The Big Bang Theory', 2, [7], u'The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3', u'SHELDON',
version=-1)
),
('The.Big.Bang.Theory.2x07.The.Panty.Pi\xc3\xb1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
parser.ParseResult(
u'The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
u'The Big Bang Theory', 2, [7], u'The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3', u'SHELDON',
version=-1)
),
]
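# Release names the parser is expected to reject with InvalidNameException.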
failure_cases = ['7sins-jfcs01e09-720p-bluray-x264']
class UnicodeTests(test.SickbeardTestDBCase):
def _test_unicode(self, name, result):
result.which_regex = ['fov']
parse_result = parser.NameParser(True, testing=True).parse(name)
self.assertEqual(parse_result, result)
# this shouldn't raise an exception
void = repr(str(parse_result))
void += ''
def test_unicode(self):
for (name, result) in unicode_test_cases:
self._test_unicode(name, result)
class FailureCaseTests(test.SickbeardTestDBCase):
@staticmethod
def _test_name(name):
np = parser.NameParser(True)
try:
parse_result = np.parse(name)
except (parser.InvalidNameException, parser.InvalidShowException):
return True
if VERBOSE:
print('Actual: ', parse_result.which_regex, parse_result)
return False
def test_failures(self):
for name in failure_cases:
self.assertTrue(self._test_name(name))
class ComboTests(test.SickbeardTestDBCase):
def _test_combo(self, name, result, which_regexes):
if VERBOSE:
print()
print('Testing', name)
np = parser.NameParser(True)
try:
test_result = np.parse(name)
except parser.InvalidShowException:
return False
if DEBUG:
print(test_result, test_result.which_regex)
print(result, which_regexes)
self.assertEqual(test_result, result)
for cur_regex in which_regexes:
self.assertTrue(cur_regex in test_result.which_regex)
self.assertEqual(len(which_regexes), len(test_result.which_regex))
def test_combos(self):
for (name, result, which_regexes) in combination_test_cases:
# Normalise the paths. Converts UNIX-style paths into Windows-style
# paths when test is run on Windows.
self._test_combo(os.path.normpath(name), result, which_regexes)
class BasicTests(test.SickbeardTestDBCase):
def _test_names(self, np, section, transform=None, verbose=False):
if VERBOSE or verbose:
print('Running', section, 'tests')
for cur_test_base in simple_test_cases[section]:
if transform:
cur_test = transform(cur_test_base)
else:
cur_test = cur_test_base
if VERBOSE or verbose:
print('Testing', cur_test)
result = simple_test_cases[section][cur_test_base]
if not result:
self.assertRaises(parser.InvalidNameException, np.parse, cur_test)
return
else:
test_result = np.parse(cur_test)
try:
# self.assertEqual(test_result.which_regex, [section])
self.assertEqual(test_result, result)
except:
print('air_by_date:', test_result.is_air_by_date, 'air_date:', test_result.air_date)
print('anime:', test_result.is_anime, 'ab_episode_numbers:', test_result.ab_episode_numbers)
print(test_result)
print(result)
raise
def test_standard_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'standard')
def test_standard_repeat_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'standard_repeat')
def test_fov_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'fov')
def test_fov_repeat_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'fov_repeat')
def test_bare_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'bare')
def test_stupid_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'stupid')
def test_no_season_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'no_season')
def test_no_season_general_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'no_season_general')
def test_no_season_multi_ep_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'no_season_multi_ep')
def test_season_only_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'season_only')
def test_scene_date_format_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'scene_date_format')
def test_uk_date_format_names(self):
np = parser.NameParser(False, testing=True)
self._test_names(np, 'uk_date_format')
def test_standard_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'standard', lambda x: x + '.avi')
def test_standard_repeat_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'standard_repeat', lambda x: x + '.avi')
def test_fov_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'fov', lambda x: x + '.avi')
def test_fov_repeat_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'fov_repeat', lambda x: x + '.avi')
def test_bare_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'bare', lambda x: x + '.avi')
def test_stupid_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'stupid', lambda x: x + '.avi')
def test_no_season_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'no_season', lambda x: x + '.avi')
def test_no_season_general_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'no_season_general', lambda x: x + '.avi')
def test_no_season_multi_ep_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'no_season_multi_ep', lambda x: x + '.avi')
def test_season_only_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'season_only', lambda x: x + '.avi')
def test_scene_date_format_file_names(self):
np = parser.NameParser(testing=True)
self._test_names(np, 'scene_date_format', lambda x: x + '.avi')
def test_combination_names(self):
pass
def test_anime_ultimate(self):
np = parser.NameParser(False, TVShow(is_anime=True), testing=True)
self._test_names(np, 'anime_ultimate')
def test_anime_standard(self):
np = parser.NameParser(False, TVShow(is_anime=True), testing=True)
self._test_names(np, 'anime_standard')
def test_anime_ep_name(self):
np = parser.NameParser(False, TVShow(is_anime=True), testing=True)
self._test_names(np, 'anime_ep_name')
def test_anime_slash(self):
np = parser.NameParser(False, TVShow(is_anime=True), testing=True)
self._test_names(np, 'anime_slash')
def test_anime_codec(self):
np = parser.NameParser(False, TVShow(is_anime=True), testing=True)
self._test_names(np, 'anime_standard_codec')
def test_anime_and_normal(self):
np = parser.NameParser(False, TVShow(is_anime=True), testing=True)
self._test_names(np, 'anime_and_normal')
def test_anime_and_normal_reverse(self):
np = parser.NameParser(False, TVShow(is_anime=True), testing=True)
self._test_names(np, 'anime_and_normal_reverse')
def test_anime_and_normal_front(self):
np = parser.NameParser(False, TVShow(is_anime=True), testing=True)
self._test_names(np, 'anime_and_normal_front')
def test_anime_bare(self):
np = parser.NameParser(False, TVShow(is_anime=True), testing=True)
self._test_names(np, 'anime_bare')
class TVShow(object):
def __init__(self, is_anime=False):
self.is_anime = is_anime
if __name__ == '__main__':
if len(sys.argv) > 1:
suite = unittest.TestLoader().loadTestsFromName('name_parser_tests.BasicTests.test_' + sys.argv[1])
else:
suite = unittest.TestLoader().loadTestsFromTestCase(BasicTests)
unittest.TextTestRunner(verbosity=2).run(suite)
suite = unittest.TestLoader().loadTestsFromTestCase(ComboTests)
unittest.TextTestRunner(verbosity=2).run(suite)
suite = unittest.TestLoader().loadTestsFromTestCase(UnicodeTests)
unittest.TextTestRunner(verbosity=2).run(suite)
suite = unittest.TestLoader().loadTestsFromTestCase(FailureCaseTests)
unittest.TextTestRunner(verbosity=2).run(suite)
| gpl-3.0 | -5,314,493,947,444,374,000 | 47.991468 | 120 | 0.600718 | false |
ganga-devs/ganga | ganga/GangaDirac/Lib/Server/DiracCommands.py | 1 | 18300 | # Dirac commands
#/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
@diracCommand
def getJobGroupJobs(jg):
''' Return jobs in a group'''
return dirac.selectJobs(jobGroup=jg)
@diracCommand
def kill(id):
''' Kill a given DIRAC Job ID within DIRAC '''
return dirac.deleteJob(id)
@diracCommand
def peek(id):
''' Peek at the DIRAC Job id and return what we saw '''
return dirac.peekJob(id)
@diracCommand
def getJobCPUTime(id):
''' Get the amount of CPU time taken by the DIRAC Job id'''
return dirac.getJobCPUTime(id)
@diracCommand
def reschedule(id):
''' Reschedule within DIRAC a given DIRAC Job id'''
return dirac.reschedule(id)
@diracCommand
def submit(djob, mode='wms'):
''' Submit a DIRAC job given by the jdl:djob with a given mode '''
return dirac.submitJob(djob, mode=mode)
@diracCommand
def ping(system, service):
''' Ping a given service on a given system running DIRAC '''
return dirac.ping(system, service)
@diracCommand
def removeFile(lfn):
''' Remove a given LFN from the DFC'''
ret = {}
if type(lfn) is list:
for l in lfn:
ret.update(dirac.removeFile(l))
else:
ret.update(dirac.removeFile(lfn))
return ret
@diracCommand
def getMetadata(lfn):
    ''' Return the metadata associated with a given LFN'''
return dirac.getLfnMetadata(lfn)
@diracCommand
def getReplicas(lfns):
''' Return the locations of the replicas of a given LFN in a dict format, SE: location '''
return dirac.getReplicas(lfns, active=True, preferDisk = True)
@diracCommand
def getReplicasForJobs(lfns):
''' Return the locations of the replicas of a given LFN in a dict format, SE: location.
        This is for use in the splitter to exclude copies at SEs that are not to be used for user jobs '''
return dirac.getReplicasForJobs(lfns)
@diracCommand
def getAccessURL(lfn, SE, protocol=False):
''' Return the access URL for the given LFN, storage element and protocol. The protocol should be in the form of a list '''
return dirac.getAccessURL(lfn, SE, False, protocol)
@diracCommand
def getFile(lfns, destDir=''):
''' Put the physical file behind the LFN in the destDir path'''
return dirac.getFile(lfns, destDir=destDir)
@diracCommand
def replicateFile(lfn, destSE, srcSE='', locCache=''):
''' Replicate a given LFN from a srcSE to a destSE'''
res = dirac.replicateFile(lfn, destSE, srcSE, locCache)
return res
@diracCommand
def removeReplica(lfn, sE):
''' Remove the physical files and LFN from the DFC'''
return dirac.removeReplica(lfn, sE)
@diracCommand
def getOutputData(id, outputFiles='', destinationDir=''):
    ''' Return the output data of a requested DIRAC Job id, placing outputFiles in a given destinationDir '''
return dirac.getJobOutputData(id, outputFiles, destinationDir)
@diracCommand
def splitInputData(files, files_per_job):
    ''' Split a list of files into a list of lists of smaller files (each below files_per_job in length) and return the list of lists'''
return dirac.splitInputData(files, files_per_job)
@diracCommand
def getInputDataCatalog(lfns, site, xml_file):
''' Get the XML describing the given LFNs at a given site'''
return dirac.getInputDataCatalog(lfns, site, xml_file)
@diracCommand
def uploadFile(lfn, file, diracSEs, guid=None):
    ''' Upload a given file to an LFN, trying each SE in diracSEs in turn and returning after the first successful upload. Use the given guid if provided'''
outerr = {}
for se in diracSEs:
result = dirac.addFile(lfn, file, se, guid)
if result.get('OK', False) and lfn in result.get('Value', {'Successful': {}})['Successful']:
result['Value']['Successful'][lfn].update({'DiracSE': se})
md = dirac.getLfnMetadata(lfn)
if md.get('OK', False) and lfn in md.get('Value', {'Successful': {}})['Successful']:
guid = md['Value']['Successful'][lfn]['GUID']
result['Value']['Successful'][lfn].update({'GUID': guid})
return result
outerr.update({se: result})
return outerr
@diracCommand
def addFile(lfn, file, diracSE, guid):
    ''' Upload a given file to an LFN with a single replica placed at diracSE. Use the given guid if provided'''
return dirac.addFile(lfn, file, diracSE, guid)
@diracCommand
def getOutputSandbox(id, outputDir=os.getcwd(), unpack=True, oversized=True, noJobDir=True, pipe_out=True):
'''
Get the outputsandbox and return the output from Dirac to the calling function
id: the DIRAC jobid of interest
    outputDir: output directory locally on disk to use
    oversized: is this output sandbox oversized (this will be modified)
    noJobDir: should we create a folder with the DIRAC job ID?
    pipe_out: should the Dirac output be piped out (True) or returned as a python object (False)
unpack: should the sandbox be untarred when downloaded'''
result = dirac.getOutputSandbox(id, outputDir, oversized, noJobDir, unpack)
if result is not None and result.get('OK', False):
if not noJobDir:
tmpdir = os.path.join(outputDir, str(id))
os.system('mv -f %s/* %s/. ; rm -rf %s' % (tmpdir, outputDir, tmpdir))
os.system('for file in $(ls %s/*Ganga_*.log); do ln -s ${file} %s/stdout; break; done' % (outputDir, outputDir))
#So the download failed. Maybe the sandbox was oversized and stored on the grid. Check in the job parameters and download it
else:
parameters = dirac.getJobParameters(id)
if parameters is not None and parameters.get('OK', False):
parameters = parameters['Value']
if 'OutputSandboxLFN' in parameters:
result = dirac.getFile(parameters['OutputSandboxLFN'], destDir=outputDir)
dirac.removeFile(parameters['OutputSandboxLFN'])
return result
@diracCommand
def getOutputDataInfo(id, pipe_out=True):
''' Get information on the output data generated by a job of ID and pipe it out or return it'''
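    # Example of the returned structure (file name, LFN and SE below are illustrative placeholders):
    #   {'histos.root': {'LFN': '/some/lfn/path/histos.root',
    #                    'GUID': '...', 'LOCATIONS': ['SOME-SE']}}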
ret = {}
result = getOutputDataLFNs(id, pipe_out=False)
if result.get('OK', False) and 'Value' in result:
for lfn in result.get('Value', []):
file_name = os.path.basename(lfn)
ret[file_name] = {}
ret[file_name]['LFN'] = lfn
md = dirac.getLfnMetadata(lfn)
if md.get('OK', False) and lfn in md.get('Value', {'Successful': {}})['Successful']:
ret[file_name]['GUID'] = md['Value']['Successful'][lfn]['GUID']
# this catches if fail upload, note lfn still exists in list as
# dirac tried it
elif md.get('OK', False) and lfn in md.get('Value', {'Failed': {}})['Failed']:
ret[file_name]['LFN'] = '###FAILED###'
ret[file_name]['LOCATIONS'] = md['Value']['Failed'][lfn]
ret[file_name]['GUID'] = 'NotAvailable'
continue
rp = dirac.getReplicas(lfn)
if rp.get('OK', False) and lfn in rp.get('Value', {'Successful': {}})['Successful']:
ret[file_name]['LOCATIONS'] = rp['Value']['Successful'][lfn].keys()
return ret
# could shrink this with dirac.getJobOutputLFNs from ##dirac
@diracCommand
def getOutputDataLFNs(id, pipe_out=True):
''' Get the outputDataLFN which have been generated by a Dirac job of ID and pipe it out or return it'''
parameters = dirac.getJobParameters(id)
lfns = []
ok = False
message = 'The outputdata LFNs could not be found.'
if parameters is not None and parameters.get('OK', False):
parameters = parameters['Value']
# remove the sandbox if it has been uploaded
sandbox = None
if 'OutputSandboxLFN' in parameters:
sandbox = parameters['OutputSandboxLFN']
# now find out about the outputdata
if 'UploadedOutputData' in parameters:
lfn_list = parameters['UploadedOutputData']
import re
lfns = re.split(',\s*', lfn_list)
if sandbox is not None and sandbox in lfns:
lfns.remove(sandbox)
ok = True
elif parameters is not None and 'Message' in parameters:
message = parameters['Message']
result = {'OK': ok}
if ok:
result['Value'] = lfns
else:
result['Message'] = message
return result
@diracCommand
def normCPUTime(id, pipe_out=True):
    ''' Get the normalised CPU time that has been used by a DIRAC job of ID and pipe it out or return it'''
parameters = dirac.getJobParameters(id)
ncput = None
if parameters is not None and parameters.get('OK', False):
parameters = parameters['Value']
if 'NormCPUTime(s)' in parameters:
ncput = parameters['NormCPUTime(s)']
return ncput
@diracCommand
def finished_job(id, outputDir=os.getcwd(), unpack=True, oversized=True, noJobDir=True, downloadSandbox = True):
    ''' Nesting function to reduce the number of calls made against DIRAC when finalising a job; takes arguments such as getOutputSandbox.
    Returns a tuple of (normalised CPU time, output sandbox result, output data info dict, completed-state time)'''
out_cpuTime = normCPUTime(id, pipe_out=False)
if downloadSandbox:
out_sandbox = getOutputSandbox(id, outputDir, unpack, oversized, noJobDir, pipe_out=False)
else:
out_sandbox = None
out_dataInfo = getOutputDataInfo(id, pipe_out=False)
outStateTime = {'completed' : getStateTime(id, 'completed', pipe_out=False)}
return (out_cpuTime, out_sandbox, out_dataInfo, outStateTime)
@diracCommand
def finaliseJobs(inputDict, statusmapping, downloadSandbox=True, oversized=True, noJobDir=True):
    ''' A function to gather everything needed to finalise a whole bunch of jobs. Returns a dict of job information and a dict of statuses.'''
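    # For each DIRAC id the returned dict carries 'cpuTime', 'outSandbox',
    # 'outDataInfo' and 'outStateTime' entries; the bulk status list is returned alongside it.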
returnDict = {}
statusList = dirac.getJobStatus(list(inputDict))
for diracID in inputDict:
returnDict[diracID] = {}
returnDict[diracID]['cpuTime'] = normCPUTime(diracID, pipe_out=False)
if downloadSandbox:
returnDict[diracID]['outSandbox'] = getOutputSandbox(diracID, inputDict[diracID], oversized, noJobDir, pipe_out=False)
else:
returnDict[diracID]['outSandbox'] = None
returnDict[diracID]['outDataInfo'] = getOutputDataInfo(diracID, pipe_out=False)
returnDict[diracID]['outStateTime'] = {'completed' : getStateTime(diracID, 'completed', pipe_out=False)}
return returnDict, statusList
@diracCommand
def status(job_ids, statusmapping, pipe_out=True):
    '''Function to check the statuses and return the Ganga status of a job after mapping its DIRAC status to a Ganga one'''
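    # Each entry of the returned list is
    # [minor_status, dirac_status, dirac_site, ganga_status, app_status].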
# Translate between the many statuses in DIRAC and the few in Ganga
#return {'OK':True, 'Value':[['WIP', 'WIP', 'WIP', 'WIP', 'WIP']]}
result = dirac.getJobStatus(job_ids)
if not result['OK']:
return result
status_list = []
bulk_status = result['Value']
for _id in job_ids:
job_status = bulk_status.get(_id, {})
minor_status = job_status.get('MinorStatus', None)
dirac_status = job_status.get('Status', None)
dirac_site = job_status.get('Site', None)
ganga_status = statusmapping.get(dirac_status, None)
if ganga_status is None:
ganga_status = 'failed'
dirac_status = 'Unknown: No status for Job'
#if dirac_status == 'Completed' and (minor_status not in ['Pending Requests']):
# ganga_status = 'running'
if minor_status in ['Uploading Output Data']:
ganga_status = 'running'
try:
from DIRAC.Core.DISET.RPCClient import RPCClient
monitoring = RPCClient('WorkloadManagement/JobMonitoring')
app_status = monitoring.getJobAttributes(_id)['Value']['ApplicationStatus']
except:
app_status = "unknown ApplicationStatus"
status_list.append([minor_status, dirac_status, dirac_site, ganga_status, app_status])
return status_list
@diracCommand
def getStateTime(id, status, pipe_out=True):
    ''' Return the state time from DIRAC corresponding to DIRAC Job transitions'''
log = dirac.getJobLoggingInfo(id)
if 'Value' not in log:
return None
L = log['Value']
checkstr = ''
if status == 'running':
checkstr = 'Running'
elif status == 'completed':
checkstr = 'Done'
elif status == 'completing':
checkstr = 'Completed'
elif status == 'failed':
checkstr = 'Failed'
else:
checkstr = ''
if checkstr == '':
print("%s" % None)
return
for l in L:
if checkstr in l[0]:
T = datetime.datetime(*(time.strptime(l[3], "%Y-%m-%d %H:%M:%S")[0:6]))
return T
return None
@diracCommand
def getBulkStateTime(job_ids, status, pipe_out=True):
''' Function to repeatedly call getStateTime for multiple Dirac Job id and return the result in a dictionary '''
result = {}
for this_id in job_ids:
result[this_id] = getStateTime(this_id, status, pipe_out=False)
return result
@diracCommand
def monitorJobs(job_ids, status_mapping, pipe_out=True):
''' This combines 'status' and 'getBulkStateTime' into 1 function call for monitoring
'''
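    # Group the job ids by their freshly mapped Ganga status so the state times
    # can be fetched in bulk, once per status.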
status_info = status(job_ids, status_mapping, pipe_out=False)
state_job_status = {}
for job_id, this_stat_info in zip(job_ids, status_info):
if this_stat_info:
update_status = this_stat_info[3]
if update_status not in state_job_status:
state_job_status[update_status] = []
state_job_status[update_status].append(job_id)
state_info = {}
for this_status, these_jobs in state_job_status.items():
state_info[this_status] = getBulkStateTime(these_jobs, this_status, pipe_out=False)
return (status_info, state_info)
@diracCommand
def timedetails(id):
''' Function to return the getJobLoggingInfo for a DIRAC Job of id'''
log = dirac.getJobLoggingInfo(id)
d = {}
for i in range(0, len(log['Value'])):
d[i] = log['Value'][i]
return d
# DiracAdmin commands
#/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
@diracCommand
def getJobPilotOutput(id, dir):
''' Get the output of the DIRAC pilot that this job was running on and place it in dir'''
pwd = os.getcwd()
try:
os.chdir(dir)
os.system('rm -f pilot_%d/std.out && rmdir pilot_%d ' % (id, id))
result = DiracAdmin().getJobPilotOutput(id)
finally:
os.chdir(pwd)
return result
@diracCommand
def getServicePorts():
''' Get the service ports from the DiracAdmin based upon the Dirac config'''
return DiracAdmin().getServicePorts()
@diracCommand
def isSEArchive(se):
''' Ask if the specified SE is for archive '''
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
return DMSHelpers().isSEArchive(se)
@diracCommand
def getSitesForSE(se):
''' Get the Sites associated with this SE'''
from DIRAC.Core.Utilities.SiteSEMapping import getSitesForSE
result = getSitesForSE(storageElement=se)
return result
@diracCommand
def getSEsForSite(site):
''' Get the list of SE associated with this site'''
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite
result = getSEsForSite(site)
return result
@diracCommand
def getSESiteMapping():
'''Get the mapping of SEs and sites'''
from DIRAC.Core.Utilities.SiteSEMapping import getSESiteMapping
result = getSESiteMapping()
return result
@diracCommand
def checkSEStatus(se, access = 'Write'):
''' returns the value of a certain SE status flag (access or other)
param se: Storage Element name
type se: string
param access: type of access
type access: string in ('Read', 'Write', 'Remove', 'Check')
returns: True or False
'''
result = dirac.checkSEAccess(se, access)
return result
@diracCommand
def listFiles(baseDir, minAge = None):
''' Return a list of LFNs for files stored on the grid in the argument
directory and its subdirectories
param baseDir: Top directory to begin search
type baseDir: string
param minAge: minimum age of files to be returned
type minAge: string format: "W:D:H"
'''
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
fc = FileCatalog()
from datetime import datetime, timedelta
withMetaData = False
cutoffTime = datetime.utcnow()
import re
    r = re.compile(r'\d:\d:\d')
    if minAge is not None and r.match(minAge):
withMetaData = True
timeList = minAge.split(':')
timeLimit = timedelta(weeks = int(timeList[0]), days = int(timeList[1]), hours = int(timeList[2]))
cutoffTime = datetime.utcnow() - timeLimit
baseDir = baseDir.rstrip('/')
activeDirs = [baseDir]
allFiles = []
emptyDirs = []
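    # Iterative walk of the catalogue: pop a directory off the stack, list it,
    # collect its files and push any subdirectories that pass the age cut.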
while len(activeDirs) > 0:
currentDir = activeDirs.pop()
res = fc.listDirectory(currentDir, withMetaData, timeout = 360)
if not res['OK']:
return "Error retrieving directory contents", "%s %s" % ( currentDir, res['Message'] )
elif currentDir in res['Value']['Failed']:
return "Error retrieving directory contents", "%s %s" % ( currentDir, res['Value']['Failed'][currentDir] )
else:
dirContents = res['Value']['Successful'][currentDir]
subdirs = dirContents['SubDirs']
files = dirContents['Files']
if not subdirs and not files:
emptyDirs.append( currentDir )
else:
for subdir in sorted( subdirs, reverse=True):
if (not withMetaData) or subdirs[subdir]['CreationDate'] < cutoffTime:
activeDirs.append(subdir)
for filename in sorted(files):
fileOK = False
if (not withMetaData) or files[filename]['MetaData']['CreationDate'] < cutoffTime:
fileOK = True
if not fileOK:
files.pop(filename)
allFiles += sorted(files)
return allFiles
| gpl-2.0 | -7,655,071,968,776,760,000 | 35.094675 | 139 | 0.640109 | false |
rafaelvieiras/script.pseudotv.live | resources/lib/ChannelListThread.py | 1 | 9795 | # Copyright (C) 2011 Jason Anderson
#
#
# This file is part of PseudoTV.
#
# PseudoTV is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PseudoTV is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PseudoTV. If not, see <http://www.gnu.org/licenses/>.
import xbmc, xbmcgui, xbmcaddon
import subprocess, os
import time, threading
import datetime
import sys, re
import random, traceback
from ChannelList import ChannelList
from Channel import Channel
from Globals import *
from Artdownloader import *
class ChannelListThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.myOverlay = None
sys.setcheckinterval(25)
self.chanlist = ChannelList()
self.paused = False
self.fullUpdating = True
self.Artdownloader = Artdownloader()
def log(self, msg, level = xbmc.LOGDEBUG):
log('ChannelListThread: ' + msg, level)
def run(self):
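        # Phase 1: background-build any channels the overlay could not validate,
        # then Phase 2: loop, topping up each channel's playlist and sleeping
        # between passes (30 minutes as master, 5 minutes otherwise).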
self.log("Starting")
self.chanlist.exitThread = False
self.chanlist.readConfig()
self.chanlist.sleepTime = 0.1
if self.myOverlay == None:
self.log("Overlay not defined. Exiting.")
return
self.chanlist.myOverlay = self.myOverlay
self.fullUpdating = (self.myOverlay.backgroundUpdating == 0)
validchannels = 0
for i in range(self.myOverlay.maxChannels):
self.chanlist.channels.append(Channel())
if self.myOverlay.channels[i].isValid:
validchannels += 1
# Don't load invalid channels if minimum threading mode is on
if self.fullUpdating and self.myOverlay.isMaster:
if validchannels < self.chanlist.enteredChannelCount:
title = 'PseudoTV Live, Background Loading...'
xbmc.executebuiltin('XBMC.Notification(%s, %s, %s)' % (title, 4000 , THUMB))
for i in range(self.myOverlay.maxChannels):
if self.myOverlay.channels[i].isValid == False:
while True:
if self.myOverlay.isExiting:
self.log("Closing thread")
return
time.sleep(1)
if self.paused == False:
break
self.chanlist.channels[i].setAccessTime(self.myOverlay.channels[i].lastAccessTime)
try:
if self.chanlist.setupChannel(i + 1, True, True, False) == True:
while self.paused:
if self.myOverlay.isExiting:
self.log("IsExiting")
return
time.sleep(1)
self.myOverlay.channels[i] = self.chanlist.channels[i]
if self.myOverlay.channels[i].isValid == True:
title = "PseudoTV Live, Channel " + str(i + 1) + " Added"
xbmc.executebuiltin('XBMC.Notification(%s, %s, %s)' % (title, 4000, THUMB))
except Exception,e:
self.log("Unknown Channel Creation Exception", xbmc.LOGERROR)
self.log(traceback.format_exc(), xbmc.LOGERROR)
return
REAL_SETTINGS.setSetting('ForceChannelReset', 'false')
self.chanlist.sleepTime = 0.3
if REAL_SETTINGS.getSetting("ArtService_Enabled") == "true":
InfoTimer = INFOBAR_TIMER[int(REAL_SETTINGS.getSetting('InfoTimer'))]
self.ArtServiceThread = threading.Timer(float(InfoTimer), self.Artdownloader.ArtService)
self.ArtServiceThread.name = "ArtServiceThread"
self.ArtServiceThread.start()
while True:
for i in range(self.myOverlay.maxChannels):
modified = True
while modified == True and self.myOverlay.channels[i].getTotalDuration() < PREP_CHANNEL_TIME and self.myOverlay.channels[i].Playlist.size() < 16288:
# If minimum updating is on, don't attempt to load invalid channels
if self.fullUpdating == False and self.myOverlay.channels[i].isValid == False and self.myOverlay.isMaster:
break
modified = False
if self.myOverlay.isExiting:
self.log("Closing thread")
return
time.sleep(2)
curtotal = self.myOverlay.channels[i].getTotalDuration()
if self.myOverlay.isMaster:
if curtotal > 0:
# When appending, many of the channel variables aren't set, so copy them over.
# This needs to be done before setup since a rule may use one of the values.
# It also needs to be done after since one of them may have changed while being setup.
self.chanlist.channels[i].playlistPosition = self.myOverlay.channels[i].playlistPosition
self.chanlist.channels[i].showTimeOffset = self.myOverlay.channels[i].showTimeOffset
self.chanlist.channels[i].lastAccessTime = self.myOverlay.channels[i].lastAccessTime
self.chanlist.channels[i].totalTimePlayed = self.myOverlay.channels[i].totalTimePlayed
self.chanlist.channels[i].isPaused = self.myOverlay.channels[i].isPaused
self.chanlist.channels[i].mode = self.myOverlay.channels[i].mode
# Only allow appending valid channels, don't allow erasing them
try:
self.chanlist.setupChannel(i + 1, True, False, True)
except Exception,e:
self.log("Unknown Channel Appending Exception", xbmc.LOGERROR)
self.log(traceback.format_exc(), xbmc.LOGERROR)
return
self.chanlist.channels[i].playlistPosition = self.myOverlay.channels[i].playlistPosition
self.chanlist.channels[i].showTimeOffset = self.myOverlay.channels[i].showTimeOffset
self.chanlist.channels[i].lastAccessTime = self.myOverlay.channels[i].lastAccessTime
self.chanlist.channels[i].totalTimePlayed = self.myOverlay.channels[i].totalTimePlayed
self.chanlist.channels[i].isPaused = self.myOverlay.channels[i].isPaused
self.chanlist.channels[i].mode = self.myOverlay.channels[i].mode
else:
try:
self.chanlist.setupChannel(i + 1, True, True, False)
except Exception,e:
self.log("Unknown Channel Modification Exception", xbmc.LOGERROR)
self.log(traceback.format_exc(), xbmc.LOGERROR)
return
else:
try:
# We're not master, so no modifications...just try and load the channel
self.chanlist.setupChannel(i + 1, True, False, False)
except Exception,e:
self.log("Unknown Channel Loading Exception", xbmc.LOGERROR)
self.log(traceback.format_exc(), xbmc.LOGERROR)
return
self.myOverlay.channels[i] = self.chanlist.channels[i]
if self.myOverlay.isMaster:
ADDON_SETTINGS.setSetting('Channel_' + str(i + 1) + '_time', str(self.myOverlay.channels[i].totalTimePlayed))
if self.myOverlay.channels[i].getTotalDuration() > curtotal and self.myOverlay.isMaster:
modified = True
# A do-while loop for the paused state
while True:
if self.myOverlay.isExiting:
self.log("Closing thread")
return
time.sleep(2)
if self.paused == False:
break
timeslept = 0
if self.fullUpdating == False and self.myOverlay.isMaster:
return
# If we're master, wait 30 minutes in between checks. If not, wait 5 minutes.
while (timeslept < 1800 and self.myOverlay.isMaster == True) or (timeslept < 300 and self.myOverlay.isMaster == False):
if self.myOverlay.isExiting:
self.log("IsExiting")
return
time.sleep(2)
timeslept += 2
self.log("All channels up to date. Exiting thread.")
def pause(self):
self.paused = True
self.chanlist.threadPaused = True
def unpause(self):
self.paused = False
self.chanlist.threadPaused = False
| gpl-3.0 | -5,893,234,249,640,738,000 | 44.347222 | 164 | 0.54099 | false |
AnoopAlias/nDeploy | scripts/update_cluster_ipmap.py | 1 | 1898 | #!/usr/bin/env python
import yaml
import argparse
import os
__author__ = "Anoop P Alias"
__copyright__ = "Copyright 2014, PiServe Technologies Pvt Ltd , India"
__license__ = "GPL"
__email__ = "[email protected]"
installation_path = "/opt/nDeploy" # Absolute Installation Path
cluster_config_file = installation_path+"/conf/ndeploy_cluster.yaml"
# Function defs
def update_ip_map(server, iphere, ipthere):
cluster_data_yaml = open(cluster_config_file, 'r')
cluster_data_yaml_parsed = yaml.safe_load(cluster_data_yaml)
cluster_data_yaml.close()
if cluster_data_yaml_parsed:
if server in cluster_data_yaml_parsed.keys():
connect_server_dict = cluster_data_yaml_parsed.get(server)
ipmap_dict = connect_server_dict.get("ipmap")
ipmap_dict[iphere] = ipthere
with open(cluster_config_file, 'w') as yaml_file:
yaml_file.write(yaml.dump(cluster_data_yaml_parsed, default_flow_style=False))
else:
mydict = {server: {'ipmap': {iphere: ipthere}}}
cluster_data_yaml_parsed.update(mydict)
with open(cluster_config_file, 'w') as yaml_file:
yaml_file.write(yaml.dump(cluster_data_yaml_parsed, default_flow_style=False))
else:
print("Invalid cluster data")
parser = argparse.ArgumentParser(description="create/update nDeploy-cluster ipmap")
parser.add_argument("slave_hostname")
parser.add_argument("ip_here")
parser.add_argument("remote_ip")
args = parser.parse_args()
server_key = args.slave_hostname
ip_here = args.ip_here
remote_ip = args.remote_ip
if os.path.isfile(cluster_config_file):
update_ip_map(server_key, ip_here, remote_ip)
else:
mydict = {server_key: {'ipmap': {ip_here: remote_ip}}}
with open(cluster_config_file, 'w') as cluster_conf:
cluster_conf.write(yaml.dump(mydict, default_flow_style=False))
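# A minimal sketch of the YAML this script produces, assuming the purely
# illustrative arguments "slave1.example.com 10.0.0.5 192.168.1.5":
#
#   slave1.example.com:
#     ipmap:
#       10.0.0.5: 192.168.1.5
#
# Later runs for the same slave_hostname go through update_ip_map() and only
# add or overwrite entries inside that host's "ipmap" mapping.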
| gpl-3.0 | 6,655,193,397,231,080,000 | 34.148148 | 94 | 0.674921 | false |
letouriste001/SmartForest_2.0 | python3.4Smartforest/lib/python3.4/site-packages/django/db/migrations/recorder.py | 1 | 2868 | from __future__ import unicode_literals
from django.apps.registry import Apps
from django.db import models
from django.db.utils import DatabaseError
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from .exceptions import MigrationSchemaMissing
class MigrationRecorder(object):
"""
Deals with storing migration records in the database.
Because this table is actually itself used for dealing with model
creation, it's the one thing we can't do normally via migrations.
We manually handle table creation/schema updating (using schema backend)
and then have a floating model to do queries with.
If a migration is unapplied its row is removed from the table. Having
a row in the table always means a migration is applied.
"""
@python_2_unicode_compatible
class Migration(models.Model):
app = models.CharField(max_length=255)
name = models.CharField(max_length=255)
applied = models.DateTimeField(default=now)
class Meta:
apps = Apps()
app_label = "migrations"
db_table = "django_migrations"
def __str__(self):
return "Migration %s for %s" % (self.name, self.app)
def __init__(self, connection):
self.connection = connection
@property
def migration_qs(self):
return self.Migration.objects.using(self.connection.alias)
def ensure_schema(self):
"""
Ensures the table exists and has the correct schema.
"""
# If the table's there, that's fine - we've never changed its schema
# in the codebase.
if self.Migration._meta.db_table in self.connection.introspection.table_names(self.connection.cursor()):
return
# Make the table
try:
with self.connection.schema_editor() as editor:
editor.create_model(self.Migration)
except DatabaseError as exc:
raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc)
def applied_migrations(self):
"""
Returns a set of (app, name) of applied migrations.
"""
self.ensure_schema()
return set(tuple(x) for x in self.migration_qs.values_list("app", "name"))
def record_applied(self, app, name):
"""
Records that a migration was applied.
"""
self.ensure_schema()
self.migration_qs.create(app=app, name=name)
def record_unapplied(self, app, name):
"""
Records that a migration was unapplied.
"""
self.ensure_schema()
self.migration_qs.filter(app=app, name=name).delete()
def flush(self):
"""
Deletes all migration records. Useful if you're testing migrations.
"""
self.migration_qs.all().delete()
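# A minimal usage sketch (kept as a comment so importing this module stays
# side-effect free); the app and migration names are illustrative assumptions:
#
#   from django.db import connection
#
#   recorder = MigrationRecorder(connection)
#   recorder.ensure_schema()
#   recorder.record_applied("myapp", "0001_initial")
#   assert ("myapp", "0001_initial") in recorder.applied_migrations()
#   recorder.record_unapplied("myapp", "0001_initial")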
| mit | 213,370,591,806,448,500 | 32.348837 | 112 | 0.642957 | false |
staffanm/layeredconfig | layeredconfig/dictsource.py | 1 | 1625 | # this should possibly be a abstract class as well
from . import ConfigSource
class DictSource(ConfigSource):
def __init__(self, **kwargs):
"""If your backend data is exposable as a python dict, you can
subclass from this class to avoid implementing :py:meth:`has`,
:py:meth:`get`, :py:meth:`keys`, :py:meth:`subsection` and
:py:meth:`subsections`. You only need to write
:py:meth:`__init__` (which should set ``self.source`` to that
exposed dict), and possibly :py:meth:`typed` and
:py:meth:`save`.
"""
super(DictSource, self).__init__(**kwargs)
self.source = {}
def subsections(self):
for (k, v) in self.source.items():
if isinstance(v, dict):
yield k
def keys(self):
for (k, v) in self.source.items():
if not isinstance(v, dict) and not isinstance(v, type):
yield k
def subsection(self, key):
# Make an object of the correct type
return self.__class__(defaults=self.source[key],
parent=self,
identifier=self.identifier)
def typed(self, key):
# if we have it, we can type it
return key in self.source and self.source[key] is not None
def has(self, key):
# should return true for real values only, not type placeholders or sub-dicts
return key in self.source and not isinstance(self.source[key], (type, dict))
def get(self, key):
return self.source[key]
def set(self, key, value):
self.source[key] = value
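# A minimal sketch of the subclassing pattern described in the class
# docstring above; the class name and the "defaults" handling are
# illustrative assumptions, not part of the library API.
class _ExampleDictSource(DictSource):
    def __init__(self, defaults=None, **kwargs):
        super(_ExampleDictSource, self).__init__(**kwargs)
        # Expose the backend data as a plain dict, which is all DictSource
        # needs for has/get/keys/subsection/subsections to work.
        self.source = dict(defaults) if defaults else {}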
| bsd-3-clause | 8,381,840,833,951,817,000 | 33.574468 | 85 | 0.580923 | false |
tgbugs/hypush | test/memex/models/user_identity_test.py | 1 | 6800 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
import sqlalchemy.exc
from hyputils.memex import models
from hyputils.memex._compat import PY2
class TestUserIdentity(object):
def test_you_can_save_and_then_retrieve_field_values(
self, db_session, matchers, user
):
user_identity_1 = models.UserIdentity(
provider="provider_1", provider_unique_id="1", user=user
)
user_identity_2 = models.UserIdentity(
provider="provider_1", provider_unique_id="2", user=user
)
user_identity_3 = models.UserIdentity(
provider="provider_2", provider_unique_id="3", user=user
)
db_session.add_all([user_identity_1, user_identity_2, user_identity_3])
db_session.flush()
user_identities = (
db_session.query(models.UserIdentity)
.order_by(models.UserIdentity.provider_unique_id)
.all()
)
# Auto incrementing unique IDs should have been generated for us.
assert type(user_identities[0].id) is int
assert type(user_identities[1].id) is int
assert type(user_identities[2].id) is int
# The provider strings that we gave should have been saved.
assert user_identities[0].provider == "provider_1"
assert user_identities[1].provider == "provider_1"
assert user_identities[2].provider == "provider_2"
# The provider_unique_id strings that we gave should have been saved.
assert user_identities[0].provider_unique_id == "1"
assert user_identities[1].provider_unique_id == "2"
assert user_identities[2].provider_unique_id == "3"
def test_provider_cant_be_null(self, db_session, user):
db_session.add(models.UserIdentity(provider_unique_id="1", user=user))
with pytest.raises(
sqlalchemy.exc.IntegrityError,
match='null value in column "provider" violates not-null constraint',
):
db_session.flush()
def test_provider_id_cant_be_null(self, db_session, user):
db_session.add(models.UserIdentity(provider="provider", user=user))
with pytest.raises(
sqlalchemy.exc.IntegrityError,
match='null value in column "provider_unique_id" violates not-null constraint',
):
db_session.flush()
def test_user_cant_be_null(self, db_session):
db_session.add(models.UserIdentity(provider="provider", provider_unique_id="1"))
with pytest.raises(
sqlalchemy.exc.IntegrityError,
match='null value in column "user_id" violates not-null constraint',
):
db_session.flush()
def test_two_cant_have_the_same_provider_and_provider_id(
self, db_session, factories
):
db_session.add_all(
[
models.UserIdentity(
provider="provider", provider_unique_id="id", user=factories.User()
),
models.UserIdentity(
provider="provider", provider_unique_id="id", user=factories.User()
),
]
)
with pytest.raises(
sqlalchemy.exc.IntegrityError,
match='duplicate key value violates unique constraint "uq__user_identity__provider"',
):
db_session.flush()
def test_one_user_can_have_the_same_provider_id_from_different_providers(
self, db_session, user
):
db_session.add_all(
[
models.UserIdentity(
provider="provider_1", provider_unique_id="id", user=user
),
models.UserIdentity(
provider="provider_2", provider_unique_id="id", user=user
),
]
)
db_session.flush()
def test_different_users_can_have_the_same_provider_id_from_different_providers(
self, db_session, factories
):
db_session.add_all(
[
models.UserIdentity(
provider="provider_1",
provider_unique_id="id",
user=factories.User(),
),
models.UserIdentity(
provider="provider_2",
provider_unique_id="id",
user=factories.User(),
),
]
)
db_session.flush()
def test_removing_a_user_identity_from_a_user_deletes_the_user_identity_from_the_db(
self, db_session, user
):
# Add a couple of noise UserIdentity's. These should not be removed
# from the DB.
models.UserIdentity(provider="provider", provider_unique_id="1", user=user)
models.UserIdentity(provider="provider", provider_unique_id="2", user=user)
# The UserIdentity that we are going to remove.
user_identity = models.UserIdentity(
provider="provider", provider_unique_id="3", user=user
)
user.identities.remove(user_identity)
assert user_identity not in db_session.query(models.UserIdentity).all()
def test_deleting_a_user_identity_removes_it_from_its_user(self, db_session, user):
# Add a couple of noise UserIdentity's. These should not be removed
# from user.identities.
models.UserIdentity(provider="provider", provider_unique_id="1", user=user)
models.UserIdentity(provider="provider", provider_unique_id="2", user=user)
# The UserIdentity that we are going to remove.
user_identity = models.UserIdentity(
provider="provider", provider_unique_id="3", user=user
)
db_session.commit()
db_session.delete(user_identity)
db_session.refresh(user) # Make sure user.identities is up to date.
assert user_identity not in user.identities
def test_deleting_a_user_deletes_all_its_user_identities(self, db_session, user):
models.UserIdentity(provider="provider", provider_unique_id="1", user=user)
models.UserIdentity(provider="provider", provider_unique_id="2", user=user)
db_session.commit()
db_session.delete(user)
assert db_session.query(models.UserIdentity).count() == 0
def test_repr(self):
user_identity = models.UserIdentity(
provider="provider_1", provider_unique_id="1"
)
expected_repr = "UserIdentity(provider='provider_1', provider_unique_id='1')"
if PY2:
expected_repr = (
"UserIdentity(provider=u'provider_1', " "provider_unique_id=u'1')"
)
assert repr(user_identity) == expected_repr
@pytest.fixture
def user(self, factories):
return factories.User()
| mit | -8,386,404,518,537,078,000 | 34.978836 | 97 | 0.596176 | false |
samervin/arctic-scavengers-randomizer | arctic_cards/leaders.py | 1 | 3619 | # Fields
NAME = 'name'
SET = 'set'
USES_REFUGEES = 'uses-refugees'
TEXT = 'text'
# Set values
HQ_EXP = 'hq'
RECON_EXP = 'recon'
# Information not strictly contained on the card
COMMENT = 'comment'
class Leaders:
ALL_LEADERS = [
{
NAME: 'The Peacemaker',
SET: HQ_EXP,
USES_REFUGEES: True,
TEXT: 'Each round you may play 1 Refugee to increase the power of another tribe member\s hunt or dig actions by +2.'
},
{
NAME: 'The Gangster',
SET: HQ_EXP,
USES_REFUGEES: True,
TEXT: 'Your Refugees have a fight of 0 and they count as 2 people for the purpose of breaking tied skirmishes.'
},
{
NAME: 'The Butcher',
SET: HQ_EXP,
TEXT: 'Each round you may kill 1 of your tribe members (remove the card permanently from play) and sell his/her internal organs for 1 food and 1 med.'
},
{
NAME: 'The Fanatic',
SET: HQ_EXP,
USES_REFUGEES: True,
TEXT: 'Each round you may use 1 Refugee from your hand as a suicide bomber against an opponent. '
'Discard 1 of your opponent\'s revealed cards (your choice), the Refugee dies in the process (remove card from play).'
},
{
NAME: 'The Organizer',
SET: HQ_EXP,
USES_REFUGEES: True,
TEXT: 'Each round you may play 1 Refugee to perform a draw of 2, but only keep 1. '
'No other cards may be played to modify this draw and you may not perform another draw this round.'
},
{
NAME: 'The Cannibal',
SET: HQ_EXP,
TEXT: 'Each round you may cannibalize 1 tribe member for 3 food (and subsequently remove that card from play). '
'You may not combine food from hunting or a garden when hiring with cannibalized food.'
},
{
NAME: 'The Sergent at Arms',
SET: HQ_EXP,
TEXT: 'You are immune to the disarm action, preventing saboteurs from discarding your tools. '
'When hiring saboteurs, you pay no food (cost for you is 1 med).',
COMMENT: 'This card is misspelled as printed: the correct spelling is Sergeant.'
},
{
NAME: 'The Mentor',
SET: HQ_EXP,
USES_REFUGEES: True,
TEXT: 'Each round you may play 1 Refugee card to grant another tribe member a +1 to any action.'
},
{
NAME: 'The Excavator',
SET: HQ_EXP,
USES_REFUGEES: True,
TEXT: 'All of your Refugees have a dig of 1. '
'If a Refugee uses a digging tool (i.e. shovel or a pick axe), ignore the tool\'s normal bonus and add +1 to the score.'
},
{
NAME: 'The Ranger',
SET: HQ_EXP,
USES_REFUGEES: True,
TEXT: 'All of your Refugees and Tribe Families have a hunt of 1.'
},
{
NAME: 'The Swindler',
SET: RECON_EXP,
USES_REFUGEES: True,
TEXT: 'Once per turn, you may discard 1 Refugee to persuade a mercenary into joining your tribe for 1 less food '
'or discard two Refugees to reduce the price by 1 med.'
},
{
NAME: 'The Yardmaster',
SET: RECON_EXP,
TEXT: 'Once per turn, you may peek at the top 2 cards of the Junkyard. '
'Return both of them to the top or bottom of the Junkyard.'
}
]
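# A minimal sketch (illustrative helper names) of querying the card data
# above; nothing else in this module depends on these helpers.
def _hq_leader_names():
    return [card[NAME] for card in Leaders.ALL_LEADERS if card[SET] == HQ_EXP]
def _refugee_friendly_leader_names():
    return [card[NAME] for card in Leaders.ALL_LEADERS if card.get(USES_REFUGEES)]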
| mit | 6,301,782,325,497,952,000 | 37.913978 | 162 | 0.546284 | false |
Samuel789/MediPi | MedManagementWeb/env/lib/python3.5/site-packages/Crypto/Cipher/DES.py | 1 | 7100 | # -*- coding: utf-8 -*-
#
# Cipher/DES.py : DES
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""DES symmetric cipher
DES `(Data Encryption Standard)`__ is a symmetric block cipher standardized
by NIST_ . It has a fixed data block size of 8 bytes.
Its keys are 64 bits long, even though 8 bits were used for integrity (now they
are ignored) and do not contribute to security. The effective key length is
therefore 56 bits only.
DES is cryptographically secure, but its key length is too short by today's
standards and it could be brute forced with some effort.
**Use AES, not DES. This module is provided only for legacy purposes.**
As an example, encryption can be done as follows:
>>> from Crypto.Cipher import DES
>>>
>>> key = b'-8B key-'
>>> cipher = DES.new(key, DES.MODE_OFB)
>>> plaintext = b'sona si latine loqueris '
>>> msg = cipher.iv + cipher.encrypt(plaintext)
.. __: http://en.wikipedia.org/wiki/Data_Encryption_Standard
.. _NIST: http://csrc.nist.gov/publications/fips/fips46-3/fips46-3.pdf
:undocumented: __package__
"""
import sys
from Crypto.Cipher import _create_cipher
from Crypto.Util.py3compat import byte_string
from Crypto.Util._raw_api import (load_pycryptodome_raw_lib,
VoidPointer, SmartPointer,
c_size_t, expect_byte_string)
_raw_des_lib = load_pycryptodome_raw_lib(
"Crypto.Cipher._raw_des",
"""
int DES_start_operation(const uint8_t key[],
size_t key_len,
void **pResult);
int DES_encrypt(const void *state,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int DES_decrypt(const void *state,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int DES_stop_operation(void *state);
""")
def _create_base_cipher(dict_parameters):
"""This method instantiates and returns a handle to a low-level
base cipher. It will absorb named parameters in the process."""
try:
key = dict_parameters.pop("key")
except KeyError:
raise TypeError("Missing 'key' parameter")
expect_byte_string(key)
if len(key) != key_size:
raise ValueError("Incorrect DES key length (%d bytes)" % len(key))
start_operation = _raw_des_lib.DES_start_operation
stop_operation = _raw_des_lib.DES_stop_operation
cipher = VoidPointer()
result = start_operation(key,
c_size_t(len(key)),
cipher.address_of())
if result:
raise ValueError("Error %X while instantiating the DES cipher"
% result)
return SmartPointer(cipher.get(), stop_operation)
def new(key, mode, *args, **kwargs):
"""Create a new DES cipher
:Parameters:
key : byte string
The secret key to use in the symmetric cipher.
It must be 8 byte long. The parity bits will be ignored.
:Keywords:
mode : a *MODE_** constant
The chaining mode to use for encryption or decryption.
iv : byte string
(*Only* `MODE_CBC`, `MODE_CFB`, `MODE_OFB`, `MODE_OPENPGP`).
The initialization vector to use for encryption or decryption.
For `MODE_OPENPGP`, IV must be 8 bytes long for encryption
and 10 bytes for decryption (in the latter case, it is
actually the *encrypted* IV which was prefixed to the ciphertext).
For all other modes, it must be 8 bytes long.
If not provided, a random byte string is generated (you can read it
back via the ``iv`` attribute).
nonce : byte string
(*Only* `MODE_EAX` and `MODE_CTR`).
A mandatory value that must never be reused for any other encryption.
For `MODE_CTR`, its length must be in the range ``[0..7]``.
For `MODE_EAX`, there are no restrictions, but it is recommended to
use at least 16 bytes.
If not provided for `MODE_EAX`, a random byte string is generated (you
can read it back via the ``nonce`` attribute).
mac_len : integer
(*Only* `MODE_EAX`). Length of the authentication tag, in bytes.
It must be no larger than 8 (which is the default).
segment_size : integer
(*Only* `MODE_CFB`).The number of **bits** the plaintext and ciphertext
are segmented in. It must be a multiple of 8.
If not specified, it will be assumed to be 8.
initial_value : integer
(*Only* `MODE_CTR`). The initial value for the counter within
the counter block. By default it is 0.
:Return: a DES cipher, of the applicable mode:
- CBC_ mode
- CFB_ mode
- CTR_ mode
- EAX_ mode
- ECB_ mode
- OFB_ mode
- OpenPgp_ mode
.. _CBC: Crypto.Cipher._mode_cbc.CbcMode-class.html
.. _CFB: Crypto.Cipher._mode_cfb.CfbMode-class.html
.. _CTR: Crypto.Cipher._mode_ctr.CtrMode-class.html
.. _EAX: Crypto.Cipher._mode_eax.EaxMode-class.html
.. _ECB: Crypto.Cipher._mode_ecb.EcbMode-class.html
.. _OFB: Crypto.Cipher._mode_ofb.OfbMode-class.html
.. _OpenPgp: Crypto.Cipher._mode_openpgp.OpenPgpMode-class.html
"""
return _create_cipher(sys.modules[__name__], key, mode, *args, **kwargs)
#: Electronic Code Book (ECB). See `Crypto.Cipher._mode_ecb.EcbMode`.
MODE_ECB = 1
#: Cipher-Block Chaining (CBC). See `Crypto.Cipher._mode_cbc.CbcMode`.
MODE_CBC = 2
#: Cipher FeedBack (CFB). See `Crypto.Cipher._mode_cfb.CfbMode`.
MODE_CFB = 3
#: Output FeedBack (OFB). See `Crypto.Cipher._mode_ofb.OfbMode`.
MODE_OFB = 5
#: CounTer Mode (CTR). See `Crypto.Cipher._mode_ctr.CtrMode`.
MODE_CTR = 6
#: OpenPGP Mode. See `Crypto.Cipher._mode_openpgp.OpenPgpMode`.
MODE_OPENPGP = 7
#: EAX Mode. See `Crypto.Cipher._mode_eax.EaxMode`.
MODE_EAX = 9
#: Size of a data block (in bytes)
block_size = 8
#: Size of a key (in bytes)
key_size = 8
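# A minimal decryption sketch mirroring the encryption example in the module
# docstring (kept as a comment; "msg" is the docstring's iv-plus-ciphertext
# byte string):
#
#   from Crypto.Cipher import DES
#
#   key = b'-8B key-'
#   iv, ciphertext = msg[:block_size], msg[block_size:]
#   cipher = DES.new(key, DES.MODE_OFB, iv=iv)
#   plaintext = cipher.decrypt(ciphertext)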
| apache-2.0 | -5,694,647,605,484,890,000 | 35.787565 | 79 | 0.613521 | false |
MridulS/BinPy | BinPy/examples/source/Combinational/DEMUX.py | 1 | 1066 |
# coding: utf-8
# Example for DEMUX class.
# In[1]:
from __future__ import print_function
from BinPy.Combinational.combinational import *
# In[2]:
# Initializing the DEMUX class
# Must be a single input
demux = DEMUX(1)
# Put select lines
# Select Lines must be power of 2
demux.selectLines(0)
# Output of demux
print (demux.output())
# In[3]:
# Input changes
# Input at index 1 is changed to 0
demux.setInput(0, 0)
# New Output of the demux
print (demux.output())
# In[4]:
# Get Input States
print (demux.getInputStates())
# In[5]:
# Using Connectors as the input lines
# Take a Connector
conn = Connector()
# Set Output of demux to Connector conn
# sets conn as the output at index 0
demux.setOutput(0, conn)
# Put this connector as the input to gate1
gate1 = AND(conn, 0)
# Output of the gate1
print (gate1.output())
# In[6]:
# Changing select lines
# selects input line 2
demux.selectLine(0, 1)
# New output of demux
print (demux.output())
# In[7]:
# Information about demux instance can be found by
print (demux)
| bsd-3-clause | -8,725,187,990,451,781,000 | 10.714286 | 50 | 0.687617 | false |
harikishen/addons-server | src/olympia/amo/tasks.py | 1 | 2584 | import datetime
from django.core.mail import EmailMessage, EmailMultiAlternatives
import olympia.core.logger
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.amo.celery import task
from olympia.amo.utils import get_email_backend
from olympia.bandwagon.models import Collection
from olympia.stats.models import Contribution
log = olympia.core.logger.getLogger('z.task')
@task
def send_email(recipient, subject, message, from_email=None,
html_message=None, attachments=None, real_email=False,
cc=None, headers=None, fail_silently=False, async=False,
max_retries=None, reply_to=None, **kwargs):
backend = EmailMultiAlternatives if html_message else EmailMessage
connection = get_email_backend(real_email)
result = backend(subject, message, from_email, to=recipient, cc=cc,
connection=connection, headers=headers,
attachments=attachments, reply_to=reply_to)
if html_message:
result.attach_alternative(html_message, 'text/html')
try:
result.send(fail_silently=False)
return True
except Exception as e:
log.error('send_mail failed with error: %s' % e)
if async:
return send_email.retry(exc=e, max_retries=max_retries)
elif not fail_silently:
raise
else:
return False
@task
def set_modified_on_object(obj, **kw):
"""Sets modified on one object at a time."""
try:
log.info('Setting modified on object: %s, %s' %
(obj.__class__.__name__, obj.pk))
obj.update(modified=datetime.datetime.now())
except Exception, e:
log.error('Failed to set modified on: %s, %s - %s' %
(obj.__class__.__name__, obj.pk, e))
@task
def delete_logs(items, **kw):
log.info('[%s@%s] Deleting logs' % (len(items), delete_logs.rate_limit))
ActivityLog.objects.filter(pk__in=items).exclude(
action__in=amo.LOG_KEEP).delete()
@task
def delete_stale_contributions(items, **kw):
log.info('[%s@%s] Deleting stale contributions' %
(len(items), delete_stale_contributions.rate_limit))
Contribution.objects.filter(
transaction_id__isnull=True, pk__in=items).delete()
@task
def delete_anonymous_collections(items, **kw):
log.info('[%s@%s] Deleting anonymous collections' %
(len(items), delete_anonymous_collections.rate_limit))
Collection.objects.filter(type=amo.COLLECTION_ANONYMOUS,
pk__in=items).delete()
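# A minimal sketch of queueing these tasks; the addresses and objects are
# illustrative assumptions, and the usual celery .delay()/.apply_async()
# calling conventions apply:
#
#   send_email.delay(['[email protected]'], 'Subject', 'Body text',
#                    from_email='[email protected]', real_email=True)
#   set_modified_on_object.delay(some_addon)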
| bsd-3-clause | 7,679,414,389,111,565,000 | 33 | 76 | 0.64822 | false |
sctjkc01/ofCourse | ofcourse/participants.py | 1 | 3800 | import os
from datetime import datetime, date, timedelta
from urlparse import urlparse
import yaml
from flask import Blueprint, redirect
from flask.ext.mako import render_template
import ofcourse
from ofcourse.util import app_path, get_hw_keys
participants_bp = Blueprint('participants_bp',
__name__,
template_folder=app_path('templates'))
currentYear = str(date.today().year)
currentTerm = "fall" if date.today().month > 7 else "spring"
@participants_bp.route('/')
def participants_blank():
"""
This is the default landing
for the participants listing page.
It will list all of the participants
in the current term for HFOSS
"""
return participants_year_term(currentYear, currentTerm)
@participants_bp.route('/<year_or_nick>')
def participants_year(year_or_nick):
"""
This will get all the participants
within a given year
"""
p_url = find_participant(year_or_nick)
if p_url is not None:
# render individual page
return redirect(p_url)
# otherwise render as a year
return participants(year_or_nick + '/')
@participants_bp.route('/<year>/<term>')
def participants_year_term(year, term):
"""
This will get all the participants
within a given year and term
"""
return participants(year + '/' + term + '/')
@participants_bp.route('/all')
def participants_all():
    """
    This will get all the participants
    who have taken HFOSS
    """
    return participants('')
def participants(root_dir):
"""
Render the participants page,
which shows a directory of all
the students with their forge
links, blog posts, assignment
links, and etc.
"""
yaml_dir = app_path('people', root_dir)
student_data = []
for dirpath, dirnames, files in os.walk(yaml_dir):
dirpath = dirpath.rstrip("/")
for fname in sorted(files):
if fname.endswith('.yaml'):
with open(dirpath + '/' + fname) as students:
contents = yaml.safe_load(students)
contents['yaml'] = dirpath + '/' + fname
year_term_data = dirpath.split('/')
contents['participant_page'] = "{y}/{t}/{u}".format(
y=year_term_data[-2],
t=year_term_data[-1],
u=os.path.splitext(fname)[0]
)
for forge in contents['forges']:
url = urlparse(forge)
if "github.com" in url.netloc:
contents['github'] = url.path[1:]
contents['isActive'] = (currentYear in year_term_data and
currentTerm in year_term_data)
student_data.append(contents)
assignments = get_hw_keys()
elapsed = (datetime.today() - ofcourse.site.COURSE_START).total_seconds()
target_number = int(elapsed / timedelta(weeks=1).total_seconds() + 1 +
len(assignments))
return render_template(
'blogs.mak', name='mako',
student_data=student_data,
gravatar=ofcourse.site.gravatar,
target_number=target_number,
hw_keys=assignments
)
def find_participant(nick):
yaml_dir = app_path('people')
for dirpath, dirnames, files in os.walk(yaml_dir):
for fname in files:
if (fname.lower().startswith(nick.lower()) and
fname.endswith('.yaml')):
participant = os.path.join(
dirpath,
fname
).replace(yaml_dir, '')
participant = participant.replace('.yaml', '')
return 'participants' + participant
| apache-2.0 | 3,540,628,806,068,801,000 | 28.6875 | 77 | 0.569737 | false |
django-danceschool/django-danceschool | danceschool/discounts/tests.py | 1 | 20249 | from django.urls import reverse
from django.utils import timezone
from datetime import timedelta
from danceschool.core.constants import REG_VALIDATION_STR, updateConstant
from danceschool.core.utils.tests import DefaultSchoolTestCase
from danceschool.core.models import Invoice, Registration
from .models import (
PointGroup, PricingTierGroup, DiscountCategory, DiscountCombo, DiscountComboComponent
)
class BaseDiscountsTest(DefaultSchoolTestCase):
def create_discount(self, **kwargs):
'''
This method just creates the necessary objects to create a simple discount
with a single required component.
'''
test_group, created = PointGroup.objects.get_or_create(
name=kwargs.get('pointGroupName', 'Test points')
)
pt_group, created = PricingTierGroup.objects.get_or_create(
group=test_group,
pricingTier=self.defaultPricing,
points=kwargs.get('pricingTierGroupPoints', 5),
)
# Create a flat price combo that just knocks $5 off the regular price
test_combo = DiscountCombo(
name=kwargs.get('name', 'Test Discount'),
category=kwargs.get('category', DiscountCategory.objects.get(id=1)),
discountType=kwargs.get('discountType', DiscountCombo.DiscountType.flatPrice),
onlinePrice=kwargs.get('onlinePrice', self.defaultPricing.onlinePrice - 5),
doorPrice=kwargs.get('doorPrice', self.defaultPricing.doorPrice - 5),
dollarDiscount=kwargs.get('dollarDiscount', 10),
percentDiscount=kwargs.get('percentDiscount', 50),
percentUniversallyApplied=kwargs.get('percentUniversallyApplied', False),
active=kwargs.get('active', True),
newCustomersOnly=kwargs.get('newCustomersOnly', False),
daysInAdvanceRequired=kwargs.get('daysInAdvanceRequired', None),
expirationDate=kwargs.get('expirationDate', None),
)
test_combo.save()
test_component = DiscountComboComponent.objects.create(
discountCombo=test_combo,
pointGroup=test_group,
quantity=kwargs.get('quantity', 5),
allWithinPointGroup=kwargs.get('allWithinPointGroup', False),
)
return (test_combo, test_component)
def register_to_check_discount(self, series, expected_amount=None):
'''
This method makes it easy to determine whether discounts are working
correctly for a single class registration
'''
s = series
response = self.client.get(reverse('registration'))
self.assertEqual(response.status_code, 200)
self.assertIn(s, response.context_data.get('regOpenSeries'))
# Sign up for the series, and check that we proceed to the student information page.
# Because of the way that roles are encoded on this form, we just grab the value to pass
# from the form itself.
post_data = {'series_%s_%s' % (
s.id, response.context_data['form'].fields['series_%s' % s.id].field_choices[0].get('value')
): [1,]}
response = self.client.post(reverse('registration'), post_data, follow=True)
self.assertEqual(response.redirect_chain, [(reverse('getStudentInfo'), 302)])
invoice = Invoice.objects.get(
id=self.client.session[REG_VALIDATION_STR].get('invoiceId')
)
tr = Registration.objects.filter(invoice=invoice).first()
self.assertTrue(tr.eventregistration_set.filter(event__id=s.id).exists())
self.assertFalse(tr.final)
# Check that the student info page lists the correct subtotal with
# the discount applied
self.assertEqual(invoice.grossTotal, s.getBasePrice())
if expected_amount is not None:
self.assertEqual(response.context_data.get('invoice').total, expected_amount)
# Continue to the summary page
post_data = {
'firstName': 'Discounted',
'lastName': 'Customer',
'email': '[email protected]',
'agreeToPolicies': True,
}
return self.client.post(reverse('getStudentInfo'), post_data, follow=True)
class DiscountsConditionsTest(BaseDiscountsTest):
def test_inactive_discount(self):
'''
Make a discount inactive and make sure that it doesn't work
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(active=False)
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice())
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 0)
self.assertFalse(response.context_data.get('addonItems'))
self.assertFalse(response.context_data.get('discount_codes'))
def test_expired_discount(self):
'''
Create an expired discount and make sure that it doesn't work.
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(
expirationDate=timezone.now() + timedelta(days=-1)
)
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice())
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 0)
self.assertFalse(response.context_data.get('addonItems'))
self.assertFalse(response.context_data.get('discount_codes'))
def test_discounts_disabled(self):
''' Disable discounts and check that they don't work anymore '''
updateConstant('general__discountsEnabled', False)
test_combo, test_component = self.create_discount()
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice())
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 0)
self.assertFalse(response.context_data.get('addonItems'))
self.assertFalse(response.context_data.get('discount_codes'))
def test_notenoughpoints(self):
'''
Set the discount's components so that this discount is too small to apply, and
check that it doesn't get applied.
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(quantity=10)
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice())
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 0)
self.assertFalse(response.context_data.get('addonItems'))
self.assertFalse(response.context_data.get('discount_codes'))
def test_noearlybird(self):
'''
Create an early registration discount that requires three day
advance registration and ensure that it does not work less than
three days in advance.
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(daysInAdvanceRequired=3)
s = self.create_series(
pricingTier=self.defaultPricing,
startTime=timezone.now() + timedelta(days=1)
)
response = self.register_to_check_discount(s, s.getBasePrice())
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 0)
self.assertFalse(response.context_data.get('addonItems'))
self.assertFalse(response.context_data.get('discount_codes'))
class DiscountsTypesTest(BaseDiscountsTest):
def test_discount_applies(self):
'''
Create a flat $5 discount and test that it applies
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount()
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice() - 5)
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal - 5
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 5)
self.assertFalse(response.context_data.get('addonItems'))
discount_codes = response.context_data.get('discount_codes')
self.assertEqual([x[0] for x in discount_codes], [test_combo.name, ])
def test_earlybird(self):
'''
Create an early registration discount that requires three day
advance registration and ensure that it works more than
three days in advance.
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(daysInAdvanceRequired=3)
s = self.create_series(
pricingTier=self.defaultPricing,
startTime=timezone.now() + timedelta(days=4)
)
response = self.register_to_check_discount(s, s.getBasePrice() - 5)
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal - 5
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 5)
self.assertFalse(response.context_data.get('addonItems'))
discount_codes = response.context_data.get('discount_codes')
self.assertEqual([x[0] for x in discount_codes], [test_combo.name, ])
def test_allwithinpointgroup(self):
'''
Set a discount to apply to an entire point group and check that the price
is still the flat price
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(quantity=1, allWithinPointGroup=True)
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice() - 5)
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal - 5
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 5)
self.assertFalse(response.context_data.get('addonItems'))
discount_codes = response.context_data.get('discount_codes')
self.assertEqual([x[0] for x in discount_codes], [test_combo.name, ])
def test_dollarDiscount(self):
'''
Create a $10 off discount and check that it applies appropriately
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(
discountType=DiscountCombo.DiscountType.dollarDiscount,
dollarDiscount=10
)
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice() - 10)
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal - 10
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 10)
self.assertFalse(response.context_data.get('addonItems'))
discount_codes = response.context_data.get('discount_codes')
self.assertEqual([x[0] for x in discount_codes], [test_combo.name, ])
def test_percentDiscount(self):
'''
Create a 50% off discount and check that it applies correctly.
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(
discountType=DiscountCombo.DiscountType.percentDiscount,
percentDiscount=50,
percentUniversallyApplied=False
)
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice()*0.5)
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, 0.5 * invoice.grossTotal
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(
response.context_data.get('total_discount_amount'),
0.5 * invoice.grossTotal
)
self.assertFalse(response.context_data.get('addonItems'))
discount_codes = response.context_data.get('discount_codes')
self.assertEqual([x[0] for x in discount_codes], [test_combo.name, ])
def test_addOnItem(self):
'''
Create a free add-on item and ensure that it is applied correctly.
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(
discountType=DiscountCombo.DiscountType.addOn,
name='Test Free Add-On',
)
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice())
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 0)
self.assertTrue(response.context_data.get('addonItems'))
self.assertFalse(response.context_data.get('discount_codes'))
def test_discountmakesitfree(self):
'''
Make the dollar discount larger than the base price and check that
the registration is free, that the registration is processed and that
a $0 invoice is created.
'''
updateConstant('general__discountsEnabled', True)
s = self.create_series(pricingTier=self.defaultPricing)
test_combo, test_component = self.create_discount(
discountType=DiscountCombo.DiscountType.dollarDiscount,
dollarDiscount=s.getBasePrice() + 10
)
response = self.register_to_check_discount(s, 0)
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(invoice.total, 0)
self.assertEqual(response.context_data.get('zero_balance'), True)
self.assertEqual(response.context_data.get('total_discount_amount'), s.getBasePrice())
self.assertFalse(response.context_data.get('addonItems'))
discount_codes = response.context_data.get('discount_codes')
self.assertEqual([x[0] for x in discount_codes], [test_combo.name, ])
# Since the above registration was free, check that the registration actually
# processed, and that there exists a paid Invoice for $0
finalReg = response.context_data.get('registration')
invoice = response.context_data.get('invoice')
self.assertTrue(finalReg)
self.assertTrue(finalReg.invoice)
self.assertEqual(finalReg.invoice, invoice)
self.assertTrue(invoice.status == Invoice.PaymentStatus.paid)
self.assertEqual(invoice.outstandingBalance, 0)
self.assertEqual(invoice.total, 0)
self.assertTrue(finalReg.final)
# Check that the invoice no longer has an expiration date
self.assertIsNone(invoice.expirationDate)
# Check that the RegistrationDiscount associated with this registration
# has been applied.
self.assertTrue(finalReg.registrationdiscount_set.first().applied)
# Show that multiple registrations by the same customer are not permitted
response = self.register_to_check_discount(s)
self.assertIn(
'You are already registered for',
' '.join(response.context_data['form'].errors.get('__all__'))
)
def test_largerdiscountapplies(self):
'''
Create both a $10 discount and a $20 discount, and ensure that the
larger discount applies
'''
updateConstant('general__discountsEnabled', True)
s = self.create_series(pricingTier=self.defaultPricing)
test_combo, test_component = self.create_discount(
discountType=DiscountCombo.DiscountType.dollarDiscount,
dollarDiscount=10
)
bigger_combo, bigger_component = self.create_discount(
discountType=DiscountCombo.DiscountType.dollarDiscount,
dollarDiscount=20,
name='Bigger Discount'
)
response = self.register_to_check_discount(s, s.getBasePrice() - 20)
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal - 20
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 20)
self.assertFalse(response.context_data.get('addonItems'))
discount_codes = response.context_data.get('discount_codes')
self.assertEqual([x[0] for x in discount_codes], [bigger_combo.name, ])
| bsd-3-clause | 1,658,974,003,004,145,400 | 43.503297 | 104 | 0.661909 | false |
smurfix/DaBroker | dabroker/base/transport/__init__.py | 1 | 4226 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This file is part of DaBroker, a distributed data access manager.
##
## DaBroker is Copyright © 2014 by Matthias Urlichs <[email protected]>,
## it is licensed under the GPLv3. See the file `README.rst` for details,
## including optimistic statements by the author.
##
## This paragraph is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘utils/_boilerplate.py’.
## Thus, please do not remove the next line, or insert any blank lines.
##BP
from gevent import GreenletExit
from dabroker.util.thread import prep_spawned
import logging
logger = logging.getLogger("dabroker.base.transport")
class ConnectionError(RuntimeError):
pass
class BaseCallbacks(object):
def recv(self,msg):
"""Incoming message from the other side. NOT used for receiving replies!"""
raise NotImplementedError("You need to override {}.recv()".format(self.__class__.__name__))
def send(self,msg):
"""Outgoing message to the other side. NOT used for sending replies!"""
raise NotImplementedError("You need to override {}.send()".format(self.__class__.__name__))
def ended(self,err=None):
"""Called on receiver error. Do not reconnect here!"""
pass
def reconnect(self,err=None):
"""Called after a closed connection has been cleaned up"""
pass
def register_codec(self,codec):
raise NotImplementedError("You need to override {}.register_codec()".format(self.__class__.__name__))
class RelayedError(Exception):
"""An encapsulation for a server error (with traceback)"""
def __init__(self,err,tb):
self.err = str(err)
self.tb = tb
def __repr__(self):
return "{}({})".format(self.__class__.__name__,self.err)
def __str__(self):
r = repr(self)
if self.tb is None: return r
return r+"\n"+self.tb
class BaseTransport(object):
_job = None
defaults = {}
connection = None
last_msgid = 0
def __init__(self,callbacks, cfg={}):
self.cfg = self.defaults.copy()
self.cfg.update(cfg)
self.callbacks = callbacks
self.trace = cfg.get('trace',0)
def connect(self, purge=False):
"""Connect. (Synchronously.)
Do not override!
Override .connect1() (setup) and .connect2() (initial tasks)"""
assert self.callbacks is not None
assert self.connection is None
self.connect1()
if purge:
self.purge_all()
self.connect2()
def connect1(self):
"""Set up a connection.
Call super() before your code."""
if self._job is not None:
raise RuntimeError("Already connected")
logger.debug("connecting: %r",self)
def connect2(self):
"""Add initial tasks after a connection has been established.
Call super() after your code."""
assert self._job is None
self._job = self._run_job()
self._job.start()
def disconnect(self):
"""Sever the connection; do not auto-reconnect."""
logger.debug("disconnecting: %r",self)
j,self._job = self._job,None
if j:
j.stop()
def disconnected(self, err=None):
"""Clear connection objects.
This will be called by the reader task as it exits.
Do not reconnect from here; do that in your .reconnect"""
logger.debug("disconnected: %r",self)
def purge_all(self):
"""
Clear this transport's message queue.
This should only be called when client and server are known to
be idle AND when you suspect an unprocessable message might
clog the queue.
"""
pass
def send(self,msg):
raise NotImplementedError("You need to override {}.send()".format(self.__class__.__name__))
def run(self):
raise NotImplementedError("You need to override {}.run()".format(self.__class__.__name__))
@prep_spawned
def _run_job(self):
try:
logger.debug("Running receiver loop: %r",self)
self.run()
except GreenletExit:
err=None
logger.debug("Receiver loop ends: %r",self)
self.callbacks.ended(None)
except BaseException as e:
err = e
logger.exception("Receiver loop error: %r",self)
self.callbacks.ended(e)
else:
err=None
logger.debug("Receiver loop ends: %r",self)
self.callbacks.ended(None)
finally:
self.disconnected()
if self._job is not None:
self._job = None
self.callbacks.reconnect(err)
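# A minimal subclass sketch showing which hooks a concrete transport fills
# in; the connection object and delivery details are illustrative only.
class _ExampleTransport(BaseTransport):
    def connect1(self):
        super(_ExampleTransport, self).connect1()
        self.connection = object()  # stand-in for a real broker connection
    def send(self, msg):
        # hand the encoded message to the broker connection here
        pass
    def run(self):
        # blocking receive loop; call self.callbacks.recv(msg) per message
        pass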
| gpl-3.0 | -528,446,127,231,001,700 | 26.769737 | 103 | 0.689647 | false |
MattFaus/CrowdTube-Connector | youtube.py | 1 | 6824 | import os
import urlparse
from lib import gdata
import lib.gdata.youtube.client
import secrets
GDATA_API_CLIENT_ID = 'CrowdTube-Connector'
class YouTubeCaptionEditor(object):
def __init__(self, google_email, google_password, youtube_username):
self.youtube_username = youtube_username
self.youtube_client = lib.gdata.youtube.client.YouTubeClient()
# We shouldn't need this auth_token, but we'll keep it around
self.auth_token = self.youtube_client.client_login(
google_email, google_password, GDATA_API_CLIENT_ID)
# A dictionary of youtube_id and YouTubeVideo objects
self.videos = {}
def get_videos(self):
# Format copied from lib.gdata.youtube.client.py
feed_uri = '%s%s/%s' % (lib.gdata.youtube.client.YOUTUBE_USER_FEED_URI,
self.youtube_username, 'uploads')
all_videos = self.youtube_client.get_videos(uri=feed_uri)
for video in all_videos.entry:
new_video = YouTubeVideo(video, self.youtube_client)
self.videos[new_video.video_id] = new_video
def get_video(self, video_id):
video_entry = self.youtube_client.get_video_entry(video_id=video_id)
return YouTubeVideo(video_entry, self.youtube_client)
def delete_track(self, video_id, track_id):
"""Deletes an existing track."""
# TODO(mattfaus): Take google_developer_key as a constructor arg?
response = self.youtube_client.delete_track(video_id, track_id,
client_id=GDATA_API_CLIENT_ID,
developer_key=secrets.google_developer_key)
# http://docs.python.org/release/2.2.3/lib/httpresponse-objects.html
if response.status != 200:
print response.status, response.msg
return False
return True
def add_track(self, video_id, title, language, track_content):
"""Adds a caption track.
If a track with the same title already exists, this will silently fail.
"""
# TODO(mattfaus): Take google_developer_key as a constructor arg?
track_content = track_content.encode('utf-8')
response = self.youtube_client.create_track(video_id, title, language,
track_content, client_id=GDATA_API_CLIENT_ID,
developer_key=secrets.google_developer_key, fmt='sub')
# Returns a TrackEntry object
return response
def update_track(self, video_id, track_id, track_content):
"""Adds a caption track."""
# TODO(mattfaus): Take google_developer_key as a constructor arg?
track_content = track_content.encode('utf-8')
response = self.youtube_client.update_track(video_id, track_id,
track_content, client_id=GDATA_API_CLIENT_ID,
developer_key=secrets.google_developer_key, fmt='sub')
# Returns a TrackEntry object
return response
# TODO(mattfaus): Suck these two classes into the YouTubeCaptionEditor, above
# make the YouTubeCaptionEditor behave more like a full-fledged youtube client
# Shouldn't have to pass the youtube_client object around to the sub-classes
# No need to have dictionaries where an array would do just fine (YouTubeVideo.caption_tracks)
class YouTubeVideo(object):
def __init__(self, video_entry, youtube_client=None):
self.youtube_client = youtube_client
# tag:youtube.com,2008:video:SNrEiiJwD4Y
id_parts = video_entry.GetId().split(':')
self.video_id = id_parts[id_parts.index('video') + 1]
self.title = video_entry.title.text
caption_link = video_entry.get_link(
'http://gdata.youtube.com/schemas/2007#video.captionTracks')
self.caption_feed = caption_link.href
# TODO(mattfaus): Make this less ugly
has_entries = [
a.value for a in caption_link.GetAttributes()
if '{http://gdata.youtube.com/schemas/2007}hasEntries' == a._qname]
has_entries = has_entries[0] == 'true'
self.has_entries = has_entries
self.caption_tracks = {}
def get_caption_tracks(self, download=False):
# Don't check self.has_entries. It may be False when only a
# machine-generated caption track exists.
if not self.youtube_client:
raise ValueError('No youtube client available!')
# STOPSHIP(mattfaus): get_caption_feed() only returns the first 24 caption tracks
# so we must iterate to read more
# TODO(mattfaus): Filter this by language with the 'lr' attribute
all_captions = self.youtube_client.get_caption_feed(self.caption_feed)
for caption_entry in all_captions.entry:
new_track = YouTubeCaptionTrack(caption_entry, self.youtube_client)
self.caption_tracks[new_track.track_source] = new_track
if download:
new_track.download_track()
def get_machine_generated_track(self):
self.get_caption_tracks()
for src, caption_track in self.caption_tracks.iteritems():
print src, caption_track
if caption_track.machine_generated:
caption_track.download_track()
return caption_track
class YouTubeCaptionTrack(object):
def __init__(self, caption_entry, youtube_client):
self.youtube_client = youtube_client
self.language = caption_entry.content.lang
self.track_source = caption_entry.content.src
self.machine_generated = YouTubeCaptionTrack._is_machine_generated(
caption_entry)
# Parse the video_id and caption_id out of a url like this:
# https://gdata.youtube.com/feeds/api/videos/Jom6EtXzRMg/captiondata/Ch4LEO3ZhwUaFQjIic2vrcLuxCYSAmVuGgAiA2Fzcgw
o = urlparse.urlparse(self.track_source)
path_parts = o.path.split('/')
self.video_id = path_parts[path_parts.index('videos') + 1]
self.track_id = path_parts[path_parts.index('captiondata') + 1]
self.track_content = None
@staticmethod
def _is_machine_generated(caption_entry):
"""Looks for the derived element, and returns True if it is equal to
speechRecognition.
"""
# TODO(mattfaus): Move this to TrackEntry within youtube/data.py?
derived = caption_entry.GetElements(
tag='derived', namespace='http://gdata.youtube.com/schemas/2007')
if not derived:
return False
else:
derived = derived[0]
return derived.text == 'speechRecognition'
def download_track(self):
response = self.youtube_client.get_caption_track(
track_url=self.track_source, client_id=GDATA_API_CLIENT_ID,
developer_key=secrets.google_developer_key)
self.track_content = response.read(2 ** 31)
return self.track_content
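# A minimal usage sketch (kept as a comment); the credentials and video id
# are placeholders, and secrets.google_developer_key must be configured just
# as it is for the calls above:
#
#   editor = YouTubeCaptionEditor('[email protected]', 'app-password', 'mychannel')
#   video = editor.get_video('SNrEiiJwD4Y')
#   track = video.get_machine_generated_track()
#   if track is not None:
#       print track.track_content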
| mit | -7,013,494,189,144,412,000 | 38.445087 | 120 | 0.651231 | false |
rockfruit/bika.lims | bika/lims/browser/analysisrequest/results_not_requested.py | 1 | 2747 | # This file is part of Bika LIMS
#
# Copyright 2011-2016 by it's authors.
# Some rights reserved. See LICENSE.txt, AUTHORS.txt.
from AccessControl import getSecurityManager
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.permissions import *
from bika.lims.browser.analysisrequest import AnalysisRequestManageResultsView
from bika.lims.content.analysisrequest import schema as AnalysisRequestSchema
from bika.lims.utils import to_utf8
from bika.lims.workflow import doActionFor
from plone.app.layout.globals.interfaces import IViewView
from DateTime import DateTime
from Products.Archetypes import PloneMessageFactory as PMF
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from zope.interface import implements
import plone
class AnalysisRequestResultsNotRequestedView(AnalysisRequestManageResultsView):
implements(IViewView)
template = ViewPageTemplateFile("templates/analysisrequest_analyses_not_requested.pt")
def __call__(self):
ar = self.context
workflow = getToolByName(ar, 'portal_workflow')
# If is a retracted AR, show the link to child AR and show a warn msg
if workflow.getInfoFor(ar, 'review_state') == 'invalid':
childar = hasattr(ar, 'getChildAnalysisRequest') \
and ar.getChildAnalysisRequest() or None
childid = childar and childar.getRequestID() or None
message = _('This Analysis Request has been withdrawn and is shown '
'for trace-ability purposes only. Retest: ${retest_child_id}.',
mapping={"retest_child_id":childid if childid else ''})
self.context.plone_utils.addPortalMessage(message, 'warning')
# If is an AR automatically generated due to a Retraction, show it's
# parent AR information
if hasattr(ar, 'getParentAnalysisRequest') \
and ar.getParentAnalysisRequest():
par = ar.getParentAnalysisRequest()
message = _(
'This Analysis Request has been generated automatically due to '
'the retraction of the Analysis Request ${retracted_request_id}.',
mapping={"retracted_request_id": par.getRequestID()})
self.context.plone_utils.addPortalMessage(message, 'info')
can_do = getSecurityManager().checkPermission(ResultsNotRequested, ar)
if workflow.getInfoFor(ar, 'cancellation_state') == "cancelled":
self.request.response.redirect(ar.absolute_url())
elif not(can_do):
self.request.response.redirect(ar.absolute_url())
else:
return self.template()
| agpl-3.0 | 3,269,595,701,656,959,500 | 46.362069 | 90 | 0.699672 | false |
LongSeanSilvr/DC_Metro_Tracker | development_version/src/general_intents.py | 1 | 1923 | import build_response as br
# ======================================================================================================================
# Skill Behavior: Welcome Response
# ======================================================================================================================
class Welcome(object):
def __init__(self):
self.card_title = "Welcome"
self.reprompt_text = "What station would you like train times for?"
self.flag = "welcome"
def build_response(self):
output = br.build_response(self.card_title, self.flag, reprompt_text=self.reprompt_text)
return output
# ======================================================================================================================
# Skill Intent: Help
# ======================================================================================================================
class Help(object):
def __init__(self, intent, session): # Parameters are here so handler can treat this like the other intent classes
self.card_title = "Help"
self.reprompt_text = "What station would you like train times for?"
self.flag = "help"
def build_response(self):
output = br.build_response(self.card_title, self.flag, reprompt_text=self.reprompt_text)
return output
# ======================================================================================================================
# Skill Intent: Quit
# ======================================================================================================================
class Exit(object):
def __init__(self, intent, session): # Parameters are here so handler can treat this like the other intent classes
self.card_title = "Exiting"
self.flag = "exit"
def build_response(self):
output = br.build_response(self.card_title, self.flag)
return output
| gpl-3.0 | 3,137,623,135,016,649,700 | 44.785714 | 120 | 0.411856 | false |
GNOME/libgxps | regtest/TestReferences.py | 1 | 3535 | # TestReferences.py
#
# Copyright (C) 2011 Carlos Garcia Campos <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import errno
from Test import Test
from Config import Config
from Printer import get_printer
from Utils import get_document_paths_from_dir, get_skipped_tests
from Queue import Queue
from threading import Thread, RLock
class TestReferences:
def __init__(self, docsdir, refsdir):
self._docsdir = docsdir
self._refsdir = refsdir
self._skipped = get_skipped_tests(docsdir)
self._test = Test()
self.config = Config()
self.printer = get_printer()
self._total_tests = 1
self._n_tests = 0
self._queue = Queue()
self._lock = RLock()
try:
os.makedirs(self._refsdir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
except:
raise
def create_refs_for_file(self, filename):
if filename in self._skipped:
with self._lock:
self._n_tests += 1
self.printer.print_default("Skipping test '%s'" % (os.path.join(self._docsdir, filename)))
return
refs_path = os.path.join(self._refsdir, filename)
try:
os.makedirs(refs_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
except:
raise
doc_path = os.path.join(self._docsdir, filename)
if not self.config.force and self._test.has_results(refs_path):
with self._lock:
self._n_tests += 1
self.printer.print_default("Results found, skipping '%s'" % doc_path)
return
if self._test.create_refs(doc_path, refs_path):
self._test.create_checksums(refs_path, self.config.checksums_only)
with self._lock:
self._n_tests += 1
self.printer.printout_ln("[%d/%d] %s: done" % (self._n_tests, self._total_tests, doc_path))
def _worker_thread(self):
while True:
doc = self._queue.get()
self.create_refs_for_file(doc)
self._queue.task_done()
def create_refs(self):
docs, total_docs = get_document_paths_from_dir(self._docsdir)
self._total_tests = total_docs
self.printer.printout_ln('Found %d documents' % (total_docs))
self.printer.printout_ln('Process %d using %d worker threads' % (os.getpid(), self.config.threads))
self.printer.printout_ln()
self.printer.printout('Spawning %d workers...' % (self.config.threads))
for n_thread in range(self.config.threads):
thread = Thread(target=self._worker_thread)
thread.daemon = True
thread.start()
for doc in docs:
self._queue.put(doc)
self._queue.join()
| lgpl-2.1 | 7,581,301,947,150,102,000 | 32.990385 | 107 | 0.614427 | false |
dakrauth/picker | picker/forms.py | 1 | 6144 | from django import forms
from django.utils import timezone
from django.utils.module_loading import import_string
from . import models as picker
from . import utils
_picker_widget = None
encoded_game_key = 'game_{}'.format
TIE_KEY = '__TIE__'
def decoded_game_key(value):
return int(value.replace('game_', ''))
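# Round-trip of the form field keys defined above (illustrative only):
#     encoded_game_key(7)        -> 'game_7'
#     decoded_game_key('game_7') -> 7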
def encoded_game_item(game):
return (
encoded_game_key(game.id),
str(game.winner.id) if game.winner else (TIE_KEY if game.is_tie else '')
)
def get_picker_widget(league):
global _picker_widget
if not _picker_widget:
widget_path = league.config('TEAM_PICKER_WIDGET')
if widget_path:
_picker_widget = import_string(widget_path)
_picker_widget = _picker_widget or forms.RadioSelect
return _picker_widget
class GameField(forms.ChoiceField):
def __init__(self, game, manage=False, widget=None):
choices = [(str(game.away.id), game.away), (str(game.home.id), game.home)]
if manage:
choices.insert(1, (TIE_KEY, ''))
self.game = game
self.manage = manage
self.game_id = game.id
self.is_game = True
super(GameField, self).__init__(
choices=choices,
label=game.start_time.strftime('%a, %b %d %I:%M %p'),
required=False,
help_text=game.tv,
disabled=not self.manage and (self.game.start_time <= timezone.now()),
widget=widget or get_picker_widget(game.gameset.league)
)
class FieldIter:
def __init__(self, form):
self.fields = []
self.form = form
def append(self, name):
self.fields.append(name)
def __iter__(self):
for name in self.fields:
yield self.form[name]
class BasePickForm(forms.Form):
management = False
def __init__(self, gameset, *args, **kws):
super(BasePickForm, self).__init__(*args, **kws)
self.gameset = gameset
self.game_fields = FieldIter(self)
games = list(gameset.games.select_related('home__league', 'away__league'))
if games:
for gm in games:
key = encoded_game_key(gm.id)
self.fields[key] = GameField(gm, self.management)
self.game_fields.append(key)
self.fields['points'] = forms.IntegerField(
label='{}:'.format(games[-1].vs_description),
required=False
)
class ManagementPickForm(BasePickForm):
management = True
def __init__(self, gameset, *args, **kws):
kws.setdefault('initial', {}).update(**self.get_initial_picks(gameset))
super(ManagementPickForm, self).__init__(gameset, *args, **kws)
def save(self):
gameset = self.gameset
data = self.cleaned_data.copy()
gameset.points = data.pop('points', 0) or 0
gameset.save()
for key, winner in data.items():
if winner:
pk = decoded_game_key(key)
game = gameset.games.get(pk=pk)
game.winner = None if winner == TIE_KEY else int(winner)
gameset.update_pick_status()
@staticmethod
def get_initial_picks(gameset):
return dict({
encoded_game_key(game.id): str(game.winner.id)
for game in gameset.games.played()
if game.winner
}, points=gameset.points)
class UserPickForm(BasePickForm):
def __init__(self, user, gameset, *args, **kws):
initial = self.get_initial_user_picks(gameset, user)
kws.setdefault('initial', {}).update(initial)
self.user = user
super(UserPickForm, self).__init__(gameset, *args, **kws)
def save(self):
data = self.cleaned_data.copy()
picks = picker.PickSet.objects.for_gameset_user(self.gameset, self.user)
points = data.pop('points', None)
games = {decoded_game_key(k): v for k, v in data.items() if v}
picks.update_picks(games=games, points=points)
return picks
@staticmethod
def get_initial_user_picks(gameset, user):
ps = gameset.pick_for_user(user)
initial = dict({
encoded_game_key(g_id): str(w_id) for g_id, w_id in ps.gamepicks.picked_winner_ids()
}, points=ps.points) if ps else {}
return initial
class GameForm(forms.ModelForm):
class Meta:
model = picker.Game
fields = ('start_time', 'location')
class PreferenceForm(forms.ModelForm):
class Meta:
model = picker.Preference
fields = ('autopick',)
def __init__(self, instance, *args, **kws):
kws['instance'] = instance
self.current_email = instance.user.email.lower()
kws.setdefault('initial', {})['email'] = self.current_email
super(PreferenceForm, self).__init__(*args, **kws)
for league in picker.League.objects.all():
field_name = '{}_favorite'.format(league.slug)
current = None
if instance:
try:
current = picker.PickerFavorite.objects.get(user=instance.user, league=league)
except picker.PickerFavorite.DoesNotExist:
pass
self.fields[field_name] = forms.ModelChoiceField(
picker.Team.objects.filter(league=league),
label='{} Favorite'.format(league.abbr.upper()),
empty_label='-- Select --',
required=False,
initial=current.team if current else None
)
def save(self, commit=True):
super(PreferenceForm, self).save(commit)
if commit:
picker.PickerFavorite.objects.filter(user=self.instance.user).delete()
for key in self.cleaned_data:
if not key.endswith('_favorite'):
continue
slug = key.rsplit('_')[0]
league = picker.League.objects.get(slug=slug)
picker.PickerFavorite.objects.create(
league=league,
user=self.instance.user,
team=self.cleaned_data[key]
)
| mit | -7,155,869,303,144,028,000 | 30.187817 | 98 | 0.57487 | false |
amerlyq/piony | piony/config/argparser.py | 1 | 2747 | from argparse import ArgumentParser, RawDescriptionHelpFormatter
import piony
from piony.common.exceptions import InputError
class ArgParser(object):
def __init__(self):
self.ps = ArgumentParser(prog=piony.__appname__,
formatter_class=RawDescriptionHelpFormatter,
description=piony.__doc__, epilog="Enjoy!!!")
self._setup_options()
def parse(self, argv):
if not argv:
argv = []
elif isinstance(argv, str):
argv = argv.split()
elif not isinstance(argv, list):
raise InputError("Wrong argv type: {}".format(type(argv)))
return self.ps.parse_args(argv)
def apply(self, args):
from operator import xor
res = (False, False)
dbg = {'a': (True, True), 'v': (True, False), 'k': (False, True)}
if args.verbose:
for entry in args.verbose:
res = map(xor, res, dbg[entry])
piony.G_DEBUG_VISUALS, piony.G_DEBUG_ACTIONS = res
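    # Worked example of the xor-toggling above (purely illustrative): with
    # args.verbose == ['v', 'k'] the accumulator evolves as
    #     (False, False) -> (True, False) -> (True, True)
    # so '-V v -V k' enables both visuals and actions debugging, while
    # repeating the same letter twice cancels itself out.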
def _setup_options(self):
## Configuration
farg = self.ps.add_argument
farg('buds', metavar='bud', nargs='*', type=str, default=None,
help="Setup profile layout in json directly on cmdline. "
"Can be specified several times -- one for each slice. "
"Or use pathes to files with slices inside.")
farg('-v', '--version', action='version', default=None,
version="%(prog)s {0}".format(piony.__version__),
help="Version of program.")
gr_window = self.ps.add_argument_group('Window')
warg = gr_window.add_argument
warg('-c', '--config', default=None,
help="Config file with default settings.")
warg('-p', '--print', default=None,
help="Toggle action print/execute to use as frontend only.")
## Appearance
warg('-s', '--size', type=int, default=None,
help="Sets window size WxH=NxN to derive all rings sizes from it.")
warg('-F', '--fullscreen', action='store_true', default=None,
help="Overlay fullscreen/local")
warg('-T', '--no-tooltip', action='store_true', default=None,
help="Disable pop-up items, for those who is irritated.")
## Process
gr_general = self.ps.add_argument_group('General')
garg = gr_general.add_argument
garg('-k', '--kill', action='store_true', default=None,
help="Kill running daemonized program.")
garg('-V', '--verbose', nargs='?', type=str,
const='a', choices=['a', 'v', 'k'], default=None,
help="Verbose (debug): [a]ll (default), [v]isuals, [k]eys.")
| gpl-3.0 | 114,584,023,838,943,360 | 41.261538 | 80 | 0.560612 | false |
strahlc/exaile | xlgui/main.py | 1 | 43837 | # Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
import datetime
import logging
import os
import re
import threading
import cairo
from gi.repository import Gdk
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Pango
from xl.nls import gettext as _
from xl import (
common,
covers,
event,
formatter,
player,
playlist,
providers,
settings,
trax,
xdg
)
from xlgui.accelerators import AcceleratorManager
from xlgui.playlist_container import PlaylistContainer
from xlgui.widgets import (
dialogs,
info,
menu,
playback
)
from xlgui.widgets.playlist import (
PlaylistPage,
PlaylistView
)
from xlgui import (
guiutil,
tray,
menu as mainmenu
)
logger = logging.getLogger(__name__)
# Length of playback step when user presses seek key (sec)
SEEK_STEP_DEFAULT = 10
# Length of volume steps when user presses up/down key
VOLUME_STEP_DEFAULT = 0.1
class MainWindow(GObject.GObject):
"""
Main Exaile Window
"""
__gproperties__ = {
'is-fullscreen': (bool, 'Fullscreen',
'Whether the window is fullscreen.',
False, # Default
GObject.PARAM_READWRITE),
}
__gsignals__ = {'main-visible-toggle': (GObject.SignalFlags.RUN_LAST, bool, ())}
_mainwindow = None
def __init__(self, controller, builder, collection):
"""
Initializes the main window
@param controller: the main gui controller
"""
GObject.GObject.__init__(self)
self.controller = controller
self.collection = collection
self.playlist_manager = controller.exaile.playlists
self.current_page = -1
self._fullscreen = False
self.resuming = False
self.window_state = 0
self.minimized = False
self.builder = builder
self.window = self.builder.get_object('ExaileWindow')
self.window.set_title('Exaile')
self.title_formatter = formatter.TrackFormatter(settings.get_option(
'gui/main_window_title_format', _('$title (by $artist)') +
' - Exaile'))
self.accelgroup = Gtk.AccelGroup()
self.window.add_accel_group(self.accelgroup)
self.accel_manager = AcceleratorManager('mainwindow-accelerators', self.accelgroup)
self.menubar = self.builder.get_object("mainmenu")
fileitem = self.builder.get_object("file_menu_item")
filemenu = menu.ProviderMenu('menubar-file-menu', self)
fileitem.set_submenu(filemenu)
edititem = self.builder.get_object("edit_menu_item")
editmenu = menu.ProviderMenu('menubar-edit-menu', self)
edititem.set_submenu(editmenu)
viewitem = self.builder.get_object("view_menu_item")
viewmenu = menu.ProviderMenu('menubar-view-menu', self)
viewitem.set_submenu(viewmenu)
toolsitem = self.builder.get_object("tools_menu_item")
toolsmenu = menu.ProviderMenu('menubar-tools-menu', self)
toolsitem.set_submenu(toolsmenu)
helpitem = self.builder.get_object("help_menu_item")
helpmenu = menu.ProviderMenu('menubar-help-menu', self)
helpitem.set_submenu(helpmenu)
self._setup_widgets()
self._setup_position()
self._setup_hotkeys()
logger.info("Connecting main window events...")
self._connect_events()
MainWindow._mainwindow = self
mainmenu._create_menus()
def _setup_hotkeys(self):
"""
Sets up accelerators that haven't been set up in UI designer
"""
hotkeys = (
('<Control>S', lambda *e: self.on_save_playlist()),
('<Shift><Control>S', lambda *e: self.on_save_playlist_as()),
('<Control>F', lambda *e: self.on_panel_filter_focus()),
('<Control>G', lambda *e: self.on_search_playlist_focus()), # FIXME
('<Control><Alt>l', lambda *e: player.QUEUE.clear()), # FIXME
('<Control>P', self._on_playpause_button),
('<Control>Right', lambda *e: self._on_seek_key(True)),
('<Control>Left', lambda *e: self._on_seek_key(False)),
('<Control>plus', lambda *e: self._on_volume_key(True)),
('<Control>minus', lambda *e: self._on_volume_key(False)),
('<Control>Page_Up', self._on_prev_tab_key),
('<Control>Page_Down', self._on_next_tab_key),
('<Alt>N', self._on_focus_playlist_container),
# These 4 are subject to change.. probably should do this
# via a different mechanism too...
('<Alt>I', lambda *e: self.controller.focus_panel('files')),
#('<Alt>C', lambda *e: self.controller.focus_panel('collection')),
('<Alt>R', lambda *e: self.controller.focus_panel('radio')),
('<Alt>L', lambda *e: self.controller.focus_panel('playlists')),
('<Alt>1', lambda *e: self._on_focus_playlist_tab(0)),
('<Alt>2', lambda *e: self._on_focus_playlist_tab(1)),
('<Alt>3', lambda *e: self._on_focus_playlist_tab(2)),
('<Alt>4', lambda *e: self._on_focus_playlist_tab(3)),
('<Alt>5', lambda *e: self._on_focus_playlist_tab(4)),
('<Alt>6', lambda *e: self._on_focus_playlist_tab(5)),
('<Alt>7', lambda *e: self._on_focus_playlist_tab(6)),
('<Alt>8', lambda *e: self._on_focus_playlist_tab(7)),
('<Alt>9', lambda *e: self._on_focus_playlist_tab(8)),
('<Alt>0', lambda *e: self._on_focus_playlist_tab(9)),
)
self.accel_group = Gtk.AccelGroup()
for key, function in hotkeys:
key, mod = Gtk.accelerator_parse(key)
self.accel_group.connect(key, mod, Gtk.AccelFlags.VISIBLE,
function)
self.window.add_accel_group(self.accel_group)
def _setup_widgets(self):
"""
Sets up the various widgets
"""
# TODO: Maybe make this stackable
self.message = dialogs.MessageBar(
parent=self.builder.get_object('player_box'),
buttons=Gtk.ButtonsType.CLOSE
)
self.message.connect('response', self.on_messagebar_response)
self.info_area = MainWindowTrackInfoPane(player.PLAYER)
self.info_area.set_auto_update(True)
self.info_area.set_padding(3, 3, 3, 3)
self.info_area.hide()
self.info_area.set_no_show_all(True)
guiutil.gtk_widget_replace(self.builder.get_object('info_area'), self.info_area)
self.volume_control = playback.VolumeControl(player.PLAYER)
self.info_area.get_action_area().pack_end(self.volume_control, False, False, 0)
self.alpha_style = None
if settings.get_option('gui/use_alpha', False):
screen = self.window.get_screen()
visual = screen.get_rgba_visual()
self.window.set_visual(visual)
self.window.connect('screen-changed', self.on_screen_changed)
self.alpha_style = Gtk.CssProvider.new()
self.window.get_style_context().add_provider(self.alpha_style,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
self._update_alpha()
playlist_area = self.builder.get_object('playlist_area')
self.playlist_container = PlaylistContainer('saved_tabs', player.PLAYER)
for notebook in self.playlist_container.notebooks:
notebook.connect_after('switch-page', self.on_playlist_container_switch_page)
page = notebook.get_current_tab()
if page is not None:
selection = page.view.get_selection()
selection.connect('changed', self.on_playlist_view_selection_changed)
playlist_area.pack_start(self.playlist_container, True, True, 3)
self.splitter = self.builder.get_object('splitter')
# In most (all?) RTL locales, the playback controls should still be LTR.
# Just in case that's not always the case, we provide a hidden option to
# force RTL layout instead. This can be removed once we're more certain
# that the default behavior (always LTR) is correct.
controls_direction = Gtk.TextDirection.RTL \
if settings.get_option('gui/rtl_playback_controls') \
else Gtk.TextDirection.LTR
self.play_image = Gtk.Image.new_from_icon_name('media-playback-start',
Gtk.IconSize.SMALL_TOOLBAR)
self.play_image.set_direction(controls_direction)
self.pause_image = Gtk.Image.new_from_icon_name('media-playback-pause',
Gtk.IconSize.SMALL_TOOLBAR)
self.pause_image.set_direction(controls_direction)
play_toolbar = self.builder.get_object('play_toolbar')
play_toolbar.set_direction(controls_direction)
for button in ('playpause', 'next', 'prev', 'stop'):
widget = self.builder.get_object('%s_button' % button)
setattr(self, '%s_button' % button, widget)
widget.get_child().set_direction(controls_direction)
self.progress_bar = playback.SeekProgressBar(player.PLAYER)
self.progress_bar.get_child().set_direction(controls_direction)
# Don't expand vertically; looks awful on Adwaita.
self.progress_bar.set_valign(Gtk.Align.CENTER)
guiutil.gtk_widget_replace(
self.builder.get_object('playback_progressbar_dummy'),
self.progress_bar
)
self.stop_button.toggle_spat = False
self.stop_button.add_events(Gdk.EventMask.POINTER_MOTION_MASK)
self.stop_button.connect('motion-notify-event',
self.on_stop_button_motion_notify_event)
self.stop_button.connect('leave-notify-event',
self.on_stop_button_leave_notify_event)
self.stop_button.connect('key-press-event',
self.on_stop_button_key_press_event)
self.stop_button.connect('key-release-event',
self.on_stop_button_key_release_event)
self.stop_button.connect('focus-out-event',
self.on_stop_button_focus_out_event)
self.stop_button.connect('button-press-event',
self.on_stop_button_press_event)
self.stop_button.connect('button-release-event',
self.on_stop_button_release_event)
self.stop_button.drag_dest_set(Gtk.DestDefaults.ALL,
[Gtk.TargetEntry.new("exaile-index-list", Gtk.TargetFlags.SAME_APP, 0)], Gdk.DragAction.COPY)
self.stop_button.connect('drag-motion',
self.on_stop_button_drag_motion)
self.stop_button.connect('drag-leave',
self.on_stop_button_drag_leave)
self.stop_button.connect('drag-data-received',
self.on_stop_button_drag_data_received)
self.statusbar = info.Statusbar(self.builder.get_object('status_bar'))
event.add_ui_callback(self.on_exaile_loaded, 'exaile_loaded')
def _connect_events(self):
"""
Connects the various events to their handlers
"""
self.builder.connect_signals({
'on_configure_event': self.configure_event,
'on_window_state_event': self.window_state_change_event,
'on_delete_event': self.on_delete_event,
'on_playpause_button_clicked': self._on_playpause_button,
'on_next_button_clicked':
lambda *e: player.QUEUE.next(),
'on_prev_button_clicked':
lambda *e: player.QUEUE.prev(),
'on_about_item_activate': self.on_about_item_activate,
# Controller
# 'on_scan_collection_item_activate': self.controller.on_rescan_collection,
# 'on_device_manager_item_activate': lambda *e: self.controller.show_devices(),
# 'on_track_properties_activate':self.controller.on_track_properties,
})
event.add_ui_callback(self.on_playback_resume, 'playback_player_resume',
player.PLAYER)
event.add_ui_callback(self.on_playback_end, 'playback_player_end',
player.PLAYER)
event.add_ui_callback(self.on_playback_end, 'playback_error',
player.PLAYER)
event.add_ui_callback(self.on_playback_start, 'playback_track_start',
player.PLAYER)
event.add_ui_callback(self.on_toggle_pause, 'playback_toggle_pause',
player.PLAYER)
event.add_ui_callback(self.on_track_tags_changed, 'track_tags_changed')
event.add_ui_callback(self.on_buffering, 'playback_buffering',
player.PLAYER)
event.add_ui_callback(self.on_playback_error, 'playback_error',
player.PLAYER)
event.add_ui_callback(self.on_playlist_tracks_added,
'playlist_tracks_added')
event.add_ui_callback(self.on_playlist_tracks_removed,
'playlist_tracks_removed')
# Settings
self._on_option_set('gui_option_set', settings, 'gui/show_info_area')
self._on_option_set('gui_option_set', settings, 'gui/show_info_area_covers')
event.add_ui_callback(self._on_option_set, 'option_set')
def _connect_panel_events(self):
"""
Sets up panel events
"""
# When there's nothing in the notebook, hide it
self.controller.panel_notebook.connect('page-added', self.on_panel_notebook_add_page)
self.controller.panel_notebook.connect('page-removed', self.on_panel_notebook_remove_page)
# panels
panels = self.controller.panel_notebook.panels
for panel_name in ('playlists', 'radio', 'files', 'collection'):
panel = panels[panel_name].panel
sort = False
if panel_name in ('files', 'collection'):
sort = True
panel.connect('append-items', lambda panel, items, force_play, sort=sort:
self.on_append_items(items, force_play, sort=sort))
panel.connect('queue-items', lambda panel, items, sort=sort:
self.on_append_items(items, queue=True, sort=sort))
panel.connect('replace-items', lambda panel, items, sort=sort:
self.on_append_items(items, replace=True, sort=sort))
## Collection Panel
panel = panels['collection'].panel
panel.connect('collection-tree-loaded', self.on_collection_tree_loaded)
## Playlist Panel
panel = panels['playlists'].panel
panel.connect('playlist-selected',
lambda panel, playlist:
self.playlist_container.create_tab_from_playlist(playlist))
## Radio Panel
panel = panels['radio'].panel
panel.connect('playlist-selected',
lambda panel, playlist:
self.playlist_container.create_tab_from_playlist(playlist))
## Files Panel
#panel = panels['files']
def _update_alpha(self):
if self.alpha_style is None:
return
opac = 1.0 - float(settings.get_option('gui/transparency'))
self.alpha_style.load_from_data(
'.background { ' +
('background-color: alpha(@theme_bg_color, %s);' % opac) +
'}'
)
def do_get_property(self, prop):
if prop.name == 'is-fullscreen':
return self._fullscreen
else:
return GObject.GObject.do_get_property(self, prop)
def do_set_property(self, prop, value):
if prop.name == 'is-fullscreen':
if value:
self.window.fullscreen()
else:
self.window.unfullscreen()
else:
GObject.GObject.do_set_property(self, prop, value)
def on_screen_changed(self, widget, event):
"""
Updates the colormap on screen change
"""
screen = widget.get_screen()
visual = screen.get_rgba_visual() or screen.get_rgb_visual()
self.window.set_visual(visual)
def on_messagebar_response(self, widget, response):
"""
Hides the messagebar if requested
"""
if response == Gtk.ResponseType.CLOSE:
widget.hide()
def on_panel_notebook_add_page(self, notebook, page, page_num):
if self.splitter.get_child1() is None:
self.splitter.pack1(self.controller.panel_notebook)
self.controller.panel_notebook.get_parent() \
.child_set_property(self.controller.panel_notebook, 'shrink', False)
def on_panel_notebook_remove_page(self, notebook, page, page_num):
if notebook.get_n_pages() == 0:
self.splitter.remove(self.controller.panel_notebook)
def on_stop_button_motion_notify_event(self, widget, event):
"""
Sets the hover state and shows SPAT icon
"""
widget.__hovered = True
if event.get_state() & Gdk.ModifierType.SHIFT_MASK:
widget.set_image(Gtk.Image.new_from_icon_name(
'process-stop', Gtk.IconSize.BUTTON))
else:
widget.set_image(Gtk.Image.new_from_icon_name(
'media-playback-stop', Gtk.IconSize.BUTTON))
def on_stop_button_leave_notify_event(self, widget, event):
"""
Unsets the hover state and resets the button icon
"""
widget.__hovered = False
if not widget.is_focus() and \
~(event.get_state() & Gdk.ModifierType.SHIFT_MASK):
widget.set_image(Gtk.Image.new_from_icon_name(
'media-playback-stop', Gtk.IconSize.BUTTON))
def on_stop_button_key_press_event(self, widget, event):
"""
Shows SPAT icon on Shift key press
"""
if event.keyval in (Gdk.KEY_Shift_L, Gdk.KEY_Shift_R):
widget.set_image(Gtk.Image.new_from_icon_name(
'process-stop', Gtk.IconSize.BUTTON))
widget.toggle_spat = True
if event.keyval in (Gdk.KEY_space, Gdk.KEY_Return):
if widget.toggle_spat:
self.on_spat_clicked()
else:
player.PLAYER.stop()
def on_stop_button_key_release_event(self, widget, event):
"""
Resets the button icon
"""
if event.keyval in (Gdk.KEY_Shift_L, Gdk.KEY_Shift_R):
widget.set_image(Gtk.Image.new_from_icon_name(
'media-playback-stop', Gtk.IconSize.BUTTON))
widget.toggle_spat = False
def on_stop_button_focus_out_event(self, widget, event):
"""
Resets the button icon unless
the button is still hovered
"""
if not getattr(widget, '__hovered', False):
widget.set_image(Gtk.Image.new_from_icon_name(
'media-playback-stop', Gtk.IconSize.BUTTON))
def on_stop_button_press_event(self, widget, event):
"""
Called when the user clicks on the stop button
"""
if event.button == 1:
if event.get_state() & Gdk.ModifierType.SHIFT_MASK:
self.on_spat_clicked()
elif event.button == 3:
menu = guiutil.Menu()
menu.append(_("Toggle: Stop after Selected Track"),
self.on_spat_clicked,
'process-stop')
menu.popup(None, None, None, None, event.button, event.time)
def on_stop_button_release_event(self, widget, event):
"""
Called when the user releases the mouse from the stop button
"""
rect = widget.get_allocation()
if 0 <= event.x < rect.width and 0 <= event.y < rect.height:
player.PLAYER.stop()
def on_stop_button_drag_motion(self, widget, context, x, y, time):
"""
Indicates possible SPAT during drag motion of tracks
"""
target = widget.drag_dest_find_target(context, widget.drag_dest_get_target_list()).name()
if target == 'exaile-index-list':
widget.set_image(Gtk.Image.new_from_icon_name(
'process-stop', Gtk.IconSize.BUTTON))
def on_stop_button_drag_leave(self, widget, context, time):
"""
Resets the stop button
"""
widget.set_image(Gtk.Image.new_from_icon_name(
'media-playback-stop', Gtk.IconSize.BUTTON))
def on_stop_button_drag_data_received(self, widget, context, x, y, selection, info, time):
"""
Allows for triggering the SPAT feature
by dropping tracks on the stop button
"""
source_widget = Gtk.drag_get_source_widget(context)
if selection.target.name() == 'exaile-index-list' and isinstance(source_widget, PlaylistView):
position = int(selection.data.split(',')[0])
if position == source_widget.playlist.spat_position:
position = -1
source_widget.playlist.spat_position = position
source_widget.queue_draw()
def on_spat_clicked(self, *e):
"""
Called when the user clicks on the SPAT item
"""
trs = self.get_selected_page().view.get_selected_items()
if not trs: return
# TODO: this works, but implement this some other way in the future
if player.QUEUE.current_playlist.spat_position == -1:
player.QUEUE.current_playlist.spat_position = trs[0][0]
else:
player.QUEUE.current_playlist.spat_position = -1
self.get_selected_page().view.queue_draw()
def on_append_items(self, tracks, force_play=False, queue=False, sort=False, replace=False):
"""
Called when a panel (or other component)
has tracks to append and possibly queue
:param tracks: The tracks to append
:param force_play: Force playing the first track if there
is no track currently playing. Otherwise
check a setting to determine whether the
track should be played
:param queue: Additionally queue tracks
:param sort: Sort before adding
:param replace: Clear playlist before adding
"""
if len(tracks) == 0:
return
page = self.get_selected_page()
if sort:
tracks = trax.sort_tracks(
('artist', 'date', 'album', 'discnumber', 'tracknumber'),
tracks)
if replace:
page.playlist.clear()
offset = len(page.playlist)
page.playlist.extend(tracks)
# extending the queue automatically starts playback
if queue:
if player.QUEUE is not page.playlist:
player.QUEUE.extend(tracks)
elif (force_play or settings.get_option( 'playlist/append_menu_starts_playback', False )) and \
not player.PLAYER.current:
page.view.play_track_at(offset, tracks[0])
def on_playback_error(self, type, player, message):
"""
Called when there has been a playback error
"""
self.message.show_error(_('Playback error encountered!'), message)
def on_buffering(self, type, player, percent):
"""
Called when a stream is buffering
"""
percent = min(percent, 100)
self.statusbar.set_status(_("Buffering: %d%%...") % percent, 1)
def on_track_tags_changed(self, type, track, tag):
"""
Called when tags are changed
"""
if track is player.PLAYER.current:
self._update_track_information()
def on_collection_tree_loaded(self, tree):
"""
Updates information on collection tree load
"""
self.statusbar.update_info()
def on_exaile_loaded(self, event_type, exaile, nothing):
"""
Updates information on exaile load
"""
self.statusbar.update_info()
event.remove_callback(self.on_exaile_loaded, 'exaile_loaded')
def on_playlist_tracks_added(self, type, playlist, tracks):
"""
Updates information on track add
"""
self.statusbar.update_info()
def on_playlist_tracks_removed(self, type, playlist, tracks):
"""
Updates information on track removal
"""
self.statusbar.update_info()
def on_toggle_pause(self, type, player, object):
"""
Called when the user clicks the play button after playback has
already begun
"""
if player.is_paused():
image = self.play_image
tooltip = _('Continue Playback')
else:
image = self.pause_image
tooltip = _('Pause Playback')
self.playpause_button.set_image(image)
self.playpause_button.set_tooltip_text(tooltip)
self._update_track_information()
def on_playlist_container_switch_page(self, notebook, page, page_num):
"""
Updates info after notebook page switch
"""
page = notebook.get_nth_page(page_num)
selection = page.view.get_selection()
selection.connect('changed', self.on_playlist_view_selection_changed)
self.statusbar.update_info()
def on_playlist_view_selection_changed(self, selection):
"""
Updates info after playlist page selection change
"""
self.statusbar.update_info()
def on_panel_filter_focus(self, *e):
"""
Gives focus to the filter field of the current panel
"""
try:
self.controller.get_active_panel().filter.grab_focus()
except (AttributeError, KeyError):
pass
def on_search_playlist_focus(self, *e):
"""
Gives focus to the playlist search bar
"""
plpage = get_selected_playlist()
if plpage:
plpage.get_search_entry().grab_focus()
def on_save_playlist(self, *e):
"""
Called when the user presses Ctrl+S
Spawns the save dialog of the currently selected playlist tab if
not custom, saves changes directly if custom
"""
tab = self.get_selected_tab()
if not tab: return
if tab.page.playlist.get_is_custom():
tab.do_save_changes_to_custom()
else:
tab.do_save_custom()
def on_save_playlist_as(self, *e):
"""
        Called when the user presses Ctrl+Shift+S
Spawns the save as dialog of the current playlist tab
"""
tab = self.get_selected_tab()
if not tab: return
tab.do_save_custom()
def on_clear_playlist(self, *e):
"""
Clears the current playlist tab
"""
page = self.get_selected_page()
if page:
page.playlist.clear()
def on_open_item_activate(self, menuitem):
"""
Shows a dialog to open media
"""
def on_uris_selected(dialog, uris):
uris.reverse()
if len(uris) > 0:
self.controller.open_uri(uris.pop(), play=True)
for uri in uris:
self.controller.open_uri(uri, play=False)
dialog = dialogs.MediaOpenDialog(self.window)
dialog.connect('uris-selected', on_uris_selected)
dialog.show()
def on_open_url_item_activate(self, menuitem):
"""
Shows a dialog to open an URI
"""
def on_uri_selected(dialog, uri):
self.controller.open_uri(uri, play=False)
dialog = dialogs.URIOpenDialog(self.window)
dialog.connect('uri-selected', on_uri_selected)
dialog.show()
def on_open_directories_item_activate(self, menuitem):
"""
Shows a dialog to open directories
"""
def on_uris_selected(dialog, uris):
uris.reverse()
if len(uris) > 0:
self.controller.open_uri(uris.pop(), play=True)
for uri in uris:
self.controller.open_uri(uri, play=False)
dialog = dialogs.DirectoryOpenDialog(self.window)
# Selecting empty folders is useless
dialog.props.create_folders = False
dialog.connect('uris-selected', on_uris_selected)
dialog.show()
def on_export_current_playlist_activate(self, menuitem):
"""
Shows a dialog to export the current playlist
"""
page = self.get_selected_page()
if not page or not isinstance(page, PlaylistPage):
return
def on_message(dialog, message_type, message):
"""
Show messages in the main window message area
"""
if message_type == Gtk.MessageType.INFO:
self.message.show_info(markup=message)
elif message_type == Gtk.MessageType.ERROR:
self.message.show_error(_('Playlist export failed!'), message)
return True
dialog = dialogs.PlaylistExportDialog(page.playlist, self.window)
dialog.connect('message', on_message)
dialog.show()
def on_playlist_utilities_bar_visible_toggled(self, checkmenuitem):
"""
Shows or hides the playlist utilities bar
"""
settings.set_option('gui/playlist_utilities_bar_visible',
checkmenuitem.get_active())
def on_show_playing_track_item_activate(self, menuitem):
"""
Tries to show the currently playing track
"""
self.playlist_container.show_current_track()
def on_about_item_activate(self, menuitem):
"""
Shows the about dialog
"""
dialog = dialogs.AboutDialog(self.window)
dialog.show()
def on_playback_resume(self, type, player, data):
self.resuming = True
def on_playback_start(self, type, player, object):
"""
Called when playback starts
Sets the currently playing track visible in the currently selected
playlist if the user has chosen this setting
"""
if self.resuming:
self.resuming = False
return
self._update_track_information()
self.playpause_button.set_image(self.pause_image)
self.playpause_button.set_tooltip_text(_('Pause Playback'))
def on_playback_end(self, type, player, object):
"""
Called when playback ends
"""
self.window.set_title('Exaile')
self.playpause_button.set_image(self.play_image)
self.playpause_button.set_tooltip_text(_('Start Playback'))
def _on_option_set(self, name, object, option):
"""
Handles changes of settings
"""
if option == 'gui/main_window_title_format':
self.title_formatter.props.format = settings.get_option(
option, self.title_formatter.props.format)
elif option == 'gui/use_tray':
usetray = settings.get_option(option, False)
if self.controller.tray_icon and not usetray:
self.controller.tray_icon.destroy()
self.controller.tray_icon = None
elif not self.controller.tray_icon and usetray:
self.controller.tray_icon = tray.TrayIcon(self)
elif option == 'gui/show_info_area':
self.info_area.set_no_show_all(False)
if settings.get_option(option, True):
self.info_area.show_all()
else:
self.info_area.hide()
self.info_area.set_no_show_all(True)
elif option == 'gui/show_info_area_covers':
cover = self.info_area.cover
cover.set_no_show_all(False)
if settings.get_option(option, True):
cover.show_all()
else:
cover.hide()
cover.set_no_show_all(True)
elif option == 'gui/transparency':
self._update_alpha()
def _on_volume_key(self, is_up):
        diff = int(100 * settings.get_option('gui/volume_key_step_size', VOLUME_STEP_DEFAULT))
if not is_up: diff = -diff
player.PLAYER.modify_volume(diff)
return True
def _on_seek_key(self, is_forward):
diff = settings.get_option('gui/seek_key_step_size', SEEK_STEP_DEFAULT)
if not is_forward: diff = -diff
if player.PLAYER.current:
player.PLAYER.modify_time(diff)
self.progress_bar.update_progress()
return True
def _on_prev_tab_key(self, *e):
self.playlist_container.get_current_notebook().select_prev_tab()
return True
def _on_next_tab_key(self, *e):
self.playlist_container.get_current_notebook().select_next_tab()
return True
def _on_playpause_button(self, *e):
self.playpause()
return True
def _on_focus_playlist_tab(self, tab_nr):
self.playlist_container.get_current_notebook().focus_tab(tab_nr)
return True
def _on_focus_playlist_container(self, *_e):
self.playlist_container.focus()
return True
def _update_track_information(self):
"""
Sets track information
"""
track = player.PLAYER.current
if not track:
return
self.window.set_title(self.title_formatter.format(track))
def playpause(self):
"""
Pauses the playlist if it is playing, starts playing if it is
        paused. If stopped, tries to start playing the next suitable track.
"""
if player.PLAYER.is_paused() or player.PLAYER.is_playing():
player.PLAYER.toggle_pause()
else:
pl = self.get_selected_page()
player.QUEUE.set_current_playlist(pl.playlist)
try:
trackpath = pl.view.get_selected_paths()[0]
pl.playlist.current_position = trackpath[0]
except IndexError:
pass
player.QUEUE.play(track=pl.playlist.current)
def _setup_position(self):
"""
        Sets up the position and size based on the size the window was
when it was last moved or resized
"""
if settings.get_option('gui/mainw_maximized', False):
self.window.maximize()
width = settings.get_option('gui/mainw_width', 500)
height = settings.get_option('gui/mainw_height', 475)
x = settings.get_option('gui/mainw_x', 10)
y = settings.get_option('gui/mainw_y', 10)
self.window.move(x, y)
self.window.resize(width, height)
pos = settings.get_option('gui/mainw_sash_pos', 200)
self.splitter.set_position(pos)
def on_delete_event(self, *e):
"""
Called when the user attempts to close the window
"""
sash_pos = self.splitter.get_position()
if sash_pos > 10:
settings.set_option('gui/mainw_sash_pos', sash_pos)
if settings.get_option('gui/use_tray', False) and \
settings.get_option('gui/close_to_tray', False):
self.window.hide()
else:
self.quit()
return True
def quit(self, *e):
"""
Quits Exaile
"""
self.window.hide()
GLib.idle_add(self.controller.exaile.quit)
return True
def on_restart_item_activate(self, menuitem):
"""
Restarts Exaile
"""
self.window.hide()
GLib.idle_add(self.controller.exaile.quit, True)
def toggle_visible(self, bringtofront=False):
"""
Toggles visibility of the main window
"""
toggle_handled = self.emit('main-visible-toggle')
if not toggle_handled:
if bringtofront and self.window.is_active() or \
not bringtofront and self.window.get_property('visible'):
self.window.hide()
else:
# the ordering for deiconify/show matters -- if this gets
# switched, then the minimization detection breaks
self.window.deiconify()
self.window.show()
def configure_event(self, *e):
"""
Called when the window is resized or moved
"""
# Don't save window size if it is maximized or fullscreen.
if settings.get_option('gui/mainw_maximized', False) or \
self._fullscreen:
return False
(width, height) = self.window.get_size()
if [width, height] != [ settings.get_option("gui/mainw_"+key, -1) for \
key in ["width", "height"] ]:
settings.set_option('gui/mainw_height', height, save=False)
settings.set_option('gui/mainw_width', width, save=False)
(x, y) = self.window.get_position()
if [x, y] != [ settings.get_option("gui/mainw_"+key, -1) for \
key in ["x", "y"] ]:
settings.set_option('gui/mainw_x', x, save=False)
settings.set_option('gui/mainw_y', y, save=False)
return False
def window_state_change_event(self, window, event):
"""
Saves the current maximized and fullscreen
states and minimizes to tray if requested
"""
if event.changed_mask & Gdk.WindowState.MAXIMIZED:
settings.set_option('gui/mainw_maximized',
bool(event.new_window_state & Gdk.WindowState.MAXIMIZED))
if event.changed_mask & Gdk.WindowState.FULLSCREEN:
self._fullscreen = bool(event.new_window_state & Gdk.WindowState.FULLSCREEN)
self.notify('is-fullscreen')
# detect minimization state changes
prev_minimized = self.minimized
if not self.minimized:
if event.changed_mask & Gdk.WindowState.ICONIFIED and \
not event.changed_mask & Gdk.WindowState.WITHDRAWN and \
event.new_window_state & Gdk.WindowState.ICONIFIED and \
not event.new_window_state & Gdk.WindowState.WITHDRAWN and \
not self.window_state & Gdk.WindowState.ICONIFIED:
self.minimized = True
else:
if event.changed_mask & Gdk.WindowState.WITHDRAWN and \
not event.new_window_state & (Gdk.WindowState.WITHDRAWN): #and \
self.minimized = False
# track this
self.window_state = event.new_window_state
if settings.get_option('gui/minimize_to_tray', False):
# old code to detect minimization
# -> it must have worked at some point, perhaps this is a GTK version
# specific set of behaviors? Current code works now on 2.24.17
#if wm_state is not None:
# if '_NET_WM_STATE_HIDDEN' in wm_state[2]:
# show tray
# window.hide
#else
# destroy tray
if self.minimized != prev_minimized and self.minimized == True:
if not settings.get_option('gui/use_tray', False) and \
self.controller.tray_icon is None:
self.controller.tray_icon = tray.TrayIcon(self)
window.hide()
elif not settings.get_option('gui/use_tray', False) and \
self.controller.tray_icon is not None:
self.controller.tray_icon.destroy()
self.controller.tray_icon = None
return False
def get_selected_page(self):
"""
        Returns the currently displayed playlist notebook page
"""
return self.playlist_container.get_current_tab()
def get_selected_playlist(self):
try:
page = self.get_selected_page()
except AttributeError:
return None
if not isinstance(page, PlaylistPage):
return None
return page
class MainWindowTrackInfoPane(info.TrackInfoPane, providers.ProviderHandler):
"""
Extends the regular track info pane by an area for custom widgets
The mainwindow-info-area-widget provider is used to show widgets
on the right of the info area. They should be small. The registered
provider should provide a method 'create_widget' that takes the info
area instance as a parameter, and that returns a Gtk.Widget to be
inserted into the widget_area of the info area, and an attribute
'name' that will be used when removing the provider.
"""
def __init__(self, player):
info.TrackInfoPane.__init__(self, player)
self.__player = player
self.widget_area = Gtk.Box()
self.get_child().pack_start(self.widget_area, False, False, 0)
self.__widget_area_widgets = {}
# call this last if we're using simple_init=True
providers.ProviderHandler.__init__(self, 'mainwindow-info-area-widget',
target=player, simple_init=True)
def get_player(self):
'''
Retrieves the player object that this info area
is associated with
'''
return self._TrackInfoPane__player
def on_provider_added(self, provider):
name = provider.name
widget = provider.create_widget(self)
old_widget = self.__widget_area_widgets.get(name)
if old_widget is not None:
self.widget_area.remove(old_widget)
old_widget.destroy()
self.__widget_area_widgets[name] = widget
self.widget_area.pack_start(widget, False, False, 0)
widget.show_all()
def on_provider_removed(self, provider):
widget = self.__widget_area_widgets.pop(provider.name, None)
if widget is not None:
self.widget_area.remove(widget)
widget.destroy()
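# A minimal sketch (not part of Exaile itself) of an object satisfying the
# 'mainwindow-info-area-widget' provider protocol described in the class
# docstring above: a 'name' attribute plus a create_widget(info_area) method
# returning a Gtk.Widget. Registration is assumed to go through
# xl.providers.register(); the class and label below are made up for
# illustration:
#
#     class ExampleInfoAreaWidgetProvider(object):
#         name = 'example-info-widget'
#
#         def create_widget(self, info_area):
#             return Gtk.Label(label='example')
#
#     providers.register('mainwindow-info-area-widget',
#                        ExampleInfoAreaWidgetProvider())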
def get_playlist_container():
return MainWindow._mainwindow.playlist_container
def get_playlist_notebook():
'''Retrieves the primary playlist notebook'''
return MainWindow._mainwindow.playlist_container.notebooks[0]
def get_selected_page():
return MainWindow._mainwindow.get_selected_page()
def get_selected_playlist():
return MainWindow._mainwindow.get_selected_playlist()
def mainwindow():
return MainWindow._mainwindow
# vim: et sts=4 sw=4
| gpl-2.0 | -6,720,579,076,938,104,000 | 35.930918 | 105 | 0.589593 | false |
chvogl/tardis | tardis/io/config_reader.py | 1 | 40145 | # Module to read the rather complex config data
import logging
import os
import pprint
from astropy import constants, units as u
import numpy as np
import pandas as pd
import yaml
import tardis
from tardis.io.model_reader import read_density_file, \
calculate_density_after_time, read_abundances_file
from tardis.io.config_validator import ConfigurationValidator
from tardis import atomic
from tardis.util import species_string_to_tuple, parse_quantity, \
element_symbol2atomic_number
import copy
pp = pprint.PrettyPrinter(indent=4)
logger = logging.getLogger(__name__)
data_dir = os.path.join(tardis.__path__[0], 'data')
default_config_definition_file = os.path.join(data_dir,
'tardis_config_definition.yml')
#File parsers for different file formats:
density_structure_fileparser = {}
inv_ni56_efolding_time = 1 / (8.8 * u.day)
inv_co56_efolding_time = 1 / (113.7 * u.day)
inv_cr48_efolding_time = 1 / (1.29602 * u.day)
inv_v48_efolding_time = 1 / (23.0442 * u.day)
inv_fe52_efolding_time = 1 / (0.497429 * u.day)
inv_mn52_efolding_time = 1 / (0.0211395 * u.day)
class ConfigurationError(ValueError):
pass
def parse_quantity_linspace(quantity_linspace_dictionary, add_one=True):
"""
parse a dictionary of the following kind
{'start': 5000 km/s,
'stop': 10000 km/s,
'num': 1000}
Parameters
----------
quantity_linspace_dictionary: ~dict
add_one: boolean, default: True
Returns
-------
    ~astropy.units.Quantity
"""
start = parse_quantity(quantity_linspace_dictionary['start'])
stop = parse_quantity(quantity_linspace_dictionary['stop'])
try:
stop = stop.to(start.unit)
except u.UnitsError:
raise ConfigurationError('"start" and "stop" keyword must be compatible quantities')
num = quantity_linspace_dictionary['num']
if add_one:
num += 1
return np.linspace(start.value, stop.value, num=num) * start.unit
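# Illustrative call (a sketch mirroring the docstring above, not a fixed API
# contract): with add_one=True the grid gains one extra point, e.g.
#
#     parse_quantity_linspace({'start': '5000 km/s', 'stop': '10000 km/s', 'num': 4})
#     # -> <Quantity [ 5000., 6250., 7500., 8750., 10000.] km / s>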
def parse_spectral_bin(spectral_bin_boundary_1, spectral_bin_boundary_2):
spectral_bin_boundary_1 = parse_quantity(spectral_bin_boundary_1).to('Angstrom', u.spectral())
spectral_bin_boundary_2 = parse_quantity(spectral_bin_boundary_2).to('Angstrom', u.spectral())
spectrum_start_wavelength = min(spectral_bin_boundary_1, spectral_bin_boundary_2)
spectrum_end_wavelength = max(spectral_bin_boundary_1, spectral_bin_boundary_2)
return spectrum_start_wavelength, spectrum_end_wavelength
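# Example of the ordering performed by parse_spectral_bin (hypothetical values,
# shown only to illustrate the u.spectral() conversion): both boundaries are
# converted to wavelength first, so the shorter wavelength always becomes the
# start of the spectral window, e.g.
#
#     start, end = parse_spectral_bin('10000 angstrom', '500 angstrom')
#     # -> start == 500 Angstrom, end == 10000 Angstrom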
def calculate_exponential_density(velocities, v_0, rho0):
"""
This function computes the exponential density profile.
:math:`\\rho = \\rho_0 \\times \\exp \\left( -\\frac{v}{v_0} \\right)`
Parameters
----------
velocities : ~astropy.Quantity
Array like velocity profile
    v_0 : ~astropy.Quantity
reference velocity
rho0 : ~astropy.Quantity
reference density
Returns
-------
densities : ~astropy.Quantity
"""
densities = rho0 * np.exp(-(velocities / v_0))
return densities
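# Worked example for the exponential profile (assumed values, purely
# illustrative): with rho0 = 1e-13 g/cm^3, v_0 = 1e4 km/s and v = 2e4 km/s,
# rho = 1e-13 * exp(-2) ~= 1.35e-14 g/cm^3, i.e. one e-folding per v_0 in velocity.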
def calculate_power_law_density(velocities, velocity_0, rho_0, exponent):
"""
    This function computes a discrete power-law density profile.
:math:`\\rho = \\rho_0 \\times \\left( \\frac{v}{v_0} \\right)^n`
Parameters
----------
velocities : ~astropy.Quantity
Array like velocity profile
velocity_0 : ~astropy.Quantity
reference velocity
    rho_0 : ~astropy.Quantity
reference density
exponent : ~float
exponent used in the powerlaw
Returns
-------
densities : ~astropy.Quantity
"""
densities = rho_0 * np.power((velocities / velocity_0), exponent)
return densities
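# Worked example for the power-law profile (assumed values, purely
# illustrative): with rho_0 = 1e-13 g/cm^3, velocity_0 = 1e4 km/s,
# exponent = -2 and v = 2e4 km/s, rho = 1e-13 * 2**-2 = 2.5e-14 g/cm^3;
# negative exponents make the density fall off outwards, positive ones make it rise.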
def parse_model_file_section(model_setup_file_dict, time_explosion):
def parse_artis_model_setup_files(model_file_section_dict, time_explosion):
###### Reading the structure part of the ARTIS file pair
structure_fname = model_file_section_dict['structure_fname']
for i, line in enumerate(file(structure_fname)):
if i == 0:
no_of_shells = np.int64(line.strip())
elif i == 1:
time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
elif i == 2:
break
artis_model_columns = ['velocities', 'mean_densities_0', 'ni56_fraction', 'co56_fraction', 'fe52_fraction',
'cr48_fraction']
artis_model = np.recfromtxt(structure_fname, skip_header=2, usecols=(1, 2, 4, 5, 6, 7), unpack=True,
dtype=[(item, np.float64) for item in artis_model_columns])
        #converting densities from log(g/cm^3) to g/cm^3 and stretching it to the current time of the explosion
velocities = u.Quantity(np.append([0], artis_model['velocities']), 'km/s').to('cm/s')
mean_densities_0 = u.Quantity(10 ** artis_model['mean_densities_0'], 'g/cm^3')
mean_densities = calculate_density_after_time(mean_densities_0, time_of_model, time_explosion)
#Verifying information
if len(mean_densities) == no_of_shells:
logger.debug('Verified ARTIS model structure file %s (no_of_shells=length of dataset)', structure_fname)
else:
raise ConfigurationError(
'Error in ARTIS file %s - Number of shells not the same as dataset length' % structure_fname)
v_inner = velocities[:-1]
v_outer = velocities[1:]
volumes = (4 * np.pi / 3) * (time_of_model ** 3) * ( v_outer ** 3 - v_inner ** 3)
masses = (volumes * mean_densities_0 / constants.M_sun).to(1)
logger.info('Read ARTIS configuration file %s - found %d zones with total mass %g Msun', structure_fname,
no_of_shells, sum(masses.value))
if 'v_lowest' in model_file_section_dict:
v_lowest = parse_quantity(model_file_section_dict['v_lowest']).to('cm/s').value
min_shell = v_inner.value.searchsorted(v_lowest)
else:
min_shell = 1
if 'v_highest' in model_file_section_dict:
v_highest = parse_quantity(model_file_section_dict['v_highest']).to('cm/s').value
max_shell = v_outer.value.searchsorted(v_highest)
else:
max_shell = no_of_shells
artis_model = artis_model[min_shell:max_shell]
v_inner = v_inner[min_shell:max_shell]
v_outer = v_outer[min_shell:max_shell]
mean_densities = mean_densities[min_shell:max_shell]
###### Reading the abundance part of the ARTIS file pair
abundances_fname = model_file_section_dict['abundances_fname']
abundances = pd.DataFrame(np.loadtxt(abundances_fname)[min_shell:max_shell, 1:].transpose(),
index=np.arange(1, 31))
ni_stable = abundances.ix[28] - artis_model['ni56_fraction']
co_stable = abundances.ix[27] - artis_model['co56_fraction']
fe_stable = abundances.ix[26] - artis_model['fe52_fraction']
mn_stable = abundances.ix[25] - 0.0
cr_stable = abundances.ix[24] - artis_model['cr48_fraction']
v_stable = abundances.ix[23] - 0.0
ti_stable = abundances.ix[22] - 0.0
abundances.ix[28] = ni_stable
abundances.ix[28] += artis_model['ni56_fraction'] * np.exp(
-(time_explosion * inv_ni56_efolding_time).to(1).value)
abundances.ix[27] = co_stable
abundances.ix[27] += artis_model['co56_fraction'] * np.exp(
-(time_explosion * inv_co56_efolding_time).to(1).value)
abundances.ix[27] += (inv_ni56_efolding_time * artis_model['ni56_fraction'] /
(inv_ni56_efolding_time - inv_co56_efolding_time)) * \
(np.exp(-(inv_co56_efolding_time * time_explosion).to(1).value) - np.exp(
-(inv_ni56_efolding_time * time_explosion).to(1).value))
abundances.ix[26] = fe_stable
abundances.ix[26] += artis_model['fe52_fraction'] * np.exp(
-(time_explosion * inv_fe52_efolding_time).to(1).value)
abundances.ix[26] += ((artis_model['co56_fraction'] * inv_ni56_efolding_time
- artis_model['co56_fraction'] * inv_co56_efolding_time
+ artis_model['ni56_fraction'] * inv_ni56_efolding_time
- artis_model['ni56_fraction'] * inv_co56_efolding_time
- artis_model['co56_fraction'] * inv_ni56_efolding_time * np.exp(
-(inv_co56_efolding_time * time_explosion).to(1).value)
+ artis_model['co56_fraction'] * inv_co56_efolding_time * np.exp(
-(inv_co56_efolding_time * time_explosion).to(1).value)
- artis_model['ni56_fraction'] * inv_ni56_efolding_time * np.exp(
-(inv_co56_efolding_time * time_explosion).to(1).value)
+ artis_model['ni56_fraction'] * inv_co56_efolding_time * np.exp(
-(inv_ni56_efolding_time * time_explosion).to(1).value))
/ (inv_ni56_efolding_time - inv_co56_efolding_time))
abundances.ix[25] = mn_stable
abundances.ix[25] += (inv_fe52_efolding_time * artis_model['fe52_fraction'] /
(inv_fe52_efolding_time - inv_mn52_efolding_time)) * \
(np.exp(-(inv_mn52_efolding_time * time_explosion).to(1).value) - np.exp(
-(inv_fe52_efolding_time * time_explosion).to(1).value))
abundances.ix[24] = cr_stable
abundances.ix[24] += artis_model['cr48_fraction'] * np.exp(
-(time_explosion * inv_cr48_efolding_time).to(1).value)
abundances.ix[24] += ((artis_model['fe52_fraction'] * inv_fe52_efolding_time
- artis_model['fe52_fraction'] * inv_mn52_efolding_time
- artis_model['fe52_fraction'] * inv_fe52_efolding_time * np.exp(
-(inv_mn52_efolding_time * time_explosion).to(1).value)
+ artis_model['fe52_fraction'] * inv_mn52_efolding_time * np.exp(
-(inv_fe52_efolding_time * time_explosion).to(1).value))
/ (inv_fe52_efolding_time - inv_mn52_efolding_time))
abundances.ix[23] = v_stable
abundances.ix[23] += (inv_cr48_efolding_time * artis_model['cr48_fraction'] /
(inv_cr48_efolding_time - inv_v48_efolding_time)) * \
(np.exp(-(inv_v48_efolding_time * time_explosion).to(1).value) - np.exp(
-(inv_cr48_efolding_time * time_explosion).to(1).value))
abundances.ix[22] = ti_stable
abundances.ix[22] += ((artis_model['cr48_fraction'] * inv_cr48_efolding_time
- artis_model['cr48_fraction'] * inv_v48_efolding_time
- artis_model['cr48_fraction'] * inv_cr48_efolding_time * np.exp(
-(inv_v48_efolding_time * time_explosion).to(1).value)
+ artis_model['cr48_fraction'] * inv_v48_efolding_time * np.exp(
-(inv_cr48_efolding_time * time_explosion).to(1).value))
/ (inv_cr48_efolding_time - inv_v48_efolding_time))
if 'split_shells' in model_file_section_dict:
split_shells = int(model_file_section_dict['split_shells'])
else:
split_shells = 1
if split_shells > 1:
logger.info('Increasing the number of shells by a factor of %s' % split_shells)
no_of_shells = len(v_inner)
velocities = np.linspace(v_inner[0], v_outer[-1], no_of_shells * split_shells + 1)
v_inner = velocities[:-1]
v_outer = velocities[1:]
old_mean_densities = mean_densities
mean_densities = np.empty(no_of_shells * split_shells) * old_mean_densities.unit
new_abundance_data = np.empty((abundances.values.shape[0], no_of_shells * split_shells))
for i in xrange(split_shells):
mean_densities[i::split_shells] = old_mean_densities
new_abundance_data[:, i::split_shells] = abundances.values
abundances = pd.DataFrame(new_abundance_data, index=abundances.index)
#def parser_simple_ascii_model
return v_inner, v_outer, mean_densities, abundances
model_file_section_parser = {}
model_file_section_parser['artis'] = parse_artis_model_setup_files
try:
parser = model_file_section_parser[model_setup_file_dict['type']]
except KeyError:
raise ConfigurationError('In model file section only types %s are allowed (supplied %s) ' %
(model_file_section_parser.keys(), model_setup_file_dict['type']))
return parser(model_setup_file_dict, time_explosion)
def parse_density_file_section(density_file_dict, time_explosion):
density_file_parser = {}
def parse_artis_density(density_file_dict, time_explosion):
density_file = density_file_dict['name']
for i, line in enumerate(file(density_file)):
if i == 0:
no_of_shells = np.int64(line.strip())
elif i == 1:
time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
elif i == 2:
break
velocities, mean_densities_0 = np.recfromtxt(density_file, skip_header=2, usecols=(1, 2), unpack=True)
#converting densities from log(g/cm^3) to g/cm^3 and stretching them to the current time of the model
velocities = u.Quantity(np.append([0], velocities), 'km/s').to('cm/s')
mean_densities_0 = u.Quantity(10 ** mean_densities_0, 'g/cm^3')
mean_densities = calculate_density_after_time(mean_densities_0, time_of_model, time_explosion)
#Verifying information
if len(mean_densities) == no_of_shells:
logger.debug('Verified ARTIS file %s (no_of_shells=length of dataset)', density_file)
else:
raise ConfigurationError(
'Error in ARTIS file %s - Number of shells not the same as dataset length' % density_file)
min_shell = 1
max_shell = no_of_shells
v_inner = velocities[:-1]
v_outer = velocities[1:]
volumes = (4 * np.pi / 3) * (time_of_model ** 3) * ( v_outer ** 3 - v_inner ** 3)
masses = (volumes * mean_densities_0 / constants.M_sun).to(1)
logger.info('Read ARTIS configuration file %s - found %d zones with total mass %g Msun', density_file,
no_of_shells, sum(masses.value))
if 'v_lowest' in density_file_dict:
v_lowest = parse_quantity(density_file_dict['v_lowest']).to('cm/s').value
min_shell = v_inner.value.searchsorted(v_lowest)
else:
min_shell = 1
if 'v_highest' in density_file_dict:
v_highest = parse_quantity(density_file_dict['v_highest']).to('cm/s').value
max_shell = v_outer.value.searchsorted(v_highest)
else:
max_shell = no_of_shells
v_inner = v_inner[min_shell:max_shell]
v_outer = v_outer[min_shell:max_shell]
mean_densities = mean_densities[min_shell:max_shell]
return v_inner, v_outer, mean_densities, min_shell, max_shell
density_file_parser['artis'] = parse_artis_density
try:
parser = density_file_parser[density_file_dict['type']]
except KeyError:
raise ConfigurationError('In density file section only types %s are allowed (supplied %s) ' %
(density_file_parser.keys(), density_file_dict['type']))
return parser(density_file_dict, time_explosion)
def parse_density_section(density_dict, v_inner, v_outer, time_explosion):
density_parser = {}
#Parse density uniform
def parse_uniform(density_dict, v_inner, v_outer, time_explosion):
no_of_shells = len(v_inner)
return density_dict['value'].to('g cm^-3') * np.ones(no_of_shells)
density_parser['uniform'] = parse_uniform
#Parse density branch85 w7
def parse_branch85(density_dict, v_inner, v_outer, time_explosion):
velocities = 0.5 * (v_inner + v_outer)
densities = calculate_power_law_density(velocities,
density_dict['w7_v_0'],
density_dict['w7_rho_0'], -7)
densities = calculate_density_after_time(densities,
density_dict['w7_time_0'],
time_explosion)
return densities
density_parser['branch85_w7'] = parse_branch85
def parse_power_law(density_dict, v_inner, v_outer, time_explosion):
time_0 = density_dict.pop('time_0')
rho_0 = density_dict.pop('rho_0')
v_0 = density_dict.pop('v_0')
exponent = density_dict.pop('exponent')
velocities = 0.5 * (v_inner + v_outer)
densities = calculate_power_law_density(velocities, v_0, rho_0, exponent)
densities = calculate_density_after_time(densities, time_0, time_explosion)
return densities
density_parser['power_law'] = parse_power_law
def parse_exponential(density_dict, v_inner, v_outer, time_explosion):
time_0 = density_dict.pop('time_0')
rho_0 = density_dict.pop('rho_0')
v_0 = density_dict.pop('v_0')
velocities = 0.5 * (v_inner + v_outer)
densities = calculate_exponential_density(velocities, v_0, rho_0)
densities = calculate_density_after_time(densities, time_0, time_explosion)
return densities
density_parser['exponential'] = parse_exponential
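# Note added for clarity (not in the original source): assuming the helper
# functions behave as their names suggest, the two profiles above evaluate
#   power law:    rho(v) = rho_0 * (v / v_0) ** exponent
#   exponential:  rho(v) = rho_0 * exp(-v / v_0)
# at the shell-centre velocities, and calculate_density_after_time then
# rescales the result for homologous expansion,
#   rho(time_explosion) = rho(time_0) * (time_0 / time_explosion) ** 3.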
try:
parser = density_parser[density_dict['type']]
except KeyError:
raise ConfigurationError('In density section only types %s are allowed (supplied %s) ' %
(density_parser.keys(), density_dict['type']))
return parser(density_dict, v_inner, v_outer, time_explosion)
def parse_abundance_file_section(abundance_file_dict, abundances, min_shell, max_shell):
abundance_file_parser = {}
def parse_artis(abundance_file_dict, abundances, min_shell, max_shell):
#### ---- debug ----
time_of_model = 0.0
####
fname = abundance_file_dict['name']
max_atom = 30
logger.info("Parsing ARTIS Abundance section from shell %d to %d", min_shell, max_shell)
abundances.values[:max_atom, :] = np.loadtxt(fname)[min_shell:max_shell, 1:].transpose()
return abundances
abundance_file_parser['artis'] = parse_artis
try:
parser = abundance_file_parser[abundance_file_dict['type']]
except KeyError:
raise ConfigurationError('In abundance file section only types %s are allowed (supplied %s) ' %
(abundance_file_parser.keys(), abundance_file_dict['type']))
return parser(abundance_file_dict, abundances, min_shell, max_shell)
def parse_supernova_section(supernova_dict):
"""
Parse the supernova section
Parameters
----------
supernova_dict: dict
YAML parsed supernova dict
Returns
-------
config_dict: dict
"""
config_dict = {}
#parse luminosity
luminosity_value, luminosity_unit = supernova_dict['luminosity_requested'].strip().split()
if luminosity_unit == 'log_lsun':
config_dict['luminosity_requested'] = 10 ** (
float(luminosity_value) + np.log10(constants.L_sun.cgs.value)) * u.erg / u.s
else:
config_dict['luminosity_requested'] = (float(luminosity_value) * u.Unit(luminosity_unit)).to('erg/s')
config_dict['time_explosion'] = parse_quantity(supernova_dict['time_explosion']).to('s')
if 'distance' in supernova_dict:
config_dict['distance'] = parse_quantity(supernova_dict['distance'])
else:
config_dict['distance'] = None
if 'luminosity_wavelength_start' in supernova_dict:
config_dict['luminosity_nu_end'] = parse_quantity(supernova_dict['luminosity_wavelength_start']). \
to('Hz', u.spectral())
else:
config_dict['luminosity_nu_end'] = np.inf * u.Hz
if 'luminosity_wavelength_end' in supernova_dict:
config_dict['luminosity_nu_start'] = parse_quantity(supernova_dict['luminosity_wavelength_end']). \
to('Hz', u.spectral())
else:
config_dict['luminosity_nu_start'] = 0.0 * u.Hz
return config_dict
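# Hypothetical usage sketch (not part of the original file); the input values
# are made up for illustration.
#
#   supernova_dict = {'luminosity_requested': '9.44 log_lsun',
#                     'time_explosion': '13 day'}
#   cfg = parse_supernova_section(supernova_dict)
#   # cfg['luminosity_requested'] -> 10**9.44 * L_sun in erg / s
#   # cfg['time_explosion']       -> 13 days converted to seconds
#   # cfg['distance']             -> None (no 'distance' key supplied)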
def parse_spectrum_list2dict(spectrum_list):
"""
Parse the spectrum list [start, stop, num] into a spectrum config dictionary
"""
if spectrum_list[0].unit.physical_type != 'length' or \
spectrum_list[1].unit.physical_type != 'length':
raise ValueError('start and end of spectrum need to be a length')
spectrum_config_dict = {}
spectrum_config_dict['start'] = spectrum_list[0]
spectrum_config_dict['end'] = spectrum_list[1]
spectrum_config_dict['bins'] = spectrum_list[2]
spectrum_frequency = np.linspace(
spectrum_config_dict['end'].to('Hz', u.spectral()),
spectrum_config_dict['start'].to('Hz', u.spectral()),
num=spectrum_config_dict['bins'] + 1)
spectrum_config_dict['frequency'] = spectrum_frequency
return spectrum_config_dict
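# Illustrative sketch (not part of the original module) of the conversion done
# above; the quantities are hypothetical.
#
#   spectrum_list = [u.Quantity(500, 'angstrom'),
#                    u.Quantity(20000, 'angstrom'),
#                    10000]
#   spec = parse_spectrum_list2dict(spectrum_list)
#   # spec['start'] -> 500 Angstrom, spec['end'] -> 20000 Angstrom,
#   # spec['bins'] -> 10000, and spec['frequency'] is a grid of bins + 1
#   # frequencies spanning the interval from the frequency of 'end' to the
#   # frequency of 'start'.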
def parse_convergence_section(convergence_section_dict):
"""
Parse the convergence section dictionary
Parameters
----------
convergence_section_dict: ~dict
dictionary
"""
convergence_parameters = ['damping_constant', 'threshold', 'fraction',
'hold_iterations']
for convergence_variable in ['t_inner', 't_rad', 'w']:
if convergence_variable not in convergence_section_dict:
convergence_section_dict[convergence_variable] = {}
convergence_variable_section = convergence_section_dict[convergence_variable]
for param in convergence_parameters:
if convergence_variable_section.get(param, None) is None:
if param in convergence_section_dict:
convergence_section_dict[convergence_variable][param] = (
convergence_section_dict[param])
return convergence_section_dict
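# Minimal sketch (not in the original source) of how section-level defaults
# propagate: a top-level parameter such as 'damping_constant' is copied into
# each of 't_inner', 't_rad' and 'w' unless the subsection already sets it.
#
#   section = {'damping_constant': 0.5, 't_inner': {'damping_constant': 1.0}}
#   parsed = parse_convergence_section(section)
#   # parsed['t_inner']['damping_constant'] -> 1.0 (kept)
#   # parsed['t_rad']['damping_constant']   -> 0.5 (inherited)
#   # parsed['w']['damping_constant']       -> 0.5 (inherited)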
def calculate_w7_branch85_densities(velocities, time_explosion, time_0=19.9999584, density_coefficient=3e29):
"""
Generated densities from the fit to W7 in Branch 85 page 620 (citation missing)
Parameters
----------
velocities : `~numpy.ndarray`
velocities in cm/s
time_explosion : `float`
time since explosion needed to descale density with expansion
time_0 : `float`
time in seconds of the w7 model - default 19.999, no reason to change
density_coefficient : `float`
coefficient for the polynomial - obtained by fitting to W7, no reason to change
"""
densities = density_coefficient * (velocities * 1e-5) ** -7
densities = calculate_density_after_time(densities, time_0, time_explosion)
return densities[1:]
class ConfigurationNameSpace(dict):
"""
The configuration namespace class wraps a dictionary and adds
utility functions for easy access. Accesses like a.b.c are then possible.
Code from http://goo.gl/KIaq8I
Parameters
----------
config_dict: ~dict
configuration dictionary
Returns
-------
config_ns: ConfigurationNameSpace
"""
@classmethod
def from_yaml(cls, fname):
"""
Read a configuration from a YAML file
Parameters
----------
fname: str
filename or path
"""
try:
yaml_dict = yaml.load(file(fname))
except IOError as e:
logger.critical('No config file named: %s', fname)
raise e
return cls.from_config_dict(yaml_dict)
@classmethod
def from_config_dict(cls, config_dict, config_definition_file=None):
"""
Validating a config file.
Parameters
----------
config_dict : ~dict
dictionary of a raw unvalidated config file
Returns
-------
`tardis.config_reader.Configuration`
"""
if config_definition_file is None:
config_definition_file = default_config_definition_file
config_definition = yaml.load(open(config_definition_file))
return cls(ConfigurationValidator(config_definition,
config_dict).get_config())
marker = object()
def __init__(self, value=None):
if value is None:
pass
elif isinstance(value, dict):
for key in value:
self.__setitem__(key, value[key])
else:
raise TypeError, 'expected dict'
def __setitem__(self, key, value):
if isinstance(value, dict) and not isinstance(value,
ConfigurationNameSpace):
value = ConfigurationNameSpace(value)
if key in self and hasattr(self[key], 'unit'):
value = u.Quantity(value, self[key].unit)
dict.__setitem__(self, key, value)
def __getitem__(self, key):
return super(ConfigurationNameSpace, self).__getitem__(key)
def __getattr__(self, item):
if item in self:
return self[item]
else:
super(ConfigurationNameSpace, self).__getattribute__(item)
__setattr__ = __setitem__
def __dir__(self):
return self.keys()
def get_config_item(self, config_item_string):
"""
Get configuration items using a string of type 'a.b.param'
Parameters
----------
config_item_string: ~str
string of shape 'section1.sectionb.param1'
"""
config_item_path = config_item_string.split('.')
if len(config_item_path) == 1:
config_item = config_item_path[0]
if config_item.startswith('item'):
return self[config_item_path[0]]
else:
return self[config_item]
elif len(config_item_path) == 2 and\
config_item_path[1].startswith('item'):
return self[config_item_path[0]][
int(config_item_path[1].replace('item', ''))]
else:
return self[config_item_path[0]].get_config_item(
'.'.join(config_item_path[1:]))
def set_config_item(self, config_item_string, value):
"""
set configuration items using a string of type 'a.b.param'
Parameters
----------
config_item_string: ~str
string of shape 'section1.sectionb.param1'
value:
value to set the parameter with it
"""
config_item_path = config_item_string.split('.')
if len(config_item_path) == 1:
self[config_item_path[0]] = value
elif len(config_item_path) == 2 and \
config_item_path[1].startswith('item'):
current_value = self[config_item_path[0]][
int(config_item_path[1].replace('item', ''))]
if hasattr(current_value, 'unit'):
self[config_item_path[0]][
int(config_item_path[1].replace('item', ''))] =\
u.Quantity(value, current_value.unit)
else:
self[config_item_path[0]][
int(config_item_path[1].replace('item', ''))] = value
else:
self[config_item_path[0]].set_config_item(
'.'.join(config_item_path[1:]), value)
def deepcopy(self):
return ConfigurationNameSpace(copy.deepcopy(dict(self)))
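# Usage sketch (added for illustration, not part of the original module):
#
#   ns = ConfigurationNameSpace({'montecarlo': {'seed': 23111963}})
#   ns.montecarlo.seed                        # attribute access -> 23111963
#   ns.get_config_item('montecarlo.seed')     # dotted-string access -> 23111963
#   ns.set_config_item('montecarlo.seed', 42) # dotted-string assignment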
class Configuration(ConfigurationNameSpace):
"""
Tardis configuration class
"""
@classmethod
def from_yaml(cls, fname, test_parser=False):
try:
yaml_dict = yaml.load(open(fname))
except IOError as e:
logger.critical('No config file named: %s', fname)
raise e
tardis_config_version = yaml_dict.get('tardis_config_version', None)
if tardis_config_version != 'v1.0':
raise ConfigurationError('Currently only tardis_config_version v1.0 supported')
return cls.from_config_dict(yaml_dict, test_parser=test_parser)
@classmethod
def from_config_dict(cls, config_dict, atom_data=None, test_parser=False,
config_definition_file=None, validate=True):
"""
Validating and subsequently parsing a config file.
Parameters
----------
config_dict : ~dict
dictionary of a raw unvalidated config file
atom_data: ~tardis.atomic.AtomData
atom data object. if `None` will be tried to be read from
atom data file path in the config_dict [default=None]
test_parser: ~bool
switch on to ignore a working atom_data, mainly useful for
testing this reader
config_definition_file: ~str
path to config definition file, if `None` will be set to the default
in the `data` directory that ships with TARDIS
validate: ~bool
Turn validation on or off.
Returns
-------
`tardis.config_reader.Configuration`
"""
if config_definition_file is None:
config_definition_file = default_config_definition_file
config_definition = yaml.load(open(config_definition_file))
if validate:
validated_config_dict = ConfigurationValidator(config_definition,
config_dict).get_config()
else:
validated_config_dict = config_dict
#First let's see if we can find an atom_db anywhere:
if test_parser:
atom_data = None
elif 'atom_data' in validated_config_dict.keys():
atom_data_fname = validated_config_dict['atom_data']
validated_config_dict['atom_data_fname'] = atom_data_fname
else:
raise ConfigurationError('No atom_data key found in config or command line')
if atom_data is None and not test_parser:
logger.info('Reading Atomic Data from %s', atom_data_fname)
atom_data = atomic.AtomData.from_hdf5(atom_data_fname)
else:
atom_data = atom_data
#Parsing supernova dictionary
validated_config_dict['supernova']['luminosity_nu_start'] = \
validated_config_dict['supernova']['luminosity_wavelength_end'].to(
u.Hz, u.spectral())
try:
validated_config_dict['supernova']['luminosity_nu_end'] = \
(validated_config_dict['supernova']
['luminosity_wavelength_start'].to(u.Hz, u.spectral()))
except ZeroDivisionError:
validated_config_dict['supernova']['luminosity_nu_end'] = (
np.inf * u.Hz)
validated_config_dict['supernova']['time_explosion'] = (
validated_config_dict['supernova']['time_explosion'].cgs)
validated_config_dict['supernova']['luminosity_requested'] = (
validated_config_dict['supernova']['luminosity_requested'].cgs)
#Parsing the model section
model_section = validated_config_dict['model']
v_inner = None
v_outer = None
mean_densities = None
abundances = None
structure_section = model_section['structure']
if structure_section['type'] == 'specific':
start, stop, num = model_section['structure']['velocity']
num += 1
velocities = np.linspace(start, stop, num)
v_inner, v_outer = velocities[:-1], velocities[1:]
mean_densities = parse_density_section(
model_section['structure']['density'], v_inner, v_outer,
validated_config_dict['supernova']['time_explosion']).cgs
elif structure_section['type'] == 'file':
v_inner, v_outer, mean_densities, inner_boundary_index, \
outer_boundary_index = read_density_file(
structure_section['filename'], structure_section['filetype'],
validated_config_dict['supernova']['time_explosion'],
structure_section['v_inner_boundary'],
structure_section['v_outer_boundary'])
r_inner = validated_config_dict['supernova']['time_explosion'] * v_inner
r_outer = validated_config_dict['supernova']['time_explosion'] * v_outer
r_middle = 0.5 * (r_inner + r_outer)
structure_validated_config_dict = {}
structure_section['v_inner'] = v_inner.cgs
structure_section['v_outer'] = v_outer.cgs
structure_section['mean_densities'] = mean_densities.cgs
no_of_shells = len(v_inner)
structure_section['no_of_shells'] = no_of_shells
structure_section['r_inner'] = r_inner.cgs
structure_section['r_outer'] = r_outer.cgs
structure_section['r_middle'] = r_middle.cgs
structure_section['volumes'] = ((4. / 3) * np.pi * \
(r_outer ** 3 -
r_inner ** 3)).cgs
#### TODO the following is legacy code and should be removed
validated_config_dict['structure'] = \
validated_config_dict['model']['structure']
# ^^^^^^^^^^^^^^^^
abundances_section = model_section['abundances']
if abundances_section['type'] == 'uniform':
abundances = pd.DataFrame(columns=np.arange(no_of_shells),
index=pd.Index(np.arange(1, 120), name='atomic_number'), dtype=np.float64)
for element_symbol_string in abundances_section:
if element_symbol_string == 'type': continue
z = element_symbol2atomic_number(element_symbol_string)
abundances.ix[z] = float(abundances_section[element_symbol_string])
elif abundances_section['type'] == 'file':
index, abundances = read_abundances_file(abundances_section['filename'], abundances_section['filetype'],
inner_boundary_index, outer_boundary_index)
if len(index) != no_of_shells:
raise ConfigurationError('The abundance file specified does not have the same number of cells '
'as the specified density profile')
abundances = abundances.replace(np.nan, 0.0)
abundances = abundances[abundances.sum(axis=1) > 0]
norm_factor = abundances.sum(axis=0)
if np.any(np.abs(norm_factor - 1) > 1e-12):
logger.warning("Abundances have not been normalized to 1. - normalizing")
abundances /= norm_factor
validated_config_dict['abundances'] = abundances
########### DOING PLASMA SECTION ###############
plasma_section = validated_config_dict['plasma']
if plasma_section['initial_t_inner'] < 0.0 * u.K:
luminosity_requested = validated_config_dict['supernova']['luminosity_requested']
plasma_section['t_inner'] = ((luminosity_requested /
(4 * np.pi * r_inner[0] ** 2 *
constants.sigma_sb)) ** .25).to('K')
logger.info('"initial_t_inner" is not specified in the plasma '
'section - initializing to %s with given luminosity',
plasma_section['t_inner'])
else:
plasma_section['t_inner'] = plasma_section['initial_t_inner']
plasma_section['t_rads'] = np.ones(no_of_shells) * \
plasma_section['initial_t_rad']
if plasma_section['disable_electron_scattering'] is False:
logger.debug("Electron scattering switched on")
validated_config_dict['montecarlo']['sigma_thomson'] = 6.652486e-25 / (u.cm ** 2)
else:
logger.warn('Disabling electron scattering - this is not physical')
validated_config_dict['montecarlo']['sigma_thomson'] = 1e-200 / (u.cm ** 2)
##### NLTE subsection of Plasma start
nlte_validated_config_dict = {}
nlte_species = []
nlte_section = plasma_section['nlte']
nlte_species_list = nlte_section.pop('species')
for species_string in nlte_species_list:
nlte_species.append(species_string_to_tuple(species_string))
nlte_validated_config_dict['species'] = nlte_species
nlte_validated_config_dict['species_string'] = nlte_species_list
nlte_validated_config_dict.update(nlte_section)
if 'coronal_approximation' not in nlte_section:
logger.debug('NLTE "coronal_approximation" not specified in NLTE section - defaulting to False')
nlte_validated_config_dict['coronal_approximation'] = False
if 'classical_nebular' not in nlte_section:
logger.debug('NLTE "classical_nebular" not specified in NLTE section - defaulting to False')
nlte_validated_config_dict['classical_nebular'] = False
elif nlte_section: #checks that the dictionary is not empty
logger.warn('No "species" given - ignoring other NLTE options given:\n%s',
pp.pformat(nlte_section))
if not nlte_validated_config_dict:
nlte_validated_config_dict['species'] = []
plasma_section['nlte'] = nlte_validated_config_dict
#^^^^^^^^^^^^^^ End of Plasma Section
##### Monte Carlo Section
montecarlo_section = validated_config_dict['montecarlo']
if montecarlo_section['last_no_of_packets'] < 0:
montecarlo_section['last_no_of_packets'] = \
montecarlo_section['no_of_packets']
default_convergence_section = {'type': 'damped',
'lock_t_inner_cycles': 1,
't_inner_update_exponent': -0.5,
'damping_constant': 0.5}
if montecarlo_section['convergence_strategy'] is None:
logger.warning('No convergence criteria selected - '
'just damping by 0.5 for w, t_rad and t_inner')
montecarlo_section['convergence_strategy'] = (
parse_convergence_section(default_convergence_section))
else:
montecarlo_section['convergence_strategy'] = (
parse_convergence_section(
montecarlo_section['convergence_strategy']))
black_body_section = montecarlo_section['black_body_sampling']
montecarlo_section['black_body_sampling'] = {}
montecarlo_section['black_body_sampling']['start'] = \
black_body_section[0]
montecarlo_section['black_body_sampling']['end'] = \
black_body_section[1]
montecarlo_section['black_body_sampling']['samples'] = \
black_body_section[2]
###### END of convergence section reading
validated_config_dict['spectrum'] = parse_spectrum_list2dict(
validated_config_dict['spectrum'])
return cls(validated_config_dict, atom_data)
def __init__(self, config_dict, atom_data):
super(Configuration, self).__init__(config_dict)
self.atom_data = atom_data
selected_atomic_numbers = self.abundances.index
if atom_data is not None:
self.number_densities = (self.abundances * self.structure.mean_densities.to('g/cm^3').value)
self.number_densities = self.number_densities.div(self.atom_data.atom_data.mass.ix[selected_atomic_numbers],
axis=0)
else:
logger.critical('atom_data is None, only sensible for testing the parser')
| bsd-3-clause | -3,658,605,933,976,384,500 | 36.553789 | 120 | 0.587695 | false |
amw2104/fireplace | fireplace/cards/classic/paladin.py | 1 | 2853 | from ..utils import *
##
# Hero Powers
# Reinforce (Uther Lightbringer)
class CS2_101:
activate = Summon(CONTROLLER, "CS2_101t")
# Reinforce (Uther Skin 1)
class CS2_101_H1:
activate = CS2_101.activate
##
# Minions
# Guardian of Kings
class CS2_088:
play = Heal(FRIENDLY_HERO, 6)
# Argent Protector
class EX1_362:
play = GiveDivineShield(TARGET)
# Aldor Peacekeeper
class EX1_382:
play = Buff(TARGET, "EX1_382e")
class EX1_382e:
atk = SET(1)
# Tirion Fordring
class EX1_383:
deathrattle = Summon(CONTROLLER, "EX1_383t")
##
# Spells
# Blessing of Might
class CS2_087:
play = Buff(TARGET, "CS2_087e")
CS2_087e = buff(atk=3)
# Holy Light
class CS2_089:
play = Heal(TARGET, 6)
# Blessing of Kings
class CS2_092:
play = Buff(TARGET, "CS2_092e")
CS2_092e = buff(+4, +4)
# Consecration
class CS2_093:
play = Hit(ENEMY_CHARACTERS, 2)
# Hammer of Wrath
class CS2_094:
play = Hit(TARGET, 3), Draw(CONTROLLER)
# Divine Favor
class EX1_349:
play = DrawUntil(CONTROLLER, Count(ENEMY_HAND))
# Lay on Hands
class EX1_354:
play = Heal(TARGET, 8), Draw(CONTROLLER) * 3
# Blessed Champion
class EX1_355:
play = Buff(TARGET, "EX1_355e")
class EX1_355e:
atk = lambda self, i: i * 2
# Humility
class EX1_360:
play = Buff(TARGET, "EX1_360e")
class EX1_360e:
atk = SET(1)
# Blessing of Wisdom
class EX1_363:
play = Buff(TARGET, "EX1_363e")
class EX1_363e:
events = Attack(OWNER).on(Draw(CONTROLLER))
# Blessing of Wisdom (Unused)
class EX1_363e2:
events = Attack(OWNER).on(Draw(OWNER_OPPONENT))
# Holy Wrath
class EX1_365:
play = Draw(CONTROLLER).then(Hit(TARGET, COST(Draw.CARD)))
# Hand of Protection
class EX1_371:
play = GiveDivineShield(TARGET)
# Avenging Wrath
class EX1_384:
def play(self):
count = self.controller.get_spell_damage(8)
yield Hit(RANDOM_ENEMY_CHARACTER, 1) * count
# Equality
class EX1_619:
play = Buff(ALL_MINIONS, "EX1_619e")
class EX1_619e:
max_health = SET(1)
##
# Secrets
# Noble Sacrifice
class EX1_130:
secret = Attack(ENEMY_MINIONS).on(FULL_BOARD | (
Reveal(SELF), Retarget(Attack.ATTACKER, Summon(CONTROLLER, "EX1_130a"))
))
# Eye for an Eye
class EX1_132:
secret = Damage(FRIENDLY_HERO).on(
Reveal(SELF), Hit(ENEMY_HERO, Damage.AMOUNT)
)
# Redemption
class EX1_136:
secret = Death(FRIENDLY + MINION).on(FULL_BOARD | (
Reveal(SELF),
Summon(CONTROLLER, Copy(Death.ENTITY)).then(SetCurrentHealth(Summon.CARD, 1))
))
# Repentance
class EX1_379:
secret = Play(OPPONENT, MINION | HERO).after(
Reveal(SELF), Buff(Play.CARD, "EX1_379e")
)
class EX1_379e:
max_health = SET(1)
##
# Weapons
# Truesilver Champion
class CS2_097:
events = Attack(FRIENDLY_HERO).on(Heal(FRIENDLY_HERO, 2))
# Sword of Justice
class EX1_366:
events = Summon(CONTROLLER, MINION).after(
Buff(Summon.CARD, "EX1_366e"),
Hit(SELF, 1)
)
EX1_366e = buff(+1, +1)
| agpl-3.0 | -3,566,954,898,071,706,600 | 14.256684 | 79 | 0.685594 | false |
renyi533/tensorflow | tensorflow/python/keras/mixed_precision/experimental/policy.py | 1 | 25763 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the Policy class for mixed precision training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.mixed_precision.experimental import device_compatibility_check
from tensorflow.python.keras.mixed_precision.experimental import loss_scale as keras_loss_scale_module
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.platform import tf_logging
from tensorflow.python.training.experimental import mixed_precision_global_state
from tensorflow.python.util.tf_export import keras_export
# Default value of certain arguments, indicating the default behavior for
# that argument should be used.
USE_DEFAULT = 'USE_DEFAULT'
@keras_export('keras.mixed_precision.experimental.Policy')
class Policy(object):
"""A dtype policy for a Keras layer.
A dtype policy determines dtype-related aspects of a layer, such as its
computation and variable dtypes. Each layer has a policy. Policies can be
passed to the `dtype` argument of layer constructors, or a global policy can
be set with `tf.keras.mixed_precision.experimental.set_policy`. A layer will
default to the global policy if no policy is passed to its constructor.
For many models, each layer's policy will have the same compute dtype and
variable dtype, which will typically be float32. In this case, we refer to the
singular dtype as the layer's dtype, which can be queried by the property
`tf.keras.layers.Layer.dtype`.
When mixed precision training is used, most layers will instead have a float16
or bfloat16 compute dtype and a float32 variable dtype, and so the layer does
not have a single dtype. When the variable dtype does not match the compute
dtype, variables will be automatically casted to the compute dtype to avoid
type errors. In this case, `tf.keras.layers.Layer.dtype` refers to the
variable dtype, not the compute dtype. See [the mixed precision
guide](https://www.tensorflow.org/guide/keras/mixed_precision) for more
information on how to use mixed precision.
Certain policies also have a `tf.mixed_precision.experimental.LossScale`
instance, which is used by `tf.keras.Model`s to perform loss scaling. Loss
scaling is a technique used with mixed precision to avoid numerical underflow
in float16 gradients. Loss scaling is only done by Models in `Model.fit`,
`Model.train_on_batch`, and similar methods. Layers which are not Models
ignore the loss scale.
Policies are constructed by passing a string to the constructor, e.g.
`tf.keras.mixed_precision.experimental.Policy('float32')`. The string
determines the compute and variable dtypes. It can be one of the following:
* Any dtype name, such as 'float32' or 'float64'. Both the variable and
compute dtypes will be that dtype. No loss scaling is done by default.
* 'mixed_float16' or 'mixed_bfloat16': The compute dtype is float16 or
bfloat16, while the variable dtype is float32. These policies are used for
mixed precision training. With 'mixed_float16', a dynamic loss scale is
used by default. 'mixed_bfloat16' does no loss scaling by default, as loss
scaling is unnecessary with bfloat16.
### How to use mixed precision in a Keras model
To use mixed precision in a Keras model, the `'mixed_float16'` or
`'mixed_bfloat16'` policy can be used.
`tf.keras.mixed_precision.experimental.set_policy` can be used to set the
default policy for layers if no policy is passed to them. For example:
>>> tf.keras.mixed_precision.experimental.set_policy('mixed_float16')
>>> model = tf.keras.models.Sequential([
... tf.keras.layers.Input((100,)),
... # Dense layers use global policy of 'mixed_float16', which does
... # computations in float16 while keeping variables in float32.
... tf.keras.layers.Dense(10),
... tf.keras.layers.Dense(10),
... # Softmax should be done in float32 for numeric stability. We pass
... # dtype='float32' to use float32 instead of the global policy.
... tf.keras.layers.Activation('softmax', dtype='float32')
... ])
Alternatively, the policy can be passed to individual layers instead of
setting the global policy with `set_policy`:
>>> policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
>>> model = tf.keras.models.Sequential([
... tf.keras.layers.Input((100,)),
... tf.keras.layers.Dense(10, dtype=policy),
... tf.keras.layers.Dense(10, dtype=policy),
... # Softmax should be done in float32 for numeric stability.
... tf.keras.layers.Activation('softmax', dtype='float32')
... ])
Note the `'mixed_float16'` policy will apply loss scaling by default in
`Model.fit`, `Model.train_on_batch`, and other training methods. If no such
method is used (e.g., a custom training loop is used) and `'mixed_float16'` is
used, the loss scale must be manually applied. See
`tf.keras.mixed_precision.experimental.LossScaleOptimizer` for details. For
`'mixed_bfloat16'`, no loss scaling is done and loss scaling never needs to be
manually applied.
See [the mixed precision
guide](https://www.tensorflow.org/guide/keras/mixed_precision) for more
information on using mixed precision.
### How to use float64 in a Keras model
Using float64 is similar to mixed precision. Either the global policy can be
set to float64, or `dtype='float64'` can be passed to individual layers. For
example, to set the global policy:
>>> tf.keras.mixed_precision.experimental.set_policy('float64')
>>> model = tf.keras.models.Sequential([
... tf.keras.layers.Input((100,)),
... # All layers use global policy of 'float64', which does computations
... # and creates variables in float64.
... tf.keras.layers.Dense(10),
... tf.keras.layers.Dense(10),
... tf.keras.layers.Activation('softmax')
... ])
>>> # Optionally set policy back to float32 if any other models use float32
>>> tf.keras.mixed_precision.experimental.set_policy('float32')
### How a layer uses its policy's compute dtype
A layer will cast its inputs to its compute dtype in TensorFlow 2. For
example:
>>> x = tf.ones((4, 4, 4, 4), dtype='float64')
>>> # `layer`'s policy defaults to float32.
>>> layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2)
>>> # `layer` casts its inputs to its compute dtype, which is float32, and
>>> # does computations in float32.
>>> y = layer(x)
>>> y.dtype
tf.float32
Note that the base `tf.keras.layers.Layer` class inserts the casts. If
subclassing your own layer, you do not have to insert any casts.
Currently, only tensors in the first argument to the layer's `call` method are
casted. For example:
>>> class MyLayer(tf.keras.layers.Layer):
... # Bug! `b` will not be casted.
... def call(self, a, b):
... return a + 1., b + 1.
>>> a = tf.constant(1., dtype="float32")
>>> b = tf.constant(1., dtype="float32")
>>> layer = MyLayer(dtype="float64")
>>> x, y = layer(a, b)
>>> x.dtype
tf.float64
>>> y.dtype
tf.float32
If writing your own layer, it is recommended to accept tensors only in the
first argument. This way, all tensors are casted to the layer's compute dtype.
`MyLayer` should therefore be written as:
>>> class MyLayer(tf.keras.layers.Layer):
... # Now, all tensor inputs will be casted.
... def call(self, inputs):
... a, b = inputs
... return a + 1., b + 1.
>>> a = tf.constant(1., dtype="float32")
>>> b = tf.constant(1., dtype="float32")
>>> layer = MyLayer(dtype="float64")
>>> x, y = layer((a, b))
>>> x.dtype
tf.float64
>>> y.dtype
tf.float64
Other arguments are not automatically casted for technical reasons, but this
may change in a future minor release.
A layer subclass can prevent its inputs from being autocasted by passing
`autocast=False` to the layer constructor. For example:
>>> class NonAutoCastingLayer(tf.keras.layers.Layer):
... def __init__(self, **kwargs):
... kwargs['autocast'] = False
... super(NonAutoCastingLayer, self).__init__(**kwargs)
... def call(self, inp):
... return inp
>>> x = tf.ones((4, 4, 4, 4), dtype='float32')
>>> layer = NonAutoCastingLayer(dtype='float64')
>>> y = layer(x) # Will not cast inputs to its compute dtype of float64
>>> y.dtype
tf.float32
### How a layer uses its policy's variable dtype
The default dtype of variables created by `tf.keras.layers.Layer.add_weight`
is the layer's policy's variable dtype.
If a layer's compute and variable dtypes differ, `add_weight` will wrap
floating-point variables with a special wrapper called an `AutoCastVariable`.
This wrapper is identical to the original variable except it casts itself to
the layer's compute dtype when used within `Layer.call`. Outside `Layer.call`,
the variable is not casted.
A layer author can prevent a variable from being wrapped with an
`AutoCastVariable` by passing `experimental_autocast=False` to `add_weight`:
>>> class MyLayer(tf.keras.layers.Layer):
... def build(self, input_shape):
... self.x = self.add_weight('x')
... self.y = self.add_weight('y', experimental_autocast=False)
>>> policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
>>> layer = MyLayer(dtype=policy)
>>> layer.build((2, 2))
>>> layer.x
<AutoCastVariable 'x:0' shape=() dtype=float32 true_dtype=float32, numpy=...>
>>> layer.y
<tf.Variable 'y:0' shape=() dtype=float32, numpy=...>
Passing `experimental_autocast=False` is useful for layers which may
internally do some math in the variable dtype instead of the compute dtype.
For example, you may wish to compute variable statistics, such as mean and
variance, in the variable dtype.
### How to write a layer that supports mixed precision and float64.
For the most part, layers will automatically support mixed precision and
float64 without any additional work, due to the fact the base layer
automatically casts inputs, creates variables of the correct type, and in the
case of mixed precision, wraps variables with `AutoCastVariables`.
For example, this simple dense layer does not require any additional work to
support mixed precision or float64. Keras automatically casts the inputs and
variable to the appropriate dtype.
>>> class MyDense(tf.keras.layers.Layer):
... def build(self, input_shape):
... self.kernel = self.add_weight('kernel', (input_shape[-1], 10))
... def call(self, inputs):
... return tf.matmul(inputs, self.kernel)
>>> policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
>>> layer = MyDense(dtype=policy)
>>> x = np.random.rand(10, 10)
>>> y = layer(x)
>>> y.dtype
tf.float16
The primary case where you need extra work to support mixed precision or
float64 is when you create a new tensor, such as with `tf.ones` or
`tf.constant`. In such cases, you must create the tensor of the correct dtype.
For example, suppose you modify the `MyDense` layer to add a random number to
the output using `tf.random.normal`. You must pass the input dtype to
`tf.random.normal` to ensure the dtypes match.
>>> class MyDense(tf.keras.layers.Layer):
... def build(self, input_shape):
... self.kernel = self.add_weight('kernel', (input_shape[-1], 10))
... def call(self, inputs):
... rand = tf.random.normal(shape=inputs.shape, dtype=inputs.dtype)
... return tf.matmul(inputs, self.kernel) + rand
>>>
>>> layer = MyDense(dtype=policy)
>>> y = layer(x)
>>> y.dtype
tf.float16
If you did not pass `dtype=inputs.dtype` to `tf.random.normal`, a `TypeError`
would have occurred. This is because the dtype defaults to `"float32"`, so the
layer would only work if the inputs were float32.
### The deprecated "infer" policy
In addition to the above mentioned policies, a policy can also be "infer".
This Policy is deprecated, and it is not recommended. When a layer has an
infer policy, it will infer the computation and variable dtype from the first
input the first time the layer is called. Once the layer is called for the
first time, the layer's policy will change to the dtype of the first input.
In TensorFlow 1, only the "infer" policy is available.
"""
def __init__(self, name, loss_scale=USE_DEFAULT):
"""Constructs the policy.
The `name` argument determines the compute and variable dtype, the default
loss scale, and has no additional effect on the Policy. The compute and
variable dtypes can only be specified through `name`, and cannot be
specified directly.
Args:
name: A string. Can be one of the following values:
* Any dtype name, such as 'float32' or 'float64'. Both the variable and
compute dtypes will be that dtype.
* 'mixed_float16' or 'mixed_bfloat16': The compute dtype is float16 or
bfloat16, while the variable dtype is float32. With 'mixed_float16',
a dynamic loss scale is used. These policies are used for mixed
precision training.
* 'infer' (deprecated): Infer the compute and variable dtype from the
input dtype.
loss_scale: A `tf.mixed_precision.experimental.LossScale`, an int (which
uses a `FixedLossScale`), or the string "dynamic" (which uses a
`DynamicLossScale`). Defaults to using no loss scaling unless `name` is
"mixed_float16", in which case this defaults to "dynamic". Only
`tf.keras.Model`s, not layers, use the loss scale, and it is only used
during `Model.fit`, `Model.train_on_batch`, and other similar methods.
"""
if isinstance(name, dtypes.DType):
raise TypeError("'name' must be a string, not a DType. "
"Instead, pass DType.name. Got: %s" % (name.name,))
elif not isinstance(name, six.string_types):
raise TypeError("'name' must be a string, but got: %s" % (name,))
self._name = name
self._compute_dtype, self._variable_dtype = self._parse_name(name)
if loss_scale == USE_DEFAULT:
loss_scale = 'dynamic' if name == 'mixed_float16' else None
self._using_default_loss_scale = True
else:
self._using_default_loss_scale = False
if loss_scale and self._compute_dtype not in (None, 'float16'):
tf_logging.warn('Creating a Policy with a loss scale is only useful for '
'float16 policies. You passed loss_scale=%r for policy '
'%s. Consider not passing any loss_scale instead.' %
(loss_scale, name))
self._loss_scale = keras_loss_scale_module.get(loss_scale)
if name in ('mixed_float16', 'mixed_bfloat16'):
device_compatibility_check.log_device_compatibility_check(name)
def _parse_name(self, name):
"""Parses a Policy name into a compute and variable dtype.
Args:
name: The name of the policy:
Returns:
The (compute_dtype, variable_dtype) pair.
"""
if name.endswith('_float32_vars'):
error_msg = ('Policies ending in \'_float32_vars\' have been removed '
'from TensorFlow.')
if name in ('infer_float32_vars', 'infer_with_float32_vars'):
error_msg += (' Please use the \'mixed_float16\' or \'mixed_bfloat16\' '
'policy instead.')
elif name == 'float16_with_float32_vars':
error_msg += (' Please use the \'mixed_float16\' policy instead.')
elif name == 'bfloat16_with_float32_vars':
error_msg += (' Please use the \'mixed_bfloat16\' policy instead.')
error_msg += ' Got policy name: \'%s\'' % name
raise ValueError(error_msg)
if name == 'mixed_float16':
return 'float16', 'float32'
elif name == 'mixed_bfloat16':
return 'bfloat16', 'float32'
elif name == 'infer':
return None, None
try:
dtype = dtypes.as_dtype(name).name
except TypeError:
error = ("Cannot convert value %s to a mixed precision Policy. "
"Valid policies include include 'mixed_float16', "
"'mixed_bfloat16', and the name of any dtype such as "
"'float32'." % (name,))
# six.raise_from suppresses the original TypeError from being raised
six.raise_from(ValueError(error), None)
return dtype, dtype
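# Example mapping (comment added for illustration; not in the original file):
#   _parse_name('float32')        -> ('float32', 'float32')
#   _parse_name('mixed_float16')  -> ('float16', 'float32')
#   _parse_name('mixed_bfloat16') -> ('bfloat16', 'float32')
#   _parse_name('infer')          -> (None, None)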
@property
def variable_dtype(self):
"""The variable dtype of this policy.
This is the dtype layers will create their variables in, unless a layer
explicitly chooses a different dtype. If this is different than
`Policy.compute_dtype`, Layers will cast variables to the compute dtype to
avoid type errors.
Returns:
The variable dtype of this policy, or None if the variable dtype should be
inferred from the inputs.
"""
return self._variable_dtype
@property
def compute_dtype(self):
"""The compute dtype of this policy.
This is the dtype layers will do their computations in.
Note that even if the compute dtype is float16 or bfloat16, hardware devices
may not do individual adds, multiplies, and other fundamental operations in
[b]float16, but instead may do some of them in float32 for numeric
stability. The compute dtype is the dtype of the inputs and outputs of the
TensorFlow ops that the layer executes. Internally, many TensorFlow ops will
do certain internal calculations in float32, or some other device-internal
intermediate format with higher precision than [b]float16, to increase
numeric stability.
For example, a `tf.keras.layers.Dense` layer, when run on a GPU with a
float16 compute dtype, will pass float16 inputs to tf.matmul. But, tf.matmul
will use float32 intermediate math. The performance benefit of float16 is
still apparent, due to increased memory bandwidth and the fact modern GPUs
have specialized hardware for computing matmuls on float16 while still
keeping intermediate computations in float32.
Returns:
The compute dtype of this policy, or None if the compute dtype should be
inferred from the inputs.
"""
return self._compute_dtype
@property
def should_cast_variables(self):
"""Returns True if variables should be casted.
This is true if the variable dtype is not the same as the compute dtype.
Returns:
True, if variables should be casted.
"""
return self.variable_dtype != self.compute_dtype
@property
def loss_scale(self):
"""Returns the loss scale of this Policy.
Returns:
A `tf.mixed_precision.experimental.LossScale`, or None.
"""
return self._loss_scale
@property
def name(self):
"""Returns the name of this policy."""
return self._name
def __repr__(self):
return '<Policy "%s", loss_scale=%s>' % (self._name, self.loss_scale)
def get_config(self):
config = {
'name': self.name
}
if not self._using_default_loss_scale:
# We only include the loss scale if the default loss scale is not used.
# This allows us to change the loss scale config format without breaking
# users who use the default loss scale.
config['loss_scale'] = keras_loss_scale_module.serialize(self.loss_scale)
return config
@classmethod
def from_config(cls, config, custom_objects=None):
if 'loss_scale' in config and isinstance(config['loss_scale'], dict):
config = config.copy()
config['loss_scale'] = keras_loss_scale_module.deserialize(
config['loss_scale'], custom_objects=custom_objects)
return cls(**config)
# The current global policy in effect. If None, it means the current value of
# floatx should be used as the policy if the V2 dtype behavior is enabled,
# or "infer" otherwise.
# TODO(reedwm): Make this thread local?
_global_policy = None
@keras_export('keras.mixed_precision.experimental.global_policy')
def global_policy():
"""Returns the global Policy.
The global policy is the default policy used for layers, if no policy is
passed to the layer constructor. If no policy has been set with
`keras.mixed_precision.experimental.set_policy`, this will return a policy
constructed from `tf.keras.backend.floatx()` in TensorFlow 2 (floatx defaults
to float32), or an "infer" policy in TensorFlow 1.
See `keras.mixed_precision.experimental.Policy` for more information.
Returns:
The global Policy.
"""
if _global_policy is None:
if base_layer_utils.v2_dtype_behavior_enabled():
return Policy(backend.floatx())
else:
return Policy('infer')
return _global_policy
def policy_defaults_to_floatx():
"""Returns True if `global_policy()` will use the current value of floatx."""
return _global_policy is None and base_layer_utils.v2_dtype_behavior_enabled()
def _check_if_mixed_precision_graph_rewrite_is_enabled():
# TODO(reedwm): Update this comment once the Keras API is complete.
if mixed_precision_global_state.mixed_precision_graph_rewrite_is_enabled:
raise ValueError(
'The mixed precision policy cannot be set, because the mixed '
'precision graph rewrite has already been enabled.\n'
'At most, one of the following functions can be called:\n\n'
' 1. tf.train.experimental.enable_mixed_precision_graph_rewrite() '
'(You called this first)\n'
' 2. tf.keras.mixed_precision.experimental.set_policy() (You called '
'this second)\n\n'
'You called both functions, which is an error, because both functions '
'enable you to use mixed precision. If in doubt which function to use, '
'use the second, as it supports Eager execution and is more '
'customizable.')
@keras_export('keras.mixed_precision.experimental.set_policy')
def set_policy(policy):
"""Sets the global Policy.
The global policy is the default policy used for layers, if no policy is
passed to the layer constructor. If no global policy is set, layers will
instead default to a Policy constructed from `tf.keras.backend.floatx()` in
TensorFlow 2. In TensorFlow 1, layers default to an "infer" policy.
See `keras.mixed_precision.experimental.Policy` for more information.
Args:
policy: A Policy, or a string that will be converted to a Policy.
"""
global _global_policy
_check_if_mixed_precision_graph_rewrite_is_enabled()
if policy is not None and not isinstance(policy, Policy):
policy = Policy(policy)
if (policy and not base_layer_utils.v2_dtype_behavior_enabled() and
policy.compute_dtype):
raise ValueError(
'The global policy can only be set to a non-infer policy in TensorFlow '
'2')
_global_policy = policy
mixed_precision_global_state.using_default_mixed_precision_policy = (
_global_policy is None)
# TODO(reedwm): Make this thread local
@contextlib.contextmanager
def policy_scope(policy):
"""A context manager that sets the global Policy under it.
Args:
policy: A Policy, or a string that will be converted to a Policy.
Yields:
Nothing.
"""
old_policy = _global_policy
try:
set_policy(policy)
yield
finally:
set_policy(old_policy)
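# Usage sketch (added for illustration, not part of the original module):
#
#   with policy_scope('mixed_float16'):
#     # Layers constructed here default to the 'mixed_float16' policy.
#     layer = tf.keras.layers.Dense(10)
#   # On exit the previous global policy is restored.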
def _is_convertible_to_dtype(dtype):
try:
dtypes.as_dtype(dtype)
return True
except TypeError:
return False
def _policy_equivalent_to_dtype(policy):
"""Returns True if the Policy is equivalent to a single dtype.
A policy is equivalent to a single dtype if the policy's compute and variable
dtypes are the same and the policy does not cause the layer/model to have
additional behavior, such as loss scaling.
The "infer" policy is considered equivalent to a single dtype.
Args:
policy: A Policy.
Returns:
True, if the policy is equivalent to a single dtype.
"""
# We use type() instead of isinstance because a subclass of Policy is never
# equivalent to a dtype.
return (type(policy) == Policy and # pylint: disable=unidiomatic-typecheck
list(policy.get_config().keys()) == ['name'] and
(policy.name == 'infer' or _is_convertible_to_dtype(policy.name)))
def serialize(policy):
if _policy_equivalent_to_dtype(policy):
# We return either None or the policy name for compatibility with older
# versions of Keras. If the policy name is returned, it is a dtype string
# such as 'float32'.
return None if policy.name == 'infer' else policy.name
return generic_utils.serialize_keras_object(policy)
def deserialize(config, custom_objects=None):
if isinstance(config, str) and _is_convertible_to_dtype(config):
return Policy(config)
if config is None:
return Policy('infer')
module_objects = {'Policy': Policy}
return generic_utils.deserialize_keras_object(
config,
module_objects=module_objects,
custom_objects=custom_objects,
printable_module_name='dtype policy')
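# Round-trip sketch (illustrative only, not in the original file):
#
#   serialize(Policy('float32'))        # -> 'float32' (dtype-equivalent policy)
#   serialize(Policy('mixed_float16'))  # -> {'class_name': 'Policy', 'config': {...}}
#   deserialize('float32')              # -> Policy('float32')
#   deserialize(None)                   # -> Policy('infer')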
| apache-2.0 | 4,548,425,901,872,756,700 | 39.958665 | 102 | 0.695843 | false |
googleapis/googleapis-gen | google/cloud/networkmanagement/v1/networkmanagement-v1-py/google/cloud/network_management_v1/services/reachability_service/transports/grpc.py | 1 | 21150 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.network_management_v1.types import connectivity_test
from google.cloud.network_management_v1.types import reachability
from google.longrunning import operations_pb2 # type: ignore
from .base import ReachabilityServiceTransport, DEFAULT_CLIENT_INFO
class ReachabilityServiceGrpcTransport(ReachabilityServiceTransport):
"""gRPC backend transport for ReachabilityService.
The Reachability service in the Google Cloud Network
Management API provides services that analyze the reachability
within a single Google Virtual Private Cloud (VPC) network,
between peered VPC networks, between VPC and on-premises
networks, or between VPC networks and internet hosts. A
reachability analysis is based on Google Cloud network
configurations.
You can use the analysis results to verify these configurations
and to troubleshoot connectivity issues.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'networkmanagement.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'networkmanagement.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def list_connectivity_tests(self) -> Callable[
[reachability.ListConnectivityTestsRequest],
reachability.ListConnectivityTestsResponse]:
r"""Return a callable for the list connectivity tests method over gRPC.
Lists all Connectivity Tests owned by a project.
Returns:
Callable[[~.ListConnectivityTestsRequest],
~.ListConnectivityTestsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_connectivity_tests' not in self._stubs:
self._stubs['list_connectivity_tests'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/ListConnectivityTests',
request_serializer=reachability.ListConnectivityTestsRequest.serialize,
response_deserializer=reachability.ListConnectivityTestsResponse.deserialize,
)
return self._stubs['list_connectivity_tests']
@property
def get_connectivity_test(self) -> Callable[
[reachability.GetConnectivityTestRequest],
connectivity_test.ConnectivityTest]:
r"""Return a callable for the get connectivity test method over gRPC.
Gets the details of a specific Connectivity Test.
Returns:
Callable[[~.GetConnectivityTestRequest],
~.ConnectivityTest]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_connectivity_test' not in self._stubs:
self._stubs['get_connectivity_test'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/GetConnectivityTest',
request_serializer=reachability.GetConnectivityTestRequest.serialize,
response_deserializer=connectivity_test.ConnectivityTest.deserialize,
)
return self._stubs['get_connectivity_test']
@property
def create_connectivity_test(self) -> Callable[
[reachability.CreateConnectivityTestRequest],
operations_pb2.Operation]:
r"""Return a callable for the create connectivity test method over gRPC.
Creates a new Connectivity Test. After you create a test, the
reachability analysis is performed as part of the long running
operation, which completes when the analysis completes.
If the endpoint specifications in ``ConnectivityTest`` are
invalid (for example, containing non-existent resources in the
network, or you don't have read permissions to the network
configurations of listed projects), then the reachability result
returns a value of ``UNKNOWN``.
If the endpoint specifications in ``ConnectivityTest`` are
incomplete, the reachability result returns a value of
AMBIGUOUS. For more information, see the Connectivity Test
documentation.
Returns:
Callable[[~.CreateConnectivityTestRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_connectivity_test' not in self._stubs:
self._stubs['create_connectivity_test'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/CreateConnectivityTest',
request_serializer=reachability.CreateConnectivityTestRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['create_connectivity_test']
@property
def update_connectivity_test(self) -> Callable[
[reachability.UpdateConnectivityTestRequest],
operations_pb2.Operation]:
r"""Return a callable for the update connectivity test method over gRPC.
Updates the configuration of an existing ``ConnectivityTest``.
After you update a test, the reachability analysis is performed
as part of the long running operation, which completes when the
analysis completes. The Reachability state in the test resource
is updated with the new result.
If the endpoint specifications in ``ConnectivityTest`` are
invalid (for example, they contain non-existent resources in the
network, or the user does not have read permissions to the
network configurations of listed projects), then the
reachability result returns a value of UNKNOWN.
If the endpoint specifications in ``ConnectivityTest`` are
incomplete, the reachability result returns a value of
        ``AMBIGUOUS``. See the documentation in ``ConnectivityTest`` for
        more details.
Returns:
Callable[[~.UpdateConnectivityTestRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_connectivity_test' not in self._stubs:
self._stubs['update_connectivity_test'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/UpdateConnectivityTest',
request_serializer=reachability.UpdateConnectivityTestRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['update_connectivity_test']
@property
def rerun_connectivity_test(self) -> Callable[
[reachability.RerunConnectivityTestRequest],
operations_pb2.Operation]:
r"""Return a callable for the rerun connectivity test method over gRPC.
Rerun an existing ``ConnectivityTest``. After the user triggers
the rerun, the reachability analysis is performed as part of the
long running operation, which completes when the analysis
completes.
Even though the test configuration remains the same, the
reachability result may change due to underlying network
configuration changes.
If the endpoint specifications in ``ConnectivityTest`` become
invalid (for example, specified resources are deleted in the
network, or you lost read permissions to the network
configurations of listed projects), then the reachability result
returns a value of ``UNKNOWN``.
Returns:
Callable[[~.RerunConnectivityTestRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'rerun_connectivity_test' not in self._stubs:
self._stubs['rerun_connectivity_test'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/RerunConnectivityTest',
request_serializer=reachability.RerunConnectivityTestRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['rerun_connectivity_test']
@property
def delete_connectivity_test(self) -> Callable[
[reachability.DeleteConnectivityTestRequest],
operations_pb2.Operation]:
r"""Return a callable for the delete connectivity test method over gRPC.
Deletes a specific ``ConnectivityTest``.
Returns:
Callable[[~.DeleteConnectivityTestRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_connectivity_test' not in self._stubs:
self._stubs['delete_connectivity_test'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/DeleteConnectivityTest',
request_serializer=reachability.DeleteConnectivityTestRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['delete_connectivity_test']
__all__ = (
'ReachabilityServiceGrpcTransport',
)
| apache-2.0 | 3,437,077,281,151,357,400 | 45.792035 | 96 | 0.636359 | false |
VlachosGroup/VlachosGroupAdditivity | pgradd/DrawMol.py | 1 | 2230 | """
=========================================
Definition to draw RDKIT mol object (:mod:`pgradd.DrawMol`)
=========================================
Converts an RDKit mol object to an SVG image and displays it.
"""
from rdkit import Chem
from rdkit.Chem import rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D
from IPython.display import SVG, display
# http://rdkit.blogspot.com/2015/02/new-drawing-code.html
def moltosvg(mol, highlight=[], molSize=(400, 400), kekulize=True):
mc = Chem.Mol(mol.ToBinary())
if kekulize:
try:
Chem.Kekulize(mc)
except Exception:
mc = Chem.Mol(mol.ToBinary())
if not mc.GetNumConformers():
rdDepictor.Compute2DCoords(mc)
drawer = rdMolDraw2D.MolDraw2DSVG(molSize[0], molSize[1])
# Atom Label
opts = drawer.drawOptions()
# Atom name and index
for i in range(mol.GetNumAtoms()):
opts.atomLabels[i] = mol.GetAtomWithIdx(i).GetSymbol()+str(i)
# radicals and charges
for atom in mol.GetAtoms():
nr = atom.GetNumRadicalElectrons()
nc = atom.GetFormalCharge()
if nr > 0:
string = atom.GetSymbol() + ':'*divmod(nr, 2)[0] +\
'.'*divmod(nr, 2)[1]
opts.atomLabels[atom.GetIdx()] += string
elif nc == 1:
string = atom.GetSymbol() + '+'
opts.atomLabels[atom.GetIdx()] += string
elif nc > 1:
string = atom.GetSymbol() + '+' + str(nc)
opts.atomLabels[atom.GetIdx()] += string
elif nc == -1:
string = atom.GetSymbol() + '-'
opts.atomLabels[atom.GetIdx()] += string
elif nc < -1:
string = atom.GetSymbol() + '-' + str(nc)
opts.atomLabels[atom.GetIdx()] += string
# highlight
if highlight:
drawer.DrawMolecule(mc, highlightAtoms=highlight)
else:
drawer.DrawMolecule(mc)
drawer.FinishDrawing()
svg = drawer.GetDrawingText()
# It seems that the svg renderer used doesn't quite hit the spec.
# Here are some fixes to make it work in the notebook, although I think
# the underlying issue needs to be resolved at the generation step
    svg = svg.replace('svg:', '')
display(SVG(svg))
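# Example usage (a sketch, not part of the original module): meant for a Jupyter
# notebook, since moltosvg() renders via IPython display; the SMILES string and
# highlight indices below are illustrative assumptions only.
if __name__ == '__main__':
    example_mol = Chem.MolFromSmiles('c1ccccc1O')  # phenol, built with RDKit
    moltosvg(example_mol, highlight=[0, 1], molSize=(300, 300))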
| mit | 5,404,241,152,769,177,000 | 32.283582 | 75 | 0.58296 | false |
aldebaran/qibuild | python/qitest/parsers.py | 1 | 7334 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Collection of parser fonctions for qitests actions """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import qisys.parsers
import qitest.project
import qibuild.parsers
class EmptyTestListException(Exception):
""" No test to run exception """
pass
def test_parser(parser, with_num_jobs=True):
""" Test Parser """
qisys.parsers.worktree_parser(parser)
group = parser.add_argument_group("test options")
group.add_argument("--perf", dest="perf", action="store_true",
help="run perfs tests instead of pure tests.")
group.add_argument("-k", "--pattern", dest="patterns", action="append",
help="Filter tests matching these patterns")
group.add_argument("-x", "--exclude", dest="excludes", action="append",
help="Exclude test matching these patterns")
group.add_argument("-V", dest="verbose_tests", action="store_true",
help="display tests output")
group.add_argument("--valgrind", dest="valgrind", action="store_true",
help="run tests under valgrind")
group.add_argument("--nightmare", dest="nightmare", action="store_true",
help="run tests in shuffle and 20 times (apply only to gtest)")
group.add_argument("--coverage", dest="coverage", action="store_true",
help="run coverage")
group.add_argument("--ncpu", dest="num_cpus", default=-1, type=int,
help="set number of CPU each test is allowed to use (linux)")
group.add_argument("--nightly", action="store_true", dest="nightly")
group.add_argument("--break-on-failure", action="store_true", dest="break_on_failure",
help="Break on failure (for gtest only)")
group.add_argument("--repeat-until-fail", default=0, type=int, metavar="N",
help="Repeat tests until they fail (at most N times)")
group.add_argument("--qitest-json", dest="qitest_jsons", action="append")
group.add_argument("--test-output-dir", type=os.path.abspath,
dest="test_output_dir",
help="Generate XML test reports in the given directory "
"(instead of build-<platform>/sdk/test-results)")
group.add_argument("--coverage-output-dir", dest="coverage_output_dir",
help="Generate XML and HTML coverage reports in the given "
"directory (instead of build-<platform>/sdk/coverage-results)")
group.add_argument("--root-output-dir", dest="test_output_dir", metavar="ROOT_OUTPUT_DIR",
help="same as --test-output-dir (deprecated)")
group.add_argument("--no-capture", dest="capture", action="store_false")
group.add_argument("--ignore-timeouts", dest="ignore_timeouts", action="store_true",
help="Ignore timeouts when running tests")
group.add_argument("--lf", "--last-failed", dest="last_failed", action="store_true",
help="Run the failing test from previous run")
group.add_argument("--allow-no-test", dest="allow_no_test", action="store_true",
help="Don't fail if no tests to run")
parser.set_defaults(nightly=False, capture=True, last_failed=False,
ignore_timeouts=False)
if with_num_jobs:
qisys.parsers.parallel_parser(group, default=1)
return group
def get_test_runner(args, build_project=None, qitest_json=None):
""" Get Test Runner """
test_project = None
if not qitest_json:
qitest_json = vars(args).get("qitest_json")
if not qitest_json:
candidate = os.path.join(os.getcwd(), "qitest.json")
if os.path.exists(candidate):
qitest_json = candidate
if qitest_json:
test_project = qitest.project.TestProject(qitest_json)
if not test_project:
if build_project:
test_project = build_project.to_test_project()
else:
return None
test_runner = qibuild.test_runner.ProjectTestRunner(test_project)
if build_project:
test_runner.cwd = build_project.sdk_directory
test_runner.env = build_project.build_worktree.get_env()
else:
test_runner.cwd = qisys.sh.to_native_path(os.path.dirname(qitest_json))
test_runner.patterns = args.patterns
test_runner.excludes = args.excludes
test_runner.perf = args.perf
test_runner.coverage = args.coverage
test_runner.break_on_failure = args.break_on_failure
test_runner.valgrind = args.valgrind
test_runner.verbose = args.verbose_tests
test_runner.num_cpus = args.num_cpus
test_runner.num_jobs = args.num_jobs
test_runner.repeat_until_fail = args.repeat_until_fail
test_runner.nightly = args.nightly
test_runner.nightmare = args.nightmare
test_runner.test_output_dir = args.test_output_dir
test_runner.capture = args.capture
test_runner.last_failed = args.last_failed
test_runner.ignore_timeouts = args.ignore_timeouts
return test_runner
def parse_build_projects(args):
""" Parse Build Projects """
res = list()
try:
build_worktree = qibuild.parsers.get_build_worktree(args)
solve_deps = False
if args.use_deps:
solve_deps = True
build_projects = qibuild.parsers.get_build_projects(
build_worktree,
args, solve_deps=solve_deps)
for build_project in build_projects:
test_runner = None
try:
test_runner = get_test_runner(args, build_project=build_project)
except qibuild.project.NoQiTestJson:
pass
if test_runner:
res.append(test_runner)
except (qisys.worktree.NotInWorkTree, qibuild.parsers.CouldNotGuessProjectName):
pass
return res
def get_test_runners(args):
""" Get Test Runners """
res = list()
qitest_jsons = args.qitest_jsons or list()
# first case: qitest.json in current working directory
test_runner = get_test_runner(args)
if test_runner:
res.append(test_runner)
# second case: qitest.json specified with --qitest-json
for qitest_json in qitest_jsons:
test_runner = get_test_runner(args, qitest_json=qitest_json)
res.append(test_runner)
# third case: parsing build projects
build_projects_runners = parse_build_projects(args)
# avoid appending a test_runner guessed from a build project
# when res already contains a test runner computed from a
# --qitest-json argument
known_cwds = [x.cwd for x in res]
for test_runner in build_projects_runners:
if test_runner.cwd not in known_cwds:
res.append(test_runner)
if args.coverage and not build_projects_runners:
raise Exception("""--coverage can only be used from a qibuild CMake project\n""")
elif args.coverage:
return build_projects_runners
if not res:
raise EmptyTestListException("Nothing found to test")
return res
| bsd-3-clause | 6,434,639,803,409,143,000 | 43.993865 | 94 | 0.637715 | false |
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractCurrentlyTLingBuniMi.py | 1 | 1148 | def extractCurrentlyTLingBuniMi(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if item['title'].startswith('[BNM]'):
return buildReleaseMessageWithType(item, 'Bu ni Mi wo Sasagete Hyaku to Yonen. Elf de Yarinaosu Musha Shugyou', vol, chp, frag=frag, postfix=postfix)
if item['title'].startswith('[DD]'):
return buildReleaseMessageWithType(item, 'Doll Dungeon', vol, chp, frag=frag, postfix=postfix)
if item['title'].startswith('[HCLS]'):
return buildReleaseMessageWithType(item, 'High Comprehension Low Strength', vol, chp, frag=frag, postfix=postfix)
tagmap = [
('Abyss Domination', 'Abyss Domination', 'translated'),
('Nine Yang Sword Saint', 'Nine Yang Sword Saint', 'translated'),
('Mysterious World Beast God', 'Mysterious World Beast God', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | bsd-3-clause | -392,690,096,227,605,250 | 44.96 | 151 | 0.690767 | false |
xcgspring/AXUI | test/test_driver/windows/test_Translater.py | 1 | 1731 |
import sys
import unittest
class TestTranslater(unittest.TestCase):
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_coordinate_identifier(self):
import AXUI.driver.windows.Translater as translater
from AXUI.parsing.identifier_parsing import identifier_lexer, identifier_parser
identifier = "Coordinate = '(12 ,34, 56, 79)'"
parsed_identifier = identifier_parser.parse(identifier, lexer=identifier_lexer)
translated_identifier = translater.ID_Translater(parsed_identifier).get_translated()
print(translated_identifier)
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_index_identifier(self):
import AXUI.driver.windows.Translater as translater
from AXUI.parsing.identifier_parsing import identifier_lexer, identifier_parser
identifier = "Name='menu bar' AND Index=3"
parsed_identifier = identifier_parser.parse(identifier, lexer=identifier_lexer)
translated_identifier = translater.ID_Translater(parsed_identifier).get_translated()
print(translated_identifier)
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_UIA_identifier(self):
import AXUI.driver.windows.Translater as translater
from AXUI.parsing.identifier_parsing import identifier_lexer, identifier_parser
identifier = "Name='menu bar' AND LocalizedControlType='menu bar'"
parsed_identifier = identifier_parser.parse(identifier, lexer=identifier_lexer)
translated_identifier = translater.ID_Translater(parsed_identifier).get_translated()
print(translated_identifier)
| apache-2.0 | -7,218,756,001,438,972,000 | 53.09375 | 92 | 0.722126 | false |
christiansandberg/canopen | test/test_emcy.py | 1 | 2212 | import unittest
from canopen import emcy
class TestEmcyConsumer(unittest.TestCase):
def test_emcy_list(self):
emcy_node = emcy.EmcyConsumer()
emcy_node.on_emcy(0x81, b'\x01\x20\x02\x00\x01\x02\x03\x04', 1473418396.0)
emcy_node.on_emcy(0x81, b'\x10\x90\x01\x00\x01\x02\x03\x04', 1473418397.0)
self.assertEqual(len(emcy_node.log), 2)
self.assertEqual(len(emcy_node.active), 2)
error = emcy_node.log[0]
self.assertIsInstance(error, emcy.EmcyError)
self.assertIsInstance(error, Exception)
self.assertEqual(error.code, 0x2001)
self.assertEqual(error.register, 0x02)
self.assertEqual(error.data, b'\x00\x01\x02\x03\x04')
self.assertAlmostEqual(error.timestamp, 1473418396.0)
self.assertEqual(emcy_node.active[0], error)
error = emcy_node.log[1]
self.assertEqual(error.code, 0x9010)
self.assertEqual(error.register, 0x01)
self.assertEqual(error.data, b'\x00\x01\x02\x03\x04')
self.assertAlmostEqual(error.timestamp, 1473418397.0)
self.assertEqual(emcy_node.active[1], error)
emcy_node.on_emcy(0x81, b'\x00\x00\x00\x00\x00\x00\x00\x00', 1473418397.0)
self.assertEqual(len(emcy_node.log), 3)
self.assertEqual(len(emcy_node.active), 0)
def test_str(self):
error = emcy.EmcyError(0x2001, 0x02, b'\x00\x01\x02\x03\x04', 1473418396.0)
self.assertEqual(str(error), "Code 0x2001, Current")
error = emcy.EmcyError(0x50FF, 0x01, b'\x00\x01\x02\x03\x04', 1473418396.0)
self.assertEqual(str(error), "Code 0x50FF, Device Hardware")
error = emcy.EmcyError(0x7100, 0x01, b'\x00\x01\x02\x03\x04', 1473418396.0)
self.assertEqual(str(error), "Code 0x7100")
class MockNetwork(object):
data = None
def send_message(self, can_id, data):
self.data = data
class TestEmcyProducer(unittest.TestCase):
def test_send(self):
network = MockNetwork()
emcy_node = emcy.EmcyProducer(0x80 + 1)
emcy_node.network = network
emcy_node.send(0x2001, 0x2, b'\x00\x01\x02\x03\x04')
self.assertEqual(network.data, b'\x01\x20\x02\x00\x01\x02\x03\x04')
| mit | 6,203,207,328,669,061,000 | 35.262295 | 83 | 0.65642 | false |
Froff/TFY4115-Simulering | python/Simulation.py | 1 | 1185 | from math import sqrt
import Slope
class Simulation:
SIM_STEP_SIZE = 0.0001
const_g = -981
def __init__ (self, slope, **kwargs):
self.slope = slope
self.t = [0]
self.x = [Simulation.SIM_STEP_SIZE]
self.mom_inertia_coefficient = 0
for name, value in kwargs.items():
if name == "startingposition":
self.x = [value]
if name == "momentofintertiacoefficient":
self.mom_inertia_coefficient = value
def runSimulation(self):
while not self.isFinished():
self.step()
def step (self):
x = self.x[-1]
dydx = self.slope.dydx(x)
y = self.slope.f(x) - self.slope.f(0)
I = self.mom_inertia_coefficient
g = Simulation.const_g
step_size = Simulation.SIM_STEP_SIZE
try:
self.x.append(x + step_size * sqrt( (2*g*y) / ( (1 + I) * (1 + dydx**2) ) ))
self.t.append(self.t[-1] + Simulation.SIM_STEP_SIZE)
except ValueError:
print("Math domain error. x={}, y={}".format(x, y))
exit(2)
def isFinished (self):
return self.x[-1] >= self.slope.end
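# Example usage (a sketch, not part of the original file): Simulation only relies on
# the slope exposing f(x), dydx(x) and an `end` attribute; the Slope constructor call
# below is hypothetical and its real signature may differ.
if __name__ == '__main__':
    track = Slope.Slope()  # hypothetical constructor
    sim = Simulation(track, startingposition=0.01, momentofintertiacoefficient=2.0 / 5.0)
    sim.runSimulation()
    print("Finished after {} steps at x = {:.3f}".format(len(sim.t), sim.x[-1]))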
| mit | 6,737,321,104,293,273,000 | 30.184211 | 88 | 0.533333 | false |
googleapis/googleapis-gen | google/cloud/gkehub/v1alpha2/gkehub-v1alpha2-py/google/cloud/gkehub_v1alpha2/services/gke_hub/pagers.py | 1 | 5811 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional
from google.cloud.gkehub_v1alpha2.types import membership
class ListMembershipsPager:
"""A pager for iterating through ``list_memberships`` requests.
This class thinly wraps an initial
:class:`google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse` object, and
provides an ``__iter__`` method to iterate through its
``resources`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListMemberships`` requests and continue to iterate
through the ``resources`` field on the
corresponding responses.
All the usual :class:`google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., membership.ListMembershipsResponse],
request: membership.ListMembershipsRequest,
response: membership.ListMembershipsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.gkehub_v1alpha2.types.ListMembershipsRequest):
The initial request object.
response (google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = membership.ListMembershipsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[membership.ListMembershipsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[membership.Membership]:
for page in self.pages:
yield from page.resources
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
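# Usage sketch (not part of the generated file): the service client's
# list_memberships() call is assumed to return this pager, so callers can iterate
# over memberships without handling page tokens themselves, e.g.:
#
#     client = gkehub_v1alpha2.GkeHubClient()          # assumed client class name
#     for m in client.list_memberships(parent="projects/p/locations/global"):
#         print(m.name)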
class ListMembershipsAsyncPager:
"""A pager for iterating through ``list_memberships`` requests.
This class thinly wraps an initial
:class:`google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``resources`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListMemberships`` requests and continue to iterate
through the ``resources`` field on the
corresponding responses.
All the usual :class:`google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[membership.ListMembershipsResponse]],
request: membership.ListMembershipsRequest,
response: membership.ListMembershipsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.gkehub_v1alpha2.types.ListMembershipsRequest):
The initial request object.
response (google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = membership.ListMembershipsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[membership.ListMembershipsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[membership.Membership]:
async def async_generator():
async for page in self.pages:
for response in page.resources:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
| apache-2.0 | -1,709,948,911,245,092,900 | 40.507143 | 95 | 0.660816 | false |
erccarls/vectorsearch | vectorsearch/word2vec.py | 1 | 4242 | from __future__ import division # py3 "true division"
import logging
import sys
import os
import heapq
from timeit import default_timer
from copy import deepcopy
from collections import defaultdict
import threading
import itertools
import gensim
from gensim.utils import keep_vocab_item
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
from numpy import exp, log, dot, zeros, outer, random, dtype, float32 as REAL,\
uint32, seterr, array, uint8, vstack, fromstring, sqrt, newaxis,\
ndarray, empty, sum as np_sum, prod, ones, ascontiguousarray
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from six import iteritems, itervalues, string_types
from six.moves import xrange
from types import GeneratorType
logger = logging.getLogger(__name__)
try:
from gensim.models.word2vec_inner import train_batch_sg, train_batch_cbow
from gensim.models.word2vec_inner import score_sentence_sg, score_sentence_cbow
from gensim.models.word2vec_inner import FAST_VERSION, MAX_WORDS_IN_BATCH
except ImportError:
# failed... fall back to plain numpy (20-80x slower training than the above)
FAST_VERSION = -1
MAX_WORDS_IN_BATCH = 10000
class Word2Vec(gensim.models.Word2Vec):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self._stem_memory = defaultdict(set)
def most_similar(self, words={}, topn=10, restrict_vocab=None):
"""
Find the top-N most similar words.
words : a dict where the words are the keys and the weights are the values.
This method computes cosine similarity between a simple mean of the projection
weight vectors of the given words and the vectors for each word in the model.
The method corresponds to the `word-analogy` and `distance` scripts in the original
word2vec implementation.
If topn is False, most_similar returns the vector of similarity scores.
`restrict_vocab` is an optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Example::
          >>> trained_model.most_similar(words={'woman': 1.0, 'king': 1.0, 'man': -1.0})
[('queen', 0.50882536), ...]
"""
self.init_sims()
# if isinstance(positive, string_types) and not negative:
# # allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
# positive = [positive]
# add weights for each word, if not already present; default to 1.0 for positive and -1.0 for negative words
# positive = [
# (word, 1.0) if isinstance(word, string_types + (ndarray,)) else word
# for word in positive
# ]
# negative = [
# (word, -1.0) if isinstance(word, string_types + (ndarray,)) else word
# for word in negative
# ]
# compute the weighted average of all words
all_words, mean = set(), []
for word, weight in words.items():
if isinstance(word, ndarray):
mean.append(weight * word)
elif word in self.vocab:
mean.append(weight * self.syn0norm[self.vocab[word].index])
all_words.add(self.vocab[word].index)
else:
Warning("word '%s' not in vocabulary" % word)
if not mean:
raise ValueError("cannot compute similarity with no input")
mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
limited = self.syn0norm if restrict_vocab is None else self.syn0norm[:restrict_vocab]
dists = dot(limited, mean)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
# ignore (don't return) words from the input
result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
return result[:topn]
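# Usage sketch (not part of the original file): assumes the older gensim API this
# module builds on (syn0norm/index2word) and a tiny toy corpus; the weights mirror
# the classic king - man + woman analogy using the dict-based most_similar() above.
if __name__ == '__main__':
    toy_corpus = [['king', 'queen', 'man', 'woman'], ['royal', 'king', 'crown', 'queen']]
    model = Word2Vec(toy_corpus, min_count=1, size=10)
    print(model.most_similar(words={'king': 1.0, 'woman': 1.0, 'man': -1.0}, topn=3))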
| apache-2.0 | -2,757,497,388,881,234,400 | 38.654206 | 116 | 0.656294 | false |
CloudBreadPaPa/azure-ml-python-seminar | code/python/ml-Iris.py | 1 | 1412 | import urllib2
# If you are using Python 3+, import urllib instead of urllib2
import json
data = {
"Inputs": {
"input1":
{
"ColumnNames": ["Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width", "Species"],
"Values": [ [ "1", "1", "1", "1", "" ], ]
}, },
"GlobalParameters": {
}
}
body = str.encode(json.dumps(data))
url = 'https://asiasoutheast.services.azureml.net/workspaces/46d0e60b05b34558827abd41f11d204f/services/acac88a083ce443789028306375ddf56/execute?api-version=2.0&details=true'
api_key = '<change here>' # Replace this with the API key for the web service
headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
req = urllib2.Request(url, body, headers)
try:
response = urllib2.urlopen(req)
# If you are using Python 3+, replace urllib2 with urllib.request in the above code:
# req = urllib.request.Request(url, body, headers)
# response = urllib.request.urlopen(req)
result = response.read()
print(result)
except urllib2.HTTPError, error:
print("The request failed with status code: " + str(error.code))
    # Print the headers - they include the request ID and the timestamp, which are useful for debugging the failure
print(error.info())
print(json.loads(error.read()))
| mit | -7,397,852,236,911,984,000 | 30.377778 | 173 | 0.626771 | false |
schwehr/gdal-autotest2 | python/ogr/georss_test.py | 1 | 15293 | # MOE:insert #!/usr/bin/env python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a complete rewrite of a file licensed as follows:
#
# Copyright (c) 2008-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Test OGR handling of GeoRSS files.
This is a rewrite of:
https://trac.osgeo.org/gdal/browser/trunk/autotest/ogr/ogr_georss.py
"""
import json
import os
import sys
import unittest
import google3
from osgeo import ogr
from osgeo import osr
from osgeo import gdal
from autotest2.gcore import gcore_util
from autotest2.ogr import ogr_util
DRIVER = ogr_util.GEORSS_DRIVER
EXT = '.xml'
DEFAULT_LAYER_NAME = 'OGRGeoRSS'
# Values used in some of the atom tests.
ATOM_FIELD_VALUES = [
('title', 'Atom draft-07 snapshot',
ogr.OFTString), ('link_rel', 'alternate',
ogr.OFTString), ('link_type', 'text/html', ogr.OFTString),
('link_href', 'http://example.org/2005/04/02/atom',
ogr.OFTString), ('link2_rel', 'enclosure',
ogr.OFTString), ('link2_type', 'audio/mpeg',
ogr.OFTString), ('link2_length', '1337',
ogr.OFTInteger),
('link2_href', 'http://example.org/audio/ph34r_my_podcast.mp3',
ogr.OFTString), ('id', 'tag:example.org,2003:3.2397',
ogr.OFTString), ('updated', '2005/07/31 12:29:29+00',
ogr.OFTDateTime),
('published', '2003/12/13 08:29:29-04',
ogr.OFTDateTime), ('author_name', 'Mark Pilgrim',
ogr.OFTString), ('author_uri', 'http://example.org/',
ogr.OFTString),
('author_email', '[email protected]',
ogr.OFTString), ('contributor_name', 'Sam Ruby',
ogr.OFTString), ('contributor2_name', 'Joe Gregorio',
ogr.OFTString), ('content_type', 'xhtml',
ogr.OFTString),
('content_xml_lang', 'en',
ogr.OFTString), ('content_xml_base', 'http://diveintomark.org/',
ogr.OFTString)
]
def setUpModule():
ogr_util.SetupTestEnv()
def CreateField(layer, name, field_type=ogr.OFTString):
field_definition = ogr.FieldDefn(name, field_type)
layer.CreateField(field_definition)
field_definition.Destroy()
@ogr_util.SkipIfDriverMissing(DRIVER)
class OgrGeoRSSTest(ogr_util.DriverTestCase):
def setUp(self):
super(OgrGeoRSSTest, self).setUp(DRIVER, EXT)
# Helper for GeoRSS tests. Used by GeoRss1x.
def ogrGeoRssTestAtom(self, ogr_filepath):
ds = self.CheckOpen(ogr_filepath)
lyr = ds.GetLayerByIndex(0)
self.assertIsNone(lyr.GetSpatialRef())
feat = lyr.GetNextFeature()
for field_value in ATOM_FIELD_VALUES:
self.assertEquals(feat.GetFieldAsString(field_value[0]), field_value[1])
self.assertIn('<div xmlns="http://www.w3.org/1999/xhtml">',
feat.GetFieldAsString('content'))
# Helper for GeoRSS tests. Used by GeoRss2~9.
def ogrGeoRssTest(self, ogr_filepath, only_first_feature):
ds = self.CheckOpen(ogr_filepath)
lyr = ds.GetLayerByIndex(0)
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('WGS84')
self.assertIsNotNone(lyr.GetSpatialRef())
self.assertTrue(lyr.GetSpatialRef().IsSame(srs))
self.assertNotIn('AXIS["Latitude",NORTH],AXIS["Longitude",EAST]',
lyr.GetSpatialRef().ExportToWkt())
feat = lyr.GetNextFeature()
expected_wkt = 'POINT (2 49)'
self.assertEquals(feat.GetGeometryRef().ExportToWkt(), expected_wkt)
self.assertEquals(feat.GetFieldAsString('title'), 'A point')
self.assertEquals(feat.GetFieldAsString('author'), 'Author')
self.assertEquals(feat.GetFieldAsString('link'), 'http://gdal.org')
self.assertEquals(
feat.GetFieldAsString('pubDate'), '2008/12/07 20:13:00+02')
self.assertEquals(feat.GetFieldAsString('category'), 'First category')
self.assertEquals(feat.GetFieldAsString('category_domain'), 'first_domain')
self.assertEquals(feat.GetFieldAsString('category2'), 'Second category')
self.assertEquals(
feat.GetFieldAsString('category2_domain'), 'second_domain')
feat = lyr.GetNextFeature()
expected_wkt = 'LINESTRING (2 48,2.1 48.1,2.2 48.0)'
if only_first_feature is False:
self.assertEquals(feat.GetGeometryRef().ExportToWkt(), expected_wkt)
self.assertEquals(feat.GetFieldAsString('title'), 'A line')
feat = lyr.GetNextFeature()
expected_wkt = 'POLYGON ((2 50,2.1 50.1,2.2 48.1,2.1 46.1,2 50))'
if only_first_feature is False:
self.assertEquals(feat.GetGeometryRef().ExportToWkt(), expected_wkt)
self.assertEquals(feat.GetFieldAsString('title'), 'A polygon')
feat = lyr.GetNextFeature()
expected_wkt = 'POLYGON ((2 49,2.0 49.5,2.2 49.5,2.2 49.0,2 49))'
if only_first_feature is False:
self.assertEquals(feat.GetGeometryRef().ExportToWkt(), expected_wkt)
self.assertEquals(feat.GetFieldAsString('title'), 'A box')
# Creates a RSS 2.0 document
def ogrGeoRssCreate(self, ogr_filepath, options):
ds = self.driver.CreateDataSource(ogr_filepath, options=options)
lyr = ds.CreateLayer('georss')
lyr.CreateField(ogr.FieldDefn('title', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('author', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('link', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('pubDate', ogr.OFTDateTime))
lyr.CreateField(ogr.FieldDefn('description', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('category', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('category_domain', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('category2', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('category2_domain', ogr.OFTString))
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
dst_feat.SetField('title', 'A point')
dst_feat.SetField('author', 'Author')
dst_feat.SetField('link', 'http://gdal.org')
dst_feat.SetField('pubDate', '2008/12/07 20:13:00+02')
dst_feat.SetField('category', 'First category')
dst_feat.SetField('category_domain', 'first_domain')
dst_feat.SetField('category2', 'Second category')
dst_feat.SetField('category2_domain', 'second_domain')
dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (2 49)'))
self.assertEqual(lyr.CreateFeature(dst_feat), 0)
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
dst_feat.SetField('title', 'A line')
dst_feat.SetField('author', 'Author')
dst_feat.SetField('link', 'http://gdal.org')
dst_feat.SetField('pubDate', '2008/12/07 20:13:00+02')
dst_feat.SetGeometry(
ogr.CreateGeometryFromWkt('LINESTRING (2 48,2.1 48.1,2.2 48.0)'))
self.assertEqual(lyr.CreateFeature(dst_feat), 0)
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
dst_feat.SetField('title', 'A polygon')
dst_feat.SetField('author', 'Author')
dst_feat.SetField('link', 'http://gdal.org')
dst_feat.SetField('pubDate', '2008/12/07 20:13:00+02')
dst_feat.SetGeometry(
ogr.CreateGeometryFromWkt(
'POLYGON ((2 50,2.1 50.1,2.2 48.1,2.1 46.1,2 50))'))
self.assertEqual(lyr.CreateFeature(dst_feat), 0)
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
dst_feat.SetField('title', 'A box')
dst_feat.SetField('author', 'Author')
dst_feat.SetField('link', 'http://gdal.org')
dst_feat.SetField('pubDate', '2008/12/07 20:13:00+02')
dst_feat.SetGeometry(
ogr.CreateGeometryFromWkt(
'POLYGON ((2 49,2.0 49.5,2.2 49.5,2.2 49.0,2 49))'))
self.assertEqual(lyr.CreateFeature(dst_feat), 0)
ds = None
def testOgrGeorss1(self):
filepath = ogr_util.GetTestFilePath('georss/atom_rfc_sample.xml')
self.ogrGeoRssTestAtom(filepath)
def testOgrGeorss1AtomNs(self):
filepath = ogr_util.GetTestFilePath('georss/atom_rfc_sample_atom_ns.xml')
self.ogrGeoRssTestAtom(filepath)
def testOgrGeorss1bis(self):
filepath = ogr_util.GetTestFilePath('/vsimem/test_atom.xml')
ds = self.driver.CreateDataSource(filepath, options=['FORMAT=ATOM'])
lyr = ds.CreateLayer('georss')
for field_value in ATOM_FIELD_VALUES:
lyr.CreateField(ogr.FieldDefn(field_value[0], field_value[2]))
lyr.CreateField(ogr.FieldDefn('content', ogr.OFTString))
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
for field_value in ATOM_FIELD_VALUES:
dst_feat.SetField(field_value[0], field_value[1])
dst_feat.SetField(
'content', '<div xmlns="http://www.w3.org/1999/xhtml">'
'<p><i>[Update: The Atom draft is finished.]</i></p></div>')
self.assertEqual(lyr.CreateFeature(dst_feat), 0)
def testOgrGeorss1ter(self):
filepath = ogr_util.GetTestFilePath('/vsimem/test_atom.xml')
self.ogrGeoRssTestAtom(filepath)
# Test reading a RSS 2.0 document with GeoRSS simple geometries
def testOgrGeorss2(self):
filepath = ogr_util.GetTestFilePath('georss/test_georss_simple.xml')
self.ogrGeoRssTest(filepath, False)
# Test reading a RSS 2.0 document with GeoRSS GML geometries
def testOgrGeorss3(self):
filepath = ogr_util.GetTestFilePath('georss/test_georss_gml.xml')
self.ogrGeoRssTest(filepath, False)
# Test writing a RSS 2.0 document in Simple dialect
# (doesn't need read support)
def testOgrGeorss4and5(self):
filepath = ogr_util.GetTestFilePath('/vsimem/ogr_georss_4.xml')
with gcore_util.GdalUnlinkWhenDone(filepath):
self.ogrGeoRssCreate(filepath, [])
src = self.CheckOpen(filepath)
lyr = src.GetLayerByName('georss')
self.assertIsNotNone(lyr)
# Portion that was in 5.
self.ogrGeoRssTest(filepath, False)
# Test writing a RSS 2.0 document in GML dialect
# (doesn't need read support)
def testOgrGeorss6and7(self):
filepath = ogr_util.GetTestFilePath('/vsimem/ogr_georss_6.xml')
with gcore_util.GdalUnlinkWhenDone(filepath):
self.ogrGeoRssCreate(filepath, ['GEOM_DIALECT=GML'])
src = self.CheckOpen(filepath)
lyr = src.GetLayerByName('georss')
self.assertIsNotNone(lyr)
# Portion that was in 7.
self.ogrGeoRssTest(filepath, False)
# Test writing a RSS 2.0 document in W3C Geo dialect
# (doesn't need read support)
def testOgrGeorss8and9(self):
filepath = ogr_util.GetTestFilePath('/vsimem/ogr_georss_8.xml')
with gcore_util.GdalUnlinkWhenDone(filepath):
self.ogrGeoRssCreate(filepath, ['GEOM_DIALECT=W3C_GEO'])
src = self.CheckOpen(filepath)
lyr = src.GetLayerByName('georss')
self.assertIsNotNone(lyr)
# Portion that was in 9.
self.ogrGeoRssTest(filepath, True)
# Test writing a RSS 2.0 document in GML dialect with EPSG:32631
def testOgrGeorss10and11(self):
filepath = ogr_util.GetTestFilePath('/vsimem/test32631.rss')
with gcore_util.GdalUnlinkWhenDone(filepath):
srs = osr.SpatialReference()
srs.ImportFromEPSG(32631)
ds = self.driver.CreateDataSource(filepath)
with gcore_util.GdalUnlinkWhenDone(filepath):
with gcore_util.ErrorHandler('CPLQuietErrorHandler'):
lyr = ds.CreateLayer('georss', srs=srs)
self.assertIsNone(lyr)
ds = self.driver.CreateDataSource(filepath, options=['GEOM_DIALECT=GML'])
lyr = ds.CreateLayer('georss', srs=srs)
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (500000 4000000)'))
self.assertEqual(lyr.CreateFeature(dst_feat), 0)
# Close the files and force a flush to the filesystem.
lyr = None
ds = None
src = self.CheckOpen(filepath)
lyr = src.GetLayerByName('georss')
self.assertIsNotNone(lyr)
# Portion that was in 11.
ds = self.CheckOpen(filepath)
lyr = ds.GetLayer(0)
srs = osr.SpatialReference()
srs.ImportFromEPSG(32631)
self.assertIsNotNone(lyr.GetSpatialRef())
self.assertTrue(lyr.GetSpatialRef().IsSame(srs))
self.assertIn('AXIS["Latitude",NORTH],AXIS["Longitude",EAST]',
lyr.GetSpatialRef().ExportToWkt())
feat = lyr.GetNextFeature()
expected_wkt = 'POINT (500000 4000000)'
self.assertEqual(feat.GetGeometryRef().ExportToWkt(), expected_wkt)
# TODO(b/71817518): ogr_georss_12
def testOgrGeorss13and14(self):
filepath = ogr_util.GetTestFilePath('/vsimem/test32631.rss')
with gcore_util.GdalUnlinkWhenDone(filepath):
ds = self.driver.CreateDataSource(
filepath, options=['USE_EXTENSIONS=YES'])
lyr = ds.CreateLayer('georss')
lyr.CreateField(ogr.FieldDefn('myns_field', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('field2', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('ogr_field3', ogr.OFTString))
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
dst_feat.SetField('myns_field', 'val')
dst_feat.SetField('field2', 'val2')
dst_feat.SetField('ogr_field3', 'val3')
self.assertEqual(lyr.CreateFeature(dst_feat), 0)
ds = None
src = self.CheckOpen(filepath)
lyr = src.GetLayerByName('georss')
self.assertIsNotNone(lyr)
# Portion that was in 14.
ds = self.CheckOpen(filepath)
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
self.assertEquals(feat.GetFieldAsString('myns_field'), 'val')
self.assertEquals(feat.GetFieldAsString('ogr_field2'), 'val2')
self.assertEquals(feat.GetFieldAsString('ogr_field3'), 'val3')
# ogr_georss_15 redundant as all temp files were tested with in memory file.
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -1,856,050,396,164,473,900 | 37.716456 | 81 | 0.678153 | false |
wisechengyi/pants | src/python/pants/util/collections.py | 1 | 3201 | # Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import collections
import collections.abc
from typing import Any, Callable, DefaultDict, Iterable, List, MutableMapping, Type, TypeVar, Union
_K = TypeVar("_K")
_V = TypeVar("_V")
def factory_dict(value_factory: Callable[[_K], _V], *args, **kwargs) -> DefaultDict:
"""A dict whose values are computed by `value_factory` when a `__getitem__` key is missing.
Note that values retrieved by any other method will not be lazily computed; eg: via `get`.
    :param value_factory: A callable that takes the missing key and returns the value to store for it.
    :param *args: Any positional args to pass through to `dict`.
    :param **kwargs: Any kwargs to pass through to `dict`.
"""
class FactoryDict(collections.defaultdict):
@staticmethod
def __never_called():
raise AssertionError(
"The default factory should never be called since we override " "__missing__."
)
def __init__(self):
super().__init__(self.__never_called, *args, **kwargs)
def __missing__(self, key):
value = value_factory(key)
self[key] = value
return value
return FactoryDict()
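# Example (a sketch, not part of the original module): the factory receives the
# missing key, so values can be derived lazily from the key itself:
#
#     squares = factory_dict(lambda key: key * key)
#     squares[4]      # -> 16, computed by the factory and cached
#     squares.get(5)  # -> None, .get() bypasses __missing__ as noted above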
def recursively_update(d: MutableMapping, d2: MutableMapping) -> None:
"""dict.update but which merges child dicts (dict2 takes precedence where there's conflict)."""
for k, v in d2.items():
if k in d:
if isinstance(v, dict):
recursively_update(d[k], v)
continue
d[k] = v
_T = TypeVar("_T")
def assert_single_element(iterable: Iterable[_T]) -> _T:
"""Get the single element of `iterable`, or raise an error.
:raise: :class:`StopIteration` if there is no element.
:raise: :class:`ValueError` if there is more than one element.
"""
it = iter(iterable)
first_item = next(it)
try:
next(it)
except StopIteration:
return first_item
raise ValueError(f"iterable {iterable!r} has more than one element.")
def ensure_list(val: Union[Any, Iterable[Any]], *, expected_type: Type[_T]) -> List[_T]:
"""Given either a single value or an iterable of values, always return a list.
This performs runtime type checking to ensure that every element of the list is the expected
type.
"""
if isinstance(val, expected_type):
return [val]
if not isinstance(val, collections.abc.Iterable):
raise ValueError(
f"The value {val} (type {type(val)}) did not have the expected type {expected_type} "
"nor was it an iterable."
)
result: List[_T] = []
for i, x in enumerate(val):
if not isinstance(x, expected_type):
raise ValueError(
f"Not all elements of the iterable have type {expected_type}. Encountered the "
f"element {x} of type {type(x)} at index {i}."
)
result.append(x)
return result
def ensure_str_list(val: Union[str, Iterable[str]]) -> List[str]:
"""Given either a single string or an iterable of strings, always return a list."""
return ensure_list(val, expected_type=str)
| apache-2.0 | 1,141,446,506,871,677,600 | 32.34375 | 99 | 0.621993 | false |
uclouvain/osis | base/tests/views/learning_units/external/test_update.py | 1 | 4921 | ############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
############################################################################
from django.contrib.messages import get_messages, SUCCESS
from django.test import TestCase
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from waffle.testutils import override_flag
from base.models.enums.entity_type import FACULTY
from base.models.enums.learning_container_year_types import EXTERNAL
from base.models.enums.organization_type import MAIN
from base.tests.factories.academic_calendar import generate_learning_unit_edition_calendars
from base.tests.factories.academic_year import create_current_academic_year
from base.tests.factories.entity import EntityWithVersionFactory
from base.tests.factories.external_learning_unit_year import ExternalLearningUnitYearFactory
from base.tests.factories.learning_unit_year import LearningUnitYearFullFactory
from base.tests.factories.person import PersonFactory
from base.tests.forms.test_external_learning_unit import get_valid_external_learning_unit_form_data
from base.views.learning_units.update import update_learning_unit
from learning_unit.tests.factories.central_manager import CentralManagerFactory
@override_flag('learning_unit_update', active=True)
class TestUpdateExternalLearningUnitView(TestCase):
@classmethod
def setUpTestData(cls):
cls.entity = EntityWithVersionFactory(organization__type=MAIN, version__entity_type=FACULTY)
cls.manager = CentralManagerFactory(entity=cls.entity, with_child=True)
cls.person = cls.manager.person
cls.academic_year = create_current_academic_year()
generate_learning_unit_edition_calendars([cls.academic_year])
cls.luy = LearningUnitYearFullFactory(
academic_year=cls.academic_year,
internship_subtype=None,
acronym="EFAC1000",
learning_container_year__container_type=EXTERNAL,
learning_container_year__requirement_entity=cls.entity,
learning_container_year__allocation_entity=cls.entity,
)
cls.data = get_valid_external_learning_unit_form_data(cls.academic_year, cls.luy, cls.entity)
cls.url = reverse(update_learning_unit, args=[cls.luy.pk])
def setUp(self):
self.external = ExternalLearningUnitYearFactory(learning_unit_year=self.luy)
self.client.force_login(self.person.user)
def test_update_get(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_update_get_permission_denied(self):
self.client.force_login(PersonFactory().user)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 403)
def test_update_post(self):
response = self.client.post(self.url, data=self.data)
self.assertEqual(response.status_code, 302)
messages = [m.level for m in get_messages(response.wsgi_request)]
self.assertEqual(messages, [SUCCESS])
def test_update_message_with_report(self):
self.data['postponement'] = "1"
response = self.client.post(self.url, data=self.data)
self.assertEqual(response.status_code, 302)
messages = [m.message for m in get_messages(response.wsgi_request)]
self.assertEqual(messages[0], _("The learning unit has been updated (with report)."))
def test_update_message_without_report(self):
self.data['postponement'] = "0"
response = self.client.post(self.url, data=self.data)
self.assertEqual(response.status_code, 302)
messages = [m.message for m in get_messages(response.wsgi_request)]
self.assertEqual(messages[0], _("The learning unit has been updated (without report)."))
| agpl-3.0 | -6,383,782,094,576,534,000 | 47.712871 | 101 | 0.710569 | false |
devdelay/home-assistant | homeassistant/util/__init__.py | 1 | 13534 | """Helper methods for various modules."""
from collections.abc import MutableSet
from itertools import chain
import threading
import queue
from datetime import datetime
import re
import enum
import socket
import random
import string
from functools import wraps
from types import MappingProxyType
from typing import Any, Sequence
from .dt import as_local, utcnow
RE_SANITIZE_FILENAME = re.compile(r'(~|\.\.|/|\\)')
RE_SANITIZE_PATH = re.compile(r'(~|\.(\.)+)')
RE_SLUGIFY = re.compile(r'[^a-z0-9_]+')
def sanitize_filename(filename):
r"""Sanitize a filename by removing .. / and \\."""
return RE_SANITIZE_FILENAME.sub("", filename)
def sanitize_path(path):
"""Sanitize a path by removing ~ and .."""
return RE_SANITIZE_PATH.sub("", path)
def slugify(text: str) -> str:
"""Slugify a given text."""
text = text.lower().replace(" ", "_")
return RE_SLUGIFY.sub("", text)
def repr_helper(inp: Any) -> str:
"""Help creating a more readable string representation of objects."""
if isinstance(inp, (dict, MappingProxyType)):
return ", ".join(
repr_helper(key)+"="+repr_helper(item) for key, item
in inp.items())
elif isinstance(inp, datetime):
return as_local(inp).isoformat()
else:
return str(inp)
def convert(value, to_type, default=None):
"""Convert value to to_type, returns default if fails."""
try:
return default if value is None else to_type(value)
except (ValueError, TypeError):
# If value could not be converted
return default
def ensure_unique_string(preferred_string: str,
current_strings: Sequence[str]) -> str:
"""Return a string that is not present in current_strings.
    If preferred string exists, will append _2, _3, ..
"""
test_string = preferred_string
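    # Illustrative example (not in the original source):
    #   ensure_unique_string("light", ["light", "light_2"]) -> "light_3"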
current_strings_set = set(current_strings)
tries = 1
while test_string in current_strings_set:
tries += 1
test_string = "{}_{}".format(preferred_string, tries)
return test_string
# Taken from: http://stackoverflow.com/a/11735897
def get_local_ip():
"""Try to determine the local IP address of the machine."""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Use Google Public DNS server to determine own IP
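        # (Connecting a UDP socket sends no packets; it only makes the OS pick
        # the outgoing interface/address, which is then read back below.)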
sock.connect(('8.8.8.8', 80))
return sock.getsockname()[0]
except socket.error:
return socket.gethostbyname(socket.gethostname())
finally:
sock.close()
# Taken from http://stackoverflow.com/a/23728630
def get_random_string(length=10):
"""Return a random string with letters and digits."""
generator = random.SystemRandom()
source_chars = string.ascii_letters + string.digits
return ''.join(generator.choice(source_chars) for _ in range(length))
class OrderedEnum(enum.Enum):
"""Taken from Python 3.4.0 docs."""
# pylint: disable=no-init, too-few-public-methods
def __ge__(self, other):
"""Return the greater than element."""
if self.__class__ is other.__class__:
return self.value >= other.value
return NotImplemented
def __gt__(self, other):
"""Return the greater element."""
if self.__class__ is other.__class__:
return self.value > other.value
return NotImplemented
def __le__(self, other):
"""Return the lower than element."""
if self.__class__ is other.__class__:
return self.value <= other.value
return NotImplemented
def __lt__(self, other):
"""Return the lower element."""
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
class OrderedSet(MutableSet):
"""Ordered set taken from http://code.activestate.com/recipes/576694/."""
def __init__(self, iterable=None):
"""Initialize the set."""
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
"""Return the length of the set."""
return len(self.map)
def __contains__(self, key):
"""Check if key is in set."""
return key in self.map
def add(self, key):
"""Add an element to the end of the set."""
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def promote(self, key):
"""Promote element to beginning of the set, add if not there."""
if key in self.map:
self.discard(key)
begin = self.end[2]
curr = begin[1]
curr[2] = begin[1] = self.map[key] = [key, curr, begin]
def discard(self, key):
"""Discard an element from the set."""
if key in self.map:
key, prev_item, next_item = self.map.pop(key)
prev_item[2] = next_item
next_item[1] = prev_item
def __iter__(self):
"""Iteration of the set."""
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
"""Reverse the ordering."""
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
    def pop(self, last=True):  # pylint: disable=arguments-differ
        """Pop element off the end of the set.
Set last=False to pop from the beginning.
"""
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def update(self, *args):
"""Add elements from args to the set."""
for item in chain(*args):
self.add(item)
def __repr__(self):
"""Return the representation."""
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
    def __eq__(self, other):
        """Return the comparison."""
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
class Throttle(object):
"""A class for throttling the execution of tasks.
This method decorator adds a cooldown to a method to prevent it from being
    called more than 1 time within the timedelta interval `min_time` after it
returned its result.
Calling a method a second time during the interval will return None.
Pass keyword argument `no_throttle=True` to the wrapped method to make
the call not throttled.
Decorator takes in an optional second timedelta interval to throttle the
'no_throttle' calls.
Adds a datetime attribute `last_call` to the method.
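    Illustrative usage (an assumed example, not from the original module;
    requires `from datetime import timedelta`):
        @Throttle(timedelta(seconds=30))
        def update(self):
            ...  # at most one real call per 30 seconds; throttled calls return None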
"""
# pylint: disable=too-few-public-methods
def __init__(self, min_time, limit_no_throttle=None):
"""Initialize the throttle."""
self.min_time = min_time
self.limit_no_throttle = limit_no_throttle
def __call__(self, method):
"""Caller for the throttle."""
if self.limit_no_throttle is not None:
method = Throttle(self.limit_no_throttle)(method)
# Different methods that can be passed in:
# - a function
# - an unbound function on a class
# - a method (bound function on a class)
# We want to be able to differentiate between function and unbound
# methods (which are considered functions).
        # All methods have the classname in their qualname separated by a '.'
# Functions have a '.' in their qualname if defined inline, but will
# be prefixed by '.<locals>.' so we strip that out.
is_func = (not hasattr(method, '__self__') and
'.' not in method.__qualname__.split('.<locals>.')[-1])
@wraps(method)
        def wrapper(*args, **kwargs):
            """Wrapper that allows the wrapped method to be called only once per min_time.
If we cannot acquire the lock, it is running so return None.
"""
# pylint: disable=protected-access
if hasattr(method, '__self__'):
host = method.__self__
elif is_func:
host = wrapper
else:
host = args[0] if args else wrapper
if not hasattr(host, '_throttle'):
host._throttle = {}
if id(self) not in host._throttle:
host._throttle[id(self)] = [threading.Lock(), None]
throttle = host._throttle[id(self)]
if not throttle[0].acquire(False):
return None
# Check if method is never called or no_throttle is given
force = not throttle[1] or kwargs.pop('no_throttle', False)
try:
if force or utcnow() - throttle[1] > self.min_time:
result = method(*args, **kwargs)
throttle[1] = utcnow()
return result
else:
return None
finally:
throttle[0].release()
return wrapper
class ThreadPool(object):
"""A priority queue-based thread pool."""
# pylint: disable=too-many-instance-attributes
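    # Rough usage sketch (illustrative, not from the original module):
    #   pool = ThreadPool(job_handler=process_job, worker_count=4)
    #   pool.add_job(priority=10, job=some_work_item)
    #   pool.block_till_done()
    #   pool.stop()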
def __init__(self, job_handler, worker_count=0, busy_callback=None):
"""Initialize the pool.
job_handler: method to be called from worker thread to handle job
worker_count: number of threads to run that handle jobs
busy_callback: method to be called when queue gets too big.
Parameters: worker_count, list of current_jobs,
pending_jobs_count
"""
self._job_handler = job_handler
self._busy_callback = busy_callback
self.worker_count = 0
self.busy_warning_limit = 0
self._work_queue = queue.PriorityQueue()
self.current_jobs = []
self._lock = threading.RLock()
self._quit_task = object()
self.running = True
for _ in range(worker_count):
self.add_worker()
def add_worker(self):
"""Add worker to the thread pool and reset warning limit."""
with self._lock:
if not self.running:
raise RuntimeError("ThreadPool not running")
worker = threading.Thread(
target=self._worker,
name='ThreadPool Worker {}'.format(self.worker_count))
worker.daemon = True
worker.start()
self.worker_count += 1
self.busy_warning_limit = self.worker_count * 3
def remove_worker(self):
"""Remove worker from the thread pool and reset warning limit."""
with self._lock:
if not self.running:
raise RuntimeError("ThreadPool not running")
self._work_queue.put(PriorityQueueItem(0, self._quit_task))
self.worker_count -= 1
self.busy_warning_limit = self.worker_count * 3
def add_job(self, priority, job):
"""Add a job to the queue."""
with self._lock:
if not self.running:
raise RuntimeError("ThreadPool not running")
self._work_queue.put(PriorityQueueItem(priority, job))
# Check if our queue is getting too big.
if self._work_queue.qsize() > self.busy_warning_limit \
and self._busy_callback is not None:
# Increase limit we will issue next warning.
self.busy_warning_limit *= 2
self._busy_callback(
self.worker_count, self.current_jobs,
self._work_queue.qsize())
def block_till_done(self):
"""Block till current work is done."""
self._work_queue.join()
def stop(self):
"""Finish all the jobs and stops all the threads."""
self.block_till_done()
with self._lock:
if not self.running:
return
# Tell the workers to quit
for _ in range(self.worker_count):
self.remove_worker()
self.running = False
# Wait till all workers have quit
self.block_till_done()
def _worker(self):
"""Handle jobs for the thread pool."""
while True:
# Get new item from work_queue
job = self._work_queue.get().item
if job is self._quit_task:
self._work_queue.task_done()
return
# Add to current running jobs
job_log = (utcnow(), job)
self.current_jobs.append(job_log)
# Do the job
self._job_handler(job)
# Remove from current running job
self.current_jobs.remove(job_log)
# Tell work_queue the task is done
self._work_queue.task_done()
class PriorityQueueItem(object):
"""Holds a priority and a value. Used within PriorityQueue."""
# pylint: disable=too-few-public-methods
def __init__(self, priority, item):
"""Initialize the queue."""
self.priority = priority
self.item = item
def __lt__(self, other):
"""Return the ordering."""
return self.priority < other.priority
| mit | -2,104,050,902,340,730,000 | 30.328704 | 79 | 0.570637 | false |
bxlab/bx-python | lib/bx/align/epo.py | 1 | 11523 | """Classes and utilities for mutliple alignments from the EPO pipeline"""
import logging
import os
import pickle as cPickle
import re
from collections import namedtuple
from ._epo import ( # noqa: F401
bed_union,
cummulative_intervals,
fastLoadChain,
rem_dash
)
log = logging.getLogger(__name__)
class Chain(namedtuple('Chain', 'score tName tSize tStrand tStart tEnd qName qSize qStrand qStart qEnd id')):
"""A Chain header as in http://genome.ucsc.edu/goldenPath/help/chain.html
chain coordinates are with respect to the strand, so for example tStart on the + strand is the
distance from the leftmost position; tStart on the - strand is the distance from the rightmost position."""
__slots__ = ()
def __str__(self):
return "chain {score} {tName} {tSize} {tStrand} {tStart} {tEnd} {qName} {qSize} {qStrand} {qStart} {qEnd} {id}".format(**self._asdict())
@classmethod
def _strfactory(cls, line):
"""factory class method for Chain
:param line: header of a chain (in .chain format)
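        Example header (the sample from the UCSC chain format page referenced
        above, shown here only as an illustration):
        "chain 4900 chrY 58368225 + 25985403 25985638 chr5 151006098 - 43257292 43257528 1"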
"""
assert isinstance(line, str), "this is a factory from string"
line = line.rstrip().split()[1:] # the first component is the keyword "chain"
tup = [t[0](t[1]) for t in zip([int, str, int, str, int, int, str, int, str, int, int, str], line)]
return tuple.__new__(cls, tup)
@classmethod
    def _make_from_epo(cls, trg_comp, qr_comp, trg_chrom_sizes, qr_chrom_sizes):
        """create a chain of collinear rings from the given components.
The target of the chain will always be on the forward strand.
This is done to avoid confusion when mapping psl files. So,
if trg_comp.strand=-, qr_comp.strand=- (resp. +) the
chain header will have tStrand=+, qStrand=+ (resp. -). No strand
changes on the other cases.
:param trg_comp: target (i.e, the first) component
:type trg_comp: L{EPOitem}
:param qr_comp: query (i.e, the second) component
:type qr_comp: L{EPOitem}
:param trg_chrom_sizes: chromosome sizes of the target
:type trg_chrom_sizes: dictionary of the type (chrom) --> size
:param qr_chrom_sizes: chromosome sizes of the query
:type qr_chrom_sizes: dictionary of the type (chrom) --> size
:return: A L{Chain} instance"""
        # S/T/Q hold the chain data lines: aligned block sizes (S), gaps on the target (T), gaps on the query (Q)
S, T, Q = [], [], []
# the target strand of the chain must be on the forward strand
trg_intervals = trg_comp.intervals(reverse=trg_comp.strand == '-')
qr_intervals = qr_comp.intervals(reverse=trg_comp.strand == '-')
if len(trg_intervals) == 0 or len(qr_intervals) == 0:
log.warning("deletion/insertion only intervals")
return None
A, B = rem_dash(trg_intervals, qr_intervals)
# correct for when cigar starts/ends with dashes (in number of bases)
tr_start_correction = max(B[0][0] - A[0][0], 0)
tr_end_correction = max(A[-1][1] - B[-1][1], 0)
qr_start_correction = max(A[0][0] - B[0][0], 0)
qr_end_correction = max(B[-1][1] - A[-1][1], 0)
a, b = A.pop(0), B.pop(0)
        # intervals are 0-based, half-open => lengths = coordinate difference
while A or B:
if a[1] < b[1]:
T.append(0)
Q.append(A[0][0] - a[1])
S.append(min(a[1], b[1]) - max(a[0], b[0]))
a = A.pop(0)
elif b[1] < a[1]:
Q.append(0)
T.append(B[0][0] - b[1])
S.append(min(a[1], b[1]) - max(a[0], b[0]))
b = B.pop(0)
elif A and B:
assert 1 > 2, "there are dash columns"
else:
break
S.append(min(a[1], b[1]) - max(a[0], b[0]))
assert len(T) == len(Q) == len(S) - 1, "(S, T, Q) = (%d, %d, %d)" % tuple(map(len, (S, T, Q)))
tSize = trg_chrom_sizes[trg_comp.chrom]
qSize = qr_chrom_sizes[qr_comp.chrom]
# UCSC coordinates are 0-based, half-open and e! coordinates are 1-base, closed
# chain_start = epo_start - 1 and chain_end = epo_end
if qr_comp.strand == '+':
chain = Chain(
0, trg_comp.chrom, tSize, "+",
(trg_comp.start - 1) + tr_start_correction, trg_comp.end - tr_end_correction,
qr_comp.chrom, qSize, (qr_comp.strand == trg_comp.strand and '+' or '-'),
(qr_comp.start - 1) + qr_start_correction, qr_comp.end - qr_end_correction,
qr_comp.gabid)
else:
chain = Chain(
0, trg_comp.chrom, tSize, "+",
(trg_comp.start - 1) + tr_start_correction, trg_comp.end - tr_end_correction,
qr_comp.chrom, qSize, (qr_comp.strand == trg_comp.strand and '+' or '-'),
(qr_comp.start - 1) + qr_end_correction, qr_comp.end - qr_start_correction,
qr_comp.gabid)
# strand correction. in UCSC coordinates this is: size - coord
if chain.qStrand == '-':
chain = chain._replace(
qEnd=chain.qSize - chain.qStart,
qStart=chain.qSize - chain.qEnd)
assert chain.tEnd - chain.tStart == sum(S) + sum(T), "[%s] %d != %d" % (
str(chain), chain.tEnd - chain.tStart, sum(S) + sum(T))
assert chain.qEnd - chain.qStart == sum(S) + sum(Q), "[%s] %d != %d" % (
str(chain), chain.qEnd - chain.qStart, sum(S) + sum(Q))
return chain, S, T, Q
def slice(self, who):
"return the slice entry (in a bed6 format), AS IS in the chain header"
assert who in ('t', 'q'), "who should be 't' or 'q'"
if who == 't':
return (self.tName, self.tStart, self.tEnd, self.id, self.score, self.tStrand)
else:
return (self.qName, self.qStart, self.qEnd, self.id, self.score, self.qStrand)
def bedInterval(self, who):
"return a BED6 entry, thus DOES coordinate conversion for minus strands"
if who == 't':
st, en = self.tStart, self.tEnd
if self.tStrand == '-':
st, en = self.tSize-en, self.tSize-st
return (self.tName, st, en, self.id, self.score, self.tStrand)
else:
st, en = self.qStart, self.qEnd
if self.qStrand == '-':
st, en = self.qSize-en, self.qSize-st
assert en-st == self.qEnd - self.qStart
return (self.qName, st, en, self.id, self.score, self.qStrand)
@classmethod
def _parse_file(cls, path, pickle=False):
"""parse a .chain file into a list of the type [(L{Chain}, arr, arr, arr) ...]
        :param path: name of the file"""
fname = path
if fname.endswith(".gz"):
fname = path[:-3]
if fname.endswith('.pkl'):
# you asked for the pickled file. I'll give it to you
log.debug("loading pickled file %s ...", fname)
with open(fname, "rb") as f:
return cPickle.load(f)
elif os.path.isfile("%s.pkl" % fname):
# there is a cached version I can give to you
log.info("loading pickled file %s.pkl ...", fname)
if os.stat(path).st_mtime > os.stat("%s.pkl" % fname).st_mtime:
log.critical("*** pickled file %s.pkl is not up to date ***", fname)
try:
with open("%s.pkl" % fname, "rb") as f:
return cPickle.load(f)
except Exception:
log.warning("Loading pickled file %s.pkl failed", fname)
data = fastLoadChain(path, cls._strfactory)
if pickle and not os.path.isfile('%s.pkl' % fname):
log.info("pickling to %s.pkl", fname)
with open('%s.pkl' % fname, 'wb') as f:
cPickle.dump(data, f)
return data
class EPOitem(namedtuple('Epo_item', 'species gabid chrom start end strand cigar')):
"this format is how alignments are delivered from e!"
__slots__ = ()
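    # EPO cigars look like "4MD4M2DM": an optional count (default 1) followed by
    # M (aligned column) or D (dash/gap), parsed by the pattern below.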
cigar_pattern = re.compile(r"(\d*)([MD])")
def __repr__(self):
return str(self)
def __str__(self):
c = self.cigar[:5] + "..." + self.cigar[-5:]
return "(%s %s %s %d %d %s %s)" % tuple(self[:6] + (c,))
@classmethod
def _strfactory(cls, line):
"""factory method for an EPOitem
:param line: a line of input"""
cmp = line.rstrip().split()
chrom = cmp[2]
if not chrom.startswith("chr"):
chrom = "chr%s" % chrom
instance = tuple.__new__(
cls,
(cmp[0], cmp[1], chrom, int(cmp[3]), int(cmp[4]), {'1': '+', '-1': '-'}[cmp[5]], cmp[6]))
span = instance.end - instance.start + 1
m_num = sum((t[1] == "M" and [t[0]] or [0])[0] for t in instance.cigar_iter(False))
if span != m_num:
log.warning("[{gabid}] {species}.{chrom}:{start}-{end}.".format(**instance._asdict()) + "(span) %d != %d (matches)" % (span, m_num))
return None
return instance
@classmethod
def _parse_epo(cls, fname):
"""Load an entire file in the EPO format into a dictionary of the type {gab_id => [Epoitem, ...]}
:param fname: file name"""
data = {}
with open(fname) as fd:
for el in (cls._strfactory(_) for _ in fd):
if el:
data.setdefault(el.gabid, []).append(el)
log.info("parsed %d elements from %s", len(data), fname)
return data
def cigar_iter(self, reverse):
"""self.cigar => [(length, type) ... ] iterate the cigar
:param reverse: whether to iterate in the reverse direction (right-to-left)
:type reverse: boolean
:return a list of pairs of the type [(length, M/D) ..]
"""
l = 0
P = self.cigar_pattern
data = []
cigar = self.cigar
parsed_cigar = re.findall(P, cigar)
if reverse:
parsed_cigar = parsed_cigar[::-1]
for _l, t in parsed_cigar:
# 1M is encoded as M
l = (_l and int(_l) or 1) # int(_l) cannot be 0
data.append((l, t))
return data
def intervals(self, reverse, thr=0):
"""return a list of (0-based half-open) intervals representing the match regions of the cigar
for example 4MD4M2DM with reverse=False will produce [(0,4), (5,9), (11,12)]
4MD4M2DM with reverse=True will produce [(0,1), (3,7), (8,12)] (= 12 - previous interval)
:param reverse: whether to iterate in the reverse direction (right-to-left) (this is passed as is to self.cigar_iter)
:type reverse: boolean
:param thr: shift all intervals by this much
:type thr: integer
:return: list of pairs"""
d = [(thr, thr)]
dl = 0
for tup in self.cigar_iter(reverse):
if tup[1] == "D":
dl = tup[0]
else:
s = d[-1][1] + dl
d.append((s, s+tup[0]))
assert d[0] == (thr, thr)
# assert that nr. of Ms in the interval == sum of produced intervals
assert sum(t[0] for t in self.cigar_iter(False) if t[1] == "M") == sum(t[1]-t[0] for t in d)
d_sum = sum(t[1]-t[0] for t in d)
assert self.end - self.start + 1 == d_sum, "[ (%d, %d) = %d ] != %d" % (
self.start, self.end, self.end-self.start+1, d_sum)
return d[1:] # clip the (thr, thr) entry
| mit | 7,633,953,274,690,669,000 | 38.462329 | 144 | 0.540484 | false |
Arcensoth/cogbot | cogbot/cogs/join_leave/join_leave_server_state.py | 1 | 2346 | from discord import Member, Role
from discord.ext.commands import Context
from cogbot.cogs.abc.base_cog import BaseCogServerState
from cogbot.cogs.join_leave.join_leave_options import JoinLeaveOptions
class JoinLeaveServerState(BaseCogServerState[JoinLeaveOptions]):
async def create_options(self) -> JoinLeaveOptions:
return await JoinLeaveOptions().init(self, self.raw_options)
async def join_role(self, ctx: Context, author: Member, role_alias: str):
try:
role_entry = self.options.role_entry_from_alias[role_alias.lower()]
role = self.bot.get_role(self.server, role_entry.role_id)
await self.bot.add_roles(author, role)
await self.bot.say(f"{author.mention} has joined {role}")
except:
self.log.info(f"{author} failed to join the role: {role_alias}")
await self.bot.react_question(ctx)
async def leave_role(self, ctx: Context, author: Member, role_alias: str):
try:
role_entry = self.options.role_entry_from_alias[role_alias]
role = self.bot.get_role(self.server, role_entry.role_id)
await self.bot.remove_roles(author, role)
await self.bot.say(f"{author.mention} has left {role}")
except:
self.log.info(f"{author} failed to leave the role: {role_alias}")
await self.bot.react_question(ctx)
async def list_roles(self, ctx: Context, author: Member):
role_lines = []
for role_entry in self.options.role_entries:
role: Role = self.bot.get_role(self.server, role_entry.role_id)
role_lines.append(f"{role}")
role_aliases = role_entry.aliases
first_role_alias = role_aliases[0]
other_role_aliases = role_aliases[1:]
role_aliases_line = f" >join {first_role_alias}"
if other_role_aliases:
other_role_aliases_str = " or ".join(
f'"{role_alias}"' for role_alias in other_role_aliases
)
role_aliases_line = f"{role_aliases_line} (or {other_role_aliases_str})"
role_lines.append(role_aliases_line)
roles_str = "\n".join(role_lines)
await self.bot.say(
f"{author.mention} Available self-assignable roles:\n```\n{roles_str}\n```"
)
| mit | 4,599,399,970,453,194,000 | 45.92 | 88 | 0.6185 | false |
Rahulsharma0810/Scrapy-Python-TheHinduDailyNews | TheHinduDailyNews/settings.py | 1 | 3227 | # -*- coding: utf-8 -*-
# Scrapy settings for TheHinduDailyNews project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'TheHinduDailyNews'
SPIDER_MODULES = ['TheHinduDailyNews.spiders']
NEWSPIDER_MODULE = 'TheHinduDailyNews.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'TheHinduDailyNews (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'TheHinduDailyNews.middlewares.ThehindudailynewsSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'TheHinduDailyNews.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'TheHinduDailyNews.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| mit | -533,306,544,020,083,650 | 34.855556 | 109 | 0.771615 | false |
mypinballs/whirlwind | effects.py | 1 | 8263 | # Top Rollover Lanes
__author__="jim"
__date__ ="$Jan 18, 2011 1:36:37 PM$"
import procgame
import locale
from procgame import *
base_path = config.value_for_key_path('base_path')
game_path = base_path+"games/whirlwind/"
class Effects(game.Mode):
def __init__(self, game, priority):
super(Effects, self).__init__(game, priority)
def drive_lamp(self, lamp_name, style='on',time=2):
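        # The hex values below are 32-bit on/off bit patterns for the lamp
        # driver (cycle_seconds=0 repeats indefinitely) -- an assumption based
        # on the usual pyprocgame schedule() semantics.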
if style == 'slow':
self.game.lamps[lamp_name].schedule(schedule=0x00ff00ff, cycle_seconds=0, now=True)
elif style == 'medium':
self.game.lamps[lamp_name].schedule(schedule=0x0f0f0f0f, cycle_seconds=0, now=True)
elif style == 'fast':
self.game.lamps[lamp_name].schedule(schedule=0x99999999, cycle_seconds=0, now=True)
elif style == 'superfast':
self.game.lamps[lamp_name].schedule(schedule=0xaaaaaaaa, cycle_seconds=0, now=True)
elif style == 'on':
self.game.lamps[lamp_name].enable()
elif style == 'off':
self.off(lamp_name)
elif style == 'smarton':
self.game.lamps[lamp_name].schedule(schedule=0xaaaaaaaa, cycle_seconds=0, now=True)
self.cancel_delayed(lamp_name+'_on')
self.delay(name=lamp_name+'_on', event_type=None, delay=0.6, handler=self.game.lamps[lamp_name].enable)
elif style == 'timedon':
self.game.lamps[lamp_name].enable()
self.cancel_delayed(lamp_name+'_off')
self.delay(name=lamp_name+'_off', event_type=None, delay=time, handler=self.off,param=lamp_name)
elif style == 'timeout':
if time>10:
self.cancel_delayed(lamp_name+'_medium')
self.delay(name=lamp_name+'_medium', event_type=None, delay=time-10, handler=lambda:self.drive_lamp(lamp_name,'medium'))
if time>5:
self.cancel_delayed(lamp_name+'_fast')
self.delay(name=lamp_name+'_fast', event_type=None, delay=time-5, handler=lambda:self.drive_lamp(lamp_name,'fast'))
if time>1:
self.cancel_delayed(lamp_name+'_superfast')
self.delay(name=lamp_name+'_superfast', event_type=None, delay=time-1, handler=lambda:self.drive_lamp(lamp_name,'superfast'))
self.delay(name=lamp_name+'_off', event_type=None, delay=time, handler=self.off,param=lamp_name)
def clear_lamp_timers(self,lamp_name):
self.cancel_delayed(lamp_name+'_medium')
self.cancel_delayed(lamp_name+'_fast')
self.cancel_delayed(lamp_name+'_superfast')
self.cancel_delayed(lamp_name+'on')
self.cancel_delayed(lamp_name+'_off')
def off(self,lamp_name):
self.clear_lamp_timers(lamp_name)
self.game.lamps[lamp_name].disable()
# def drive_super_fast(self, lamp_name):
# self.game.lamps[lamp_name].schedule(schedule=0x99999999, cycle_seconds=0, now=True)
#
# def drive_fast(self, lamp_name):
# self.game.lamps[lamp_name].schedule(schedule=0x55555555, cycle_seconds=0, now=True)
#
# def drive_medium(self, lamp_name):
# self.game.lamps[lamp_name].schedule(schedule=0x0f0f0f0f, cycle_seconds=0, now=True)
def drive_flasher(self, data, style='medium',cycle=0,time=2):
if isinstance(data, basestring):
flasher_name=data
else:
flasher_name=data[0]
style = data[1]
time = data[2]
if style == 'slow':
self.game.coils[flasher_name].schedule(schedule=0x00003000, cycle_seconds=cycle, now=True)
elif style == 'medium':
self.game.coils[flasher_name].schedule(schedule=0x30003000, cycle_seconds=cycle, now=True)
elif style == 'fast':
self.game.coils[flasher_name].schedule(schedule=0x11111111, cycle_seconds=cycle, now=True)
elif style == 'super':
self.game.coils[flasher_name].schedule(schedule=0x55555555, cycle_seconds=cycle, now=True)
elif style == 'super2':
self.game.coils[flasher_name].schedule(schedule=0x55055055, cycle_seconds=cycle, now=True)
elif style == 'strobe':
self.game.coils[flasher_name].schedule(schedule=0xeeeeeeee, cycle_seconds=cycle, now=True)
elif style == 'chaos':
self.game.coils[flasher_name].schedule(schedule=0x019930AB, cycle_seconds=cycle, now=True)
elif style == 'fade':
self.game.coils[flasher_name].schedule(schedule=0xAAA99933, cycle_seconds=cycle, now=True)
if time>0:
self.delay(name=flasher_name+'_off', event_type=None, delay=time, handler=self.game.coils[flasher_name].disable)
# def strobe_flasher_set(self,flasher_list,time=0.5):
# timer = 0
# for fname in flasher_list:
# self.delay(name=fname+'strobe', event_type=None, delay=timer, handler=self.drive_flasher, param=[fname,'fast',time])
# timer+=time
def strobe_flasher_set(self,flasher_list,time=1,overlap=0.2,repeats=1,enable=True):
timer = 0
for i in range(repeats):
for fname in flasher_list:
if enable:
self.delay(name=fname+'strobe', event_type=None, delay=timer, handler=self.drive_flasher, param=[fname,'fast',time+overlap])
timer+=time
else:
self.cancel_delayed(fname+'strobe')
self.game.coils[fname].disable()
def strobe_controlled_flasher_set(self,flasher_list,time=0.1,overlap=0.2,repeats=1,enable=True):
timer = 0
#playfield flashers
sequence=[]
for j in range(repeats):
sequence += flasher_list
for i in range(len(sequence)):
def flash(i,time,delay):
self.delay(delay=delay,handler=lambda:self.game.switched_coils.drive(name=sequence[i],style='fast',time=time+0.1))
flash(i,time,timer)
timer+=time
def drive_led(self,lamp_name,colour):
if colour=='red':
self.led_colour_data(lamp_name,'on','off','off')
elif colour=='pink':
self.led_colour_data(lamp_name,'on','off','med')
elif colour=='magenta':
self.led_colour_data(lamp_name,'on','off','on')
elif colour=='purple':
self.led_colour_data(lamp_name,'med','off','on')
elif colour=='skyblue':
self.led_colour_data(lamp_name,'off','med','on')
elif colour=='blue':
self.led_colour_data(lamp_name,'off','off','on')
elif colour=='cyan':
self.led_colour_data(lamp_name,'off','on','on')
elif colour=='turquoise':
self.led_colour_data(lamp_name,'off','on','med')
elif colour=='green':
self.led_colour_data(lamp_name,'off','on','off')
elif colour=='limegreen':
self.led_colour_data(lamp_name,'med','on','off')
elif colour=='yellow':
self.led_colour_data(lamp_name,'on','on','off')
elif colour=='orange':
self.led_colour_data(lamp_name,'on','med','off')
elif colour=='white':
self.led_colour_data(lamp_name,'on','on','on')
elif colour=='black':
self.led_colour_data(lamp_name,'off','off','off')
def led_colour_data(self,lamp_name,red,blue,green):
data=[red,green,blue]
name=['Red','Green','Blue']
for i in range(len(data)):
if data[i]=='off':
self.game.lamps[lamp_name+name[i]].disable()
elif data[i]=='on':
self.game.lamps[lamp_name+name[i]].enable()
elif data[i]=='med':
self.game.lamps[lamp_name+name[i]].schedule(schedule=0x80808080, cycle_seconds=0, now=True)
# self.game.lamps[lamp_name+name[i]].patter()
| gpl-3.0 | 1,920,861,269,690,406,000 | 44.15847 | 148 | 0.563839 | false |
lilmuck/lilmuck | plugin.video.szenestreams/default.py | 1 | 6874 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib,urllib2,re,xbmcaddon,xbmcplugin,xbmcgui,xbmc,HTMLParser
from stream import *
htmlparser = HTMLParser.HTMLParser()
pluginhandle = int(sys.argv[1])
itemcnt = 0
baseurl = 'http://www.szene-streams.com'
settings = xbmcaddon.Addon(id='plugin.video.szene-streams')
maxitems = (int(settings.getSetting("items_per_page"))+1)*10
filterUnknownHoster = settings.getSetting("filterUnknownHoster") == 'true'
forceMovieViewMode = settings.getSetting("forceMovieViewMode") == 'true'
movieViewMode = str(settings.getSetting("movieViewMode"))
dbg = False
def CATEGORIES():
data = getUrl(baseurl)
cats = re.findall('<a[^>]*?class="CatInf"[^>]*?href="(.*?)"[^>]*?>.*?<div class="CatNumInf">(.*?)</div>[^<]*?<div[^>]*?class="CatNameInf">(.*?)</div>', data, re.S|re.I)
addDir('Letzte Updates', baseurl, 1, '', True)
addDir('Serien', baseurl + '/load', 0, '', True)
for (url, num, name) in cats:
if 'http:' not in url: url = baseurl + url
addDir(name + ' [COLOR=blue](' + num + ')[/COLOR]', url, 1, '', True)
xbmc.executebuiltin("Container.SetViewMode(400)")
def SERIES(url):
data = getUrl(url)
cats = re.findall('<a[^>]*?class="CatInf"[^>]*?href="(.*?)"[^>]*?>.*?<div class="CatNumInf">(.*?)</div>[^<]*?<div[^>]*?class="CatNameInf">(.*?)</div>', data, re.S|re.I)
addDir('Letzte Updates', baseurl + '/load/0-1', 1, '', True)
for (url, num, name) in cats:
if 'http:' not in url: url = baseurl + url
addDir(name + ' [COLOR=blue](' + num + ')[/COLOR]', url, 1, '', True)
xbmc.executebuiltin("Container.SetViewMode(400)")
def INDEX(url):
global itemcnt
nextPageUrl = re.sub('-[\d]+$', '', url)
print url
data = getUrl(url)
movies = re.findall('<div class="ImgWrapNews">[^<]*<a[^<]*<img[^>]*src="([^"]*.[jpg|png])"[^>]*alt="([^"]*)"[^>]*>.*?class="[^"]*entryLink[^"]*".*?href="([^"]*)"', data, re.S|re.I)
if movies:
for (image, title, url) in movies:
if 'http:' not in url: url = baseurl + url
addDir(clean(title), url, 2, image, True)
itemcnt = itemcnt + 1
    nextPage = re.findall('<a class="swchItem"[^>]*onclick="spages\(\'(\d+)\'[^>]*?"[^>]*><span>»</span>', data, re.S)
if nextPage:
if itemcnt >= maxitems:
addDir('Weiter >>', nextPageUrl + '-' + nextPage[0], 1, '', True)
else:
INDEX(nextPageUrl + '-' + nextPage[0])
if forceMovieViewMode: xbmc.executebuiltin("Container.SetViewMode(" + movieViewMode + ")")
def VIDEOLINKS(url, image):
data = getUrl(url)
streams = []
raw = re.findall('(<fieldset[^>]*>[^<]*<legend>.*?</fieldset>)', data, re.S)
if raw:
for each in raw:
series = re.findall('<div class="spoiler"><font[^>]*><b[^>]*>(.+?)</b>(.*?)<input', each, re.S|re.I)
if not series: series = re.findall('<legend>(.+?)</legend>[^<]*<div class="spoiler">(.*?)<input', each, re.S|re.I)
if not series: series = re.findall('<legend>(.+?)</legend>.*?(<iframe.*?</iframe>|<a[^>]*href=".+"[^>]*>).*', each, re.S|re.I)
if series:
for ser in series:
for (s, n) in re.findall('<a[^>]*href="([^"]+)"[^>]*>([^<]*)<', each, re.S|re.I):
if dbg: print 'ser1'
if ser: n = clean(ser[1]) + ' ' + extractFilename(s)
n = clean(n) if n else extractFilename(s)
if n: streams += [(n, s)]
for s in re.findall('<iframe[^>]*src="([^"]*)"[^>]*>', each, re.S|re.I):
if dbg: print 'ser2'
if ser: n = clean(ser[1])
if not n: n = 'unknown'
if n: streams += [(n, s)]
elif re.match('.*?iframe.*?src.*', each, re.S|re.I):
if dbg: print 'nonser1'
streams += re.findall('<font[^>]*>.*?src=".*?/player/(.*?)\..{3}".*?<iframe.*?src=["|\'](.*?)["|\']', each, re.S|re.I)
else:
if dbg: print 'nonser2'
streams += re.findall('<font[^>]*>.*?src=".*?/player/(.*?)\..{3}".*?</font>.*?target="_blank" href=["|\'](.*?)["|\']', each, re.S|re.I)
if streams:
for (filename, stream) in streams:
hoster = get_stream_link().get_hostername(stream)
if filterUnknownHoster and hoster == 'Not Supported': continue
entry = '[COLOR=blue](' + hoster + ')[/COLOR] ' + filename
addLink(entry, clean(stream), 3, image)
def clean(s):
try: s = htmlparser.unescape(s)
except: print "could not unescape string '%s'"%(s)
s = re.sub('<[^>]*>', '', s)
s = s.replace('_', ' ')
s = re.sub('[ ]+', ' ', s)
for hit in set(re.findall("&#\d+;", s)):
try: s = s.replace(hit, unichr(int(hit[2:-1])))
except ValueError: pass
return s.strip('\n').strip()
def extractFilename(path):
path = re.sub('^.*/', '',clean(path)).replace('.html', '').replace('_', ' ')
return re.sub('\.[a-zA-Z]{3}', '', path)
def GETLINK(url):
stream_url = get_stream_link().get_stream(url)
if stream_url:
if re.match('^Error: ', stream_url, re.S|re.I):
xbmc.executebuiltin("XBMC.Notification(Fehler!, " + re.sub('^Error: ','',stream_url) + ", 4000)")
else:
listitem = xbmcgui.ListItem(path=stream_url)
return xbmcplugin.setResolvedUrl(pluginhandle, True, listitem)
def getUrl(url):
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
response = urllib2.urlopen(req)
data = response.read()
response.close()
return data
def get_params():
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
def addLink(name, url, mode, image):
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)
liz = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=image)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
liz.setProperty('IsPlayable', 'true')
return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz)
def addDir(name, url, mode, image, is_folder=False):
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&image="+urllib.quote_plus(image)
liz = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=image)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=is_folder)
params = get_params()
url = mode = image = None
try: url = urllib.unquote_plus(params["url"])
except: pass
try: mode = int(params["mode"])
except: pass
try: image = urllib.unquote_plus(params["image"])
except: pass
if mode==None or url==None or len(url)<1: CATEGORIES()
elif mode==0: SERIES(url)
elif mode==1: INDEX(url)
elif mode==2: VIDEOLINKS(url, image)
elif mode==3: GETLINK(url)
xbmcplugin.endOfDirectory(int(sys.argv[1])) | gpl-2.0 | -6,652,518,995,713,558,000 | 40.167665 | 181 | 0.608816 | false |
tseaver/gcloud-python | videointelligence/nox.py | 1 | 2462 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import nox
@nox.session
def default(session):
return unit(session, 'default')
@nox.session
@nox.parametrize('py', ['2.7', '3.5', '3.6', '3.7'])
def unit(session, py):
"""Run the unit test suite."""
# Run unit tests against all supported versions of Python.
if py != 'default':
session.interpreter = 'python{}'.format(py)
# Set the virtualenv directory name.
session.virtualenv_dirname = 'unit-' + py
# Install all test dependencies, then install this package in-place.
session.install('pytest')
session.install('-e', '.')
# Run py.test against the unit tests.
session.run('py.test', '--quiet', os.path.join('tests', 'unit'))
# TODO: Fix generated system tests
#@nox.session
#@nox.parametrize('py', ['2.7', '3.7'])
#def system(session, py):
# """Run the system test suite."""
#
# # Sanity check: Only run system tests if the environment variable is set.
# if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):
# session.skip('Credentials must be set via environment variable.')
#
# # Run unit tests against all supported versions of Python.
# session.interpreter = 'python{}'.format(py)
#
# # Set the virtualenv dirname.
# session.virtualenv_dirname = 'sys-' + py
#
# # Install all test dependencies, then install this package in-place.
# session.install('pytest')
# session.install('-e', '.')
#
# # Run py.test against the unit tests.
# session.run('py.test', '--quiet', os.path.join('tests', 'system'),
# *session.posargs)
@nox.session
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.interpreter = 'python3.6'
session.install('docutils', 'pygments')
session.run('python', 'setup.py', 'check', '--restructuredtext',
'--strict')
| apache-2.0 | -6,340,280,631,160,391,000 | 30.974026 | 78 | 0.669374 | false |
Skyeouyang/Text-Analytics-Project | lexicon analysis.py | 1 | 2398 | #######################################
##Author Skye Ouyang
##Date 19th Apr.
#######################################
import glob
import os
def IsNotNull(value):
return value is not None and len(value) > 0
#create weapon list
dict_weapon = []
weapons = open('D:/1. msba/Trimester II Jan.2017-May.2017/text analytics/project/lexicon/weapon_words.txt','r')
for weapon in weapons:
t = weapon.strip().lower()
if (IsNotNull(t)):
dict_weapon.append(t)
weapons.close()
#create bloody words list
dict_bloody = []
bloodys = open('D:/1. msba/Trimester II Jan.2017-May.2017/text analytics/project/lexicon/bloody_words.txt','r')
for bloody in bloodys:
b = bloody.strip().lower()
if (IsNotNull(b)):
dict_bloody.append(b)
#create mysterious words list
dict_mysterious = []
mysteriouss = open('D:/1. msba/Trimester II Jan.2017-May.2017/text analytics/project/lexicon/mysterious_words.txt','r')
for mysterious in mysteriouss:
m = mysterious.strip().lower()
if (IsNotNull(m)):
dict_mysterious.append(m)
#input data
path ="D:/1. msba/Trimester II Jan.2017-May.2017/text analytics/project/dataset/low_score_novel"
allFiles = glob.glob(path + "/*.txt")
#file = open('D:/1. msba/Trimester II Jan.2017-May.2017/text analytics/project/dataset/high_score_novel/01. The Girl with the Dragon Tattoo.txt','r')
weapon_cnt = []
bloody_cnt = []
mysterious_cnt = []
for file in allFiles:
with open(file) as fle:
fiction = fle.read()
# set for loop
wea_cnt = 0
blo_cnt = 0
mys_cnt = 0
        # count how many distinct weapon lexicon words occur in the text (presence, not frequency)
for word in dict_weapon:
if (word in fiction):
wea_cnt = wea_cnt + 1
for word in dict_bloody:
if (word in fiction):
blo_cnt = blo_cnt + 1
for word in dict_mysterious:
if (word in fiction):
mys_cnt = mys_cnt + 1
print (wea_cnt, blo_cnt , mys_cnt)
# write into list
weapon_cnt.append(wea_cnt)
bloody_cnt.append(blo_cnt)
mysterious_cnt.append(mys_cnt)
weapon_cnt
'''
for file in allFiles:
with open (file) as fle:
blo_cnt = 0
fiction = fle.read()
'''
#file_name = os.path.splitext(path + '/*.txt')[0]
#print ('The size of %s is ' % (file_name) + str(len(fiction)))
| apache-2.0 | -6,619,393,933,516,462,000 | 27.604938 | 149 | 0.582569 | false |
gandalfcode/gandalf | examples/example09.py | 1 | 1749 | #==============================================================================
# example09.py
# Create initial conditions for pure N-body simulation inside the python
# script, and then run the simulation to completion while plotting results.
#==============================================================================
from gandalf.analysis.facade import *
import numpy as np
import time
# Create empty numpy arrays for setting star initial conditions
Nstar = 3
x = np.zeros(Nstar)
y = np.zeros(Nstar)
vx = np.zeros(Nstar)
vy = np.zeros(Nstar)
m = np.zeros(Nstar)
h = 0.000001*np.ones(Nstar)
# Set values for each star individually (Note all velocities initially zero)
m[0] = 3.0; x[0] = 1.0; y[0] = 3.0
m[1] = 4.0; x[1] = -2.0; y[1] = -1.0
m[2] = 5.0; x[2] = 1.0; y[2] = -1.0
# Create new 1D simulation object and set parameters
sim = newsim(ndim=2,sim='nbody')
sim.SetParam('ic','python')
sim.SetParam('nbody','hermite4ts')
sim.SetParam('sub_systems',0)
sim.SetParam('Npec',3)
sim.SetParam('Nlevels',1)
sim.SetParam('Nstar',Nstar)
sim.SetParam('tend',80.0)
sim.SetParam('dt_snap',1.0)
sim.SetParam('noutputstep',128)
sim.SetParam('ndiagstep',2048)
sim.SetParam('dimensionless',1)
sim.SetParam('run_id','BURRAU1')
sim.SetParam('out_file_form','su')
# Call setup routines and import particle data
sim.PreSetupForPython()
sim.ImportArray(x,'x','star')
sim.ImportArray(y,'y','star')
sim.ImportArray(vx,'vx','star')
sim.ImportArray(vy,'vy','star')
sim.ImportArray(m,'m','star')
sim.ImportArray(h,'h','star')
sim.SetupSimulation()
# Plot the positions of the stars
plot("x","y",type="star")
limit("x",-30.0,30.0,window="all")
limit("y",-20.0,40.0,window="all")
# Run simulation and save plot to file
run()
block()
| gpl-2.0 | -2,850,669,717,202,946,000 | 29.684211 | 79 | 0.63522 | false |
Sjc1000/PyRC | UI/Disabled/FriendsList.py | 1 | 2227 | #!/usr/bin/env python3
from gi.repository import Gtk, Gdk
import json
class FriendsList():
servers = {}
active_server = None
def __init__(self, MainWindow):
self.MainWindow = MainWindow
self.position = [8, 5, 1, 4]
def prebuild(self):
self.MainWindow.ui_plugins['UserList'].position = (8, 0, 1, 5)
return None
def build(self):
self.scroll_window = Gtk.ScrolledWindow()
self.list = Gtk.ListStore(str, str)
self.view = Gtk.TreeView(self.list)
self.view.set_activate_on_single_click(True)
self.view.set_hexpand(True)
self.view.connect('row-activated', self.clicked)
text_render = Gtk.CellRendererText()
username = Gtk.TreeViewColumn('Friends', text_render, text=0, foreground=1)
self.view.append_column(username)
self.scroll_window.add(self.view)
self.MainWindow.grid.attach(self.scroll_window, *self.position)
return None
def clicked(self, TreeView, TreePath, TreeViewColumn):
        print('Friends list clicked')
return None
def add_friend(self, connection, nickname):
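        # IRC MONITOR extension: "MONITOR + <nick>" asks the server to track the
        # nick's online status; numeric 730 (handled in on730 below) reports
        # monitored nicks that are online.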
connection.send('MONITOR + ' + nickname)
self.servers[connection.server]['friends'][nickname] = {'iter': None, 'online': False}
if connection.server == self.active_server:
iter = self.list.append([nickname, 'grey'])
self.servers[connection.server]['friends'][nickname]['iter'] = iter
return None
def activate_path(self, server, channel, clicked=False):
self.active_server = server
#redraw
return None
def on376(self, connection, *junk):
with open('UI/friends.json', 'r') as ffile:
friends = json.loads(ffile.read())
if connection.server not in friends:
return None
self.servers[connection.server] = {'friends': {}}
for nickname in sorted(friends[connection.server]):
self.add_friend(connection, nickname)
connection.send('MONITOR s')
return None
def on730(self, connection, host, nickname, uhost):
if nickname == connection.nickname:
return None
print( uhost )
return None | gpl-2.0 | -5,208,045,553,747,212,000 | 32.253731 | 94 | 0.619668 | false |
colloquium/spacewalk | client/tools/rhnpush/rhnpush.py | 1 | 27859 | #
# Copyright (c) 2008--2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# $Id$
"""
Management tool for the RHN proxy.
This script performs various management operations on the RHN proxy:
- Creates the local directory structure needed to store local packages
- Uploads packages from a given directory to the RHN servers
- Optionally, once the packages are uploaded, they can be linked to (one or
more) channels, and copied in the local directories for these channels.
- Lists the RHN server's vision on a certain channel
- Checks if the local image of the channel (the local directory) is in sync
with the server's image, and prints the missing packages (or the extra
ones)
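Typical invocation (illustrative only; see the option table in main() for the
authoritative list of switches):
    rhnpush -u <user> -p <password> --server http://<satellite>/APP \
            --channel=<channel-label> --dir=<directory-with-packages>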
"""
import os
import random
import sys
import string
import time
import urlparse
import rhnpush_confmanager
try:
from optparse import Option, OptionParser
except ImportError:
from optik import Option, OptionParser
from rhn import rpclib
from spacewalk.common import rhn_mpm
from spacewalk.common.checksum import getFileChecksum
import uploadLib
import rhnpush_v2
# Global settings
BUFFER_SIZE = 65536
HEADERS_PER_CALL = 10
DEBUG = 0
RPMTAG_NOSOURCE = 1051
def main():
# Initialize a command-line processing object with a table of options
optionsTable = [
Option('-v','--verbose', action='count', help='Increase verbosity', default=0),
Option('-d','--dir', action='store', help='Process packages from this directory'),
Option('-c','--channel', action='append', help='Manage this channel (specified by label)'),
Option('-n','--count', action='store', help='Process this number of headers per call', type='int'),
Option('-l','--list', action='store_true', help='Only list the specified channels'),
Option('-r','--reldir', action='store', help='Relative dir to associate with the file'),
Option('-o','--orgid', action='store', help='Org ID', type='int'),
Option('-u','--username', action='store', help='Use this username to connect to RHN/Satellite'),
Option('-p','--password', action='store', help='Use this password to connect to RHN/Satellite'),
Option('-s','--stdin', action='store_true', help='Read the package names from stdin'),
Option('-X','--exclude', action='append', help='Exclude packages that match this glob expression'),
Option( '--force', action='store_true', help='Force the package upload (overwrites if already uploaded)'),
Option( '--nosig', action='store_true', help='Push unsigned packages'),
Option( '--newest', action='store_true', help='Only push the packages that are newer than the server ones'),
Option( '--nullorg', action='store_true', help='Use the null org id'),
Option( '--header', action='store_true', help='Upload only the header(s)'),
Option( '--source', action='store_true', help='Upload source package information'),
Option( '--server', action='store', help='Push to this server (http[s]://<hostname>/APP)'),
Option( '--proxy', action='store', help='Use proxy server (<server>:<port>)'),
Option( '--test', action='store_true', help='Only print the packages to be pushed'),
Option('-?','--usage', action='store_true', help='Briefly describe the options'),
Option('-N','--new-cache', action='store_true', help='Create a new username/password cache'),
Option( '--no-cache', action='store_true', help='Do not create a username/password cache'),
Option( '--extended-test', action='store_true', help='Perform a more verbose test'),
Option( '--no-session-caching', action='store_true',
help='Disables session-token support. Useful for using rhnpush with pre-4.0.6 satellites.'),
Option( '--tolerant', action='store_true',
help='If rhnpush errors while uploading a package, continue uploading the rest of the packages.')
]
#Having to maintain a store_true list is ugly. I'm trying to get rid of this.
    #12/22/05 wregglej 173287 Added no_cache to true_list so its value gets changed from a string to an int.
true_list = ['usage', 'test', 'source', 'header', 'nullorg', 'newest',\
'nosig', 'force', 'list', 'stdin', 'new_cache','extended_test', 'no_cache',\
'no_session_caching', 'tolerant']
optionParser = OptionParser(option_list=optionsTable, usage="%prog [OPTION] [<package>]")
manager = rhnpush_confmanager.ConfManager(optionParser, true_list)
options = manager.get_config()
upload = UploadClass(options, files=options.files)
if options.usage:
optionParser.print_usage()
sys.exit(0)
if options.list:
if not options.channel:
upload.die(1, "Must specify a channel for --list to work")
upload.list()
return
if options.dir and not options.stdin:
upload.directory()
elif options.stdin and not options.dir:
upload.readStdin()
elif options.dir and options.stdin:
upload.readStdin()
upload.directory()
if options.exclude:
upload.filter_excludes()
if options.newest:
if not options.channel:
upload.die(1, "Must specify a channel for --newest to work")
upload.newest()
if not upload.files:
if upload.newest:
print "No new files to upload; exiting"
else:
print "Nothing to do (try --help for more options)"
sys.exit(0)
if options.test:
upload.test()
return
if options.extended_test:
upload.extended_test()
return
if options.header:
upload.uploadHeaders()
return
ret = upload.packages()
if ret != 0:
return 1
class UploadClass(uploadLib.UploadClass):
def setURL(self):
server = self.options.server
if server is None:
self.die(1, "Required parameter --server not supplied")
scheme, netloc, path, params, query, fragment = urlparse.urlparse(server)
if not netloc:
# No schema - trying to patch it up ourselves?
server = "http://" + server
scheme, netloc, path, params, query, fragment = urlparse.urlparse(server)
if not netloc:
self.die(2, "Invalid URL %s" % server)
if path == '':
path = '/APP'
if string.lower(scheme) not in ('http', 'https'):
self.die(3, "Unknown URL scheme %s" % scheme)
self.url = urlparse.urlunparse((scheme, netloc, path, params, query,
fragment))
self.url_v2 = urlparse.urlunparse((scheme, netloc, "/PACKAGE-PUSH",
params, query, fragment))
def setOrg(self):
if self.options.nullorg:
if self.options.force:
self.die(1, "ERROR: You cannot force a package to a nullorg channel.")
else:
# They push things to the None org id
self.orgId = ''
else:
self.orgId = self.options.orgid or -1
def setForce(self):
if self.options.force:
self.force = 4
else:
self.force = None
def setRelativeDir(self):
self.relativeDir = self.options.reldir
def setChannels(self):
self.channels = self.options.channel or []
def _test_force(self):
test_force_str = "Setting force flag: %s"
test_force = "Passed"
try:
self.setForce()
except:
test_force = "Failed"
print test_force_str % test_force
def _test_set_org(self):
test_set_org_str = "Setting the org: %s"
test_set_org = "Passed"
try:
self.setOrg()
except:
test_set_org = "Failed"
print test_set_org_str % test_set_org
def _test_set_url(self):
test_set_url_str = "Setting the URL: %s"
test_set_url = "Passed"
try:
self.setURL()
except:
test_set_url = "Failed"
print test_set_url_str % test_set_url
def _test_set_channels(self):
test_set_channels_str = "Setting the channels: %s"
test_set_channels = "Passed"
try:
self.setChannels()
except:
test_set_channels = "Failed"
print test_set_channels_str % test_set_channels
def _test_username_password(self):
test_user_pass_str = "Setting the username and password: %s"
test_user_pass = "Passed"
try:
self.setUsernamePassword()
except:
test_user_pass = "Failed"
print test_user_pass_str % test_user_pass
def _test_set_server(self):
test_set_server_str = "Setting the server: %s"
test_set_server = "Passed"
try:
self.setServer()
except:
test_set_server = "Failed"
print test_set_server_str % test_set_server
def _test_connect(self):
auth_ret = uploadLib.call(self.server.packages.test_login, self.username, self.password )
if auth_ret == 1:
test_auth = "Passed"
else:
test_auth = "Failed"
print "Testing connection and authentication: %s" % test_auth
def _test_access(self):
if self.new_sat_test():
access_ret = callable(self.server.packages.channelPackageSubscriptionBySession)
else:
access_ret = callable(self.server.packages.channelPackageSubscription)
if access_ret == 1:
test_access = "Passed"
else:
test_access = "Failed"
print "Testing access to upload functionality on server: %s" % test_access
    #12/22/05 wregglej 173287 Added this function to test the new session authentication stuff.
#It still needs work.
def _test_authenticate(self):
self.authenticate()
def extended_test(self):
self._test_force()
self._test_set_org()
self._test_set_url()
self._test_set_channels()
self._test_username_password()
self._test_set_server()
self._test_connect()
self._test_access()
print "The files that would have been pushed:"
self.test()
def packages(self):
self.setForce()
# set the org
self.setOrg()
# set the URL
self.setURL()
# set the channels
self.setChannels()
# set the server
self.setServer()
#12/22/05 wregglej 173287 authenticate the session.
self.authenticate()
# Do we have the new-style handler available?
#ping the server for status
self.warn(2,"url is",self.url_v2)
ping = rhnpush_v2.PingPackageUpload(self.url_v2, self.options.proxy)
self.ping_status, errmsg, headerinfo = ping.ping()
self.warn(2, "Result codes:", self.ping_status, errmsg)
# move patch clusters to the end because all the patches in the cluster
# have to be pushed before the cluster itself
files1 = []
files2 = []
for file in self.files:
if file.startswith('patch-cluster-'):
files2.append(file)
else:
files1.append(file)
self.files = files1 + files2
channel_packages = []
# a little fault tolarence is in order
random.seed()
checkpkgflag = 0
tries = 3
#pkilambi:check if the Sat version we are talking to has this capability.
#If not use the normal way to talk to older satellites(< 4.1.0).
if headerinfo.getheader('X-RHN-Check-Package-Exists'):
checkpkgflag = 1
(server_digest_hash, pkgs_info, digest_hash) = self.check_package_exists()
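            # server_digest_hash: checksum info the server reports for each package file
            # pkgs_info: package info derived locally from each package header
            # digest_hash: locally computed (checksum_type, checksum) per package file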
for pkg in self.files:
            ret = None #pkilambi: ret was used before assignment on error paths; initializing it here fixes that.
#temporary fix for picking pkgs instead of full paths
pkg_key = (pkg.strip()).split('/')[-1]
if checkpkgflag :
                # it's a newer satellite; compute the checksum checks on the client.
if not server_digest_hash.has_key(pkg_key):
continue
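                # digest is the locally computed (checksum_type, checksum) tuple for this file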
checksum_type, checksum = digest = digest_hash[pkg_key]
server_digest = tuple(server_digest_hash[pkg_key])
                # compare checksums for the existence check
if server_digest == digest and not self.options.force:
channel_packages.append(pkgs_info[pkg_key])
self.warn(1, "Package %s already exists on the RHN Server-- Skipping Upload...." % pkg)
continue
elif server_digest == ():
self.warn(1,"Package %s Not Found on RHN Server -- Uploading" % pkg)
elif server_digest == "on-disk" and not self.options.force:
channel_packages.append(pkgs_info[pkg_key])
self.warn(0,"Package on disk but not on db -- Skipping Upload "%pkg)
continue
elif server_digest != digest:
if self.options.force:
self.warn(1,"Package checksum %s mismatch -- Forcing Upload"% pkg)
else:
msg = """Error: Package %s already exists on the server with a different checksum. Skipping upload to prevent overwriting existing package. (You may use rhnpush with the --force option to force this upload if the force_upload option is enabled on your server.)\n"""% pkg
if not self.options.tolerant:
self.die(-1, msg)
self.warn(0, msg)
continue
else:
                # it's an older satellite (< 4.1.0). Just do the push the usual old way,
# without checksum pre-check.
try:
f = open(pkg)
header, payload_stream = rhn_mpm.load(file=f)
checksum_type = header.checksum_type()
except rhn_mpm.InvalidPackageError, e:
if not self.options.tolerant:
self.die(-1, "ERROR: %s: This file doesn't appear to be a package" % pkg)
self.warn(2, "ERROR: %s: This file doesn't appear to be a package" % pkg)
continue
except IOError:
if not self.options.tolerant:
self.die(-1, "ERROR: %s: No such file or directory available" % pkg)
self.warn(2, "ERROR: %s: No such file or directory available" % pkg)
continue
checksum = getFileChecksum(checksum_type, file=payload_stream)
f.close()
for t in range(0, tries):
try:
ret = self.package(pkg, checksum_type, checksum)
if ret is None:
raise UploadError()
# TODO: Revisit this. We throw this error all over the place,
# but doing so will cause us to skip the --tolerant logic
# below. I don't think we really want this behavior.
# There are some cases where we don't want to retry 3
# times, but not at the expense of disabling the tolerant
# flag, IMHO. This loop needs some lovin'. -- pav
#FIX: it checks for tolerant flag and aborts only if the flag is
#not specified
except UploadError, ue:
if not self.options.tolerant:
self.die(1, ue)
self.warn(2, ue)
except AuthenticationRequired, a:
#session expired so we re-authenticate for the process to complete
#this uses the username and password from memory if available
#else it prompts for one.
self.authenticate()
except:
self.warn(2, sys.exc_info()[1])
wait = random.randint(1, 5)
self.warn(0, "Waiting %d seconds and trying again..." % wait)
time.sleep(wait)
                #The else clause gets executed if the stuff in the try-except block *succeeds*.
else:
break
            #if the preceding for-loop exits without a call to break, then this else clause gets called.
            #What's kind of weird is that if the preceding for-loop doesn't call break, then an error occurred
            #and all of the retry attempts failed. If the for-loop *does* call break then everything is hunky-dory.
            #In short, this else clause only gets called if something is F.U.B.A.R and the retry attempts don't
            #fix anything.
else:
if not self.options.tolerant:
                    #pkilambi:bug#176358:this exits with an error code of 1
self.die(1, "Giving up after %d attempts" % tries)
else:
print "Giving up after %d attempts and continuing on..." % (tries,)
#5/13/05 wregglej - 154248 ?? we still want to add the packages if they're source.
if ret and self.channels: # and ret['arch'] != 'src':
# Don't bother to add the package if
# no channel was specified or a source rpm was passed
channel_packages.append(ret)
#self.channels is never None, it always has at least one entry with an empty string.
if len(self.channels) == 1 and self.channels[0] == '':
return
info = {
'packages' : channel_packages,
'channels' : self.channels
}
if self.orgId == '' or self.orgId > 0:
info['orgId'] = self.orgId
#2/3/06 wregglej 173287 Added check to see if we can use session tokens.
if channel_packages:
if self.new_sat_test():
#12/22/05 wregglej 173287 Changed the XMLRPC function to the new session-based one.
self.authenticate()
uploadLib.call(self.server.packages.channelPackageSubscriptionBySession,
self.session.getSessionString(), info)
else:
uploadLib.call(self.server.packages.channelPackageSubscription, self.username,
self.password, info)
return 0
    # does an existence check of the packages to be uploaded and returns their checksums and other info
def check_package_exists(self):
self.warn(2, "Computing checksum and package info. This may take some time ...")
pkg_hash = {}
digest_hash = {}
for pkg in self.files:
pkg_info = {}
pkg_key = (pkg.strip()).split('/')[-1]
if not os.access(pkg, os.R_OK):
if not self.options.tolerant:
self.die(-1, "Could not read file %s" % pkg)
self.warn(-1, "Could not read file %s" % pkg)
continue
try:
f = open(pkg)
header, payload_stream = rhn_mpm.load(file=f)
checksum_type = header.checksum_type()
except rhn_mpm.InvalidPackageError, e:
if not self.options.tolerant:
self.die(-1, "ERROR: %s: This file doesn't appear to be a package" % pkg)
self.warn(2, "ERROR: %s: This file doesn't appear to be a package" % pkg)
continue
except IOError:
if not self.options.tolerant:
self.die(-1, "ERROR: %s: No such file or directory available" % pkg)
self.warn(2, "ERROR: %s: No such file or directory available" % pkg)
continue
checksum = getFileChecksum(checksum_type, file=payload_stream)
digest_hash[pkg_key] = (checksum_type, checksum)
f.close()
for tag in ('name', 'version', 'release', 'epoch', 'arch'):
val = header[tag]
if val is None:
val = ''
pkg_info[tag] = val
#b195903:the arch for srpms should be obtained by is_source check
#instead of checking arch in header
if header.is_source:
if not self.options.source:
self.die(-1, "ERROR: Trying to Push src rpm, Please re-try with --source.")
if RPMTAG_NOSOURCE in header.keys():
pkg_info['arch'] = 'nosrc'
else:
pkg_info['arch'] = 'src'
pkg_info['checksum_type'] = checksum_type
pkg_info['checksum'] = checksum
pkg_hash[pkg_key] = pkg_info
if self.options.nullorg:
            #to satisfy xmlrpc, which cannot handle None values.
orgid = 'null'
else:
orgid = ''
info = {
'packages' : pkg_hash,
'channels' : self.channels,
'org_id' : orgid,
'force' : self.options.force or 0
}
# rpc call to get checksum info for all the packages to be uploaded
if not self.options.source:
if self.new_sat_test():
                # computing checksums and other info is an expensive process and the
                # session could have expired. Make sure it's re-authenticated.
self.authenticate()
if uploadLib.exists_getPackageChecksumBySession(self.server):
checksum_data = uploadLib.getPackageChecksumBySession(self.server, self.session.getSessionString(), info)
else:
# old server only md5 capable
checksum_data = uploadLib.getPackageMD5sumBySession(self.server, self.session.getSessionString(), info)
else:
# even older server without session authentication
checksum_data = uploadLib.getPackageMD5sum(self.server, self.username, self.password, info)
else:
if self.new_sat_test():
                # computing checksums and other info is an expensive process and the
                # session could have expired. Make sure it's re-authenticated.
self.authenticate()
if uploadLib.exists_getPackageChecksumBySession(self.server):
checksum_data = uploadLib.getSourcePackageChecksumBySession(self.server, self.session.getSessionString(), info)
else:
# old server only md5 capable
checksum_data = uploadLib.getSourcePackageMD5sumBySession(self.server, self.session.getSessionString(), info)
else:
# even older server without session authentication
checksum_data = uploadLib.getSourcePackageMD5sum(self.server, self.username, self.password, info)
return (checksum_data, pkg_hash, digest_hash)
def package(self, package, FileChecksumType, FileChecksum):
self.warn(1, "Uploading package %s" % package)
if not os.access(package, os.R_OK):
self.die(-1, "Could not read file %s" % package)
try:
h = uploadLib.get_header(package, source=self.options.source)
except uploadLib.InvalidPackageError, e:
# GS: MALFORMED PACKAGE
print "Unable to load package", package
return None
if hasattr(h, 'packaging'):
packaging = h.packaging
else:
packaging = 'rpm'
if packaging == 'rpm' and self.options.nosig is None and not h.is_signed():
            #pkilambi:bug#173886: abort on unsigned packages unless --nosig is given
raise UploadError("ERROR: %s: unsigned rpm (use --nosig to force)"% package)
try:
ret = self._push_package_v2(package, FileChecksumType, FileChecksum)
except UploadError, e:
ret, diff_level, pdict = e.args[:3]
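            # the server reported a conflicting package; the exception carries the
            # package info, a severity level and the diff details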
severities = {
1 : 'path changed',
2 : 'package resigned',
3 : 'differing build times or hosts',
4 : 'package recompiled',
}
if severities.has_key(diff_level):
strmsg = \
"Error: Package with same name already exists on " + \
"server but contents differ (" + \
severities[diff_level] + \
"). Use --force or remove old package before " + \
"uploading the newer version."
else:
strmsg = "Error: severity %s" % diff_level
self.warn(-1, "Uploading failed for %s\n%s\n\tDiff: %s" % \
(package, strmsg, pdict['diff']['diff']))
if diff_level != 1:
# This will prevent us from annoyingly retrying when there is
# no reason to.
raise UploadError()
return ret
return ret
def _push_package_v2(self, package, FileChecksumType, FileChecksum):
self.warn(1, "Using POST request")
pu = rhnpush_v2.PackageUpload(self.url_v2, self.options.proxy)
if self.new_sat_test():
pu.set_session(self.session.getSessionString())
else:
pu.set_auth(self.username, self.password)
pu.set_force(self.options.force)
pu.set_null_org(self.options.nullorg)
status, msgstr = pu.upload(package, FileChecksumType, FileChecksum)
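        # status handling: 400 means the package conflicts with an existing one
        # (the response body carries the diff), 403 means the session expired,
        # and anything other than 200 is fatal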
ret = {}
for tag in ('name', 'version', 'release', 'epoch', 'arch'):
val = getattr(pu, "package_%s" % tag)
if val is None:
val = ''
ret[tag] = val
ret['checksum_type'] = FileChecksumType
ret['checksum'] = FileChecksum
if status == 400:
# Bad request - something bad happened
try:
data = rpclib.xmlrpclib.loads(msgstr)
except:
# Raise the exception instead of silently dying
raise UploadError("Error pushing %s: %s (%s)" %
(package, msgstr, status))
(diff_dict, ), methodname = data
del methodname
diff_level = diff_dict['level']
pdict = diff_dict['diff']
raise UploadError(ret, diff_level, pdict)
if status == 403:
            #auth expired; raise an exception so the caller can re-authenticate
raise AuthenticationRequired()
if status != 200:
self.die(1, "Error pushing %s: %s (%s)" % (package, msgstr, status))
return ret
class UploadError(Exception):
pass
class AuthenticationRequired(Exception):
pass
if __name__ == '__main__':
# test code
sys.exit(main() or 0)
| gpl-2.0 | -2,766,869,436,809,004,500 | 40.893233 | 294 | 0.561434 | false |
wujuguang/sqlalchemy | lib/sqlalchemy/dialects/postgresql/pygresql.py | 1 | 8129 | # postgresql/pygresql.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql+pygresql
:name: pygresql
:dbapi: pgdb
:connectstring: postgresql+pygresql://user:password@host:port/dbname[?key=value&key=value...]
:url: http://www.pygresql.org/
.. note::
The pygresql dialect is **not tested as part of SQLAlchemy's continuous
integration** and may have unresolved issues. The recommended PostgreSQL
dialect is psycopg2.
""" # noqa
import decimal
import re
from .base import _DECIMAL_TYPES
from .base import _FLOAT_TYPES
from .base import _INT_TYPES
from .base import PGCompiler
from .base import PGDialect
from .base import PGIdentifierPreparer
from .base import UUID
from .hstore import HSTORE
from .json import JSON
from .json import JSONB
from ... import exc
from ... import processors
from ... import util
from ...sql.elements import Null
from ...types import JSON as Json
from ...types import Numeric
class _PGNumeric(Numeric):
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
if not isinstance(coltype, int):
coltype = coltype.oid
if self.asdecimal:
if coltype in _FLOAT_TYPES:
return processors.to_decimal_processor_factory(
decimal.Decimal, self._effective_decimal_return_scale
)
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
# PyGreSQL returns Decimal natively for 1700 (numeric)
return None
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype
)
else:
if coltype in _FLOAT_TYPES:
# PyGreSQL returns float natively for 701 (float8)
return None
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
return processors.to_float
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype
)
class _PGHStore(HSTORE):
def bind_processor(self, dialect):
if not dialect.has_native_hstore:
return super(_PGHStore, self).bind_processor(dialect)
hstore = dialect.dbapi.Hstore
def process(value):
if isinstance(value, dict):
return hstore(value)
return value
return process
def result_processor(self, dialect, coltype):
if not dialect.has_native_hstore:
return super(_PGHStore, self).result_processor(dialect, coltype)
class _PGJSON(JSON):
def bind_processor(self, dialect):
if not dialect.has_native_json:
return super(_PGJSON, self).bind_processor(dialect)
json = dialect.dbapi.Json
def process(value):
if value is self.NULL:
value = None
elif isinstance(value, Null) or (
value is None and self.none_as_null
):
return None
if value is None or isinstance(value, (dict, list)):
return json(value)
return value
return process
def result_processor(self, dialect, coltype):
if not dialect.has_native_json:
return super(_PGJSON, self).result_processor(dialect, coltype)
class _PGJSONB(JSONB):
def bind_processor(self, dialect):
if not dialect.has_native_json:
return super(_PGJSONB, self).bind_processor(dialect)
json = dialect.dbapi.Json
def process(value):
if value is self.NULL:
value = None
elif isinstance(value, Null) or (
value is None and self.none_as_null
):
return None
if value is None or isinstance(value, (dict, list)):
return json(value)
return value
return process
def result_processor(self, dialect, coltype):
if not dialect.has_native_json:
return super(_PGJSONB, self).result_processor(dialect, coltype)
class _PGUUID(UUID):
def bind_processor(self, dialect):
if not dialect.has_native_uuid:
return super(_PGUUID, self).bind_processor(dialect)
uuid = dialect.dbapi.Uuid
def process(value):
if value is None:
return None
if isinstance(value, (str, bytes)):
if len(value) == 16:
return uuid(bytes=value)
return uuid(value)
if isinstance(value, int):
return uuid(int=value)
return value
return process
def result_processor(self, dialect, coltype):
if not dialect.has_native_uuid:
return super(_PGUUID, self).result_processor(dialect, coltype)
if not self.as_uuid:
def process(value):
if value is not None:
return str(value)
return process
class _PGCompiler(PGCompiler):
def visit_mod_binary(self, binary, operator, **kw):
return (
self.process(binary.left, **kw)
+ " %% "
+ self.process(binary.right, **kw)
)
def post_process_text(self, text):
return text.replace("%", "%%")
class _PGIdentifierPreparer(PGIdentifierPreparer):
def _escape_identifier(self, value):
value = value.replace(self.escape_quote, self.escape_to_quote)
return value.replace("%", "%%")
class PGDialect_pygresql(PGDialect):
driver = "pygresql"
statement_compiler = _PGCompiler
preparer = _PGIdentifierPreparer
@classmethod
def dbapi(cls):
import pgdb
return pgdb
colspecs = util.update_copy(
PGDialect.colspecs,
{
Numeric: _PGNumeric,
HSTORE: _PGHStore,
Json: _PGJSON,
JSON: _PGJSON,
JSONB: _PGJSONB,
UUID: _PGUUID,
},
)
def __init__(self, **kwargs):
super(PGDialect_pygresql, self).__init__(**kwargs)
try:
version = self.dbapi.version
m = re.match(r"(\d+)\.(\d+)", version)
version = (int(m.group(1)), int(m.group(2)))
except (AttributeError, ValueError, TypeError):
version = (0, 0)
self.dbapi_version = version
if version < (5, 0):
has_native_hstore = has_native_json = has_native_uuid = False
if version != (0, 0):
util.warn(
"PyGreSQL is only fully supported by SQLAlchemy"
" since version 5.0."
)
else:
self.supports_unicode_statements = True
self.supports_unicode_binds = True
has_native_hstore = has_native_json = has_native_uuid = True
self.has_native_hstore = has_native_hstore
self.has_native_json = has_native_json
self.has_native_uuid = has_native_uuid
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
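        # fold any explicit port into the host value as "host:port", replacing a
        # port that may already be embedded in the host string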
if "port" in opts:
opts["host"] = "%s:%s" % (
opts.get("host", "").rsplit(":", 1)[0],
opts.pop("port"),
)
opts.update(url.query)
return [], opts
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.Error):
if not connection:
return False
try:
connection = connection.connection
except AttributeError:
pass
else:
if not connection:
return False
try:
return connection.closed
except AttributeError: # PyGreSQL < 5.0
return connection._cnx is None
return False
dialect = PGDialect_pygresql
| mit | -2,064,211,738,100,849,400 | 29.56015 | 97 | 0.570058 | false |
bugbound/webnuke | libs/angular/angularCustomJavascript.py | 1 | 1178 | class AngularCustomJavascript:
def __init__(self, jsinjector):
self.version = 0.1
self.jsinjector = jsinjector
self.jsinjector.add_help_topic('wn_showAngularAppName()', 'Show AngularJS Main Application Name')
self.jsinjector.add_js_file('libs/angular/js/app-name.js')
self.jsinjector.add_help_topic('wn_showAngularDeps()', 'Show AngularJS Main Dependencies')
self.jsinjector.add_js_file('libs/angular/js/angular-deps.js')
self.jsinjector.add_help_topic('wn_showAngularMainClasses()', 'Show AngularJS Main Classes')
self.jsinjector.add_help_topic('wn_showAngularAllClasses()', 'Show AngularJS All Classes')
self.jsinjector.add_js_file('libs/angular/js/angular-tools.js')
self.jsinjector.add_help_topic('wn_testNgResourceClasses()', 'Test ngResource Classes')
self.jsinjector.add_js_file('libs/angular/js/test-ngresource.js')
self.jsinjector.add_help_topic('wn_showAngularRoutes()', 'Show AngularJS URL Routes')
self.jsinjector.add_js_file('libs/angular/js/show-routes.js')
self.jsinjector.add_help_topic('wn_testHTTPClasses()', 'Test Angular Classes with get and query methods')
self.jsinjector.add_js_file('libs/angular/js/test-http.js')
| mit | 6,817,451,471,202,081,000 | 57.9 | 107 | 0.757216 | false |
LockScreen/Backend | venv/lib/python2.7/site-packages/botocore/docs/sharedexample.py | 1 | 9129 | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
import numbers
from botocore.utils import parse_timestamp
from datetime import datetime
class SharedExampleDocumenter(object):
def document_shared_example(self, example, prefix, section,
operation_model):
"""Documents a single shared example based on its definition.
:param example: The model of the example
:param prefix: The prefix to use in the method example.
:param section: The section to write to.
:param operation_model: The model of the operation used in the example
"""
section.style.new_paragraph()
section.write(example.get('description'))
section.style.new_line()
self.document_input(section, example, prefix,
operation_model.input_shape)
self.document_output(section, example, operation_model.output_shape)
def document_input(self, section, example, prefix, shape):
input_section = section.add_new_section('input')
input_section.style.start_codeblock()
if prefix is not None:
input_section.write(prefix)
params = example['input']
comments = example.get('comments')
if comments:
comments = comments.get('input')
param_section = input_section.add_new_section('parameters')
self._document_params(param_section, params, comments, [], shape)
closing_section = input_section.add_new_section('input-close')
closing_section.style.new_line()
closing_section.style.new_line()
closing_section.write('print(response)')
closing_section.style.end_codeblock()
def document_output(self, section, example, shape):
output_section = section.add_new_section('output')
output_section.writeln('Expected Output:')
output_section.style.start_codeblock()
params = example.get('output', {})
# There might not be an output, but we will return metadata anyway
params['ResponseMetadata'] = {"...": "..."}
comments = example.get('comments')
if comments:
comments = comments.get('output')
self._document_dict(output_section, params, comments, [], shape, True)
closing_section = output_section.add_new_section('output-close')
closing_section.style.end_codeblock()
def _document(self, section, value, comments, path, shape):
"""
:param section: The section to add the docs to.
:param value: The input / output values representing the parameters that
are included in the example.
:param comments: The dictionary containing all the comments to be
applied to the example.
:param path: A list describing where the documenter is in traversing the
parameters. This is used to find the equivalent location
in the comments dictionary.
"""
if isinstance(value, dict):
self._document_dict(section, value, comments, path, shape)
elif isinstance(value, list):
self._document_list(section, value, comments, path, shape)
elif isinstance(value, numbers.Number):
self._document_number(section, value, path)
elif shape and shape.type_name == 'timestamp':
self._document_datetime(section, value, path)
else:
self._document_str(section, value, path)
def _document_dict(self, section, value, comments, path, shape,
top_level=False):
dict_section = section.add_new_section('dict-value')
self._start_nested_value(dict_section, '{')
for key, val in value.items():
path.append('.%s' % key)
item_section = dict_section.add_new_section(key)
item_section.style.new_line()
item_comment = self._get_comment(path, comments)
if item_comment:
item_section.write(item_comment)
item_section.style.new_line()
item_section.write("'%s': " % key)
# Shape could be none if there is no output besides ResponseMetadata
item_shape = None
if shape:
if shape.type_name == 'structure':
item_shape = shape.members.get(key)
elif shape.type_name == 'map':
item_shape = shape.value
self._document(item_section, val, comments, path, item_shape)
path.pop()
dict_section_end = dict_section.add_new_section('ending-brace')
self._end_nested_value(dict_section_end, '}')
if not top_level:
dict_section_end.write(',')
def _document_params(self, section, value, comments, path, shape):
param_section = section.add_new_section('param-values')
self._start_nested_value(param_section, '(')
for key, val in value.items():
path.append('.%s' % key)
item_section = param_section.add_new_section(key)
item_section.style.new_line()
item_comment = self._get_comment(path, comments)
if item_comment:
item_section.write(item_comment)
item_section.style.new_line()
item_section.write(key + '=')
# Shape could be none if there are no input parameters
item_shape = None
if shape:
item_shape = shape.members.get(key)
self._document(item_section, val, comments, path, item_shape)
path.pop()
param_section_end = param_section.add_new_section('ending-parenthesis')
self._end_nested_value(param_section_end, ')')
def _document_list(self, section, value, comments, path, shape):
list_section = section.add_new_section('list-section')
self._start_nested_value(list_section, '[')
item_shape = shape.member
for index, val in enumerate(value):
item_section = list_section.add_new_section(index)
item_section.style.new_line()
path.append('[%s]' % index)
item_comment = self._get_comment(path, comments)
if item_comment:
item_section.write(item_comment)
item_section.style.new_line()
self._document(item_section, val, comments, path, item_shape)
path.pop()
list_section_end = list_section.add_new_section('ending-bracket')
self._end_nested_value(list_section_end, '],')
def _document_str(self, section, value, path):
# We do the string conversion because this might accept a type that
# we don't specifically address.
section.write("'%s'," % str(value))
def _document_number(self, section, value, path):
section.write("%s," % str(value))
def _document_datetime(self, section, value, path):
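        # render the timestamp as a datetime(...) constructor call built from the
        # parsed time tuple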
datetime_tuple = parse_timestamp(value).timetuple()
datetime_str = str(datetime_tuple[0])
for i in range(1, len(datetime_tuple)):
datetime_str += ", " + str(datetime_tuple[i])
section.write("datetime(%s)," % datetime_str)
def _get_comment(self, path, comments):
        key = re.sub(r'^\.', '', ''.join(path))
if comments and key in comments:
return '# ' + comments[key]
else:
return ''
def _start_nested_value(self, section, start):
section.write(start)
section.style.indent()
section.style.indent()
def _end_nested_value(self, section, end):
section.style.dedent()
section.style.dedent()
section.style.new_line()
section.write(end)
def document_shared_examples(section, operation_model, example_prefix,
shared_examples):
"""Documents the shared examples
:param section: The section to write to.
:param operation_model: The model of the operation.
:param example_prefix: The prefix to use in the method example.
:param shared_examples: The shared JSON examples from the model.
"""
container_section = section.add_new_section('shared-examples')
container_section.style.new_paragraph()
container_section.style.bold('Examples')
documenter = SharedExampleDocumenter()
for example in shared_examples:
documenter.document_shared_example(
example=example,
section=container_section.add_new_section(example['id']),
prefix=example_prefix,
operation_model=operation_model
)
| mit | -1,774,796,653,096,055,800 | 40.684932 | 80 | 0.614197 | false |
goulu/Goulib | tests/test_Goulib_itertools2.py | 1 | 16319 | #!/usr/bin/env python
# coding: utf8
from nose.tools import assert_equal, assert_not_equals
from nose import SkipTest
#lines above are inserted automatically by pythoscope. Line below overrides them
from Goulib.tests import *
from Goulib.itertools2 import *
class TestTake:
def test_take(self):
assert_equal(take(3, irange(1,10)),[1,2,3])
class TestIndex:
def test_index(self):
assert_equal(index(4, irange(1,10)),3)
assert_equal(index(9, irange(1,10)),8)
class TestFirst:
def test_first(self):
assert_equal(first(irange(1,10)),1)
assert_equal(first('abc'),'a')
class TestLast:
def test_last(self):
assert_equal(last(irange(1,10)),10)
class TestTakeEvery:
def test_take_every(self):
assert_equal(every(2, irange(1,10)),[1,3,5,7,9])
assert_equal(takeevery(3,irange(1,10)), [1,4,7,10])
class TestDrop:
def test_drop(self):
assert_equal(drop(5, irange(1,10)),[6,7,8,9,10])
class TestIlen:
def test_ilen(self):
assert_equal(ilen(irange(10,0)),0)
assert_equal(ilen(irange(11,20)),10)
class TestIrange:
def test_irange(self):
assert_equal(irange(1,5),[1,2,3,4,5])
class TestArange:
def test_arange(self):
assert_equal(arange(-1,2.5,.5),[-1,-0.5,0,0.5,1,1.5,2])
assert_equal(arange(2,-1.5,.5),reversed([-1,-0.5,0,0.5,1,1.5,2]))
l=list(arange(1,step=.01))
assert_equal(len(l),100)
class TestLinspace:
def test_linspace(self):
assert_equal(linspace(-1,2,7),[-1,-0.5,0,0.5,1,1.5,2])
assert_equal(linspace(1,1,7),[1,1,1,1,1,1,1])
assert_equal(linspace((1,0),(0,1),3),[(1,0),(.5,.5),(0,1)])
class TestFlatten:
def test_flatten(self):
f=list(flatten([[1,2],[3]]))
assert_equal(f,[1,2,3])
assert_equal(flatten([1,[2,[3]]]),[1,2,3])
assert_equal(flatten(['a',['bc']]),['a','bc']) #do not recurse in strings
assert_equal(flatten([[[1],(2,[3])]],(tuple)),[1,(2,[3])]) # do not recurse in tuple
d=dict(enumerate(range(10)))
assert_equal(flatten(d),range(10))
class TestGroups:
def test_groups(self):
assert_equal(groups(irange(1,6),3,2),[[1,2,3],[3,4,5]])
assert_equal(groups([1,2,3,4,5,6],3,2),[[1,2,3],[3,4,5]])
assert_equal(groups([1,2,3,4,5,6],3),[[1,2,3],[4,5,6]])
assert_equal(groups([1,2,3,4,5,6],4),[[1,2,3,4]])
class TestReshape:
def test_reshape(self):
data=[1,[2,[3,4],[5,6,7]]] #data can have any shape...
assert_equal(reshape(data,(2,3)),[[1,2,3],[4,5,6]])
assert_equal(reshape(data,(3,2)),[[1,2],[3,4],[5,6]])
assert_equal(reshape(data,(3,3)),[[1,2,3],[4,5,6],[7]])
class TestCompose:
def test_compose(self):
from math import sin
f=compose(sin, lambda x:x*x)
assert_equal(f(2),sin(4))
class TestIterate:
def test_iterate(self):
assert_equal(take(4,iterate(lambda x:x*x, 2)), [2,4,16,16*16])
class TestIsIterable:
def test_isiterable(self):
assert_false(isiterable(123))
assert_false(isiterable('a string'))
assert_true(isiterable([]))
assert_true(isiterable(tuple()))
assert_true(isiterable({}))
assert_true(isiterable(set()))
assert_true(isiterable((x for x in range(10))))
assert_true(isiterable(map(lambda x:x*x,[1,2,3])))
class TestTails:
def test_tails(self):
assert_equal(tails([1,2,3]),[[1,2,3], [2,3], [3], []])
class TestIreduce:
def test_ireduce(self):
import operator
assert_equal(ireduce(operator.add, irange(10)),[1,3,6,10,15,21,28,36,45,55])
assert_equal(ireduce(operator.add, irange(10),2),[2,2,3,5,8,12,17,23,30,38,47,57])
class TestCompress:
def test_compress(self):
assert_equal(compress('AAAABBBCCDAABBB'),[('A', 4),('B', 3),('C', 2),('D', 1),('A', 2),('B', 3)])
# https://www.linkedin.com/groups/25827/25827-6166706414627627011
res=compress('aaaaabbbbccccccaaaaaaa')
res=''.join('%d%s'%(n,c) for (c,n) in res)
assert_equal(res,'5a4b6c7a')
class TestDecompress:
def test_decompress(self):
data='aaaaabbbbccccccaaaaaaa';
res=compress(data)
data2=decompress(res)
assert_equal(data2,data)
class TestUnique:
def test_unique(self):
assert_equal(unique('AAAABBBCCDAABBB'),'ABCD')
assert_equal(unique('ABBCcAD', str.upper),'ABCD')
assert_equal(unique('ZZZZBBBCCDAABBB',buffer=1),'ZBCDAB')
# harmless regression ...
# s=list(unique('AAAABBBCCDAABBB',buffer=4))
# assert_equal(s,'ABCD')
class TestIdentity:
def test_identity(self):
x=object()
assert_equal(identity(x),x)
class TestAny:
def test_any(self):
assert_true(any((1,2,3,4),lambda x:x>3))
assert_false(any((1,2,3,4),lambda x:x>4))
class TestAll:
def test_all(self):
assert_true(all((1,2,3,4),lambda x:x<5))
assert_false(all((1,2,3,4),lambda x:x<4))
class TestNo:
def test_no(self):
assert_true(no((1,2,3,4),lambda x:x<1))
assert_false(no((1,2,3,4),lambda x:x<2))
class TestTakenth:
def test_takenth(self):
#http://stackoverflow.com/questions/12007820/better-ways-to-get-nth-element-from-an-unsubscriptable-iterable
from itertools import permutations
assert_equal(nth(1000,permutations(range(10), 10)),
(0, 1, 2, 4, 6, 5, 8, 9, 3, 7)
)
class TestIcross:
def test_icross(self):
assert_equal(icross([1,2,5],[2,3]),
[(1,2),(1,3),(2,2),(2,3),(5,2),(5,3)]
)
class TestQuantify:
def test_quantify(self):
from Goulib.math2 import is_pentagonal
assert_equal(quantify(irange(1,100), is_pentagonal),8)
class TestPairwise:
def test_pairwise(self):
assert_equal(pairwise([1,2,3]),[(1,2),(2,3)])
assert_equal(pairwise([1,2,3],operator.add),[3,5])
assert_equal(pairwise([1,2,3],loop=True),[(1,2),(2,3),(3,1)])
assert_equal(pairwise([1,2,3],operator.add,loop=True),[3,5,4])
assert_equal(pairwise([]),[])
assert_equal(pairwise([1]),[])
assert_equal(pairwise([1],loop=True),[(1,1)])
class TestInterleave:
def test_interleave(self):
assert_equal(interleave([0,2,4],[1,3,5]),[0,1,2,3,4,5])
assert_equal(interleave([0,2,4],[1,3]),[0,1,2,3,4])
assert_equal(interleave([0],[]),[0])
class TestRandSeq:
def test_rand_seq(self):
# assert_equal(expected, rand_seq(size))
raise SkipTest
class TestAllPairs:
def test_all_pairs(self):
# assert_equal(expected, all_pairs(size))
raise SkipTest
class TestFilter2:
def test_filter2(self):
yes,no=filter2([1,2,3,4,3,2,1],lambda x:x<3)
assert_equal(yes,[1,2,2,1])
assert_equal(no,[3,4,3])
class TestIfind:
def test_ifind(self):
pass #tested below
class TestFind:
def test_find(self):
assert_equal(find([0,1,2,3,4],lambda x:x>2),(3,3))
class TestIsplit:
def test_isplit(self):
pass #tested below
class TestSplit:
def test_split(self):
assert_equal(split([0,1,2,-1,3,4,5], lambda x:x<0),[[0,1,2],[3,4,5]])
assert_equal(split([-1,0,1,2,-1,3,4,5,-1], lambda x:x<0),[[],[0,1,2],[3,4,5],[]])
assert_equal(split([-1,0,1,2,-1,3,4,5,-1], lambda x:x<0,True),[[],[-1,0,1,2],[-1,3,4,5],[-1]])
class TestNextPermutation:
def test_next_permutation(self):
res=take(10,next_permutation(list('hello')))
res=[''.join(x) for x in res]
res=','.join(res)
assert_equal(res,'hello,helol,heoll,hlelo,hleol,hlleo,hlloe,hloel,hlole,hoell')
class TestIter2:
def test___add__(self):
i1 = iter2(irange(1,5))
i2 = iter2(irange(6,10))
assert_equal(i1+i2,range(1,11))
def test___init__(self):
# iter2 = iter2(iterable)
raise SkipTest
def test___iter__(self):
# iter2 = iter2(iterable)
# assert_equal(expected, iter2.__iter__())
raise SkipTest
def test_append(self):
# iter2 = iter2(iterable)
# assert_equal(expected, iter2.append(iterable))
raise SkipTest
def test_insert(self):
# iter2 = iter2(iterable)
# assert_equal(expected, iter2.insert(place, iterable))
raise SkipTest
def test_next(self):
# iter2 = iter2(iterable)
# assert_equal(expected, iter2.next())
raise SkipTest
def test___next__(self):
# iter2 = iter2(iterable)
# assert_equal(expected, iter2.__next__())
raise SkipTest
class TestProduct:
def test_product(self):
#test compatibility with itertools.product
assert_equal(itertools2.product(),itertools.product())
assert_equal(itertools2.product([]),itertools.product([]))
assert_equal(itertools2.product('ABCD', 'xy'),itertools.product('ABCD', 'xy'))
# assert_equal(itertools2.product('AB', 'wxyz'),itertools.product('AB', 'wxyz'))
assert_equal(itertools2.product(range(2), repeat=3),itertools.product(range(2), repeat=3))
#test case from http://stackoverflow.com/questions/12093364/cartesian-product-of-large-iterators-itertools
g = product(itertools.permutations(range(100)),repeat=2)
assert_equal(next(g),(range(100),range(100)))
class TestCombinationsWithReplacement:
def test_combinations_with_replacement(self):
assert_equal(combinations_with_replacement('ABC', 2),
['AA','AB','BB','AC','BC','CC'])
assert_equal(combinations_with_replacement('AB', 4),
['AAAA','AAAB','AABB','ABBB','BBBB'])
class TestCountUnique:
def test_count_unique(self):
assert_equal(count_unique('AAAABBBCCDAABBB'),4)
assert_equal(count_unique('ABBCcAD', str.lower),4)
class TestBest:
def test_best(self):
assert_equal(best([3,2,1,2,1]),[1,1])
assert_equal(best([3,2,1,2,1],reverse=True,n=2),[3,2,2])
class TestRemovef:
def test_removef(self):
l=[0,1,'a',None,3.14,[]]
r=removef(l,lambda x:True if not x else False)
assert_equal(r,[0,None,[]])
assert_equal(l,[1,'a',3.14])
class TestShuffle:
def test_shuffle(self):
s1=list("hello world")
s2=shuffle(list("hello world")) #copy, as shuffle works in place
assert_not_equal(s1,s2) #would really be bad luck ...
assert_equal(occurences(s1),occurences(s2))
class TestIndexMin:
def test_index_min(self):
assert_equal(index_min("hallo~welt"),(1,'a'))
class TestIndexMax:
def test_index_max(self):
assert_equal(index_max("hello world"),(6,'w'))
class TestTakeevery:
def test_takeevery(self):
# assert_equal(expected, takeevery(n, iterable))
raise SkipTest
class TestSortIndexes:
def test_sort_indexes(self):
# assert_equal(expected, sort_indexes(iterable, key, reverse))
raise SkipTest
class TestSubdict:
def test_subdict(self):
# assert_equal(expected, subdict(d, keys))
raise SkipTest
class TestAccumulate:
def test_accumulate(self):
# assert_equal(expected, accumulate(iterable, func, skip_first))
raise SkipTest
class TestDiff:
def test_diff(self):
# assert_equal(expected, diff(iterable1, iterable2))
raise SkipTest
class TestSortedIterable:
def test_sorted_iterable(self):
data=[1,2,3,7,6,5,4]
res=sorted(data)
#with a small buffer, it fails
def test(iterable,buffer,key=None):
return [x for x in ensure_sorted(
sorted_iterable(iterable,key=key, buffer=buffer)
,key=key)]
assert_raises(SortingError,test,data,3)
#with a larger one, it's ok
assert_equal(test(data,buffer=4),res)
class TestIsiterable:
def test_isiterable(self):
assert_true(isiterable(list()))
assert_true(isiterable(tuple()))
assert_true(isiterable(range(1000)))
assert_false(isiterable(''))
class TestItemgetter:
def test_itemgetter(self):
# assert_equal(expected, itemgetter(iterable, i))
raise SkipTest
class TestTee:
def test_tee(self):
it=itertools.count()
it,it1,it2=tee(it,n=3)
assert_equal(next(it1),next(it2))
assert_equal(next(it1),next(it2))
assert_equal(next(it),0)
class TestIremove:
def test_iremove(self):
# assert_equal(expected, iremove(iterable, f))
raise SkipTest
class TestDictsplit:
def test_dictsplit(self):
# assert_equal(expected, dictsplit(dic, keys))
raise SkipTest
class TestShape:
def test_shape(self):
data=[[[5,6,7],2,[3,4]],1] #data can have any shape...
assert_equal(shape(data),(2,3,3)) #... but shape is evaluated from [0]
class TestNdim:
def test_ndim(self):
data=[[[5,6,7],2,[3,4]],1] #data can have any shape...
assert_equal(ndim(data),3) #... but shape is evaluated from [0]
class TestEnumerates:
def test_enumerates(self):
r=range(10)
d=dict(enumerate(r))
assert_equal(enumerates(d),enumerates(r))
class TestEnsureSorted:
def test_ensure_sorted(self):
# assert_equal(expected, ensure_sorted(iterable, key))
raise SkipTest # implement your test here
class TestIscallable:
def test_iscallable(self):
# assert_equal(expected, iscallable(f))
raise SkipTest # implement your test here
class TestIntersect:
def test_intersect(self):
# http://stackoverflow.com/questions/969709/joining-a-set-of-ordered-integer-yielding-python-iterators
postings = [[1, 100, 142, 322, 12312],
[2, 100, 101, 322, 1221],
[100, 142, 322, 956, 1222]]
assert_equal(intersect(*postings),[100, 322])
class TestKeep:
@classmethod
def setup_class(self):
l=[1,2,3,4,5,6,7,8,9]
k=keep(l)
kl=list(k)
assert_equal(kl,l)
assert_equal(k.val,l[-1])
def test___init__(self):
pass #tested in test_detect_cycle
def test___iter__(self):
pass #tested in test_detect_cycle
def test_next(self):
pass #tested in test_detect_cycle
def test___next__(self):
# keep = keep(iterable)
# assert_equal(expected, keep.__next__())
raise SkipTest # implement your test here
class TestFirstMatch:
def test_first_match(self):
pass #tested in test_detect_cycle
class TestDetectCycle:
def test_detect_cycle(self):
assert_equal(detect_cycle(list('123412341')),(0,4))
assert_equal(detect_cycle(list('012345'+'678'*4)),(6,3))
assert_equal(detect_cycle(list('012345'+'678'*3)),(6,3))
#Floyd fails when repetition isn't long enough (2*i ?):
assert_equal(floyd(list('012345'+'678'*3)),(None,None))
#test from https://rosettacode.org/wiki/Cycle_detection
assert_equal(detect_cycle([3,10,101,2,5,26,167,95,101,2,5,26,167,95]),(2,6))
"""does not work yet because of repeating digits
p3=[1, 1, 2, 0, 2, 2, 1, 0, 1, 1, 2, 0, 2, 2, 1, 0, 1, 1, 2, 0, 2, 2,
1, 0, 1, 1, 2, 0, 2, 2, 1, 0, 1, 1, 2, 0, 2, 2, 1, 0, 1, 1, 2, 0, 2,
2, 1, 0, 1, 1, 2, 0, 2, 2, 1, 0, 1, 1, 2, 0, 2, 2, 1, 0, 1, 1, 2, 0,
2, 2, 1, 0, 1, 1, 2, 0, 2, 2, 1, 0, 1, 1, 2, 0, 2, 2, 1, 0, 1, 1, 2,
0, 2, 2, 1, 0, 1, 1, 2, 0, 2, 2]
assert_equal(detect_cycle(p3)[1],8)
"""
from Goulib.math2 import pi_digits_gen
assert_equal(detect_cycle(pi_digits_gen()),(1,2)) # same problem ..
class TestFloyd:
def test_floyd(self):
# assert_equal(expected, floyd(iterable, limit))
raise SkipTest # implement your test here
if __name__ == "__main__":
runmodule()
| lgpl-3.0 | -4,727,952,602,423,984,000 | 31.50924 | 116 | 0.578896 | false |
joyider/op_mon | tests/test_functional.py | 1 | 3998 | # -*- coding: utf-8 -*-
"""Functional tests using WebTest.
See: http://webtest.readthedocs.org/
"""
from flask import url_for
from op_mon.user.models import User
from .factories import UserFactory
class TestLoggingIn:
"""Login."""
def test_can_log_in_returns_200(self, user, testapp):
"""Login successful."""
# Goes to homepage
res = testapp.get('/')
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'myprecious'
# Submits
res = form.submit().follow()
assert res.status_code == 200
def test_sees_alert_on_log_out(self, user, testapp):
"""Show alert on logout."""
res = testapp.get('/')
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'myprecious'
# Submits
res = form.submit().follow()
res = testapp.get(url_for('public.logout')).follow()
# sees alert
assert 'You are logged out.' in res
def test_sees_error_message_if_password_is_incorrect(self, user, testapp):
"""Show error if password is incorrect."""
# Goes to homepage
res = testapp.get('/')
# Fills out login form, password incorrect
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'wrong'
# Submits
res = form.submit()
# sees error
assert 'Invalid password' in res
def test_sees_error_message_if_username_doesnt_exist(self, user, testapp):
"""Show error if username doesn't exist."""
# Goes to homepage
res = testapp.get('/')
# Fills out login form, password incorrect
form = res.forms['loginForm']
form['username'] = 'unknown'
form['password'] = 'myprecious'
# Submits
res = form.submit()
# sees error
assert 'Unknown user' in res
class TestRegistering:
"""Register a user."""
def test_can_register(self, user, testapp):
"""Register a new user."""
old_count = len(User.query.all())
# Goes to homepage
res = testapp.get('/')
# Clicks Create Account button
res = res.click('Create account')
# Fills out the form
form = res.forms['registerForm']
form['username'] = 'foobar'
form['email'] = '[email protected]'
form['password'] = 'secret'
form['confirm'] = 'secret'
# Submits
res = form.submit().follow()
assert res.status_code == 200
# A new user was created
assert len(User.query.all()) == old_count + 1
def test_sees_error_message_if_passwords_dont_match(self, user, testapp):
"""Show error if passwords don't match."""
# Goes to registration page
res = testapp.get(url_for('public.register'))
# Fills out form, but passwords don't match
form = res.forms['registerForm']
form['username'] = 'foobar'
form['email'] = '[email protected]'
form['password'] = 'secret'
form['confirm'] = 'secrets'
# Submits
res = form.submit()
# sees error message
assert 'Passwords must match' in res
def test_sees_error_message_if_user_already_registered(self, user, testapp):
"""Show error if user already registered."""
user = UserFactory(active=True) # A registered user
user.save()
# Goes to registration page
res = testapp.get(url_for('public.register'))
# Fills out form, but username is already registered
form = res.forms['registerForm']
form['username'] = user.username
form['email'] = '[email protected]'
form['password'] = 'secret'
form['confirm'] = 'secret'
# Submits
res = form.submit()
# sees error
assert 'Username already registered' in res
| bsd-3-clause | 3,693,033,976,702,981,600 | 32.316667 | 80 | 0.576788 | false |