# ==== clemkoa/scikit-learn :: examples/covariance/plot_outlier_detection.py (license: bsd-3-clause) ====
"""
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates four
different ways of performing :ref:`outlier_detection`:

- based on a robust estimator of covariance, which assumes that the data
  are Gaussian distributed, and hence performs better than the One-Class
  SVM in that case;
- using the One-Class SVM and its ability to capture the shape of the
  data set, hence performing better when the data is strongly
  non-Gaussian, i.e. with two well-separated clusters;
- using the Isolation Forest algorithm, which is based on random forests and
  hence better adapted to large-dimensional settings, even though it performs
  quite well in the examples below;
- using the Local Outlier Factor to measure the local deviation of a given
  data point with respect to its neighbors, by comparing their local density.

The ground truth about inliers and outliers is given by the points' colors,
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
print(__doc__)
rng = np.random.RandomState(42)
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"Robust covariance": EllipticEnvelope(contamination=outliers_fraction),
"Isolation Forest": IsolationForest(max_samples=n_samples,
contamination=outliers_fraction,
random_state=rng),
"Local Outlier Factor": LocalOutlierFactor(
n_neighbors=35,
contamination=outliers_fraction)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 100), np.linspace(-7, 7, 100))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = -1
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model
plt.figure(figsize=(9, 7))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
if clf_name == "Local Outlier Factor":
y_pred = clf.fit_predict(X)
scores_pred = clf.negative_outlier_factor_
else:
clf.fit(X)
scores_pred = clf.decision_function(X)
y_pred = clf.predict(X)
threshold = stats.scoreatpercentile(scores_pred,
100 * outliers_fraction)
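        # Illustrative note: with outliers_fraction = 0.25, the threshold is
        # the 25th percentile of the decision scores, so roughly the
        # lowest-scoring quarter of the samples is reported as outliers.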
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
if clf_name == "Local Outlier Factor":
# decision_function is private for LOF
Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(2, 2, i + 1)
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white',
s=20, edgecolor='k')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black',
s=20, edgecolor='k')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=10),
loc='lower right')
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.suptitle("Outlier detection")
plt.show()
# ==== backtou/longlab :: gnuradio-core/src/python/gnuradio/gr/qa_copy.py (license: gpl-3.0) ====
#!/usr/bin/env python
#
# Copyright 2009,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
class test_copy(gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_copy (self):
src_data = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
expected_result = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
src = gr.vector_source_b(src_data)
op = gr.copy(gr.sizeof_char)
dst = gr.vector_sink_b()
self.tb.connect(src, op, dst)
self.tb.run()
dst_data = dst.data()
self.assertEqual(expected_result, dst_data)
def test_copy_drop (self):
src_data = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
expected_result = ()
src = gr.vector_source_b(src_data)
op = gr.copy(gr.sizeof_char)
op.set_enabled(False)
dst = gr.vector_sink_b()
self.tb.connect(src, op, dst)
self.tb.run()
dst_data = dst.data()
self.assertEqual(expected_result, dst_data)
if __name__ == '__main__':
gr_unittest.run(test_copy, "test_copy.xml")
# ==== dimdung/boto :: tests/unit/vpc/test_internetgateway.py (license: mit) ====
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.vpc import VPCConnection, InternetGateway
class TestDescribeInternetGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DescribeInternetGatewaysResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<internetGatewaySet>
<item>
<internetGatewayId>igw-eaad4883EXAMPLE</internetGatewayId>
<attachmentSet>
<item>
<vpcId>vpc-11ad4878</vpcId>
<state>available</state>
</item>
</attachmentSet>
<tagSet/>
</item>
</internetGatewaySet>
</DescribeInternetGatewaysResponse>
"""
def test_describe_internet_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.get_all_internet_gateways(
'igw-eaad4883EXAMPLE', filters=[('attachment.state', ['available', 'pending'])])
self.assert_request_parameters({
'Action': 'DescribeInternetGateways',
'InternetGatewayId.1': 'igw-eaad4883EXAMPLE',
'Filter.1.Name': 'attachment.state',
'Filter.1.Value.1': 'available',
'Filter.1.Value.2': 'pending'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(len(api_response), 1)
self.assertIsInstance(api_response[0], InternetGateway)
self.assertEqual(api_response[0].id, 'igw-eaad4883EXAMPLE')
class TestCreateInternetGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateInternetGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<internetGateway>
<internetGatewayId>igw-eaad4883</internetGatewayId>
<attachmentSet/>
<tagSet/>
</internetGateway>
</CreateInternetGatewayResponse>
"""
def test_create_internet_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_internet_gateway()
self.assert_request_parameters({
'Action': 'CreateInternetGateway'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertIsInstance(api_response, InternetGateway)
self.assertEqual(api_response.id, 'igw-eaad4883')
class TestDeleteInternetGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DeleteInternetGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteInternetGatewayResponse>
"""
def test_delete_internet_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.delete_internet_gateway('igw-eaad4883')
self.assert_request_parameters({
'Action': 'DeleteInternetGateway',
'InternetGatewayId': 'igw-eaad4883'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestAttachInternetGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<AttachInternetGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</AttachInternetGatewayResponse>
"""
def test_attach_internet_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.attach_internet_gateway(
'igw-eaad4883', 'vpc-11ad4878')
self.assert_request_parameters({
'Action': 'AttachInternetGateway',
'InternetGatewayId': 'igw-eaad4883',
'VpcId': 'vpc-11ad4878'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestDetachInternetGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DetachInternetGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DetachInternetGatewayResponse>
"""
def test_detach_internet_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.detach_internet_gateway(
'igw-eaad4883', 'vpc-11ad4878')
self.assert_request_parameters({
'Action': 'DetachInternetGateway',
'InternetGatewayId': 'igw-eaad4883',
'VpcId': 'vpc-11ad4878'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
if __name__ == '__main__':
unittest.main()
# ==== tanderegg/ansible-modules-core :: cloud/amazon/ec2_facts.py (license: gpl-3.0) ====
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_facts
short_description: Gathers facts about remote hosts within ec2 (aws)
version_added: "1.0"
options:
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: '1.5.1'
description:
- This module fetches data from the metadata servers in ec2 (aws) as per
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html.
The module must be called from within the EC2 instance itself.
notes:
- Parameters to filter on ec2_facts may be added later.
author: "Silviu Dicu (@silviud) <[email protected]>"
'''
EXAMPLES = '''
# Conditional example
- name: Gather facts
action: ec2_facts
- name: Conditional
action: debug msg="This instance is a t1.micro"
when: ansible_ec2_instance_type == "t1.micro"
'''
import socket
import re
socket.setdefaulttimeout(5)
class Ec2Metadata(object):
ec2_metadata_uri = 'http://169.254.169.254/latest/meta-data/'
ec2_sshdata_uri = 'http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key'
ec2_userdata_uri = 'http://169.254.169.254/latest/user-data/'
AWS_REGIONS = ('ap-northeast-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-central-1',
'eu-west-1',
'sa-east-1',
'us-east-1',
'us-west-1',
'us-west-2',
'us-gov-west-1'
)
def __init__(self, module, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None):
self.module = module
self.uri_meta = ec2_metadata_uri or self.ec2_metadata_uri
self.uri_user = ec2_userdata_uri or self.ec2_userdata_uri
self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri
self._data = {}
self._prefix = 'ansible_ec2_%s'
def _fetch(self, url):
(response, info) = fetch_url(self.module, url, force=True)
if response:
data = response.read()
else:
data = None
return data
def _mangle_fields(self, fields, uri, filter_patterns=['public-keys-0']):
new_fields = {}
for key, value in fields.iteritems():
split_fields = key[len(uri):].split('/')
if len(split_fields) > 1 and split_fields[1]:
new_key = "-".join(split_fields)
new_fields[self._prefix % new_key] = value
else:
new_key = "".join(split_fields)
new_fields[self._prefix % new_key] = value
for pattern in filter_patterns:
for key in new_fields.keys():
match = re.search(pattern, key)
if match:
new_fields.pop(key)
return new_fields
def fetch(self, uri, recurse=True):
raw_subfields = self._fetch(uri)
if not raw_subfields:
return
subfields = raw_subfields.split('\n')
for field in subfields:
if field.endswith('/') and recurse:
self.fetch(uri + field)
if uri.endswith('/'):
new_uri = uri + field
else:
new_uri = uri + '/' + field
if new_uri not in self._data and not new_uri.endswith('/'):
content = self._fetch(new_uri)
if field == 'security-groups':
sg_fields = ",".join(content.split('\n'))
self._data['%s' % (new_uri)] = sg_fields
else:
self._data['%s' % (new_uri)] = content
def fix_invalid_varnames(self, data):
"""Change ':'' and '-' to '_' to ensure valid template variable names"""
for (key, value) in data.items():
if ':' in key or '-' in key:
newkey = key.replace(':','_').replace('-','_')
del data[key]
data[newkey] = value
def add_ec2_region(self, data):
"""Use the 'ansible_ec2_placement_availability_zone' key/value
pair to add 'ansible_ec2_placement_region' key/value pair with
the EC2 region name.
"""
# Only add a 'ansible_ec2_placement_region' key if the
# 'ansible_ec2_placement_availability_zone' exists.
zone = data.get('ansible_ec2_placement_availability_zone')
if zone is not None:
# Use the zone name as the region name unless the zone
# name starts with a known AWS region name.
region = zone
for r in self.AWS_REGIONS:
if zone.startswith(r):
region = r
break
data['ansible_ec2_placement_region'] = region
def run(self):
self.fetch(self.uri_meta) # populate _data
data = self._mangle_fields(self._data, self.uri_meta)
data[self._prefix % 'user-data'] = self._fetch(self.uri_user)
data[self._prefix % 'public-key'] = self._fetch(self.uri_ssh)
self.fix_invalid_varnames(data)
self.add_ec2_region(data)
return data
def main():
argument_spec = url_argument_spec()
module = AnsibleModule(
argument_spec = argument_spec,
supports_check_mode = True,
)
ec2_facts = Ec2Metadata(module).run()
ec2_facts_result = dict(changed=False, ansible_facts=ec2_facts)
module.exit_json(**ec2_facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
# ==== SantosDevelopers/sborganicos :: venv/lib/python3.5/site-packages/django/contrib/gis/measure.py (license: mit) ====
# Copyright (c) 2007, Robert Coup <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Distance nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
Distance and Area objects to allow for sensible and convenient calculation
and conversions.
Authors: Robert Coup, Justin Bronn, Riccardo Di Virgilio
Inspired by GeoPy (https://github.com/geopy/geopy)
and Geoff Biggs' PhD work on dimensioned units for robotics.
"""
from decimal import Decimal
from functools import total_ordering
from django.utils import six
__all__ = ['A', 'Area', 'D', 'Distance']
NUMERIC_TYPES = six.integer_types + (float, Decimal)
AREA_PREFIX = "sq_"
def pretty_name(obj):
return obj.__name__ if obj.__class__ == type else obj.__class__.__name__
@total_ordering
class MeasureBase(object):
STANDARD_UNIT = None
ALIAS = {}
UNITS = {}
LALIAS = {}
def __init__(self, default_unit=None, **kwargs):
value, self._default_unit = self.default_units(kwargs)
setattr(self, self.STANDARD_UNIT, value)
if default_unit and isinstance(default_unit, six.string_types):
self._default_unit = default_unit
def _get_standard(self):
return getattr(self, self.STANDARD_UNIT)
def _set_standard(self, value):
setattr(self, self.STANDARD_UNIT, value)
standard = property(_get_standard, _set_standard)
def __getattr__(self, name):
if name in self.UNITS:
return self.standard / self.UNITS[name]
else:
raise AttributeError('Unknown unit type: %s' % name)
def __repr__(self):
return '%s(%s=%s)' % (pretty_name(self), self._default_unit, getattr(self, self._default_unit))
def __str__(self):
return '%s %s' % (getattr(self, self._default_unit), self._default_unit)
# **** Comparison methods ****
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.standard == other.standard
else:
return NotImplemented
def __lt__(self, other):
if isinstance(other, self.__class__):
return self.standard < other.standard
else:
return NotImplemented
# **** Operators methods ****
def __add__(self, other):
if isinstance(other, self.__class__):
return self.__class__(
default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard + other.standard)}
)
else:
raise TypeError('%(class)s must be added with %(class)s' % {"class": pretty_name(self)})
def __iadd__(self, other):
if isinstance(other, self.__class__):
self.standard += other.standard
return self
else:
raise TypeError('%(class)s must be added with %(class)s' % {"class": pretty_name(self)})
def __sub__(self, other):
if isinstance(other, self.__class__):
return self.__class__(
default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard - other.standard)}
)
else:
raise TypeError('%(class)s must be subtracted from %(class)s' % {"class": pretty_name(self)})
def __isub__(self, other):
if isinstance(other, self.__class__):
self.standard -= other.standard
return self
else:
raise TypeError('%(class)s must be subtracted from %(class)s' % {"class": pretty_name(self)})
def __mul__(self, other):
if isinstance(other, NUMERIC_TYPES):
return self.__class__(
default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard * other)}
)
else:
raise TypeError('%(class)s must be multiplied with number' % {"class": pretty_name(self)})
def __imul__(self, other):
if isinstance(other, NUMERIC_TYPES):
self.standard *= float(other)
return self
else:
raise TypeError('%(class)s must be multiplied with number' % {"class": pretty_name(self)})
def __rmul__(self, other):
return self * other
def __truediv__(self, other):
if isinstance(other, self.__class__):
return self.standard / other.standard
if isinstance(other, NUMERIC_TYPES):
return self.__class__(
default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard / other)}
)
else:
raise TypeError('%(class)s must be divided with number or %(class)s' % {"class": pretty_name(self)})
def __div__(self, other): # Python 2 compatibility
return type(self).__truediv__(self, other)
def __itruediv__(self, other):
if isinstance(other, NUMERIC_TYPES):
self.standard /= float(other)
return self
else:
raise TypeError('%(class)s must be divided with number' % {"class": pretty_name(self)})
def __idiv__(self, other): # Python 2 compatibility
return type(self).__itruediv__(self, other)
def __bool__(self):
return bool(self.standard)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def default_units(self, kwargs):
"""
Return the unit value and the default units specified
from the given keyword arguments dictionary.
"""
val = 0.0
default_unit = self.STANDARD_UNIT
for unit, value in six.iteritems(kwargs):
if not isinstance(value, float):
value = float(value)
if unit in self.UNITS:
val += self.UNITS[unit] * value
default_unit = unit
elif unit in self.ALIAS:
u = self.ALIAS[unit]
val += self.UNITS[u] * value
default_unit = u
else:
lower = unit.lower()
if lower in self.UNITS:
val += self.UNITS[lower] * value
default_unit = lower
elif lower in self.LALIAS:
u = self.LALIAS[lower]
val += self.UNITS[u] * value
default_unit = u
else:
raise AttributeError('Unknown unit type: %s' % unit)
return val, default_unit
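        # Example (illustrative): default_units({'km': 1, 'meter': 500})
        # resolves the 'meter' alias to 'm' and returns a standard value of
        # 1500.0 together with the default unit of the last keyword processed.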
@classmethod
def unit_attname(cls, unit_str):
"""
Retrieves the unit attribute name for the given unit string.
For example, if the given unit string is 'metre', 'm' would be returned.
An exception is raised if an attribute cannot be found.
"""
lower = unit_str.lower()
if unit_str in cls.UNITS:
return unit_str
elif lower in cls.UNITS:
return lower
elif lower in cls.LALIAS:
return cls.LALIAS[lower]
else:
raise Exception('Could not find a unit keyword associated with "%s"' % unit_str)
class Distance(MeasureBase):
STANDARD_UNIT = "m"
UNITS = {
'chain': 20.1168,
'chain_benoit': 20.116782,
'chain_sears': 20.1167645,
'british_chain_benoit': 20.1167824944,
'british_chain_sears': 20.1167651216,
'british_chain_sears_truncated': 20.116756,
'cm': 0.01,
'british_ft': 0.304799471539,
'british_yd': 0.914398414616,
'clarke_ft': 0.3047972654,
'clarke_link': 0.201166195164,
'fathom': 1.8288,
'ft': 0.3048,
'german_m': 1.0000135965,
'gold_coast_ft': 0.304799710181508,
'indian_yd': 0.914398530744,
'inch': 0.0254,
'km': 1000.0,
'link': 0.201168,
'link_benoit': 0.20116782,
'link_sears': 0.20116765,
'm': 1.0,
'mi': 1609.344,
'mm': 0.001,
'nm': 1852.0,
'nm_uk': 1853.184,
'rod': 5.0292,
'sears_yd': 0.91439841,
'survey_ft': 0.304800609601,
'um': 0.000001,
'yd': 0.9144,
}
# Unit aliases for `UNIT` terms encountered in Spatial Reference WKT.
ALIAS = {
'centimeter': 'cm',
'foot': 'ft',
'inches': 'inch',
'kilometer': 'km',
'kilometre': 'km',
'meter': 'm',
'metre': 'm',
'micrometer': 'um',
'micrometre': 'um',
'millimeter': 'mm',
'millimetre': 'mm',
'mile': 'mi',
'yard': 'yd',
'British chain (Benoit 1895 B)': 'british_chain_benoit',
'British chain (Sears 1922)': 'british_chain_sears',
'British chain (Sears 1922 truncated)': 'british_chain_sears_truncated',
'British foot (Sears 1922)': 'british_ft',
'British foot': 'british_ft',
'British yard (Sears 1922)': 'british_yd',
'British yard': 'british_yd',
"Clarke's Foot": 'clarke_ft',
"Clarke's link": 'clarke_link',
'Chain (Benoit)': 'chain_benoit',
'Chain (Sears)': 'chain_sears',
'Foot (International)': 'ft',
'German legal metre': 'german_m',
'Gold Coast foot': 'gold_coast_ft',
'Indian yard': 'indian_yd',
'Link (Benoit)': 'link_benoit',
'Link (Sears)': 'link_sears',
'Nautical Mile': 'nm',
'Nautical Mile (UK)': 'nm_uk',
'US survey foot': 'survey_ft',
'U.S. Foot': 'survey_ft',
'Yard (Indian)': 'indian_yd',
'Yard (Sears)': 'sears_yd'
}
LALIAS = {k.lower(): v for k, v in ALIAS.items()}
def __mul__(self, other):
if isinstance(other, self.__class__):
return Area(
default_unit=AREA_PREFIX + self._default_unit,
**{AREA_PREFIX + self.STANDARD_UNIT: (self.standard * other.standard)}
)
elif isinstance(other, NUMERIC_TYPES):
return self.__class__(
default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard * other)}
)
else:
raise TypeError('%(distance)s must be multiplied with number or %(distance)s' % {
"distance": pretty_name(self.__class__),
})
class Area(MeasureBase):
STANDARD_UNIT = AREA_PREFIX + Distance.STANDARD_UNIT
# Getting the square units values and the alias dictionary.
UNITS = {'%s%s' % (AREA_PREFIX, k): v ** 2 for k, v in Distance.UNITS.items()}
ALIAS = {k: '%s%s' % (AREA_PREFIX, v) for k, v in Distance.ALIAS.items()}
LALIAS = {k.lower(): v for k, v in ALIAS.items()}
def __truediv__(self, other):
if isinstance(other, NUMERIC_TYPES):
return self.__class__(
default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard / other)}
)
else:
raise TypeError('%(class)s must be divided by a number' % {"class": pretty_name(self)})
def __div__(self, other): # Python 2 compatibility
return type(self).__truediv__(self, other)
# Shortcuts
D = Distance
A = Area
# ==== edulramirez/nova :: nova/weights.py (license: apache-2.0) ====
# Copyright (c) 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Pluggable Weighing support
"""
import abc
import six
from nova import loadables
def normalize(weight_list, minval=None, maxval=None):
"""Normalize the values in a list between 0 and 1.0.
The normalization is made regarding the lower and upper values present in
weight_list. If the minval and/or maxval parameters are set, these values
will be used instead of the minimum and maximum from the list.
If all the values are equal, they are normalized to 0.
"""
if not weight_list:
return ()
if maxval is None:
maxval = max(weight_list)
if minval is None:
minval = min(weight_list)
maxval = float(maxval)
minval = float(minval)
if minval == maxval:
return [0] * len(weight_list)
range_ = maxval - minval
return ((i - minval) / range_ for i in weight_list)
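# Example (illustrative): normalize([1.0, 2.0, 3.0]) yields the values
# 0.0, 0.5 and 1.0; with minval=0 and maxval=10 the same list maps to
# 0.1, 0.2 and 0.3.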
class WeighedObject(object):
"""Object with weight information."""
def __init__(self, obj, weight):
self.obj = obj
self.weight = weight
def __repr__(self):
return "<WeighedObject '%s': %s>" % (self.obj, self.weight)
@six.add_metaclass(abc.ABCMeta)
class BaseWeigher(object):
"""Base class for pluggable weighers.
The attributes maxval and minval can be specified to set up the maximum
and minimum values for the weighed objects. These values will then be
taken into account in the normalization step, instead of taking the values
from the calculated weights.
"""
minval = None
maxval = None
def weight_multiplier(self):
"""How weighted this weigher should be.
Override this method in a subclass, so that the returned value is
read from a configuration option to permit operators specify a
multiplier for the weigher.
"""
return 1.0
@abc.abstractmethod
def _weigh_object(self, obj, weight_properties):
"""Weigh an specific object."""
def weigh_objects(self, weighed_obj_list, weight_properties):
"""Weigh multiple objects.
Override in a subclass if you need access to all objects in order
to calculate weights. Do not modify the weight of an object here,
just return a list of weights.
"""
# Calculate the weights
weights = []
for obj in weighed_obj_list:
weight = self._weigh_object(obj.obj, weight_properties)
            # Record the min and max values if they are None. If they are
            # anything but None, we assume that the weigher has set them.
if self.minval is None:
self.minval = weight
if self.maxval is None:
self.maxval = weight
if weight < self.minval:
self.minval = weight
elif weight > self.maxval:
self.maxval = weight
weights.append(weight)
return weights
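# Minimal sketch of a concrete weigher (hypothetical example, not part of this
# module): subclasses only need to implement _weigh_object, and may override
# weight_multiplier to read a configuration option.
#
#   class RAMWeigher(BaseWeigher):
#       def weight_multiplier(self):
#           return 1.0
#       def _weigh_object(self, host_state, weight_properties):
#           return host_state.free_ram_mb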
class BaseWeightHandler(loadables.BaseLoader):
object_class = WeighedObject
def get_weighed_objects(self, weighers, obj_list, weighing_properties):
"""Return a sorted (descending), normalized list of WeighedObjects."""
weighed_objs = [self.object_class(obj, 0.0) for obj in obj_list]
if len(weighed_objs) <= 1:
return weighed_objs
for weigher in weighers:
weights = weigher.weigh_objects(weighed_objs, weighing_properties)
# Normalize the weights
weights = normalize(weights,
minval=weigher.minval,
maxval=weigher.maxval)
for i, weight in enumerate(weights):
obj = weighed_objs[i]
obj.weight += weigher.weight_multiplier() * weight
return sorted(weighed_objs, key=lambda x: x.weight, reverse=True)
# ==== Fizzixnerd/frontdown :: installer.py (license: gpl-3.0) ====
import subprocess
class InstallError(Exception):
pass
class AptError(InstallError):
pass
class YumError(InstallError):
pass
class UnsupportedInstallerError(InstallError):
pass
class Installer:
"""Abstract Base Class for an installer. Represents the installation
system for the current platform (ie apt, yum, pacman, emerge,
etc).
"""
def __init__(self):
self.update()
def install(self, apps=[]):
raise InstallError("This is a generic installer. Use a specialized one.")
def update(self):
raise InstallError("This is a generic installer. Use a specialized one.")
class AptInstaller(Installer):
"""Installer for apt-based systems.
"""
def _aptget(self, command, args=[]):
command_list = ["sudo", "apt-get", command]
command_list.extend(args)
exit_code = subprocess.call(command_list)
if not exit_code:
return 0
else:
raise AptError("Apt exited with non-zero exit code {} when called with commands {}".format(exit_code, command_list))
def update(self):
return self._aptget("update")
def install(self, apps=[]):
return self._aptget("install", apps)
class UnsupportedInstaller(Installer):
def install(self, apps=[]):
raise UnsupportedInstallerError("This installer isn't supported yet.")
def update(self):
raise UnsupportedInstallerError("This installer isn't supported yet.")
class YumInstaller(UnsupportedInstaller):
pass
class ArchInstaller(UnsupportedInstaller):
pass
class GentooInstaller(UnsupportedInstaller):
pass
# ==== darmaa/odoo :: addons/base_import/test_models.py (license: agpl-3.0) ====
from openerp.osv import orm, fields
def name(n): return 'base_import.tests.models.%s' % n
class char(orm.Model):
_name = name('char')
_columns = {
'value': fields.char('unknown', size=None)
}
class char_required(orm.Model):
_name = name('char.required')
_columns = {
'value': fields.char('unknown', size=None, required=True)
}
class char_readonly(orm.Model):
_name = name('char.readonly')
_columns = {
'value': fields.char('unknown', size=None, readonly=True)
}
class char_states(orm.Model):
_name = name('char.states')
_columns = {
'value': fields.char('unknown', size=None, readonly=True, states={'draft': [('readonly', False)]})
}
class char_noreadonly(orm.Model):
_name = name('char.noreadonly')
_columns = {
'value': fields.char('unknown', size=None, readonly=True, states={'draft': [('invisible', True)]})
}
class char_stillreadonly(orm.Model):
_name = name('char.stillreadonly')
_columns = {
'value': fields.char('unknown', size=None, readonly=True, states={'draft': [('readonly', True)]})
}
# TODO: complex field (m2m, o2m, m2o)
class m2o(orm.Model):
_name = name('m2o')
_columns = {
'value': fields.many2one(name('m2o.related'))
}
class m2o_related(orm.Model):
_name = name('m2o.related')
_columns = {
'value': fields.integer()
}
_defaults = {
'value': 42
}
class m2o_required(orm.Model):
_name = name('m2o.required')
_columns = {
'value': fields.many2one(name('m2o.required.related'), required=True)
}
class m2o_required_related(orm.Model):
_name = name('m2o.required.related')
_columns = {
'value': fields.integer()
}
_defaults = {
'value': 42
}
class o2m(orm.Model):
_name = name('o2m')
_columns = {
'value': fields.one2many(name('o2m.child'), 'parent_id')
}
class o2m_child(orm.Model):
_name = name('o2m.child')
_columns = {
'parent_id': fields.many2one(name('o2m')),
'value': fields.integer()
}
class preview_model(orm.Model):
_name = name('preview')
_columns = {
'name': fields.char('Name', size=None),
'somevalue': fields.integer('Some Value', required=True),
'othervalue': fields.integer('Other Variable'),
}
# ==== agusc/scrapy :: tests/test_spidermiddleware_offsite.py (license: bsd-3-clause) ====
from unittest import TestCase
from six.moves.urllib.parse import urlparse
from scrapy.http import Response, Request
from scrapy.spiders import Spider
from scrapy.spidermiddlewares.offsite import OffsiteMiddleware
from scrapy.utils.test import get_crawler
class TestOffsiteMiddleware(TestCase):
def setUp(self):
crawler = get_crawler(Spider)
self.spider = crawler._create_spider(**self._get_spiderargs())
self.mw = OffsiteMiddleware.from_crawler(crawler)
self.mw.spider_opened(self.spider)
def _get_spiderargs(self):
return dict(name='foo', allowed_domains=['scrapytest.org', 'scrapy.org'])
def test_process_spider_output(self):
res = Response('http://scrapytest.org')
onsite_reqs = [Request('http://scrapytest.org/1'),
Request('http://scrapy.org/1'),
Request('http://sub.scrapy.org/1'),
Request('http://offsite.tld/letmepass', dont_filter=True)]
offsite_reqs = [Request('http://scrapy2.org'),
Request('http://offsite.tld/'),
Request('http://offsite.tld/scrapytest.org'),
Request('http://offsite.tld/rogue.scrapytest.org'),
Request('http://rogue.scrapytest.org.haha.com'),
Request('http://roguescrapytest.org')]
reqs = onsite_reqs + offsite_reqs
out = list(self.mw.process_spider_output(res, reqs, self.spider))
self.assertEquals(out, onsite_reqs)
class TestOffsiteMiddleware2(TestOffsiteMiddleware):
def _get_spiderargs(self):
return dict(name='foo', allowed_domains=None)
def test_process_spider_output(self):
res = Response('http://scrapytest.org')
reqs = [Request('http://a.com/b.html'), Request('http://b.com/1')]
out = list(self.mw.process_spider_output(res, reqs, self.spider))
self.assertEquals(out, reqs)
class TestOffsiteMiddleware3(TestOffsiteMiddleware2):
    def _get_spiderargs(self):
        return dict(name='foo')
class TestOffsiteMiddleware4(TestOffsiteMiddleware3):
    def _get_spiderargs(self):
bad_hostname = urlparse('http:////scrapytest.org').hostname
return dict(name='foo', allowed_domains=['scrapytest.org', None, bad_hostname])
def test_process_spider_output(self):
res = Response('http://scrapytest.org')
reqs = [Request('http://scrapytest.org/1')]
out = list(self.mw.process_spider_output(res, reqs, self.spider))
self.assertEquals(out, reqs)
# ==== kontais/EFI-MIPS :: ToolKit/cmds/python/Lib/stringprep.py (license: bsd-3-clause) ====
# This file is generated by mkstringprep.py. DO NOT EDIT.
"""Library that exposes various tables found in the StringPrep RFC 3454.
There are two kinds of tables: sets, for which a member test is provided,
and mappings, for which a mapping function is provided.
"""
import unicodedata
assert unicodedata.unidata_version == '3.2.0'
def in_table_a1(code):
if unicodedata.category(code) != 'Cn': return False
c = ord(code)
if 0xFDD0 <= c < 0xFDF0: return False
return (c & 0xFFFF) not in (0xFFFE, 0xFFFF)
b1_set = set([173, 847, 6150, 6155, 6156, 6157, 8203, 8204, 8205, 8288, 65279] + range(65024,65040))
def in_table_b1(code):
return ord(code) in b1_set
b3_exceptions = {
0xb5:u'\u03bc', 0xdf:u'ss', 0x130:u'i\u0307', 0x149:u'\u02bcn',
0x17f:u's', 0x1f0:u'j\u030c', 0x345:u'\u03b9', 0x37a:u' \u03b9',
0x390:u'\u03b9\u0308\u0301', 0x3b0:u'\u03c5\u0308\u0301', 0x3c2:u'\u03c3', 0x3d0:u'\u03b2',
0x3d1:u'\u03b8', 0x3d2:u'\u03c5', 0x3d3:u'\u03cd', 0x3d4:u'\u03cb',
0x3d5:u'\u03c6', 0x3d6:u'\u03c0', 0x3f0:u'\u03ba', 0x3f1:u'\u03c1',
0x3f2:u'\u03c3', 0x3f5:u'\u03b5', 0x587:u'\u0565\u0582', 0x1e96:u'h\u0331',
0x1e97:u't\u0308', 0x1e98:u'w\u030a', 0x1e99:u'y\u030a', 0x1e9a:u'a\u02be',
0x1e9b:u'\u1e61', 0x1f50:u'\u03c5\u0313', 0x1f52:u'\u03c5\u0313\u0300', 0x1f54:u'\u03c5\u0313\u0301',
0x1f56:u'\u03c5\u0313\u0342', 0x1f80:u'\u1f00\u03b9', 0x1f81:u'\u1f01\u03b9', 0x1f82:u'\u1f02\u03b9',
0x1f83:u'\u1f03\u03b9', 0x1f84:u'\u1f04\u03b9', 0x1f85:u'\u1f05\u03b9', 0x1f86:u'\u1f06\u03b9',
0x1f87:u'\u1f07\u03b9', 0x1f88:u'\u1f00\u03b9', 0x1f89:u'\u1f01\u03b9', 0x1f8a:u'\u1f02\u03b9',
0x1f8b:u'\u1f03\u03b9', 0x1f8c:u'\u1f04\u03b9', 0x1f8d:u'\u1f05\u03b9', 0x1f8e:u'\u1f06\u03b9',
0x1f8f:u'\u1f07\u03b9', 0x1f90:u'\u1f20\u03b9', 0x1f91:u'\u1f21\u03b9', 0x1f92:u'\u1f22\u03b9',
0x1f93:u'\u1f23\u03b9', 0x1f94:u'\u1f24\u03b9', 0x1f95:u'\u1f25\u03b9', 0x1f96:u'\u1f26\u03b9',
0x1f97:u'\u1f27\u03b9', 0x1f98:u'\u1f20\u03b9', 0x1f99:u'\u1f21\u03b9', 0x1f9a:u'\u1f22\u03b9',
0x1f9b:u'\u1f23\u03b9', 0x1f9c:u'\u1f24\u03b9', 0x1f9d:u'\u1f25\u03b9', 0x1f9e:u'\u1f26\u03b9',
0x1f9f:u'\u1f27\u03b9', 0x1fa0:u'\u1f60\u03b9', 0x1fa1:u'\u1f61\u03b9', 0x1fa2:u'\u1f62\u03b9',
0x1fa3:u'\u1f63\u03b9', 0x1fa4:u'\u1f64\u03b9', 0x1fa5:u'\u1f65\u03b9', 0x1fa6:u'\u1f66\u03b9',
0x1fa7:u'\u1f67\u03b9', 0x1fa8:u'\u1f60\u03b9', 0x1fa9:u'\u1f61\u03b9', 0x1faa:u'\u1f62\u03b9',
0x1fab:u'\u1f63\u03b9', 0x1fac:u'\u1f64\u03b9', 0x1fad:u'\u1f65\u03b9', 0x1fae:u'\u1f66\u03b9',
0x1faf:u'\u1f67\u03b9', 0x1fb2:u'\u1f70\u03b9', 0x1fb3:u'\u03b1\u03b9', 0x1fb4:u'\u03ac\u03b9',
0x1fb6:u'\u03b1\u0342', 0x1fb7:u'\u03b1\u0342\u03b9', 0x1fbc:u'\u03b1\u03b9', 0x1fbe:u'\u03b9',
0x1fc2:u'\u1f74\u03b9', 0x1fc3:u'\u03b7\u03b9', 0x1fc4:u'\u03ae\u03b9', 0x1fc6:u'\u03b7\u0342',
0x1fc7:u'\u03b7\u0342\u03b9', 0x1fcc:u'\u03b7\u03b9', 0x1fd2:u'\u03b9\u0308\u0300', 0x1fd3:u'\u03b9\u0308\u0301',
0x1fd6:u'\u03b9\u0342', 0x1fd7:u'\u03b9\u0308\u0342', 0x1fe2:u'\u03c5\u0308\u0300', 0x1fe3:u'\u03c5\u0308\u0301',
0x1fe4:u'\u03c1\u0313', 0x1fe6:u'\u03c5\u0342', 0x1fe7:u'\u03c5\u0308\u0342', 0x1ff2:u'\u1f7c\u03b9',
0x1ff3:u'\u03c9\u03b9', 0x1ff4:u'\u03ce\u03b9', 0x1ff6:u'\u03c9\u0342', 0x1ff7:u'\u03c9\u0342\u03b9',
0x1ffc:u'\u03c9\u03b9', 0x20a8:u'rs', 0x2102:u'c', 0x2103:u'\xb0c',
0x2107:u'\u025b', 0x2109:u'\xb0f', 0x210b:u'h', 0x210c:u'h',
0x210d:u'h', 0x2110:u'i', 0x2111:u'i', 0x2112:u'l',
0x2115:u'n', 0x2116:u'no', 0x2119:u'p', 0x211a:u'q',
0x211b:u'r', 0x211c:u'r', 0x211d:u'r', 0x2120:u'sm',
0x2121:u'tel', 0x2122:u'tm', 0x2124:u'z', 0x2128:u'z',
0x212c:u'b', 0x212d:u'c', 0x2130:u'e', 0x2131:u'f',
0x2133:u'm', 0x213e:u'\u03b3', 0x213f:u'\u03c0', 0x2145:u'd',
0x3371:u'hpa', 0x3373:u'au', 0x3375:u'ov', 0x3380:u'pa',
0x3381:u'na', 0x3382:u'\u03bca', 0x3383:u'ma', 0x3384:u'ka',
0x3385:u'kb', 0x3386:u'mb', 0x3387:u'gb', 0x338a:u'pf',
0x338b:u'nf', 0x338c:u'\u03bcf', 0x3390:u'hz', 0x3391:u'khz',
0x3392:u'mhz', 0x3393:u'ghz', 0x3394:u'thz', 0x33a9:u'pa',
0x33aa:u'kpa', 0x33ab:u'mpa', 0x33ac:u'gpa', 0x33b4:u'pv',
0x33b5:u'nv', 0x33b6:u'\u03bcv', 0x33b7:u'mv', 0x33b8:u'kv',
0x33b9:u'mv', 0x33ba:u'pw', 0x33bb:u'nw', 0x33bc:u'\u03bcw',
0x33bd:u'mw', 0x33be:u'kw', 0x33bf:u'mw', 0x33c0:u'k\u03c9',
0x33c1:u'm\u03c9', 0x33c3:u'bq', 0x33c6:u'c\u2215kg', 0x33c7:u'co.',
0x33c8:u'db', 0x33c9:u'gy', 0x33cb:u'hp', 0x33cd:u'kk',
0x33ce:u'km', 0x33d7:u'ph', 0x33d9:u'ppm', 0x33da:u'pr',
0x33dc:u'sv', 0x33dd:u'wb', 0xfb00:u'ff', 0xfb01:u'fi',
0xfb02:u'fl', 0xfb03:u'ffi', 0xfb04:u'ffl', 0xfb05:u'st',
0xfb06:u'st', 0xfb13:u'\u0574\u0576', 0xfb14:u'\u0574\u0565', 0xfb15:u'\u0574\u056b',
0xfb16:u'\u057e\u0576', 0xfb17:u'\u0574\u056d', 0x1d400:u'a', 0x1d401:u'b',
0x1d402:u'c', 0x1d403:u'd', 0x1d404:u'e', 0x1d405:u'f',
0x1d406:u'g', 0x1d407:u'h', 0x1d408:u'i', 0x1d409:u'j',
0x1d40a:u'k', 0x1d40b:u'l', 0x1d40c:u'm', 0x1d40d:u'n',
0x1d40e:u'o', 0x1d40f:u'p', 0x1d410:u'q', 0x1d411:u'r',
0x1d412:u's', 0x1d413:u't', 0x1d414:u'u', 0x1d415:u'v',
0x1d416:u'w', 0x1d417:u'x', 0x1d418:u'y', 0x1d419:u'z',
0x1d434:u'a', 0x1d435:u'b', 0x1d436:u'c', 0x1d437:u'd',
0x1d438:u'e', 0x1d439:u'f', 0x1d43a:u'g', 0x1d43b:u'h',
0x1d43c:u'i', 0x1d43d:u'j', 0x1d43e:u'k', 0x1d43f:u'l',
0x1d440:u'm', 0x1d441:u'n', 0x1d442:u'o', 0x1d443:u'p',
0x1d444:u'q', 0x1d445:u'r', 0x1d446:u's', 0x1d447:u't',
0x1d448:u'u', 0x1d449:u'v', 0x1d44a:u'w', 0x1d44b:u'x',
0x1d44c:u'y', 0x1d44d:u'z', 0x1d468:u'a', 0x1d469:u'b',
0x1d46a:u'c', 0x1d46b:u'd', 0x1d46c:u'e', 0x1d46d:u'f',
0x1d46e:u'g', 0x1d46f:u'h', 0x1d470:u'i', 0x1d471:u'j',
0x1d472:u'k', 0x1d473:u'l', 0x1d474:u'm', 0x1d475:u'n',
0x1d476:u'o', 0x1d477:u'p', 0x1d478:u'q', 0x1d479:u'r',
0x1d47a:u's', 0x1d47b:u't', 0x1d47c:u'u', 0x1d47d:u'v',
0x1d47e:u'w', 0x1d47f:u'x', 0x1d480:u'y', 0x1d481:u'z',
0x1d49c:u'a', 0x1d49e:u'c', 0x1d49f:u'd', 0x1d4a2:u'g',
0x1d4a5:u'j', 0x1d4a6:u'k', 0x1d4a9:u'n', 0x1d4aa:u'o',
0x1d4ab:u'p', 0x1d4ac:u'q', 0x1d4ae:u's', 0x1d4af:u't',
0x1d4b0:u'u', 0x1d4b1:u'v', 0x1d4b2:u'w', 0x1d4b3:u'x',
0x1d4b4:u'y', 0x1d4b5:u'z', 0x1d4d0:u'a', 0x1d4d1:u'b',
0x1d4d2:u'c', 0x1d4d3:u'd', 0x1d4d4:u'e', 0x1d4d5:u'f',
0x1d4d6:u'g', 0x1d4d7:u'h', 0x1d4d8:u'i', 0x1d4d9:u'j',
0x1d4da:u'k', 0x1d4db:u'l', 0x1d4dc:u'm', 0x1d4dd:u'n',
0x1d4de:u'o', 0x1d4df:u'p', 0x1d4e0:u'q', 0x1d4e1:u'r',
0x1d4e2:u's', 0x1d4e3:u't', 0x1d4e4:u'u', 0x1d4e5:u'v',
0x1d4e6:u'w', 0x1d4e7:u'x', 0x1d4e8:u'y', 0x1d4e9:u'z',
0x1d504:u'a', 0x1d505:u'b', 0x1d507:u'd', 0x1d508:u'e',
0x1d509:u'f', 0x1d50a:u'g', 0x1d50d:u'j', 0x1d50e:u'k',
0x1d50f:u'l', 0x1d510:u'm', 0x1d511:u'n', 0x1d512:u'o',
0x1d513:u'p', 0x1d514:u'q', 0x1d516:u's', 0x1d517:u't',
0x1d518:u'u', 0x1d519:u'v', 0x1d51a:u'w', 0x1d51b:u'x',
0x1d51c:u'y', 0x1d538:u'a', 0x1d539:u'b', 0x1d53b:u'd',
0x1d53c:u'e', 0x1d53d:u'f', 0x1d53e:u'g', 0x1d540:u'i',
0x1d541:u'j', 0x1d542:u'k', 0x1d543:u'l', 0x1d544:u'm',
0x1d546:u'o', 0x1d54a:u's', 0x1d54b:u't', 0x1d54c:u'u',
0x1d54d:u'v', 0x1d54e:u'w', 0x1d54f:u'x', 0x1d550:u'y',
0x1d56c:u'a', 0x1d56d:u'b', 0x1d56e:u'c', 0x1d56f:u'd',
0x1d570:u'e', 0x1d571:u'f', 0x1d572:u'g', 0x1d573:u'h',
0x1d574:u'i', 0x1d575:u'j', 0x1d576:u'k', 0x1d577:u'l',
0x1d578:u'm', 0x1d579:u'n', 0x1d57a:u'o', 0x1d57b:u'p',
0x1d57c:u'q', 0x1d57d:u'r', 0x1d57e:u's', 0x1d57f:u't',
0x1d580:u'u', 0x1d581:u'v', 0x1d582:u'w', 0x1d583:u'x',
0x1d584:u'y', 0x1d585:u'z', 0x1d5a0:u'a', 0x1d5a1:u'b',
0x1d5a2:u'c', 0x1d5a3:u'd', 0x1d5a4:u'e', 0x1d5a5:u'f',
0x1d5a6:u'g', 0x1d5a7:u'h', 0x1d5a8:u'i', 0x1d5a9:u'j',
0x1d5aa:u'k', 0x1d5ab:u'l', 0x1d5ac:u'm', 0x1d5ad:u'n',
0x1d5ae:u'o', 0x1d5af:u'p', 0x1d5b0:u'q', 0x1d5b1:u'r',
0x1d5b2:u's', 0x1d5b3:u't', 0x1d5b4:u'u', 0x1d5b5:u'v',
0x1d5b6:u'w', 0x1d5b7:u'x', 0x1d5b8:u'y', 0x1d5b9:u'z',
0x1d5d4:u'a', 0x1d5d5:u'b', 0x1d5d6:u'c', 0x1d5d7:u'd',
0x1d5d8:u'e', 0x1d5d9:u'f', 0x1d5da:u'g', 0x1d5db:u'h',
0x1d5dc:u'i', 0x1d5dd:u'j', 0x1d5de:u'k', 0x1d5df:u'l',
0x1d5e0:u'm', 0x1d5e1:u'n', 0x1d5e2:u'o', 0x1d5e3:u'p',
0x1d5e4:u'q', 0x1d5e5:u'r', 0x1d5e6:u's', 0x1d5e7:u't',
0x1d5e8:u'u', 0x1d5e9:u'v', 0x1d5ea:u'w', 0x1d5eb:u'x',
0x1d5ec:u'y', 0x1d5ed:u'z', 0x1d608:u'a', 0x1d609:u'b',
0x1d60a:u'c', 0x1d60b:u'd', 0x1d60c:u'e', 0x1d60d:u'f',
0x1d60e:u'g', 0x1d60f:u'h', 0x1d610:u'i', 0x1d611:u'j',
0x1d612:u'k', 0x1d613:u'l', 0x1d614:u'm', 0x1d615:u'n',
0x1d616:u'o', 0x1d617:u'p', 0x1d618:u'q', 0x1d619:u'r',
0x1d61a:u's', 0x1d61b:u't', 0x1d61c:u'u', 0x1d61d:u'v',
0x1d61e:u'w', 0x1d61f:u'x', 0x1d620:u'y', 0x1d621:u'z',
0x1d63c:u'a', 0x1d63d:u'b', 0x1d63e:u'c', 0x1d63f:u'd',
0x1d640:u'e', 0x1d641:u'f', 0x1d642:u'g', 0x1d643:u'h',
0x1d644:u'i', 0x1d645:u'j', 0x1d646:u'k', 0x1d647:u'l',
0x1d648:u'm', 0x1d649:u'n', 0x1d64a:u'o', 0x1d64b:u'p',
0x1d64c:u'q', 0x1d64d:u'r', 0x1d64e:u's', 0x1d64f:u't',
0x1d650:u'u', 0x1d651:u'v', 0x1d652:u'w', 0x1d653:u'x',
0x1d654:u'y', 0x1d655:u'z', 0x1d670:u'a', 0x1d671:u'b',
0x1d672:u'c', 0x1d673:u'd', 0x1d674:u'e', 0x1d675:u'f',
0x1d676:u'g', 0x1d677:u'h', 0x1d678:u'i', 0x1d679:u'j',
0x1d67a:u'k', 0x1d67b:u'l', 0x1d67c:u'm', 0x1d67d:u'n',
0x1d67e:u'o', 0x1d67f:u'p', 0x1d680:u'q', 0x1d681:u'r',
0x1d682:u's', 0x1d683:u't', 0x1d684:u'u', 0x1d685:u'v',
0x1d686:u'w', 0x1d687:u'x', 0x1d688:u'y', 0x1d689:u'z',
0x1d6a8:u'\u03b1', 0x1d6a9:u'\u03b2', 0x1d6aa:u'\u03b3', 0x1d6ab:u'\u03b4',
0x1d6ac:u'\u03b5', 0x1d6ad:u'\u03b6', 0x1d6ae:u'\u03b7', 0x1d6af:u'\u03b8',
0x1d6b0:u'\u03b9', 0x1d6b1:u'\u03ba', 0x1d6b2:u'\u03bb', 0x1d6b3:u'\u03bc',
0x1d6b4:u'\u03bd', 0x1d6b5:u'\u03be', 0x1d6b6:u'\u03bf', 0x1d6b7:u'\u03c0',
0x1d6b8:u'\u03c1', 0x1d6b9:u'\u03b8', 0x1d6ba:u'\u03c3', 0x1d6bb:u'\u03c4',
0x1d6bc:u'\u03c5', 0x1d6bd:u'\u03c6', 0x1d6be:u'\u03c7', 0x1d6bf:u'\u03c8',
0x1d6c0:u'\u03c9', 0x1d6d3:u'\u03c3', 0x1d6e2:u'\u03b1', 0x1d6e3:u'\u03b2',
0x1d6e4:u'\u03b3', 0x1d6e5:u'\u03b4', 0x1d6e6:u'\u03b5', 0x1d6e7:u'\u03b6',
0x1d6e8:u'\u03b7', 0x1d6e9:u'\u03b8', 0x1d6ea:u'\u03b9', 0x1d6eb:u'\u03ba',
0x1d6ec:u'\u03bb', 0x1d6ed:u'\u03bc', 0x1d6ee:u'\u03bd', 0x1d6ef:u'\u03be',
0x1d6f0:u'\u03bf', 0x1d6f1:u'\u03c0', 0x1d6f2:u'\u03c1', 0x1d6f3:u'\u03b8',
0x1d6f4:u'\u03c3', 0x1d6f5:u'\u03c4', 0x1d6f6:u'\u03c5', 0x1d6f7:u'\u03c6',
0x1d6f8:u'\u03c7', 0x1d6f9:u'\u03c8', 0x1d6fa:u'\u03c9', 0x1d70d:u'\u03c3',
0x1d71c:u'\u03b1', 0x1d71d:u'\u03b2', 0x1d71e:u'\u03b3', 0x1d71f:u'\u03b4',
0x1d720:u'\u03b5', 0x1d721:u'\u03b6', 0x1d722:u'\u03b7', 0x1d723:u'\u03b8',
0x1d724:u'\u03b9', 0x1d725:u'\u03ba', 0x1d726:u'\u03bb', 0x1d727:u'\u03bc',
0x1d728:u'\u03bd', 0x1d729:u'\u03be', 0x1d72a:u'\u03bf', 0x1d72b:u'\u03c0',
0x1d72c:u'\u03c1', 0x1d72d:u'\u03b8', 0x1d72e:u'\u03c3', 0x1d72f:u'\u03c4',
0x1d730:u'\u03c5', 0x1d731:u'\u03c6', 0x1d732:u'\u03c7', 0x1d733:u'\u03c8',
0x1d734:u'\u03c9', 0x1d747:u'\u03c3', 0x1d756:u'\u03b1', 0x1d757:u'\u03b2',
0x1d758:u'\u03b3', 0x1d759:u'\u03b4', 0x1d75a:u'\u03b5', 0x1d75b:u'\u03b6',
0x1d75c:u'\u03b7', 0x1d75d:u'\u03b8', 0x1d75e:u'\u03b9', 0x1d75f:u'\u03ba',
0x1d760:u'\u03bb', 0x1d761:u'\u03bc', 0x1d762:u'\u03bd', 0x1d763:u'\u03be',
0x1d764:u'\u03bf', 0x1d765:u'\u03c0', 0x1d766:u'\u03c1', 0x1d767:u'\u03b8',
0x1d768:u'\u03c3', 0x1d769:u'\u03c4', 0x1d76a:u'\u03c5', 0x1d76b:u'\u03c6',
0x1d76c:u'\u03c7', 0x1d76d:u'\u03c8', 0x1d76e:u'\u03c9', 0x1d781:u'\u03c3',
0x1d790:u'\u03b1', 0x1d791:u'\u03b2', 0x1d792:u'\u03b3', 0x1d793:u'\u03b4',
0x1d794:u'\u03b5', 0x1d795:u'\u03b6', 0x1d796:u'\u03b7', 0x1d797:u'\u03b8',
0x1d798:u'\u03b9', 0x1d799:u'\u03ba', 0x1d79a:u'\u03bb', 0x1d79b:u'\u03bc',
0x1d79c:u'\u03bd', 0x1d79d:u'\u03be', 0x1d79e:u'\u03bf', 0x1d79f:u'\u03c0',
0x1d7a0:u'\u03c1', 0x1d7a1:u'\u03b8', 0x1d7a2:u'\u03c3', 0x1d7a3:u'\u03c4',
0x1d7a4:u'\u03c5', 0x1d7a5:u'\u03c6', 0x1d7a6:u'\u03c7', 0x1d7a7:u'\u03c8',
0x1d7a8:u'\u03c9', 0x1d7bb:u'\u03c3', }
def map_table_b3(code):
r = b3_exceptions.get(ord(code))
if r is not None: return r
return code.lower()
def map_table_b2(a):
al = map_table_b3(a)
b = unicodedata.normalize("NFKC", al)
bl = u"".join([map_table_b3(ch) for ch in b])
c = unicodedata.normalize("NFKC", bl)
if b != c:
return c
else:
return al
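# Example (illustrative): map_table_b2(u'\u00df') returns u'ss', i.e. the
# case-folded, NFKC-normalized form of LATIN SMALL LETTER SHARP S.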
def in_table_c11(code):
return code == u" "
def in_table_c12(code):
return unicodedata.category(code) == "Zs" and code != u" "
def in_table_c11_c12(code):
return unicodedata.category(code) == "Zs"
def in_table_c21(code):
return ord(code) < 128 and unicodedata.category(code) == "Cc"
c22_specials = set([1757, 1807, 6158, 8204, 8205, 8232, 8233, 65279] + range(8288,8292) + range(8298,8304) + range(65529,65533) + range(119155,119163))
def in_table_c22(code):
c = ord(code)
if c < 128: return False
if unicodedata.category(code) == "Cc": return True
return c in c22_specials
def in_table_c21_c22(code):
return unicodedata.category(code) == "Cc" or \
ord(code) in c22_specials
def in_table_c3(code):
return unicodedata.category(code) == "Co"
def in_table_c4(code):
c = ord(code)
if c < 0xFDD0: return False
if c < 0xFDF0: return True
return (ord(code) & 0xFFFF) in (0xFFFE, 0xFFFF)
def in_table_c5(code):
return unicodedata.category(code) == "Cs"
c6_set = set(range(65529,65534))
def in_table_c6(code):
return ord(code) in c6_set
c7_set = set(range(12272,12284))
def in_table_c7(code):
return ord(code) in c7_set
c8_set = set([832, 833, 8206, 8207] + range(8234,8239) + range(8298,8304))
def in_table_c8(code):
return ord(code) in c8_set
c9_set = set([917505] + range(917536,917632))
def in_table_c9(code):
return ord(code) in c9_set
def in_table_d1(code):
return unicodedata.bidirectional(code) in ("R","AL")
def in_table_d2(code):
return unicodedata.bidirectional(code) == "L"
# ==== bhavin04890/finaldashboard :: modules/savage/graph/__init__.py (license: bsd-3-clause) ====
from base import BaseGraph, UnifiedGraph
from canvas import ScatterCanvas, DoubleScatterCanvas, BarCanvas, HorizontalBarCanvas, PieCanvas, LineCanvas
from axes import YAxis
from ..utils.struct import Vector as V
from ..graphics.utils import ViewBox, Translate, Rotate, addAttr, blank, boolean
from ..graphics.color import hex_to_color, Color
from ..graphics.shapes import Line, Rectangle, Text
from ..graphics.group import Group
class Graph (BaseGraph):
def __init__ (self, **attr):
BaseGraph.__init__ (self, None, **attr)
class ScatterPlot (UnifiedGraph):
def __init__ (self, regLine = True, settings = None):
UnifiedGraph.__init__ (self,
ScatterCanvas,
regLine = regLine,
settings = settings)
#self.addScript (self.jsLocator ())
def jsLocator (self):
return """
function Locator (root) {
var canvasRoot = root;
}
registerEvent (window, 'load', function () {
var root = document.getElementById ('canvas-root');
var l = new Locator (root);
        });
"""
def jsPosition (self):
return """
function showPosition (element) {
}
"""
def formatSettings (self, settings):
UnifiedGraph.formatSettings (self, settings)
addAttr (settings, 'markerSize', float, 2.0)
addAttr (settings, 'markerType', str, 'circle')
addAttr (settings, 'colorScheme', str, 'tripleAxis')
addAttr (settings, 'color1', hex_to_color, hex_to_color ('ff0000'))
addAttr (settings, 'color2', hex_to_color, hex_to_color ('00ff00'))
addAttr (settings, 'color3', hex_to_color, hex_to_color ('0000ff'))
addAttr (settings, 'regLineColor', hex_to_color, hex_to_color('000000'))
addAttr (settings, 'regLineWidth', float, 1.0)
def setColors (self, color1= None, color2 = None, color3 = None):
self.settings.color1 = color1
self.settings.color2 = color2
self.settings.color3 = color3
def setProperties (self):
self.xaxis = True
self.yaxis = True
self.y2axis = False
def addPoint (self, x, y, name = None):
self.canvas.drawPoint (name, x, y)
class DoubleScatterPlot (ScatterPlot):
def __init__ (self, **attr):
UnifiedGraph.__init__ (self, DoubleScatterCanvas, **attr)
def jsPosition (self):
return """
function showPosition (element) {
}
"""
def formatSettings (self, settings):
UnifiedGraph.formatSettings (self, settings)
addAttr (settings, 'g1MarkerType', str, 'circle')
addAttr (settings, 'g1MarkerSize', float, '2.0')
addAttr (settings, 'g1ColorScheme', str, 'solid')
addAttr (settings, 'g1Color1', hex_to_color, Color (255, 0, 0))
addAttr (settings, 'g1Color2', hex_to_color, Color (0, 255, 0))
addAttr (settings, 'g1Color3', hex_to_color, Color (0, 0, 255))
addAttr (settings, 'g1RegLine', boolean, False)
addAttr (settings, 'g1RegLineColor', hex_to_color, Color (0, 0, 0))
addAttr (settings, 'g1RegLineWidth', float, 1.0)
addAttr (settings, 'g2MarkerType', str, 'square')
addAttr (settings, 'g2MarkerSize', float, '4.0')
addAttr (settings, 'g2ColorScheme', str, 'solid')
addAttr (settings, 'g2Color1', hex_to_color, Color (0, 0, 255))
addAttr (settings, 'g2Color2', hex_to_color, Color (0, 255, 0))
addAttr (settings, 'g2Color3', hex_to_color, Color (255, 0, 0))
addAttr (settings, 'g2RegLine', boolean, False)
addAttr (settings, 'g2RegLineColor', hex_to_color, Color (0, 0, 0))
addAttr (settings, 'g2RegLineWidth', float, 1.0)
def setColors (self, color1, color2):
raise NotImplementedError ()
def setProperties (self):
self.xaxis = True
self.yaxis = True
self.y2axis = True
"""def setY2Bounds (self):
return (self.canvas.minY2, self.canvas.maxY2)"""
def addPoint1 (self, x, y, name = None):
self.canvas.drawPoint (name, x, y)
def addPoint2 (self, x, y, name = None):
self.canvas.drawPoint2 (name, x, y)
class BarGraph (UnifiedGraph):
def __init__ (self, **attr):
"""if attr.has_key ('horizontal') and attr['horizontal']:
self.horizontal = True
UnifiedGraph.__init__ (self, HorizontalBarCanvas, **attr)
else:
self.horizontal = False
UnifiedGraph.__init__ (self, BarCanvas, **attr)"""
UnifiedGraph.__init__ (self, None, **attr)
if self.settings.horizontal == True:
self.attachCanvas (HorizontalBarCanvas)
else:
self.attachCanvas (BarCanvas)
#self.addScript ('hs/static/highlight.js')
def formatSettings (self, settings):
UnifiedGraph.formatSettings (self, settings)
addAttr (settings, 'barColor', hex_to_color, Color (210, 10, 10))
addAttr (settings, 'barWidth', float, 1.0)
addAttr (settings, 'barSpacing', float, .1)
addAttr (settings, 'blankSpace', float, .5)
addAttr (settings, 'horizontal', boolean, False)
def jsChangeTooltipPos (self):
if not self.settings.horizontal:
return UnifiedGraph.jsChangeTooltipPos (self)
else:
return """
if (target.getAttribute ('width'))
targetWidth = parseFloat (target.getAttribute ('width'));
else
targetWidth = 0;
v.x += targetWidth"""
def setProperties (self):
if self.settings.horizontal:
self.xaxis = True
self.yaxis = True
self.y2axis = False
else:
self.xaxis = True
self.yaxis = True
self.y2axis = False
def setColors (self, colors):
self.canvas.colors = colors
def addSpace (self):
self.canvas.addSpace ()
def addBar (self, name, data):
self.canvas.addBar (None, name, data)
#if self.horizontal:
# self.ylabels.append (name)
def addGroup (self, name, data):
for key, value in data:
self.canvas.addBar (name, key, value)
self.canvas.addSpace ()
def createXAxisSpace (self):
if self.settings.horizontal:
UnifiedGraph.createXAxisSpace (self)
else:
h = self.settings.xAxisTextHeight
width = []
for child in self.canvas.data:
if child.xml.has_key ('data') and not child.xml['data'] is None :
w = Text.textWidth (child.xml['data'], h)
width.append (w)
if len (width) > 0:
maxWidth = max (width)
else:
maxWidth = 0
delta = self.settings.xAxisSpace + maxWidth
self.canvas.move (0, delta)
self.canvas.changeSize (0, -delta)
def createYAxis (self):
if not self.settings.horizontal:
UnifiedGraph.createYAxis (self)
else:
for child in self.canvas.data:
self.ypositions.append (child.y + (child.height / 2.0))
self.ylabels.append (child.xml['data'])
UnifiedGraph.createYAxis (self)
def createXAxis (self):
ax = Group ()
x = self.canvas.x - self.canvas.height
y = self.canvas.y + self.canvas.height
ax.appendTransform (Rotate (-90, self.canvas.x, y))
if self.settings.horizontal:
UnifiedGraph.createXAxis (self)
else:
textProperties = {'textHeight': self.settings.xAxisTextHeight,
'verticalAnchor': 'middle',
'horizontalAnchor': 'right',
}
xaxis = YAxis (id = 'x-axis',
inf = self.canvas.y + self.canvas.height,
sup = self.canvas.y + self.canvas.height - self.canvas.width,
x = self.canvas.x - self.canvas.height - self.settings.xAxisSpace,
lower = self.xbounds[0],
upper = self.xbounds[1],
textProperties = textProperties)
ticks = []
labels = []
for child in self.canvas.data:
if child.xml.has_key ('name'):
ticks.append (child.x + child.width / 2.0)
labels.append (child.xml['data'])
xaxis.createTicks (ticks)
xaxis.setText (map (str, labels))
xaxis.drawTicks ()
ax.draw (xaxis)
self.dataGroup.drawAt (ax, 0)
def setSVG (self):
attr = UnifiedGraph.setSVG (self)
return attr
class LineChart (UnifiedGraph):
def __init__ (self, **attr):
UnifiedGraph.__init__ (self, LineCanvas, **attr)
def setProperties (self):
self.xaxis = True
self.yaxis = True
self.y2axis = False
def setColors (self, colorDict):
self.canvas.colors.update (colorDict)
def addSeries (self, name, series):
self.canvas.addData (name, *series)
def setSeriesNames (self, seriesNames):
self.xlabels = seriesNames
class PieChart (BaseGraph):
def __init__ (self, **attr):
BaseGraph.__init__ (self, PieCanvas)
self.addScript ('hs/static/pie.js')
def addWedge (self, name, value):
self.canvas.addData (name, float (value))
def finalize (self):
BaseGraph.finalize (self)
self.canvas.finalize ()
| mit | -2,421,170,796,185,139,000 | 33.507092 | 108 | 0.564382 | false |
lianqiw/maos | scripts/interface.py | 1 | 12202 | #!/usr/bin/env python
#Use ctypes to interface with C libraries
#POINTER is the class type of a pointer
#pointer() acts on an actual object, while POINTER() works on a class type.
#pointer(cell(arr)) #creates a cell Structure for np.array type arr and makes a pointer to it
#pcell=POINTER(cell) ; pcell() #creates a class for the cell Structure and then makes an object pointer with no content.
#c_int is a ctypes type
#a=c_int(42) #creates a ctypes int object
#p=pointer(a) #creates a C-compatible pointer to object a
#p.contents retrieves the contents of pointer p
#addressof(p) retrieves the address of p
#use pd=cast(p, POINTER(c_double)) to convert pointer p to a c_double pointer
#use pd=cast(address, POINTER(c_double)) to convert an address into a c_double pointer
#cast(addressof(a), POINTER(c_double)).contents #reinterprets int a as a double
#pointer() creates a real pointer to a ctypes object (Structure)
#byref() is a simplified version of pointer(). It cannot be used as byref(byref())
#can use byref(pointer())
#Set restype to return the correct value
#Set argtypes for type checking of input into C code
#be careful regarding memory management.
#TODO: investigate whether CFFI is a better solution.
import os
import sys
from pdb import set_trace as keyboard
from ctypes import *
import json
import numpy as np
import scipy.sparse as sp
from warnings import warn
aolib_so=os.environ.get('MAOS_AOLIB', 'aolib.so')
try:
lib=cdll.LoadLibrary(aolib_so)
except:
raise Exception('aolib.so is not found at '+aolib_so)
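# --- Editorial addition: an illustrative sketch (not part of the original
# module) of the ctypes operations described in the comments above: creating a
# c_int, taking a pointer, reading it back, and reinterpreting a raw address
# with cast(). It is never called at import time.
def _ctypes_pointer_demo():
    a = c_int(42)                      # a ctypes int object
    p = pointer(a)                     # C-compatible pointer to a
    assert p.contents.value == 42      # .contents dereferences the pointer
    p2 = cast(addressof(a), POINTER(c_int))  # reinterpret the raw address
    assert p2.contents.value == 42
    return byref(a)                    # lightweight pointer for call arguments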
id2ctype={
#obtain type information from MAOS id.
#The value is (type, is complex, kind(0:dense, 1: sparse, 2: loc, 10: cell))
25600: (c_double,1,1), #M_CSP64
25601: (c_double,0,1), #M_SP64
25602: (c_double,0,0), #'M_DBL'
25603: (c_long, 0,0), #'M_INT64'
25604: (c_double,1,0), #'M_CMP'
25605: (c_int, 0,0), #'M_INT32',),
25606: (c_float, 1,1), #'M_CSP32',),
25607: (c_float, 0,1), #'M_SP32',),
25608: (c_float, 0,0), #'M_FLT',),
25609: (c_float, 1,0), #'M_ZMP',),
25610: (c_char, 0,0), #'M_INT8',),
25611: (c_short, 0,0), # 'M_INT16',),
25633: (c_void_p,0,10),#MC_ANY
222210: (c_double,0,2),#M_LOC64
}
#convert C array pointer to numpy array. Freeing C memory
def pt2py(pointer):
if bool(pointer):
out=pointer.contents.as_array()
pointer.contents.free()
return out
else:
return None
#convert C vector to numpy array. Memory is copied.
def as_array(arr, id, shape):
    ''' convert C array arr to numpy based on id'''
(tt, iscomplex, issparse)=id2ctype.get(id)
if tt is None or not bool(arr) or shape[0]==0:
return np.empty((0,))
else:
parr=cast(arr, POINTER(tt))
if iscomplex:
nparr=np.ctypeslib.as_array(parr, shape=(*shape,2))
nparr2=nparr[...,0]+1j*nparr[...,1]
else:
nparr=np.ctypeslib.as_array(parr, shape=shape)
nparr2=np.copy(nparr)
return nparr2
#convert numpy array to any C array adaptively
def py2cell(arr):
if type(arr) is list:
arr=np.asarray(arr)
if sp.isspmatrix_csc(arr):
return csc(arr)
else:
return cell(arr)
#convert numpy array to any C array pointer adaptively
def py2cellref(arr):
if type(arr) is list:
arr = np.asarray(arr)
if type(arr) is np.ndarray:
if arr.size==0:
return None #turn empty ndarray to Null pointer. do not use 0
elif sp.isspmatrix_csc(arr):
return byref(csc(arr))
else:
return byref(cell(arr))
else:
return byref(arr)
class cell(Structure):
_fields_ = [ #fields compatible with C type
('id', c_uint32),
('p', c_void_p),
('nx', c_long),
('ny', c_long),
('header', c_char_p),
('mmap', c_void_p),
('nref', c_void_p),
('fft', c_void_p),
]
def __init__(self, arr=None):#convert from numpy to C. Memory is borrowed
dtype2id={#Conversion from numpy type to maos id
np.double:25602,
np.complex128:25604,
np.int64: 25603,
np.object_:25633,
}
if type(arr) is list:
arr=np.asarray(arr)
if arr is not None:
if arr.strides[-1]!=arr.itemsize:
raise(Exception('Non standard indexing is not supported. Please make a copy.'))
self.id=dtype2id.get(arr.dtype.type)
if self.id is None:
print("init: Unknown data" +str( arr.dtype.type))
return None
if arr.ndim>2:
print("init: Only use 2 dimensions\n");
if arr.ndim>0:
self.nx=arr.shape[-1]
if arr.ndim>1:
self.ny=arr.shape[-2]
else:
if self.nx>0:
self.ny=1
else:
self.ny=0
if self.nx==0:
self.p=0
elif arr.dtype.kind != 'O':
self.p=arr.ctypes.data_as(c_void_p)
else:
self.qarr=np.zeros(self.shape(1), dtype=object)
self.parr=np.zeros(self.shape(1), dtype=c_void_p) #store pointers
for iy in range(self.ny):
for ix in range(self.nx):
if arr.ndim==1:
arri=arr[ix]
else:
arri=arr[iy,ix]
if arri is not None:
self.qarr[iy,ix]=py2cell(arri) #keep reference
self.parr[iy,ix]=addressof(self.qarr[iy,ix]) #pointer
else:
self.parr[iy,ix]=0
self.p=self.parr.ctypes.data_as(c_void_p)
else:
self.id=25633
self.p=None
self.nx=0
self.ny=0
self.header=None
self.mmap=None
self.nref=None
self.fft=None
def shape(self, twod):
if self.ny > 1 or twod:
return (self.ny, self.nx)
else:
return (self.nx,) #last , is necessary
    def as_array(self): #convert from C to numpy. Memory is copied
try:
(tt, iscomplex, kind)=id2ctype.get(self.id)
except:
kind=-1
if kind==0: #dense matrix
if self.header:
print(self.header)
return as_array(self.p, self.id, self.shape(0))
elif kind==1: #sparse matrix
return cast(addressof(self), POINTER(csc)).contents.as_array()
elif kind==2: #loc
return cast(addressof(self), POINTER(loc)).contents.as_array()
elif kind==10: #cell
res=np.empty(self.shape(1), dtype=object)
parr=cast(self.p, POINTER(c_void_p))
for iy in range(self.ny):
for ix in range(self.nx):
address=parr[ix+self.nx*iy]
if address is not None:
pp=cast(int(address), POINTER(cell))
res[iy, ix]=pp.contents.as_array() #recursive
else:
res[iy, ix]=np.empty(())
if self.ny==1:
res=res[0,]
return res
else:
print('as_array: Unknown data, id='+ str(self.id))
return np.empty((),dtype=object)
def free(self):
lib.cellfree_do(byref(self)) #will fail if memory is not allocated by C
class loc(Structure):
_fields_ = [
('id', c_uint32),
('locx', c_void_p),
('locy', c_void_p),
('nloc', c_long),
('dx', c_double),
('dy', c_double),
('ht', c_double),
('iac', c_double),
('locstat_t', c_void_p),
('map', c_void_p),
('npad', c_int),
('nref', c_void_p),
]
def __init__(self, arr=None): #convert from numpy to C. Memory is borrowed
self.id= 222210 #0x036402 #M_LOC64
if arr is not None:
if len(arr.shape)!=2 or arr.shape[0] !=2 :
raise(Exception('Array has to of shape 2xn'))
else:
self.nloc=arr.shape[1]
self.locx=arr[0,].ctypes.data_as(c_void_p)
self.locy=arr[1,].ctypes.data_as(c_void_p)
dlocx=arr[0,1:]-arr[0,0:-1]
self.dx=min(dlocx[dlocx>0]);
dlocy=arr[1,1:]-arr[1,0:-1]
self.dy=min(dlocy[dlocy>0]);
#print('loc: dx={0}, dy={1}'.format(self.dx, self.dy))
else:
self.nloc=0
self.locx=None
self.locy=None
self.dx=0
self.dy=0
self.ht=0
self.iac=0
self.locstat_t=0
self.map=0
self.npad=0
self.nref=0
    def as_array(self): #convert from C to numpy. Memory is copied
if(self.locx):
if self.id!=222210:
raise(Exception('Wrong type'))
else:
arr=np.empty((2, self.nloc))
arr[0,]=as_array(self.locx, 25602, shape=(self.nloc,))
arr[1,]=as_array(self.locy, 25602, shape=(self.nloc,))
return arr
def free(self):
lib.cellfree_do(byref(self))
class csc(Structure):#CSC sparse matrix
_fields_=[
('id', c_uint32),
('x', c_void_p),
('nx', c_long),
('ny', c_long),
('header', c_char_p),
('nzmax', c_long),
('p', c_void_p),
('i', c_void_p),
('nref', c_void_p),
]
def __init__(self, arr=None): #convert from numpy to C. Memory is borrowed
dtype2id={#Conversion from sparse type to maos id
np.float32: 25607,
np.float64: 25601,
np.complex64: 25606,
np.complex128:25600,
}
if arr is not None and sp.isspmatrix_csc(arr):
self.id=dtype2id.get(arr.dtype.type)
#save subarrays
self.xp=arr.data
self.ip=arr.indices.astype(np.long)
self.pp=arr.indptr.astype(np.long) #p
self.x=self.xp.ctypes.data_as(c_void_p) #data
self.i=self.ip.ctypes.data_as(c_void_p) #row index
self.p=self.pp.ctypes.data_as(c_void_p)
self.nx, self.ny=arr.shape #Fortran order
self.nzmax=self.pp[-1]
else:
self.id=dtype2id.get(np.float64)
self.x=None
self.i=None
self.p=None
self.nx=0
self.ny=0
self.nzmax=0
self.header=None
self.nref=None
    def as_array(self): #convert from C to numpy. Memory is copied
if self.nzmax>0:
self.xp=as_array(self.x, self.id, (self.nzmax,))
self.ip=as_array(self.i, 25603, (self.nzmax,))
self.pp=as_array(self.p, 25603, (self.ny+1,))
return sp.csc_matrix((self.xp, self.ip, self.pp), shape=(self.nx, self.ny))
else:
return sp.csc_matrix((self.nx,self.ny))
def free(self):
lib.cellfree_do(byref(self))
def convert_fields(fields):
val2type={
'*':c_void_p,
'double':c_double,
'long':c_long,
'int':c_int,
}
newfields=[]
for key,val in fields.items():
if val[-1]=='*':
val=c_void_p
else:
val=val2type[val]
newfields.append((key,val))
return newfields
#Create a ctypes class with field listed
def make_class(name, fields):
newfields=convert_fields(fields)
class newclass(Structure):
pass
def as_array(self):#convert struct into dictionary
out=dict()
for ff in self._fields_:
#convert C pointers to POINTER then to array
if ff[1] is c_void_p:
exec('out[\''+ff[0]+'\']=cast(self.'+ff[0]+',POINTER(cell)).contents.as_array()')
else:
exec('out[\''+ff[0]+'\']=self.'+ff[0])
return out
def free(self):
print('to implement: free');
newclass._fields_=newfields
return newclass
| gpl-3.0 | -7,094,381,525,359,293,000 | 33.763533 | 116 | 0.5277 | false |
Chilledheart/gyp | test/compiler-override/gyptest-compiler-env.py | 14 | 3332 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that the user can override the compiler and linker using CC/CXX/LD
environment variables.
"""
import TestGyp
import os
import copy
import sys
here = os.path.dirname(os.path.abspath(__file__))
if sys.platform == 'win32':
# cross compiling not supported by ninja on windows
# and make not supported on windows at all.
sys.exit(0)
# Clear any existing compiler related env vars.
for key in ['CC', 'CXX', 'LINK', 'CC_host', 'CXX_host', 'LINK_host']:
if key in os.environ:
del os.environ[key]
def CheckCompiler(test, gypfile, check_for, run_gyp):
if run_gyp:
test.run_gyp(gypfile)
test.build(gypfile)
test.must_contain_all_lines(test.stdout(), check_for)
test = TestGyp.TestGyp(formats=['ninja', 'make'])
def TestTargetOverride():
expected = ['my_cc.py', 'my_cxx.py', 'FOO' ]
if test.format != 'ninja': # ninja just uses $CC / $CXX as linker.
expected.append('FOO_LINK')
# Check that CC, CXX and LD set target compiler
oldenv = os.environ.copy()
try:
os.environ['CC'] = 'python %s/my_cc.py FOO' % here
os.environ['CXX'] = 'python %s/my_cxx.py FOO' % here
os.environ['LINK'] = 'python %s/my_ld.py FOO_LINK' % here
CheckCompiler(test, 'compiler-exe.gyp', expected, True)
finally:
os.environ.clear()
os.environ.update(oldenv)
  # Run the same tests once the environment has been restored. The
  # generator should have embedded all the settings in the
  # project files, so the results should be the same.
CheckCompiler(test, 'compiler-exe.gyp', expected, False)
def TestTargetOverrideCompilerOnly():
  # Same test again, but with CC and CXX set and LINK left unset
oldenv = os.environ.copy()
try:
os.environ['CC'] = 'python %s/my_cc.py FOO' % here
os.environ['CXX'] = 'python %s/my_cxx.py FOO' % here
CheckCompiler(test, 'compiler-exe.gyp',
['my_cc.py', 'my_cxx.py', 'FOO'],
True)
finally:
os.environ.clear()
os.environ.update(oldenv)
  # Run the same tests once the environment has been restored. The
  # generator should have embedded all the settings in the
  # project files, so the results should be the same.
CheckCompiler(test, 'compiler-exe.gyp',
['my_cc.py', 'my_cxx.py', 'FOO'],
False)
def TestHostOverride():
expected = ['my_cc.py', 'my_cxx.py', 'HOST' ]
if test.format != 'ninja': # ninja just uses $CC / $CXX as linker.
expected.append('HOST_LINK')
  # Check that CC_host sets the host compiler
oldenv = os.environ.copy()
try:
os.environ['CC_host'] = 'python %s/my_cc.py HOST' % here
os.environ['CXX_host'] = 'python %s/my_cxx.py HOST' % here
os.environ['LINK_host'] = 'python %s/my_ld.py HOST_LINK' % here
CheckCompiler(test, 'compiler-host.gyp', expected, True)
finally:
os.environ.clear()
os.environ.update(oldenv)
  # Run the same tests once the environment has been restored. The
  # generator should have embedded all the settings in the
  # project files, so the results should be the same.
CheckCompiler(test, 'compiler-host.gyp', expected, False)
TestTargetOverride()
TestTargetOverrideCompilerOnly()
TestHostOverride()
test.pass_test()
| bsd-3-clause | 6,040,763,236,472,243,000 | 29.568807 | 75 | 0.667167 | false |
pfnet/chainercv | chainercv/links/model/faster_rcnn/region_proposal_network.py | 3 | 6706 | import numpy as np
import chainer
from chainer.backends import cuda
import chainer.functions as F
import chainer.links as L
from chainercv.links.model.faster_rcnn.utils.generate_anchor_base import \
generate_anchor_base
from chainercv.links.model.faster_rcnn.utils.proposal_creator import \
ProposalCreator
class RegionProposalNetwork(chainer.Chain):
"""Region Proposal Network introduced in Faster R-CNN.
This is Region Proposal Network introduced in Faster R-CNN [#]_.
This takes features extracted from images and propose
class agnostic bounding boxes around "objects".
.. [#] Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun. \
Faster R-CNN: Towards Real-Time Object Detection with \
Region Proposal Networks. NIPS 2015.
Args:
in_channels (int): The channel size of input.
mid_channels (int): The channel size of the intermediate tensor.
        ratios (list of floats): These are the ratios of width to height of
            the anchors.
        anchor_scales (list of numbers): These are the areas of the anchors.
            Those areas will be the product of the square of an element in
            :obj:`anchor_scales` and the original area of the reference
            window.
feat_stride (int): Stride size after extracting features from an
image.
initialW (callable): Initial weight value. If :obj:`None` then this
function uses Gaussian distribution scaled by 0.1 to
initialize weight.
May also be a callable that takes an array and edits its values.
        proposal_creator_params (dict): Key-value parameters for
:class:`~chainercv.links.model.faster_rcnn.ProposalCreator`.
.. seealso::
:class:`~chainercv.links.model.faster_rcnn.ProposalCreator`
"""
def __init__(
self, in_channels=512, mid_channels=512, ratios=[0.5, 1, 2],
anchor_scales=[8, 16, 32], feat_stride=16,
initialW=None,
proposal_creator_params={},
):
self.anchor_base = generate_anchor_base(
anchor_scales=anchor_scales, ratios=ratios)
self.feat_stride = feat_stride
self.proposal_layer = ProposalCreator(**proposal_creator_params)
n_anchor = self.anchor_base.shape[0]
super(RegionProposalNetwork, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D(
in_channels, mid_channels, 3, 1, 1, initialW=initialW)
self.score = L.Convolution2D(
mid_channels, n_anchor * 2, 1, 1, 0, initialW=initialW)
self.loc = L.Convolution2D(
mid_channels, n_anchor * 4, 1, 1, 0, initialW=initialW)
def forward(self, x, img_size, scales=None):
"""Forward Region Proposal Network.
Here are notations.
* :math:`N` is batch size.
* :math:`C` channel size of the input.
        * :math:`H` and :math:`W` are height and width of the input feature.
* :math:`A` is number of anchors assigned to each pixel.
Args:
x (~chainer.Variable): The Features extracted from images.
Its shape is :math:`(N, C, H, W)`.
img_size (tuple of ints): A tuple :obj:`height, width`,
which contains image size after scaling.
scales (tuple of floats): The amount of scaling done to each input
image during preprocessing.
Returns:
(~chainer.Variable, ~chainer.Variable, array, array, array):
This is a tuple of five following values.
* **rpn_locs**: Predicted bounding box offsets and scales for \
anchors. Its shape is :math:`(N, H W A, 4)`.
* **rpn_scores**: Predicted foreground scores for \
anchors. Its shape is :math:`(N, H W A, 2)`.
* **rois**: A bounding box array containing coordinates of \
proposal boxes. This is a concatenation of bounding box \
arrays from multiple images in the batch. \
Its shape is :math:`(R', 4)`. Given :math:`R_i` predicted \
bounding boxes from the :math:`i` th image, \
:math:`R' = \\sum _{i=1} ^ N R_i`.
* **roi_indices**: An array containing indices of images to \
which RoIs correspond to. Its shape is :math:`(R',)`.
* **anchor**: Coordinates of enumerated shifted anchors. \
Its shape is :math:`(H W A, 4)`.
"""
n, _, hh, ww = x.shape
if scales is None:
scales = [1.0] * n
if not isinstance(scales, chainer.utils.collections_abc.Iterable):
scales = [scales] * n
anchor = _enumerate_shifted_anchor(
self.xp.array(self.anchor_base), self.feat_stride, hh, ww)
n_anchor = anchor.shape[0] // (hh * ww)
h = F.relu(self.conv1(x))
rpn_locs = self.loc(h)
rpn_locs = rpn_locs.transpose((0, 2, 3, 1)).reshape((n, -1, 4))
rpn_scores = self.score(h)
rpn_scores = rpn_scores.transpose((0, 2, 3, 1))
rpn_fg_scores =\
rpn_scores.reshape((n, hh, ww, n_anchor, 2))[:, :, :, :, 1]
rpn_fg_scores = rpn_fg_scores.reshape((n, -1))
rpn_scores = rpn_scores.reshape((n, -1, 2))
rois = []
roi_indices = []
for i in range(n):
roi = self.proposal_layer(
rpn_locs[i].array, rpn_fg_scores[i].array, anchor, img_size,
scale=scales[i])
batch_index = i * self.xp.ones((len(roi),), dtype=np.int32)
rois.append(roi)
roi_indices.append(batch_index)
rois = self.xp.concatenate(rois, axis=0)
roi_indices = self.xp.concatenate(roi_indices, axis=0)
return rpn_locs, rpn_scores, rois, roi_indices, anchor
def _enumerate_shifted_anchor(anchor_base, feat_stride, height, width):
# Enumerate all shifted anchors:
#
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
xp = cuda.get_array_module(anchor_base)
shift_y = xp.arange(0, height * feat_stride, feat_stride)
shift_x = xp.arange(0, width * feat_stride, feat_stride)
shift_x, shift_y = xp.meshgrid(shift_x, shift_y)
shift = xp.stack((shift_y.ravel(), shift_x.ravel(),
shift_y.ravel(), shift_x.ravel()), axis=1)
A = anchor_base.shape[0]
K = shift.shape[0]
anchor = anchor_base.reshape((1, A, 4)) + \
shift.reshape((1, K, 4)).transpose((1, 0, 2))
anchor = anchor.reshape((K * A, 4)).astype(np.float32)
return anchor
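# --- Editorial addition: an illustrative sketch (not part of chainercv) showing
# how the shapes documented in forward() line up for a dummy feature map. It
# assumes the default constructor arguments, so A = 3 ratios * 3 scales = 9.
def _rpn_shape_demo():
    rpn = RegionProposalNetwork()
    x = np.zeros((1, 512, 14, 14), dtype=np.float32)  # N=1, C=512, H=W=14
    img_size = (14 * rpn.feat_stride, 14 * rpn.feat_stride)
    rpn_locs, rpn_scores, rois, roi_indices, anchor = rpn.forward(
        chainer.Variable(x), img_size, scales=[1.0])
    assert rpn_locs.shape == (1, 14 * 14 * 9, 4)
    assert rpn_scores.shape == (1, 14 * 14 * 9, 2)
    assert anchor.shape == (14 * 14 * 9, 4)
    # rois is the concatenation of per-image proposals; roi_indices maps each
    # proposal back to its image in the batch.
    return rois, roi_indices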
| mit | 2,033,087,664,001,716,500 | 39.642424 | 78 | 0.58977 | false |
agutieda/QuantEcon.py | quantecon/tests/test_matrix_eqn.py | 7 | 1050 | """
Tests for quantecon.matrix_eqn
"""
from __future__ import division
from collections import Counter
import unittest
import numpy as np
from numpy.testing import assert_allclose
from nose.plugins.attrib import attr
import pandas as pd
from quantecon import matrix_eqn as qme
def test_solve_discrete_lyapunov_zero():
'Simple test where X is all zeros'
A = np.eye(4) * .95
B = np.zeros((4, 4))
X = qme.solve_discrete_lyapunov(A, B)
assert_allclose(X, np.zeros((4, 4)))
def test_solve_discrete_lyapunov_B():
'Simple test where X is same as B'
A = np.ones((2, 2)) * .5
B = np.array([[.5, -.5], [-.5, .5]])
X = qme.solve_discrete_lyapunov(A, B)
assert_allclose(B, X)
def test_solve_discrete_lyapunov_complex():
'Complex test, A is companion matrix'
A = np.array([[0.5 + 0.3j, 0.1 + 0.1j],
[ 1, 0]])
B = np.eye(2)
X = qme.solve_discrete_lyapunov(A, B)
assert_allclose(np.dot(np.dot(A, X), A.conj().transpose()) - X, -B,
atol=1e-15)
| bsd-3-clause | -6,728,560,543,262,172,000 | 22.863636 | 71 | 0.602857 | false |
vv1133/home_web | django/db/backends/postgresql_psycopg2/creation.py | 60 | 3814 | from django.db.backends.creation import BaseDatabaseCreation
from django.db.backends.util import truncate_name
class DatabaseCreation(BaseDatabaseCreation):
# This dictionary maps Field objects to their associated PostgreSQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'serial',
'BinaryField': 'bytea',
'BooleanField': 'boolean',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'timestamp with time zone',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'inet',
'GenericIPAddressField': 'inet',
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer CHECK ("%(column)s" >= 0)',
'PositiveSmallIntegerField': 'smallint CHECK ("%(column)s" >= 0)',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
}
def sql_table_creation_suffix(self):
assert self.connection.settings_dict['TEST_COLLATION'] is None, "PostgreSQL does not support collation setting at database creation time."
if self.connection.settings_dict['TEST_CHARSET']:
return "WITH ENCODING '%s'" % self.connection.settings_dict['TEST_CHARSET']
return ''
def sql_indexes_for_field(self, model, f, style):
output = []
if f.db_index or f.unique:
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
tablespace = f.db_tablespace or model._meta.db_tablespace
if tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(tablespace)
if tablespace_sql:
tablespace_sql = ' ' + tablespace_sql
else:
tablespace_sql = ''
def get_index_sql(index_name, opclass=''):
return (style.SQL_KEYWORD('CREATE INDEX') + ' ' +
style.SQL_TABLE(qn(truncate_name(index_name,self.connection.ops.max_name_length()))) + ' ' +
style.SQL_KEYWORD('ON') + ' ' +
style.SQL_TABLE(qn(db_table)) + ' ' +
"(%s%s)" % (style.SQL_FIELD(qn(f.column)), opclass) +
"%s;" % tablespace_sql)
if not f.unique:
output = [get_index_sql('%s_%s' % (db_table, f.column))]
# Fields with database column types of `varchar` and `text` need
# a second index that specifies their operator class, which is
# needed when performing correct LIKE queries outside the
# C locale. See #12234.
db_type = f.db_type(connection=self.connection)
if db_type.startswith('varchar'):
output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
' varchar_pattern_ops'))
elif db_type.startswith('text'):
output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
' text_pattern_ops'))
return output
| bsd-3-clause | -2,422,059,111,492,486,700 | 48.532468 | 146 | 0.554536 | false |
tyagiarpit/servo | python/mach/mach/test/providers/conditions.py | 128 | 1324 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
from mach.decorators import (
CommandProvider,
Command,
)
def is_foo(cls):
"""Foo must be true"""
return cls.foo
def is_bar(cls):
"""Bar must be true"""
return cls.bar
@CommandProvider
class ConditionsProvider(object):
foo = True
bar = False
@Command('cmd_foo', category='testing', conditions=[is_foo])
def run_foo(self):
pass
@Command('cmd_bar', category='testing', conditions=[is_bar])
def run_bar(self):
pass
@Command('cmd_foobar', category='testing', conditions=[is_foo, is_bar])
def run_foobar(self):
pass
@CommandProvider
class ConditionsContextProvider(object):
def __init__(self, context):
self.foo = context.foo
self.bar = context.bar
@Command('cmd_foo_ctx', category='testing', conditions=[is_foo])
def run_foo(self):
pass
@Command('cmd_bar_ctx', category='testing', conditions=[is_bar])
def run_bar(self):
pass
@Command('cmd_foobar_ctx', category='testing', conditions=[is_foo, is_bar])
def run_foobar(self):
pass
| mpl-2.0 | -497,926,431,847,197,300 | 23.981132 | 79 | 0.639728 | false |
WimPessemier/uaf | unittests/pyuaftests/client/client_discovery.py | 3 | 2861 | import pyuaf
import unittest
from pyuaf.util.unittesting import parseArgs, TestResults
import thread, time
ARGS = parseArgs()
def suite(args=None):
if args is not None:
global ARGS
ARGS = args
return unittest.TestLoader().loadTestsFromTestCase(ClientDiscoveryTest)
def testParallel(c, results):
try:
c.findServersNow()
except pyuaf.util.errors.DiscoveryError:
pass
except pyuaf.util.errors.InvalidRequestError:
pass
except Exception, e:
print("Unexpected error for this test: %s %s" %(type(e), e))
results.fail()
finally:
results.finish()
def isDemoServerFound(descriptions):
for desc in descriptions:
if desc.applicationUri == ARGS.demo_server_uri:
return True
return False
class ClientDiscoveryTest(unittest.TestCase):
def setUp(self):
self.client = pyuaf.client.Client()
# create a new ClientSettings instance and add the localhost to the URLs to discover
self.settings = pyuaf.client.settings.ClientSettings()
self.settings.discoveryUrls.append(ARGS.demo_url)
self.settings.applicationName = "client"
self.settings.logToStdOutLevel = ARGS.loglevel
def test_client_Client_serversFound_after_setClientSettings(self):
# update the settings of the client, and therefore provoke a new FindServers invocation
self.client.setClientSettings(self.settings)
self.assertTrue( isDemoServerFound(self.client.serversFound()),
"The demo server (uaservercpp) could not be discovered! "
"Make sure that it's running and properly configured!")
def test_client_Client_findServersNow_100_times_in_parallel(self):
testResults = TestResults(100)
for i in xrange(testResults.total()):
thread.start_new_thread(testParallel, (self.client, testResults))
# wait until all tests have finished
t_timeout = time.time() + 5.0
while time.time() < t_timeout and testResults.finished() < testResults.total():
time.sleep(0.1)
self.assertEqual( testResults.failed() , 0 )
def test_client_Client_setClientSettings_without_discoveryUrls(self):
self.assertFalse( isDemoServerFound(self.client.serversFound()) )
def tearDown(self):
# delete the client instances manually (now!) instead of letting them be garbage collected
        # automatically (which may happen during another test, and which may cause logging output
# of the destruction to be mixed with the logging output of the other test).
del self.client
if __name__ == '__main__':
unittest.TextTestRunner(verbosity = ARGS.verbosity).run(suite())
| lgpl-3.0 | 4,160,934,910,780,872,700 | 32.658824 | 99 | 0.655365 | false |
Endika/django | tests/update/models.py | 282 | 1196 | """
Tests for the update() queryset method that allows in-place, multi-object
updates.
"""
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class DataPoint(models.Model):
name = models.CharField(max_length=20)
value = models.CharField(max_length=20)
another_value = models.CharField(max_length=20, blank=True)
def __str__(self):
return six.text_type(self.name)
@python_2_unicode_compatible
class RelatedPoint(models.Model):
name = models.CharField(max_length=20)
data = models.ForeignKey(DataPoint, models.CASCADE)
def __str__(self):
return six.text_type(self.name)
class A(models.Model):
x = models.IntegerField(default=10)
class B(models.Model):
a = models.ForeignKey(A, models.CASCADE)
y = models.IntegerField(default=10)
class C(models.Model):
y = models.IntegerField(default=10)
class D(C):
a = models.ForeignKey(A, models.CASCADE)
class Foo(models.Model):
target = models.CharField(max_length=10, unique=True)
class Bar(models.Model):
foo = models.ForeignKey(Foo, models.CASCADE, to_field='target')
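# --- Editorial addition: an illustrative sketch (not part of the test models)
# of the in-place, multi-object update these models exercise. It assumes a
# configured database; in the real tests this logic lives in the test cases.
def _update_example():
    DataPoint.objects.create(name="d1", value="apple")
    DataPoint.objects.create(name="d2", value="apple")
    # update() issues a single SQL UPDATE for every row in the queryset and
    # returns the number of rows matched (2 here).
    return DataPoint.objects.filter(value="apple").update(value="banana")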
| bsd-3-clause | 3,164,474,899,840,068,000 | 22 | 73 | 0.710702 | false |
ixs/func | func/yaml/load.py | 12 | 10649 | """
pyyaml legacy
Copyright (c) 2001 Steve Howell and Friends; All Rights Reserved
(see open source license information in docs/ directory)
"""
import re, string
from implicit import convertImplicit
from inline import InlineTokenizer
from klass import DefaultResolver
from stream import YamlLoaderException, FileStream, StringStream, NestedDocs
try:
iter(list()) # is iter supported by this version of Python?
except:
# XXX - Python 2.1 does not support iterators
class StopIteration: pass
class iter:
def __init__(self,parser):
self._docs = []
try:
while 1:
self._docs.append(parser.next())
except StopIteration: pass
self._idx = 0
def __len__(self): return len(self._docs)
def __getitem__(self,idx): return self._docs[idx]
def next(self):
if self._idx < len(self._docs):
ret = self._docs[self._idx]
self._idx = self._idx + 1
return ret
raise StopIteration
def loadFile(filename, typeResolver=None):
return loadStream(FileStream(filename),typeResolver)
def load(str, typeResolver=None):
return loadStream(StringStream(str), typeResolver)
def l(str): return load(str).next()
def loadStream(stream, typeResolver):
return iter(Parser(stream, typeResolver))
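# --- Editorial addition: an illustrative sketch of the loaders defined above.
# l() returns the first document; load() iterates over every document in the
# stream. Exact values depend on the legacy parser's implicit typing (e.g.
# '1' should come back as the integer 1).
def _load_example():
    single = l("a: 1\nb: [x, y]")                    # roughly {'a': 1, 'b': ['x', 'y']}
    docs = [doc for doc in load("--- 1\n--- 2\n")]   # roughly [1, 2]
    return single, docs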
def tryProductions(productions, value):
for production in productions:
results = production(value)
if results:
(ok, result) = results
if ok:
return (1, result)
def dumpDictionary(): return {}
class Parser:
def __init__(self, stream, typeResolver=None):
try:
self.dictionary = dict
except:
self.dictionary = dumpDictionary
self.nestedDocs = NestedDocs(stream)
self.aliases = {}
if typeResolver:
self.typeResolver = typeResolver
else:
self.typeResolver = DefaultResolver()
def error(self, msg):
self.nestedDocs.error(msg, self.line)
def nestPop(self):
line = self.nestedDocs.pop()
if line is not None:
self.line = line
return 1
def value(self, indicator):
return getToken(indicator+"\s*(.*)", self.line)
    def getNextDocument(self): raise Exception("getNextDocument() deprecated--use next()")
def next(self):
line = self.nestedDocs.popDocSep()
indicator = getIndicator(line)
if indicator:
return self.parse_value(indicator)
if line:
self.nestedDocs.nestToNextLine()
return self.parseLines()
raise StopIteration
def __iter__(self): return self
def parseLines(self):
peekLine = self.nestedDocs.peek()
if peekLine:
if re.match("\s*-", peekLine):
return self.parse_collection([], self.parse_seq_line)
else:
return self.parse_collection(self.dictionary(), self.parse_map_line)
raise StopIteration
def parse_collection(self, items, lineParser):
while self.nestPop():
if self.line:
lineParser(items)
return items
def parse_seq_line(self, items):
value = self.value("-")
if value is not None:
items.append(self.parse_seq_value(value))
else:
self.error("missing '-' for seq")
def parse_map_line(self, items):
if (self.line == '?'):
self.parse_map_line_nested(items)
else:
self.parse_map_line_simple(items, self.line)
def parse_map_line_nested(self, items):
self.nestedDocs.nestToNextLine()
key = self.parseLines()
if self.nestPop():
value = self.value(':')
if value is not None:
items[tuple(key)] = self.parse_value(value)
return
self.error("key has no value for nested map")
def parse_map_line_simple(self, items, line):
map_item = self.key_value(line)
if map_item:
(key, value) = map_item
key = convertImplicit(key)
if items.has_key(key):
self.error("Duplicate key "+key)
items[key] = self.parse_value(value)
else:
self.error("bad key for map")
def is_map(self, value):
# XXX - need real tokenizer
if len(value) == 0:
return 0
if value[0] == "'":
return 0
if re.search(':(\s|$)', value):
return 1
def parse_seq_value(self, value):
if self.is_map(value):
return self.parse_compressed_map(value)
else:
return self.parse_value(value)
def parse_compressed_map(self, value):
items = self.dictionary()
line = self.line
token = getToken("(\s*-\s*)", line)
self.nestedDocs.nestBySpecificAmount(len(token))
self.parse_map_line_simple(items, value)
return self.parse_collection(items, self.parse_map_line)
def parse_value(self, value):
(alias, value) = self.testForRepeatOfAlias(value)
if alias:
return value
(alias, value) = self.testForAlias(value)
value = self.parse_unaliased_value(value)
if alias:
self.aliases[alias] = value
return value
def parse_unaliased_value(self, value):
match = re.match(r"(!\S*)(.*)", value)
if match:
(url, value) = match.groups()
value = self.parse_untyped_value(value)
if url[:2] == '!!':
return self.typeResolver.resolveType(value, url)
else:
# XXX - allows syntax, but ignores it
return value
return self.parse_untyped_value(value)
def parseInlineArray(self, value):
if re.match("\s*\[", value):
return self.parseInline([], value, ']',
self.parseInlineArrayItem)
def parseInlineHash(self, value):
if re.match("\s*{", value):
return self.parseInline(self.dictionary(), value, '}',
self.parseInlineHashItem)
def parseInlineArrayItem(self, result, token):
return result.append(convertImplicit(token))
def parseInlineHashItem(self, result, token):
(key, value) = self.key_value(token)
result[key] = value
def parseInline(self, result, value, end_marker, itemMethod):
tokenizer = InlineTokenizer(value)
tokenizer.next()
while 1:
token = tokenizer.next()
if token == end_marker:
break
itemMethod(result, token)
return (1, result)
def parseSpecial(self, value):
productions = [
self.parseMultiLineScalar,
self.parseInlineHash,
self.parseInlineArray,
]
return tryProductions(productions, value)
def parse_untyped_value(self, value):
parse = self.parseSpecial(value)
if parse:
(ok, data) = parse
return data
token = getToken("(\S.*)", value)
if token:
lines = [token] + \
pruneTrailingEmpties(self.nestedDocs.popNestedLines())
return convertImplicit(joinLines(lines))
else:
self.nestedDocs.nestToNextLine()
return self.parseLines()
def parseNative(self, value):
return (1, convertImplicit(value))
def parseMultiLineScalar(self, value):
if value == '>':
return (1, self.parseFolded())
elif value == '|':
return (1, joinLiteral(self.parseBlock()))
elif value == '|+':
return (1, joinLiteral(self.unprunedBlock()))
def parseFolded(self):
data = self.parseBlock()
i = 0
resultString = ''
while i < len(data)-1:
resultString = resultString + data[i]
resultString = resultString + foldChar(data[i], data[i+1])
i = i + 1
return resultString + data[-1] + "\n"
def unprunedBlock(self):
self.nestedDocs.nestToNextLine()
data = []
while self.nestPop():
data.append(self.line)
return data
def parseBlock(self):
return pruneTrailingEmpties(self.unprunedBlock())
def testForAlias(self, value):
match = re.match("&(\S*)\s*(.*)", value)
if match:
return match.groups()
return (None, value)
def testForRepeatOfAlias(self, value):
match = re.match("\*(\S+)", value)
if match:
alias = match.groups()[0]
if self.aliases.has_key(alias):
return (alias, self.aliases[alias])
else:
self.error("Unknown alias")
return (None, value)
def key_value(self, str):
if str[-1] == ' ':
self.error("Trailing spaces not allowed without quotes.")
# XXX This allows mis-balanced " vs. ' stuff
match = re.match("[\"'](.+)[\"']\s*:\s*(.*)", str)
if match:
(key, value) = match.groups()
return (key, value)
match = re.match("(.+?)\s*:\s*(.*)", str)
if match:
(key, value) = match.groups()
if len(value) and value[0] == '#':
value = ''
return (key, value)
def getToken(regex, value):
match = re.search(regex, value)
if match:
return match.groups()[0]
def pruneTrailingEmpties(data):
while len(data) > 0 and data[-1] == '':
data = data[:-1]
return data
def foldChar(line1, line2):
if re.match("^\S", line1) and re.match("^\S", line2):
return " "
return "\n"
def getIndicator(line):
if line:
header = r"(#YAML:\d+\.\d+\s*){0,1}"
match = re.match("--- "+header+"(\S*.*)", line)
if match:
return match.groups()[-1]
def joinLines(lines):
result = ''
for line in lines[:-1]:
if line[-1] == '\\':
result = result + line[:-1]
else:
result = result + line + " "
return result + lines[-1]
def joinLiteral(data):
return string.join(data,"\n") + "\n"
| gpl-2.0 | 1,194,643,404,956,306,200 | 29.978979 | 84 | 0.533947 | false |
Itxaka/st2 | st2actions/st2actions/runners/python_action_wrapper.py | 6 | 4417 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import json
import argparse
from st2common import log as logging
from st2actions import config
from st2actions.runners.pythonrunner import Action
from st2common.util import loader as action_loader
from st2common.util.config_parser import ContentPackConfigParser
from st2common.constants.action import ACTION_OUTPUT_RESULT_DELIMITER
__all__ = [
'PythonActionWrapper'
]
LOG = logging.getLogger(__name__)
class PythonActionWrapper(object):
def __init__(self, pack, file_path, parameters=None, parent_args=None):
"""
:param pack: Name of the pack this action belongs to.
:type pack: ``str``
:param file_path: Path to the action module.
:type file_path: ``str``
:param parameters: action parameters.
:type parameters: ``dict`` or ``None``
:param parent_args: Command line arguments passed to the parent process.
        :type parent_args: ``list``
"""
self._pack = pack
self._file_path = file_path
self._parameters = parameters or {}
self._parent_args = parent_args or []
try:
config.parse_args(args=self._parent_args)
except Exception:
pass
def run(self):
action = self._get_action_instance()
output = action.run(**self._parameters)
# Print output to stdout so the parent can capture it
sys.stdout.write(ACTION_OUTPUT_RESULT_DELIMITER)
print_output = None
try:
print_output = json.dumps(output)
except:
print_output = str(output)
sys.stdout.write(print_output + '\n')
sys.stdout.write(ACTION_OUTPUT_RESULT_DELIMITER)
def _get_action_instance(self):
actions_cls = action_loader.register_plugin(Action, self._file_path)
action_cls = actions_cls[0] if actions_cls and len(actions_cls) > 0 else None
if not action_cls:
raise Exception('File "%s" has no action or the file doesn\'t exist.' %
(self._file_path))
config_parser = ContentPackConfigParser(pack_name=self._pack)
config = config_parser.get_action_config(action_file_path=self._file_path)
if config:
LOG.info('Using config "%s" for action "%s"' % (config.file_path,
self._file_path))
return action_cls(config=config.config)
else:
LOG.info('No config found for action "%s"' % (self._file_path))
return action_cls(config={})
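# --- Editorial addition: an illustrative sketch of how this wrapper is driven.
# The pack name and action file path below are hypothetical placeholders.
#
# From the command line (as the Python runner would spawn it):
#   python python_action_wrapper.py --pack=examples \
#       --file-path=/opt/stackstorm/packs/examples/actions/my_action.py \
#       --parameters='{"greeting": "hello"}'
#
# Or programmatically, mirroring the __main__ block below:
#   wrapper = PythonActionWrapper(pack='examples',
#                                 file_path='/opt/stackstorm/packs/examples/actions/my_action.py',
#                                 parameters={'greeting': 'hello'})
#   wrapper.run()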
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Python action runner process wrapper')
parser.add_argument('--pack', required=True,
help='Name of the pack this action belongs to')
parser.add_argument('--file-path', required=True,
help='Path to the action module')
parser.add_argument('--parameters', required=False,
help='Serialized action parameters')
parser.add_argument('--parent-args', required=False,
help='Command line arguments passed to the parent process')
args = parser.parse_args()
parameters = args.parameters
parameters = json.loads(parameters) if parameters else {}
parent_args = json.loads(args.parent_args) if args.parent_args else []
assert isinstance(parent_args, list)
obj = PythonActionWrapper(pack=args.pack,
file_path=args.file_path,
parameters=parameters,
parent_args=parent_args)
obj.run()
| apache-2.0 | -7,786,053,132,188,463,000 | 37.077586 | 88 | 0.635726 | false |
lupyuen/RaspberryPiImage | home/pi/GrovePi/Software/Python/others/temboo/Library/Amazon/IAM/ListVirtualMFADevices.py | 5 | 4701 | # -*- coding: utf-8 -*-
###############################################################################
#
# ListVirtualMFADevices
# Lists the virtual MFA devices under the AWS account.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListVirtualMFADevices(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ListVirtualMFADevices Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ListVirtualMFADevices, self).__init__(temboo_session, '/Library/Amazon/IAM/ListVirtualMFADevices')
def new_input_set(self):
return ListVirtualMFADevicesInputSet()
def _make_result_set(self, result, path):
return ListVirtualMFADevicesResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListVirtualMFADevicesChoreographyExecution(session, exec_id, path)
class ListVirtualMFADevicesInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ListVirtualMFADevices
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AWSAccessKeyId(self, value):
"""
Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.)
"""
super(ListVirtualMFADevicesInputSet, self)._set_input('AWSAccessKeyId', value)
def set_AWSSecretKeyId(self, value):
"""
Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.)
"""
super(ListVirtualMFADevicesInputSet, self)._set_input('AWSSecretKeyId', value)
def set_AssignmentStatus(self, value):
"""
        Set the value of the AssignmentStatus input for this Choreo. ((optional, string) Filters by whether the device is assigned or unassigned to a specific user. Valid values: "Unassigned", "Assigned" or "Any" (default - both assigned and unassigned devices).)
"""
super(ListVirtualMFADevicesInputSet, self)._set_input('AssignmentStatus', value)
def set_Marker(self, value):
"""
Set the value of the Marker input for this Choreo. ((optional, string) Used for pagination to indicate the starting point of the results to return.)
"""
super(ListVirtualMFADevicesInputSet, self)._set_input('Marker', value)
def set_MaxItems(self, value):
"""
Set the value of the MaxItems input for this Choreo. ((optional, integer) Used for pagination to limit the number of results returned. Defaults to 100.)
"""
super(ListVirtualMFADevicesInputSet, self)._set_input('MaxItems', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are "xml" (the default) and "json".)
"""
super(ListVirtualMFADevicesInputSet, self)._set_input('ResponseFormat', value)
class ListVirtualMFADevicesResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ListVirtualMFADevices Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Amazon.)
"""
return self._output.get('Response', None)
class ListVirtualMFADevicesChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListVirtualMFADevicesResultSet(response, path)
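# --- Editorial addition: an illustrative sketch of typical Choreo usage. The
# credentials are placeholders, and the TembooSession import path and
# execute_with_results() call are assumed from the Temboo SDK conventions
# rather than shown in this generated file.
#
#   from temboo.core.session import TembooSession
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = ListVirtualMFADevices(session)
#   inputs = choreo.new_input_set()
#   inputs.set_AWSAccessKeyId('AWS_ACCESS_KEY_ID')
#   inputs.set_AWSSecretKeyId('AWS_SECRET_KEY')
#   inputs.set_ResponseFormat('json')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())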
| apache-2.0 | -2,138,961,115,561,622,000 | 42.934579 | 267 | 0.689215 | false |
wbond/subversion | contrib/hook-scripts/case-insensitive.py | 3 | 4628 | #!/usr/bin/env python
# Licensed under the same terms as Subversion.
# A pre-commit hook to detect case-insensitive filename clashes.
#
# What this script does:
# - Detects new paths that 'clash' with existing, or other new, paths.
# - Ignores existings paths that already 'clash'
# - Exits with an error code, and a diagnostic on stderr, if 'clashes'
# are detected.
#
# How it does it:
# - Get a list of changed paths.
# - From that list extract the new paths that represent adds or replaces.
# - For each new path:
# - Split the path into a directory and a name.
# - Get the names of all the entries in the version of the directory
# within the txn.
# - Compare the canonical new name with each canonical entry name.
# - If the canonical names match and the pristine names do not match
# then we have a 'clash'.
#
# Notes:
# - All the paths from the Subversion filesystem bindings are encoded
# in UTF-8 and the separator is '/' on all OS's.
# - The canonical form determines what constitutes a 'clash', at present
# a simple 'lower case' is used. That's probably not identical to the
# behaviour of Windows or OSX, but it might be good enough.
# - Hooks get invoked with an empty environment so this script explicitly
# sets a locale; make sure it is a sensible value.
# - If used with Apache the 'clash' diagnostic must be ASCII irrespective
# of the locale, see the 'Force' comment near the end of the script for
# one way to achieve this.
#
# How to call it:
#
# On a Unix system put this script in the hooks directory and add this to
# the pre-commit script:
#
# $REPOS/hooks/case-insensitive.py "$REPOS" "$TXN" || exit 1
#
# On a windows machine add this to pre-commit.bat:
#
# python <path-to-script>\case-insensitive.py %1 %2
# if errorlevel 1 goto :ERROR
# exit 0
# :ERROR
# echo Error found in commit 1>&2
# exit 1
#
# Make sure the python bindings are installed and working on Windows. The
# zip file can be downloaded from the Subversion site. The bindings depend
# on dll's shipped as part of the Subversion binaries, if the script cannot
# load the _fs dll it is because it cannot find the other Subversion dll's.
#
# $HeadURL$
# $LastChangedRevision$
# $LastChangedDate$
# $LastChangedBy$
import sys, locale
sys.path.append('/usr/local/subversion/lib/svn-python')
from svn import repos, fs
locale.setlocale(locale.LC_ALL, 'en_GB')
def canonicalize(path):
return path.decode('utf-8').lower().encode('utf-8')
def get_new_paths(txn_root):
new_paths = []
for path, change in fs.paths_changed(txn_root).iteritems():
if (change.change_kind == fs.path_change_add
or change.change_kind == fs.path_change_replace):
new_paths.append(path)
return new_paths
def split_path(path):
slash = path.rindex('/')
if (slash == 0):
return '/', path[1:]
return path[:slash], path[slash+1:]
def join_path(dir, name):
if (dir == '/'):
return '/' + name
return dir + '/' + name
def ensure_names(path, names, txn_root):
if (not names.has_key(path)):
names[path] = []
for name, dirent in fs.dir_entries(txn_root, path).iteritems():
names[path].append([canonicalize(name), name])
names = {} # map of: key - path, value - list of two element lists of names
clashes = {} # map of: key - path, value - map of: key - path, value - dummy
native = locale.getlocale()[1]
if not native: native = 'ascii'
repos_handle = repos.open(sys.argv[1].decode(native).encode('utf-8'))
fs_handle = repos.fs(repos_handle)
txn_handle = fs.open_txn(fs_handle, sys.argv[2].decode(native).encode('utf-8'))
txn_root = fs.txn_root(txn_handle)
new_paths = get_new_paths(txn_root)
for path in new_paths:
dir, name = split_path(path)
canonical = canonicalize(name)
ensure_names(dir, names, txn_root)
for name_pair in names[dir]:
if (name_pair[0] == canonical and name_pair[1] != name):
canonical_path = join_path(dir, canonical)
if (not clashes.has_key(canonical_path)):
clashes[canonical_path] = {}
clashes[canonical_path][join_path(dir, name)] = True
clashes[canonical_path][join_path(dir, name_pair[1])] = True
if (clashes):
# native = 'ascii' # Force ASCII output for Apache
for canonical_path in clashes.iterkeys():
sys.stderr.write(u'Clash:'.encode(native))
for path in clashes[canonical_path].iterkeys():
sys.stderr.write(u' \''.encode(native) +
str(path).decode('utf-8').encode(native, 'replace') +
u'\''.encode(native))
sys.stderr.write(u'\n'.encode(native))
sys.exit(1)
| apache-2.0 | -6,640,162,807,570,445,000 | 35.440945 | 79 | 0.671997 | false |
tlakshman26/cinder-bug-fix-volume-conversion-full | cinder/objects/volume.py | 2 | 13218 | # Copyright 2015 SimpliVity Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_versionedobjects import fields
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import base
from cinder import utils
CONF = cfg.CONF
OPTIONAL_FIELDS = ['metadata', 'admin_metadata',
'volume_type', 'volume_attachment']
LOG = logging.getLogger(__name__)
@base.CinderObjectRegistry.register
class Volume(base.CinderPersistentObject, base.CinderObject,
base.CinderObjectDictCompat, base.CinderComparableObject):
# Version 1.0: Initial version
# Version 1.1: Added metadata, admin_metadata, volume_attachment, and
# volume_type
VERSION = '1.1'
fields = {
'id': fields.UUIDField(),
'_name_id': fields.UUIDField(nullable=True),
'ec2_id': fields.UUIDField(nullable=True),
'user_id': fields.UUIDField(nullable=True),
'project_id': fields.UUIDField(nullable=True),
'snapshot_id': fields.UUIDField(nullable=True),
'host': fields.StringField(nullable=True),
'size': fields.IntegerField(),
'availability_zone': fields.StringField(),
'status': fields.StringField(),
'attach_status': fields.StringField(),
'migration_status': fields.StringField(nullable=True),
'scheduled_at': fields.DateTimeField(nullable=True),
'launched_at': fields.DateTimeField(nullable=True),
'terminated_at': fields.DateTimeField(nullable=True),
'display_name': fields.StringField(nullable=True),
'display_description': fields.StringField(nullable=True),
'provider_id': fields.UUIDField(nullable=True),
'provider_location': fields.StringField(nullable=True),
'provider_auth': fields.StringField(nullable=True),
'provider_geometry': fields.StringField(nullable=True),
'volume_type_id': fields.UUIDField(nullable=True),
'source_volid': fields.UUIDField(nullable=True),
'encryption_key_id': fields.UUIDField(nullable=True),
'consistencygroup_id': fields.UUIDField(nullable=True),
'deleted': fields.BooleanField(default=False),
'bootable': fields.BooleanField(default=False),
'multiattach': fields.BooleanField(default=False),
'replication_status': fields.StringField(nullable=True),
'replication_extended_status': fields.StringField(nullable=True),
'replication_driver_data': fields.StringField(nullable=True),
'previous_status': fields.StringField(nullable=True),
'metadata': fields.DictOfStringsField(nullable=True),
'admin_metadata': fields.DictOfStringsField(nullable=True),
'volume_type': fields.ObjectField('VolumeType', nullable=True),
'volume_attachment': fields.ListOfObjectsField('VolumeAttachment',
nullable=True),
}
# NOTE(thangp): obj_extra_fields is used to hold properties that are not
# usually part of the model
obj_extra_fields = ['name', 'name_id']
@property
def name_id(self):
return self.id if not self._name_id else self._name_id
@name_id.setter
def name_id(self, value):
self._name_id = value
@property
def name(self):
return CONF.volume_name_template % self.name_id
def __init__(self, *args, **kwargs):
super(Volume, self).__init__(*args, **kwargs)
self._orig_metadata = {}
self._orig_admin_metadata = {}
self._reset_metadata_tracking()
def obj_reset_changes(self, fields=None):
super(Volume, self).obj_reset_changes(fields)
self._reset_metadata_tracking(fields=fields)
def _reset_metadata_tracking(self, fields=None):
if fields is None or 'metadata' in fields:
self._orig_metadata = (dict(self.metadata)
if 'metadata' in self else {})
if fields is None or 'admin_metadata' in fields:
self._orig_admin_metadata = (dict(self.admin_metadata)
if 'admin_metadata' in self
else {})
def obj_what_changed(self):
changes = super(Volume, self).obj_what_changed()
if 'metadata' in self and self.metadata != self._orig_metadata:
changes.add('metadata')
if ('admin_metadata' in self and
self.admin_metadata != self._orig_admin_metadata):
changes.add('admin_metadata')
return changes
def obj_make_compatible(self, primitive, target_version):
"""Make an object representation compatible with a target version."""
target_version = utils.convert_version_to_tuple(target_version)
@staticmethod
def _from_db_object(context, volume, db_volume, expected_attrs=None):
if expected_attrs is None:
expected_attrs = []
for name, field in volume.fields.items():
if name in OPTIONAL_FIELDS:
continue
value = db_volume.get(name)
if isinstance(field, fields.IntegerField):
value = value or 0
volume[name] = value
# Get data from db_volume object that was queried by joined query
# from DB
if 'metadata' in expected_attrs:
volume.metadata = {}
metadata = db_volume.get('volume_metadata', [])
if metadata:
volume.metadata = {item['key']: item['value']
for item in metadata}
if 'admin_metadata' in expected_attrs:
volume.admin_metadata = {}
metadata = db_volume.get('volume_admin_metadata', [])
if metadata:
volume.admin_metadata = {item['key']: item['value']
for item in metadata}
if 'volume_type' in expected_attrs:
db_volume_type = db_volume.get('volume_type')
if db_volume_type:
volume.volume_type = objects.VolumeType._from_db_object(
context, objects.VolumeType(), db_volume_type,
expected_attrs='extra_specs')
if 'volume_attachment' in expected_attrs:
attachments = base.obj_make_list(
context, objects.VolumeAttachmentList(context),
objects.VolumeAttachment,
db_volume.get('volume_attachment'))
volume.volume_attachment = attachments.objects
volume._context = context
volume.obj_reset_changes()
return volume
@base.remotable_classmethod
def get_by_id(cls, context, id):
db_volume = db.volume_get(context, id)
expected_attrs = ['admin_metadata', 'metadata']
return cls._from_db_object(context, cls(context), db_volume,
expected_attrs=expected_attrs)
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason=_('already created'))
updates = self.cinder_obj_get_changes()
db_volume = db.volume_create(self._context, updates)
self._from_db_object(self._context, self, db_volume)
@base.remotable
def refresh(self):
current = self.__class__.get_by_id(self._context, self.id)
for field in self.fields:
# NOTE(thangp): Only update attributes that are already set. We
# do not want to unexpectedly trigger a lazy-load.
if self.obj_attr_is_set(field):
if self[field] != current[field]:
self[field] = current[field]
self.obj_reset_changes()
@base.remotable
def save(self):
updates = self.cinder_obj_get_changes()
if updates:
if 'metadata' in updates:
# Metadata items that are not specified in the
# self.metadata will be deleted
metadata = updates.pop('metadata', None)
self.metadata = db.volume_metadata_update(self._context,
self.id, metadata,
True)
if self._context.is_admin and 'admin_metadata' in updates:
metadata = updates.pop('admin_metadata', None)
self.admin_metadata = db.volume_admin_metadata_update(
self._context, self.id, metadata, True)
db.volume_update(self._context, self.id, updates)
self.obj_reset_changes()
@base.remotable
def destroy(self):
with self.obj_as_admin():
db.volume_destroy(self._context, self.id)
def obj_load_attr(self, attrname):
if attrname not in OPTIONAL_FIELDS:
raise exception.ObjectActionError(
action='obj_load_attr',
reason=_('attribute %s not lazy-loadable') % attrname)
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
if attrname == 'metadata':
self.metadata = db.volume_metadata_get(self._context, self.id)
elif attrname == 'admin_metadata':
with self.obj_as_admin():
self.admin_metadata = db.volume_admin_metadata_get(
self._context, self.id)
elif attrname == 'volume_type':
self.volume_type = objects.VolumeType.get_by_id(
self._context, self.volume_type_id)
elif attrname == 'volume_attachment':
attachments = (
objects.VolumeAttachmentList.get_all_by_volume_id(
self._context, self.id))
self.volume_attachment = attachments.objects
self.obj_reset_changes(fields=[attrname])
def delete_metadata_key(self, key):
db.volume_metadata_delete(self._context, self.id, key)
md_was_changed = 'metadata' in self.obj_what_changed()
del self.metadata[key]
self._orig_metadata.pop(key, None)
if not md_was_changed:
self.obj_reset_changes(['metadata'])
@base.CinderObjectRegistry.register
class VolumeList(base.ObjectListBase, base.CinderObject):
VERSION = '1.1'
fields = {
'objects': fields.ListOfObjectsField('Volume'),
}
child_versions = {
'1.0': '1.0',
'1.1': '1.1',
}
@base.remotable_classmethod
def get_all(cls, context, marker, limit, sort_keys=None, sort_dirs=None,
filters=None, offset=None):
volumes = db.volume_get_all(context, marker, limit,
sort_keys=sort_keys, sort_dirs=sort_dirs,
filters=filters, offset=offset)
expected_attrs = ['admin_metadata', 'metadata']
return base.obj_make_list(context, cls(context), objects.Volume,
volumes, expected_attrs=expected_attrs)
@base.remotable_classmethod
def get_all_by_host(cls, context, host, filters=None):
volumes = db.volume_get_all_by_host(context, host, filters)
expected_attrs = ['admin_metadata', 'metadata']
return base.obj_make_list(context, cls(context), objects.Volume,
volumes, expected_attrs=expected_attrs)
@base.remotable_classmethod
def get_all_by_group(cls, context, group_id, filters=None):
volumes = db.volume_get_all_by_group(context, group_id, filters)
expected_attrs = ['admin_metadata', 'metadata']
return base.obj_make_list(context, cls(context), objects.Volume,
volumes, expected_attrs=expected_attrs)
@base.remotable_classmethod
def get_all_by_project(cls, context, project_id, marker, limit,
sort_keys=None, sort_dirs=None, filters=None,
offset=None):
volumes = db.volume_get_all_by_project(context, project_id, marker,
limit, sort_keys=sort_keys,
sort_dirs=sort_dirs,
filters=filters, offset=offset)
expected_attrs = ['admin_metadata', 'metadata']
return base.obj_make_list(context, cls(context), objects.Volume,
volumes, expected_attrs=expected_attrs)
| apache-2.0 | -8,163,795,531,553,758,000 | 40.17757 | 78 | 0.591996 | false |
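The _orig_metadata bookkeeping in the Volume object above (obj_reset_changes snapshots the metadata dicts, obj_what_changed compares against those snapshots) can be illustrated outside of cinder. The class below is a hypothetical, standalone sketch of that snapshot-and-compare pattern only; it is not part of cinder and uses no cinder APIs.
class MetadataTracker(object):
    """Standalone illustration of snapshot-and-compare change tracking."""
    def __init__(self, metadata=None):
        self.metadata = dict(metadata or {})
        self._orig_metadata = {}
        self.reset_changes()
    def reset_changes(self):
        # Snapshot the current metadata so later edits can be detected.
        self._orig_metadata = dict(self.metadata)
    def what_changed(self):
        changes = set()
        if self.metadata != self._orig_metadata:
            changes.add('metadata')
        return changes
if __name__ == '__main__':
    tracker = MetadataTracker({'readonly': 'False'})
    assert tracker.what_changed() == set()
    tracker.metadata['readonly'] = 'True'
    assert tracker.what_changed() == set(['metadata'])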
leighpauls/k2cro4 | third_party/python_26/Lib/site-packages/win32/scripts/regsetup.py | 21 | 19756 | # A tool to setup the Python registry.
class error(Exception):
pass
import sys # at least we can count on this!
def FileExists(fname):
"""Check if a file exists. Returns true or false.
"""
import os
try:
os.stat(fname)
return 1
except os.error, details:
return 0
def IsPackageDir(path, packageName, knownFileName):
"""Given a path, a ni package name, and possibly a known file name in
the root of the package, see if this path is good.
"""
import os
if knownFileName is None:
knownFileName = "."
return FileExists(os.path.join(os.path.join(path, packageName),knownFileName))
def IsDebug():
"""Return "_d" if we're running a debug version.
This is to be used within DLL names when locating them.
"""
import imp
for suffix_item in imp.get_suffixes():
if suffix_item[0]=='_d.pyd':
return '_d'
return ''
def FindPackagePath(packageName, knownFileName, searchPaths):
"""Find a package.
Given a ni style package name, check the package is registered.
First place looked is the registry for an existing entry. Then
the searchPaths are searched.
"""
import regutil, os
pathLook = regutil.GetRegisteredNamedPath(packageName)
if pathLook and IsPackageDir(pathLook, packageName, knownFileName):
return pathLook, None # The currently registered one is good.
# Search down the search paths.
for pathLook in searchPaths:
if IsPackageDir(pathLook, packageName, knownFileName):
# Found it
ret = os.path.abspath(pathLook)
return ret, ret
raise error, "The package %s can not be located" % packageName
def FindHelpPath(helpFile, helpDesc, searchPaths):
# See if the current registry entry is OK
import os, win32api, win32con
try:
key = win32api.RegOpenKey(win32con.HKEY_LOCAL_MACHINE, "Software\\Microsoft\\Windows\\Help", 0, win32con.KEY_ALL_ACCESS)
try:
try:
path = win32api.RegQueryValueEx(key, helpDesc)[0]
if FileExists(os.path.join(path, helpFile)):
return os.path.abspath(path)
except win32api.error:
pass # no registry entry.
finally:
key.Close()
except win32api.error:
pass
for pathLook in searchPaths:
if FileExists(os.path.join(pathLook, helpFile)):
return os.path.abspath(pathLook)
pathLook = os.path.join(pathLook, "Help")
if FileExists(os.path.join( pathLook, helpFile)):
return os.path.abspath(pathLook)
raise error, "The help file %s can not be located" % helpFile
def FindAppPath(appName, knownFileName, searchPaths):
"""Find an application.
First place looked is the registry for an existing entry. Then
the searchPaths are searched.
"""
# Look in the first path.
import regutil, string, os
regPath = regutil.GetRegisteredNamedPath(appName)
if regPath:
pathLook = string.split(regPath,";")[0]
if regPath and FileExists(os.path.join(pathLook, knownFileName)):
return None # The currently registered one is good.
# Search down the search paths.
for pathLook in searchPaths:
if FileExists(os.path.join(pathLook, knownFileName)):
# Found it
return os.path.abspath(pathLook)
raise error, "The file %s can not be located for application %s" % (knownFileName, appName)
def FindPythonExe(exeAlias, possibleRealNames, searchPaths):
"""Find an exe.
Returns the full path to the .exe, and a boolean indicating if the current
registered entry is OK. We don't trust the already registered version even
if it exists - it may be wrong (ie, for a different Python version)
"""
import win32api, regutil, string, os, sys
if possibleRealNames is None:
possibleRealNames = exeAlias
# Look first in Python's home.
found = os.path.join(sys.prefix, possibleRealNames)
if not FileExists(found): # for developers
found = os.path.join(sys.prefix, "PCBuild", possibleRealNames)
if not FileExists(found):
found = LocateFileName(possibleRealNames, searchPaths)
registered_ok = 0
try:
registered = win32api.RegQueryValue(regutil.GetRootKey(), regutil.GetAppPathsKey() + "\\" + exeAlias)
registered_ok = found==registered
except win32api.error:
pass
return found, registered_ok
def QuotedFileName(fname):
"""Given a filename, return a quoted version if necessary
"""
import regutil, string
try:
string.index(fname, " ") # Other chars forcing quote?
return '"%s"' % fname
except ValueError:
# No space in name.
return fname
def LocateFileName(fileNamesString, searchPaths):
"""Locate a file name, anywhere on the search path.
If the file can not be located, prompt the user to find it for us
(using a common OpenFile dialog)
Raises KeyboardInterrupt if the user cancels.
"""
import regutil, string, os
fileNames = string.split(fileNamesString,";")
for path in searchPaths:
for fileName in fileNames:
try:
retPath = os.path.join(path, fileName)
os.stat(retPath)
break
except os.error:
retPath = None
if retPath:
break
else:
fileName = fileNames[0]
try:
import win32ui, win32con
except ImportError:
raise error, "Need to locate the file %s, but the win32ui module is not available\nPlease run the program again, passing as a parameter the path to this file." % fileName
# Display a common dialog to locate the file.
flags=win32con.OFN_FILEMUSTEXIST
ext = os.path.splitext(fileName)[1]
filter = "Files of requested type (*%s)|*%s||" % (ext,ext)
dlg = win32ui.CreateFileDialog(1,None,fileName,flags,filter,None)
dlg.SetOFNTitle("Locate " + fileName)
if dlg.DoModal() <> win32con.IDOK:
raise KeyboardInterrupt, "User cancelled the process"
retPath = dlg.GetPathName()
return os.path.abspath(retPath)
def LocatePath(fileName, searchPaths):
"""Like LocateFileName, but returns a directory only.
"""
import os
return os.path.abspath(os.path.split(LocateFileName(fileName, searchPaths))[0])
def LocateOptionalPath(fileName, searchPaths):
"""Like LocatePath, but returns None if the user cancels.
"""
try:
return LocatePath(fileName, searchPaths)
except KeyboardInterrupt:
return None
def LocateOptionalFileName(fileName, searchPaths = None):
"""Like LocateFileName, but returns None if the user cancels.
"""
try:
return LocateFileName(fileName, searchPaths)
except KeyboardInterrupt:
return None
def LocatePythonCore(searchPaths):
"""Locate and validate the core Python directories. Returns a list
of paths that should be used as the core (ie, un-named) portion of
the Python path.
"""
import string, os, regutil
currentPath = regutil.GetRegisteredNamedPath(None)
if currentPath:
presearchPaths = string.split(currentPath, ";")
else:
presearchPaths = [os.path.abspath(".")]
libPath = None
for path in presearchPaths:
if FileExists(os.path.join(path, "os.py")):
libPath = path
break
if libPath is None and searchPaths is not None:
libPath = LocatePath("os.py", searchPaths)
if libPath is None:
raise error, "The core Python library could not be located."
corePath = None
suffix = IsDebug()
for path in presearchPaths:
if FileExists(os.path.join(path, "unicodedata%s.pyd" % suffix)):
corePath = path
break
if corePath is None and searchPaths is not None:
corePath = LocatePath("unicodedata%s.pyd" % suffix, searchPaths)
if corePath is None:
raise error, "The core Python path could not be located."
installPath = os.path.abspath(os.path.join(libPath, ".."))
return installPath, [libPath, corePath]
def FindRegisterPackage(packageName, knownFile, searchPaths, registryAppName = None):
"""Find and Register a package.
Assumes the core registry setup correctly.
In addition, if the location located by the package is already
in the **core** path, then an entry is registered, but no path.
(no other paths are checked, as the application whose path was used
may later be uninstalled. This should not happen with the core)
"""
import regutil, string
if not packageName: raise error, "A package name must be supplied"
corePaths = string.split(regutil.GetRegisteredNamedPath(None),";")
if not searchPaths: searchPaths = corePaths
registryAppName = registryAppName or packageName
try:
pathLook, pathAdd = FindPackagePath(packageName, knownFile, searchPaths)
if pathAdd is not None:
if pathAdd in corePaths:
pathAdd = ""
regutil.RegisterNamedPath(registryAppName, pathAdd)
return pathLook
except error, details:
print "*** The %s package could not be registered - %s" % (packageName, details)
print "*** Please ensure you have passed the correct paths on the command line."
print "*** - For packages, you should pass a path to the packages parent directory,"
print "*** - and not the package directory itself..."
def FindRegisterApp(appName, knownFiles, searchPaths):
"""Find and Register a package.
Assumes the core registry setup correctly.
"""
import regutil, string
if type(knownFiles)==type(''):
knownFiles = [knownFiles]
paths=[]
try:
for knownFile in knownFiles:
pathLook = FindAppPath(appName, knownFile, searchPaths)
if pathLook:
paths.append(pathLook)
except error, details:
print "*** ", details
return
regutil.RegisterNamedPath(appName, string.join(paths,";"))
def FindRegisterPythonExe(exeAlias, searchPaths, actualFileNames = None):
"""Find and Register a Python exe (not necessarily *the* python.exe)
Assumes the core registry setup correctly.
"""
import regutil, string
fname, ok = FindPythonExe(exeAlias, actualFileNames, searchPaths)
if not ok:
regutil.RegisterPythonExe(fname, exeAlias)
return fname
def FindRegisterHelpFile(helpFile, searchPaths, helpDesc = None ):
import regutil
try:
pathLook = FindHelpPath(helpFile, helpDesc, searchPaths)
except error, details:
print "*** ", details
return
# print "%s found at %s" % (helpFile, pathLook)
regutil.RegisterHelpFile(helpFile, pathLook, helpDesc)
def SetupCore(searchPaths):
"""Setup the core Python information in the registry.
This function makes no assumptions about the current state of sys.path.
After this function has completed, you should have access to the standard
Python library, and the standard Win32 extensions
"""
import sys
for path in searchPaths:
sys.path.append(path)
import string, os
import regutil, win32api,win32con
installPath, corePaths = LocatePythonCore(searchPaths)
# Register the core Pythonpath.
print corePaths
regutil.RegisterNamedPath(None, string.join(corePaths,";"))
# Register the install path.
hKey = win32api.RegCreateKey(regutil.GetRootKey() , regutil.BuildDefaultPythonKey())
try:
# Core Paths.
win32api.RegSetValue(hKey, "InstallPath", win32con.REG_SZ, installPath)
finally:
win32api.RegCloseKey(hKey)
# Register the win32 core paths.
win32paths = os.path.abspath( os.path.split(win32api.__file__)[0]) + ";" + \
os.path.abspath( os.path.split(LocateFileName("win32con.py;win32con.pyc", sys.path ) )[0] )
# Python has builtin support for finding a "DLLs" directory, but
# not a PCBuild. Having it in the core paths means it is ignored when
# an EXE not in the Python dir is hosting us - so we add it as a named
# value
check = os.path.join(sys.prefix, "PCBuild")
if os.path.isdir(check):
regutil.RegisterNamedPath("PCBuild",check)
def RegisterShellInfo(searchPaths):
"""Registers key parts of the Python installation with the Windows Shell.
Assumes a valid, minimal Python installation exists
(ie, SetupCore() has been previously successfully run)
"""
import regutil, win32con
suffix = IsDebug()
# Set up a pointer to the .exe's
exePath = FindRegisterPythonExe("Python%s.exe" % suffix, searchPaths)
regutil.SetRegistryDefaultValue(".py", "Python.File", win32con.HKEY_CLASSES_ROOT)
regutil.RegisterShellCommand("Open", QuotedFileName(exePath)+" \"%1\" %*", "&Run")
regutil.SetRegistryDefaultValue("Python.File\\DefaultIcon", "%s,0" % exePath, win32con.HKEY_CLASSES_ROOT)
FindRegisterHelpFile("Python.hlp", searchPaths, "Main Python Documentation")
FindRegisterHelpFile("ActivePython.chm", searchPaths, "Main Python Documentation")
# We consider the win32 core, as it contains all the win32 api type
# stuff we need.
# FindRegisterApp("win32", ["win32con.pyc", "win32api%s.pyd" % suffix], searchPaths)
usage = """\
regsetup.py - Setup/maintain the registry for Python apps.
Run without options (but possibly with search paths) to repair a totally broken
Python registry setup. This should allow other options to work.
Usage: %s [options ...] paths ...
-p packageName -- Find and register a package. Looks in the paths for
a sub-directory with the name of the package, and
adds a path entry for the package.
-a appName -- Unconditionally add an application name to the path.
                   A new path entry is created with the app name, and the
paths specified are added to the registry.
-c -- Add the specified paths to the core Pythonpath.
If a path appears on the core path, and a package also
needs that same path, the package will not bother
                      registering it. Therefore, by adding paths to the
core path, you can avoid packages re-registering the same path.
-m filename -- Find and register the specific file name as a module.
Do not include a path on the filename!
--shell -- Register everything with the Win95/NT shell.
--upackage name -- Unregister the package
--uapp name -- Unregister the app (identical to --upackage)
--umodule name -- Unregister the module
--description -- Print a description of the usage.
--examples -- Print examples of usage.
""" % sys.argv[0]
description="""\
If no options are processed, the program attempts to validate and set
the standard Python path to the point where the standard library is
available. This can be handy if you move Python to a new drive/sub-directory,
in which case most of the options would fail (as they need at least string.py,
os.py etc to function.)
Running without options should repair Python well enough to run with
the other options.
paths are search paths that the program will use to seek out a file.
For example, when registering the core Python, you may wish to
provide paths to non-standard places to look for the Python help files,
library files, etc.
See also the "regcheck.py" utility which will check and dump the contents
of the registry.
"""
examples="""\
Examples:
"regsetup c:\\wierd\\spot\\1 c:\\wierd\\spot\\2"
Attempts to setup the core Python. Looks in some standard places,
as well as the 2 wierd spots to locate the core Python files (eg, Python.exe,
python14.dll, the standard library and Win32 Extensions.
"regsetup -a myappname . .\subdir"
Registers a new Pythonpath entry named myappname, with "C:\\I\\AM\\HERE" and
"C:\\I\\AM\\HERE\subdir" added to the path (ie, all args are converted to
absolute paths)
"regsetup -c c:\\my\\python\\files"
Unconditionally add "c:\\my\\python\\files" to the 'core' Python path.
"regsetup -m some.pyd \\windows\\system"
Register the module some.pyd in \\windows\\system as a registered
module. This will allow some.pyd to be imported, even though the
windows system directory is not (usually!) on the Python Path.
"regsetup --umodule some"
Unregister the module "some". This means normal import rules then apply
for that module.
"""
if __name__=='__main__':
if len(sys.argv)>1 and sys.argv[1] in ['/?','-?','-help','-h']:
print usage
elif len(sys.argv)==1 or not sys.argv[1][0] in ['/','-']:
# No args, or useful args.
searchPath = sys.path[:]
for arg in sys.argv[1:]:
searchPath.append(arg)
# Good chance we are being run from the "regsetup.py" directory.
# Typically this will be "\somewhere\win32\Scripts" and the
# "somewhere" and "..\Lib" should also be searched.
searchPath.append("..\\Build")
searchPath.append("..\\Lib")
searchPath.append("..")
searchPath.append("..\\..")
# for developers:
# also search somewhere\lib, ..\build, and ..\..\build
searchPath.append("..\\..\\lib")
searchPath.append("..\\build")
searchPath.append("..\\..\\pcbuild")
print "Attempting to setup/repair the Python core"
SetupCore(searchPath)
RegisterShellInfo(searchPath)
FindRegisterHelpFile("PyWin32.chm", searchPath, "Pythonwin Reference")
# Check the registry.
print "Registration complete - checking the registry..."
import regcheck
regcheck.CheckRegistry()
else:
searchPaths = []
import getopt, string
opts, args = getopt.getopt(sys.argv[1:], 'p:a:m:c',
['shell','upackage=','uapp=','umodule=','description','examples'])
for arg in args:
searchPaths.append(arg)
for o,a in opts:
if o=='--description':
print description
if o=='--examples':
print examples
if o=='--shell':
print "Registering the Python core."
RegisterShellInfo(searchPaths)
if o=='-p':
print "Registering package", a
FindRegisterPackage(a,None,searchPaths)
if o in ['--upackage', '--uapp']:
import regutil
print "Unregistering application/package", a
regutil.UnregisterNamedPath(a)
if o=='-a':
import regutil
path = string.join(searchPaths,";")
print "Registering application", a,"to path",path
regutil.RegisterNamedPath(a,path)
if o=='-c':
if not len(searchPaths):
raise error, "-c option must provide at least one additional path"
import win32api, regutil
currentPaths = string.split(regutil.GetRegisteredNamedPath(None),";")
oldLen = len(currentPaths)
for newPath in searchPaths:
if newPath not in currentPaths:
currentPaths.append(newPath)
if len(currentPaths)<>oldLen:
print "Registering %d new core paths" % (len(currentPaths)-oldLen)
regutil.RegisterNamedPath(None,string.join(currentPaths,";"))
else:
print "All specified paths are already registered."
| bsd-3-clause | -4,757,676,226,586,110,000 | 37.661448 | 182 | 0.648006 | false |
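The Find*Path and Locate* helpers above all follow one pattern: walk a list of candidate directories and return the first one containing a known file. A minimal, standalone version of that pattern (hypothetical, not part of regsetup.py) looks like this:
import os
def find_dir_containing(filename, search_paths):
    """Return the first directory in search_paths that contains filename, else None."""
    for path in search_paths:
        if os.path.exists(os.path.join(path, filename)):
            return os.path.abspath(path)
    return None
if __name__ == '__main__':
    # For example, locate the directory holding the standard library's os.py.
    print(find_dir_containing('os.py', [os.path.dirname(os.__file__)]))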
leki75/ansible | lib/ansible/modules/monitoring/nagios.py | 71 | 36457 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is largely copied from the Nagios module included in the
# Func project. Original copyright follows:
#
# func-nagios - Schedule downtime and enables/disable notifications
# Copyright 2011, Red Hat, Inc.
# Tim Bielawa <[email protected]>
#
# This software may be freely redistributed under the terms of the GNU
# general public license version 2 or any later version.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nagios
short_description: Perform common tasks in Nagios related to downtime and notifications.
description:
- "The C(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts."
- All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer
to the host the playbook is currently running on.
  - You can specify multiple services at once by separating them with commas, e.g., C(services=httpd,nfs,puppet).
- When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime for the I(host itself),
e.g., C(service=host). This keyword may not be given with other services at the same time.
I(Setting alerts/downtime for a host does not affect alerts/downtime for any of the services running on it.) To schedule downtime for all
    services on a particular host, use the keyword "all", e.g., C(service=all).
- When using the C(nagios) module you will need to specify your Nagios server using the C(delegate_to) parameter.
version_added: "0.7"
options:
action:
description:
- Action to take.
- servicegroup options were added in 2.0.
- delete_downtime options were added in 2.2.
required: true
choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence",
"silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime",
"servicegroup_host_downtime" ]
host:
description:
- Host to operate on in Nagios.
required: false
default: null
cmdfile:
description:
- Path to the nagios I(command file) (FIFO pipe).
Only required if auto-detection fails.
required: false
default: auto-detected
author:
description:
- Author to leave downtime comments as.
Only usable with the C(downtime) action.
required: false
default: Ansible
comment:
version_added: "2.0"
description:
- Comment for C(downtime) action.
required: false
default: Scheduling downtime
minutes:
description:
- Minutes to schedule downtime for.
- Only usable with the C(downtime) action.
required: false
default: 30
services:
description:
- What to manage downtime/alerts for. Separate multiple services with commas.
C(service) is an alias for C(services).
B(Required) option when using the C(downtime), C(enable_alerts), and C(disable_alerts) actions.
aliases: [ "service" ]
required: true
servicegroup:
version_added: "2.0"
description:
- The Servicegroup we want to set downtimes/alerts for.
      B(Required) option when using the C(servicegroup_service_downtime) and C(servicegroup_host_downtime).
command:
description:
- The raw command to send to nagios, which
should not include the submitted time header or the line-feed
B(Required) option when using the C(command) action.
required: true
author: "Tim Bielawa (@tbielawa)"
'''
EXAMPLES = '''
# set 30 minutes of apache downtime
- nagios:
action: downtime
minutes: 30
service: httpd
host: '{{ inventory_hostname }}'
# schedule an hour of HOST downtime
- nagios:
action: downtime
minutes: 60
service: host
host: '{{ inventory_hostname }}'
# schedule an hour of HOST downtime, with a comment describing the reason
- nagios:
action: downtime
minutes: 60
service: host
host: '{{ inventory_hostname }}'
comment: Rebuilding machine
# schedule downtime for ALL services on HOST
- nagios:
action: downtime
minutes: 45
service: all
host: '{{ inventory_hostname }}'
# schedule downtime for a few services
- nagios:
action: downtime
services: frob,foobar,qeuz
host: '{{ inventory_hostname }}'
# set 30 minutes downtime for all services in servicegroup foo
- nagios:
action: servicegroup_service_downtime
minutes: 30
servicegroup: foo
host: '{{ inventory_hostname }}'
# set 30 minutes downtime for all hosts in servicegroup foo
- nagios:
action: servicegroup_host_downtime
minutes: 30
servicegroup: foo
host: '{{ inventory_hostname }}'
# delete all downtime for a given host
- nagios:
action: delete_downtime
host: '{{ inventory_hostname }}'
service: all
# delete all downtime for HOST with a particular comment
- nagios:
action: delete_downtime
host: '{{ inventory_hostname }}'
service: host
comment: Planned maintenance
# enable SMART disk alerts
- nagios:
action: enable_alerts
service: smart
host: '{{ inventory_hostname }}'
# "two services at once: disable httpd and nfs alerts"
- nagios:
action: disable_alerts
service: httpd,nfs
host: '{{ inventory_hostname }}'
# disable HOST alerts
- nagios:
action: disable_alerts
service: host
host: '{{ inventory_hostname }}'
# silence ALL alerts
- nagios:
action: silence
host: '{{ inventory_hostname }}'
# unsilence all alerts
- nagios:
action: unsilence
host: '{{ inventory_hostname }}'
# SHUT UP NAGIOS
- nagios:
action: silence_nagios
# ANNOY ME NAGIOS
- nagios:
action: unsilence_nagios
# command something
- nagios:
action: command
command: DISABLE_FAILURE_PREDICTION
'''
import types
import time
import os.path
from ansible.module_utils.basic import AnsibleModule
######################################################################
def which_cmdfile():
locations = [
# rhel
'/etc/nagios/nagios.cfg',
# debian
'/etc/nagios3/nagios.cfg',
# older debian
'/etc/nagios2/nagios.cfg',
# bsd, solaris
'/usr/local/etc/nagios/nagios.cfg',
# groundwork it monitoring
'/usr/local/groundwork/nagios/etc/nagios.cfg',
# open monitoring distribution
'/omd/sites/oppy/tmp/nagios/nagios.cfg',
# ???
'/usr/local/nagios/etc/nagios.cfg',
'/usr/local/nagios/nagios.cfg',
'/opt/nagios/etc/nagios.cfg',
'/opt/nagios/nagios.cfg',
# icinga on debian/ubuntu
'/etc/icinga/icinga.cfg',
# icinga installed from source (default location)
'/usr/local/icinga/etc/icinga.cfg',
]
for path in locations:
if os.path.exists(path):
for line in open(path):
if line.startswith('command_file'):
return line.split('=')[1].strip()
return None
######################################################################
def main():
ACTION_CHOICES = [
'downtime',
'delete_downtime',
'silence',
'unsilence',
'enable_alerts',
'disable_alerts',
'silence_nagios',
'unsilence_nagios',
'command',
'servicegroup_host_downtime',
'servicegroup_service_downtime',
]
module = AnsibleModule(
argument_spec=dict(
action=dict(required=True, default=None, choices=ACTION_CHOICES),
author=dict(default='Ansible'),
comment=dict(default='Scheduling downtime'),
host=dict(required=False, default=None),
servicegroup=dict(required=False, default=None),
minutes=dict(default=30),
cmdfile=dict(default=which_cmdfile()),
services=dict(default=None, aliases=['service']),
command=dict(required=False, default=None),
)
)
action = module.params['action']
host = module.params['host']
servicegroup = module.params['servicegroup']
minutes = module.params['minutes']
services = module.params['services']
cmdfile = module.params['cmdfile']
command = module.params['command']
##################################################################
# Required args per action:
# downtime = (minutes, service, host)
# (un)silence = (host)
# (enable/disable)_alerts = (service, host)
# command = command
#
# AnsibleModule will verify most stuff, we need to verify
# 'minutes' and 'service' manually.
##################################################################
if action not in ['command', 'silence_nagios', 'unsilence_nagios']:
if not host:
module.fail_json(msg='no host specified for action requiring one')
######################################################################
if action == 'downtime':
# Make sure there's an actual service selected
if not services:
module.fail_json(msg='no service selected to set downtime for')
# Make sure minutes is a number
try:
m = int(minutes)
if not isinstance(m, types.IntType):
module.fail_json(msg='minutes must be a number')
except Exception:
module.fail_json(msg='invalid entry for minutes')
######################################################################
if action == 'delete_downtime':
# Make sure there's an actual service selected
if not services:
module.fail_json(msg='no service selected to set downtime for')
######################################################################
if action in ['servicegroup_service_downtime', 'servicegroup_host_downtime']:
# Make sure there's an actual servicegroup selected
if not servicegroup:
module.fail_json(msg='no servicegroup selected to set downtime for')
# Make sure minutes is a number
try:
m = int(minutes)
if not isinstance(m, types.IntType):
module.fail_json(msg='minutes must be a number')
except Exception:
module.fail_json(msg='invalid entry for minutes')
##################################################################
if action in ['enable_alerts', 'disable_alerts']:
if not services:
module.fail_json(msg='a service is required when setting alerts')
if action in ['command']:
if not command:
module.fail_json(msg='no command passed for command action')
##################################################################
if not cmdfile:
module.fail_json(msg='unable to locate nagios.cfg')
##################################################################
ansible_nagios = Nagios(module, **module.params)
if module.check_mode:
module.exit_json(changed=True)
else:
ansible_nagios.act()
##################################################################
######################################################################
class Nagios(object):
"""
Perform common tasks in Nagios related to downtime and
notifications.
The complete set of external commands Nagios handles is documented
on their website:
http://old.nagios.org/developerinfo/externalcommands/commandlist.php
Note that in the case of `schedule_svc_downtime`,
`enable_svc_notifications`, and `disable_svc_notifications`, the
service argument should be passed as a list.
"""
def __init__(self, module, **kwargs):
self.module = module
self.action = kwargs['action']
self.author = kwargs['author']
self.comment = kwargs['comment']
self.host = kwargs['host']
self.servicegroup = kwargs['servicegroup']
self.minutes = int(kwargs['minutes'])
self.cmdfile = kwargs['cmdfile']
self.command = kwargs['command']
if (kwargs['services'] is None) or (kwargs['services'] == 'host') or (kwargs['services'] == 'all'):
self.services = kwargs['services']
else:
self.services = kwargs['services'].split(',')
self.command_results = []
def _now(self):
"""
The time in seconds since 12:00:00AM Jan 1, 1970
"""
return int(time.time())
def _write_command(self, cmd):
"""
Write the given command to the Nagios command file
"""
try:
fp = open(self.cmdfile, 'w')
fp.write(cmd)
fp.flush()
fp.close()
self.command_results.append(cmd.strip())
        except IOError:
            self.module.fail_json(msg='unable to write to nagios command file',
                                  cmdfile=self.cmdfile)
        # Report success to callers that check the return value when building
        # their result strings (fail_json above aborts on error).
        return True
def _fmt_dt_str(self, cmd, host, duration, author=None,
comment=None, start=None,
svc=None, fixed=1, trigger=0):
"""
Format an external-command downtime string.
cmd - Nagios command ID
host - Host schedule downtime on
duration - Minutes to schedule downtime for
author - Name to file the downtime as
comment - Reason for running this command (upgrade, reboot, etc)
start - Start of downtime in seconds since 12:00AM Jan 1 1970
Default is to use the entry time (now)
svc - Service to schedule downtime for, omit when for host downtime
fixed - Start now if 1, start when a problem is detected if 0
trigger - Optional ID of event to start downtime from. Leave as 0 for
fixed downtime.
Syntax: [submitted] COMMAND;<host_name>;[<service_description>]
<start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
<comment>
"""
entry_time = self._now()
if start is None:
start = entry_time
hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
duration_s = (duration * 60)
end = start + duration_s
if not author:
author = self.author
if not comment:
comment = self.comment
if svc is not None:
dt_args = [svc, str(start), str(end), str(fixed), str(trigger),
str(duration_s), author, comment]
else:
# Downtime for a host if no svc specified
dt_args = [str(start), str(end), str(fixed), str(trigger),
str(duration_s), author, comment]
dt_arg_str = ";".join(dt_args)
dt_str = hdr + dt_arg_str + "\n"
return dt_str
def _fmt_dt_del_str(self, cmd, host, svc=None, start=None, comment=None):
"""
Format an external-command downtime deletion string.
cmd - Nagios command ID
host - Host to remove scheduled downtime from
comment - Reason downtime was added (upgrade, reboot, etc)
start - Start of downtime in seconds since 12:00AM Jan 1 1970
svc - Service to remove downtime for, omit to remove all downtime for the host
Syntax: [submitted] COMMAND;<host_name>;
            [<service_description>];[<start_time>];[<comment>]
"""
entry_time = self._now()
hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
if comment is None:
comment = self.comment
dt_del_args = []
if svc is not None:
dt_del_args.append(svc)
else:
dt_del_args.append('')
if start is not None:
dt_del_args.append(str(start))
else:
dt_del_args.append('')
if comment is not None:
dt_del_args.append(comment)
else:
dt_del_args.append('')
dt_del_arg_str = ";".join(dt_del_args)
dt_del_str = hdr + dt_del_arg_str + "\n"
return dt_del_str
def _fmt_notif_str(self, cmd, host=None, svc=None):
"""
Format an external-command notification string.
cmd - Nagios command ID.
        host - Host to en/disable notifications on. A value is not required
for global downtime
svc - Service to schedule downtime for. A value is not required
for host downtime.
Syntax: [submitted] COMMAND;<host_name>[;<service_description>]
"""
entry_time = self._now()
notif_str = "[%s] %s" % (entry_time, cmd)
if host is not None:
notif_str += ";%s" % host
if svc is not None:
notif_str += ";%s" % svc
notif_str += "\n"
return notif_str
def schedule_svc_downtime(self, host, services=None, minutes=30):
"""
This command is used to schedule downtime for a particular
service.
During the specified downtime, Nagios will not send
notifications out about the service.
Syntax: SCHEDULE_SVC_DOWNTIME;<host_name>;<service_description>
<start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
<comment>
"""
cmd = "SCHEDULE_SVC_DOWNTIME"
if services is None:
services = []
for service in services:
dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, svc=service)
self._write_command(dt_cmd_str)
def schedule_host_downtime(self, host, minutes=30):
"""
This command is used to schedule downtime for a particular
host.
During the specified downtime, Nagios will not send
notifications out about the host.
Syntax: SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;
<fixed>;<trigger_id>;<duration>;<author>;<comment>
"""
cmd = "SCHEDULE_HOST_DOWNTIME"
dt_cmd_str = self._fmt_dt_str(cmd, host, minutes)
self._write_command(dt_cmd_str)
def schedule_host_svc_downtime(self, host, minutes=30):
"""
This command is used to schedule downtime for
all services associated with a particular host.
During the specified downtime, Nagios will not send
notifications out about the host.
SCHEDULE_HOST_SVC_DOWNTIME;<host_name>;<start_time>;<end_time>;
<fixed>;<trigger_id>;<duration>;<author>;<comment>
"""
cmd = "SCHEDULE_HOST_SVC_DOWNTIME"
dt_cmd_str = self._fmt_dt_str(cmd, host, minutes)
self._write_command(dt_cmd_str)
def delete_host_downtime(self, host, services=None, comment=None):
"""
This command is used to remove scheduled downtime for a particular
host.
Syntax: DEL_DOWNTIME_BY_HOST_NAME;<host_name>;
        [<service_description>];[<start_time>];[<comment>]
"""
cmd = "DEL_DOWNTIME_BY_HOST_NAME"
if services is None:
dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, comment=comment)
self._write_command(dt_del_cmd_str)
else:
for service in services:
dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, svc=service, comment=comment)
self._write_command(dt_del_cmd_str)
def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30):
"""
This command is used to schedule downtime for all hosts in a
particular hostgroup.
During the specified downtime, Nagios will not send
notifications out about the hosts.
Syntax: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;
<end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
"""
cmd = "SCHEDULE_HOSTGROUP_HOST_DOWNTIME"
dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes)
self._write_command(dt_cmd_str)
def schedule_hostgroup_svc_downtime(self, hostgroup, minutes=30):
"""
This command is used to schedule downtime for all services in
a particular hostgroup.
During the specified downtime, Nagios will not send
notifications out about the services.
Note that scheduling downtime for services does not
automatically schedule downtime for the hosts those services
are associated with.
Syntax: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;<hostgroup_name>;<start_time>;
<end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
"""
cmd = "SCHEDULE_HOSTGROUP_SVC_DOWNTIME"
dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes)
self._write_command(dt_cmd_str)
def schedule_servicegroup_host_downtime(self, servicegroup, minutes=30):
"""
This command is used to schedule downtime for all hosts in a
particular servicegroup.
During the specified downtime, Nagios will not send
notifications out about the hosts.
Syntax: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;
<start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
<comment>
"""
cmd = "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME"
dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes)
self._write_command(dt_cmd_str)
def schedule_servicegroup_svc_downtime(self, servicegroup, minutes=30):
"""
This command is used to schedule downtime for all services in
a particular servicegroup.
During the specified downtime, Nagios will not send
notifications out about the services.
Note that scheduling downtime for services does not
automatically schedule downtime for the hosts those services
are associated with.
Syntax: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;<servicegroup_name>;
<start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
<comment>
"""
cmd = "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME"
dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes)
self._write_command(dt_cmd_str)
def disable_host_svc_notifications(self, host):
"""
This command is used to prevent notifications from being sent
out for all services on the specified host.
Note that this command does not disable notifications from
being sent out about the host.
Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
"""
cmd = "DISABLE_HOST_SVC_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, host)
self._write_command(notif_str)
def disable_host_notifications(self, host):
"""
This command is used to prevent notifications from being sent
out for the specified host.
Note that this command does not disable notifications for
services associated with this host.
Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
"""
cmd = "DISABLE_HOST_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, host)
self._write_command(notif_str)
def disable_svc_notifications(self, host, services=None):
"""
This command is used to prevent notifications from being sent
out for the specified service.
Note that this command does not disable notifications from
being sent out about the host.
Syntax: DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
"""
cmd = "DISABLE_SVC_NOTIFICATIONS"
if services is None:
services = []
for service in services:
notif_str = self._fmt_notif_str(cmd, host, svc=service)
self._write_command(notif_str)
def disable_servicegroup_host_notifications(self, servicegroup):
"""
This command is used to prevent notifications from being sent
out for all hosts in the specified servicegroup.
Note that this command does not disable notifications for
services associated with hosts in this service group.
Syntax: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
"""
cmd = "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, servicegroup)
self._write_command(notif_str)
def disable_servicegroup_svc_notifications(self, servicegroup):
"""
This command is used to prevent notifications from being sent
out for all services in the specified servicegroup.
Note that this does not prevent notifications from being sent
out about the hosts in this servicegroup.
Syntax: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
"""
cmd = "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, servicegroup)
self._write_command(notif_str)
def disable_hostgroup_host_notifications(self, hostgroup):
"""
Disables notifications for all hosts in a particular
hostgroup.
Note that this does not disable notifications for the services
associated with the hosts in the hostgroup - see the
DISABLE_HOSTGROUP_SVC_NOTIFICATIONS command for that.
Syntax: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
"""
cmd = "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, hostgroup)
self._write_command(notif_str)
def disable_hostgroup_svc_notifications(self, hostgroup):
"""
Disables notifications for all services associated with hosts
in a particular hostgroup.
Note that this does not disable notifications for the hosts in
the hostgroup - see the DISABLE_HOSTGROUP_HOST_NOTIFICATIONS
command for that.
Syntax: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
"""
cmd = "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, hostgroup)
self._write_command(notif_str)
def enable_host_notifications(self, host):
"""
Enables notifications for a particular host.
Note that this command does not enable notifications for
services associated with this host.
Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
"""
cmd = "ENABLE_HOST_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, host)
self._write_command(notif_str)
def enable_host_svc_notifications(self, host):
"""
Enables notifications for all services on the specified host.
Note that this does not enable notifications for the host.
Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
"""
cmd = "ENABLE_HOST_SVC_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, host)
nagios_return = self._write_command(notif_str)
if nagios_return:
return notif_str
else:
return "Fail: could not write to the command file"
def enable_svc_notifications(self, host, services=None):
"""
Enables notifications for a particular service.
Note that this does not enable notifications for the host.
Syntax: ENABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
"""
cmd = "ENABLE_SVC_NOTIFICATIONS"
if services is None:
services = []
nagios_return = True
return_str_list = []
for service in services:
notif_str = self._fmt_notif_str(cmd, host, svc=service)
nagios_return = self._write_command(notif_str) and nagios_return
return_str_list.append(notif_str)
if nagios_return:
return return_str_list
else:
return "Fail: could not write to the command file"
def enable_hostgroup_host_notifications(self, hostgroup):
"""
Enables notifications for all hosts in a particular hostgroup.
Note that this command does not enable notifications for
services associated with the hosts in this hostgroup.
Syntax: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
"""
cmd = "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, hostgroup)
nagios_return = self._write_command(notif_str)
if nagios_return:
return notif_str
else:
return "Fail: could not write to the command file"
def enable_hostgroup_svc_notifications(self, hostgroup):
"""
Enables notifications for all services that are associated
with hosts in a particular hostgroup.
Note that this does not enable notifications for the hosts in
this hostgroup.
Syntax: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
"""
cmd = "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, hostgroup)
nagios_return = self._write_command(notif_str)
if nagios_return:
return notif_str
else:
return "Fail: could not write to the command file"
def enable_servicegroup_host_notifications(self, servicegroup):
"""
Enables notifications for all hosts that have services that
are members of a particular servicegroup.
Note that this command does not enable notifications for
services associated with the hosts in this servicegroup.
Syntax: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
"""
cmd = "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, servicegroup)
nagios_return = self._write_command(notif_str)
if nagios_return:
return notif_str
else:
return "Fail: could not write to the command file"
def enable_servicegroup_svc_notifications(self, servicegroup):
"""
Enables notifications for all services that are members of a
particular servicegroup.
Note that this does not enable notifications for the hosts in
this servicegroup.
Syntax: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
"""
cmd = "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, servicegroup)
nagios_return = self._write_command(notif_str)
if nagios_return:
return notif_str
else:
return "Fail: could not write to the command file"
def silence_host(self, host):
"""
This command is used to prevent notifications from being sent
out for the host and all services on the specified host.
This is equivalent to calling disable_host_svc_notifications
and disable_host_notifications.
Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
"""
cmd = [
"DISABLE_HOST_SVC_NOTIFICATIONS",
"DISABLE_HOST_NOTIFICATIONS"
]
nagios_return = True
return_str_list = []
for c in cmd:
notif_str = self._fmt_notif_str(c, host)
nagios_return = self._write_command(notif_str) and nagios_return
return_str_list.append(notif_str)
if nagios_return:
return return_str_list
else:
return "Fail: could not write to the command file"
def unsilence_host(self, host):
"""
This command is used to enable notifications for the host and
all services on the specified host.
This is equivalent to calling enable_host_svc_notifications
and enable_host_notifications.
Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
"""
cmd = [
"ENABLE_HOST_SVC_NOTIFICATIONS",
"ENABLE_HOST_NOTIFICATIONS"
]
nagios_return = True
return_str_list = []
for c in cmd:
notif_str = self._fmt_notif_str(c, host)
nagios_return = self._write_command(notif_str) and nagios_return
return_str_list.append(notif_str)
if nagios_return:
return return_str_list
else:
return "Fail: could not write to the command file"
def silence_nagios(self):
"""
This command is used to disable notifications for all hosts and services
in nagios.
This is a 'SHUT UP, NAGIOS' command
"""
cmd = 'DISABLE_NOTIFICATIONS'
self._write_command(self._fmt_notif_str(cmd))
def unsilence_nagios(self):
"""
This command is used to enable notifications for all hosts and services
in nagios.
        This is an 'OK, NAGIOS, GO' command
"""
cmd = 'ENABLE_NOTIFICATIONS'
self._write_command(self._fmt_notif_str(cmd))
def nagios_cmd(self, cmd):
"""
This sends an arbitrary command to nagios
It prepends the submitted time and appends a \n
You just have to provide the properly formatted command
"""
pre = '[%s]' % int(time.time())
post = '\n'
cmdstr = '%s %s%s' % (pre, cmd, post)
self._write_command(cmdstr)
def act(self):
"""
Figure out what you want to do from ansible, and then do the
needful (at the earliest).
"""
# host or service downtime?
if self.action == 'downtime':
if self.services == 'host':
self.schedule_host_downtime(self.host, self.minutes)
elif self.services == 'all':
self.schedule_host_svc_downtime(self.host, self.minutes)
else:
self.schedule_svc_downtime(self.host,
services=self.services,
minutes=self.minutes)
elif self.action == 'delete_downtime':
            if self.services == 'host':
                self.delete_host_downtime(self.host)
            elif self.services == 'all':
                self.delete_host_downtime(self.host, comment='')
            else:
                self.delete_host_downtime(self.host, services=self.services)
        elif self.action == "servicegroup_host_downtime":
            if self.servicegroup:
                self.schedule_servicegroup_host_downtime(servicegroup=self.servicegroup, minutes=self.minutes)
        elif self.action == "servicegroup_service_downtime":
            if self.servicegroup:
                self.schedule_servicegroup_svc_downtime(servicegroup=self.servicegroup, minutes=self.minutes)
# toggle the host AND service alerts
elif self.action == 'silence':
self.silence_host(self.host)
elif self.action == 'unsilence':
self.unsilence_host(self.host)
# toggle host/svc alerts
elif self.action == 'enable_alerts':
if self.services == 'host':
self.enable_host_notifications(self.host)
elif self.services == 'all':
self.enable_host_svc_notifications(self.host)
else:
self.enable_svc_notifications(self.host,
services=self.services)
elif self.action == 'disable_alerts':
if self.services == 'host':
self.disable_host_notifications(self.host)
elif self.services == 'all':
self.disable_host_svc_notifications(self.host)
else:
self.disable_svc_notifications(self.host,
services=self.services)
elif self.action == 'silence_nagios':
self.silence_nagios()
elif self.action == 'unsilence_nagios':
self.unsilence_nagios()
elif self.action == 'command':
self.nagios_cmd(self.command)
# wtf?
else:
self.module.fail_json(msg="unknown action specified: '%s'" % \
self.action)
self.module.exit_json(nagios_commands=self.command_results,
changed=True)
if __name__ == '__main__':
main()
| gpl-3.0 | 8,060,819,141,069,020,000 | 32.446789 | 143 | 0.600379 | false |
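The downtime strings written by _fmt_dt_str above follow Nagios' external-command syntax: [entry_time] CMD;host;svc;start;end;fixed;trigger;duration;author;comment. The snippet below is a hypothetical, standalone re-implementation used only to show what such a line looks like; it is not part of the module.
import time
def fmt_svc_downtime(host, svc, minutes, author='Ansible', comment='Scheduling downtime'):
    """Format a SCHEDULE_SVC_DOWNTIME external command line (illustrative only)."""
    entry_time = int(time.time())
    start = entry_time
    duration_s = minutes * 60
    args = [svc, str(start), str(start + duration_s), '1', '0',
            str(duration_s), author, comment]
    return '[%s] SCHEDULE_SVC_DOWNTIME;%s;%s\n' % (entry_time, host, ';'.join(args))
if __name__ == '__main__':
    print(fmt_svc_downtime('web01', 'httpd', 30))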
yinwenpeng/rescale | en/parser/nltk_lite/stem/regexp.py | 9 | 1677 | # Natural Language Toolkit: Stemmers
#
# Copyright (C) 2001-2006 University of Melbourne
# Author: Trevor Cohn <[email protected]>
# Edward Loper <[email protected]>
# Steven Bird <[email protected]>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
import re   # used below for compiling the affix pattern
from en.parser.nltk_lite.stem import *
class Regexp(StemI):
"""
A stemmer that uses regular expressions to identify morphological
affixes. Any substrings that matches the regular expressions will
be removed.
"""
def __init__(self, regexp, min=0):
"""
Create a new regexp stemmer.
@type regexp: C{string} or C{regexp}
@param regexp: The regular expression that should be used to
identify morphological affixes.
@type min: int
@param min: The minimum length of string to stem
"""
if not hasattr(regexp, 'pattern'):
regexp = re.compile(regexp)
self._regexp = regexp
self._min = min
def stem(self, word):
if len(word) < self._min:
return word
else:
return self._regexp.sub('', word)
def __repr__(self):
return '<Regexp Stemmer: %r>' % self._regexp.pattern
def demo():
from en.parser.nltk_lite import tokenize, stem
# Create a simple regular expression based stemmer
stemmer = stem.Regexp('ing$|s$|e$', min=4)
text = "John was eating icecream"
tokens = tokenize.whitespace(text)
# Print the results.
print stemmer
for word in tokens:
print '%20s => %s' % (word, stemmer.stem(word))
print
if __name__ == '__main__': demo()
| gpl-3.0 | 3,809,936,516,278,807,000 | 26.95 | 70 | 0.608229 | false |
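The same suffix-stripping idea can be expressed with nothing but the standard library; the sketch below is independent of nltk_lite and is shown only for comparison with the Regexp stemmer above.
import re
_SUFFIXES = re.compile('ing$|s$|e$')
def simple_stem(word, min_len=4):
    """Strip a trailing 'ing', 's' or 'e' from words of at least min_len characters."""
    return word if len(word) < min_len else _SUFFIXES.sub('', word)
if __name__ == '__main__':
    for word in "John was eating icecream".split():
        print('%20s => %s' % (word, simple_stem(word)))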
ajnirp/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/testdata/handlers/sub/plain_wsh.py | 499 | 1789 | # Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
request.connection.write('sub/plain_wsh.py is called for %s, %s' %
(request.ws_resource, request.ws_protocol))
# vi:sts=4 sw=4 et
| mpl-2.0 | 3,938,430,287,483,820,000 | 43.725 | 72 | 0.759642 | false |
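For comparison, a minimal echo-style handler in the same plugin format might look like the following. This is a hypothetical sketch: the request.ws_stream calls are assumed from mod_pywebsocket's handler interface and it is not part of the test data above.
def web_socket_do_extra_handshake(request):
    pass
def web_socket_transfer_data(request):
    # Echo every received message back to the client until the peer closes.
    while True:
        message = request.ws_stream.receive_message()
        if message is None:
            return
        request.ws_stream.send_message(message, binary=False)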
waveform80/ctutils | ctutils/ctinfo.py | 1 | 2101 | # vim: set et sw=4 sts=4 fileencoding=utf-8:
# Copyright 2014 Dave Jones <[email protected]>.
#
# This file is part of ctutils.
#
# ctutils is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# ctutils is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# ctutils. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
str = type('')
import logging
from . import __version__
from .terminal import TerminalApplication, FileType
from .readers import open_scan
class CtInfoApplication(TerminalApplication):
"""
This utility can be used to rapidly query the header of CT-scanner output
in various formats. Output is written to stdout in a format conducive to
script processing. File formats supported include TXM files (".txm"),
VGI files (".vgi" with an equivalently named ".vol" file), or TIFF stacks
(specify one of the TIFF filenames and all equivalently sized TIFFs in the
directory will be loaded as part of the stack).
"""
def __init__(self):
super(CtInfoApplication, self).__init__(
version=__version__,
config_files=[],
)
self.parser.add_argument('input', type=FileType('rb'))
def main(self, args):
print('Filename: %s' % args.input.name)
reader = open_scan(args.input)
print('Input format: %s' % reader.format_name)
print('Input resolution: %dx%d' % (reader.width, reader.height))
print('Input datatype: %s' % reader.datatype().dtype.name)
print('Input images: %d' % len(reader))
main = CtInfoApplication()
| gpl-3.0 | -5,083,303,698,079,360,000 | 34.610169 | 79 | 0.687292 | false |
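A hypothetical usage sketch of the same reader interface follows; it assumes only the names the module above already uses (open_scan, format_name, width, height, len(reader)) and is not part of ctutils.
from ctutils.readers import open_scan
def print_scan_info(path):
    """Print roughly the same summary as CtInfoApplication.main() for a scan file."""
    with open(path, 'rb') as fp:
        reader = open_scan(fp)
        print('%s: %s, %dx%d, %d images' % (
            path, reader.format_name, reader.width, reader.height, len(reader)))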
manterd/myPhyloDB | functions/analysis/spls_graphs.py | 1 | 32640 | import datetime
from django.http import HttpResponse
import logging
import pandas as pd
from pyper import *
from scipy import stats
import json
from database.models import Kingdom, Phyla, Class, Order, Family, Genus, Species, OTU_99, \
ko_lvl1, ko_lvl2, ko_lvl3, \
nz_lvl1, nz_lvl2, nz_lvl3, nz_lvl4
import functions
reload(sys)
sys.setdefaultencoding('utf8')
LOG_FILENAME = 'error_log.txt'
pd.set_option('display.max_colwidth', -1)
def getSPLS(request, stops, RID, PID):
try:
while True:
if request.is_ajax():
allJson = request.body.split('&')[0]
all = json.loads(allJson)
functions.setBase(RID, 'Step 1 of 6: Reading normalized data file...')
functions.setBase(RID, 'Step 2 of 6: Selecting your chosen meta-variables...')
selectAll = int(all["selectAll"])
keggAll = int(all["keggAll"])
nzAll = int(all["nzAll"])
# Select samples and meta-variables from savedDF
metaValsCat = []
metaIDsCat = []
metaValsQuant = all['metaValsQuant']
metaIDsQuant = all['metaIDsQuant']
treeType = int(all['treeType'])
DepVar = int(all["DepVar"])
# Create meta-variable DataFrame, final sample list, final category and quantitative field lists based on tree selections
savedDF, metaDF, finalSampleIDs, catFields, remCatFields, quantFields, catValues, quantValues = functions.getMetaDF(request.user, metaValsCat, metaIDsCat, metaValsQuant, metaIDsQuant, DepVar)
allFields = catFields + quantFields
print "ok"
if not finalSampleIDs:
error = "No valid samples were contained in your final dataset.\nPlease select different variable(s)."
myDict = {'error': error}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
result = ''
if treeType == 1:
if selectAll == 1:
result += 'Taxa level: Kingdom' + '\n'
elif selectAll == 2:
result += 'Taxa level: Phyla' + '\n'
elif selectAll == 3:
result += 'Taxa level: Class' + '\n'
elif selectAll == 4:
result += 'Taxa level: Order' + '\n'
elif selectAll == 5:
result += 'Taxa level: Family' + '\n'
elif selectAll == 6:
result += 'Taxa level: Genus' + '\n'
elif selectAll == 7:
result += 'Taxa level: Species' + '\n'
elif selectAll == 9:
result += 'Taxa level: OTU_99' + '\n'
elif treeType == 2:
if keggAll == 1:
result += 'KEGG Pathway level: 1' + '\n'
elif keggAll == 2:
result += 'KEGG Pathway level: 2' + '\n'
elif keggAll == 3:
result += 'KEGG Pathway level: 3' + '\n'
elif treeType == 3:
if nzAll == 1:
result += 'KEGG Enzyme level: 1' + '\n'
elif nzAll == 2:
result += 'KEGG Enzyme level: 2' + '\n'
elif nzAll == 3:
result += 'KEGG Enzyme level: 3' + '\n'
elif nzAll == 4:
result += 'KEGG Enzyme level: 4' + '\n'
                    elif nzAll == 5:
                        result += 'KEGG Enzyme level: GIBBs' + '\n'
                    elif nzAll == 6:
                        result += 'KEGG Enzyme level: Nitrogen cycle' + '\n'
result += 'Categorical variables selected by user: ' + ", ".join(catFields + remCatFields) + '\n'
result += 'Categorical variables not included in the statistical analysis (contains only 1 level): ' + ", ".join(remCatFields) + '\n'
result += 'Quantitative variables selected by user: ' + ", ".join(quantFields) + '\n'
result += '===============================================\n\n'
x_scale = all['x_scale']
if x_scale == 'yes':
result += 'Predictor (X) variables have been scaled by dividing by their standard deviation.\n'
else:
result += 'Predictor (X) variables have not been scaled.\n'
y_scale = all['y_scale']
if y_scale == 'yes':
result += 'All response (Y) variables (i.e., observed & predicted) have been scaled by dividing by their standard deviation.\n'
else:
result += 'All response (Y) variables (i.e., observed & predicted) have not been scaled.\n'
result += '===============================================\n\n'
functions.setBase(RID, 'Step 2 of 6: Selecting your chosen meta-variables...done')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
functions.setBase(RID, 'Step 3 of 6: Selecting your chosen taxa or KEGG level...')
# filter otus based on user settings
remUnclass = all['remUnclass']
remZeroes = all['remZeroes']
perZeroes = int(all['perZeroes'])
filterData = all['filterData']
filterPer = int(all['filterPer'])
filterMeth = int(all['filterMeth'])
remMito = all['remMito']
remChloro = all['remChloro']
mapTaxa = 'no'
finalDF = pd.DataFrame()
if treeType == 1:
if selectAll != 8:
filteredDF = functions.filterDF(savedDF, DepVar, selectAll, remUnclass, remMito, remChloro, remZeroes, perZeroes, filterData, filterPer, filterMeth)
else:
filteredDF = savedDF.copy()
finalDF, missingList = functions.getTaxaDF(selectAll, '', filteredDF, metaDF, allFields, DepVar, RID, stops, PID)
if selectAll == 8:
result += '\nThe following PGPRs were not detected: ' + ", ".join(missingList) + '\n'
result += '===============================================\n'
if treeType == 2:
finalDF, allDF = functions.getKeggDF(keggAll, '', savedDF, metaDF, DepVar, mapTaxa, RID, stops, PID)
if treeType == 3:
finalDF, allDF = functions.getNZDF(nzAll, '', savedDF, metaDF, DepVar, mapTaxa, RID, stops, PID)
if finalDF.empty:
error = "Selected taxa were not found in your selected samples."
myDict = {'error': error}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
# make sure column types are correct
finalDF[quantFields] = finalDF[quantFields].astype(float)
# transform Y, if requested
transform = int(all["transform"])
finalDF = functions.transformDF(transform, DepVar, finalDF)
# save location info to session
myDir = 'myPhyloDB/media/temp/spls/'
if not os.path.exists(myDir):
os.makedirs(myDir)
path = str(myDir) + str(RID) + '.biom'
functions.imploding_panda(path, treeType, DepVar, finalSampleIDs, metaDF, finalDF)
functions.setBase(RID, 'Step 3 of 6: Selecting your chosen taxa or KEGG level...done')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
functions.setBase(RID, 'Step 4 of 6: Calculating sPLS...')
if DepVar == 0:
result += 'Dependent Variable: Abundance' + '\n'
elif DepVar == 1:
result += 'Dependent Variable: Relative Abundance' + '\n'
elif DepVar == 2:
result += 'Dependent Variable: OTU Richness' + '\n'
elif DepVar == 3:
result += 'Dependent Variable: OTU Diversity' + '\n'
elif DepVar == 4:
result += 'Dependent Variable: Total Abundance' + '\n'
result += '\n===============================================\n'
count_rDF = pd.DataFrame()
if DepVar == 0:
count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='abund')
elif DepVar == 1:
count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='rel_abund')
elif DepVar == 2:
count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='rich')
elif DepVar == 3:
count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='diversity')
elif DepVar == 4:
count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='abund_16S')
count_rDF.fillna(0, inplace=True)
if os.name == 'nt':
r = R(RCMD="R/R-Portable/App/R-Portable/bin/R.exe", use_pandas=True)
else:
r = R(RCMD="R/R-Linux/bin/R", use_pandas=True)
functions.setBase(RID, 'Verifying R packages...missing packages are being installed')
# R packages from cran
r("list.of.packages <- c('mixOmics', 'spls', 'pheatmap', 'RColorBrewer')")
r("new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,'Package'])]")
print r("if (length(new.packages)) install.packages(new.packages, repos='http://cran.us.r-project.org', dependencies=T)")
functions.setBase(RID, 'Step 4 of 6: Calculating sPLS...')
print r("library(mixOmics)")
print r("library(spls)")
print r("library(pheatmap)")
print r("library(RColorBrewer)")
count_rDF.sort_index(axis=0, inplace=True)
metaDF.sort_values('sampleid', inplace=True)
r.assign("X", count_rDF)
r.assign("Y", metaDF[quantFields])
r.assign("names", count_rDF.columns.values)
r("colnames(X) <- names")
freqCut = all["freqCut"]
num = int(freqCut.split('/')[0])
den = int(freqCut.split('/')[1])
r.assign("num", num)
r.assign("den", den)
uniqueCut = int(all["uniqueCut"])
r.assign("uniqueCut", uniqueCut)
r("nzv_cols <- nearZeroVar(X, freqCut=num/den, uniqueCut=uniqueCut)")
r("if(length(nzv_cols$Position > 0)) X <- X[,-nzv_cols$Position]")
columns = r.get("ncol(X)")
if columns == 0:
myDict = {'error': "All predictor variables have zero variance.\nsPLS-Regr was aborted!"}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
if x_scale == 'yes':
r("X_scaled <- scale(X, center=FALSE, scale=TRUE)")
else:
r("X_scaled <- scale(X, center=FALSE, scale=FALSE)")
if y_scale == 'yes':
r("Y_scaled <- scale(Y, center=FALSE, scale=TRUE)")
else:
r("Y_scaled <- scale(Y, center=FALSE, scale=FALSE)")
r("detach('package:mixOmics', unload=TRUE)")
r("set.seed(1)")
r("maxK <- length(Y)")
spls_string = "cv <- cv.spls(X_scaled, Y_scaled, scale.x=FALSE, scale.y=FALSE, eta=seq(0.1, 0.9, 0.1), K=c(1:maxK), plot.it=FALSE)"
r.assign("cmd", spls_string)
r("eval(parse(text=cmd))")
r("f <- spls(X_scaled, Y_scaled, scale.x=FALSE, scale.y=FALSE, eta=cv$eta.opt, K=cv$K.opt)")
r("out <- capture.output(print(f))")
fout = r.get("out")
if fout is not None:
for i in fout:
result += str(i) + '\n'
else:
myDict = {'error': "Analysis did not converge.\nsPLS-Regr was aborted!"}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
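                # Bootstrap confidence intervals for the fitted coefficients; correct.spls()
                # zeroes out coefficients whose interval includes zero.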
r("set.seed(1)")
r("ci.f <- ci.spls(f, plot.it=FALSE, plot.fix='y')")
r("cis <- ci.f$cibeta")
r("cf <- correct.spls(ci.f, plot.it=FALSE)")
r("out <- capture.output(cis)")
fout = r.get("out")
if fout is not None:
result += '\n\nBootstrapped confidence intervals of coefficients:\n'
for i in fout:
result += str(i) + '\n'
result += '\n===============================================\n'
r("coef.f <- coef(f)")
r("sum <- sum(coef.f != 0)")
total = r.get("sum")
functions.setBase(RID, 'Step 4 of 6: Calculating sPLS...done!')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
functions.setBase(RID, 'Step 5 of 6: Formatting sPLS coefficient table...')
finalDict = {}
if total is not None:
r("pred.f <- predict(f, type='fit')")
r("pred.f.rows <- row.names(pred.f)")
pred = r.get("pred.f")
rows = r.get("pred.f.rows")
predList = ['pred_' + s for s in quantFields]
predDF = pd.DataFrame(pred, columns=predList, index=rows)
meta_scaled = r.get("Y_scaled")
metaDF_scaled = pd.DataFrame(data=meta_scaled, columns=quantFields, index=rows)
resultDF = pd.merge(metaDF_scaled, predDF, left_index=True, right_index=True)
result += 'sPLS Model Fit (y = mx + b):\n'
result += 'y = predicted\n'
result += 'x = observed\n\n'
for i in xrange(len(quantFields)):
r.assign("myCol", quantFields[i])
x = r.get("Y_scaled[,myCol]")
x = x.tolist()
y = resultDF[predList[i]].astype(float).values.tolist()
slp, inter, r_value, p, se = stats.linregress(x, y)
r_sq = r_value * r_value
result += 'Variable: ' + str(quantFields[i]) + '\n'
result += 'Slope (m): ' + str(slp) + '\n'
result += 'Intercept (b): ' + str(inter) + '\n'
result += 'R2: ' + str(r_sq) + '\n'
result += 'Std Error: ' + str(se) + '\n\n\n'
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
r("coef.f.rows <- row.names(coef.f)")
cf = r.get("coef.f")
rows = r.get("coef.f.rows")
coeffsDF = pd.DataFrame(cf, columns=quantFields, index=rows)
coeffsDF = coeffsDF.loc[(coeffsDF != 0).any(axis=1)]
coeffsDF.sort_index(inplace=True)
taxIDList = coeffsDF.index.values.tolist()
namesDF = pd.DataFrame()
if treeType == 1:
if selectAll == 1:
taxNameList = Kingdom.objects.filter(kingdomid__in=taxIDList).values('kingdomid', 'kingdomName')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'kingdomName': 'rank_name', 'kingdomid': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif selectAll == 2:
taxNameList = Phyla.objects.filter(phylaid__in=taxIDList).values('phylaid', 'phylaName')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'phylaName': 'rank_name', 'phylaid': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif selectAll == 3:
taxNameList = Class.objects.filter(classid__in=taxIDList).values('classid', 'className')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'className': 'rank_name', 'classid': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif selectAll == 4:
taxNameList = Order.objects.filter(orderid__in=taxIDList).values('orderid', 'orderName')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'orderName': 'rank_name', 'orderid': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif selectAll == 5:
taxNameList = Family.objects.filter(familyid__in=taxIDList).values('familyid', 'familyName')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'familyName': 'rank_name', 'familyid': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif selectAll == 6:
taxNameList = Genus.objects.filter(genusid__in=taxIDList).values('genusid', 'genusName')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'genusName': 'rank_name', 'genusid': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif selectAll == 7:
taxNameList = Species.objects.filter(speciesid__in=taxIDList).values('speciesid', 'speciesName')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'speciesName': 'rank_name', 'speciesid': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif selectAll == 9:
taxNameList = OTU_99.objects.filter(otuid__in=taxIDList).values('otuid', 'otuName')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'otuName': 'rank_name', 'otuid': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif treeType == 2:
if keggAll == 1:
taxNameList = ko_lvl1.objects.using('picrust').filter(ko_lvl1_id__in=taxIDList).values('ko_lvl1_id', 'ko_lvl1_name')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'ko_lvl1_name': 'rank_name', 'ko_lvl1_id': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif keggAll == 2:
taxNameList = ko_lvl2.objects.using('picrust').filter(ko_lvl2_id__in=taxIDList).values('ko_lvl2_id', 'ko_lvl2_name')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'ko_lvl2_name': 'rank_name', 'ko_lvl2_id': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif keggAll == 3:
taxNameList = ko_lvl3.objects.using('picrust').filter(ko_lvl3_id__in=taxIDList).values('ko_lvl3_id', 'ko_lvl3_name')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'ko_lvl3_name': 'rank_name', 'ko_lvl3_id': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif treeType == 3:
if nzAll == 1:
taxNameList = nz_lvl1.objects.using('picrust').filter(nz_lvl1_id__in=taxIDList).values('nz_lvl1_id', 'nz_lvl1_name')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'nz_lvl1_name': 'rank_name', 'nz_lvl1_id': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif nzAll == 2:
taxNameList = nz_lvl2.objects.using('picrust').filter(nz_lvl2_id__in=taxIDList).values('nz_lvl2_id', 'nz_lvl2_name')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'nz_lvl2_name': 'rank_name', 'nz_lvl2_id': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif nzAll == 3:
taxNameList = nz_lvl3.objects.using('picrust').filter(nz_lvl3_id__in=taxIDList).values('nz_lvl3_id', 'nz_lvl3_name')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'nz_lvl3_name': 'rank_name', 'nz_lvl3_id': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif nzAll == 4:
taxNameList = nz_lvl4.objects.using('picrust').filter(nz_lvl4_id__in=taxIDList).values('nz_lvl4_id', 'nz_lvl4_name')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'nz_lvl4_name': 'rank_name', 'nz_lvl4_id': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif nzAll == 5:
taxNameList = nz_lvl4.objects.using('picrust').filter(nz_lvl4_id__in=taxIDList).values('nz_lvl4_id', 'nz_lvl4_name')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'nz_lvl4_name': 'rank_name', 'nz_lvl4_id': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif nzAll == 6:
taxNameList = nz_lvl4.objects.using('picrust').filter(nz_lvl4_id__in=taxIDList).values('nz_lvl4_id', 'nz_lvl4_name')
namesDF = pd.DataFrame(list(taxNameList))
                            namesDF.rename(columns={'nz_lvl4_name': 'rank_name', 'nz_lvl4_id': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
namesDF.sort_index(inplace=True)
taxNameList = namesDF['rank_name'].values.tolist()
if treeType == 2:
if keggAll > 1:
taxNameList[:] = (item[:20] + '...' if len(item) > 20 else item for item in taxNameList)
elif treeType == 3:
if nzAll > 1:
taxNameList[:] = (item.split()[0] for item in taxNameList)
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
coeffsDF = pd.merge(namesDF, coeffsDF, left_index=True, right_index=True, how='inner')
coeffsDF.reset_index(inplace=True)
res_table = coeffsDF.to_html(classes="table display")
res_table = res_table.replace('border="1"', 'border="0"')
finalDict['res_table'] = str(res_table)
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
resultDF.reset_index(inplace=True)
resultDF.rename(columns={'index': 'sampleid'}, inplace=True)
pred_table = resultDF.to_html(classes="table display")
pred_table = pred_table.replace('border="1"', 'border="0"')
finalDict['pred_table'] = str(pred_table)
functions.setBase(RID, 'Step 5 of 6: Formatting sPLS coefficient table...done')
functions.setBase(RID, 'Step 6 of 6: Formatting graph data for display...')
xAxisDict = {}
xAxisDict['categories'] = taxNameList
labelsDict = {}
labelsDict['rotation'] = 270
labelsDict['enabled'] = True
labelsDict['style'] = {'fontSize': '14px'}
xAxisDict['labels'] = labelsDict
xAxisDict['title'] = {'text': None}
xAxisDict['tickLength'] = 0
yAxisDict = {}
yAxisDict['categories'] = quantFields
yAxisDict['labels'] = {'style': {'fontSize': '14px'}}
yAxisDict['title'] = {'text': None}
seriesList = []
seriesDict = {}
seriesDict['borderWidth'] = '1'
row, col = coeffsDF.shape
dataList = []
for i in xrange(row):
for j in xrange(len(quantFields)):
val = round(coeffsDF[quantFields[j]].iloc[i], 5)
tup = (i, j, val)
obsList = list(tup)
dataList.append(obsList)
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
seriesDict['data'] = dataList
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
labelDict = {}
labelDict['enabled'] = True
                    labelDict['color'] = 'black'
                    labelDict['style'] = {'textShadow': 'none'}
seriesList.append(seriesDict)
finalDict['xAxis'] = xAxisDict
finalDict['yAxis'] = yAxisDict
finalDict['series'] = seriesList
# R clustered heatmap
clustDF = coeffsDF.drop('rank_id', axis=1)
row, col = clustDF.shape
method = all['methodVal']
metric = all['metricVal']
path = "myPhyloDB/media/temp/spls/Rplots/" + str(RID) + ".spls.pdf"
if os.path.exists(path):
os.remove(path)
if not os.path.exists('myPhyloDB/media/temp/spls/Rplots'):
os.makedirs('myPhyloDB/media/temp/spls/Rplots')
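                    # Scale the PDF canvas with the number of taxa (rows) and variables (columns).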
height = 2.5 + 0.2*row
width = 5 + 0.2*(col-1)
file = "pdf('myPhyloDB/media/temp/spls/Rplots/" + str(RID) + ".spls.pdf', height=" + str(height) + ", width=" + str(width) + ", onefile=FALSE)"
r.assign("cmd", file)
r("eval(parse(text=cmd))")
r.assign("df", clustDF[quantFields])
r("df <- as.matrix(df)")
r.assign("rows", taxNameList)
r("rownames(df) <- rows")
r("col.pal <- brewer.pal(9,'RdBu')")
if row > 2 and col > 3:
hmap_str = "pheatmap(df, fontsize=12, color=col.pal, clustering_method='" + str(method) + "', clustering_distance_rows='" + str(metric) + "', clustering_distance_cols='" + str(metric) + "')"
r.assign("cmd", hmap_str)
r("eval(parse(text=cmd))")
if row > 2 and col <= 3:
hmap_str = "pheatmap(df, color=col.pal, cluster_col=FALSE, clustering_method='" + str(method) + "', clustering_distance_rows='" + str(metric) + "')"
r.assign("cmd", hmap_str)
r("eval(parse(text=cmd))")
if row <= 2 and col > 3:
hmap_str = "pheatmap(df, color=col.pal, cluster_row=FALSE, clustering_method='" + str(method) + "', clustering_distance_cols='" + str(metric) + "')"
r.assign("cmd", hmap_str)
r("eval(parse(text=cmd))")
if row <= 2 and col <= 3:
hmap_str = "pheatmap(df, color=col.pal, cluster_col=FALSE, cluster_row=FALSE)"
r.assign("cmd", hmap_str)
r("eval(parse(text=cmd))")
r("dev.off()")
finalDict['text'] = result
functions.setBase(RID, 'Step 6 of 6: Formatting graph data for display...done!')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
finalDict['error'] = 'none'
res = json.dumps(finalDict)
return HttpResponse(res, content_type='application/json')
except Exception as e:
if not stops[PID] == RID:
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG,)
myDate = "\nDate: " + str(datetime.datetime.now()) + "\n"
logging.exception(myDate)
myDict = {}
myDict['error'] = "There was an error during your analysis:\nError: " + str(e.message) + "\nTimestamp: " + str(datetime.datetime.now())
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
| gpl-3.0 | -5,018,934,051,595,870,000 | 52.24633 | 214 | 0.447917 | false |
KodiColdkeys/coldkeys-addons | repository/plugin.video.white.devil/resources/lib/sources/afdah_wp.py | 6 | 3239 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,json,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
class source:
def __init__(self):
self.language = ['en']
self.domains = ['fmovie.co', 'afdah.org', 'xmovies8.org', 'putlockerhd.co']
self.base_link = 'https://fmovie.co'
self.search_link = '/results?q=%s'
def movie(self, imdb, title, year):
try:
query = self.search_link % (urllib.quote_plus(title))
query = urlparse.urljoin(self.base_link, query)
t = cleantitle.get(title)
r = client.request(query)
r = client.parseDOM(r, 'div', attrs = {'class': 'cell_container'})
r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
referer = urlparse.urljoin(self.base_link, url)
h = {'X-Requested-With': 'XMLHttpRequest'}
try: post = urlparse.parse_qs(urlparse.urlparse(referer).query).values()[0][0]
except: post = referer.strip('/').split('/')[-1].split('watch_', 1)[-1].rsplit('#')[0].rsplit('.')[0]
post = urllib.urlencode({'v': post})
url = urlparse.urljoin(self.base_link, '/video_info/iframe')
r = client.request(url, post=post, headers=h, referer=url)
r = json.loads(r).values()
r = [urllib.unquote(i.split('url=')[-1]) for i in r]
for i in r:
try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'Afdah', 'url': i, 'direct': True, 'debridonly': False})
except: pass
return sources
except:
return sources
def resolve(self, url):
return directstream.googlepass(url)
| gpl-2.0 | 6,610,243,343,099,286,000 | 33.457447 | 177 | 0.570546 | false |
conejoninja/xbmc-seriesly | servers/zippyshare.py | 1 | 2002 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# seriesly - XBMC Plugin
# Connector for zippyshare
# http://blog.tvalacarta.info/plugin-xbmc/seriesly/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def test_video_exists( page_url ):
return True,""
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[zippyshare.py] get_video_url(page_url='%s')" % page_url)
video_urls = []
headers=[]
headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:19.0) Gecko/20100101 Firefox/19.0"])
data = scrapertools.cache_page(page_url,headers=headers)
location = scrapertools.get_match(data,"var submitCaptcha.*?document.location \= '([^']+)'")
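    # Kodi/XBMC convention: text after '|' is parsed as urlencoded HTTP headers, so the
    # player sends the original page as Referer when it requests the file.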
mediaurl = urlparse.urljoin(page_url,location)+"|"+urllib.urlencode({'Referer' : page_url})
extension = scrapertools.get_filename_from_url(mediaurl)[-4:]
video_urls.append( [ extension + " [zippyshare]",mediaurl ] )
return video_urls
# Find this server's videos in the given text
def find_videos(data):
encontrados = set()
devuelve = []
#http://www5.zippyshare.com/v/11178679/file.html
patronvideos = '([a-z0-9]+\.zippyshare.com/v/\d+/file.html)'
logger.info("[zippyshare.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[zippyshare]"
url = "http://"+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'zippyshare' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
def test():
video_urls = get_video_url("http://www5.zippyshare.com/v/11178679/file.html")
return len(video_urls)>0 | gpl-3.0 | 1,196,410,013,671,276,000 | 32.366667 | 118 | 0.610195 | false |
harayz/raspberry_pwn | src/pentest/sqlmap/plugins/dbms/oracle/fingerprint.py | 7 | 3732 | #!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.common import Backend
from lib.core.common import Format
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import DBMS
from lib.core.session import setDbms
from lib.core.settings import ORACLE_ALIASES
from lib.request import inject
from plugins.generic.fingerprint import Fingerprint as GenericFingerprint
class Fingerprint(GenericFingerprint):
def __init__(self):
GenericFingerprint.__init__(self, DBMS.ORACLE)
def getFingerprint(self):
value = ""
wsOsFp = Format.getOs("web server", kb.headersFp)
if wsOsFp:
value += "%s\n" % wsOsFp
if kb.data.banner:
dbmsOsFp = Format.getOs("back-end DBMS", kb.bannerFp)
if dbmsOsFp:
value += "%s\n" % dbmsOsFp
value += "back-end DBMS: "
if not conf.extensiveFp:
value += DBMS.ORACLE
return value
actVer = Format.getDbms()
blank = " " * 15
value += "active fingerprint: %s" % actVer
if kb.bannerFp:
banVer = kb.bannerFp["dbmsVersion"] if 'dbmsVersion' in kb.bannerFp else None
banVer = Format.getDbms([banVer])
value += "\n%sbanner parsing fingerprint: %s" % (blank, banVer)
htmlErrorFp = Format.getErrorParsedDBMSes()
if htmlErrorFp:
value += "\n%shtml error message fingerprint: %s" % (blank, htmlErrorFp)
return value
def checkDbms(self):
if not conf.extensiveFp and (Backend.isDbmsWithin(ORACLE_ALIASES) or (conf.dbms or "").lower() in ORACLE_ALIASES):
setDbms(DBMS.ORACLE)
self.getBanner()
return True
infoMsg = "testing %s" % DBMS.ORACLE
logger.info(infoMsg)
# NOTE: SELECT ROWNUM=ROWNUM FROM DUAL does not work connecting
# directly to the Oracle database
if conf.direct:
result = True
else:
result = inject.checkBooleanExpression("ROWNUM=ROWNUM")
if result:
infoMsg = "confirming %s" % DBMS.ORACLE
logger.info(infoMsg)
# NOTE: SELECT LENGTH(SYSDATE)=LENGTH(SYSDATE) FROM DUAL does
# not work connecting directly to the Oracle database
if conf.direct:
result = True
else:
result = inject.checkBooleanExpression("LENGTH(SYSDATE)=LENGTH(SYSDATE)")
if not result:
warnMsg = "the back-end DBMS is not %s" % DBMS.ORACLE
logger.warn(warnMsg)
return False
setDbms(DBMS.ORACLE)
self.getBanner()
if not conf.extensiveFp:
return True
infoMsg = "actively fingerprinting %s" % DBMS.ORACLE
logger.info(infoMsg)
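            # Compare the leading digit(s) of PRODUCT_COMPONENT_VERSION.VERSION against each
            # candidate release (one character for 8i/9i, two for 10g/11i).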
for version in ("11i", "10g", "9i", "8i"):
number = int(re.search("([\d]+)", version).group(1))
output = inject.checkBooleanExpression("%d=(SELECT SUBSTR((VERSION),1,%d) FROM SYS.PRODUCT_COMPONENT_VERSION WHERE ROWNUM=1)" % (number, 1 if number < 10 else 2))
if output:
Backend.setVersion(version)
break
return True
else:
warnMsg = "the back-end DBMS is not %s" % DBMS.ORACLE
logger.warn(warnMsg)
return False
def forceDbmsEnum(self):
if conf.db:
conf.db = conf.db.upper()
if conf.tbl:
conf.tbl = conf.tbl.upper()
| gpl-3.0 | 4,609,657,013,334,637,000 | 28.856 | 178 | 0.580118 | false |
salaria/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/NewReport.py | 384 | 3903 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer [email protected]
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import string
import unohelper
import xmlrpclib
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from lib.error import ErrorDialog
from lib.functions import *
from lib.logreport import *
from LoginTest import *
from lib.rpc import *
database="test"
uid = 3
#
#
#
# Start OpenOffice.org, listen for connections and open testing document
#
#
class NewReport(unohelper.Base, XJobExecutor):
def __init__(self, ctx):
self.ctx = ctx
self.module = "openerp_report"
self.version = "0.1"
LoginTest()
self.logobj=Logger()
if not loginstatus and __name__=="package":
exit(1)
self.win=DBModalDialog(60, 50, 180, 115, "Open New Report")
self.win.addFixedText("lblModuleSelection", 2, 2, 60, 15, "Module Selection")
self.win.addComboListBox("lstModule", -2,13,176,80 , False)
self.lstModule = self.win.getControl( "lstModule" )
self.aModuleName=[]
desktop=getDesktop()
doc = desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
global passwd
self.password = passwd
global url
self.sock=RPCSession(url)
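        # Fetch every model registered on the server (an empty domain matches all
        # records) and use their names to fill the module selection list.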
ids = self.sock.execute(database, uid, self.password, 'ir.model' , 'search',[])
fields = [ 'model','name']
res = self.sock.execute(database, uid, self.password, 'ir.model' , 'read', ids, fields)
res.sort(lambda x, y: cmp(x['name'],y['name']))
for i in range(len(res)):
self.lstModule.addItem(res[i]['name'],self.lstModule.getItemCount())
self.aModuleName.append(res[i]['model'])
self.win.addButton('btnOK',-2 ,-5, 70,15,'Use Module in Report' ,actionListenerProc = self.btnOk_clicked )
self.win.addButton('btnCancel',-2 - 70 - 5 ,-5, 35,15,'Cancel' ,actionListenerProc = self.btnCancel_clicked )
self.win.doModalDialog("",None)
def btnOk_clicked(self, oActionEvent):
desktop=getDesktop()
doc = desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
docinfo.setUserFieldValue(3,self.aModuleName[self.lstModule.getSelectedItemPos()])
self.logobj.log_write('Module Name',LOG_INFO, ':Module use in creating a report %s using database %s' % (self.aModuleName[self.lstModule.getSelectedItemPos()], database))
self.win.endExecute()
def btnCancel_clicked(self, oActionEvent):
self.win.endExecute()
if __name__<>"package" and __name__=="__main__":
NewReport(None)
elif __name__=="package":
g_ImplementationHelper.addImplementation( \
NewReport,
"org.openoffice.openerp.report.opennewreport",
("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -6,020,361,088,547,412,000 | 38.826531 | 179 | 0.638739 | false |
istehem/laundry_booking_bot | classes.py | 1 | 1814 | class calendar:
def __repr__(self):
def color(text):
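            # ANSI escape sequences: free=green, reserved=red, passed=yellow, booked=blue.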
colors = {
'BLUE' : '\033[94m',
'GREEN' : '\033[92m',
'YELLOW' : '\033[93m',
'RED' : '\033[91m',
'ENDC' : '\033[0m'
}
return {
'free' : colors['GREEN'] + text + colors['ENDC'],
'reserved' : colors['RED'] + text + colors['ENDC'],
'passed' : colors['YELLOW'] + text + colors['ENDC'],
'booked' : colors['BLUE'] + text + colors['ENDC']
}.get(text,text)
days = {
0 : 'Monday',
1 : 'Tuesday',
2 : 'Wednesday',
3 : 'Thursday',
4 : 'Friday',
5 : 'Saturday',
6 : 'Sunday'
}
try:
l1 = "statuses for shifts %s using week offset %s" % (self.machines,self.week_offset) + '\n'
l2 = '-'*81 + '\n'
l3 = ("%-10s: " + "%-9i"*8) % tuple(["shift"] + range(0,8)) + '\n'
l4 = '-'*81 + '\n'
lr = ""
for day in range(0,7):
day_name = days[day]
xs = [color(self.items[(day,shift)]['status']) for shift in range(0,8)]
lr = lr + ("%-10s: " + "%-17s "*8) % tuple([day_name] + xs) + '\n'
return l1 + l2 + l3 + l4 + lr
except:
return "not a fully defined calendar object"
statuses = {
'free' : [],
'passed' : [],
'reserved' : [],
'booked' : []
}
machines_id = None
machines = None
week_offset = None
items = dict()
| bsd-3-clause | 2,414,067,194,950,932,500 | 36.020408 | 105 | 0.351709 | false |
J-Liu/qemu | scripts/tracetool/format/tcg_helper_wrapper_h.py | 24 | 2167 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Generate trace/generated-helpers-wrappers.h.
"""
__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2012-2016, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import out
from tracetool.transform import *
import tracetool.vcpu
def generate(events, backend, group):
events = [e for e in events
if "disable" not in e.properties]
out('/* This file is autogenerated by tracetool, do not edit. */',
'',
'#define tcg_temp_new_nop(v) (v)',
'#define tcg_temp_free_nop(v)',
'',
)
for e in events:
if "tcg-exec" not in e.properties:
continue
# tracetool.generate always transforms types to host
e_args = tracetool.vcpu.transform_args("tcg_helper_c", e.original, "wrapper")
# mixed-type to TCG helper bridge
args_tcg_compat = e_args.transform(HOST_2_TCG_COMPAT)
code_new = [
"%(tcg_type)s __%(name)s = %(tcg_func)s(%(name)s);" %
{"tcg_type": transform_type(type_, HOST_2_TCG),
"tcg_func": transform_type(type_, HOST_2_TCG_TMP_NEW),
"name": name}
for (type_, name) in args_tcg_compat
]
code_free = [
"%(tcg_func)s(__%(name)s);" %
{"tcg_func": transform_type(type_, HOST_2_TCG_TMP_FREE),
"name": name}
for (type_, name) in args_tcg_compat
]
gen_name = "gen_helper_" + e.api()
out('static inline void %(name)s(%(args)s)',
'{',
' %(code_new)s',
' %(proxy_name)s(%(tmp_names)s);',
' %(code_free)s',
'}',
name=gen_name,
args=e_args,
proxy_name=gen_name + "_proxy",
code_new="\n ".join(code_new),
code_free="\n ".join(code_free),
tmp_names=", ".join(["__%s" % name for _, name in e_args]),
)
| gpl-2.0 | -846,805,332,940,118,900 | 29.492958 | 85 | 0.518707 | false |
shakamunyi/tensorflow | tensorflow/__init__.py | 81 | 1481 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Bring in all of the public TensorFlow interface into this
# module.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.python import *
# pylint: enable=wildcard-import
from tensorflow.python.util.lazy_loader import LazyLoader
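# contrib is comparatively heavy to import, so defer loading it until first attribute access.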
contrib = LazyLoader('contrib', globals(), 'tensorflow.contrib')
del LazyLoader
del absolute_import
del division
del print_function
# These symbols appear because we import the python package which
# in turn imports from tensorflow.core and tensorflow.python. They
# must come from this module. So python adds these symbols for the
# resolution to succeed.
# pylint: disable=undefined-variable
del python
del core
# pylint: enable=undefined-variable
| apache-2.0 | -592,881,706,878,708,000 | 34.261905 | 80 | 0.736664 | false |
Jorge-Rodriguez/ansible | lib/ansible/modules/cloud/azure/azure_rm_sqlfirewallrule.py | 22 | 9610 | #!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_sqlfirewallrule
version_added: "2.7"
short_description: Manage Firewall Rule instance.
description:
- Create, update and delete instance of Firewall Rule.
options:
resource_group:
description:
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
required: True
server_name:
description:
- The name of the server.
required: True
name:
description:
- The name of the firewall rule.
required: True
start_ip_address:
description:
- The start IP address of the firewall rule. Must be IPv4 format. Use value C(0.0.0.0) to represent all Azure-internal IP addresses.
end_ip_address:
description:
- "The end IP address of the firewall rule. Must be IPv4 format. Must be greater than or equal to startIpAddress. Use value C(0.0.0.0) to represe
nt all Azure-internal IP addresses."
state:
description:
- Assert the state of the SQL Database. Use 'present' to create or update an SQL Database and 'absent' to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Create (or update) Firewall Rule
azure_rm_sqlfirewallrule:
resource_group: firewallrulecrudtest-12
server_name: firewallrulecrudtest-6285
name: firewallrulecrudtest-5370
start_ip_address: 172.28.10.136
end_ip_address: 172.28.10.138
'''
RETURN = '''
id:
description:
- Resource ID.
returned: always
type: str
sample: "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/firewallrulecrudtest-12/providers/Microsoft.Sql/servers/firewallrulecrudtest-628
5/firewallRules/firewallrulecrudtest-5370"
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from azure.mgmt.sql import SqlManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMFirewallRules(AzureRMModuleBase):
"""Configuration class for an Azure RM Firewall Rule resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
server_name=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
start_ip_address=dict(
type='str'
),
end_ip_address=dict(
type='str'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.server_name = None
self.name = None
self.start_ip_address = None
self.end_ip_address = None
self.results = dict(changed=False)
self.state = None
self.to_do = Actions.NoAction
super(AzureRMFirewallRules, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=False)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
old_response = self.get_firewallrule()
response = None
if not old_response:
self.log("Firewall Rule instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log("Firewall Rule instance already exists")
if self.state == 'absent':
self.to_do = Actions.Delete
elif self.state == 'present':
self.log("Need to check if Firewall Rule instance has to be deleted or may be updated")
if (self.start_ip_address is not None) and (self.start_ip_address != old_response['start_ip_address']):
self.to_do = Actions.Update
if (self.end_ip_address is not None) and (self.end_ip_address != old_response['end_ip_address']):
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log("Need to Create / Update the Firewall Rule instance")
if self.check_mode:
self.results['changed'] = True
return self.results
response = self.create_update_firewallrule()
if not old_response:
self.results['changed'] = True
else:
self.results['changed'] = old_response.__ne__(response)
self.log("Creation / Update done")
elif self.to_do == Actions.Delete:
self.log("Firewall Rule instance deleted")
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_firewallrule()
# make sure instance is actually deleted, for some Azure resources, instance is hanging around
# for some time after deletion -- this should be really fixed in Azure
while self.get_firewallrule():
time.sleep(20)
else:
self.log("Firewall Rule instance unchanged")
self.results['changed'] = False
response = old_response
if response:
self.results["id"] = response["id"]
return self.results
def create_update_firewallrule(self):
'''
Creates or updates Firewall Rule with the specified configuration.
:return: deserialized Firewall Rule instance state dictionary
'''
self.log("Creating / Updating the Firewall Rule instance {0}".format(self.name))
try:
response = self.sql_client.firewall_rules.create_or_update(resource_group_name=self.resource_group,
server_name=self.server_name,
firewall_rule_name=self.name,
start_ip_address=self.start_ip_address,
end_ip_address=self.end_ip_address)
if isinstance(response, LROPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the Firewall Rule instance.')
self.fail("Error creating the Firewall Rule instance: {0}".format(str(exc)))
return response.as_dict()
def delete_firewallrule(self):
'''
Deletes specified Firewall Rule instance in the specified subscription and resource group.
:return: True
'''
self.log("Deleting the Firewall Rule instance {0}".format(self.name))
try:
response = self.sql_client.firewall_rules.delete(resource_group_name=self.resource_group,
server_name=self.server_name,
firewall_rule_name=self.name)
except CloudError as e:
self.log('Error attempting to delete the Firewall Rule instance.')
self.fail("Error deleting the Firewall Rule instance: {0}".format(str(e)))
return True
def get_firewallrule(self):
'''
Gets the properties of the specified Firewall Rule.
:return: deserialized Firewall Rule instance state dictionary
'''
self.log("Checking if the Firewall Rule instance {0} is present".format(self.name))
found = False
try:
response = self.sql_client.firewall_rules.get(resource_group_name=self.resource_group,
server_name=self.server_name,
firewall_rule_name=self.name)
found = True
self.log("Response : {0}".format(response))
self.log("Firewall Rule instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the Firewall Rule instance.')
if found is True:
return response.as_dict()
return False
def main():
"""Main execution"""
AzureRMFirewallRules()
if __name__ == '__main__':
main()
| gpl-3.0 | -3,101,940,032,950,997,000 | 34.330882 | 160 | 0.565453 | false |
shubhdev/edx-platform | cms/djangoapps/contentstore/views/tests/test_entrance_exam.py | 83 | 12383 | """
Test module for Entrance Exams AJAX callback handler workflows
"""
import json
from mock import patch
from django.conf import settings
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from contentstore.tests.utils import AjaxEnabledTestClient, CourseTestCase
from contentstore.utils import reverse_url
from contentstore.views.entrance_exam import create_entrance_exam, update_entrance_exam, delete_entrance_exam
from contentstore.views.helpers import GRADER_TYPES
from models.settings.course_grading import CourseGradingModel
from models.settings.course_metadata import CourseMetadata
from opaque_keys.edx.keys import UsageKey
from student.tests.factories import UserFactory
from util import milestones_helpers
from xmodule.modulestore.django import modulestore
class EntranceExamHandlerTests(CourseTestCase):
"""
Base test class for create, save, and delete
"""
if settings.FEATURES.get('ENTRANCE_EXAMS', False):
def setUp(self):
"""
Shared scaffolding for individual test runs
"""
super(EntranceExamHandlerTests, self).setUp()
self.course_key = self.course.id
self.usage_key = self.course.location
self.course_url = '/course/{}'.format(unicode(self.course.id))
self.exam_url = '/course/{}/entrance_exam/'.format(unicode(self.course.id))
milestones_helpers.seed_milestone_relationship_types()
self.milestone_relationship_types = milestones_helpers.get_milestone_relationship_types()
def test_contentstore_views_entrance_exam_post(self):
"""
Unit Test: test_contentstore_views_entrance_exam_post
"""
resp = self.client.post(self.exam_url, {}, http_accept='application/json')
self.assertEqual(resp.status_code, 201)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 200)
# Reload the test course now that the exam module has been added
self.course = modulestore().get_course(self.course.id)
metadata = CourseMetadata.fetch_all(self.course)
self.assertTrue(metadata['entrance_exam_enabled'])
self.assertIsNotNone(metadata['entrance_exam_minimum_score_pct'])
self.assertIsNotNone(metadata['entrance_exam_id']['value'])
self.assertTrue(len(milestones_helpers.get_course_milestones(unicode(self.course.id))))
content_milestones = milestones_helpers.get_course_content_milestones(
unicode(self.course.id),
metadata['entrance_exam_id']['value'],
self.milestone_relationship_types['FULFILLS']
)
self.assertTrue(len(content_milestones))
def test_contentstore_views_entrance_exam_post_new_sequential_confirm_grader(self):
"""
Unit Test: test_contentstore_views_entrance_exam_post
"""
resp = self.client.post(self.exam_url, {}, http_accept='application/json')
self.assertEqual(resp.status_code, 201)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 200)
# Reload the test course now that the exam module has been added
self.course = modulestore().get_course(self.course.id)
# Add a new child sequential to the exam module
# Confirm that the grader type is 'Entrance Exam'
chapter_locator_string = json.loads(resp.content).get('locator')
# chapter_locator = UsageKey.from_string(chapter_locator_string)
seq_data = {
'category': "sequential",
'display_name': "Entrance Exam Subsection",
'parent_locator': chapter_locator_string,
}
resp = self.client.ajax_post(reverse_url('xblock_handler'), seq_data)
seq_locator_string = json.loads(resp.content).get('locator')
seq_locator = UsageKey.from_string(seq_locator_string)
section_grader_type = CourseGradingModel.get_section_grader_type(seq_locator)
self.assertEqual(GRADER_TYPES['ENTRANCE_EXAM'], section_grader_type['graderType'])
def test_contentstore_views_entrance_exam_get(self):
"""
Unit Test: test_contentstore_views_entrance_exam_get
"""
resp = self.client.post(
self.exam_url,
{'entrance_exam_minimum_score_pct': settings.ENTRANCE_EXAM_MIN_SCORE_PCT},
http_accept='application/json'
)
self.assertEqual(resp.status_code, 201)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 200)
def test_contentstore_views_entrance_exam_delete(self):
"""
Unit Test: test_contentstore_views_entrance_exam_delete
"""
resp = self.client.post(self.exam_url, {}, http_accept='application/json')
self.assertEqual(resp.status_code, 201)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 200)
resp = self.client.delete(self.exam_url)
self.assertEqual(resp.status_code, 204)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 404)
user = User.objects.create(
username='test_user',
email='[email protected]',
is_active=True,
)
user.set_password('test')
user.save()
milestones = milestones_helpers.get_course_milestones(unicode(self.course_key))
self.assertEqual(len(milestones), 1)
milestone_key = '{}.{}'.format(milestones[0]['namespace'], milestones[0]['name'])
paths = milestones_helpers.get_course_milestones_fulfillment_paths(
unicode(self.course_key),
milestones_helpers.serialize_user(user)
)
# What we have now is a course milestone requirement and no valid fulfillment
# paths for the specified user. The LMS is going to have to ignore this situation,
        # because we can't confidently prevent it from occurring at some point in the future.
# milestone_key_1 =
self.assertEqual(len(paths[milestone_key]), 0)
# Re-adding an entrance exam to the course should fix the missing link
# It wipes out any old entrance exam artifacts and inserts a new exam course chapter/module
resp = self.client.post(self.exam_url, {}, http_accept='application/json')
self.assertEqual(resp.status_code, 201)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 200)
# Confirm that we have only one Entrance Exam grader after re-adding the exam (validates SOL-475)
graders = CourseGradingModel.fetch(self.course_key).graders
count = 0
for grader in graders:
if grader['type'] == GRADER_TYPES['ENTRANCE_EXAM']:
count += 1
self.assertEqual(count, 1)
def test_contentstore_views_entrance_exam_delete_bogus_course(self):
"""
Unit Test: test_contentstore_views_entrance_exam_delete_bogus_course
"""
resp = self.client.delete('/course/bad/course/key/entrance_exam')
self.assertEqual(resp.status_code, 400)
def test_contentstore_views_entrance_exam_get_bogus_course(self):
"""
Unit Test: test_contentstore_views_entrance_exam_get_bogus_course
"""
resp = self.client.get('/course/bad/course/key/entrance_exam')
self.assertEqual(resp.status_code, 400)
def test_contentstore_views_entrance_exam_get_bogus_exam(self):
"""
Unit Test: test_contentstore_views_entrance_exam_get_bogus_exam
"""
resp = self.client.post(
self.exam_url,
{'entrance_exam_minimum_score_pct': '50'},
http_accept='application/json'
)
self.assertEqual(resp.status_code, 201)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 200)
self.course = modulestore().get_course(self.course.id)
# Should raise an ItemNotFoundError and return a 404
updated_metadata = {'entrance_exam_id': 'i4x://org.4/course_4/chapter/ed7c4c6a4d68409998e2c8554c4629d1'}
CourseMetadata.update_from_dict(
updated_metadata,
self.course,
self.user,
)
self.course = modulestore().get_course(self.course.id)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 404)
# Should raise an InvalidKeyError and return a 404
updated_metadata = {'entrance_exam_id': '123afsdfsad90f87'}
CourseMetadata.update_from_dict(
updated_metadata,
self.course,
self.user,
)
self.course = modulestore().get_course(self.course.id)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 404)
def test_contentstore_views_entrance_exam_post_bogus_course(self):
"""
Unit Test: test_contentstore_views_entrance_exam_post_bogus_course
"""
resp = self.client.post(
'/course/bad/course/key/entrance_exam',
{},
http_accept='application/json'
)
self.assertEqual(resp.status_code, 400)
def test_contentstore_views_entrance_exam_post_invalid_http_accept(self):
"""
Unit Test: test_contentstore_views_entrance_exam_post_invalid_http_accept
"""
resp = self.client.post(
'/course/bad/course/key/entrance_exam',
{},
http_accept='text/html'
)
self.assertEqual(resp.status_code, 400)
def test_contentstore_views_entrance_exam_get_invalid_user(self):
"""
Unit Test: test_contentstore_views_entrance_exam_get_invalid_user
"""
user = User.objects.create(
username='test_user',
email='[email protected]',
is_active=True,
)
user.set_password('test')
user.save()
self.client = AjaxEnabledTestClient()
self.client.login(username='test_user', password='test')
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 403)
def test_contentstore_views_entrance_exam_unsupported_method(self):
"""
Unit Test: test_contentstore_views_entrance_exam_unsupported_method
"""
resp = self.client.put(self.exam_url)
self.assertEqual(resp.status_code, 405)
def test_entrance_exam_view_direct_missing_score_setting(self):
"""
Unit Test: test_entrance_exam_view_direct_missing_score_setting
"""
user = UserFactory()
user.is_staff = True
request = RequestFactory()
request.user = user
resp = create_entrance_exam(request, self.course.id, None)
self.assertEqual(resp.status_code, 201)
@patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': False})
def test_entrance_exam_feature_flag_gating(self):
user = UserFactory()
user.is_staff = True
request = RequestFactory()
request.user = user
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 400)
resp = create_entrance_exam(request, self.course.id, None)
self.assertEqual(resp.status_code, 400)
resp = delete_entrance_exam(request, self.course.id)
self.assertEqual(resp.status_code, 400)
# No return, so we'll just ensure no exception is thrown
update_entrance_exam(request, self.course.id, {})
| agpl-3.0 | 2,910,304,563,596,529,000 | 43.865942 | 116 | 0.601954 | false |
jtauber/dcpu16py | terminals/curses_terminal.py | 3 | 2833 | import curses
class Terminal:
style_bold = False
keymap = {'A': 0x3, 'C': 0x2, 'D': 0x1}
def setup_colors(self):
curses.start_color()
curses.use_default_colors()
self.colors = {}
self.colors[(0, 0)] = 0
self.colors[(7, 0)] = 0
self.color_index = 1
self.win.bkgd(curses.color_pair(0))
def __init__(self, args):
if args.debug:
print("Curses conflicts with debugger")
raise SystemExit
self.win = curses.initscr()
self.win.nodelay(1)
self.win_height, self.win_width = self.win.getmaxyx()
curses.curs_set(0)
curses.noecho()
self.width = args.width
self.height = args.height
self.keys = []
self.setup_colors()
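    # curses requires colour pairs to be registered before use; allocate and cache a new
    # pair index the first time an (fg, bg) combination is requested.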
def get_color(self, fg, bg):
if (fg, bg) not in self.colors:
curses.init_pair(self.color_index, fg, bg)
self.colors[(fg, bg)] = self.color_index
self.color_index += 1
return self.colors[(fg, bg)]
def update_character(self, row, column, character, color=None):
try:
pair = 0
if color:
pair = self.get_color(*color)
color = curses.color_pair(pair)
if self.style_bold:
color |= curses.A_BOLD
self.win.addch(row, column, character, color)
except curses.error:
pass
def show(self):
color = curses.color_pair(self.get_color(3, -1))
if self.win_width > self.width:
try:
s = "." * (self.win_width - self.width)
for y in range(self.height):
self.win.addstr(y, self.width, s, color)
except curses.error:
pass
if self.win_height > self.height:
try:
s = "." * (self.win_width)
for y in range(self.height, self.win_height):
self.win.addstr(y, 0, s, color)
except curses.error:
pass
def updatekeys(self):
try:
            # XXX: this is probably a bad place to check if the window has been
            # resized, but there is no other opportunity to do this
win_height, win_width = self.win.getmaxyx()
if win_height != self.win_height or win_width != self.win_width:
self.win_height, self.win_width = win_height, win_width
self.show()
while(True):
char = self.win.getkey()
if len(char) == 1:
c = self.keymap[char] if char in self.keymap else ord(char)
self.keys.insert(0, c)
except curses.error:
pass
def redraw(self):
self.win.refresh()
def quit(self):
curses.endwin()
| mit | -2,002,509,363,815,710,000 | 29.793478 | 79 | 0.509354 | false |
laslabs/odoo-connector-carepoint | connector_carepoint/unit/mapper.py | 1 | 4874 | # -*- coding: utf-8 -*-
# Copyright 2015-2016 LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.addons.connector.unit.mapper import (mapping,
changed_by,
ImportMapper,
ExportMapper,
)
def trim(field):
""" A modifier intended to be used on the ``direct`` mappings.
Trim whitespace from field value
Example::
direct = [(trim('source'), 'target')]
:param field: name of the source field in the record
"""
def modifier(self, record, to_attr):
value = record.get(field)
if not value:
return False
return str(value).strip()
return modifier
def trim_and_titleize(field):
""" A modifier intended to be used on the ``direct`` mappings.
Trim whitespace from field value & title case
Example::
direct = [(trim_and_titleize('source'), 'target')]
:param field: name of the source field in the record
"""
def modifier(self, record, to_attr):
value = record.get(field)
if not value:
return False
return str(value).strip().title()
return modifier
def to_float(field):
""" A modifier intended to be used on the ``direct`` mappings.
Convert SQLAlchemy Decimal types to float
Example::
direct = [(to_float('source'), 'target')]
:param field: name of the source field in the record
"""
def modifier(self, record, to_attr):
value = record.get(field)
if not value:
return False
return float(value)
return modifier
def to_int(field):
""" A modifier intended to be used on the ``direct`` mappings.
Convert SQLAlchemy Decimal types to integer
Example::
direct = [(to_int('source'), 'target')]
:param field: name of the source field in the record
"""
def modifier(self, record, to_attr):
value = record.get(field)
if not value:
return False
return int(value)
return modifier
def add_to(field, number):
""" A modifier intended to be used on the ``direct`` mappings.
Add a number to the field value
Example::
direct = [(add_to('source', 1.5), 'target')]
:param field: (str) name of the source field in the record
:param number: (float|int) Number to add to source value
"""
def modifier(self, record, to_attr):
value = record[field]
return float(value) + number
return modifier
class CarepointImportMapper(ImportMapper):
@mapping
def backend_id(self, record):
return {'backend_id': self.backend_record.id}
@mapping
def company_id(self, record):
return {'company_id': self.backend_record.company_id.id}
class PartnerImportMapper(CarepointImportMapper):
@mapping
def tz(self, record):
return {'tz': self.backend_record.default_tz}
@mapping
def currency_id(self, record):
return {'currency_id': self.backend_record.company_id.currency_id.id}
@mapping
def property_account_payable_id(self, record):
return {
'property_account_payable_id':
self.backend_record.default_account_payable_id.id,
}
@mapping
def property_payment_term_id(self, record):
return {
'property_payment_term_id':
self.backend_record.default_customer_payment_term_id.id,
}
@mapping
def property_supplier_payment_term_id(self, record):
return {
'property_supplier_payment_term_id':
self.backend_record.default_supplier_payment_term_id.id,
}
@mapping
def property_account_receivable_id(self, record):
return {
'property_account_receivable_id':
self.backend_record.default_account_receivable_id.id,
}
class PersonImportMapper(PartnerImportMapper):
def _get_name(self, record):
# @TODO: Support other name parts (surname)
name = []
parts = ['fname', 'lname']
for part in parts:
if record.get(part):
name.append(record[part])
return ' '.join(name).title()
@mapping
def name(self, record):
return {'name': self._get_name(record)}
class PersonExportMapper(ExportMapper):
@mapping
@changed_by('name')
def names(self, record):
# @TODO: Support other name parts (surname)
if ' ' in record.name:
parts = record.name.split(' ', 1)
fname = parts[0]
lname = parts[1]
else:
fname = '-'
lname = record.name
return {'lname': lname,
'fname': fname,
}
| agpl-3.0 | 3,400,426,285,687,859,700 | 27.337209 | 77 | 0.576529 | false |
kurli/crosswalk | app/tools/android/gyp/jar.py | 4 | 1783 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=F0401
import fnmatch
import optparse
import os
import sys
from util import build_utils
from util import md5_check
def DoJar(options):
class_files = build_utils.FindInDirectory(options.classes_dir, '*.class')
for exclude in options.excluded_classes.split():
class_files = filter(
lambda f: not fnmatch.fnmatch(f, exclude), class_files)
jar_path = os.path.abspath(options.jar_path)
# The paths of the files in the jar will be the same as they are passed in to
# the command. Because of this, the command should be run in
# options.classes_dir so the .class file paths in the jar are correct.
jar_cwd = options.classes_dir
class_files_rel = [os.path.relpath(f, jar_cwd) for f in class_files]
jar_cmd = ['jar', 'cf0', jar_path] + class_files_rel
record_path = '%s.md5.stamp' % options.jar_path
md5_check.CallAndRecordIfStale(
lambda: build_utils.CheckCallDie(jar_cmd, cwd=jar_cwd),
record_path=record_path,
input_paths=class_files,
input_strings=jar_cmd)
build_utils.Touch(options.jar_path)
def main():
parser = optparse.OptionParser()
parser.add_option('--classes-dir', help='Directory containing .class files.')
parser.add_option('--jar-path', help='Jar output path.')
parser.add_option('--excluded-classes',
help='List of .class file patterns to exclude from the jar.')
parser.add_option('--stamp', help='Path to touch on success.')
options, _ = parser.parse_args()
DoJar(options)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | 4,906,098,912,364,500,000 | 28.716667 | 79 | 0.701066 | false |
HumanExposure/factotum | dashboard/tests/unit/test_product_to_puc.py | 1 | 8439 | from django.db.utils import IntegrityError
from django.test import TestCase, tag
from dashboard.models import Product, PUC, ProductToPUC, ProductUberPuc, PUCKind
from dashboard.tests.loader import load_model_objects, fixtures_standard
from dashboard.views.product_curation import ProductForm
import time
@tag("puc")
class ProductToPUCTestWithSeedData(TestCase):
fixtures = fixtures_standard
def setUp(self):
self.client.login(username="Karyn", password="specialP@55word")
def test_uber_puc_update(self):
# Test that when a product-to-puc record is deleted or added or updated,
# the is_uber_puc attribute is reassigned to the correct row
p = Product.objects.get(pk=1866)
ptps = ProductToPUC.objects.filter(product=p)
# print(
# ptps.values_list(
# "product_id", "puc_id", "classification_method_id", "is_uber_puc"
# )
# )
# delete the MA uber PUC
ptps.filter(is_uber_puc=True).delete()
# confirm that the MB PUC assignment has inherited the uber status
ptp = ptps.get(is_uber_puc=True)
self.assertTrue(ptp.classification_method_id == "MB")
# reassign the 185 PUC as MA and confirm that it becomes the new uber PUC
ProductToPUC.objects.create(
product_id=1866, puc_id=185, classification_method_id="MA"
)
ptp = ptps.get(is_uber_puc=True)
self.assertTrue(ptp.classification_method_id == "MA")
@tag("loader", "puc")
class ProductToPUCTest(TestCase):
def setUp(self):
self.objects = load_model_objects()
def test_uber_puc_view(self):
prod = self.objects.p
# confirm that the new relationship returns None
self.assertTrue(prod.product_uber_puc == None)
# Create a three-part PUC hierarchy
puc1 = PUC.objects.create(
gen_cat="Grandparent gencat",
prod_fam="",
prod_type="",
description="Grandparent",
last_edited_by=self.objects.user,
kind=PUCKind.objects.get_or_create(name="Formulation", code="FO")[0],
)
puc2 = PUC.objects.create(
gen_cat="Grandparent gencat",
prod_fam="Parent prodfam",
prod_type="",
description="Parent",
last_edited_by=self.objects.user,
kind=PUCKind.objects.get_or_create(name="Formulation", code="FO")[0],
)
# assign the Grandparent PUC as a low-confidence "AU" classification_method
pp1 = ProductToPUC.objects.create(
product=prod, puc=puc1, classification_method_id="AU"
)
prod.refresh_from_db()
self.assertTrue(prod.product_uber_puc.puc == puc1)
# assign a higher-confidence method and check the uber puc
pp2 = ProductToPUC.objects.create(
product=prod, puc=puc2, classification_method_id="MB"
)
prod.refresh_from_db()
self.assertTrue(prod.product_uber_puc.puc == puc2)
def test_uber_puc(self):
# Test that when the product has no assigned PUC, the getter returns
# None
self.assertTrue(self.objects.p.uber_puc == None)
self.ppuc = ProductToPUC.objects.create(
product=self.objects.p,
puc=self.objects.puc,
puc_assigned_usr=self.objects.user,
classification_method_id="MA",
)
# Test that the get_uber_product_to_puc method returns expected values
uber_puc = self.objects.p.uber_puc
_str = "Test General Category - Test Product Family - Test Product Type"
self.assertEqual(_str, str(uber_puc)) # test str output
uber_puc.prod_fam = None # test str output *w/o* prod_fam
_str = "Test General Category - Test Product Type"
self.assertEqual(_str, str(uber_puc))
uber_puc.gen_cat = None # test str output *w/o* gen_cat or prod_fam
_str = "Test Product Type"
self.assertEqual(_str, str(uber_puc))
def test_get_classification_method(self):
# Test that when the product has no assigned classification method, the getter returns
# None
self.assertTrue(self.objects.p.get_classification_method == None)
self.rule_based_ppuc = ProductToPUC.objects.create(
product=self.objects.p,
puc=self.objects.puc,
puc_assigned_usr=self.objects.user,
classification_method_id="RU",
)
self.manual_assignment_ppuc = ProductToPUC.objects.create(
product=self.objects.p,
puc=self.objects.puc,
puc_assigned_usr=self.objects.user,
classification_method_id="MA",
)
self.manual_batch_ppuc = ProductToPUC.objects.create(
product=self.objects.p,
puc=self.objects.puc,
puc_assigned_usr=self.objects.user,
classification_method_id="MB",
)
self.automatic_ppuc = ProductToPUC.objects.create(
product=self.objects.p,
puc=self.objects.puc,
puc_assigned_usr=self.objects.user,
classification_method_id="AU",
)
self.bulk_assignment_ppuc = ProductToPUC.objects.create(
product=self.objects.p,
puc=self.objects.puc,
puc_assigned_usr=self.objects.user,
classification_method_id="BA",
)
# Order of confidence:
# "MA", "Manual"
# "RU", "Rule Based"
# "MB", "Manual Batch"
# "BA", "Bulk Assignment"
# "AU", "Automatic"
# Five product-to-puc relationships created, the highest confidence should always be selected
# Test that the get_classification_method method returns expected values
classification_method = self.objects.p.get_classification_method
_str = "Manual"
# classification method should be Manual (highest confidence)
self.assertEqual(_str, str(classification_method.name))
self.manual_assignment_ppuc.delete()
classification_method = self.objects.p.get_classification_method
_str = "Rule Based"
# classification method should be Rule Based since Manual was deleted
self.assertEqual(_str, str(classification_method.name))
self.rule_based_ppuc.delete()
classification_method = self.objects.p.get_classification_method
_str = "Manual Batch"
# classification method should be Manual Batch since Rule Based was deleted
self.assertEqual(_str, str(classification_method.name))
self.manual_batch_ppuc.delete()
classification_method = self.objects.p.get_classification_method
_str = "Bulk Assignment"
# classification method should be Bulk Assignment since Manual Batch was deleted
self.assertEqual(_str, str(classification_method.name))
self.bulk_assignment_ppuc.delete()
classification_method = self.objects.p.get_classification_method
_str = "Automatic"
# classification method should be Automatic since Bulk Assignment was deleted
self.assertEqual(_str, str(classification_method.name))
    # it seems to be necessary to use the __dict__ and instance in order to load
    # the form for testing; without them I don't think the fields are bound, and
    # an unbound form will never validate!
def test_ProductForm_invalid(self):
form = ProductForm(self.objects.p.__dict__, instance=self.objects.p)
self.assertFalse(form.is_valid())
def test_ProductForm_valid(self):
self.objects.p.title = "Title Necessary"
self.objects.p.upc = "Upc Necessary"
self.objects.p.document_type = self.objects.dt.id
self.objects.p.save()
form = ProductForm(self.objects.p.__dict__, instance=self.objects.p)
self.assertTrue(form.is_valid())
def test_unique_constraint(self):
self.ppuc1 = ProductToPUC.objects.create(
product=self.objects.p,
puc=self.objects.puc,
puc_assigned_usr=self.objects.user,
classification_method_id="MA",
)
with self.assertRaises(IntegrityError):
self.ppuc2 = ProductToPUC.objects.create(
product=self.objects.p,
puc=self.objects.puc,
puc_assigned_usr=self.objects.user,
classification_method_id="MA",
)
| gpl-3.0 | -7,496,887,131,479,149,000 | 38.251163 | 101 | 0.627089 | false |
cmeeren/apexpy | setup.py | 1 | 3229 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
import io
import re
from glob import glob
from os import path, environ
from setuptools import find_packages
# Include extensions only when not on readthedocs.org
if environ.get('READTHEDOCS', None) == 'True':
from setuptools import setup
from distutils.core import Extension
extensions = []
else:
from numpy.distutils.core import setup, Extension
extensions = [
Extension(name='apexpy.fortranapex',
sources=['src/fortranapex/magfld.f', 'src/fortranapex/apex.f',
'src/fortranapex/makeapexsh.f90',
'src/fortranapex/apexsh.f90',
'src/fortranapex/checkapexsh.f90'])]
def read(*names, **kwargs):
return io.open(
path.join(path.dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
).read()
if __name__ == "__main__":
setup(
name='apexpy',
version='1.0.3',
license='MIT',
description='A Python wrapper for Apex coordinates',
long_description='%s\n%s' % (read('README.rst'),
re.sub(':[a-z]+:`~?(.*?)`', r'``\1``',
read('CHANGELOG.rst'))),
author='Christer van der Meeren; Angeline G. Burrell',
author_email='[email protected]',
url='https://github.com/aburrell/apexpy',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[path.splitext(path.basename(pp))[0]
for pp in glob('src/*.py')],
package_data={'apexpy': ['apexsh.dat']},
zip_safe=False,
classifiers=[
# complete classifier list:
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Utilities',
],
keywords=[
'apex',
'modified apex',
'quasi-dipole',
'quasi dipole',
'coordinates',
'magnetic coordinates',
'mlt',
'magnetic local time',
'conversion',
'converting',
],
install_requires=[
'numpy',
],
ext_modules=extensions,
entry_points={
'console_scripts': [
'apexpy = apexpy.__main__:main',
]
},
)
| mit | -2,962,109,356,124,107,300 | 34.097826 | 80 | 0.521214 | false |
chengjf/database-interface-doc-management | flask-demo/flask/Lib/site-packages/pip/_vendor/progress/helpers.py | 404 | 2894 | # Copyright (c) 2012 Giorgos Verigakis <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import print_function
from __future__ import unicode_literals
HIDE_CURSOR = '\x1b[?25l'
SHOW_CURSOR = '\x1b[?25h'
class WriteMixin(object):
hide_cursor = False
def __init__(self, message=None, **kwargs):
super(WriteMixin, self).__init__(**kwargs)
self._width = 0
if message:
self.message = message
if self.file.isatty():
if self.hide_cursor:
print(HIDE_CURSOR, end='', file=self.file)
print(self.message, end='', file=self.file)
self.file.flush()
def write(self, s):
if self.file.isatty():
b = '\b' * self._width
c = s.ljust(self._width)
print(b + c, end='', file=self.file)
self._width = max(self._width, len(s))
self.file.flush()
def finish(self):
if self.file.isatty() and self.hide_cursor:
print(SHOW_CURSOR, end='', file=self.file)
class WritelnMixin(object):
hide_cursor = False
def __init__(self, message=None, **kwargs):
super(WritelnMixin, self).__init__(**kwargs)
if message:
self.message = message
if self.file.isatty() and self.hide_cursor:
print(HIDE_CURSOR, end='', file=self.file)
def clearln(self):
if self.file.isatty():
print('\r\x1b[K', end='', file=self.file)
def writeln(self, line):
if self.file.isatty():
self.clearln()
print(line, end='', file=self.file)
self.file.flush()
def finish(self):
if self.file.isatty():
print(file=self.file)
if self.hide_cursor:
print(SHOW_CURSOR, end='', file=self.file)
from signal import signal, SIGINT
from sys import exit
class SigIntMixin(object):
"""Registers a signal handler that calls finish on SIGINT"""
def __init__(self, *args, **kwargs):
super(SigIntMixin, self).__init__(*args, **kwargs)
signal(SIGINT, self._sigint_handler)
def _sigint_handler(self, signum, frame):
self.finish()
exit(0)
| apache-2.0 | -7,961,596,642,957,417,000 | 30.456522 | 74 | 0.618521 | false |
letmaik/lensfunpy | test/tests.py | 2 | 5821 | import numpy as np
import lensfunpy as lensfun
import gc
from numpy.testing.utils import assert_equal
# the following strings were taken from the lensfun xml files
cam_maker = 'NIKON CORPORATION'
cam_model = 'NIKON D3S'
lens_maker = 'Nikon'
lens_model = 'Nikon AI-S Nikkor 28mm f/2.8'
def testDatabaseLoading():
db = lensfun.Database()
cams = db.find_cameras(cam_maker, cam_model)
print(cams)
assert_equal(len(cams), 1)
cam = cams[0]
assert_equal(cam.maker.lower(), cam_maker.lower())
assert_equal(cam.model.lower(), cam_model.lower())
assert len(str(cam)) > 0
lenses = db.find_lenses(cam, lens_maker, lens_model)
assert_equal(len(lenses), 1)
lens = lenses[0]
assert_equal(lens.maker.lower(), lens_maker.lower())
assert len(str(lens)) > 0
assert_equal(lens.model.lower(), lens_model.lower())
def testDatabaseXMLLoading():
xml = """
<lensdatabase>
<mount>
<name>Nikon F AF</name>
<compat>Nikon F</compat>
<compat>Nikon F AI</compat>
<compat>Nikon F AI-S</compat>
<compat>M42</compat>
<compat>T2</compat>
<compat>Generic</compat>
</mount>
<camera>
<maker>Nikon Corporation</maker>
<maker lang="en">Nikon</maker>
<model>Nikon D3S</model>
<model lang="en">D3S</model>
<mount>Nikon F AF</mount>
<cropfactor>1.0</cropfactor>
</camera>
<lens>
<maker>Nikon</maker>
<model>Nikon AI-S Nikkor 28mm f/2.8</model>
<model lang="en">Nikkor AI-S 28mm f/2.8</model>
<mount>Nikon F AI-S</mount>
<cropfactor>1</cropfactor>
<calibration>
<!-- Taken with Nikon D600 -->
<distortion model="ptlens" focal="28" a="0.00929" b="-0.02155" c="0.0021"/>
<tca model="poly3" focal="28" br="-0.0002306" vr="1.0006860" bb="0.0002350" vb="0.9995614"/>
</calibration>
</lens>
</lensdatabase>
"""
db = lensfun.Database(xml=xml, load_common=False, load_bundled=False)
assert_equal(len(db.cameras), 1)
assert_equal(len(db.lenses), 1)
assert_equal(len(db.mounts), 1)
cam = db.find_cameras(cam_maker, cam_model)[0]
lens = db.find_lenses(cam, lens_maker, lens_model)[0]
assert_equal(cam.maker.lower(), cam_maker.lower())
assert_equal(cam.model.lower(), cam_model.lower())
assert_equal(lens.maker.lower(), lens_maker.lower())
assert_equal(lens.model.lower(), lens_model.lower())
def testModifier():
db = lensfun.Database()
cam = db.find_cameras(cam_maker, cam_model)[0]
lens = db.find_lenses(cam, lens_maker, lens_model)[0]
focal_length = 28.0
aperture = 1.4
distance = 10
width = 4256
height = 2832
mod = lensfun.Modifier(lens, cam.crop_factor, width, height)
mod.initialize(focal_length, aperture, distance)
undistCoords = mod.apply_geometry_distortion()
assert undistCoords.shape[0] == height and undistCoords.shape[1] == width
# check if coordinates were actually transformed
y, x = np.mgrid[0:undistCoords.shape[0], 0:undistCoords.shape[1]]
coords = np.dstack((x,y))
assert np.any(undistCoords != coords)
undistCoords = mod.apply_subpixel_distortion()
assert undistCoords.shape[0] == height and undistCoords.shape[1] == width
assert np.any(undistCoords[:,:,0] != coords)
undistCoords = mod.apply_subpixel_geometry_distortion()
assert undistCoords.shape[0] == height and undistCoords.shape[1] == width
assert np.any(undistCoords[:,:,0] != coords)
def testVignettingCorrection():
cam_maker = 'NIKON CORPORATION'
cam_model = 'NIKON D3S'
lens_maker = 'Nikon'
lens_model = 'Nikkor AF 20mm f/2.8D'
focal_length = 20
aperture = 4
distance = 10
width = 4256
height = 2832
db = lensfun.Database()
cam = db.find_cameras(cam_maker, cam_model)[0]
lens = db.find_lenses(cam, lens_maker, lens_model)[0]
mod = lensfun.Modifier(lens, cam.crop_factor, width, height)
mod.initialize(focal_length, aperture, distance)
img = np.zeros((height, width, 3), np.uint8)
img[:] = 127
mod.apply_color_modification(img)
assert img.mean() > 127
def testDeallocationBug():
db = lensfun.Database()
cam = db.find_cameras(cam_maker, cam_model)[0]
lens = db.find_lenses(cam, lens_maker, lens_model)[0]
# By garbage collecting the database object, its queried objects
# were deallocated as well, which is not what we want.
# Now, all queried objects hold a reference to the Database object
# they came from. This way, the Database object is only deallocated
# when all queried objects were garbage collected.
del db
gc.collect()
assert_equal(cam.maker.lower(), cam_maker.lower())
assert_equal(lens.maker.lower(), lens_maker.lower())
def testXmlFormatException():
try:
lensfun.Database(xml='garbage')
except lensfun.XMLFormatError:
pass
else:
assert False
def testNewLensType():
# https://github.com/letmaik/lensfunpy/issues/10
# lensfun added new lens types which were not supported yet by lensfunpy.
# This test accesses one such lens type and was raising an exception previously.
db = lensfun.Database()
cam = db.find_cameras('NIKON CORPORATION', 'NIKON D3S')[0]
lenses = db.find_lenses(cam, 'Sigma', 'Sigma 8mm f/3.5 EX DG circular fisheye')
if lenses: # newer lens, only run test if lens actually exists
assert_equal(lenses[0].type, lensfun.LensType.FISHEYE_EQUISOLID)
else:
print('Skipping testNewLensType as lens not found')
# TODO lensfun's find* functions modify the score directly in the original db objects
# -> another invocation of find* will overwrite the old scores
| mit | 1,605,617,574,332,892,200 | 33.443787 | 104 | 0.645594 | false |
tjsavage/tmrwmedia | djangotoolbox/fields.py | 7 | 10758 | # All fields except for BlobField written by Jonas Haag <[email protected]>
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.importlib import import_module
__all__ = ('RawField', 'ListField', 'DictField', 'SetField',
'BlobField', 'EmbeddedModelField')
class _HandleAssignment(object):
"""
A placeholder class that provides a way to set the attribute on the model.
"""
def __init__(self, field):
self.field = field
def __get__(self, obj, type=None):
if obj is None:
raise AttributeError('Can only be accessed via an instance.')
return obj.__dict__[self.field.name]
def __set__(self, obj, value):
obj.__dict__[self.field.name] = self.field.to_python(value)
class RawField(models.Field):
""" Generic field to store anything your database backend allows you to. """
def get_internal_type(self):
return 'RawField'
class AbstractIterableField(models.Field):
"""
    Abstract field for fields storing iterable data types like ``list``,
``set`` and ``dict``.
You can pass an instance of a field as the first argument.
If you do, the iterable items will be piped through the passed field's
validation and conversion routines, converting the items to the
appropriate data type.
"""
def __init__(self, item_field=None, *args, **kwargs):
if item_field is None:
item_field = RawField()
self.item_field = item_field
default = kwargs.get('default', None if kwargs.get('null') else ())
if default is not None and not callable(default):
# ensure a new object is created every time the default is accessed
kwargs['default'] = lambda: self._type(default)
super(AbstractIterableField, self).__init__(*args, **kwargs)
def contribute_to_class(self, cls, name):
self.item_field.model = cls
self.item_field.name = name
super(AbstractIterableField, self).contribute_to_class(cls, name)
metaclass = getattr(self.item_field, '__metaclass__', None)
        if metaclass is not None and issubclass(metaclass, models.SubfieldBase):
setattr(cls, self.name, _HandleAssignment(self))
def db_type(self, connection):
item_db_type = self.item_field.db_type(connection=connection)
return '%s:%s' % (self.__class__.__name__, item_db_type)
def _convert(self, func, values, *args, **kwargs):
if isinstance(values, (list, tuple, set)):
return self._type(func(value, *args, **kwargs) for value in values)
return values
def to_python(self, value):
return self._convert(self.item_field.to_python, value)
def pre_save(self, model_instance, add):
class fake_instance(object):
pass
fake_instance = fake_instance()
def wrapper(value):
assert not hasattr(self.item_field, 'attname')
fake_instance.value = value
self.item_field.attname = 'value'
try:
return self.item_field.pre_save(fake_instance, add)
finally:
del self.item_field.attname
return self._convert(wrapper, getattr(model_instance, self.attname))
def get_db_prep_value(self, value, connection, prepared=False):
return self._convert(self.item_field.get_db_prep_value, value,
connection=connection, prepared=prepared)
def get_db_prep_save(self, value, connection):
return self._convert(self.item_field.get_db_prep_save,
value, connection=connection)
# TODO/XXX: Remove this once we have a cleaner solution
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
if hasattr(value, 'as_lookup_value'):
value = value.as_lookup_value(self, lookup_type, connection)
return value
def validate(self, values, model_instance):
try:
iter(values)
except TypeError:
raise ValidationError('Value of type %r is not iterable' % type(values))
def formfield(self, **kwargs):
raise NotImplementedError('No form field implemented for %r' % type(self))
class ListField(AbstractIterableField):
"""
Field representing a Python ``list``.
If the optional keyword argument `ordering` is given, it must be a callable
that is passed to :meth:`list.sort` as `key` argument. If `ordering` is
given, the items in the list will be sorted before sending them to the
database.
"""
_type = list
def __init__(self, *args, **kwargs):
self.ordering = kwargs.pop('ordering', None)
if self.ordering is not None and not callable(self.ordering):
raise TypeError("'ordering' has to be a callable or None, "
"not of type %r" % type(self.ordering))
super(ListField, self).__init__(*args, **kwargs)
def _convert(self, func, values, *args, **kwargs):
values = super(ListField, self)._convert(func, values, *args, **kwargs)
if values is not None and self.ordering is not None:
values.sort(key=self.ordering)
return values
class SetField(AbstractIterableField):
"""
Field representing a Python ``set``.
"""
_type = set
class DictField(AbstractIterableField):
"""
Field representing a Python ``dict``.
The field type conversions described in :class:`AbstractIterableField`
only affect values of the dictionary, not keys.
Depending on the backend, keys that aren't strings might not be allowed.
"""
_type = dict
def _convert(self, func, values, *args, **kwargs):
if values is None:
return None
return dict((key, func(value, *args, **kwargs))
for key, value in values.iteritems())
def validate(self, values, model_instance):
if not isinstance(values, dict):
raise ValidationError('Value is of type %r. Should be a dict.' % type(values))
class BlobField(models.Field):
"""
A field for storing blobs of binary data.
The value might either be a string (or something that can be converted to
a string), or a file-like object.
In the latter case, the object has to provide a ``read`` method from which
the blob is read.
"""
def get_internal_type(self):
return 'BlobField'
def formfield(self, **kwargs):
# A file widget is provided, but use model FileField or ImageField
# for storing specific files most of the time
from .widgets import BlobWidget
from django.forms import FileField
defaults = {'form_class': FileField, 'widget': BlobWidget}
defaults.update(kwargs)
return super(BlobField, self).formfield(**defaults)
def get_db_prep_value(self, value, connection, prepared=False):
if hasattr(value, 'read'):
return value.read()
else:
return str(value)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
raise TypeError("BlobFields do not support lookups")
def value_to_string(self, obj):
return str(self._get_val_from_obj(obj))
class EmbeddedModelField(models.Field):
"""
Field that allows you to embed a model instance.
    :param embedded_model: (optional) The model class that shall be embedded
(may also be passed as string similar to relation fields)
"""
__metaclass__ = models.SubfieldBase
def __init__(self, embedded_model=None, *args, **kwargs):
self.embedded_model = embedded_model
kwargs.setdefault('default', None)
super(EmbeddedModelField, self).__init__(*args, **kwargs)
def db_type(self, connection):
return 'DictField:RawField'
def _set_model(self, model):
# EmbeddedModelFields are not contribute[d]_to_class if using within
# ListFields (and friends), so we can only know the model field is
# used in when the IterableField sets our 'model' attribute in its
# contribute_to_class method.
# We need to know the model to generate a valid key for the lookup.
if model is not None and isinstance(self.embedded_model, basestring):
# The model argument passed to __init__ was a string, so we need
# to make sure to resolve that string to the corresponding model
# class, similar to relation fields. We abuse some of the
# relation fields' code to do the lookup here:
def _resolve_lookup(self_, resolved_model, model):
self.embedded_model = resolved_model
from django.db.models.fields.related import add_lazy_relation
add_lazy_relation(model, self, self.embedded_model, _resolve_lookup)
self._model = model
model = property(lambda self:self._model, _set_model)
def pre_save(self, model_instance, add):
embedded_instance = super(EmbeddedModelField, self).pre_save(model_instance, add)
if embedded_instance is None:
return None, None
if self.embedded_model is not None and \
not isinstance(embedded_instance, self.embedded_model):
raise TypeError("Expected instance of type %r, not %r"
% (type(self.embedded_model), type(embedded_instance)))
data = dict((field.name, field.pre_save(embedded_instance, add))
for field in embedded_instance._meta.fields)
return embedded_instance, data
def get_db_prep_value(self, (embedded_instance, embedded_dict), **kwargs):
if embedded_dict is None:
return None
values = dict()
for name, value in embedded_dict.iteritems():
field = embedded_instance._meta.get_field(name)
values[name] = field.get_db_prep_value(value, **kwargs)
if self.embedded_model is None:
values.update({'_module' : embedded_instance.__class__.__module__,
'_model' : embedded_instance.__class__.__name__})
return values
# TODO/XXX: Remove this once we have a cleaner solution
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
if hasattr(value, 'as_lookup_value'):
value = value.as_lookup_value(self, lookup_type, connection)
return value
def to_python(self, values):
if not isinstance(values, dict):
return values
module, model = values.pop('_module', None), values.pop('_model', None)
if module is not None:
return getattr(import_module(module), model)(**values)
return self.embedded_model(**values)
| bsd-3-clause | -6,270,491,817,875,390,000 | 38.992565 | 90 | 0.631902 | false |
OndroNR/ga-bitbot | gts.py | 16 | 20392 |
"""
gts v0.01
genetic test sequencer
Copyright 2011 Brian Monkaba
This file is part of ga-bitbot.
ga-bitbot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ga-bitbot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ga-bitbot. If not, see <http://www.gnu.org/licenses/>.
"""
import traceback
import xmlrpclib
import json
import gene_server_config
import time
import sys
import random
import subprocess
import __main__
import paths
from genetic import *
from load_config import *
random.seed(time.time())
if __name__ == "__main__":
__appversion__ = "0.01a"
print "Genetic Test Sequencer v%s"%__appversion__
# connect to the xml server
#
__server__ = gene_server_config.__server__
__port__ = str(gene_server_config.__port__)
#make sure the port number matches the server.
server = xmlrpclib.Server('http://' + __server__ + ":" + __port__)
multicall = xmlrpclib.MultiCall(server)
print "gts: connected to gene_server ",__server__,":",__port__
    #the variable values below are superseded by the configuration loaded from the
#configuration file global_config.json
#!!!!!!!! to change the values edit the json configuration file NOT the variables below !!!!!!!!
max_length = 60 * 24 * 60
load_throttle = 1 #go easy on cpu usage
load_throttle_sleep_interval = 0.10#seconds
calibrate = 1 #set to one to adjust the population size to maintain a one min test cycle
cycle_time = 60 * 1#time in seconds to test the entire population
min_cycle_time = 30
cycle_time_step = 2
pid_update_rate = 20 #reset watchdog after every n seconds
enable_flash_crash_protection = False
flash_crash_protection_delay = 60 * 3 #three hours
trusted_keys_path = "./config/trusted_keys/"
config_loaded = 0
#!!!!!!!!!!!!!!!!end of loaded config values!!!!!!!!
#define the module exit function
profile = False
def gts_exit(msg,pid=None):
global profile
if pid != None:
server.pid_msg(pid,msg)
server.pid_exit(pid)
if profile == True:
print "gts: profiler saving gts_call_graph.png to ./report/"
pycallgraph.make_dot_graph('./report/gts_call_graph.png')
print msg
sys.exit()
#load config
try:
__main__ = load_config_file_into_object('global_config.json',__main__)
except:
gts_exit("gts: error detected while loading the configuration. the application will now exit.")
else:
if config_loaded == False:
gts_exit("gts: configuration failed to load. the application will now exit.")
else:
print "gts: configuration loaded."
#internal variables
quartile_cycle = False
quartile = ''
bs = ''
verbose = False
run_once = False
get_config = False
get_default_config = False
score_only = False
profile = False
pid = None
g = genepool()
gd = "UNDEFINED"
if len(sys.argv) >= 3:
# Convert the two arguments from strings into numbers
quartile = sys.argv[1]
bs = sys.argv[2]
if len(sys.argv) > 3:
for i in range(3,len(sys.argv)):
if sys.argv[i] == 'v':
verbose = True
if sys.argv[i] == 'run_once':
#use with gal.py to auto reset (to address pypy memory leaks)
#exit after first local optima found
#or in the case of 'all' quartiles being tested, reset after once cycle through the quartiles
run_once = True
if sys.argv[i] == 'get_default_config':
#if set the default gene_def config will be loaded from the server
get_default_config = True
get_config = True
if sys.argv[i] == 'get_config':
#if set the gene_def config will be randomly loaded from the server
get_config = True
if sys.argv[i] == 'score_only':
#if set the gene_def config will be randomly loaded from the server
score_only = True
if sys.argv[i] == 'profile':
try:
import pycallgraph
except:
print "gts: pycallgraph module not installed. Profiling disabled."
else:
pycallgraph.start_trace()
profile = True
print "gts: running pycallgraph profiler"
if sys.argv[i] == 'pid':
#set the pid from the command line
try:
pid = sys.argv[i + 1]
except:
pass
if pid == None:
#if the pid is not set from the command line then
#use the genetic class object id
pid = g.id
#which quartile group to test
while not (quartile in ['1','2','3','4','all']):
print "Which quartile group to test? (1,2,3,4):"
quartile = raw_input()
if quartile != 'all':
quartile = int(quartile)
else:
quartile = 1
quartile_cycle = True
update_all_scores = True
if score_only:
update_all_scores = True
else:
update_all_scores = False
#configure the gene pool
if get_config == True:
print "gts: Loading gene_def from the server."
while gd == "UNDEFINED" and get_config == True:
#get the gene def config list from the server
gdhl = json.loads(server.get_gene_def_hash_list())
if get_default_config == True:
gdh = json.loads(server.get_default_gene_def_hash())
gdhl = [gdh,gdh,gdh] #create a dummy list with the same (default) hash
if len(gdhl) < 2:
#the default config isn't defined
#if there are less then two genes registered then switch to the local config.
get_config = False
break
#pick one at random
gdh = random.choice(gdhl)
#get the gene_def
gd = server.get_gene_def(gdh)
#print gd
if gd != "UNDEFINED":
try:
gd = json.loads(gd)
#load the remote config
g = load_config_into_object(gd,g)
#only need to register the client with the existing gene_def hash
server.pid_register_client(pid,gdh)
print "gts: gene_def_hash:",gdh
print "gts: name",gd['name']
print "gts: description",gd['description']
print "gts: gene_def load complete."
except:
print "gts: gene_def load error:",gd
gd = "UNDEFINED"
get_config = False #force load local gen_def.json config
else:
time.sleep(5) #default config is undefined so just wait and try again....
#the script will remain in this loop until the default config is set
if get_config == False:
gd = load_config_from_file("gene_def.json")
g = load_config_into_object(gd,g)
#register the gene_def file and link to this client using the gene pool id as the PID (GUID)
f = open('./config/gene_def.json','r')
gdc = f.read()
f.close()
gdh = server.pid_register_gene_def(pid,gdc)
server.pid_register_client(pid,gdh)
#reset the process watchdog
server.pid_alive(pid)
#send a copy of the command line args
server.pid_msg(pid,str(sys.argv))
ff = None
if gd.has_key('fitness_script'):
#check for an updated signed package on the gene_server
        #pypy probably won't have pycrypto installed - fall back to python in a subprocess to sync
        #fitness module names in the gene_def exclude the .py file extension
        #but signed packages use the extension. check for extension, if none exists then add .py
print "gts: synchronizing signed code"
if len(gd['fitness_script'].split('.')) == 1:
sync_filename = gd['fitness_script'] + '.py'
subprocess.call(('python','cpsu.py','get',sync_filename,trusted_keys_path))
print "gts: loading the fitness module",gd['fitness_script']
ff = __import__(gd['fitness_script'])
else:
print "gts: no fitness module defined, loading default (bct)"
ff = __import__('bct')
te = ff.trade_engine()
#apply global configs
te.max_length = max_length
te.enable_flash_crash_protection = enable_flash_crash_protection
te.flash_crash_protection_delay = flash_crash_protection_delay
#load the gene_def fitness_config, if available
if gd.has_key('fitness_config'):
te = load_config_into_object(gd['fitness_config'],te)
te.score_only = True
print "gts: initializing the fitness function"
te.initialize()
#bootstrap the population with the winners available from the gene_pool server
while not(bs == 'y' or bs == 'n'):
print "Bootstrap from the gene_server? (y/n)"
bs = raw_input()
if bs == 'y':
bob_simulator = True
g.local_optima_trigger = 10
bootstrap_bobs = json.loads(server.get_bobs(quartile,pid))
bootstrap_all = json.loads(server.get_all(quartile,pid))
if (type(bootstrap_bobs) == type([])) and (type(bootstrap_all) == type([])):
g.seed()
if len(bootstrap_all) > 100:
g.pool = []
g.insert_genedict_list(bootstrap_bobs)
g.insert_genedict_list(bootstrap_all)
g.pool_size = len(g.pool)
if update_all_scores == True:
#reset the scores for retesting
g.reset_scores()
else:
#mate the genes before testing
g.next_gen()
else: #if no BOBS or high scores..seed with a new population
print "gts: no BOBs or high scores available...seeding new pool."
g.seed()
print "gts: Update all scores:",update_all_scores
print "gts: %s BOBs loaded"%len(bootstrap_bobs)
print "gts: %s high scores loaded"%len(bootstrap_all)
print "gts: Pool size: %s"%len(g.pool)
else:
bob_simulator = False
#update_all_scores = False
g.local_optima_trigger = 5
print "gts: Seeding the initial population"
g.seed()
#the counters are all incremented at the same time but are reset by different events:
test_count = 0 #used to reset the pool after so many loop cycles
total_count = 0 #used to calculate overall performance
loop_count = 0 # used to trigger pool size calibration and data reload
max_score = -100000
max_score_id = -1
max_gene = None
multicall_count = 0
start_time = time.time()
watchdog_reset_time = time.time()
server.pid_alive(pid)
print "gts: running the test sequencer"
while 1:
test_count += 1
total_count += 1
loop_count += 1
if load_throttle == 1:
time.sleep(load_throttle_sleep_interval)
if (time.time() - watchdog_reset_time) >= pid_update_rate: #total_count%pid_update_rate == 0:
#periodicaly reset the watchdog monitor
print "gts: resetting watchdog timer"
watchdog_reset_time = time.time()
server.pid_alive(pid)
if loop_count > g.pool_size:
if score_only: #quartile_cycle == True and bob_simulator == True:
#force a state jump to load the next quartile to retest the genes
#in this mode the only function of the client is to cycle through the quartiles to retest existing genes
g.local_optima_reached = True
#update_all_scores = False #on the first pass only, bob clients need to resubmit updated scores for every gene
loop_count = 0
#reset the watchdog monitor
#server.pid_alive(pid)
#benchmark the cycle speed
current_time = time.time()
elapsed_time = current_time - start_time
gps = total_count / elapsed_time
#pid_update_rate = int(gps * 40)
if calibrate == 1:
print "gts: recalibrating pool size..."
g.pool_size = int(gps * cycle_time)
cycle_time -= cycle_time_step
if cycle_time < min_cycle_time:
cycle_time = min_cycle_time
if g.pool_size > 10000:
g.pool_size = 10000
kss = (gps*te.input_data_length)/1000.0
performance_metrics = "gts: ","%.2f"%gps,"G/S; ","%.2f"%kss,"KS/S;"," Pool Size: ",g.pool_size," Total Processed: ",total_count
performance_metrics = " ".join(map(str,performance_metrics))
print performance_metrics
pmd = {'channel':'gts_metric','gps':gps,'kss':kss,'pool':g.pool_size,'total':total_count}
server.pid_msg(pid,json.dumps(pmd))
if g.local_optima_reached:
test_count = 0
#initialize fitness function (load updated data)
te.initialize()
if score_only: #quartile_cycle == True and bob_simulator == True:
#jump to the next quartile and skip the bob submission
update_all_scores = True
quartile += 1
if quartile > 4:
quartile = 1
if run_once:
print "gts: flushing xmlrpc multicall buffer."
multicall() #send any batched calls to the server
print "gts: run once done."
gts_exit("gts: run once done.",pid)
elif max_gene != None:
#debug
print "gts: ",max_gene
#end debug
print "gts: submit BOB for id:%s to server (%.2f)"%(str(max_gene['id']),max_gene['score'])
server.put_bob(json.dumps(max_gene),quartile,pid)
if quartile_cycle == True:
#if cycling is enabled then
#the client will cycle through the quartiles as local optimas are found
#jump to the next quartile
quartile += 1
if quartile > 4:
quartile = 1
if run_once:
gts_exit("gts: run once done.",pid)
else:
if max_score > -1000:
print "gts: **WARNING** MAX_GENE is gone.: ID",max_score_id
print "*"*80
print "gts: GENE DUMP:"
for ag in g.pool:
print ag['id'],ag['score']
print "*"*80
gts_exit("gts: HALTED.",pid)
max_gene = None #clear the max gene
max_score = -100000 #reset the high score
if quartile_cycle == False and run_once:
print "gts: flushing xmlrpc multicall buffer."
multicall() #send any batched calls to the server
print "gts: run once done."
gts_exit("gts: run once done.",pid)
if bob_simulator:
#update_all_scores = True #on the first pass only, bob clients need to resubmit updated scores for every gene
bootstrap_bobs = json.loads(server.get_bobs(quartile,pid))
bootstrap_all = json.loads(server.get_all(quartile,pid))
g.pool_size = len(g.pool)
if (type(bootstrap_bobs) == type([])) and (type(bootstrap_all) == type([])):
g.seed()
g.pool = []
g.insert_genedict_list(bootstrap_bobs)
g.insert_genedict_list(bootstrap_all)
if quartile_cycle == True:
#reset the scores for retesting
g.reset_scores()
else:
#mate the genes before testing
g.next_gen()
else: #if no BOBS or high scores..seed with a new population
#print "no BOBs or high scores available...seeding new pool."
g.seed()
else:
g.seed()
if test_count > (g.pool_size * 10):
test_count = 0
print "gts: reseting scores to force retest of winners..."
test_count = 0
max_score = 0 #knock the high score down to prevent blocking
#latest scoring data which may fall due to
#the latest price data
g.next_gen()
g.reset_scores()
#create/reset the trade engine
te.reset()
#get the next gene
ag = g.get_next()
#configure the trade engine
te = load_config_into_object({'set':ag},te)
#set the quartile to test
te.test_quartile(quartile)
#run the fitness function
try:
te.run()
except Exception, err:
#kill off any genes that crash the trade engine (div by 0 errors for instance)
print "gts: ***** GENE FAULT *****"
print Exception,err
print traceback.format_exc()
print "gts: ***** END GENE FAULT *****"
g.set_score(ag['id'],g.kill_score)
else:
#return the score to the gene pool
try:
score = te.score()
except Exception, err:
#kill off any genes that crash the trade engine (div by 0 errors for instance)
print "gts: ***** GENE SCORE FAULT *****"
print Exception,err
print traceback.format_exc()
print "gts: ***** END GENE SCORE FAULT *****"
g.set_score(ag['id'],g.kill_score)
else:
if verbose:
print "gts: ",ag['gene'],"\t".join(["%.5f"%max_score,"%.5f"%score,"%.3f"%g.prune_threshold])
g.set_score(ag['id'],score)
#g.set_message(ag['id'],"Balance: " + str(te.balance) +"; Wins: " + str(te.wins)+ "; Loss:" + str(te.loss) + "; Positions: " + str(len(te.positions)))
g.set_message(ag['id'],te.text_summary)
if score > 1000 and profile == True:
gts_exit("gts: profiling complete")
#if a new high score is found submit the gene to the server
if score > max_score and update_all_scores == False:
print "gts: submit high score for quartile:%s id:%s to server (%.5f)"%(str(quartile),str(ag['id']),score)
max_score = score
max_score_id = ag['id']
max_gene = ag.copy() #g.get_by_id(max_score_id)
if max_gene != None:
server.put(json.dumps(max_gene),quartile,pid)
else:
print "gts: MAX_GENE is None!!"
if update_all_scores == True:
print "gts: updating score for quartile:%s id:%s to server, multicall deffered (%.5f)"%(str(quartile),str(ag['id']),score)
agene = g.get_by_id(ag['id'])
if agene != None:
multicall_count += 1
multicall.mc_put(json.dumps(agene),quartile,pid)
if multicall_count > 40:
multicall_count = 0
print "gts: flushing xmlrpc multicall buffer."
multicall()
else:
print "gts: updating gene error: gene is missing!!"
| gpl-3.0 | -182,186,091,381,218,530 | 39.540755 | 167 | 0.54286 | false |
SUSE/azure-sdk-for-python | azure-mgmt-resource/azure/mgmt/resource/policy/v2016_04_01/operations/policy_assignments_operations.py | 2 | 30831 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class PolicyAssignmentsOperations(object):
"""PolicyAssignmentsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: The API version to use for the operation. Constant value: "2016-04-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-04-01"
self.config = config
def delete(
self, scope, policy_assignment_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a policy assignment.
:param scope: The scope of the policy assignment.
:type scope: str
:param policy_assignment_name: The name of the policy assignment to
delete.
:type policy_assignment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PolicyAssignment
<azure.mgmt.resource.policy.v2016_04_01.models.PolicyAssignment>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/{scope}/providers/Microsoft.Authorization/policyassignments/{policyAssignmentName}'
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'policyAssignmentName': self._serialize.url("policy_assignment_name", policy_assignment_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicyAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create(
self, scope, policy_assignment_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates a policy assignment.
Policy assignments are inherited by child resources. For example, when
        you apply a policy to a resource group, that policy is assigned to all
resources in the group.
:param scope: The scope of the policy assignment.
:type scope: str
:param policy_assignment_name: The name of the policy assignment.
:type policy_assignment_name: str
:param parameters: Parameters for the policy assignment.
:type parameters: :class:`PolicyAssignment
<azure.mgmt.resource.policy.v2016_04_01.models.PolicyAssignment>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PolicyAssignment
<azure.mgmt.resource.policy.v2016_04_01.models.PolicyAssignment>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/{scope}/providers/Microsoft.Authorization/policyassignments/{policyAssignmentName}'
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'policyAssignmentName': self._serialize.url("policy_assignment_name", policy_assignment_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'PolicyAssignment')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('PolicyAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get(
self, scope, policy_assignment_name, custom_headers=None, raw=False, **operation_config):
"""Gets a policy assignment.
:param scope: The scope of the policy assignment.
:type scope: str
:param policy_assignment_name: The name of the policy assignment to
get.
:type policy_assignment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PolicyAssignment
<azure.mgmt.resource.policy.v2016_04_01.models.PolicyAssignment>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/{scope}/providers/Microsoft.Authorization/policyassignments/{policyAssignmentName}'
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'policyAssignmentName': self._serialize.url("policy_assignment_name", policy_assignment_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicyAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_for_resource_group(
self, resource_group_name, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets policy assignments for the resource group.
:param resource_group_name: The name of the resource group that
contains policy assignments.
:type resource_group_name: str
:param filter: The filter to apply on the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PolicyAssignmentPaged
<azure.mgmt.resource.policy.v2016_04_01.models.PolicyAssignmentPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/policyAssignments'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str', skip_quote=True)
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.PolicyAssignmentPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PolicyAssignmentPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_for_resource(
self, resource_group_name, resource_provider_namespace, parent_resource_path, resource_type, resource_name, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets policy assignments for a resource.
:param resource_group_name: The name of the resource group containing
the resource. The name is case insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource
provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource path.
:type parent_resource_path: str
:param resource_type: The resource type.
:type resource_type: str
:param resource_name: The name of the resource with policy
assignments.
:type resource_name: str
:param filter: The filter to apply on the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PolicyAssignmentPaged
<azure.mgmt.resource.policy.v2016_04_01.models.PolicyAssignmentPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/policyassignments'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.PolicyAssignmentPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PolicyAssignmentPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list(
self, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets all the policy assignments for a subscription.
:param filter: The filter to apply on the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PolicyAssignmentPaged
<azure.mgmt.resource.policy.v2016_04_01.models.PolicyAssignmentPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyassignments'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.PolicyAssignmentPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PolicyAssignmentPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def delete_by_id(
self, policy_assignment_id, custom_headers=None, raw=False, **operation_config):
"""Deletes a policy assignment by ID.
When providing a scope for the assignment, use
'/subscriptions/{subscription-id}/' for subscriptions,
'/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}'
for resource groups, and
'/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider-namespace}/{resource-type}/{resource-name}'
for resources.
:param policy_assignment_id: The ID of the policy assignment to
delete. Use the format
'/{scope}/providers/Microsoft.Authorization/policyAssignments/{policy-assignment-name}'.
:type policy_assignment_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PolicyAssignment
<azure.mgmt.resource.policy.v2016_04_01.models.PolicyAssignment>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/{policyAssignmentId}'
path_format_arguments = {
'policyAssignmentId': self._serialize.url("policy_assignment_id", policy_assignment_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicyAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_by_id(
self, policy_assignment_id, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates a policy assignment by ID.
Policy assignments are inherited by child resources. For example, when
you apply a policy to a resource group that policy is assigned to all
resources in the group. When providing a scope for the assignment, use
'/subscriptions/{subscription-id}/' for subscriptions,
'/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}'
for resource groups, and
'/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider-namespace}/{resource-type}/{resource-name}'
for resources.
:param policy_assignment_id: The ID of the policy assignment to
create. Use the format
'/{scope}/providers/Microsoft.Authorization/policyAssignments/{policy-assignment-name}'.
:type policy_assignment_id: str
:param parameters: Parameters for policy assignment.
:type parameters: :class:`PolicyAssignment
<azure.mgmt.resource.policy.v2016_04_01.models.PolicyAssignment>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PolicyAssignment
<azure.mgmt.resource.policy.v2016_04_01.models.PolicyAssignment>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/{policyAssignmentId}'
path_format_arguments = {
'policyAssignmentId': self._serialize.url("policy_assignment_id", policy_assignment_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'PolicyAssignment')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('PolicyAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_by_id(
self, policy_assignment_id, custom_headers=None, raw=False, **operation_config):
"""Gets a policy assignment by ID.
When providing a scope for the assignment, use
'/subscriptions/{subscription-id}/' for subscriptions,
'/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}'
for resource groups, and
'/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider-namespace}/{resource-type}/{resource-name}'
for resources.
:param policy_assignment_id: The ID of the policy assignment to get.
Use the format
'/{scope}/providers/Microsoft.Authorization/policyAssignments/{policy-assignment-name}'.
:type policy_assignment_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PolicyAssignment
<azure.mgmt.resource.policy.v2016_04_01.models.PolicyAssignment>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/{policyAssignmentId}'
path_format_arguments = {
'policyAssignmentId': self._serialize.url("policy_assignment_id", policy_assignment_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicyAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
| mit | 6,938,475,820,851,304,000 | 45.362406 | 231 | 0.639389 | false |
ipashchenko/uvmod | uvmod/utils.py | 1 | 8522 | import math
import psycopg2
import numpy as np
mas_to_rad = 4.8481368 * 1E-09
n_q = 0.637
vfloat = np.vectorize(float)
band_cm_dict = {'c': 6., 'l': 18., 'p': 94., 'k': 1.35}
SEFD_dict = {'RADIO-AS': {'K': {'L': 46700., 'R': 36800},
'C': {'L': 11600., 'R': None},
'L': {'L': 2760., 'R': 2930.}},
'GBT-VLBA': {'K': {'L': 23., 'R': 23.},
'C': {'L': 8., 'R': 8.},
'L': {'L': 10., 'R': 10.}},
'EFLSBERG': {'C': {'L': 20., 'R': 20.},
'L': {'L': 19., 'R': 19.}},
'YEBES40M': {'C': {'L': 160., 'R': 160.},
'L': {'L': None, 'R': None}},
'ZELENCHK': {'C': {'L': 400., 'R': 400.},
'L': {'L': 300., 'R': 300.}},
'EVPTRIYA': {'C': {'L': 44., 'R': 44.},
'L': {'L': 44., 'R': 44.}},
'SVETLOE': {'C': {'L': 250., 'R': 250.},
'L': {'L': 360., 'R': 360.}},
'BADARY': {'C': {'L': 200., 'R': 200.},
'L': {'L': 330., 'R': 330.}},
'TORUN': {'C': {'L': 220., 'R': 220.},
'L': {'L': 300., 'R': 300.}},
'ARECIBO': {'C': {'L': 5., 'R': 5.},
'L': {'L': 3., 'R': 3.}},
'WSTRB-07': {'C': {'L': 120., 'R': 120.},
'L': {'L': 40., 'R': 40.}},
'VLA-N8': {'C': {'L': None, 'R': None},
'L': {'L': None, 'R': None}},
# Default values for KL
'KALYAZIN': {'C': {'L': 150., 'R': 150.},
'L': {'L': 140., 'R': 140.}},
'MEDICINA': {'C': {'L': 170., 'R': 170.},
'L': {'L': 700., 'R': 700.}},
'NOTO': {'C': {'L': 260., 'R': 260.},
'L': {'L': 784., 'R': 784.}},
'HARTRAO': {'C': {'L': 650., 'R': 650.},
'L': {'L': 430., 'R': 430.}},
'HOBART26': {'C': {'L': 640., 'R': 640.},
'L': {'L': 470., 'R': 470.}},
'MOPRA': {'C': {'L': 350., 'R': 350.},
'L': {'L': 340., 'R': 340.},
'K': {'L': 900., 'R': 900.}},
'WARK12M': {'C': {'L': None, 'R': None},
'L': {'L': None, 'R': None}},
'TIDBIN64': {'C': {'L': None, 'R': None},
'L': {'L': None, 'R': None}},
'DSS63': {'C': {'L': 24., 'R': 24.},
'L': {'L': 24., 'R': 24.}},
'PARKES': {'C': {'L': 110., 'R': 110.},
'L': {'L': 40., 'R': 40.},
'K': {'L': 810., 'R': 810.}},
'USUDA64': {'C': {'L': None, 'R': None},
'L': {'L': None, 'R': None}},
'JODRELL2': {'C': {'L': 320., 'R': 320.},
'L': {'L': 320., 'R': 320.}},
'ATCA104': {'C': {'L': None, 'R': None},
'L': {'L': None, 'R': None}}}
dtype_converter_dict = {'integer': 'int', 'smallint': 'int', 'character': '|S',
'character varying': '|S', 'real': '<f8',
'timestamp without time zone': np.object}
def ed_to_uv(r, lambda_cm=18.):
return r * 12742. * 100000. / lambda_cm
def uv_to_ed(u, lambda_cm=18.):
return u * lambda_cm / (12742. * 100000)
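# Illustrative usage (values are hypothetical): for an 18 cm observation,
# ed_to_uv(1.) returns the spatial frequency in wavelengths of a baseline one
# Earth diameter (12742 km) long, and uv_to_ed(ed_to_uv(1.)) recovers 1.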
def dtype_converter(data_type, char_length):
"""
Converts psycopg2 data types to python data types.
:param data_type:
Psycopg2 data type.
:param char_length:
If not ``None``, then shows char length.
:return:
"""
result = dtype_converter_dict[data_type]
if char_length:
result += str(char_length)
return result
def get_source_array_from_dbtable(source, band, host='odin.asc.rssi.ru',
port='5432', db='ra_results', user=None,
password=None,
table_name='pima_observations'):
"""
Function that returns numpy structured array from user-specified db table.
:param host:
:param port:
:param db:
:param user:
:param password:
:param table_name:
:return:
"""
connection = psycopg2.connect(host=host, port=port, dbname=db,
password=password, user=user)
cursor = connection.cursor()
# First know column names
cursor.execute("select column_name, data_type, character_maximum_length from\
information_schema.columns where table_schema = \'public\'\
and table_name=\'" + table_name + "\'")
result = cursor.fetchall()
dtype = list()
#column_names, data_types, char_lengths = zip(*result)
for column_name, data_type, char_length in result:
dtype.append((column_name, dtype_converter(data_type, char_length)))
# Convert to numpy data types
# Now read the table and put to structured array
cursor.execute('SELECT *\
FROM pima_observations WHERE source = %(source)s AND\
band = %(band)s', \
{'source': source, 'band': band})
table_data = cursor.fetchall()
struct_array = np.zeros(len(table_data), dtype=dtype)
for i, (column_name, data_type, char_length,) in enumerate(result):
struct_array[column_name] = zip(*table_data)[i]
return struct_array
def s_thr_from_obs_row(row, raise_ra=True, n_q=0.637, dnu=16. * 10 ** 6, n=2):
"""
Function that calculates sigma of detection from structured array row.
:param row:
Row of 2D structured array. Actually, an object with __getitem__ method
and corresponding keys.
:return:
Sigma for detection using upper and lower bands.
"""
rt1 = row['st1']
rt2 = row['st2']
polar = row['polar']
band = row['band'].upper()
try:
SEFD_rt1 = SEFD_dict[rt1][band.upper()][polar[0]]
except KeyError:
print "There's no entry for " + row['st1'] + " for band " +\
band.upper() + " in utils.SEFD_dict!"
return None
except TypeError:
print "There's no SEFD data for " + row['exper_name'] + " " + \
row['st1'] + " for band " + band.upper() + " !"
return None
try:
SEFD_rt2 = SEFD_dict[rt2][band.upper()][polar[1]]
except KeyError:
print "There's no entry for " + row['st2'] + " for band " + \
band.upper() + " in utils.SEFD_dict!"
return None
except TypeError:
print "There's no SEFD data for " + row['exper_name'] + " " + \
row['st2'] + " for band " + band.upper() + " !"
return None
try:
result = (1. / n_q) * math.sqrt((SEFD_rt1 * SEFD_rt2) / (n * dnu *
row['solint']))
except TypeError:
return None
return result
def gauss_1d(p, x):
"""
:param p:
Parameter vector (amplitude, major axis).
:param x:
Numpy array of x-coordinates.
:return:
Numpy array of value(s) of gaussian at point(s) (x).
"""
return p[0] * np.exp(-x ** 2. / (2. * p[1] ** 2.))
def gauss_2d_isotropic(p, x, y):
"""
:param p:
Parameter vector (amplitude, major axis).
:param x:
Numpy array of x-coordinates.
:return:
Numpy array of value(s) of gaussian at point(s) (x, y).
"""
    return p[0] * np.exp(-(x ** 2. + y ** 2.) / (2. * p[1] ** 2.))
def gauss_2d_anisotropic(p, x, y):
"""
:param p:
Parameter vector (amplitude, major axis, e, rotation angle [from x to
y]).
:param x:
Numpy array of x-coordinates.
:param y:
Numpy array of y-coordinates.
:return:
Numpy array of value(s) of gaussian at point(s) (x, y).
"""
a = math.cos(p[3]) ** 2. / (2. * p[1] ** 2.) + math.sin(p[3]) ** 2. /\
(2. * (p[1] * p[2]) ** 2.)
b = -math.sin(2. * p[3]) / (4. * p[1] ** 2.) + math.sin(2. * p[3]) /\
(4. * (p[1] * p[2]) ** 2.)
c = math.sin(p[3]) ** 2. / (2. * p[1] ** 2.) + math.cos(p[3]) ** 2. / \
(2. * (p[1] * p[2]) ** 2.)
return p[0] * np.exp(-(a * x ** 2. + 2. * b * x * y + c * y ** 2.))
| mit | -3,182,100,353,803,740,000 | 37.044643 | 81 | 0.417625 | false |
hslee16/ansible-modules-extras | monitoring/rollbar_deployment.py | 138 | 3898 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014, Max Riveiro, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rollbar_deployment
version_added: 1.6
author: "Max Riveiro (@kavu)"
short_description: Notify Rollbar about app deployments
description:
- Notify Rollbar about app deployments
(see https://rollbar.com/docs/deploys_other/)
options:
token:
description:
- Your project access token.
required: true
environment:
description:
- Name of the environment being deployed, e.g. 'production'.
required: true
revision:
description:
- Revision number/sha being deployed.
required: true
user:
description:
- User who deployed.
required: false
rollbar_user:
description:
- Rollbar username of the user who deployed.
required: false
comment:
description:
- Deploy comment (e.g. what is being deployed).
required: false
url:
description:
- Optional URL to submit the notification to.
required: false
default: 'https://api.rollbar.com/api/1/deploy/'
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated.
This should only be used on personally controlled sites using
self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
- rollbar_deployment: token=AAAAAA
environment='staging'
user='ansible'
        revision=4.2
        rollbar_user='admin'
comment='Test Deploy'
'''
import urllib
def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True),
environment=dict(required=True),
revision=dict(required=True),
user=dict(required=False),
rollbar_user=dict(required=False),
comment=dict(required=False),
url=dict(
required=False,
default='https://api.rollbar.com/api/1/deploy/'
),
validate_certs=dict(default='yes', type='bool'),
),
supports_check_mode=True
)
if module.check_mode:
module.exit_json(changed=True)
params = dict(
access_token=module.params['token'],
environment=module.params['environment'],
revision=module.params['revision']
)
if module.params['user']:
params['local_username'] = module.params['user']
if module.params['rollbar_user']:
params['rollbar_username'] = module.params['rollbar_user']
if module.params['comment']:
params['comment'] = module.params['comment']
url = module.params.get('url')
try:
data = urllib.urlencode(params)
response, info = fetch_url(module, url, data=data)
except Exception, e:
module.fail_json(msg='Unable to notify Rollbar: %s' % e)
else:
if info['status'] == 200:
module.exit_json(changed=True)
else:
module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url))
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| gpl-3.0 | 3,346,562,037,111,369,700 | 28.089552 | 97 | 0.631606 | false |
bartvm/pylearn2 | pylearn2/scripts/papers/jia_huang_wkshp_11/fit_final_model.py | 44 | 3913 | from __future__ import print_function
import numpy as np
from optparse import OptionParser
from pylearn2.models.independent_multiclass_logistic import IndependentMulticlassLogistic
from galatea.s3c.feature_loading import get_features
from pylearn2.utils import serial
from pylearn2.datasets.cifar10 import CIFAR10
from pylearn2.datasets.cifar100 import CIFAR100
from theano.compat.six.moves import xrange
import gc
gc.collect()
def train(fold_train_X, fold_train_y, C):
model = IndependentMulticlassLogistic(C).fit(fold_train_X, fold_train_y)
gc.collect()
return model
def get_labels_and_fold_indices(cifar10, cifar100, stl10):
assert stl10 or cifar10 or cifar100
assert stl10+cifar10+cifar100 == 1
if stl10:
print('loading entire stl-10 train set just to get the labels and folds')
stl10 = serial.load("${PYLEARN2_DATA_PATH}/stl10/stl10_32x32/train.pkl")
train_y = stl10.y
fold_indices = stl10.fold_indices
elif cifar10 or cifar100:
if cifar10:
print('loading entire cifar10 train set just to get the labels')
cifar = CIFAR10(which_set = 'train')
else:
assert cifar100
print('loading entire cifar100 train set just to get the labels')
cifar = CIFAR100(which_set = 'train')
cifar.y = cifar.y_fine
train_y = cifar.y
assert train_y is not None
fold_indices = np.zeros((5,40000),dtype='uint16')
idx_list = np.cast['uint16'](np.arange(1,50001)) #mimic matlab format of stl10
for i in xrange(5):
mask = idx_list < i * 10000 + 1
mask += idx_list >= (i+1) * 10000 + 1
fold_indices[i,:] = idx_list[mask]
assert fold_indices.min() == 1
assert fold_indices.max() == 50000
return train_y, fold_indices
def main(train_path,
out_path,
dataset,
standardize,
C,
**kwargs):
stl10 = dataset == 'stl10'
cifar10 = dataset == 'cifar10'
cifar100 = dataset == 'cifar100'
assert stl10 + cifar10 + cifar100 == 1
    print('getting labels and folds')
train_y, fold_indices = get_labels_and_fold_indices(cifar10, cifar100, stl10)
gc.collect()
assert train_y is not None
print('loading training features')
train_X = get_features(train_path, split = False, standardize = standardize)
assert str(train_X.dtype) == 'float32'
if stl10:
assert train_X.shape[0] == 5000
if cifar10 or cifar100:
assert train_X.shape[0] == 50000
assert train_y.shape == (50000,)
print('training model')
model = train(train_X, train_y, C)
print('saving model')
serial.save(out_path, model)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-d", "--train",
action="store", type="string", dest="train")
parser.add_option("-o", "--out",
action="store", type="string", dest="out")
parser.add_option('-C', type='float', dest='C', action='store', default = None)
parser.add_option('--dataset', type='string', dest = 'dataset', action='store', default = None)
parser.add_option('--standardize',action="store_true", dest="standardize", default=False)
parser.add_option('--fold', action='store', type='int', dest='fold', default = None)
#(options, args) = parser.parse_args()
#assert options.dataset is not None
#assert options.C is not None
#assert options.out is not None
#assert options.fold is not None
#log = open(options.out+'.log.txt','w')
    #log.write('log file started successfully\n')
#log.flush()
print('parsed the args')
main(train_path='features.npy',
out_path = 'final_model.pkl',
C = .01,
dataset = 'cifar100',
standardize = False,
#fold = options.fold,
#log = log
)
#log.close()
| bsd-3-clause | 6,836,480,216,472,130,000 | 30.304 | 99 | 0.623818 | false |
4eek/edx-platform | common/djangoapps/enrollment/tests/fake_data_api.py | 104 | 3705 | """
A Fake Data API for testing purposes.
"""
import copy
import datetime
_DEFAULT_FAKE_MODE = {
"slug": "honor",
"name": "Honor Code Certificate",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": None,
"description": None
}
_ENROLLMENTS = []
_COURSES = []
_ENROLLMENT_ATTRIBUTES = []
# pylint: disable=unused-argument
def get_course_enrollments(student_id):
"""Stubbed out Enrollment data request."""
return _ENROLLMENTS
def get_course_enrollment(student_id, course_id):
"""Stubbed out Enrollment data request."""
return _get_fake_enrollment(student_id, course_id)
def create_course_enrollment(student_id, course_id, mode='honor', is_active=True):
"""Stubbed out Enrollment creation request. """
return add_enrollment(student_id, course_id, mode=mode, is_active=is_active)
def update_course_enrollment(student_id, course_id, mode=None, is_active=None):
"""Stubbed out Enrollment data request."""
enrollment = _get_fake_enrollment(student_id, course_id)
if enrollment and mode is not None:
enrollment['mode'] = mode
if enrollment and is_active is not None:
enrollment['is_active'] = is_active
return enrollment
def get_course_enrollment_info(course_id, include_expired=False):
"""Stubbed out Enrollment data request."""
return _get_fake_course_info(course_id)
def _get_fake_enrollment(student_id, course_id):
"""Get an enrollment from the enrollments array."""
for enrollment in _ENROLLMENTS:
if student_id == enrollment['student'] and course_id == enrollment['course']['course_id']:
return enrollment
def _get_fake_course_info(course_id):
"""Get a course from the courses array."""
for course in _COURSES:
if course_id == course['course_id']:
return course
def add_enrollment(student_id, course_id, is_active=True, mode='honor'):
"""Append an enrollment to the enrollments array."""
enrollment = {
"created": datetime.datetime.now(),
"mode": mode,
"is_active": is_active,
"course": _get_fake_course_info(course_id),
"student": student_id
}
_ENROLLMENTS.append(enrollment)
return enrollment
# pylint: disable=unused-argument
def add_or_update_enrollment_attr(user_id, course_id, attributes):
"""Add or update enrollment attribute array"""
for attribute in attributes:
_ENROLLMENT_ATTRIBUTES.append({
'namespace': attribute['namespace'],
'name': attribute['name'],
'value': attribute['value']
})
# pylint: disable=unused-argument
def get_enrollment_attributes(user_id, course_id):
"""Retrieve enrollment attribute array"""
return _ENROLLMENT_ATTRIBUTES
def add_course(course_id, enrollment_start=None, enrollment_end=None, invite_only=False, course_modes=None):
"""Append course to the courses array."""
course_info = {
"course_id": course_id,
"enrollment_end": enrollment_end,
"course_modes": [],
"enrollment_start": enrollment_start,
"invite_only": invite_only,
}
if not course_modes:
course_info['course_modes'].append(_DEFAULT_FAKE_MODE)
else:
for mode in course_modes:
new_mode = copy.deepcopy(_DEFAULT_FAKE_MODE)
new_mode['slug'] = mode
course_info['course_modes'].append(new_mode)
_COURSES.append(course_info)
def reset():
"""Set the enrollments and courses arrays to be empty."""
global _COURSES # pylint: disable=global-statement
_COURSES = []
global _ENROLLMENTS # pylint: disable=global-statement
_ENROLLMENTS = []
| agpl-3.0 | -1,222,491,505,215,174,100 | 28.879032 | 108 | 0.651822 | false |
russellgeoff/blog | Control/Controllers/gc.py | 4 | 2183 | '''
Copyright (C) 2013 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import control
import numpy as np
class Control(control.Control):
"""
A class that holds the simulation and control dynamics for
a two link arm, with the dynamics carried out in Python.
"""
def __init__(self, **kwargs):
super(Control, self).__init__(**kwargs)
# generalized coordinates
self.target_gain = 2*np.pi
self.target_bias = -np.pi
def check_distance(self, arm):
"""Checks the distance to target"""
return np.sum(abs(arm.q - self.target))
def control(self, arm, q_des=None):
"""Generate a control signal to move the arm through
joint space to the desired joint angle position"""
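# Computed-torque style PD control in joint space: the desired acceleration
# kp * (wrapped angle error) - kv * dq is mapped through the inertia matrix Mq
# to joint torques; gravity is zero in this simulation, so no gravity term.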
# calculated desired joint angle acceleration
if q_des is None:
prop_val = ((self.target.reshape(1,-1) - arm.q) + np.pi) % \
(np.pi*2) - np.pi
else:
# if a desired location is specified on input
prop_val = q_des - arm.q
# add in velocity compensation
q_des = (self.kp * prop_val + \
self.kv * -arm.dq).reshape(-1,)
Mq = arm.gen_Mq()
# tau = Mq * q_des + tau_grav, but gravity = 0
self.u = np.dot(Mq, q_des).reshape(-1,)
return self.u
def gen_target(self, arm):
"""Generate a random target"""
self.target = np.random.random(size=(len(arm.L),)) * \
self.target_gain + self.target_bias
return self.target
| gpl-3.0 | 1,348,096,549,485,841,400 | 31.58209 | 73 | 0.615667 | false |
willingc/oh-mainline | vendor/packages/django-extensions/django_extensions/management/commands/passwd.py | 41 | 1180 | from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
import getpass
class Command(BaseCommand):
help = "Clone of the UNIX program ``passwd'', for django.contrib.auth."
requires_model_validation = False
def handle(self, *args, **options):
if len(args) > 1:
raise CommandError("need exactly one or zero arguments for username")
if args:
username, = args
else:
username = getpass.getuser()
try:
u = User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError("user %s does not exist" % username)
print "Changing password for user", u.username
p1 = p2 = ""
while "" in (p1, p2) or p1 != p2:
p1 = getpass.getpass()
p2 = getpass.getpass("Password (again): ")
if p1 != p2:
print "Passwords do not match, try again"
elif "" in (p1, p2):
raise CommandError("aborted")
u.set_password(p1)
u.save()
return "Password changed successfully for user %s\n" % u.username
| agpl-3.0 | -363,040,011,538,661,600 | 30.052632 | 81 | 0.582203 | false |
dorianpula/paramiko | demos/demo.py | 36 | 5396 | #!/usr/bin/env python
# Copyright (C) 2003-2007 Robey Pointer <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
import base64
from binascii import hexlify
import getpass
import os
import select
import socket
import sys
import time
import traceback
from paramiko.py3compat import input
import paramiko
try:
import interactive
except ImportError:
from . import interactive
def agent_auth(transport, username):
"""
Attempt to authenticate to the given transport using any of the private
keys available from an SSH agent.
"""
agent = paramiko.Agent()
agent_keys = agent.get_keys()
if len(agent_keys) == 0:
return
for key in agent_keys:
print('Trying ssh-agent key %s' % hexlify(key.get_fingerprint()))
try:
transport.auth_publickey(username, key)
print('... success!')
return
except paramiko.SSHException:
print('... nope.')
def manual_auth(username, hostname):
default_auth = 'p'
auth = input('Auth by (p)assword, (r)sa key, or (d)ss key? [%s] ' % default_auth)
if len(auth) == 0:
auth = default_auth
if auth == 'r':
default_path = os.path.join(os.environ['HOME'], '.ssh', 'id_rsa')
path = input('RSA key [%s]: ' % default_path)
if len(path) == 0:
path = default_path
try:
key = paramiko.RSAKey.from_private_key_file(path)
except paramiko.PasswordRequiredException:
password = getpass.getpass('RSA key password: ')
key = paramiko.RSAKey.from_private_key_file(path, password)
t.auth_publickey(username, key)
elif auth == 'd':
default_path = os.path.join(os.environ['HOME'], '.ssh', 'id_dsa')
path = input('DSS key [%s]: ' % default_path)
if len(path) == 0:
path = default_path
try:
key = paramiko.DSSKey.from_private_key_file(path)
except paramiko.PasswordRequiredException:
password = getpass.getpass('DSS key password: ')
key = paramiko.DSSKey.from_private_key_file(path, password)
t.auth_publickey(username, key)
else:
pw = getpass.getpass('Password for %s@%s: ' % (username, hostname))
t.auth_password(username, pw)
# setup logging
paramiko.util.log_to_file('demo.log')
username = ''
if len(sys.argv) > 1:
hostname = sys.argv[1]
if hostname.find('@') >= 0:
username, hostname = hostname.split('@')
else:
hostname = input('Hostname: ')
if len(hostname) == 0:
print('*** Hostname required.')
sys.exit(1)
port = 22
if hostname.find(':') >= 0:
hostname, portstr = hostname.split(':')
port = int(portstr)
# now connect
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((hostname, port))
except Exception as e:
print('*** Connect failed: ' + str(e))
traceback.print_exc()
sys.exit(1)
try:
t = paramiko.Transport(sock)
try:
t.start_client()
except paramiko.SSHException:
print('*** SSH negotiation failed.')
sys.exit(1)
try:
keys = paramiko.util.load_host_keys(os.path.expanduser('~/.ssh/known_hosts'))
except IOError:
try:
keys = paramiko.util.load_host_keys(os.path.expanduser('~/ssh/known_hosts'))
except IOError:
print('*** Unable to open host keys file')
keys = {}
# check server's host key -- this is important.
key = t.get_remote_server_key()
if hostname not in keys:
print('*** WARNING: Unknown host key!')
elif key.get_name() not in keys[hostname]:
print('*** WARNING: Unknown host key!')
elif keys[hostname][key.get_name()] != key:
print('*** WARNING: Host key has changed!!!')
sys.exit(1)
else:
print('*** Host key OK.')
# get username
if username == '':
default_username = getpass.getuser()
username = input('Username [%s]: ' % default_username)
if len(username) == 0:
username = default_username
agent_auth(t, username)
if not t.is_authenticated():
manual_auth(username, hostname)
if not t.is_authenticated():
print('*** Authentication failed. :(')
t.close()
sys.exit(1)
chan = t.open_session()
chan.get_pty()
chan.invoke_shell()
print('*** Here we go!\n')
interactive.interactive_shell(chan)
chan.close()
t.close()
except Exception as e:
print('*** Caught exception: ' + str(e.__class__) + ': ' + str(e))
traceback.print_exc()
try:
t.close()
except:
pass
sys.exit(1)
| lgpl-2.1 | -5,611,977,076,138,889,000 | 28.648352 | 88 | 0.614715 | false |
hyqneuron/pylearn2-maxsom | pylearn2/training_algorithms/tests/test_learning_rule.py | 12 | 9942 | import numpy as np
from theano.compat.six.moves import zip as izip
from pylearn2.costs.cost import SumOfCosts
from pylearn2.testing.cost import SumOfOneHalfParamsSquared
from pylearn2.testing.cost import SumOfParams
from pylearn2.testing.datasets import ArangeDataset
from pylearn2.training_algorithms.sgd import SGD
from pylearn2.training_algorithms.learning_rule import Momentum
from pylearn2.training_algorithms.learning_rule import AdaDelta
from pylearn2.training_algorithms.learning_rule import AdaGrad
from pylearn2.training_algorithms.learning_rule import RMSProp
from test_sgd import DummyCost, DummyModel
# used by all learning rule tests
scales = [.01, .02, .05, 1., 5.]
shapes = [(1,), (9,), (8, 7), (6, 5, 4), (3, 2, 2, 2)]
learning_rate = .001
def test_momentum():
"""
Make sure that learning_rule.Momentum obtains the same parameter values as
with a hand-crafted sgd w/ momentum implementation, given a dummy model and
learning rate scaler for each parameter.
"""
# We include a cost other than SumOfParams so that data is actually
# queried from the training set, and the expected number of updates
# are applied.
cost = SumOfCosts([SumOfParams(), (0., DummyCost())])
model = DummyModel(shapes, lr_scalers=scales)
dataset = ArangeDataset(1)
momentum = 0.5
sgd = SGD(cost=cost,
learning_rate=learning_rate,
learning_rule=Momentum(momentum),
batch_size=1)
sgd.setup(model=model, dataset=dataset)
manual = [param.get_value() for param in model.get_params()]
inc = [-learning_rate * scale for scale in scales]
manual = [param + i for param, i in izip(manual, inc)]
sgd.train(dataset=dataset)
assert all(np.allclose(manual_param, sgd_param.get_value())
for manual_param, sgd_param
in izip(manual, model.get_params()))
manual = [param - learning_rate * scale + i * momentum
for param, scale, i in izip(manual, scales, inc)]
sgd.train(dataset=dataset)
assert all(np.allclose(manual_param, sgd_param.get_value())
for manual_param, sgd_param
in izip(manual, model.get_params()))
def test_nesterov_momentum():
"""
Make sure that learning_rule.Momentum obtains the same parameter values as
with a hand-crafted sgd w/ momentum implementation, given a dummy model and
learning rate scaler for each parameter.
"""
# We include a cost other than SumOfParams so that data is actually
# queried from the training set, and the expected number of updates
# are applied.
cost = SumOfCosts([SumOfParams(), (0., DummyCost())])
model = DummyModel(shapes, lr_scalers=scales)
dataset = ArangeDataset(1)
momentum = 0.5
sgd = SGD(cost=cost,
learning_rate=learning_rate,
learning_rule=Momentum(momentum, nesterov_momentum=True),
batch_size=1)
sgd.setup(model=model, dataset=dataset)
manual = [param.get_value() for param in model.get_params()]
vel = [-learning_rate * scale for scale in scales]
updates = [-learning_rate * scale + v * momentum
for scale, v in izip(scales, vel)]
manual = [param + update for param, update in izip(manual, updates)]
sgd.train(dataset=dataset)
assert all(np.allclose(manual_param, sgd_param.get_value())
for manual_param, sgd_param
in izip(manual, model.get_params()))
vel = [-learning_rate * scale + i * momentum
for scale, i in izip(scales, vel)]
updates = [-learning_rate * scale + v * momentum
for scale, v in izip(scales, vel)]
manual = [param + update for param, update in izip(manual, updates)]
sgd.train(dataset=dataset)
assert all(np.allclose(manual_param, sgd_param.get_value())
for manual_param, sgd_param
in izip(manual, model.get_params()))
def test_adadelta():
"""
Make sure that learning_rule.AdaDelta obtains the same parameter values as
with a hand-crafted AdaDelta implementation, given a dummy model and
learning rate scaler for each parameter.
Reference:
"AdaDelta: An Adaptive Learning Rate Method", Matthew D. Zeiler.
"""
# We include a cost other than SumOfParams so that data is actually
# queried from the training set, and the expected number of updates
# are applied.
cost = SumOfCosts([SumOfOneHalfParamsSquared(), (0., DummyCost())])
model = DummyModel(shapes, lr_scalers=scales)
dataset = ArangeDataset(1)
decay = 0.95
sgd = SGD(cost=cost,
learning_rate=learning_rate,
learning_rule=AdaDelta(decay),
batch_size=1)
sgd.setup(model=model, dataset=dataset)
state = {}
for param in model.get_params():
param_shape = param.get_value().shape
state[param] = {}
state[param]['g2'] = np.zeros(param_shape)
state[param]['dx2'] = np.zeros(param_shape)
def adadelta_manual(model, state):
inc = []
rval = []
for scale, param in izip(scales, model.get_params()):
pstate = state[param]
param_val = param.get_value()
# begin adadelta
pstate['g2'] = decay * pstate['g2'] + (1 - decay) * param_val ** 2
rms_g_t = np.sqrt(pstate['g2'] + scale * learning_rate)
rms_dx_tm1 = np.sqrt(pstate['dx2'] + scale * learning_rate)
dx_t = -rms_dx_tm1 / rms_g_t * param_val
pstate['dx2'] = decay * pstate['dx2'] + (1 - decay) * dx_t ** 2
rval += [param_val + dx_t]
return rval
manual = adadelta_manual(model, state)
sgd.train(dataset=dataset)
assert all(np.allclose(manual_param, sgd_param.get_value())
for manual_param, sgd_param
in izip(manual, model.get_params()))
manual = adadelta_manual(model, state)
sgd.train(dataset=dataset)
assert all(np.allclose(manual_param, sgd_param.get_value())
for manual_param, sgd_param in
izip(manual, model.get_params()))
def test_adagrad():
"""
Make sure that learning_rule.AdaGrad obtains the same parameter values as
with a hand-crafted AdaGrad implementation, given a dummy model and
learning rate scaler for each parameter.
Reference:
"Adaptive subgradient methods for online learning and
stochastic optimization", Duchi J, Hazan E, Singer Y.
"""
# We include a cost other than SumOfParams so that data is actually
# queried from the training set, and the expected number of updates
# are applied.
cost = SumOfCosts([SumOfOneHalfParamsSquared(), (0., DummyCost())])
model = DummyModel(shapes, lr_scalers=scales)
dataset = ArangeDataset(1)
sgd = SGD(cost=cost,
learning_rate=learning_rate,
learning_rule=AdaGrad(),
batch_size=1)
sgd.setup(model=model, dataset=dataset)
state = {}
for param in model.get_params():
param_shape = param.get_value().shape
state[param] = {}
state[param]['sg2'] = np.zeros(param_shape)
def adagrad_manual(model, state):
rval = []
for scale, param in izip(scales, model.get_params()):
pstate = state[param]
param_val = param.get_value()
            # begin adagrad
pstate['sg2'] += param_val ** 2
dx_t = - (scale * learning_rate
/ np.sqrt(pstate['sg2'])
* param_val)
rval += [param_val + dx_t]
return rval
manual = adagrad_manual(model, state)
sgd.train(dataset=dataset)
assert all(np.allclose(manual_param, sgd_param.get_value())
for manual_param, sgd_param
in izip(manual, model.get_params()))
manual = adagrad_manual(model, state)
sgd.train(dataset=dataset)
assert all(np.allclose(manual_param, sgd_param.get_value())
for manual_param, sgd_param in
izip(manual, model.get_params()))
def test_rmsprop():
"""
Make sure that learning_rule.RMSProp obtains the same parameter values as
with a hand-crafted RMSProp implementation, given a dummy model and
learning rate scaler for each parameter.
"""
# We include a cost other than SumOfParams so that data is actually
# queried from the training set, and the expected number of updates
# are applied.
cost = SumOfCosts([SumOfOneHalfParamsSquared(), (0., DummyCost())])
scales = [.01, .02, .05, 1., 5.]
shapes = [(1,), (9,), (8, 7), (6, 5, 4), (3, 2, 2, 2)]
model = DummyModel(shapes, lr_scalers=scales)
dataset = ArangeDataset(1)
learning_rate = .001
decay = 0.90
max_scaling = 1e5
sgd = SGD(cost=cost,
learning_rate=learning_rate,
learning_rule=RMSProp(decay),
batch_size=1)
sgd.setup(model=model, dataset=dataset)
state = {}
for param in model.get_params():
param_shape = param.get_value().shape
state[param] = {}
state[param]['g2'] = np.zeros(param_shape)
def rmsprop_manual(model, state):
inc = []
rval = []
epsilon = 1. / max_scaling
for scale, param in izip(scales, model.get_params()):
pstate = state[param]
param_val = param.get_value()
# begin rmsprop
pstate['g2'] = decay * pstate['g2'] + (1 - decay) * param_val ** 2
rms_g_t = np.maximum(np.sqrt(pstate['g2']), epsilon)
dx_t = - scale * learning_rate / rms_g_t * param_val
rval += [param_val + dx_t]
return rval
manual = rmsprop_manual(model, state)
sgd.train(dataset=dataset)
assert all(np.allclose(manual_param, sgd_param.get_value())
for manual_param, sgd_param
in izip(manual, model.get_params()))
| bsd-3-clause | 197,006,328,474,794,600 | 34.255319 | 79 | 0.621304 | false |
cuongnv23/ansible | lib/ansible/modules/network/nxos/nxos_vxlan_vtep_vni.py | 22 | 12128 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vxlan_vtep_vni
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Creates a Virtual Network Identifier member (VNI)
description:
- Creates a Virtual Network Identifier member (VNI) for an NVE
overlay interface.
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- default, where supported, restores params default value.
options:
interface:
description:
- Interface name for the VXLAN Network Virtualization Endpoint.
required: true
vni:
description:
- ID of the Virtual Network Identifier.
required: true
assoc_vrf:
description:
- This attribute is used to identify and separate processing VNIs
that are associated with a VRF and used for routing. The VRF
and VNI specified with this command must match the configuration
of the VNI under the VRF.
required: false
choices: ['true','false']
default: null
ingress_replication:
description:
- Specifies mechanism for host reachability advertisement.
required: false
    choices: ['bgp','static','default']
default: null
multicast_group:
description:
- The multicast group (range) of the VNI. Valid values are
string and keyword 'default'.
required: false
default: null
peer_list:
description:
- Set the ingress-replication static peer list. Valid values
are an array, a space-separated string of ip addresses,
or the keyword 'default'.
required: false
default: null
suppress_arp:
description:
- Suppress arp under layer 2 VNI.
required: false
choices: ['true','false']
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_vxlan_vtep_vni:
interface: nve1
vni: 6000
ingress_replication: default
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["interface nve1", "member vni 6000"]
'''
import re
from ansible.module_utils.nxos import get_config, load_config
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
BOOL_PARAMS = [
'assoc_vrf',
'suppress_arp',
]
PARAM_TO_COMMAND_KEYMAP = {
'assoc_vrf': 'associate-vrf',
'interface': 'interface',
'vni': 'member vni',
'ingress_replication': 'ingress-replication protocol',
'multicast_group': 'mcast-group',
'peer_list': 'peer-ip',
'suppress_arp': 'suppress-arp'
}
def get_value(arg, config, module):
command = PARAM_TO_COMMAND_KEYMAP[arg]
command_val_re = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(command), re.M)
if arg in BOOL_PARAMS:
command_re = re.compile(r'\s+{0}\s*$'.format(command), re.M)
value = False
if command_re.search(config):
value = True
elif arg == 'peer_list':
has_command_val = command_val_re.findall(config, re.M)
value = []
if has_command_val:
value = has_command_val
else:
value = ''
has_command_val = command_val_re.search(config, re.M)
if has_command_val:
value = has_command_val.group('value')
return value
def check_interface(module, netcfg):
config = str(netcfg)
has_interface = re.search(r'(?:interface nve)(?P<value>.*)$', config, re.M)
value = ''
if has_interface:
value = 'nve{0}'.format(has_interface.group('value'))
return value
def get_existing(module, args):
existing = {}
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
interface_exist = check_interface(module, netcfg)
if interface_exist:
parents = ['interface {0}'.format(interface_exist)]
temp_config = netcfg.get_section(parents)
if 'member vni {0} associate-vrf'.format(module.params['vni']) in temp_config:
parents.append('member vni {0} associate-vrf'.format(module.params['vni']))
config = netcfg.get_section(parents)
elif "member vni {0}".format(module.params['vni']) in temp_config:
parents.append('member vni {0}'.format(module.params['vni']))
config = netcfg.get_section(parents)
else:
config = {}
if config:
for arg in args:
if arg not in ['interface', 'vni']:
existing[arg] = get_value(arg, config, module)
existing['interface'] = interface_exist
existing['vni'] = module.params['vni']
return existing, interface_exist
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
new_dict[new_key] = value
return new_dict
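# Illustrative mapping (hypothetical values):
#     apply_key_map(PARAM_TO_COMMAND_KEYMAP, {'vni': '6000', 'suppress_arp': True})
#     -> {'member vni': '6000', 'suppress-arp': True}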
def state_present(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.items():
if key == 'associate-vrf':
command = 'member vni {0} {1}'.format(module.params['vni'], key)
if not value:
command = 'no {0}'.format(command)
commands.append(command)
elif key == 'peer-ip' and value != 'default':
for peer in value:
commands.append('{0} {1}'.format(key, peer))
elif key == 'mcast-group' and value != existing_commands.get(key):
commands.append('no {0}'.format(key))
commands.append('{0} {1}'.format(key, value))
elif value is True:
commands.append(key)
elif value is False:
commands.append('no {0}'.format(key))
elif value == 'default':
if existing_commands.get(key):
existing_value = existing_commands.get(key)
if key == 'peer-ip':
for peer in existing_value:
commands.append('no {0} {1}'.format(key, peer))
else:
commands.append('no {0} {1}'.format(key, existing_value))
else:
if key.replace(' ', '_').replace('-', '_') in BOOL_PARAMS:
commands.append('no {0}'.format(key.lower()))
else:
command = '{0} {1}'.format(key, value.lower())
commands.append(command)
if commands:
vni_command = 'member vni {0}'.format(module.params['vni'])
ingress_replication_command = 'ingress-replication protocol static'
interface_command = 'interface {0}'.format(module.params['interface'])
if ingress_replication_command in commands:
static_level_cmds = [cmd for cmd in commands if 'peer' in cmd]
parents = [interface_command, vni_command, ingress_replication_command]
candidate.add(static_level_cmds, parents=parents)
commands = [cmd for cmd in commands if 'peer' not in cmd]
if vni_command in commands:
parents = [interface_command]
commands.remove(vni_command)
if module.params['assoc_vrf'] is None:
parents.append(vni_command)
candidate.add(commands, parents=parents)
def state_absent(module, existing, proposed, candidate):
if existing['assoc_vrf']:
commands = ['no member vni {0} associate-vrf'.format(
module.params['vni'])]
else:
commands = ['no member vni {0}'.format(module.params['vni'])]
parents = ['interface {0}'.format(module.params['interface'])]
candidate.add(commands, parents=parents)
def main():
argument_spec = dict(
interface=dict(required=True, type='str'),
vni=dict(required=True, type='str'),
assoc_vrf=dict(required=False, type='bool'),
multicast_group=dict(required=False, type='str'),
peer_list=dict(required=False, type='list'),
suppress_arp=dict(required=False, type='bool'),
ingress_replication=dict(required=False, type='str', choices=['bgp', 'static', 'default']),
state=dict(choices=['present', 'absent'], default='present', required=False),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False, 'commands': [], 'warnings': warnings}
if module.params['assoc_vrf']:
mutually_exclusive_params = ['multicast_group',
'suppress_arp',
'ingress_replication']
for param in mutually_exclusive_params:
if module.params[param]:
module.fail_json(msg='assoc_vrf cannot be used with '
'{0} param'.format(param))
if module.params['peer_list']:
if module.params['ingress_replication'] != 'static':
module.fail_json(msg='ingress_replication=static is required '
'when using peer_list param')
else:
peer_list = module.params['peer_list']
if peer_list[0] == 'default':
module.params['peer_list'] = 'default'
else:
                stripped_peer_list = list(map(str.strip, peer_list))
module.params['peer_list'] = stripped_peer_list
state = module.params['state']
args = PARAM_TO_COMMAND_KEYMAP.keys()
existing, interface_exist = get_existing(module, args)
if state == 'present':
if not interface_exist:
module.fail_json(msg="The proposed NVE interface does not exist. Use nxos_interface to create it first.")
elif interface_exist != module.params['interface']:
module.fail_json(msg='Only 1 NVE interface is allowed on the switch.')
elif state == 'absent':
if interface_exist != module.params['interface']:
module.exit_json(**result)
elif existing and existing['vni'] != module.params['vni']:
module.fail_json(
msg="ERROR: VNI delete failed: Could not find vni node for {0}".format(module.params['vni']),
existing_vni=existing['vni']
)
proposed_args = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.items():
if key != 'interface' and existing.get(key) != value:
proposed[key] = value
candidate = CustomNetworkConfig(indent=3)
if state == 'present':
state_present(module, existing, proposed, candidate)
elif existing and state == 'absent':
state_absent(module, existing, proposed, candidate)
if candidate:
candidate = candidate.items_text()
result['changed'] = True
result['commands'] = candidate
if not module.check_mode:
load_config(module, candidate)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -3,137,585,488,418,320,000 | 34.153623 | 117 | 0.611065 | false |
nushio3/ghc | .arc-linters/check-cpp.py | 6 | 1090 | #!/usr/bin/env python
# A linter to warn for ASSERT macros which are separated from their argument
# list by a space, which Clang's CPP barfs on
import sys
import logging
import os
import json
import re
def setup_logging(logger):
"""
``arc lint`` makes it quite tricky to catch debug output from linters.
Log to a file to work around this.
"""
hdlr = logging.FileHandler('linter.log', 'w')
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
return logger
logger = logging.getLogger()
#setup_logging(logger)
logger.debug(sys.argv)
path = sys.argv[1]
warnings = []
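# Each warning is a dict in the shape `arc lint` expects (severity, message,
# line); the collected list is printed as JSON on stdout.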
if os.path.isfile(path):
with open(path) as f:
for lineno, line in enumerate(f):
            if re.search(r'ASSERT \(', line) is not None:
warning = {
'severity': 'warning',
'message': 'CPP macros should not have a space between the macro name and their argument list',
'line': lineno+1,
}
warnings.append(warning)
logger.debug(warnings)
print(json.dumps(warnings))
| bsd-3-clause | 1,687,938,360,267,883,500 | 26.25 | 115 | 0.626606 | false |
cldmnky/salt-vault | _states/vault.py | 2 | 2512 | # -*- coding: utf-8 -*-
'''
:maintainer: Calle Pettersson <[email protected]>
:maturity: new
:depends: python-requests
:platform: all
Interact with Hashicorp Vault
'''
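# Example usage in an SLS file (illustrative sketch; the policy name and the
# rule body below are assumptions, not part of this module):
#
#   my-app-policy:
#     vault.policy_present:
#       - rules: |
#           path "secret/my-app/*" {
#             capabilities = ["read", "list"]
#           }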
import logging
import difflib
import salt.exceptions
log = logging.getLogger(__name__)
def policy_present(name, rules):
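    '''
    Ensure that the named Vault policy exists and contains ``rules``.
    The policy is read from v1/sys/policy/<name>; a 404 triggers creation,
    a 200 triggers reconciliation of the existing rules.
    '''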
url = "v1/sys/policy/{0}".format(name)
response = __utils__['vault.make_request']('GET', url)
try:
if response.status_code == 200:
return _handle_existing_policy(name, rules, response.json()['rules'])
elif response.status_code == 404:
return _create_new_policy(name, rules)
else:
response.raise_for_status()
except Exception as e:
return {
'name': name,
'changes': None,
'result': False,
'comment': 'Failed to get policy: {0}'.format(e)
}
def _create_new_policy(name, rules):
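    # Create the policy with a PUT to v1/sys/policy/<name>; in test mode only
    # the would-be change is reported and no request is made.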
if __opts__['test']:
return {
'name': name,
'changes': { name: { 'old': '', 'new': rules } },
'result': None,
'comment': 'Policy would be created'
}
payload = { 'rules': rules }
url = "v1/sys/policy/{0}".format(name)
response = __utils__['vault.make_request']('PUT', url, json=payload)
if response.status_code != 204:
return {
'name': name,
'changes': None,
'result': False,
'comment': 'Failed to create policy: {0}'.format(response.reason)
}
return {
'name': name,
'result': True,
'changes': { name: { 'old': None, 'new': rules } },
'comment': 'Policy was created'
}
def _handle_existing_policy(name, new_rules, existing_rules):
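    # Compare the requested rules with the current ones; when they differ,
    # report a unified diff and PUT the new rules (skipped in test mode).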
ret = { 'name': name }
if new_rules == existing_rules:
ret['result'] = True
ret['changes'] = None
ret['comment'] = 'Policy exists, and has the correct content'
return ret
change = ''.join(difflib.unified_diff(existing_rules.splitlines(True), new_rules.splitlines(True)))
if __opts__['test']:
ret['result'] = None
ret['changes'] = { name: { 'change': change } }
ret['comment'] = 'Policy would be changed'
return ret
payload = { 'rules': new_rules }
url = "v1/sys/policy/{0}".format(name)
response = __utils__['vault.make_request']('PUT', url, json=payload)
if response.status_code != 204:
return {
'name': name,
'changes': None,
'result': False,
'comment': 'Failed to change policy: {0}'.format(response.reason)
}
ret['result'] = True
ret['changes'] = { name: { 'change': change } }
ret['comment'] = 'Policy was updated'
return ret
| apache-2.0 | 5,613,529,509,486,607,000 | 25.723404 | 101 | 0.596338 | false |
johren/RackHD | test/util/display_node_firmware_versions.py | 12 | 10942 | '''
Copyright 2017 Dell Inc. or its subsidiaries. All Rights Reserved.
Purpose:
This is a utility to display various node firmware and manufacturer info.
'''
import fit_path # NOQA: unused import
import json
import pprint
import fit_common
import test_api_utils
# Globals
NODELIST = fit_common.node_select()
if NODELIST == []:
print "No nodes found on stack"
    exit(0)
fit_common.VERBOSITY = 1 # this is needed for suppressing debug messages to make reports readable
def mon_get_ip_info(node):
'''
This routine will grab the IP information from the compute node
'''
# Get RackHD node info
nodeurl = "/api/2.0/nodes/" + node
nodedata = fit_common.rackhdapi(nodeurl, action="get")
nodeinfo = nodedata['json']
result = nodedata['status']
if result != 200:
print "Error on node command ", nodeurl
fit_common.TEST_CASE["test_error"] += 1
return
# get RackHD BMC info
monurl = "/api/2.0/nodes/" + node + "/catalogs/bmc"
mondata = fit_common.rackhdapi(monurl, action="get")
catalog = mondata['json']
result = mondata['status']
if result != 200:
print "Error on catalog/bmc command ", monurl
else:
print " BMC MAC Address: " + catalog["data"]["MAC Address"]
bmc_ip_value = catalog["data"].get("IP Address")
print " Shared NIC BMC IP Address: " + bmc_ip_value
print " Shared NIC BMC IP Address Source: " + catalog["data"]["IP Address Source"]
# Check BMC IP vs OBM IP setting
try:
obmlist = nodeinfo["obmSettings"]
except:
print "ERROR: Node has no OBM settings configured"
else:
if fit_common.VERBOSITY >= 3:
print " OBM Settings:"
print fit_common.json.dumps(obmlist, indent=4)
try:
obmlist[0]["config"]["host"]
except:
print "ERROR: Invalid or empty OBM setting"
# Get RackHD RMM info
monurl = "/api/2.0/nodes/" + node + "/catalogs/rmm"
mondata = fit_common.rackhdapi(monurl, action="get")
catalog = mondata['json']
result = mondata['status']
if result != 200:
print "\nNo RMM catalog for node"
else:
print " RMM MAC Address: " + catalog["data"].get("MAC Address")
print " RMM IP Address: " + catalog["data"].get("IP Address")
print " RMM IP Address Source: " + catalog["data"].get("IP Address Source")
def redfish_simple_storage_members(node_id):
"""
To get the device ids from simple storage for a given node_id
:param nodeid: node id
"""
on_url = "/redfish/v1/Systems/" + str(node_id) + '/SimpleStorage'
on_data = fit_common.rackhdapi(url_cmd=on_url)
# To get a list of devices
dev_ids = []
try:
members = on_data['json']["Members"]
except KeyError:
members = []
for member in members:
href_id = member["@odata.id"]
dev_id = href_id.split('/')[-1]
dev_ids.append(dev_id)
return dev_ids
class display_node_firmware(fit_common.unittest.TestCase):
# This test displays the BMC and BIOS firmware versions from the
# RackHD catalog data and the onrack redfish calls
# No asserts are used in this test, avoiding early exit on errors
def test_display_bmc_bios(self):
ora_name = ""
inode = 1
for node in NODELIST:
print "==== Displaying BMC BIOS ===="
# Print the SKU info from onrack
print "\nNode " + str(inode) + ": " + node
print "Redfish SKU data:"
# Redfish 1.0
on_url = "/redfish/v1/Systems/" + node
on_data = fit_common.rackhdapi(url_cmd=on_url)
if on_data['status'] == 200:
sysdata = on_data['json']
ora_name = sysdata.get("Name", "")
print "Name: ", ora_name
print "AssetTag: ", sysdata.get("AssetTag", "")
print "SKU: ", sysdata.get("SKU", "")
print "BiosVersion: ", sysdata.get("BiosVersion", "")
print "PowerState: ", sysdata.get("PowerState", "")
print "SerialNumber: ", sysdata.get("SerialNumber", "")
print " Model: ", sysdata.get("Model", "")
else:
print "Status: ", on_data['status']
# Print the related system info from RackHD
print "\nRackHD System Info from DMI:"
monurl = "/api/2.0/nodes/" + node + "/catalogs/dmi"
mondata = fit_common.rackhdapi(monurl, action="get")
catalog = mondata['json']
result = mondata['status']
            # report the error, but don't exit with an assert if no DMI catalog
if result != 200:
print "Error on catalog/dmi command"
else:
print " ID: " + catalog["id"]
print " Product Name : ", catalog["data"]["System Information"].get("Product Name", "")
print " Serial Number: ", catalog["data"]["System Information"].get("Serial Number", "")
print " UUID : ", catalog["data"]["System Information"].get("UUID", "")
print " BMC FW Revision : ", catalog["data"]["BIOS Information"].get("Firmware Revision", "")
print " Release Date : ", catalog["data"]["BIOS Information"].get("Release Date", "")
print " BIOS FW Package : ", catalog["data"]["BIOS Information"].get("Version", "")
print " BIOS Vendor : ", catalog["data"]["BIOS Information"].get("Vendor", "")
print "ORA Name : ", ora_name
print "\nIP Info:"
mon_get_ip_info(node)
inode += 1
print "=========================================================\n"
def test_display_bmc_mc_info(self):
# This test displays the BMC MC info from the compute node via
# IPMI call ipmitool mc info
# No asserts are used in this test, avoiding early exit on errors
inode = 1
for node in NODELIST:
print "==== Displaying BMC MC info ===="
nodetype = test_api_utils.get_rackhd_nodetype(node)
print "\nNode " + str(inode) + ": " + node
print "Type: ", nodetype
if nodetype != "unknown" and nodetype != "Unmanaged":
nodeurl = "/api/2.0/nodes/" + node
nodedata = fit_common.rackhdapi(nodeurl, action="get")
nodeinfo = nodedata['json']
result = nodedata['status']
if result != 200:
print "Error on node command" + nodeurl
else:
try:
obmlist = nodeinfo["obmSettings"]
except:
print "ERROR: Node has no OBM settings configured"
else:
if obmlist:
bmc_ip = test_api_utils.get_compute_bmc_ip(node)
if bmc_ip in [1, 2]:
print "No BMC IP found"
elif bmc_ip.startswith('192.168'):
print "ERROR: BAD BMC Value: ", bmc_ip
else:
user_cred = test_api_utils.get_compute_node_username(node)
if user_cred in [1, 2, 3, 4]:
                                    print "Unable to get user credentials for node_id", node
else:
mc_data = test_api_utils.run_ipmi_command(bmc_ip, 'mc info', user_cred)
if mc_data['exitcode'] == 0:
print "MC Data: "
print mc_data['stdout']
else:
print "ERROR: Node has no OBM settings configured"
inode += 1
print "=========================================================\n"
def test_display_raid_controller_firmware(self):
# This test displays the MegaRaid controller firmware data from the compute
# node if it exists. It then displays the controller info contained in the
# RackHD redfish managed system data
# No asserts are used in this test, avoiding early exit on errors
inode = 1
for node in NODELIST:
print "==== Displaying MegaRAID and Controller info ===="
source_set = []
nodetype = test_api_utils.get_rackhd_nodetype(node)
print "\nNode " + str(inode) + ": " + node
print "Type: ", nodetype
if nodetype != "unknown" and nodetype != "Unmanaged":
monurl = "/api/2.0/nodes/" + node + "/catalogs"
mondata = fit_common.rackhdapi(monurl, action="get")
catalog = mondata['json']
result = mondata['status']
if result != 200:
print "ERROR: failed catalog request"
else:
source_set = test_api_utils.get_node_source_id_list(node)
if 'megaraid-controllers' in source_set:
print "Source: megaraid-controllers\n"
raidurl = "/api/2.0/nodes/" + node + "/catalogs/megaraid-controllers"
raiddata = fit_common.rackhdapi(raidurl, action="get")
catalog = raiddata['json']
result = raiddata['status']
print " Basics: ", catalog["data"]["Controllers"][0]["Command Status"]
print " Version: ",
pprint.pprint(catalog["data"]["Controllers"][0]["Response Data"]["Version"])
else:
                        print "Info: monorail catalog did not contain megaraid-controllers source"
# display controller data if available, no firmware revs are present in the output
device_ids = redfish_simple_storage_members(node)
for dev_id in device_ids:
devurl = "/redfish/v1/Systems/" + node + "/SimpleStorage/" + dev_id
devdata = fit_common.rackhdapi(url_cmd=devurl)
controller = devdata['json']
result = devdata['status']
if result == 200:
controller = devdata['json']
print "Controller: " + str(dev_id) + " Name: " + str(controller.get('Name', ""))
print "Description: ", json.dumps(controller.get('Description', ""), indent=4)
inode += 1
print "========================================================="
if __name__ == '__main__':
fit_common.unittest.main()
| apache-2.0 | -2,759,054,859,170,837,000 | 43.844262 | 109 | 0.512886 | false |
probablytom/tomwallis.net | venv/lib/python2.7/site-packages/django/db/models/fields/related.py | 21 | 105184 | from __future__ import unicode_literals
from operator import attrgetter
from django.apps import apps
from django.core import checks
from django.db import connection, connections, router, transaction
from django.db.backends import utils
from django.db.models import signals, Q
from django.db.models.deletion import SET_NULL, SET_DEFAULT, CASCADE
from django.db.models.fields import (AutoField, Field, IntegerField,
PositiveIntegerField, PositiveSmallIntegerField, FieldDoesNotExist)
from django.db.models.lookups import IsNull
from django.db.models.related import RelatedObject, PathInfo
from django.db.models.query import QuerySet
from django.db.models.sql.datastructures import Col
from django.utils.encoding import force_text, smart_text
from django.utils import six
from django.utils.deprecation import RenameMethodsBase, RemovedInDjango18Warning
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import curry, cached_property
from django.core import exceptions
from django import forms
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
def add_lazy_relation(cls, field, relation, operation):
"""
Adds a lookup on ``cls`` when a related field is defined using a string,
i.e.::
class MyModel(Model):
fk = ForeignKey("AnotherModel")
This string can be:
* RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a recursive
relation.
* The name of a model (i.e "AnotherModel") to indicate another model in
the same app.
* An app-label and model name (i.e. "someapp.AnotherModel") to indicate
another model in a different app.
If the other model hasn't yet been loaded -- almost a given if you're using
lazy relationships -- then the relation won't be set up until the
class_prepared signal fires at the end of model initialization.
operation is the work that must be performed once the relation can be resolved.
"""
# Check for recursive relations
if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
app_label = cls._meta.app_label
model_name = cls.__name__
else:
# Look for an "app.Model" relation
if isinstance(relation, six.string_types):
try:
app_label, model_name = relation.split(".")
except ValueError:
# If we can't split, assume a model in current app
app_label = cls._meta.app_label
model_name = relation
else:
# it's actually a model class
app_label = relation._meta.app_label
model_name = relation._meta.object_name
# Try to look up the related model, and if it's already loaded resolve the
# string right away. If get_model returns None, it means that the related
# model isn't loaded yet, so we need to pend the relation until the class
# is prepared.
try:
model = cls._meta.apps.get_registered_model(app_label, model_name)
except LookupError:
key = (app_label, model_name)
value = (cls, field, operation)
cls._meta.apps._pending_lookups.setdefault(key, []).append(value)
else:
operation(field, model, cls)
def do_pending_lookups(sender, **kwargs):
"""
Handle any pending relations to the sending model. Sent from class_prepared.
"""
key = (sender._meta.app_label, sender.__name__)
for cls, field, operation in sender._meta.apps._pending_lookups.pop(key, []):
operation(field, sender, cls)
signals.class_prepared.connect(do_pending_lookups)
class RelatedField(Field):
def check(self, **kwargs):
errors = super(RelatedField, self).check(**kwargs)
errors.extend(self._check_relation_model_exists())
errors.extend(self._check_referencing_to_swapped_model())
errors.extend(self._check_clashes())
return errors
def _check_relation_model_exists(self):
rel_is_missing = self.rel.to not in apps.get_models()
rel_is_string = isinstance(self.rel.to, six.string_types)
model_name = self.rel.to if rel_is_string else self.rel.to._meta.object_name
if rel_is_missing and (rel_is_string or not self.rel.to._meta.swapped):
return [
checks.Error(
("Field defines a relation with model '%s', which "
"is either not installed, or is abstract.") % model_name,
hint=None,
obj=self,
id='fields.E300',
)
]
return []
def _check_referencing_to_swapped_model(self):
if (self.rel.to not in apps.get_models() and
not isinstance(self.rel.to, six.string_types) and
self.rel.to._meta.swapped):
model = "%s.%s" % (
self.rel.to._meta.app_label,
self.rel.to._meta.object_name
)
return [
checks.Error(
("Field defines a relation with the model '%s', "
"which has been swapped out.") % model,
hint="Update the relation to point at 'settings.%s'." % self.rel.to._meta.swappable,
obj=self,
id='fields.E301',
)
]
return []
def _check_clashes(self):
""" Check accessor and reverse query name clashes. """
from django.db.models.base import ModelBase
errors = []
opts = self.model._meta
# `f.rel.to` may be a string instead of a model. Skip if model name is
# not resolved.
if not isinstance(self.rel.to, ModelBase):
return []
# If the field doesn't install backward relation on the target model (so
# `is_hidden` returns True), then there are no clashes to check and we
# can skip these fields.
if self.rel.is_hidden():
return []
try:
self.related
except AttributeError:
return []
# Consider that we are checking field `Model.foreign` and the models
# are:
#
# class Target(models.Model):
# model = models.IntegerField()
# model_set = models.IntegerField()
#
# class Model(models.Model):
# foreign = models.ForeignKey(Target)
# m2m = models.ManyToManyField(Target)
rel_opts = self.rel.to._meta
# rel_opts.object_name == "Target"
rel_name = self.related.get_accessor_name() # i. e. "model_set"
rel_query_name = self.related_query_name() # i. e. "model"
field_name = "%s.%s" % (opts.object_name,
self.name) # i. e. "Model.field"
# Check clashes between accessor or reverse query name of `field`
# and any other field name -- i. e. accessor for Model.foreign is
# model_set and it clashes with Target.model_set.
potential_clashes = rel_opts.fields + rel_opts.many_to_many
for clash_field in potential_clashes:
clash_name = "%s.%s" % (rel_opts.object_name,
clash_field.name) # i. e. "Target.model_set"
if clash_field.name == rel_name:
errors.append(
checks.Error(
"Reverse accessor for '%s' clashes with field name '%s'." % (field_name, clash_name),
hint=("Rename field '%s', or add/change a related_name "
"argument to the definition for field '%s'.") % (clash_name, field_name),
obj=self,
id='fields.E302',
)
)
if clash_field.name == rel_query_name:
errors.append(
checks.Error(
"Reverse query name for '%s' clashes with field name '%s'." % (field_name, clash_name),
hint=("Rename field '%s', or add/change a related_name "
"argument to the definition for field '%s'.") % (clash_name, field_name),
obj=self,
id='fields.E303',
)
)
# Check clashes between accessors/reverse query names of `field` and
# any other field accessor -- i. e. Model.foreign accessor clashes with
# Model.m2m accessor.
potential_clashes = rel_opts.get_all_related_many_to_many_objects()
potential_clashes += rel_opts.get_all_related_objects()
potential_clashes = (r for r in potential_clashes
if r.field is not self)
for clash_field in potential_clashes:
clash_name = "%s.%s" % ( # i. e. "Model.m2m"
clash_field.model._meta.object_name,
clash_field.field.name)
if clash_field.get_accessor_name() == rel_name:
errors.append(
checks.Error(
"Reverse accessor for '%s' clashes with reverse accessor for '%s'." % (field_name, clash_name),
hint=("Add or change a related_name argument "
"to the definition for '%s' or '%s'.") % (field_name, clash_name),
obj=self,
id='fields.E304',
)
)
if clash_field.get_accessor_name() == rel_query_name:
errors.append(
checks.Error(
"Reverse query name for '%s' clashes with reverse query name for '%s'." % (field_name, clash_name),
hint=("Add or change a related_name argument "
"to the definition for '%s' or '%s'.") % (field_name, clash_name),
obj=self,
id='fields.E305',
)
)
return errors
def db_type(self, connection):
'''By default related field will not have a column
as it relates columns to another table'''
return None
def contribute_to_class(self, cls, name, virtual_only=False):
sup = super(RelatedField, self)
# Store the opts for related_query_name()
self.opts = cls._meta
if hasattr(sup, 'contribute_to_class'):
sup.contribute_to_class(cls, name, virtual_only=virtual_only)
if not cls._meta.abstract and self.rel.related_name:
related_name = force_text(self.rel.related_name) % {
'class': cls.__name__.lower(),
'app_label': cls._meta.app_label.lower()
}
self.rel.related_name = related_name
other = self.rel.to
if isinstance(other, six.string_types) or other._meta.pk is None:
def resolve_related_class(field, model, cls):
field.rel.to = model
field.do_related_class(model, cls)
add_lazy_relation(cls, self, other, resolve_related_class)
else:
self.do_related_class(other, cls)
@property
def swappable_setting(self):
"""
Gets the setting that this is powered from for swapping, or None
if it's not swapped in / marked with swappable=False.
"""
if self.swappable:
# Work out string form of "to"
if isinstance(self.rel.to, six.string_types):
to_string = self.rel.to
else:
to_string = "%s.%s" % (
self.rel.to._meta.app_label,
self.rel.to._meta.object_name,
)
# See if anything swapped/swappable matches
for model in apps.get_models(include_swapped=True):
if model._meta.swapped:
if model._meta.swapped == to_string:
return model._meta.swappable
if ("%s.%s" % (model._meta.app_label, model._meta.object_name)) == to_string and model._meta.swappable:
return model._meta.swappable
return None
def set_attributes_from_rel(self):
self.name = self.name or (self.rel.to._meta.model_name + '_' + self.rel.to._meta.pk.name)
if self.verbose_name is None:
self.verbose_name = self.rel.to._meta.verbose_name
self.rel.set_field_name()
def do_related_class(self, other, cls):
self.set_attributes_from_rel()
self.related = RelatedObject(other, cls, self)
if not cls._meta.abstract:
self.contribute_to_related_class(other, self.related)
def get_limit_choices_to(self):
"""Returns 'limit_choices_to' for this model field.
If it is a callable, it will be invoked and the result will be
returned.
"""
if callable(self.rel.limit_choices_to):
return self.rel.limit_choices_to()
return self.rel.limit_choices_to
def formfield(self, **kwargs):
"""Passes ``limit_choices_to`` to field being constructed.
Only passes it if there is a type that supports related fields.
This is a similar strategy used to pass the ``queryset`` to the field
being constructed.
"""
defaults = {}
if hasattr(self.rel, 'get_related_field'):
# If this is a callable, do not invoke it here. Just pass
# it in the defaults for when the form class will later be
# instantiated.
limit_choices_to = self.rel.limit_choices_to
defaults.update({
'limit_choices_to': limit_choices_to,
})
defaults.update(kwargs)
return super(RelatedField, self).formfield(**defaults)
def related_query_name(self):
# This method defines the name that can be used to identify this
# related object in a table-spanning query. It uses the lower-cased
# object_name by default, but this can be overridden with the
# "related_name" option.
return self.rel.related_query_name or self.rel.related_name or self.opts.model_name
class RenameRelatedObjectDescriptorMethods(RenameMethodsBase):
renamed_methods = (
('get_query_set', 'get_queryset', RemovedInDjango18Warning),
('get_prefetch_query_set', 'get_prefetch_queryset', RemovedInDjango18Warning),
)
class SingleRelatedObjectDescriptor(six.with_metaclass(RenameRelatedObjectDescriptorMethods)):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class pointed to by a related field.
# In the example "place.restaurant", the restaurant attribute is a
# SingleRelatedObjectDescriptor instance.
def __init__(self, related):
self.related = related
self.cache_name = related.get_cache_name()
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception isn't created at initialization time for the sake of
# consistency with `ReverseSingleRelatedObjectDescriptor`.
return type(
str('RelatedObjectDoesNotExist'),
(self.related.model.DoesNotExist, AttributeError),
{}
)
def is_cached(self, instance):
return hasattr(instance, self.cache_name)
def get_queryset(self, **hints):
# Gotcha: we return a `Manager` instance (i.e. not a `QuerySet`)!
return self.related.model._base_manager.db_manager(hints=hints)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
# Despite its name `get_queryset()` returns an instance of
# `Manager`, therefore we call `all()` to normalize to `QuerySet`.
queryset = self.get_queryset().all()
queryset._add_hints(instance=instances[0])
rel_obj_attr = attrgetter(self.related.field.attname)
instance_attr = lambda obj: obj._get_pk_val()
instances_dict = dict((instance_attr(inst), inst) for inst in instances)
query = {'%s__in' % self.related.field.name: instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
rel_obj_cache_name = self.related.field.get_cache_name()
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
return queryset, rel_obj_attr, instance_attr, True, self.cache_name
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
related_pk = instance._get_pk_val()
if related_pk is None:
rel_obj = None
else:
params = {}
for lh_field, rh_field in self.related.field.related_fields:
params['%s__%s' % (self.related.field.name, rh_field.name)] = getattr(instance, rh_field.attname)
try:
rel_obj = self.get_queryset(instance=instance).get(**params)
except self.related.model.DoesNotExist:
rel_obj = None
else:
setattr(rel_obj, self.related.field.get_cache_name(), instance)
setattr(instance, self.cache_name, rel_obj)
if rel_obj is None:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (
instance.__class__.__name__,
self.related.get_accessor_name()
)
)
else:
return rel_obj
def __set__(self, instance, value):
# The similarity of the code below to the code in
# ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.related.field.null is False:
raise ValueError(
'Cannot assign None: "%s.%s" does not allow null values.' % (
instance._meta.object_name,
self.related.get_accessor_name(),
)
)
elif value is not None and not isinstance(value, self.related.model):
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.related.get_accessor_name(),
self.related.opts.object_name,
)
)
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
if None in related_pk:
raise ValueError(
'Cannot assign "%r": "%s" instance isn\'t saved in the database.' %
(value, instance._meta.object_name)
)
# Set the value of the related field to the value of the related object's related field
for index, field in enumerate(self.related.field.local_related_fields):
setattr(value, field.attname, related_pk[index])
# Since we already know what the related object is, seed the related
# object caches now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.cache_name, value)
setattr(value, self.related.field.get_cache_name(), instance)
class ReverseSingleRelatedObjectDescriptor(six.with_metaclass(RenameRelatedObjectDescriptorMethods)):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class that defines the related field.
# In the example "choice.poll", the poll attribute is a
# ReverseSingleRelatedObjectDescriptor instance.
def __init__(self, field_with_rel):
self.field = field_with_rel
self.cache_name = self.field.get_cache_name()
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception can't be created at initialization time since the
# related model might not be resolved yet; `rel.to` might still be
# a string model reference.
return type(
str('RelatedObjectDoesNotExist'),
(self.field.rel.to.DoesNotExist, AttributeError),
{}
)
def is_cached(self, instance):
return hasattr(instance, self.cache_name)
def get_queryset(self, **hints):
rel_mgr = self.field.rel.to._default_manager.db_manager(hints=hints)
# If the related manager indicates that it should be used for
# related fields, respect that.
if getattr(rel_mgr, 'use_for_related_fields', False):
# Gotcha: we return a `Manager` instance (i.e. not a `QuerySet`)!
return rel_mgr
else:
return QuerySet(self.field.rel.to, hints=hints)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
# Despite its name `get_queryset()` may return an instance of
# `Manager`, therefore we call `all()` to normalize to `QuerySet`.
queryset = self.get_queryset().all()
queryset._add_hints(instance=instances[0])
rel_obj_attr = self.field.get_foreign_related_value
instance_attr = self.field.get_local_related_value
instances_dict = dict((instance_attr(inst), inst) for inst in instances)
related_field = self.field.foreign_related_fields[0]
# FIXME: This will need to be revisited when we introduce support for
# composite fields. In the meantime we take this practical approach to
# solve a regression on 1.6 when the reverse manager in hidden
# (related_name ends with a '+'). Refs #21410.
# The check for len(...) == 1 is a special case that allows the query
# to be join-less and smaller. Refs #21760.
if self.field.rel.is_hidden() or len(self.field.foreign_related_fields) == 1:
query = {'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)}
else:
query = {'%s__in' % self.field.related_query_name(): instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
if not self.field.rel.multiple:
rel_obj_cache_name = self.field.related.get_cache_name()
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
return queryset, rel_obj_attr, instance_attr, True, self.cache_name
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
val = self.field.get_local_related_value(instance)
if None in val:
rel_obj = None
else:
params = dict(
(rh_field.attname, getattr(instance, lh_field.attname))
for lh_field, rh_field in self.field.related_fields)
qs = self.get_queryset(instance=instance)
extra_filter = self.field.get_extra_descriptor_filter(instance)
if isinstance(extra_filter, dict):
params.update(extra_filter)
qs = qs.filter(**params)
else:
qs = qs.filter(extra_filter, **params)
# Assuming the database enforces foreign keys, this won't fail.
rel_obj = qs.get()
if not self.field.rel.multiple:
setattr(rel_obj, self.field.related.get_cache_name(), instance)
setattr(instance, self.cache_name, rel_obj)
if rel_obj is None and not self.field.null:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (self.field.model.__name__, self.field.name)
)
else:
return rel_obj
def __set__(self, instance, value):
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.field.null is False:
raise ValueError(
'Cannot assign None: "%s.%s" does not allow null values.' %
(instance._meta.object_name, self.field.name)
)
elif value is not None and not isinstance(value, self.field.rel.to):
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.field.name,
self.field.rel.to._meta.object_name,
)
)
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
# If we're setting the value of a OneToOneField to None, we need to clear
# out the cache on any old related object. Otherwise, deleting the
# previously-related object will also cause this object to be deleted,
# which is wrong.
if value is None:
# Look up the previously-related object, which may still be available
# since we've not yet cleared out the related field.
# Use the cache directly, instead of the accessor; if we haven't
# populated the cache, then we don't care - we're only accessing
# the object to invalidate the accessor cache, so there's no
# need to populate the cache just to expire it again.
related = getattr(instance, self.cache_name, None)
# If we've got an old related object, we need to clear out its
# cache. This cache also might not exist if the related object
# hasn't been accessed yet.
if related is not None:
setattr(related, self.field.related.get_cache_name(), None)
# Set the value of the related field
for lh_field, rh_field in self.field.related_fields:
try:
setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
except AttributeError:
setattr(instance, lh_field.attname, None)
# Since we already know what the related object is, seed the related
# object caches now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.cache_name, value)
if value is not None and not self.field.rel.multiple:
setattr(value, self.field.related.get_cache_name(), instance)
def create_foreign_related_manager(superclass, rel_field, rel_model):
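    """Creates a manager that subclasses 'superclass' (which is a Manager)
    and adds behavior for objects that point back at an instance through
    rel_field (the reverse side of a ForeignKey)."""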
class RelatedManager(superclass):
def __init__(self, instance):
super(RelatedManager, self).__init__()
self.instance = instance
self.core_filters = {'%s__exact' % rel_field.name: instance}
self.model = rel_model
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_foreign_related_manager(manager.__class__, rel_field, rel_model)
return manager_class(self.instance)
do_not_call_in_templates = True
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[rel_field.related_query_name()]
except (AttributeError, KeyError):
db = self._db or router.db_for_read(self.model, instance=self.instance)
empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
qs = super(RelatedManager, self).get_queryset()
qs._add_hints(instance=self.instance)
if self._db:
qs = qs.using(self._db)
qs = qs.filter(**self.core_filters)
for field in rel_field.foreign_related_fields:
val = getattr(self.instance, field.attname)
if val is None or (val == '' and empty_strings_as_null):
return qs.none()
qs._known_related_objects = {rel_field: {self.instance.pk: self.instance}}
return qs
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(RelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
rel_obj_attr = rel_field.get_local_related_value
instance_attr = rel_field.get_foreign_related_value
instances_dict = dict((instance_attr(inst), inst) for inst in instances)
query = {'%s__in' % rel_field.name: instances}
queryset = queryset.filter(**query)
# Since we just bypassed this class' get_queryset(), we must manage
# the reverse relation manually.
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_field.name, instance)
cache_name = rel_field.related_query_name()
return queryset, rel_obj_attr, instance_attr, False, cache_name
def add(self, *objs):
objs = list(objs)
db = router.db_for_write(self.model, instance=self.instance)
with transaction.commit_on_success_unless_managed(
using=db, savepoint=False):
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected, got %r" %
(self.model._meta.object_name, obj))
setattr(obj, rel_field.name, self.instance)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs[rel_field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[rel_field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[rel_field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
update_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a value of null.
if rel_field.null:
def remove(self, *objs, **kwargs):
if not objs:
return
bulk = kwargs.pop('bulk', True)
val = rel_field.get_foreign_related_value(self.instance)
old_ids = set()
for obj in objs:
# Is obj actually part of this descriptor set?
if rel_field.get_local_related_value(obj) == val:
old_ids.add(obj.pk)
else:
raise rel_field.rel.to.DoesNotExist("%r is not related to %r." % (obj, self.instance))
self._clear(self.filter(pk__in=old_ids), bulk)
remove.alters_data = True
def clear(self, **kwargs):
bulk = kwargs.pop('bulk', True)
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
queryset.update(**{rel_field.name: None})
else:
with transaction.commit_on_success_unless_managed(using=db, savepoint=False):
for obj in queryset:
setattr(obj, rel_field.name, None)
obj.save(update_fields=[rel_field.name])
_clear.alters_data = True
return RelatedManager
class ForeignRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ForeignKey pointed at them by
# some other model. In the example "poll.choice_set", the choice_set
# attribute is a ForeignRelatedObjectsDescriptor instance.
def __init__(self, related):
self.related = related # RelatedObject instance
def __get__(self, instance, instance_type=None):
if instance is None:
return self
return self.related_manager_cls(instance)
def __set__(self, instance, value):
manager = self.__get__(instance)
# If the foreign key can support nulls, then completely clear the related set.
# Otherwise, just move the named objects into the set.
if self.related.field.null:
manager.clear()
manager.add(*value)
@cached_property
def related_manager_cls(self):
# Dynamically create a class that subclasses the related model's default
# manager.
return create_foreign_related_manager(
self.related.model._default_manager.__class__,
self.related.field,
self.related.model,
)
def create_many_related_manager(superclass, rel):
"""Creates a manager that subclasses 'superclass' (which is a Manager)
and adds behavior for many-to-many related objects."""
class ManyRelatedManager(superclass):
def __init__(self, model=None, query_field_name=None, instance=None, symmetrical=None,
source_field_name=None, target_field_name=None, reverse=False,
through=None, prefetch_cache_name=None):
super(ManyRelatedManager, self).__init__()
self.model = model
self.query_field_name = query_field_name
source_field = through._meta.get_field(source_field_name)
source_related_fields = source_field.related_fields
self.core_filters = {}
for lh_field, rh_field in source_related_fields:
self.core_filters['%s__%s' % (query_field_name, rh_field.name)] = getattr(instance, rh_field.attname)
self.instance = instance
self.symmetrical = symmetrical
self.source_field = source_field
self.target_field = through._meta.get_field(target_field_name)
self.source_field_name = source_field_name
self.target_field_name = target_field_name
self.reverse = reverse
self.through = through
self.prefetch_cache_name = prefetch_cache_name
self.related_val = source_field.get_foreign_related_value(instance)
if None in self.related_val:
raise ValueError('"%r" needs to have a value for field "%s" before '
'this many-to-many relationship can be used.' %
(instance, source_field_name))
# Even if this relation is not to pk, we require still pk value.
# The wish is that the instance has been already saved to DB,
# although having a pk value isn't a guarantee of that.
if instance.pk is None:
raise ValueError("%r instance needs to have a primary key value before "
"a many-to-many relationship can be used." %
instance.__class__.__name__)
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_many_related_manager(manager.__class__, rel)
return manager_class(
model=self.model,
query_field_name=self.query_field_name,
instance=self.instance,
symmetrical=self.symmetrical,
source_field_name=self.source_field_name,
target_field_name=self.target_field_name,
reverse=self.reverse,
through=self.through,
prefetch_cache_name=self.prefetch_cache_name,
)
do_not_call_in_templates = True
def _build_remove_filters(self, removed_vals):
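            # Build the Q filter that selects the through-table rows to delete
            # for remove()/clear(), mirroring the condition when the relation
            # is symmetrical.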
filters = Q(**{self.source_field_name: self.related_val})
# No need to add a subquery condition if removed_vals is a QuerySet without
# filters.
removed_vals_filters = (not isinstance(removed_vals, QuerySet) or
removed_vals._has_filters())
if removed_vals_filters:
filters &= Q(**{'%s__in' % self.target_field_name: removed_vals})
if self.symmetrical:
symmetrical_filters = Q(**{self.target_field_name: self.related_val})
if removed_vals_filters:
symmetrical_filters &= Q(
**{'%s__in' % self.source_field_name: removed_vals})
filters |= symmetrical_filters
return filters
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
qs = super(ManyRelatedManager, self).get_queryset()
qs._add_hints(instance=self.instance)
if self._db:
qs = qs.using(self._db)
return qs._next_is_sticky().filter(**self.core_filters)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(ManyRelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {'%s__in' % self.query_field_name: instances}
queryset = queryset._next_is_sticky().filter(**query)
# M2M: need to annotate the query in order to get the primary model
# that the secondary model was actually related to. We know that
# there will already be a join on the join table, so we can just add
# the select.
# For non-autocreated 'through' models, can't assume we are
# dealing with PK values.
fk = self.through._meta.get_field(self.source_field_name)
join_table = self.through._meta.db_table
connection = connections[queryset.db]
qn = connection.ops.quote_name
queryset = queryset.extra(select=dict(
('_prefetch_related_val_%s' % f.attname,
'%s.%s' % (qn(join_table), qn(f.column))) for f in fk.local_related_fields))
return (queryset,
lambda result: tuple(getattr(result, '_prefetch_related_val_%s' % f.attname) for f in fk.local_related_fields),
lambda inst: tuple(getattr(inst, f.attname) for f in fk.foreign_related_fields),
False,
self.prefetch_cache_name)
def add(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use add() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
self._add_items(self.source_field_name, self.target_field_name, *objs)
# If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
if self.symmetrical:
self._add_items(self.target_field_name, self.source_field_name, *objs)
add.alters_data = True
def remove(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use remove() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
self._remove_items(self.source_field_name, self.target_field_name, *objs)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
signals.m2m_changed.send(sender=self.through, action="pre_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
filters = self._build_remove_filters(super(ManyRelatedManager, self).get_queryset().using(db))
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action="post_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
clear.alters_data = True
def create(self, **kwargs):
# This check needs to be done here, since we can't later remove this
# from the method lookup table, as we do with add and remove.
if not self.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use create() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj)
return new_obj
create.alters_data = True
def get_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
update_or_create.alters_data = True
def _add_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK fieldname in join table for the source object
# target_field_name: the PK fieldname in join table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
# If there aren't any objects, there is nothing to do.
from django.db.models import Model
if objs:
new_ids = set()
for obj in objs:
if isinstance(obj, self.model):
if not router.allow_relation(obj, self.instance):
raise ValueError(
'Cannot add "%r": instance is on database "%s", value is on database "%s"' %
(obj, self.instance._state.db, obj._state.db)
)
fk_val = self.through._meta.get_field(
target_field_name).get_foreign_related_value(obj)[0]
if fk_val is None:
raise ValueError(
'Cannot add "%r": the value for field "%s" is None' %
(obj, target_field_name)
)
new_ids.add(fk_val)
elif isinstance(obj, Model):
raise TypeError(
"'%s' instance expected, got %r" %
(self.model._meta.object_name, obj)
)
else:
new_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True)
vals = vals.filter(**{
source_field_name: self.related_val[0],
'%s__in' % target_field_name: new_ids,
})
new_ids = new_ids - set(vals)
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=self.through, action='pre_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
# Add the ones that aren't there already
self.through._default_manager.using(db).bulk_create([
self.through(**{
'%s_id' % source_field_name: self.related_val[0],
'%s_id' % target_field_name: obj_id,
})
for obj_id in new_ids
])
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=self.through, action='post_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
def _remove_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK colname in join table for the source object
# target_field_name: the PK colname in join table for the target object
# *objs - objects to remove
if not objs:
return
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
fk_val = self.target_field.get_foreign_related_value(obj)[0]
old_ids.add(fk_val)
else:
old_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
# Send a signal to the other end if need be.
signals.m2m_changed.send(sender=self.through, action="pre_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
target_model_qs = super(ManyRelatedManager, self).get_queryset()
if target_model_qs._has_filters():
old_vals = target_model_qs.using(db).filter(**{
'%s__in' % self.target_field.related_field.attname: old_ids})
else:
old_vals = old_ids
filters = self._build_remove_filters(old_vals)
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action="post_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
return ManyRelatedManager
class ManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField pointed at them by
# some other model (rather than having a ManyToManyField themselves).
# In the example "publication.article_set", the article_set attribute is a
# ManyRelatedObjectsDescriptor instance.
def __init__(self, related):
self.related = related # RelatedObject instance
@cached_property
def related_manager_cls(self):
# Dynamically create a class that subclasses the related
# model's default manager.
return create_many_related_manager(
self.related.model._default_manager.__class__,
self.related.field.rel
)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
rel_model = self.related.model
manager = self.related_manager_cls(
model=rel_model,
query_field_name=self.related.field.name,
prefetch_cache_name=self.related.field.related_query_name(),
instance=instance,
symmetrical=False,
source_field_name=self.related.field.m2m_reverse_field_name(),
target_field_name=self.related.field.m2m_field_name(),
reverse=True,
through=self.related.field.rel.through,
)
return manager
def __set__(self, instance, value):
if not self.related.field.rel.through._meta.auto_created:
opts = self.related.field.rel.through._meta
raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
manager = self.__get__(instance)
manager.clear()
manager.add(*value)
class ReverseManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField defined in their
# model (rather than having another model pointed *at* them).
# In the example "article.publications", the publications attribute is a
# ReverseManyRelatedObjectsDescriptor instance.
def __init__(self, m2m_field):
self.field = m2m_field
@property
def through(self):
# through is provided so that you have easy access to the through
# model (Book.authors.through) for inlines, etc. This is done as
# a property to ensure that the fully resolved value is returned.
return self.field.rel.through
@cached_property
def related_manager_cls(self):
# Dynamically create a class that subclasses the related model's
# default manager.
return create_many_related_manager(
self.field.rel.to._default_manager.__class__,
self.field.rel
)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
manager = self.related_manager_cls(
model=self.field.rel.to,
query_field_name=self.field.related_query_name(),
prefetch_cache_name=self.field.name,
instance=instance,
symmetrical=self.field.rel.symmetrical,
source_field_name=self.field.m2m_field_name(),
target_field_name=self.field.m2m_reverse_field_name(),
reverse=False,
through=self.field.rel.through,
)
return manager
def __set__(self, instance, value):
if not self.field.rel.through._meta.auto_created:
opts = self.field.rel.through._meta
raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
manager = self.__get__(instance)
# clear() can change expected output of 'value' queryset, we force evaluation
# of queryset before clear; ticket #19816
value = tuple(value)
manager.clear()
manager.add(*value)
class ForeignObjectRel(object):
def __init__(self, field, to, related_name=None, limit_choices_to=None,
parent_link=False, on_delete=None, related_query_name=None):
try:
to._meta
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, six.string_types), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
self.field = field
self.to = to
self.related_name = related_name
self.related_query_name = related_query_name
self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
self.multiple = True
self.parent_link = parent_link
self.on_delete = on_delete
def is_hidden(self):
"Should the related object be hidden?"
return self.related_name and self.related_name[-1] == '+'
def get_joining_columns(self):
return self.field.get_reverse_joining_columns()
def get_extra_restriction(self, where_class, alias, related_alias):
return self.field.get_extra_restriction(where_class, related_alias, alias)
def set_field_name(self):
"""
        Sets the related field's name. This is not available until later stages
of app loading, so set_field_name is called from
set_attributes_from_rel()
"""
# By default foreign object doesn't relate to any remote field (for
# example custom multicolumn joins currently have no remote field).
self.field_name = None
def get_lookup_constraint(self, constraint_class, alias, targets, sources, lookup_type,
raw_value):
return self.field.get_lookup_constraint(constraint_class, alias, targets, sources,
lookup_type, raw_value)
class ManyToOneRel(ForeignObjectRel):
def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None,
parent_link=False, on_delete=None, related_query_name=None):
super(ManyToOneRel, self).__init__(
field, to, related_name=related_name, limit_choices_to=limit_choices_to,
parent_link=parent_link, on_delete=on_delete, related_query_name=related_query_name)
self.field_name = field_name
def get_related_field(self):
"""
Returns the Field in the 'to' object to which this relationship is
tied.
"""
data = self.to._meta.get_field_by_name(self.field_name)
if not data[2]:
raise FieldDoesNotExist("No related field named '%s'" %
self.field_name)
return data[0]
def set_field_name(self):
self.field_name = self.field_name or self.to._meta.pk.name
class OneToOneRel(ManyToOneRel):
def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None,
parent_link=False, on_delete=None, related_query_name=None):
super(OneToOneRel, self).__init__(field, to, field_name,
related_name=related_name, limit_choices_to=limit_choices_to,
parent_link=parent_link, on_delete=on_delete, related_query_name=related_query_name)
self.multiple = False
class ManyToManyRel(object):
def __init__(self, to, related_name=None, limit_choices_to=None,
symmetrical=True, through=None, through_fields=None,
db_constraint=True, related_query_name=None):
if through and not db_constraint:
raise ValueError("Can't supply a through model and db_constraint=False")
if through_fields and not through:
raise ValueError("Cannot specify through_fields without a through model")
self.to = to
self.related_name = related_name
self.related_query_name = related_query_name
if limit_choices_to is None:
limit_choices_to = {}
self.limit_choices_to = limit_choices_to
self.symmetrical = symmetrical
self.multiple = True
self.through = through
self.through_fields = through_fields
self.db_constraint = db_constraint
def is_hidden(self):
"Should the related object be hidden?"
return self.related_name and self.related_name[-1] == '+'
def get_related_field(self):
"""
        Returns the field in the 'to' object to which this relationship is tied
(this is always the primary key on the target model). Provided for
symmetry with ManyToOneRel.
"""
return self.to._meta.pk
class ForeignObject(RelatedField):
requires_unique_target = True
generate_reverse_relation = True
related_accessor_class = ForeignRelatedObjectsDescriptor
def __init__(self, to, from_fields, to_fields, swappable=True, **kwargs):
self.from_fields = from_fields
self.to_fields = to_fields
self.swappable = swappable
if 'rel' not in kwargs:
kwargs['rel'] = ForeignObjectRel(
self, to,
related_name=kwargs.pop('related_name', None),
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
parent_link=kwargs.pop('parent_link', False),
on_delete=kwargs.pop('on_delete', CASCADE),
)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
super(ForeignObject, self).__init__(**kwargs)
def check(self, **kwargs):
errors = super(ForeignObject, self).check(**kwargs)
errors.extend(self._check_unique_target())
return errors
def _check_unique_target(self):
rel_is_string = isinstance(self.rel.to, six.string_types)
if rel_is_string or not self.requires_unique_target:
return []
        # Skip if the related fields cannot be resolved yet.
try:
self.foreign_related_fields
except FieldDoesNotExist:
return []
try:
self.related
except AttributeError:
return []
has_unique_field = any(rel_field.unique
for rel_field in self.foreign_related_fields)
if not has_unique_field and len(self.foreign_related_fields) > 1:
field_combination = ', '.join("'%s'" % rel_field.name
for rel_field in self.foreign_related_fields)
model_name = self.rel.to.__name__
return [
checks.Error(
"None of the fields %s on model '%s' have a unique=True constraint." % (field_combination, model_name),
hint=None,
obj=self,
id='fields.E310',
)
]
elif not has_unique_field:
field_name = self.foreign_related_fields[0].name
model_name = self.rel.to.__name__
return [
checks.Error(
("'%s.%s' must set unique=True "
"because it is referenced by a foreign key.") % (model_name, field_name),
hint=None,
obj=self,
id='fields.E311',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(ForeignObject, self).deconstruct()
kwargs['from_fields'] = self.from_fields
kwargs['to_fields'] = self.to_fields
if self.rel.related_name is not None:
kwargs['related_name'] = self.rel.related_name
if self.rel.related_query_name is not None:
kwargs['related_query_name'] = self.rel.related_query_name
if self.rel.on_delete != CASCADE:
kwargs['on_delete'] = self.rel.on_delete
if self.rel.parent_link:
kwargs['parent_link'] = self.rel.parent_link
# Work out string form of "to"
if isinstance(self.rel.to, six.string_types):
kwargs['to'] = self.rel.to
else:
kwargs['to'] = "%s.%s" % (self.rel.to._meta.app_label, self.rel.to._meta.object_name)
# If swappable is True, then see if we're actually pointing to the target
# of a swap.
swappable_setting = self.swappable_setting
if swappable_setting is not None:
# If it's already a settings reference, error
if hasattr(kwargs['to'], "setting_name"):
if kwargs['to'].setting_name != swappable_setting:
raise ValueError("Cannot deconstruct a ForeignKey pointing to a model that is swapped in place of more than one model (%s and %s)" % (kwargs['to'].setting_name, swappable_setting))
# Set it
from django.db.migrations.writer import SettingsReference
kwargs['to'] = SettingsReference(
kwargs['to'],
swappable_setting,
)
return name, path, args, kwargs
def resolve_related_fields(self):
if len(self.from_fields) < 1 or len(self.from_fields) != len(self.to_fields):
raise ValueError('Foreign Object from and to fields must be the same non-zero length')
if isinstance(self.rel.to, six.string_types):
raise ValueError('Related model %r cannot be resolved' % self.rel.to)
related_fields = []
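        # Pair each local field with the remote field it points at; 'self'
        # means this field itself, and a to_field of None means the remote
        # model's primary key.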
for index in range(len(self.from_fields)):
from_field_name = self.from_fields[index]
to_field_name = self.to_fields[index]
from_field = (self if from_field_name == 'self'
else self.opts.get_field_by_name(from_field_name)[0])
to_field = (self.rel.to._meta.pk if to_field_name is None
else self.rel.to._meta.get_field_by_name(to_field_name)[0])
related_fields.append((from_field, to_field))
return related_fields
@property
def related_fields(self):
if not hasattr(self, '_related_fields'):
self._related_fields = self.resolve_related_fields()
return self._related_fields
@property
def reverse_related_fields(self):
return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields]
@property
def local_related_fields(self):
return tuple(lhs_field for lhs_field, rhs_field in self.related_fields)
@property
def foreign_related_fields(self):
return tuple(rhs_field for lhs_field, rhs_field in self.related_fields)
def get_local_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.local_related_fields)
def get_foreign_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.foreign_related_fields)
@staticmethod
def get_instance_value_for_fields(instance, fields):
ret = []
opts = instance._meta
for field in fields:
# Gotcha: in some cases (like fixture loading) a model can have
# different values in parent_ptr_id and parent's id. So, use
# instance.pk (that is, parent_ptr_id) when asked for instance.id.
if field.primary_key:
possible_parent_link = opts.get_ancestor_link(field.model)
if (not possible_parent_link or
possible_parent_link.primary_key or
possible_parent_link.model._meta.abstract):
ret.append(instance.pk)
continue
ret.append(getattr(instance, field.attname))
return tuple(ret)
def get_attname_column(self):
attname, column = super(ForeignObject, self).get_attname_column()
return attname, None
def get_joining_columns(self, reverse_join=False):
source = self.reverse_related_fields if reverse_join else self.related_fields
return tuple((lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source)
def get_reverse_joining_columns(self):
return self.get_joining_columns(reverse_join=True)
def get_extra_descriptor_filter(self, instance):
"""
        Returns an extra filter condition for related object fetching when
        the user does 'instance.fieldname'; that is, the extra filter is used
        in the descriptor of the field.
The filter should be either a dict usable in .filter(**kwargs) call or
a Q-object. The condition will be ANDed together with the relation's
joining columns.
A parallel method is get_extra_restriction() which is used in
JOIN and subquery conditions.
"""
return {}
def get_extra_restriction(self, where_class, alias, related_alias):
"""
        Returns a pair condition used for joining and subquery pushdown. The
        condition is something that responds to the as_sql(qn, connection)
        method.
        Note that currently referring to both the 'alias' and 'related_alias'
        will not work in some conditions, like subquery pushdown.
A parallel method is get_extra_descriptor_filter() which is used in
instance.fieldname related object fetching.
"""
return None
def get_path_info(self):
"""
Get path from this field to the related model.
"""
opts = self.rel.to._meta
from_opts = self.model._meta
return [PathInfo(from_opts, opts, self.foreign_related_fields, self, False, True)]
def get_reverse_path_info(self):
"""
Get path from the related model to this field's model.
"""
opts = self.model._meta
from_opts = self.rel.to._meta
pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)]
return pathinfos
def get_lookup_constraint(self, constraint_class, alias, targets, sources, lookups,
raw_value):
from django.db.models.sql.where import SubqueryConstraint, AND, OR
root_constraint = constraint_class()
assert len(targets) == len(sources)
if len(lookups) > 1:
raise exceptions.FieldError('Relation fields do not support nested lookups')
lookup_type = lookups[0]
def get_normalized_value(value):
from django.db.models import Model
if isinstance(value, Model):
value_list = []
for source in sources:
# Account for one-to-one relations when sent a different model
while not isinstance(value, source.model) and source.rel:
source = source.rel.to._meta.get_field(source.rel.field_name)
value_list.append(getattr(value, source.attname))
return tuple(value_list)
elif not isinstance(value, tuple):
return (value,)
return value
is_multicolumn = len(self.related_fields) > 1
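        # Querysets and other SQL-producing values are pushed down as a
        # subquery constraint rather than compared column by column.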
if (hasattr(raw_value, '_as_sql') or
hasattr(raw_value, 'get_compiler')):
root_constraint.add(SubqueryConstraint(alias, [target.column for target in targets],
[source.name for source in sources], raw_value),
AND)
elif lookup_type == 'isnull':
root_constraint.add(IsNull(Col(alias, targets[0], sources[0]), raw_value), AND)
elif (lookup_type == 'exact' or (lookup_type in ['gt', 'lt', 'gte', 'lte']
and not is_multicolumn)):
value = get_normalized_value(raw_value)
for target, source, val in zip(targets, sources, value):
lookup_class = target.get_lookup(lookup_type)
root_constraint.add(
lookup_class(Col(alias, target, source), val), AND)
elif lookup_type in ['range', 'in'] and not is_multicolumn:
values = [get_normalized_value(value) for value in raw_value]
value = [val[0] for val in values]
lookup_class = targets[0].get_lookup(lookup_type)
root_constraint.add(lookup_class(Col(alias, targets[0], sources[0]), value), AND)
elif lookup_type == 'in':
values = [get_normalized_value(value) for value in raw_value]
for value in values:
value_constraint = constraint_class()
for source, target, val in zip(sources, targets, value):
lookup_class = target.get_lookup('exact')
lookup = lookup_class(Col(alias, target, source), val)
value_constraint.add(lookup, AND)
root_constraint.add(value_constraint, OR)
else:
raise TypeError('Related Field got invalid lookup: %s' % lookup_type)
return root_constraint
@property
def attnames(self):
return tuple(field.attname for field in self.local_related_fields)
def get_defaults(self):
return tuple(field.get_default() for field in self.local_related_fields)
def contribute_to_class(self, cls, name, virtual_only=False):
super(ForeignObject, self).contribute_to_class(cls, name, virtual_only=virtual_only)
setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
def contribute_to_related_class(self, cls, related):
# Internal FK's - i.e., those with a related name ending with '+' -
# and swapped models don't get a related descriptor.
if not self.rel.is_hidden() and not related.model._meta.swapped:
setattr(cls, related.get_accessor_name(), self.related_accessor_class(related))
# While 'limit_choices_to' might be a callable, simply pass
# it along for later - this is too early because it's still
# model load time.
if self.rel.limit_choices_to:
cls._meta.related_fkey_lookups.append(self.rel.limit_choices_to)
class ForeignKey(ForeignObject):
empty_strings_allowed = False
default_error_messages = {
'invalid': _('%(model)s instance with pk %(pk)r does not exist.')
}
description = _("Foreign Key (type determined by related field)")
def __init__(self, to, to_field=None, rel_class=ManyToOneRel,
db_constraint=True, **kwargs):
try:
to._meta.model_name
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, six.string_types), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
else:
# For backwards compatibility purposes, we need to *try* and set
# the to_field during FK construction. It won't be guaranteed to
# be correct until contribute_to_class is called. Refs #12190.
to_field = to_field or (to._meta.pk and to._meta.pk.name)
if 'db_index' not in kwargs:
kwargs['db_index'] = True
self.db_constraint = db_constraint
kwargs['rel'] = rel_class(
self, to, to_field,
related_name=kwargs.pop('related_name', None),
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
parent_link=kwargs.pop('parent_link', False),
on_delete=kwargs.pop('on_delete', CASCADE),
)
super(ForeignKey, self).__init__(to, ['self'], [to_field], **kwargs)
def check(self, **kwargs):
errors = super(ForeignKey, self).check(**kwargs)
errors.extend(self._check_on_delete())
return errors
def _check_on_delete(self):
on_delete = getattr(self.rel, 'on_delete', None)
if on_delete == SET_NULL and not self.null:
return [
checks.Error(
'Field specifies on_delete=SET_NULL, but cannot be null.',
hint='Set null=True argument on the field, or change the on_delete rule.',
obj=self,
id='fields.E320',
)
]
elif on_delete == SET_DEFAULT and not self.has_default():
return [
checks.Error(
'Field specifies on_delete=SET_DEFAULT, but has no default value.',
hint='Set a default value, or change the on_delete rule.',
obj=self,
id='fields.E321',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(ForeignKey, self).deconstruct()
del kwargs['to_fields']
del kwargs['from_fields']
# Handle the simpler arguments
if self.db_index:
del kwargs['db_index']
else:
kwargs['db_index'] = False
if self.db_constraint is not True:
kwargs['db_constraint'] = self.db_constraint
# Rel needs more work.
to_meta = getattr(self.rel.to, "_meta", None)
if self.rel.field_name and (not to_meta or (to_meta.pk and self.rel.field_name != to_meta.pk.name)):
kwargs['to_field'] = self.rel.field_name
return name, path, args, kwargs
@property
def related_field(self):
return self.foreign_related_fields[0]
def get_reverse_path_info(self):
"""
Get path from the related model to this field's model.
"""
opts = self.model._meta
from_opts = self.rel.to._meta
pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)]
return pathinfos
def validate(self, value, model_instance):
if self.rel.parent_link:
return
super(ForeignKey, self).validate(value, model_instance)
if value is None:
return
using = router.db_for_read(model_instance.__class__, instance=model_instance)
qs = self.rel.to._default_manager.using(using).filter(
**{self.rel.field_name: value}
)
qs = qs.complex_filter(self.get_limit_choices_to())
if not qs.exists():
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'model': self.rel.to._meta.verbose_name, 'pk': value},
)
def get_attname(self):
return '%s_id' % self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_validator_unique_lookup_type(self):
return '%s__%s__exact' % (self.name, self.related_field.name)
def get_default(self):
"Here we check if the default value is an object and return the to_field if so."
field_default = super(ForeignKey, self).get_default()
if isinstance(field_default, self.rel.to):
return getattr(field_default, self.related_field.attname)
return field_default
def get_db_prep_save(self, value, connection):
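        # An empty string is saved as NULL when the target field cannot store
        # empty strings, or when the backend already treats them as NULL.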
if value is None or (value == '' and
(not self.related_field.empty_strings_allowed or
connection.features.interprets_empty_strings_as_nulls)):
return None
else:
return self.related_field.get_db_prep_save(value, connection=connection)
def value_to_string(self, obj):
if not obj:
# In required many-to-one fields with only one available choice,
# select that one available choice. Note: For SelectFields
# we have to check that the length of choices is *2*, not 1,
# because SelectFields always have an initial "blank" value.
if not self.blank and self.choices:
choice_list = self.get_choices_default()
if len(choice_list) == 2:
return smart_text(choice_list[1][0])
return super(ForeignKey, self).value_to_string(obj)
def contribute_to_related_class(self, cls, related):
super(ForeignKey, self).contribute_to_related_class(cls, related)
if self.rel.field_name is None:
self.rel.field_name = cls._meta.pk.name
def formfield(self, **kwargs):
db = kwargs.pop('using', None)
if isinstance(self.rel.to, six.string_types):
raise ValueError("Cannot create form field for %r yet, because "
"its related model %r has not been loaded yet" %
(self.name, self.rel.to))
defaults = {
'form_class': forms.ModelChoiceField,
'queryset': self.rel.to._default_manager.using(db),
'to_field_name': self.rel.field_name,
}
defaults.update(kwargs)
return super(ForeignKey, self).formfield(**defaults)
def db_type(self, connection):
# The database column type of a ForeignKey is the column type
# of the field to which it points. An exception is if the ForeignKey
# points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
# in which case the column type is simply that of an IntegerField.
# If the database needs similar types for key fields however, the only
        # thing we can do is make AutoField an IntegerField.
rel_field = self.related_field
if (isinstance(rel_field, AutoField) or
(not connection.features.related_fields_match_type and
isinstance(rel_field, (PositiveIntegerField,
PositiveSmallIntegerField)))):
return IntegerField().db_type(connection=connection)
return rel_field.db_type(connection=connection)
def db_parameters(self, connection):
return {"type": self.db_type(connection), "check": []}
class OneToOneField(ForeignKey):
"""
    A OneToOneField is essentially the same as a ForeignKey, with the exception
    that it always carries a "unique" constraint with it and the reverse
    relation always returns the object pointed to (since there will only ever
    be one), rather than returning a list.
"""
related_accessor_class = SingleRelatedObjectDescriptor
description = _("One-to-one relationship")
def __init__(self, to, to_field=None, **kwargs):
kwargs['unique'] = True
super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(OneToOneField, self).deconstruct()
if "unique" in kwargs:
del kwargs['unique']
return name, path, args, kwargs
def formfield(self, **kwargs):
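        # A parent_link OneToOne is the implicit link used by multi-table
        # inheritance; it is never exposed as an editable form field.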
if self.rel.parent_link:
return None
return super(OneToOneField, self).formfield(**kwargs)
def save_form_data(self, instance, data):
if isinstance(data, self.rel.to):
setattr(instance, self.name, data)
else:
setattr(instance, self.attname, data)
def create_many_to_many_intermediary_model(field, klass):
from django.db import models
managed = True
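    # Resolve the target model: it may be a lazy string reference to another
    # model, a string self-reference (RECURSIVE_RELATIONSHIP_CONSTANT), or an
    # already-resolved model class.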
if isinstance(field.rel.to, six.string_types) and field.rel.to != RECURSIVE_RELATIONSHIP_CONSTANT:
to_model = field.rel.to
to = to_model.split('.')[-1]
def set_managed(field, model, cls):
field.rel.through._meta.managed = model._meta.managed or cls._meta.managed
add_lazy_relation(klass, field, to_model, set_managed)
elif isinstance(field.rel.to, six.string_types):
to = klass._meta.object_name
to_model = klass
managed = klass._meta.managed
else:
to = field.rel.to._meta.object_name
to_model = field.rel.to
managed = klass._meta.managed or to_model._meta.managed
name = '%s_%s' % (klass._meta.object_name, field.name)
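    # For self-referential relations both foreign keys would otherwise clash,
    # so prefix their names with 'from_' and 'to_'.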
if field.rel.to == RECURSIVE_RELATIONSHIP_CONSTANT or to == klass._meta.object_name:
from_ = 'from_%s' % to.lower()
to = 'to_%s' % to.lower()
else:
from_ = klass._meta.model_name
to = to.lower()
meta = type(str('Meta'), (object,), {
'db_table': field._get_m2m_db_table(klass._meta),
'managed': managed,
'auto_created': klass,
'app_label': klass._meta.app_label,
'db_tablespace': klass._meta.db_tablespace,
'unique_together': (from_, to),
'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to},
'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to},
'apps': field.model._meta.apps,
})
# Construct and return the new class.
return type(str(name), (models.Model,), {
'Meta': meta,
'__module__': klass.__module__,
from_: models.ForeignKey(klass, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.rel.db_constraint),
to: models.ForeignKey(to_model, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.rel.db_constraint)
})
class ManyToManyField(RelatedField):
description = _("Many-to-many relationship")
def __init__(self, to, db_constraint=True, swappable=True, **kwargs):
try:
to._meta
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, six.string_types), "%s(%r) is invalid. First parameter to ManyToManyField must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
# Class names must be ASCII in Python 2.x, so we forcibly coerce it here to break early if there's a problem.
to = str(to)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
kwargs['rel'] = ManyToManyRel(to,
related_name=kwargs.pop('related_name', None),
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
symmetrical=kwargs.pop('symmetrical', to == RECURSIVE_RELATIONSHIP_CONSTANT),
through=kwargs.pop('through', None),
through_fields=kwargs.pop('through_fields', None),
db_constraint=db_constraint,
)
self.swappable = swappable
self.db_table = kwargs.pop('db_table', None)
if kwargs['rel'].through is not None:
assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
super(ManyToManyField, self).__init__(**kwargs)
def check(self, **kwargs):
errors = super(ManyToManyField, self).check(**kwargs)
errors.extend(self._check_unique(**kwargs))
errors.extend(self._check_relationship_model(**kwargs))
return errors
def _check_unique(self, **kwargs):
if self.unique:
return [
checks.Error(
'ManyToManyFields cannot be unique.',
hint=None,
obj=self,
id='fields.E330',
)
]
return []
def _check_relationship_model(self, from_model=None, **kwargs):
if hasattr(self.rel.through, '_meta'):
qualified_model_name = "%s.%s" % (
self.rel.through._meta.app_label, self.rel.through.__name__)
else:
qualified_model_name = self.rel.through
errors = []
if self.rel.through not in apps.get_models(include_auto_created=True):
# The relationship model is not installed.
errors.append(
checks.Error(
("Field specifies a many-to-many relation through model "
"'%s', which has not been installed.") %
qualified_model_name,
hint=None,
obj=self,
id='fields.E331',
)
)
else:
assert from_model is not None, \
"ManyToManyField with intermediate " \
"tables cannot be checked if you don't pass the model " \
"where the field is attached to."
# Set some useful local variables
to_model = self.rel.to
from_model_name = from_model._meta.object_name
if isinstance(to_model, six.string_types):
to_model_name = to_model
else:
to_model_name = to_model._meta.object_name
relationship_model_name = self.rel.through._meta.object_name
self_referential = from_model == to_model
# Check symmetrical attribute.
if (self_referential and self.rel.symmetrical and
not self.rel.through._meta.auto_created):
errors.append(
checks.Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
hint=None,
obj=self,
id='fields.E332',
)
)
# Count foreign keys in intermediate model
if self_referential:
seen_self = sum(from_model == getattr(field.rel, 'to', None)
for field in self.rel.through._meta.fields)
if seen_self > 2 and not self.rel.through_fields:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it has more than two foreign keys "
"to '%s', which is ambiguous. You must specify "
"which two foreign keys Django should use via the "
"through_fields keyword argument.") % (self, from_model_name),
hint=("Use through_fields to specify which two "
"foreign keys Django should use."),
obj=self.rel.through,
id='fields.E333',
)
)
else:
# Count foreign keys in relationship model
seen_from = sum(from_model == getattr(field.rel, 'to', None)
for field in self.rel.through._meta.fields)
seen_to = sum(to_model == getattr(field.rel, 'to', None)
for field in self.rel.through._meta.fields)
if seen_from > 1 and not self.rel.through_fields:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it has more than one foreign key "
"from '%s', which is ambiguous. You must specify "
"which foreign key Django should use via the "
"through_fields keyword argument.") % (self, from_model_name),
hint=('If you want to create a recursive relationship, '
'use ForeignKey("self", symmetrical=False, '
'through="%s").') % relationship_model_name,
obj=self,
id='fields.E334',
)
)
if seen_to > 1 and not self.rel.through_fields:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it has more than one foreign key "
"to '%s', which is ambiguous. You must specify "
"which foreign key Django should use via the "
"through_fields keyword argument.") % (self, to_model_name),
hint=('If you want to create a recursive '
'relationship, use ForeignKey("self", '
'symmetrical=False, through="%s").') % relationship_model_name,
obj=self,
id='fields.E335',
)
)
if seen_from == 0 or seen_to == 0:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it does not have a foreign key to '%s' or '%s'.") % (
self, from_model_name, to_model_name
),
hint=None,
obj=self.rel.through,
id='fields.E336',
)
)
# Validate `through_fields`
if self.rel.through_fields is not None:
# Validate that we're given an iterable of at least two items
# and that none of them is "falsy"
if not (len(self.rel.through_fields) >= 2 and
self.rel.through_fields[0] and self.rel.through_fields[1]):
errors.append(
checks.Error(
("Field specifies 'through_fields' but does not "
"provide the names of the two link fields that should be "
"used for the relation through model "
"'%s'.") % qualified_model_name,
hint=("Make sure you specify 'through_fields' as "
"through_fields=('field1', 'field2')"),
obj=self,
id='fields.E337',
)
)
# Validate the given through fields -- they should be actual
# fields on the through model, and also be foreign keys to the
# expected models
else:
assert from_model is not None, \
"ManyToManyField with intermediate " \
"tables cannot be checked if you don't pass the model " \
"where the field is attached to."
source, through, target = from_model, self.rel.through, self.rel.to
source_field_name, target_field_name = self.rel.through_fields[:2]
for field_name, related_model in ((source_field_name, source),
(target_field_name, target)):
possible_field_names = []
for f in through._meta.fields:
if hasattr(f, 'rel') and getattr(f.rel, 'to', None) == related_model:
possible_field_names.append(f.name)
if possible_field_names:
hint = ("Did you mean one of the following foreign "
"keys to '%s': %s?") % (related_model._meta.object_name,
', '.join(possible_field_names))
else:
hint = None
try:
field = through._meta.get_field(field_name)
except FieldDoesNotExist:
errors.append(
checks.Error(
("The intermediary model '%s' has no field '%s'.") % (
qualified_model_name, field_name),
hint=hint,
obj=self,
id='fields.E338',
)
)
else:
if not (hasattr(field, 'rel') and
getattr(field.rel, 'to', None) == related_model):
errors.append(
checks.Error(
"'%s.%s' is not a foreign key to '%s'." % (
through._meta.object_name, field_name,
related_model._meta.object_name),
hint=hint,
obj=self,
id='fields.E339',
)
)
return errors
def deconstruct(self):
name, path, args, kwargs = super(ManyToManyField, self).deconstruct()
# Handle the simpler arguments
if self.db_table is not None:
kwargs['db_table'] = self.db_table
if self.rel.db_constraint is not True:
kwargs['db_constraint'] = self.rel.db_constraint
if self.rel.related_name is not None:
kwargs['related_name'] = self.rel.related_name
if self.rel.related_query_name is not None:
kwargs['related_query_name'] = self.rel.related_query_name
# Rel needs more work.
if isinstance(self.rel.to, six.string_types):
kwargs['to'] = self.rel.to
else:
kwargs['to'] = "%s.%s" % (self.rel.to._meta.app_label, self.rel.to._meta.object_name)
if getattr(self.rel, 'through', None) is not None:
if isinstance(self.rel.through, six.string_types):
kwargs['through'] = self.rel.through
elif not self.rel.through._meta.auto_created:
kwargs['through'] = "%s.%s" % (self.rel.through._meta.app_label, self.rel.through._meta.object_name)
# If swappable is True, then see if we're actually pointing to the target
# of a swap.
swappable_setting = self.swappable_setting
if swappable_setting is not None:
# If it's already a settings reference, error
if hasattr(kwargs['to'], "setting_name"):
if kwargs['to'].setting_name != swappable_setting:
raise ValueError("Cannot deconstruct a ManyToManyField pointing to a model that is swapped in place of more than one model (%s and %s)" % (kwargs['to'].setting_name, swappable_setting))
# Set it
from django.db.migrations.writer import SettingsReference
kwargs['to'] = SettingsReference(
kwargs['to'],
swappable_setting,
)
return name, path, args, kwargs
def _get_path_info(self, direct=False):
"""
Called by both direct and indirect m2m traversal.
"""
pathinfos = []
int_model = self.rel.through
linkfield1 = int_model._meta.get_field_by_name(self.m2m_field_name())[0]
linkfield2 = int_model._meta.get_field_by_name(self.m2m_reverse_field_name())[0]
if direct:
join1infos = linkfield1.get_reverse_path_info()
join2infos = linkfield2.get_path_info()
else:
join1infos = linkfield2.get_reverse_path_info()
join2infos = linkfield1.get_path_info()
pathinfos.extend(join1infos)
pathinfos.extend(join2infos)
return pathinfos
def get_path_info(self):
return self._get_path_info(direct=True)
def get_reverse_path_info(self):
return self._get_path_info(direct=False)
def get_choices_default(self):
return Field.get_choices(self, include_blank=False)
def _get_m2m_db_table(self, opts):
"Function that can be curried to provide the m2m table name for this relation"
if self.rel.through is not None:
return self.rel.through._meta.db_table
elif self.db_table:
return self.db_table
else:
return utils.truncate_name('%s_%s' % (opts.db_table, self.name),
connection.ops.max_name_length())
def _get_m2m_attr(self, related, attr):
"Function that can be curried to provide the source accessor or DB column name for the m2m table"
cache_attr = '_m2m_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
if self.rel.through_fields is not None:
link_field_name = self.rel.through_fields[0]
else:
link_field_name = None
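        # Find the foreign key on the through model that points back at the
        # model owning this ManyToManyField.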
for f in self.rel.through._meta.fields:
if hasattr(f, 'rel') and f.rel and f.rel.to == related.model and \
(link_field_name is None or link_field_name == f.name):
setattr(self, cache_attr, getattr(f, attr))
return getattr(self, cache_attr)
def _get_m2m_reverse_attr(self, related, attr):
"Function that can be curried to provide the related accessor or DB column name for the m2m table"
cache_attr = '_m2m_reverse_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
found = False
if self.rel.through_fields is not None:
link_field_name = self.rel.through_fields[1]
else:
link_field_name = None
for f in self.rel.through._meta.fields:
if hasattr(f, 'rel') and f.rel and f.rel.to == related.parent_model:
if link_field_name is None and related.model == related.parent_model:
# If this is an m2m-intermediate to self,
# the first foreign key you find will be
# the source column. Keep searching for
# the second foreign key.
if found:
setattr(self, cache_attr, getattr(f, attr))
break
else:
found = True
elif link_field_name is None or link_field_name == f.name:
setattr(self, cache_attr, getattr(f, attr))
break
return getattr(self, cache_attr)
def value_to_string(self, obj):
data = ''
if obj:
qs = getattr(obj, self.name).all()
data = [instance._get_pk_val() for instance in qs]
else:
# In required many-to-many fields with only one available choice,
# select that one available choice.
if not self.blank:
choices_list = self.get_choices_default()
if len(choices_list) == 1:
data = [choices_list[0][0]]
return smart_text(data)
def contribute_to_class(self, cls, name):
# To support multiple relations to self, it's useful to have a non-None
# related name on symmetrical relations for internal reasons. The
# concept doesn't make a lot of sense externally ("you want me to
# specify *what* on my non-reversible relation?!"), so we set it up
# automatically. The funky name reduces the chance of an accidental
# clash.
if self.rel.symmetrical and (self.rel.to == "self" or self.rel.to == cls._meta.object_name):
self.rel.related_name = "%s_rel_+" % name
super(ManyToManyField, self).contribute_to_class(cls, name)
# The intermediate m2m model is not auto created if:
# 1) There is a manually specified intermediate, or
        # 2) The class owning the m2m field is abstract, or
# 3) The class owning the m2m field has been swapped out.
if not self.rel.through and not cls._meta.abstract and not cls._meta.swapped:
self.rel.through = create_many_to_many_intermediary_model(self, cls)
# Add the descriptor for the m2m relation
setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))
# Set up the accessor for the m2m table name for the relation
self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
# Populate some necessary rel arguments so that cross-app relations
# work correctly.
if isinstance(self.rel.through, six.string_types):
def resolve_through_model(field, model, cls):
field.rel.through = model
add_lazy_relation(cls, self, self.rel.through, resolve_through_model)
def contribute_to_related_class(self, cls, related):
# Internal M2Ms (i.e., those with a related name ending with '+')
# and swapped models don't get a related descriptor.
if not self.rel.is_hidden() and not related.model._meta.swapped:
setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))
# Set up the accessors for the column names on the m2m table
self.m2m_column_name = curry(self._get_m2m_attr, related, 'column')
self.m2m_reverse_name = curry(self._get_m2m_reverse_attr, related, 'column')
self.m2m_field_name = curry(self._get_m2m_attr, related, 'name')
self.m2m_reverse_field_name = curry(self._get_m2m_reverse_attr, related, 'name')
get_m2m_rel = curry(self._get_m2m_attr, related, 'rel')
self.m2m_target_field_name = lambda: get_m2m_rel().field_name
get_m2m_reverse_rel = curry(self._get_m2m_reverse_attr, related, 'rel')
self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name
def set_attributes_from_rel(self):
pass
def value_from_object(self, obj):
"Returns the value of this field in the given model instance."
return getattr(obj, self.attname).all()
def save_form_data(self, instance, data):
setattr(instance, self.attname, data)
def formfield(self, **kwargs):
db = kwargs.pop('using', None)
defaults = {
'form_class': forms.ModelMultipleChoiceField,
'queryset': self.rel.to._default_manager.using(db),
}
defaults.update(kwargs)
# If initial is passed in, it's a list of related objects, but the
# MultipleChoiceField takes a list of IDs.
if defaults.get('initial') is not None:
initial = defaults['initial']
if callable(initial):
initial = initial()
defaults['initial'] = [i._get_pk_val() for i in initial]
return super(ManyToManyField, self).formfield(**defaults)
def db_type(self, connection):
# A ManyToManyField is not represented by a single column,
# so return None.
return None
def db_parameters(self, connection):
return {"type": None, "check": None}
| artistic-2.0 | 4,376,366,457,037,427,000 | 44.357482 | 228 | 0.573576 | false |
1st/django | django/contrib/auth/tests/custom_user.py | 463 | 3662 | from django.contrib.auth.models import (
AbstractBaseUser, AbstractUser, BaseUserManager, Group, Permission,
PermissionsMixin, UserManager,
)
from django.db import models
# The custom User uses email as the unique identifier, and requires
# that every user provide a date of birth. This lets us test
# changes in username datatype, and non-text required fields.
class CustomUserManager(BaseUserManager):
def create_user(self, email, date_of_birth, password=None):
"""
Creates and saves a User with the given email and password.
"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
date_of_birth=date_of_birth,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password, date_of_birth):
u = self.create_user(email, password=password, date_of_birth=date_of_birth)
u.is_admin = True
u.save(using=self._db)
return u
class CustomUser(AbstractBaseUser):
email = models.EmailField(verbose_name='email address', max_length=255, unique=True)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
date_of_birth = models.DateField()
custom_objects = CustomUserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['date_of_birth']
class Meta:
app_label = 'auth'
def get_full_name(self):
return self.email
def get_short_name(self):
return self.email
def __unicode__(self):
return self.email
# Maybe required?
def get_group_permissions(self, obj=None):
return set()
def get_all_permissions(self, obj=None):
return set()
def has_perm(self, perm, obj=None):
return True
def has_perms(self, perm_list, obj=None):
return True
def has_module_perms(self, app_label):
return True
# Admin required fields
@property
def is_staff(self):
return self.is_admin
class RemoveGroupsAndPermissions(object):
"""
A context manager to temporarily remove the groups and user_permissions M2M
fields from the AbstractUser class, so they don't clash with the
related_name sets.
"""
def __enter__(self):
self._old_au_local_m2m = AbstractUser._meta.local_many_to_many
self._old_pm_local_m2m = PermissionsMixin._meta.local_many_to_many
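        # Swap in plain groups/user_permissions fields (without custom
        # related_name accessors) for the duration of the context, so test
        # user models don't trigger accessor clashes.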
groups = models.ManyToManyField(Group, blank=True)
groups.contribute_to_class(PermissionsMixin, "groups")
user_permissions = models.ManyToManyField(Permission, blank=True)
user_permissions.contribute_to_class(PermissionsMixin, "user_permissions")
PermissionsMixin._meta.local_many_to_many = [groups, user_permissions]
AbstractUser._meta.local_many_to_many = [groups, user_permissions]
def __exit__(self, exc_type, exc_value, traceback):
AbstractUser._meta.local_many_to_many = self._old_au_local_m2m
PermissionsMixin._meta.local_many_to_many = self._old_pm_local_m2m
# The extension user is a simple extension of the built-in user class,
# adding a required date_of_birth field. This allows us to check for
# any hard references to the name "User" in forms/handlers etc.
with RemoveGroupsAndPermissions():
class ExtensionUser(AbstractUser):
date_of_birth = models.DateField()
custom_objects = UserManager()
REQUIRED_FIELDS = AbstractUser.REQUIRED_FIELDS + ['date_of_birth']
class Meta:
app_label = 'auth'
| bsd-3-clause | -5,431,487,198,908,008,000 | 31.40708 | 88 | 0.667941 | false |
gohin/django | tests/admin_views/models.py | 87 | 24003 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import os
import tempfile
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Section(models.Model):
"""
A simple section that links to articles, to test linking to related items
in admin views.
"""
name = models.CharField(max_length=100)
@property
def name_property(self):
"""
A property that simply returns the name. Used to test #24461
"""
return self.name
@python_2_unicode_compatible
class Article(models.Model):
"""
A simple article to test admin views. Test backwards compatibility.
"""
title = models.CharField(max_length=100)
content = models.TextField()
date = models.DateTimeField()
section = models.ForeignKey(Section, models.CASCADE, null=True, blank=True)
sub_section = models.ForeignKey(Section, models.SET_NULL, null=True, blank=True, related_name='+')
def __str__(self):
return self.title
def model_year(self):
return self.date.year
model_year.admin_order_field = 'date'
model_year.short_description = ''
def model_year_reversed(self):
return self.date.year
model_year_reversed.admin_order_field = '-date'
model_year_reversed.short_description = ''
@python_2_unicode_compatible
class Book(models.Model):
"""
A simple book that has chapters.
"""
name = models.CharField(max_length=100, verbose_name='¿Name?')
def __str__(self):
return self.name
@python_2_unicode_compatible
class Promo(models.Model):
name = models.CharField(max_length=100, verbose_name='¿Name?')
book = models.ForeignKey(Book, models.CASCADE)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Chapter(models.Model):
title = models.CharField(max_length=100, verbose_name='¿Title?')
content = models.TextField()
book = models.ForeignKey(Book, models.CASCADE)
def __str__(self):
return self.title
class Meta:
# Use a utf-8 bytestring to ensure it works (see #11710)
verbose_name = '¿Chapter?'
@python_2_unicode_compatible
class ChapterXtra1(models.Model):
chap = models.OneToOneField(Chapter, models.CASCADE, verbose_name='¿Chap?')
xtra = models.CharField(max_length=100, verbose_name='¿Xtra?')
def __str__(self):
return '¿Xtra1: %s' % self.xtra
@python_2_unicode_compatible
class ChapterXtra2(models.Model):
chap = models.OneToOneField(Chapter, models.CASCADE, verbose_name='¿Chap?')
xtra = models.CharField(max_length=100, verbose_name='¿Xtra?')
def __str__(self):
return '¿Xtra2: %s' % self.xtra
class RowLevelChangePermissionModel(models.Model):
name = models.CharField(max_length=100, blank=True)
class CustomArticle(models.Model):
content = models.TextField()
date = models.DateTimeField()
@python_2_unicode_compatible
class ModelWithStringPrimaryKey(models.Model):
string_pk = models.CharField(max_length=255, primary_key=True)
def __str__(self):
return self.string_pk
def get_absolute_url(self):
return '/dummy/%s/' % self.string_pk
@python_2_unicode_compatible
class Color(models.Model):
value = models.CharField(max_length=10)
warm = models.BooleanField(default=False)
def __str__(self):
return self.value
# we replicate Color to register with another ModelAdmin
class Color2(Color):
class Meta:
proxy = True
@python_2_unicode_compatible
class Thing(models.Model):
title = models.CharField(max_length=20)
color = models.ForeignKey(Color, models.CASCADE, limit_choices_to={'warm': True})
pub_date = models.DateField(blank=True, null=True)
def __str__(self):
return self.title
@python_2_unicode_compatible
class Actor(models.Model):
name = models.CharField(max_length=50)
age = models.IntegerField()
title = models.CharField(max_length=50, null=True, blank=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Inquisition(models.Model):
expected = models.BooleanField(default=False)
leader = models.ForeignKey(Actor, models.CASCADE)
country = models.CharField(max_length=20)
def __str__(self):
return "by %s from %s" % (self.leader, self.country)
@python_2_unicode_compatible
class Sketch(models.Model):
title = models.CharField(max_length=100)
inquisition = models.ForeignKey(
Inquisition,
models.CASCADE,
limit_choices_to={
'leader__name': 'Palin',
'leader__age': 27,
'expected': False,
},
)
defendant0 = models.ForeignKey(
Actor,
models.CASCADE,
limit_choices_to={'title__isnull': False},
related_name='as_defendant0',
)
defendant1 = models.ForeignKey(
Actor,
models.CASCADE,
limit_choices_to={'title__isnull': True},
related_name='as_defendant1',
)
def __str__(self):
return self.title
def today_callable_dict():
return {"last_action__gte": datetime.datetime.today()}
def today_callable_q():
return models.Q(last_action__gte=datetime.datetime.today())
@python_2_unicode_compatible
class Character(models.Model):
username = models.CharField(max_length=100)
last_action = models.DateTimeField()
def __str__(self):
return self.username
@python_2_unicode_compatible
class StumpJoke(models.Model):
variation = models.CharField(max_length=100)
most_recently_fooled = models.ForeignKey(
Character,
models.CASCADE,
limit_choices_to=today_callable_dict,
related_name="+",
)
has_fooled_today = models.ManyToManyField(Character, limit_choices_to=today_callable_q, related_name="+")
def __str__(self):
return self.variation
class Fabric(models.Model):
NG_CHOICES = (
('Textured', (
('x', 'Horizontal'),
('y', 'Vertical'),
)),
('plain', 'Smooth'),
)
surface = models.CharField(max_length=20, choices=NG_CHOICES)
@python_2_unicode_compatible
class Person(models.Model):
GENDER_CHOICES = (
(1, "Male"),
(2, "Female"),
)
name = models.CharField(max_length=100)
gender = models.IntegerField(choices=GENDER_CHOICES)
age = models.IntegerField(default=21)
alive = models.BooleanField(default=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Persona(models.Model):
"""
A simple persona associated with accounts, to test inlining of related
accounts which inherit from a common accounts class.
"""
name = models.CharField(blank=False, max_length=80)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Account(models.Model):
"""
A simple, generic account encapsulating the information shared by all
types of accounts.
"""
username = models.CharField(blank=False, max_length=80)
persona = models.ForeignKey(Persona, models.CASCADE, related_name="accounts")
servicename = 'generic service'
def __str__(self):
return "%s: %s" % (self.servicename, self.username)
class FooAccount(Account):
"""A service-specific account of type Foo."""
servicename = 'foo'
class BarAccount(Account):
"""A service-specific account of type Bar."""
servicename = 'bar'
@python_2_unicode_compatible
class Subscriber(models.Model):
name = models.CharField(blank=False, max_length=80)
email = models.EmailField(blank=False, max_length=175)
def __str__(self):
return "%s (%s)" % (self.name, self.email)
class ExternalSubscriber(Subscriber):
pass
class OldSubscriber(Subscriber):
pass
class Media(models.Model):
name = models.CharField(max_length=60)
class Podcast(Media):
release_date = models.DateField()
class Meta:
ordering = ('release_date',) # overridden in PodcastAdmin
class Vodcast(Media):
media = models.OneToOneField(Media, models.CASCADE, primary_key=True, parent_link=True)
released = models.BooleanField(default=False)
class Parent(models.Model):
name = models.CharField(max_length=128)
def clean(self):
if self.name == '_invalid':
raise ValidationError('invalid')
class Child(models.Model):
parent = models.ForeignKey(Parent, models.CASCADE, editable=False)
name = models.CharField(max_length=30, blank=True)
def clean(self):
if self.name == '_invalid':
raise ValidationError('invalid')
@python_2_unicode_compatible
class EmptyModel(models.Model):
def __str__(self):
return "Primary key = %s" % self.id
temp_storage = FileSystemStorage(tempfile.mkdtemp())
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class Gallery(models.Model):
name = models.CharField(max_length=100)
class Picture(models.Model):
name = models.CharField(max_length=100)
image = models.FileField(storage=temp_storage, upload_to='test_upload')
gallery = models.ForeignKey(Gallery, models.CASCADE, related_name="pictures")
class Language(models.Model):
iso = models.CharField(max_length=5, primary_key=True)
name = models.CharField(max_length=50)
english_name = models.CharField(max_length=50)
shortlist = models.BooleanField(default=False)
class Meta:
ordering = ('iso',)
# a base class for Recommender and Recommendation
class Title(models.Model):
pass
class TitleTranslation(models.Model):
title = models.ForeignKey(Title, models.CASCADE)
text = models.CharField(max_length=100)
class Recommender(Title):
pass
class Recommendation(Title):
recommender = models.ForeignKey(Recommender, models.CASCADE)
class Collector(models.Model):
name = models.CharField(max_length=100)
class Widget(models.Model):
owner = models.ForeignKey(Collector, models.CASCADE)
name = models.CharField(max_length=100)
class DooHickey(models.Model):
code = models.CharField(max_length=10, primary_key=True)
owner = models.ForeignKey(Collector, models.CASCADE)
name = models.CharField(max_length=100)
class Grommet(models.Model):
code = models.AutoField(primary_key=True)
owner = models.ForeignKey(Collector, models.CASCADE)
name = models.CharField(max_length=100)
class Whatsit(models.Model):
index = models.IntegerField(primary_key=True)
owner = models.ForeignKey(Collector, models.CASCADE)
name = models.CharField(max_length=100)
class Doodad(models.Model):
name = models.CharField(max_length=100)
class FancyDoodad(Doodad):
owner = models.ForeignKey(Collector, models.CASCADE)
expensive = models.BooleanField(default=True)
@python_2_unicode_compatible
class Category(models.Model):
collector = models.ForeignKey(Collector, models.CASCADE)
order = models.PositiveIntegerField()
class Meta:
ordering = ('order',)
def __str__(self):
return '%s:o%s' % (self.id, self.order)
def link_posted_default():
return datetime.date.today() - datetime.timedelta(days=7)
class Link(models.Model):
posted = models.DateField(default=link_posted_default)
url = models.URLField()
post = models.ForeignKey("Post", models.CASCADE)
class PrePopulatedPost(models.Model):
title = models.CharField(max_length=100)
published = models.BooleanField(default=False)
slug = models.SlugField()
class PrePopulatedSubPost(models.Model):
post = models.ForeignKey(PrePopulatedPost, models.CASCADE)
subtitle = models.CharField(max_length=100)
subslug = models.SlugField()
class Post(models.Model):
title = models.CharField(max_length=100, help_text="Some help text for the title (with unicode ŠĐĆŽćžšđ)")
content = models.TextField(help_text="Some help text for the content (with unicode ŠĐĆŽćžšđ)")
posted = models.DateField(
default=datetime.date.today,
help_text="Some help text for the date (with unicode ŠĐĆŽćžšđ)"
)
public = models.NullBooleanField()
def awesomeness_level(self):
return "Very awesome."
# Proxy model to test overridden fields attrs on Post model so as not to
# interfere with other tests.
class FieldOverridePost(Post):
class Meta:
proxy = True
@python_2_unicode_compatible
class Gadget(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Villain(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class SuperVillain(Villain):
pass
@python_2_unicode_compatible
class FunkyTag(models.Model):
"Because we all know there's only one real use case for GFKs."
name = models.CharField(max_length=25)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
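    # The generic relation is stored as a (content_type, object_id) pair and
    # resolved lazily through the GenericForeignKey descriptor.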
def __str__(self):
return self.name
@python_2_unicode_compatible
class Plot(models.Model):
name = models.CharField(max_length=100)
team_leader = models.ForeignKey(Villain, models.CASCADE, related_name='lead_plots')
contact = models.ForeignKey(Villain, models.CASCADE, related_name='contact_plots')
tags = GenericRelation(FunkyTag)
def __str__(self):
return self.name
@python_2_unicode_compatible
class PlotDetails(models.Model):
details = models.CharField(max_length=100)
plot = models.OneToOneField(Plot, models.CASCADE)
def __str__(self):
return self.details
@python_2_unicode_compatible
class SecretHideout(models.Model):
""" Secret! Not registered with the admin! """
location = models.CharField(max_length=100)
villain = models.ForeignKey(Villain, models.CASCADE)
def __str__(self):
return self.location
@python_2_unicode_compatible
class SuperSecretHideout(models.Model):
""" Secret! Not registered with the admin! """
location = models.CharField(max_length=100)
supervillain = models.ForeignKey(SuperVillain, models.CASCADE)
def __str__(self):
return self.location
@python_2_unicode_compatible
class CyclicOne(models.Model):
name = models.CharField(max_length=25)
two = models.ForeignKey('CyclicTwo', models.CASCADE)
def __str__(self):
return self.name
@python_2_unicode_compatible
class CyclicTwo(models.Model):
name = models.CharField(max_length=25)
one = models.ForeignKey(CyclicOne, models.CASCADE)
def __str__(self):
return self.name
class Topping(models.Model):
name = models.CharField(max_length=20)
class Pizza(models.Model):
name = models.CharField(max_length=20)
toppings = models.ManyToManyField('Topping', related_name='pizzas')
class Album(models.Model):
owner = models.ForeignKey(User, models.SET_NULL, null=True, blank=True)
title = models.CharField(max_length=30)
class Employee(Person):
code = models.CharField(max_length=20)
class WorkHour(models.Model):
datum = models.DateField()
employee = models.ForeignKey(Employee, models.CASCADE)
class Question(models.Model):
question = models.CharField(max_length=20)
@python_2_unicode_compatible
class Answer(models.Model):
question = models.ForeignKey(Question, models.PROTECT)
answer = models.CharField(max_length=20)
def __str__(self):
return self.answer
class Reservation(models.Model):
start_date = models.DateTimeField()
price = models.IntegerField()
DRIVER_CHOICES = (
('bill', 'Bill G'),
('steve', 'Steve J'),
)
RESTAURANT_CHOICES = (
('indian', 'A Taste of India'),
('thai', 'Thai Pography'),
('pizza', 'Pizza Mama'),
)
class FoodDelivery(models.Model):
reference = models.CharField(max_length=100)
driver = models.CharField(max_length=100, choices=DRIVER_CHOICES, blank=True)
restaurant = models.CharField(max_length=100, choices=RESTAURANT_CHOICES, blank=True)
class Meta:
unique_together = (("driver", "restaurant"),)
@python_2_unicode_compatible
class CoverLetter(models.Model):
author = models.CharField(max_length=30)
date_written = models.DateField(null=True, blank=True)
def __str__(self):
return self.author
class Paper(models.Model):
title = models.CharField(max_length=30)
author = models.CharField(max_length=30, blank=True, null=True)
class ShortMessage(models.Model):
content = models.CharField(max_length=140)
timestamp = models.DateTimeField(null=True, blank=True)
@python_2_unicode_compatible
class Telegram(models.Model):
title = models.CharField(max_length=30)
date_sent = models.DateField(null=True, blank=True)
def __str__(self):
return self.title
class Story(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
class OtherStory(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
class ComplexSortedPerson(models.Model):
name = models.CharField(max_length=100)
age = models.PositiveIntegerField()
is_employee = models.NullBooleanField()
class PluggableSearchPerson(models.Model):
name = models.CharField(max_length=100)
age = models.PositiveIntegerField()
class PrePopulatedPostLargeSlug(models.Model):
"""
Regression test for #15938: a large max_length for the slugfield must not
be localized in prepopulated_fields_js.html or it might end up breaking
    the JavaScript (i.e., using THOUSAND_SEPARATOR ends up with maxLength=1,000)
"""
title = models.CharField(max_length=100)
published = models.BooleanField(default=False)
# `db_index=False` because MySQL cannot index large CharField (#21196).
slug = models.SlugField(max_length=1000, db_index=False)
class AdminOrderedField(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
class AdminOrderedModelMethod(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
def some_order(self):
return self.order
some_order.admin_order_field = 'order'
class AdminOrderedAdminMethod(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
class AdminOrderedCallable(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
@python_2_unicode_compatible
class Report(models.Model):
title = models.CharField(max_length=100)
def __str__(self):
return self.title
class MainPrepopulated(models.Model):
name = models.CharField(max_length=100)
pubdate = models.DateField()
status = models.CharField(
max_length=20,
choices=(('option one', 'Option One'),
('option two', 'Option Two')))
slug1 = models.SlugField(blank=True)
slug2 = models.SlugField(blank=True)
slug3 = models.SlugField(blank=True, allow_unicode=True)
class RelatedPrepopulated(models.Model):
parent = models.ForeignKey(MainPrepopulated, models.CASCADE)
name = models.CharField(max_length=75)
pubdate = models.DateField()
status = models.CharField(
max_length=20,
choices=(('option one', 'Option One'),
('option two', 'Option Two')))
slug1 = models.SlugField(max_length=50)
slug2 = models.SlugField(max_length=60)
class UnorderedObject(models.Model):
"""
Model without any defined `Meta.ordering`.
Refs #16819.
"""
name = models.CharField(max_length=255)
bool = models.BooleanField(default=True)
class UndeletableObject(models.Model):
"""
Model whose show_delete in admin change_view has been disabled
Refs #10057.
"""
name = models.CharField(max_length=255)
class UnchangeableObject(models.Model):
"""
Model whose change_view is disabled in admin
Refs #20640.
"""
class UserMessenger(models.Model):
"""
Dummy class for testing message_user functions on ModelAdmin
"""
class Simple(models.Model):
"""
Simple model with nothing on it for use in testing
"""
class Choice(models.Model):
choice = models.IntegerField(blank=True, null=True,
choices=((1, 'Yes'), (0, 'No'), (None, 'No opinion')))
class ParentWithDependentChildren(models.Model):
"""
Issue #20522
Model where the validation of child foreign-key relationships depends
on validation of the parent
"""
some_required_info = models.PositiveIntegerField()
family_name = models.CharField(max_length=255, blank=False)
class DependentChild(models.Model):
"""
Issue #20522
Model that depends on validation of the parent class for one of its
fields to validate during clean
"""
parent = models.ForeignKey(ParentWithDependentChildren, models.CASCADE)
family_name = models.CharField(max_length=255)
class _Manager(models.Manager):
def get_queryset(self):
return super(_Manager, self).get_queryset().filter(pk__gt=1)
class FilteredManager(models.Model):
def __str__(self):
return "PK=%d" % self.pk
pk_gt_1 = _Manager()
objects = models.Manager()
class EmptyModelVisible(models.Model):
""" See ticket #11277. """
class EmptyModelHidden(models.Model):
""" See ticket #11277. """
class EmptyModelMixin(models.Model):
""" See ticket #11277. """
class State(models.Model):
name = models.CharField(max_length=100)
class City(models.Model):
state = models.ForeignKey(State, models.CASCADE)
name = models.CharField(max_length=100)
def get_absolute_url(self):
return '/dummy/%s/' % self.pk
class Restaurant(models.Model):
city = models.ForeignKey(City, models.CASCADE)
name = models.CharField(max_length=100)
def get_absolute_url(self):
return '/dummy/%s/' % self.pk
class Worker(models.Model):
work_at = models.ForeignKey(Restaurant, models.CASCADE)
name = models.CharField(max_length=50)
surname = models.CharField(max_length=50)
# Models for #23329
class ReferencedByParent(models.Model):
name = models.CharField(max_length=20, unique=True)
class ParentWithFK(models.Model):
fk = models.ForeignKey(
ReferencedByParent,
models.CASCADE,
to_field='name',
related_name='hidden+',
)
class ChildOfReferer(ParentWithFK):
pass
# Models for #23431
class ReferencedByInline(models.Model):
name = models.CharField(max_length=20, unique=True)
class InlineReference(models.Model):
fk = models.ForeignKey(
ReferencedByInline,
models.CASCADE,
to_field='name',
related_name='hidden+',
)
class InlineReferer(models.Model):
refs = models.ManyToManyField(InlineReference)
# Models for #23604 and #23915
class Recipe(models.Model):
rname = models.CharField(max_length=20, unique=True)
class Ingredient(models.Model):
iname = models.CharField(max_length=20, unique=True)
recipes = models.ManyToManyField(Recipe, through='RecipeIngredient')
class RecipeIngredient(models.Model):
ingredient = models.ForeignKey(Ingredient, models.CASCADE, to_field='iname')
recipe = models.ForeignKey(Recipe, models.CASCADE, to_field='rname')
# Model for #23839
class NotReferenced(models.Model):
# Don't point any FK at this model.
pass
# Models for #23934
class ExplicitlyProvidedPK(models.Model):
name = models.IntegerField(primary_key=True)
class ImplicitlyGeneratedPK(models.Model):
name = models.IntegerField(unique=True)
| bsd-3-clause | -8,335,779,341,034,002,000 | 24.745435 | 110 | 0.686053 | false |
mikeyarce/subscriptions-checkout-for-woocommerce | node_modules/node-gyp/gyp/tools/pretty_sln.py | 1831 | 5099 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each project in alphabetical order with its
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
# if all dependencies are done, we can build it, otherwise we try to build the
# dependency.
# This is not infinite-recursion proof.
for dep in deps[project]:
if dep not in built:
BuildProject(dep, built, projects, deps)
print project
built.append(project)
def ParseSolution(solution_file):
# All projects, their clsid and paths.
projects = dict()
# A list of dependencies associated with a project.
dependencies = dict()
# Regular expressions that matches the SLN format.
# The first line of a project definition.
begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
r'}"\) = "(.*)", "(.*)", "(.*)"$')
# The last line of a project definition.
end_project = re.compile('^EndProject$')
# The first line of a dependency list.
begin_dep = re.compile(
r'ProjectSection\(ProjectDependencies\) = postProject$')
# The last line of a dependency list.
end_dep = re.compile('EndProjectSection$')
# A line describing a dependency.
dep_line = re.compile(' *({.*}) = ({.*})$')
in_deps = False
solution = open(solution_file)
for line in solution:
results = begin_project.search(line)
if results:
# Hack to remove icu because the diff is too different.
if results.group(1).find('icu') != -1:
continue
# We remove "_gyp" from the names because it helps to diff them.
current_project = results.group(1).replace('_gyp', '')
projects[current_project] = [results.group(2).replace('_gyp', ''),
results.group(3),
results.group(2)]
dependencies[current_project] = []
continue
results = end_project.search(line)
if results:
current_project = None
continue
results = begin_dep.search(line)
if results:
in_deps = True
continue
results = end_dep.search(line)
if results:
in_deps = False
continue
results = dep_line.search(line)
if results and in_deps and current_project:
dependencies[current_project].append(results.group(1))
continue
  # Change all dependency clsids to names instead.
  for project in dependencies:
    # For each dependency in this project
new_dep_array = []
for dep in dependencies[project]:
      # Look for the project name matching this clsid
for project_info in projects:
if projects[project_info][1] == dep:
new_dep_array.append(project_info)
dependencies[project] = sorted(new_dep_array)
return (projects, dependencies)
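# Illustrative shape of the return value (editorial note; the names below are
# invented): each project name maps to [path_without_gyp, clsid, original_path]
# and each name maps to the list of projects it depends on, e.g.
#
#   projects = {'base': ['base.vcproj', '{GUID-1}', 'base_gyp.vcproj']}
#   dependencies = {'base': ['net', 'url']}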
def PrintDependencies(projects, deps):
print "---------------------------------------"
print "Dependencies for all projects"
print "---------------------------------------"
print "-- --"
for (project, dep_list) in sorted(deps.items()):
print "Project : %s" % project
print "Path : %s" % projects[project][0]
if dep_list:
for dep in dep_list:
print " - %s" % dep
print ""
print "-- --"
def PrintBuildOrder(projects, deps):
print "---------------------------------------"
print "Build order "
print "---------------------------------------"
print "-- --"
built = []
for (project, _) in sorted(deps.items()):
if project not in built:
BuildProject(project, built, projects, deps)
print "-- --"
def PrintVCProj(projects):
for project in projects:
print "-------------------------------------"
print "-------------------------------------"
print project
print project
print project
print "-------------------------------------"
print "-------------------------------------"
project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
projects[project][2]))
pretty = pretty_vcproj
argv = [ '',
project_path,
'$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
]
argv.extend(sys.argv[3:])
pretty.main(argv)
def main():
  # Check that we have at least 1 parameter (the solution file).
if len(sys.argv) < 2:
print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
return 1
(projects, deps) = ParseSolution(sys.argv[1])
PrintDependencies(projects, deps)
PrintBuildOrder(projects, deps)
if '--recursive' in sys.argv:
PrintVCProj(projects)
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-2.0 | 3,316,468,121,732,812,000 | 29.171598 | 80 | 0.569523 | false |
yoki/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/handlers/queuecharts.py | 122 | 6724 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import calendar
from datetime import datetime
import itertools
from time import time
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from config import logging, charts
from model.patchlog import PatchLog
from model.queues import Queue
from model.queuelog import QueueLog
class QueueCharts(webapp.RequestHandler):
def get(self, queue_name):
queue_name = queue_name.lower()
if not Queue.queue_with_name(queue_name):
self.error(404)
return
timestamp = self._get_timestamp()
view_range = self._get_view_range()
time_unit, time_unit_name = charts.get_time_unit(view_range)
all_queue_names = map(Queue.name, Queue.all())
template_values = {
"all_queue_names": all_queue_names,
"patch_data": self._get_patch_data(queue_name, timestamp, view_range),
"queue_data": self._get_queue_data(queue_name, timestamp, view_range),
"queue_name": queue_name,
"seconds_ago_min": 0,
"seconds_ago_max": view_range,
"time_unit_name": time_unit_name,
"time_unit": time_unit,
"timestamp": timestamp,
"view_range": view_range,
"view_range_choices": charts.view_range_choices,
}
self.response.out.write(template.render("templates/queuecharts.html", template_values))
@classmethod
def _get_min_med_max(cls, values, defaults=(0, 0, 0)):
if not values:
return defaults
length = len(values)
sorted_values = sorted(values)
return sorted_values[0], sorted_values[length / 2], sorted_values[length - 1]
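    # Editorial note: for example _get_min_med_max([5, 1, 3]) returns (1, 3, 5),
    # and an empty list falls back to the supplied defaults of (0, 0, 0).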
def _get_patch_data(self, queue_name, timestamp, view_range):
patch_logs = self._get_patch_logs(queue_name, timestamp, view_range)
patch_data = []
for patch_log in patch_logs:
if patch_log.process_duration and patch_log.wait_duration:
patch_log_timestamp = calendar.timegm(patch_log.date.utctimetuple())
patch_data.append({
"attachment_id": patch_log.attachment_id,
"seconds_ago": timestamp - patch_log_timestamp,
"process_duration": patch_log.process_duration / charts.one_minute,
"retry_count": patch_log.retry_count,
"status_update_count": patch_log.status_update_count,
"wait_duration": patch_log.wait_duration / charts.one_minute,
})
return patch_data
def _get_patch_logs(self, queue_name, timestamp, view_range):
patch_log_query = PatchLog.all()
patch_log_query = patch_log_query.filter("queue_name =", queue_name)
patch_log_query = patch_log_query.filter("date >=", datetime.utcfromtimestamp(timestamp - view_range))
patch_log_query = patch_log_query.filter("date <=", datetime.utcfromtimestamp(timestamp))
patch_log_query = patch_log_query.order("date")
return patch_log_query.run(limit=charts.patch_log_limit)
def _get_queue_data(self, queue_name, timestamp, view_range):
queue_logs = self._get_queue_logs(queue_name, timestamp, view_range)
queue_data = []
for queue_log in queue_logs:
queue_log_timestamp = calendar.timegm(queue_log.date.utctimetuple())
p_min, p_med, p_max = self._get_min_med_max(queue_log.patch_process_durations)
w_min, w_med, w_max = self._get_min_med_max(queue_log.patch_wait_durations)
queue_data.append({
"bots_seen": len(queue_log.bot_ids_seen),
"seconds_ago": timestamp - queue_log_timestamp,
"patch_processing_min": p_min,
"patch_processing_med": p_med,
"patch_processing_max": p_max,
"patch_retry_count": queue_log.patch_retry_count,
"patch_waiting_min": w_min,
"patch_waiting_med": w_med,
"patch_waiting_max": w_max,
"patches_completed": len(queue_log.patch_process_durations),
"patches_waiting": queue_log.max_patches_waiting,
"status_update_count": queue_log.status_update_count,
})
return queue_data
def _get_queue_logs(self, queue_name, timestamp, view_range):
queue_logs = []
current_timestamp = timestamp - view_range
while current_timestamp <= timestamp:
queue_logs.append(QueueLog.get_at(queue_name, logging.queue_log_duration, current_timestamp))
current_timestamp += logging.queue_log_duration
return queue_logs
@classmethod
def _get_time_unit(cls, view_range):
if view_range > charts.one_day * 2:
return
def _get_timestamp(self):
timestamp = self.request.get("timestamp")
try:
return int(timestamp)
except ValueError:
return int(time())
def _get_view_range(self):
view_range = self.request.get("view_range")
try:
return int(view_range)
except ValueError:
return charts.default_view_range
| bsd-3-clause | -488,672,387,158,151,100 | 43.529801 | 110 | 0.644259 | false |
Cinntax/home-assistant | homeassistant/components/magicseaweed/sensor.py | 1 | 6433 | """Support for magicseaweed data from magicseaweed.com."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_API_KEY,
CONF_NAME,
CONF_MONITORED_CONDITIONS,
ATTR_ATTRIBUTION,
)
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
CONF_HOURS = "hours"
CONF_SPOT_ID = "spot_id"
CONF_UNITS = "units"
DEFAULT_UNIT = "us"
DEFAULT_NAME = "MSW"
DEFAULT_ATTRIBUTION = "Data provided by magicseaweed.com"
ICON = "mdi:waves"
HOURS = ["12AM", "3AM", "6AM", "9AM", "12PM", "3PM", "6PM", "9PM"]
SENSOR_TYPES = {
"max_breaking_swell": ["Max"],
"min_breaking_swell": ["Min"],
"swell_forecast": ["Forecast"],
}
UNITS = ["eu", "uk", "us"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MONITORED_CONDITIONS): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_SPOT_ID): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_HOURS, default=None): vol.All(
cv.ensure_list, [vol.In(HOURS)]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_UNITS): vol.In(UNITS),
}
)
# Return cached results if last scan was less than this time ago.
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=30)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Magicseaweed sensor."""
name = config.get(CONF_NAME)
spot_id = config[CONF_SPOT_ID]
api_key = config[CONF_API_KEY]
hours = config.get(CONF_HOURS)
if CONF_UNITS in config:
units = config.get(CONF_UNITS)
elif hass.config.units.is_metric:
units = UNITS[0]
else:
units = UNITS[2]
forecast_data = MagicSeaweedData(api_key=api_key, spot_id=spot_id, units=units)
forecast_data.update()
    # If the connection failed, don't set up the platform.
if forecast_data.currently is None or forecast_data.hourly is None:
return
sensors = []
for variable in config[CONF_MONITORED_CONDITIONS]:
sensors.append(MagicSeaweedSensor(forecast_data, variable, name, units))
if "forecast" not in variable and hours is not None:
for hour in hours:
sensors.append(
MagicSeaweedSensor(forecast_data, variable, name, units, hour)
)
add_entities(sensors, True)
class MagicSeaweedSensor(Entity):
"""Implementation of a MagicSeaweed sensor."""
def __init__(self, forecast_data, sensor_type, name, unit_system, hour=None):
"""Initialize the sensor."""
self.client_name = name
self.data = forecast_data
self.hour = hour
self.type = sensor_type
self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
self._name = SENSOR_TYPES[sensor_type][0]
self._icon = None
self._state = None
self._unit_system = unit_system
self._unit_of_measurement = None
@property
def name(self):
"""Return the name of the sensor."""
if self.hour is None and "forecast" in self.type:
return f"{self.client_name} {self._name}"
if self.hour is None:
return f"Current {self.client_name} {self._name}"
return f"{self.hour} {self.client_name} {self._name}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_system(self):
"""Return the unit system of this entity."""
return self._unit_system
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the entity weather icon, if any."""
return ICON
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attrs
def update(self):
"""Get the latest data from Magicseaweed and updates the states."""
self.data.update()
if self.hour is None:
forecast = self.data.currently
else:
forecast = self.data.hourly[self.hour]
self._unit_of_measurement = forecast.swell_unit
if self.type == "min_breaking_swell":
self._state = forecast.swell_minBreakingHeight
elif self.type == "max_breaking_swell":
self._state = forecast.swell_maxBreakingHeight
elif self.type == "swell_forecast":
summary = "{} - {}".format(
forecast.swell_minBreakingHeight, forecast.swell_maxBreakingHeight
)
self._state = summary
if self.hour is None:
for hour, data in self.data.hourly.items():
occurs = hour
hr_summary = "{} - {} {}".format(
data.swell_minBreakingHeight,
data.swell_maxBreakingHeight,
data.swell_unit,
)
self._attrs[occurs] = hr_summary
if self.type != "swell_forecast":
self._attrs.update(forecast.attrs)
class MagicSeaweedData:
"""Get the latest data from MagicSeaweed."""
def __init__(self, api_key, spot_id, units):
"""Initialize the data object."""
import magicseaweed
self._msw = magicseaweed.MSW_Forecast(api_key, spot_id, None, units)
self.currently = None
self.hourly = {}
# Apply throttling to methods using configured interval
self.update = Throttle(MIN_TIME_BETWEEN_UPDATES)(self._update)
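        # Editorial sketch (assumption about homeassistant.util.Throttle): the
        # wrapper returned here turns calls to self.update() into no-ops until
        # MIN_TIME_BETWEEN_UPDATES has elapsed since the last real _update().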
def _update(self):
"""Get the latest data from MagicSeaweed."""
try:
forecasts = self._msw.get_future()
self.currently = forecasts.data[0]
for forecast in forecasts.data[:8]:
hour = dt_util.utc_from_timestamp(forecast.localTimestamp).strftime(
"%-I%p"
)
self.hourly[hour] = forecast
except ConnectionError:
_LOGGER.error("Unable to retrieve data from Magicseaweed")
| apache-2.0 | 4,374,490,604,535,614,000 | 31.489899 | 84 | 0.606404 | false |
SingTel-DataCo/incubator-superset | superset/views/base.py | 2 | 10977 | import functools
import json
import logging
import traceback
from flask import g, redirect, Response, flash, abort
from flask_babel import gettext as __
from flask_appbuilder import BaseView
from flask_appbuilder import ModelView
from flask_appbuilder.widgets import ListWidget
from flask_appbuilder.actions import action
from flask_appbuilder.models.sqla.filters import BaseFilter
from superset import appbuilder, conf, db, utils, sm, sql_parse
from superset.connectors.connector_registry import ConnectorRegistry
from superset.connectors.sqla.models import SqlaTable
def get_error_msg():
if conf.get("SHOW_STACKTRACE"):
error_msg = traceback.format_exc()
else:
error_msg = "FATAL ERROR \n"
error_msg += (
"Stacktrace is hidden. Change the SHOW_STACKTRACE "
"configuration setting to enable it")
return error_msg
def json_error_response(msg, status=None, stacktrace=None):
data = {'error': str(msg)}
if stacktrace:
data['stacktrace'] = stacktrace
status = status if status else 500
return Response(
json.dumps(data),
status=status, mimetype="application/json")
def api(f):
"""
A decorator to label an endpoint as an API. Catches uncaught exceptions and
    returns the response in JSON format
"""
def wraps(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except Exception as e:
logging.exception(e)
return json_error_response(get_error_msg())
return functools.update_wrapper(wraps, f)
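# Minimal usage sketch (ExampleView is hypothetical, not from this codebase):
# any uncaught exception raised inside the wrapped method is logged and turned
# into a JSON error response instead of propagating.
#
#   class ExampleView(BaseSupersetView):
#       @api
#       def datasources(self):
#           return Response(json.dumps({'ok': True}),
#                           mimetype="application/json")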
def get_datasource_exist_error_mgs(full_name):
return __("Datasource %(name)s already exists", name=full_name)
def get_user_roles():
if g.user.is_anonymous():
public_role = conf.get('AUTH_ROLE_PUBLIC')
return [appbuilder.sm.find_role(public_role)] if public_role else []
return g.user.roles
class BaseSupersetView(BaseView):
def can_access(self, permission_name, view_name, user=None):
if not user:
user = g.user
return utils.can_access(
appbuilder.sm, permission_name, view_name, user)
def all_datasource_access(self, user=None):
return self.can_access(
"all_datasource_access", "all_datasource_access", user=user)
def database_access(self, database, user=None):
return (
self.can_access(
"all_database_access", "all_database_access", user=user) or
self.can_access("database_access", database.perm, user=user)
)
def schema_access(self, datasource, user=None):
return (
self.database_access(datasource.database, user=user) or
self.all_datasource_access(user=user) or
self.can_access("schema_access", datasource.schema_perm, user=user)
)
def datasource_access(self, datasource, user=None):
return (
self.schema_access(datasource, user=user) or
self.can_access("datasource_access", datasource.perm, user=user)
)
def datasource_access_by_name(
self, database, datasource_name, schema=None):
if self.database_access(database) or self.all_datasource_access():
return True
schema_perm = utils.get_schema_perm(database, schema)
if schema and self.can_access('schema_access', schema_perm):
return True
datasources = ConnectorRegistry.query_datasources_by_name(
db.session, database, datasource_name, schema=schema)
for datasource in datasources:
if self.can_access("datasource_access", datasource.perm):
return True
return False
def datasource_access_by_fullname(
self, database, full_table_name, schema):
table_name_pieces = full_table_name.split(".")
if len(table_name_pieces) == 2:
table_schema = table_name_pieces[0]
table_name = table_name_pieces[1]
else:
table_schema = schema
table_name = table_name_pieces[0]
return self.datasource_access_by_name(
database, table_name, schema=table_schema)
def rejected_datasources(self, sql, database, schema):
superset_query = sql_parse.SupersetQuery(sql)
return [
t for t in superset_query.tables if not
self.datasource_access_by_fullname(database, t, schema)]
def user_datasource_perms(self):
datasource_perms = set()
for r in g.user.roles:
for perm in r.permissions:
if (
perm.permission and
'datasource_access' == perm.permission.name):
datasource_perms.add(perm.view_menu.name)
return datasource_perms
def schemas_accessible_by_user(self, database, schemas):
if self.database_access(database) or self.all_datasource_access():
return schemas
subset = set()
for schema in schemas:
schema_perm = utils.get_schema_perm(database, schema)
if self.can_access('schema_access', schema_perm):
subset.add(schema)
perms = self.user_datasource_perms()
if perms:
tables = (
db.session.query(SqlaTable)
.filter(
SqlaTable.perm.in_(perms),
SqlaTable.database_id == database.id,
)
.all()
)
for t in tables:
if t.schema:
subset.add(t.schema)
return sorted(list(subset))
def accessible_by_user(self, database, datasource_names, schema=None):
if self.database_access(database) or self.all_datasource_access():
return datasource_names
if schema:
schema_perm = utils.get_schema_perm(database, schema)
if self.can_access('schema_access', schema_perm):
return datasource_names
user_perms = self.user_datasource_perms()
user_datasources = ConnectorRegistry.query_datasources_by_permissions(
db.session, database, user_perms)
if schema:
names = {
d.table_name
for d in user_datasources if d.schema == schema}
return [d for d in datasource_names if d in names]
else:
full_names = {d.full_name for d in user_datasources}
return [d for d in datasource_names if d in full_names]
class SupersetModelView(ModelView):
page_size = 100
class ListWidgetWithCheckboxes(ListWidget):
"""An alternative to list view that renders Boolean fields as checkboxes
Works in conjunction with the `checkbox` view."""
template = 'superset/fab_overrides/list_with_checkboxes.html'
def validate_json(form, field): # noqa
try:
json.loads(field.data)
except Exception as e:
logging.exception(e)
raise Exception("json isn't valid")
class DeleteMixin(object):
def _delete(self, pk):
"""
        Delete function logic, override to implement different logic
deletes the record with primary_key = pk
:param pk:
record primary key to delete
"""
item = self.datamodel.get(pk, self._base_filters)
if not item:
abort(404)
try:
self.pre_delete(item)
except Exception as e:
flash(str(e), "danger")
else:
view_menu = sm.find_view_menu(item.get_perm())
pvs = sm.get_session.query(sm.permissionview_model).filter_by(
view_menu=view_menu).all()
schema_view_menu = None
if hasattr(item, 'schema_perm'):
schema_view_menu = sm.find_view_menu(item.schema_perm)
pvs.extend(sm.get_session.query(
sm.permissionview_model).filter_by(
view_menu=schema_view_menu).all())
if self.datamodel.delete(item):
self.post_delete(item)
for pv in pvs:
sm.get_session.delete(pv)
if view_menu:
sm.get_session.delete(view_menu)
if schema_view_menu:
sm.get_session.delete(schema_view_menu)
sm.get_session.commit()
flash(*self.datamodel.message)
self.update_redirect()
@action(
"muldelete",
__("Delete"),
__("Delete all Really?"),
"fa-trash",
single=False
)
def muldelete(self, items):
if not items:
abort(404)
for item in items:
try:
self.pre_delete(item)
except Exception as e:
flash(str(e), "danger")
else:
self._delete(item.id)
self.update_redirect()
return redirect(self.get_redirect())
class SupersetFilter(BaseFilter):
"""Add utility function to make BaseFilter easy and fast
    These utility functions exist in the SecurityManager, but would do
a database round trip at every check. Here we cache the role objects
to be able to make multiple checks but query the db only once
"""
def get_user_roles(self):
return get_user_roles()
def get_all_permissions(self):
"""Returns a set of tuples with the perm name and view menu name"""
perms = set()
for role in self.get_user_roles():
for perm_view in role.permissions:
t = (perm_view.permission.name, perm_view.view_menu.name)
perms.add(t)
return perms
def has_role(self, role_name_or_list):
"""Whether the user has this role name"""
if not isinstance(role_name_or_list, list):
role_name_or_list = [role_name_or_list]
return any(
[r.name in role_name_or_list for r in self.get_user_roles()])
def has_perm(self, permission_name, view_menu_name):
"""Whether the user has this perm"""
return (permission_name, view_menu_name) in self.get_all_permissions()
def get_view_menus(self, permission_name):
"""Returns the details of view_menus for a perm name"""
vm = set()
for perm_name, vm_name in self.get_all_permissions():
if perm_name == permission_name:
vm.add(vm_name)
return vm
def has_all_datasource_access(self):
return (
self.has_role(['Admin', 'Alpha']) or
self.has_perm('all_datasource_access', 'all_datasource_access'))
class DatasourceFilter(SupersetFilter):
def apply(self, query, func): # noqa
if self.has_all_datasource_access():
return query
perms = self.get_view_menus('datasource_access')
# TODO(bogdan): add `schema_access` support here
return query.filter(self.model.perm.in_(perms))
| apache-2.0 | -6,809,069,399,859,635,000 | 32.568807 | 79 | 0.596611 | false |
zycdragonball/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/dataframe_test.py | 62 | 3753 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the DataFrame class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
def setup_test_df():
"""Create a dataframe populated with some test columns."""
df = learn.DataFrame()
df["a"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor a", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
df["b"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor b", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out2")
df["c"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor c", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
return df
class DataFrameTest(test.TestCase):
"""Test of `DataFrame`."""
def test_create(self):
df = setup_test_df()
self.assertEqual(df.columns(), frozenset(["a", "b", "c"]))
def test_select_columns(self):
df = setup_test_df()
df2 = df.select_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["a", "c"]))
def test_exclude_columns(self):
df = setup_test_df()
df2 = df.exclude_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["b"]))
def test_get_item(self):
df = setup_test_df()
c1 = df["b"]
self.assertEqual(
mocks.MockTensor("Mock Tensor 2", dtypes.int32), c1.build())
def test_del_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
del df["b"]
self.assertEqual(2, len(df))
self.assertEqual(df.columns(), frozenset(["a", "c"]))
def test_set_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn",
mocks.MockTensor("Tensor ", dtypes.int32))
df["quack"] = col1
self.assertEqual(4, len(df))
col2 = df["quack"]
self.assertEqual(col1, col2)
def test_set_item_column_multi(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn", [])
col2 = mocks.MockSeries("MooColumn", [])
df["quack", "moo"] = [col1, col2]
self.assertEqual(5, len(df))
col3 = df["quack"]
self.assertEqual(col1, col3)
col4 = df["moo"]
self.assertEqual(col2, col4)
def test_set_item_pandas(self):
# TODO(jamieas)
pass
def test_set_item_numpy(self):
# TODO(jamieas)
pass
def test_build(self):
df = setup_test_df()
result = df.build()
expected = {
"a": mocks.MockTensor("Mock Tensor 1", dtypes.int32),
"b": mocks.MockTensor("Mock Tensor 2", dtypes.int32),
"c": mocks.MockTensor("Mock Tensor 1", dtypes.int32)
}
self.assertEqual(expected, result)
if __name__ == "__main__":
test.main()
| apache-2.0 | -3,118,896,128,274,725,000 | 31.921053 | 80 | 0.641886 | false |
distributed-system-analysis/sarjitsu | lib/backend/src/data_processor.py | 1 | 1591 | import os
import subprocess
import extract_sa
from app import app
from scripts.satools import oscode
def prepare(sessionID, target, sa_filename, q):
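  # Editorial summary (added comment): detect the sysstat version of the SA
  # binary file; if detection fails, convert it with the bundled sadf-f23-64
  # binary first, then hand the result to extract_sa.extract(), storing the
  # outcome (or an "Invalid" marker on failure) in q[sa_filename].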
file_metadata = "file_metadata:%s:%s" % (sessionID, sa_filename)
SA_FILEPATH = os.path.join(target, sa_filename)
res = oscode.determine_version(file_path=SA_FILEPATH)
if res[0]:
sadf_type_res = res[1]
app.cache.hset(file_metadata, "sa_file_path", SA_FILEPATH)
else:
app.logger.warn("couldn't determine sysstat version for file..")
SA_FILEPATH_CONV = "%s_conv" % SA_FILEPATH
CMD_CONVERT = ['scripts/vos/analysis/bin/sadf-f23-64',
'-c', SA_FILEPATH]
p2 = subprocess.Popen(CMD_CONVERT, stdout=open(SA_FILEPATH_CONV ,'w'),
stderr=subprocess.PIPE, env={'LC_ALL': 'C'})
p2.wait()
err = p2.stderr
if err:
err = err.read().decode()
if "successfully" not in err:
app.logger.error(err)
app.logger.error("SAR data extraction *failed*!")
q[sa_filename] = (None, "Invalid", None)
return
sadf_type_res = "f23"
_tmp = p2.communicate()[0]
app.logger.warn(_tmp)
app.logger.info('sysstat version was incompatible but dealt with')
app.cache.hset(file_metadata, "sa_file_path_conv", SA_FILEPATH_CONV)
app.cache.hset(file_metadata, "sadf_type_det", sadf_type_res)
  # FIXME: handle exceptions
q[sa_filename] = extract_sa.extract(sessionID, target, sa_filename)
return
| gpl-3.0 | 8,234,944,622,907,922,000 | 35.159091 | 78 | 0.595852 | false |
MilchReis/PicSort | src/core/input.py | 1 | 1653 | # -*- coding: utf-8 -*-
'''
@author: nick
'''
import pygame
class InputProcessor():
def __init__(self, appModel):
self.model = appModel
def process(self, event):
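        # Editorial summary (added comment): arrow keys step through images,
        # SPACE copies the current image and advances, CTRL switches the theme
        # (leaving fullscreen), RETURN toggles fullscreen, and ESC or a quit
        # event closes the window; every event is also forwarded to plugins.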
if event.type == pygame.QUIT:
self.model.closeWindow()
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
self.model.closeWindow()
if event.type == pygame.KEYUP and event.key == pygame.K_RIGHT:
self.model.getNextImage()
self.model.checkRotation()
if event.type == pygame.KEYUP and event.key == pygame.K_LEFT:
self.model.getPreviousImage()
self.model.checkRotation()
if event.type == pygame.KEYUP and (event.key == pygame.K_LCTRL or event.key == pygame.K_RCTRL):
self.model.renderer.setFullscreen(False)
self.model.changeTheme()
if event.type == pygame.KEYUP and event.key == pygame.K_SPACE:
self.model.copyImage()
self.model.getNextImage()
if event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
if self.model.renderer.isFullscreen:
self.model.renderer.setFullscreen(False)
else:
self.model.renderer.setFullscreen(True)
for plugin in self.model.pluginmanager.plugins:
try:
self.model.pluginmanager.plugins[plugin].onEvent(event, self.model)
except Exception, e:
self.model.logger.warn("Error while input for: " + plugin)
self.model.logger.warn(plugin + " -> " + str(e)) | gpl-2.0 | 5,375,138,786,331,119,000 | 32.755102 | 103 | 0.568058 | false |
luzpaz/QGIS | tests/src/python/test_qgsmessagelog.py | 15 | 3441 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsMessageLog.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '18/06/2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
import qgis # NOQA
from qgis.core import (Qgis,
QgsApplication,
QgsMessageLog,
QgsMessageLogNotifyBlocker)
from qgis.PyQt.QtTest import QSignalSpy
from qgis.testing import start_app, unittest
from utilities import (unitTestDataPath)
app = start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsMessageLog(unittest.TestCase):
def testSignals(self):
app_log = QgsApplication.messageLog()
# signals should be emitted by application log
app_spy = QSignalSpy(app_log.messageReceived)
app_spy_received = QSignalSpy(app_log.messageReceived[bool])
QgsMessageLog.logMessage('test', 'tag', Qgis.Info, notifyUser=True)
self.assertEqual(len(app_spy), 1)
self.assertEqual(app_spy[-1], ['test', 'tag', Qgis.Info])
# info message, so messageReceived(bool) should not be emitted
self.assertEqual(len(app_spy_received), 0)
QgsMessageLog.logMessage('test', 'tag', Qgis.Warning, notifyUser=True)
self.assertEqual(len(app_spy), 2)
self.assertEqual(app_spy[-1], ['test', 'tag', Qgis.Warning])
# warning message, so messageReceived(bool) should be emitted
self.assertEqual(len(app_spy_received), 1)
QgsMessageLog.logMessage('test', 'tag', Qgis.Warning, notifyUser=False)
self.assertEqual(len(app_spy), 3)
# notifyUser was False
self.assertEqual(len(app_spy_received), 1)
def testBlocker(self):
app_log = QgsApplication.messageLog()
spy = QSignalSpy(app_log.messageReceived)
spy_received = QSignalSpy(app_log.messageReceived[bool])
QgsMessageLog.logMessage('test', 'tag', Qgis.Warning, notifyUser=True)
self.assertEqual(len(spy), 1)
self.assertEqual(spy[-1], ['test', 'tag', Qgis.Warning])
self.assertEqual(len(spy_received), 1)
# block notifications
b = QgsMessageLogNotifyBlocker()
QgsMessageLog.logMessage('test', 'tag', Qgis.Warning, notifyUser=True)
self.assertEqual(len(spy), 2) # should not be blocked
self.assertEqual(len(spy_received), 1) # should be blocked
# another blocker
b2 = QgsMessageLogNotifyBlocker()
QgsMessageLog.logMessage('test', 'tag', Qgis.Warning, notifyUser=True)
self.assertEqual(len(spy), 3) # should not be blocked
self.assertEqual(len(spy_received), 1) # should be blocked
del b
# still blocked because of b2
QgsMessageLog.logMessage('test', 'tag', Qgis.Warning, notifyUser=True)
self.assertEqual(len(spy), 4) # should not be blocked
self.assertEqual(len(spy_received), 1) # should be blocked
del b2
# not blocked
QgsMessageLog.logMessage('test', 'tag', Qgis.Warning, notifyUser=True)
self.assertEqual(len(spy), 5) # should not be blocked
self.assertEqual(len(spy_received), 2) # should not be blocked
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 4,100,346,338,102,009,000 | 36.402174 | 79 | 0.657076 | false |
JioCloud/nova | nova/api/openstack/compute/contrib/extended_virtual_interfaces_net.py | 40 | 2107 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import network
authorize = extensions.soft_extension_authorizer('compute', 'extended_vif_net')
class ExtendedServerVIFNetController(wsgi.Controller):
def __init__(self):
super(ExtendedServerVIFNetController, self).__init__()
self.network_api = network.API()
@wsgi.extends
def index(self, req, resp_obj, server_id):
key = "%s:net_id" % Extended_virtual_interfaces_net.alias
context = req.environ['nova.context']
if authorize(context):
for vif in resp_obj.obj['virtual_interfaces']:
vif1 = self.network_api.get_vif_by_mac_address(context,
vif['mac_address'])
vif[key] = vif1['net_uuid']
class Extended_virtual_interfaces_net(extensions.ExtensionDescriptor):
"""Adds network id parameter to the virtual interface list."""
name = "ExtendedVIFNet"
alias = "OS-EXT-VIF-NET"
namespace = ("http://docs.openstack.org/compute/ext/"
"extended-virtual-interfaces-net/api/v1.1")
updated = "2013-03-07T00:00:00Z"
def get_controller_extensions(self):
controller = ExtendedServerVIFNetController()
extension = extensions.ControllerExtension(self,
'os-virtual-interfaces',
controller)
return [extension]
| apache-2.0 | -8,212,399,059,415,874,000 | 38.754717 | 79 | 0.638348 | false |
farazaftab/sjhschool | node_modules/node-gyp/gyp/pylib/gyp/input.py | 578 | 116086 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet
# A list of types that are treated as linkable.
linkable_types = [
'executable',
'shared_library',
'loadable_module',
'mac_kernel_extension',
]
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
'destination',
'files',
'include_dirs',
'inputs',
'libraries',
'outputs',
'sources',
]
path_sections = set()
# These per-process dictionaries are used to cache build file data when loading
# in parallel mode.
per_process_data = {}
per_process_aux_data = {}
def IsPathSection(section):
# If section ends in one of the '=+?!' characters, it's applied to a section
# without the trailing characters. '/' is notably absent from this list,
# because there's no way for a regular expression to be treated as a path.
while section and section[-1:] in '=+?!':
section = section[:-1]
if section in path_sections:
return True
  # Sections matching the regexp '_(dir|file|path)s?$' are also
# considered PathSections. Using manual string matching since that
# is much faster than the regexp and this can be called hundreds of
# thousands of times so micro performance matters.
if "_" in section:
tail = section[-6:]
if tail[-1] == 's':
tail = tail[:-1]
if tail[-5:] in ('_file', '_path'):
return True
return tail[-4:] == '_dir'
return False
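# Editorial examples (assuming path_sections has been populated from
# base_path_sections above): IsPathSection('sources!') is True once the
# trailing '!' is stripped, and names such as 'foo_dir' or 'foo_files' match
# the manual '_(dir|file|path)s?$' check without consulting the set at all.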
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
# Sections that must exist inside targets and not configurations.
'actions',
'configurations',
'copies',
'default_configuration',
'dependencies',
'dependencies_original',
'libraries',
'postbuilds',
'product_dir',
'product_extension',
'product_name',
'product_prefix',
'rules',
'run_as',
'sources',
'standalone_static_library',
'suppress_wildcard',
'target_name',
'toolset',
'toolsets',
'type',
# Sections that can be found inside targets or configurations, but that
# should not be propagated from targets into their configurations.
'variables',
]
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
'actions',
'all_dependent_settings',
'configurations',
'dependencies',
'direct_dependent_settings',
'libraries',
'link_settings',
'sources',
'standalone_static_library',
'target_name',
'type',
]
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
# Paths for converting filelist paths to output paths: {
# toplevel,
# qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
"""Return a list of all build files included into build_file_path.
The returned list will contain build_file_path as well as all other files
that it included, either directly or indirectly. Note that the list may
contain files that were included into a conditional section that evaluated
to false and was not merged into build_file_path's dict.
aux_data is a dict containing a key for each build file or included build
file. Those keys provide access to dicts whose "included" keys contain
lists of all other files included by the build file.
included should be left at its default None value by external callers. It
is used for recursion.
The returned list will not contain any duplicate entries. Each build file
in the list will be relative to the current directory.
"""
if included == None:
included = []
if build_file_path in included:
return included
included.append(build_file_path)
for included_build_file in aux_data[build_file_path].get('included', []):
GetIncludedBuildFiles(included_build_file, aux_data, included)
return included
def CheckedEval(file_contents):
"""Return the eval of a gyp file.
The gyp file is restricted to dictionaries and lists only, and
repeated keys are not allowed.
Note that this is slower than eval() is.
"""
ast = compiler.parse(file_contents)
assert isinstance(ast, Module)
c1 = ast.getChildren()
assert c1[0] is None
assert isinstance(c1[1], Stmt)
c2 = c1[1].getChildren()
assert isinstance(c2[0], Discard)
c3 = c2[0].getChildren()
assert len(c3) == 1
return CheckNode(c3[0], [])
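# Illustrative example (editorial, not part of gyp): CheckedEval accepts only
# dict/list/constant literals, so a minimal input round-trips to a plain dict:
#
#   CheckedEval("{'targets': [{'target_name': 'foo', 'type': 'none'}]}")
#   # -> {'targets': [{'target_name': 'foo', 'type': 'none'}]}
#
# A key repeated at the same level raises GypError via CheckNode below.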
def CheckNode(node, keypath):
if isinstance(node, Dict):
c = node.getChildren()
dict = {}
for n in range(0, len(c), 2):
assert isinstance(c[n], Const)
key = c[n].getChildren()[0]
if key in dict:
raise GypError("Key '" + key + "' repeated at level " +
repr(len(keypath) + 1) + " with key path '" +
'.'.join(keypath) + "'")
kp = list(keypath) # Make a copy of the list for descending this node.
kp.append(key)
dict[key] = CheckNode(c[n + 1], kp)
return dict
elif isinstance(node, List):
c = node.getChildren()
children = []
for index, child in enumerate(c):
kp = list(keypath) # Copy list.
kp.append(repr(index))
children.append(CheckNode(child, kp))
return children
elif isinstance(node, Const):
return node.getChildren()[0]
else:
raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
"': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, includes,
is_target, check):
if build_file_path in data:
return data[build_file_path]
if os.path.exists(build_file_path):
# Open the build file for read ('r') with universal-newlines mode ('U')
    # to make sure platform-specific newlines ('\r\n' or '\r') are converted to '\n',
    # which would otherwise make eval() fail
build_file_contents = open(build_file_path, 'rU').read()
else:
raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
build_file_data = None
try:
if check:
build_file_data = CheckedEval(build_file_contents)
else:
build_file_data = eval(build_file_contents, {'__builtins__': None},
None)
except SyntaxError, e:
e.filename = build_file_path
raise
except Exception, e:
gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
raise
if type(build_file_data) is not dict:
raise GypError("%s does not evaluate to a dictionary." % build_file_path)
data[build_file_path] = build_file_data
aux_data[build_file_path] = {}
# Scan for includes and merge them in.
if ('skip_includes' not in build_file_data or
not build_file_data['skip_includes']):
try:
if is_target:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, includes, check)
else:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, None, check)
except Exception, e:
gyp.common.ExceptionAppend(e,
'while reading includes of ' + build_file_path)
raise
return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
includes, check):
includes_list = []
if includes != None:
includes_list.extend(includes)
if 'includes' in subdict:
for include in subdict['includes']:
# "include" is specified relative to subdict_path, so compute the real
# path to include by appending the provided "include" to the directory
# in which subdict_path resides.
relative_include = \
os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
includes_list.append(relative_include)
# Unhook the includes list, it's no longer needed.
del subdict['includes']
# Merge in the included files.
for include in includes_list:
if not 'included' in aux_data[subdict_path]:
aux_data[subdict_path]['included'] = []
aux_data[subdict_path]['included'].append(include)
gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
MergeDicts(subdict,
LoadOneBuildFile(include, data, aux_data, None, False, check),
subdict_path, include)
# Recurse into subdictionaries.
for k, v in subdict.iteritems():
if type(v) is dict:
LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
None, check)
elif type(v) is list:
LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
for item in sublist:
if type(item) is dict:
LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data,
None, check)
elif type(item) is list:
LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
if 'targets' in data:
target_list = data['targets']
new_target_list = []
for target in target_list:
# If this target already has an explicit 'toolset', and no 'toolsets'
# list, don't modify it further.
if 'toolset' in target and 'toolsets' not in target:
new_target_list.append(target)
continue
if multiple_toolsets:
toolsets = target.get('toolsets', ['target'])
else:
toolsets = ['target']
# Make sure this 'toolsets' definition is only processed once.
if 'toolsets' in target:
del target['toolsets']
if len(toolsets) > 0:
# Optimization: only do copies if more than one toolset is specified.
for build in toolsets[1:]:
new_target = gyp.simple_copy.deepcopy(target)
new_target['toolset'] = build
new_target_list.append(new_target)
target['toolset'] = toolsets[0]
new_target_list.append(target)
data['targets'] = new_target_list
if 'conditions' in data:
for condition in data['conditions']:
if type(condition) is list:
for condition_dict in condition[1:]:
if type(condition_dict) is dict:
ProcessToolsetsInDict(condition_dict)
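# Illustrative example (editorial assumption): with multiple_toolsets enabled,
# a target declaring "toolsets": ["host", "target"] is expanded into two copies
# of the target, one carrying "toolset": "host" and one "toolset": "target".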
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
depth, check, load_dependencies):
# If depth is set, predefine the DEPTH variable to be a relative path from
# this build file's directory to the directory identified by depth.
if depth:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
if d == '':
variables['DEPTH'] = '.'
else:
variables['DEPTH'] = d.replace('\\', '/')
# The 'target_build_files' key is only set when loading target build files in
# the non-parallel code path, where LoadTargetBuildFile is called
# recursively. In the parallel code path, we don't need to check whether the
# |build_file_path| has already been loaded, because the 'scheduled' set in
# ParallelState guarantees that we never load the same |build_file_path|
# twice.
if 'target_build_files' in data:
if build_file_path in data['target_build_files']:
# Already loaded.
return False
data['target_build_files'].add(build_file_path)
gyp.DebugOutput(gyp.DEBUG_INCLUDES,
"Loading Target Build File '%s'", build_file_path)
build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
includes, True, check)
# Store DEPTH for later use in generators.
build_file_data['_DEPTH'] = depth
# Set up the included_files key indicating which .gyp files contributed to
# this target dict.
if 'included_files' in build_file_data:
raise GypError(build_file_path + ' must not contain included_files key')
included = GetIncludedBuildFiles(build_file_path, aux_data)
build_file_data['included_files'] = []
for included_file in included:
# included_file is relative to the current directory, but it needs to
# be made relative to build_file_path's directory.
included_relative = \
gyp.common.RelativePath(included_file,
os.path.dirname(build_file_path))
build_file_data['included_files'].append(included_relative)
# Do a first round of toolsets expansion so that conditions can be defined
# per toolset.
ProcessToolsetsInDict(build_file_data)
# Apply "pre"/"early" variable expansions and condition evaluations.
ProcessVariablesAndConditionsInDict(
build_file_data, PHASE_EARLY, variables, build_file_path)
# Since some toolsets might have been defined conditionally, perform
# a second round of toolsets expansion now.
ProcessToolsetsInDict(build_file_data)
# Look at each project's target_defaults dict, and merge settings into
# targets.
if 'target_defaults' in build_file_data:
if 'targets' not in build_file_data:
raise GypError("Unable to find targets in build file %s" %
build_file_path)
index = 0
while index < len(build_file_data['targets']):
# This procedure needs to give the impression that target_defaults is
# used as defaults, and the individual targets inherit from that.
# The individual targets need to be merged into the defaults. Make
# a deep copy of the defaults for each target, merge the target dict
# as found in the input file into that copy, and then hook up the
# copy with the target-specific data merged into it as the replacement
# target dict.
old_target_dict = build_file_data['targets'][index]
new_target_dict = gyp.simple_copy.deepcopy(
build_file_data['target_defaults'])
MergeDicts(new_target_dict, old_target_dict,
build_file_path, build_file_path)
build_file_data['targets'][index] = new_target_dict
index += 1
# No longer needed.
del build_file_data['target_defaults']
# Look for dependencies. This means that dependency resolution occurs
# after "pre" conditionals and variable expansion, but before "post" -
# in other words, you can't put a "dependencies" section inside a "post"
# conditional within a target.
dependencies = []
if 'targets' in build_file_data:
for target_dict in build_file_data['targets']:
if 'dependencies' not in target_dict:
continue
for dependency in target_dict['dependencies']:
dependencies.append(
gyp.common.ResolveTarget(build_file_path, dependency, None)[0])
if load_dependencies:
for dependency in dependencies:
try:
LoadTargetBuildFile(dependency, data, aux_data, variables,
includes, depth, check, load_dependencies)
except Exception, e:
gyp.common.ExceptionAppend(
e, 'while loading dependencies of %s' % build_file_path)
raise
else:
return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
build_file_path, variables,
includes, depth, check,
generator_input_info):
"""Wrapper around LoadTargetBuildFile for parallel processing.
This wrapper is used when LoadTargetBuildFile is executed in
a worker process.
"""
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Apply globals so that the worker process behaves the same.
for key, value in global_flags.iteritems():
globals()[key] = value
SetGeneratorGlobals(generator_input_info)
result = LoadTargetBuildFile(build_file_path, per_process_data,
per_process_aux_data, variables,
includes, depth, check, False)
if not result:
return result
(build_file_path, dependencies) = result
# We can safely pop the build_file_data from per_process_data because it
# will never be referenced by this process again, so we don't need to keep
# it in the cache.
build_file_data = per_process_data.pop(build_file_path)
# This gets serialized and sent back to the main process via a pipe.
# It's handled in LoadTargetBuildFileCallback.
return (build_file_path,
build_file_data,
dependencies)
except GypError, e:
sys.stderr.write("gyp: %s\n" % e)
return None
except Exception, e:
print >>sys.stderr, 'Exception:', e
print >>sys.stderr, traceback.format_exc()
return None
class ParallelProcessingError(Exception):
pass
class ParallelState(object):
"""Class to keep track of state when processing input files in parallel.
If build files are loaded in parallel, use this to keep track of
state during farming out and processing parallel jobs. It's stored
in a global so that the callback function can have access to it.
"""
def __init__(self):
# The multiprocessing pool.
self.pool = None
# The condition variable used to protect this object and notify
# the main loop when there might be more data to process.
self.condition = None
# The "data" dict that was passed to LoadTargetBuildFileParallel
self.data = None
    # The number of parallel calls outstanding; decremented when a response
    # is received.
self.pending = 0
# The set of all build files that have been scheduled, so we don't
# schedule the same one twice.
self.scheduled = set()
# A list of dependency build file paths that haven't been scheduled yet.
self.dependencies = []
# Flag to indicate if there was an error in a child process.
self.error = False
def LoadTargetBuildFileCallback(self, result):
"""Handle the results of running LoadTargetBuildFile in another process.
"""
self.condition.acquire()
if not result:
self.error = True
self.condition.notify()
self.condition.release()
return
(build_file_path0, build_file_data0, dependencies0) = result
self.data[build_file_path0] = build_file_data0
self.data['target_build_files'].add(build_file_path0)
for new_dependency in dependencies0:
if new_dependency not in self.scheduled:
self.scheduled.add(new_dependency)
self.dependencies.append(new_dependency)
self.pending -= 1
self.condition.notify()
self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info):
parallel_state = ParallelState()
parallel_state.condition = threading.Condition()
# Make copies of the build_files argument that we can modify while working.
parallel_state.dependencies = list(build_files)
parallel_state.scheduled = set(build_files)
parallel_state.pending = 0
parallel_state.data = data
try:
parallel_state.condition.acquire()
while parallel_state.dependencies or parallel_state.pending:
if parallel_state.error:
break
if not parallel_state.dependencies:
parallel_state.condition.wait()
continue
dependency = parallel_state.dependencies.pop()
parallel_state.pending += 1
global_flags = {
'path_sections': globals()['path_sections'],
'non_configuration_keys': globals()['non_configuration_keys'],
'multiple_toolsets': globals()['multiple_toolsets']}
if not parallel_state.pool:
parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
parallel_state.pool.apply_async(
CallLoadTargetBuildFile,
args = (global_flags, dependency,
variables, includes, depth, check, generator_input_info),
callback = parallel_state.LoadTargetBuildFileCallback)
except KeyboardInterrupt, e:
parallel_state.pool.terminate()
raise e
parallel_state.condition.release()
parallel_state.pool.close()
parallel_state.pool.join()
parallel_state.pool = None
if parallel_state.error:
sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple. For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS = set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
stack = []
start = -1
for index, char in enumerate(input_str):
if char in LBRACKETS:
stack.append(char)
if start == -1:
start = index
elif char in BRACKETS:
if not stack:
return (-1, -1)
if stack.pop() != BRACKETS[char]:
return (-1, -1)
if not stack:
return (start, index + 1)
return (-1, -1)
def IsStrCanonicalInt(string):
"""Returns True if |string| is in its canonical integer form.
The canonical form is such that str(int(string)) == string.
"""
if type(string) is str:
# This function is called a lot so for maximum performance, avoid
# involving regexps which would otherwise make the code much
# shorter. Regexps would need twice the time of this function.
if string:
if string == "0":
return True
if string[0] == "-":
string = string[1:]
if not string:
return False
if '1' <= string[0] <= '9':
return string.isdigit()
return False
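# For instance (illustrative, not exhaustive): IsStrCanonicalInt("0"),
# IsStrCanonicalInt("-5") and IsStrCanonicalInt("42") return True, while
# IsStrCanonicalInt("05"), IsStrCanonicalInt("+7") and IsStrCanonicalInt("1.0")
# return False, because str(int(x)) would not reproduce those strings.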
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
early_variable_re = re.compile(
r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# Global cache of results from running commands so they don't have to be run
# more than once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
if sys.platform == 'win32':
if type(cmd) is list:
cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
else:
cmd = re.sub('^cat ', 'type ', cmd)
return cmd
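# For example (illustrative command): on win32, FixupPlatformCommand('cat foo.txt')
# returns 'type foo.txt'; on other platforms the command is returned unchanged.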
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
# Look for the pattern that gets expanded into variables
if phase == PHASE_EARLY:
variable_re = early_variable_re
expansion_symbol = '<'
elif phase == PHASE_LATE:
variable_re = late_variable_re
expansion_symbol = '>'
elif phase == PHASE_LATELATE:
variable_re = latelate_variable_re
expansion_symbol = '^'
else:
assert False
input_str = str(input)
if IsStrCanonicalInt(input_str):
return int(input_str)
# Do a quick scan to determine if an expensive regex search is warranted.
if expansion_symbol not in input_str:
return input_str
# Get the entire list of matches as a list of MatchObject instances.
# (using findall here would return strings instead of MatchObjects).
matches = list(variable_re.finditer(input_str))
if not matches:
return input_str
output = input_str
# Reverse the list of matches so that replacements are done right-to-left.
# That ensures that earlier replacements won't mess up the string in a
# way that causes later calls to find the earlier substituted text instead
# of what's intended for replacement.
matches.reverse()
for match_group in matches:
match = match_group.groupdict()
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
# match['replace'] is the substring to look for, match['type']
# is the character code for the replacement type (< > <! >! <| >| <@
# >@ <!@ >!@), match['is_array'] contains a '[' for command
# arrays, and match['content'] is the name of the variable (< >)
# or command to run (<! >!). match['command_string'] is an optional
# command string. Currently, only 'pymod_do_main' is supported.
# run_command is true if a ! variant is used.
run_command = '!' in match['type']
command_string = match['command_string']
# file_list is true if a | variant is used.
file_list = '|' in match['type']
# Capture these now so we can adjust them later.
replace_start = match_group.start('replace')
replace_end = match_group.end('replace')
# Find the ending paren, and re-evaluate the contained string.
(c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
# Adjust the replacement range to match the entire command
# found by FindEnclosingBracketGroup (since the variable_re
# probably doesn't match the entire command if it contained
# nested variables).
replace_end = replace_start + c_end
# Find the "real" replacement, matching the appropriate closing
# paren, and adjust the replacement start and end.
replacement = input_str[replace_start:replace_end]
# Figure out what the contents of the variable parens are.
contents_start = replace_start + c_start + 1
contents_end = replace_end - 1
contents = input_str[contents_start:contents_end]
# Do filter substitution now for <|().
# Admittedly, this is different than the evaluation order in other
# contexts. However, since filtration has no chance to run on <|(),
# this seems like the only obvious way to give them access to filters.
if file_list:
processed_variables = gyp.simple_copy.deepcopy(variables)
ProcessListFiltersInDict(contents, processed_variables)
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase,
processed_variables, build_file)
else:
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase, variables, build_file)
# Strip off leading/trailing whitespace so that variable matches are
# simpler below (and because they are rarely needed).
contents = contents.strip()
# expand_to_list is true if an @ variant is used. In that case,
    # the expansion should result in a list. Note that the caller must be
    # expecting a list in return; not all callers do, because not all are
    # working in list context. Also, for list
# expansions, there can be no other text besides the variable
# expansion in the input string.
expand_to_list = '@' in match['type'] and input_str == replacement
if run_command or file_list:
# Find the build file's directory, so commands can be run or file lists
# generated relative to it.
build_file_dir = os.path.dirname(build_file)
if build_file_dir == '' and not file_list:
# If build_file is just a leaf filename indicating a file in the
# current directory, build_file_dir might be an empty string. Set
# it to None to signal to subprocess.Popen that it should run the
# command in the current directory.
build_file_dir = None
# Support <|(listfile.txt ...) which generates a file
# containing items from a gyp list, generated at gyp time.
# This works around actions/rules which have more inputs than will
# fit on the command line.
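      # For instance (illustrative, hypothetical file name): '<|(files.txt a b c)'
      # writes "a", "b" and "c" to files.txt, one item per line, and expands to
      # the path of files.txt relative to the build file's directory.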
if file_list:
if type(contents) is list:
contents_list = contents
else:
contents_list = contents.split(' ')
replacement = contents_list[0]
if os.path.isabs(replacement):
raise GypError('| cannot handle absolute paths, got "%s"' % replacement)
if not generator_filelist_paths:
path = os.path.join(build_file_dir, replacement)
else:
if os.path.isabs(build_file_dir):
toplevel = generator_filelist_paths['toplevel']
rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
else:
rel_build_file_dir = build_file_dir
qualified_out_dir = generator_filelist_paths['qualified_out_dir']
path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
gyp.common.EnsureDirExists(path)
replacement = gyp.common.RelativePath(path, build_file_dir)
f = gyp.common.WriteOnDiff(path)
for i in contents_list[1:]:
f.write('%s\n' % i)
f.close()
elif run_command:
use_shell = True
if match['is_array']:
contents = eval(contents)
use_shell = False
# Check for a cached value to avoid executing commands, or generating
# file lists more than once. The cache key contains the command to be
# run as well as the directory to run it from, to account for commands
# that depend on their current directory.
# TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
# someone could author a set of GYP files where each time the command
# is invoked it produces different output by design. When the need
        # arises, the syntax should be extended to support disabling caching
        # of a command's output so it is run every time.
cache_key = (str(contents), build_file_dir)
cached_value = cached_command_results.get(cache_key, None)
if cached_value is None:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Executing command '%s' in directory '%s'",
contents, build_file_dir)
replacement = ''
if command_string == 'pymod_do_main':
# <!pymod_do_main(modulename param eters) loads |modulename| as a
# python module and then calls that module's DoMain() function,
# passing ["param", "eters"] as a single list argument. For modules
# that don't load quickly, this can be faster than
# <!(python modulename param eters). Do this in |build_file_dir|.
oldwd = os.getcwd() # Python doesn't like os.open('.'): no fchdir.
if build_file_dir: # build_file_dir may be None (see above).
os.chdir(build_file_dir)
try:
parsed_contents = shlex.split(contents)
try:
py_module = __import__(parsed_contents[0])
except ImportError as e:
raise GypError("Error importing pymod_do_main"
"module (%s): %s" % (parsed_contents[0], e))
replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
finally:
os.chdir(oldwd)
assert replacement != None
elif command_string:
raise GypError("Unknown command string '%s' in '%s'." %
(command_string, contents))
else:
# Fix up command with platform specific workarounds.
contents = FixupPlatformCommand(contents)
try:
p = subprocess.Popen(contents, shell=use_shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=build_file_dir)
except Exception, e:
raise GypError("%s while executing command '%s' in %s" %
(e, contents, build_file))
p_stdout, p_stderr = p.communicate('')
if p.wait() != 0 or p_stderr:
sys.stderr.write(p_stderr)
# Simulate check_call behavior, since check_call only exists
# in python 2.5 and later.
raise GypError("Call to '%s' returned exit status %d while in %s." %
(contents, p.returncode, build_file))
replacement = p_stdout.rstrip()
cached_command_results[cache_key] = replacement
else:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Had cache value for command '%s' in directory '%s'",
                          contents, build_file_dir)
replacement = cached_value
else:
if not contents in variables:
if contents[-1] in ['!', '/']:
# In order to allow cross-compiles (nacl) to happen more naturally,
# we will allow references to >(sources/) etc. to resolve to
          # an empty list if undefined. This allows actions to:
# 'action!': [
# '>@(_sources!)',
# ],
# 'action/': [
# '>@(_sources/)',
# ],
replacement = []
else:
raise GypError('Undefined variable ' + contents +
' in ' + build_file)
else:
replacement = variables[contents]
if type(replacement) is list:
for item in replacement:
if not contents[-1] == '/' and type(item) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'list contains a ' +
item.__class__.__name__)
# Run through the list and handle variable expansions in it. Since
# the list is guaranteed not to contain dicts, this won't do anything
# with conditions sections.
ProcessVariablesAndConditionsInList(replacement, phase, variables,
build_file)
elif type(replacement) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'found a ' + replacement.__class__.__name__)
if expand_to_list:
# Expanding in list context. It's guaranteed that there's only one
# replacement to do in |input_str| and that it's this replacement. See
# above.
if type(replacement) is list:
# If it's already a list, make a copy.
output = replacement[:]
else:
# Split it the same way sh would split arguments.
output = shlex.split(str(replacement))
else:
# Expanding in string context.
encoded_replacement = ''
if type(replacement) is list:
# When expanding a list into string context, turn the list items
# into a string in a way that will work with a subprocess call.
#
# TODO(mark): This isn't completely correct. This should
# call a generator-provided function that observes the
# proper list-to-argument quoting rules on a specific
# platform instead of just calling the POSIX encoding
# routine.
encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
else:
encoded_replacement = replacement
output = output[:replace_start] + str(encoded_replacement) + \
output[replace_end:]
# Prepare for the next match iteration.
input_str = output
if output == input:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Found only identity matches on %r, avoiding infinite "
"recursion.",
output)
else:
# Look for more matches now that we've replaced some, to deal with
# expanding local variables (variables defined in the same
# variables block as this one).
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
if type(output) is list:
if output and type(output[0]) is list:
# Leave output alone if it's a list of lists.
# We don't want such lists to be stringified.
pass
else:
new_output = []
for item in output:
new_output.append(
ExpandVariables(item, phase, variables, build_file))
output = new_output
else:
output = ExpandVariables(output, phase, variables, build_file)
# Convert all strings that are canonically-represented integers into integers.
if type(output) is list:
for index in xrange(0, len(output)):
if IsStrCanonicalInt(output[index]):
output[index] = int(output[index])
elif IsStrCanonicalInt(output):
output = int(output)
return output
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
cached_conditions_asts = {}
def EvalCondition(condition, conditions_key, phase, variables, build_file):
"""Returns the dict that should be used or None if the result was
that nothing should be used."""
if type(condition) is not list:
raise GypError(conditions_key + ' must be a list')
if len(condition) < 2:
# It's possible that condition[0] won't work in which case this
# attempt will raise its own IndexError. That's probably fine.
raise GypError(conditions_key + ' ' + condition[0] +
' must be at least length 2, not ' + str(len(condition)))
i = 0
result = None
while i < len(condition):
cond_expr = condition[i]
true_dict = condition[i + 1]
if type(true_dict) is not dict:
raise GypError('{} {} must be followed by a dictionary, not {}'.format(
conditions_key, cond_expr, type(true_dict)))
if len(condition) > i + 2 and type(condition[i + 2]) is dict:
false_dict = condition[i + 2]
i = i + 3
if i != len(condition):
raise GypError('{} {} has {} unexpected trailing items'.format(
conditions_key, cond_expr, len(condition) - i))
else:
false_dict = None
i = i + 2
if result == None:
result = EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file)
return result
def EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file):
"""Returns true_dict if cond_expr evaluates to true, and false_dict
otherwise."""
  # Do expansions on the condition itself. Since the condition can naturally
# contain variable references without needing to resort to GYP expansion
# syntax, this is of dubious value for variables, but someone might want to
# use a command expansion directly inside a condition.
cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
build_file)
if type(cond_expr_expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + cond_expr_expanded.__class__.__name__)
try:
if cond_expr_expanded in cached_conditions_asts:
ast_code = cached_conditions_asts[cond_expr_expanded]
else:
ast_code = compile(cond_expr_expanded, '<string>', 'eval')
cached_conditions_asts[cond_expr_expanded] = ast_code
if eval(ast_code, {'__builtins__': None}, variables):
return true_dict
return false_dict
except SyntaxError, e:
syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
'at character %d.' %
(str(e.args[0]), e.text, build_file, e.offset),
e.filename, e.lineno, e.offset, e.text)
raise syntax_error
except NameError, e:
gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
(cond_expr_expanded, build_file))
raise GypError(e)
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
# Process a 'conditions' or 'target_conditions' section in the_dict,
# depending on phase.
# early -> conditions
# late -> target_conditions
# latelate -> no conditions
#
# Each item in a conditions list consists of cond_expr, a string expression
# evaluated as the condition, and true_dict, a dict that will be merged into
# the_dict if cond_expr evaluates to true. Optionally, a third item,
# false_dict, may be present. false_dict is merged into the_dict if
# cond_expr evaluates to false.
#
# Any dict merged into the_dict will be recursively processed for nested
# conditionals and other expansions, also according to phase, immediately
# prior to being merged.
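  #
  # For example (illustrative values):
  #   'conditions': [
  #     ['os=="mac"', {'defines': ['MAC']}, {'defines': ['NOT_MAC']}],
  #   ],
  # merges the first dict into the_dict when os evaluates to "mac" and the
  # second dict otherwise.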
if phase == PHASE_EARLY:
conditions_key = 'conditions'
elif phase == PHASE_LATE:
conditions_key = 'target_conditions'
elif phase == PHASE_LATELATE:
return
else:
assert False
if not conditions_key in the_dict:
return
conditions_list = the_dict[conditions_key]
# Unhook the conditions list, it's no longer needed.
del the_dict[conditions_key]
for condition in conditions_list:
merge_dict = EvalCondition(condition, conditions_key, phase, variables,
build_file)
if merge_dict != None:
      # Expand variables and nested conditionals in the merge_dict before
# merging it.
ProcessVariablesAndConditionsInDict(merge_dict, phase,
variables, build_file)
MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
# Any keys with plain string values in the_dict become automatic variables.
# The variable name is the key name with a "_" character prepended.
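  # For example (illustrative): a dict containing {'target_name': 'foo'}
  # yields an automatic variable '_target_name' with the value 'foo'.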
for key, value in the_dict.iteritems():
if type(value) in (str, int, list):
variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
  # Any key in the_dict's "variables" dict, if it has one, becomes a
# variable. The variable name is the key name in the "variables" dict.
# Variables that end with the % character are set only if they are unset in
# the variables dict. the_dict_key is the name of the key that accesses
# the_dict in the_dict's parent dict. If the_dict's parent is not a dict
# (it could be a list or it could be parentless because it is a root dict),
# the_dict_key will be None.
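  # For example (illustrative, hypothetical name): 'variables': {'use_foo%': 0}
  # sets a 'use_foo' variable to 0 only if 'use_foo' is not already present in
  # |variables|; an existing definition takes precedence.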
for key, value in the_dict.get('variables', {}).iteritems():
if type(value) not in (str, int, list):
continue
if key.endswith('%'):
variable_name = key[:-1]
if variable_name in variables:
# If the variable is already set, don't set it.
continue
      if the_dict_key == 'variables' and variable_name in the_dict:
# If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
# variables dict), use the_dict's definition.
value = the_dict[variable_name]
else:
variable_name = key
variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
build_file, the_dict_key=None):
"""Handle all variable and command expansion and conditional evaluation.
This function is the public entry point for all variable expansions and
conditional evaluations. The variables_in dictionary will not be modified
by this function.
"""
# Make a copy of the variables_in dict that can be modified during the
# loading of automatics and the loading of the variables dict.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
if 'variables' in the_dict:
# Make sure all the local variables are added to the variables
# list before we process them so that you can reference one
# variable from another. They will be fully expanded by recursion
# in ExpandVariables.
for key, value in the_dict['variables'].iteritems():
variables[key] = value
# Handle the associated variables dict first, so that any variable
# references within can be resolved prior to using them as variables.
# Pass a copy of the variables dict to avoid having it be tainted.
# Otherwise, it would have extra automatics added for everything that
# should just be an ordinary variable in this scope.
ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
variables, build_file, 'variables')
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
for key, value in the_dict.iteritems():
# Skip "variables", which was already processed if present.
if key != 'variables' and type(value) is str:
expanded = ExpandVariables(value, phase, variables, build_file)
if type(expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + expanded.__class__.__name__ + ' for ' + key)
the_dict[key] = expanded
# Variable expansion may have resulted in changes to automatics. Reload.
# TODO(mark): Optimization: only reload if no changes were made.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Process conditions in this dict. This is done after variable expansion
# so that conditions may take advantage of expanded variables. For example,
# if the_dict contains:
# {'type': '<(library_type)',
# 'conditions': [['_type=="static_library"', { ... }]]},
# _type, as used in the condition, will only be set to the value of
# library_type if variable expansion is performed before condition
# processing. However, condition processing should occur prior to recursion
# so that variables (both automatic and "variables" dict type) may be
# adjusted by conditions sections, merged into the_dict, and have the
# intended impact on contained dicts.
#
# This arrangement means that a "conditions" section containing a "variables"
# section will only have those variables effective in subdicts, not in
# the_dict. The workaround is to put a "conditions" section within a
# "variables" section. For example:
# {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will not result in "IS_MAC" being appended to the "defines" list in the
# current scope but would result in it being appended to the "defines" list
# within "my_subdict". By comparison:
# {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will append "IS_MAC" to both "defines" lists.
# Evaluate conditions sections, allowing variable expansions within them
# as well as nested conditionals. This will process a 'conditions' or
# 'target_conditions' section, perform appropriate merging and recursive
# conditional and variable processing, and then remove the conditions section
# from the_dict if it is present.
ProcessConditionsInDict(the_dict, phase, variables, build_file)
# Conditional processing may have resulted in changes to automatics or the
# variables dict. Reload.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Recurse into child dicts, or process child lists which may result in
# further recursion into descendant dicts.
for key, value in the_dict.iteritems():
# Skip "variables" and string values, which were already processed if
# present.
if key == 'variables' or type(value) is str:
continue
if type(value) is dict:
# Pass a copy of the variables dict so that subdicts can't influence
# parents.
ProcessVariablesAndConditionsInDict(value, phase, variables,
build_file, key)
elif type(value) is list:
# The list itself can't influence the variables dict, and
# ProcessVariablesAndConditionsInList will make copies of the variables
# dict if it needs to pass it to something that can influence it. No
# copy is necessary here.
ProcessVariablesAndConditionsInList(value, phase, variables,
build_file)
elif type(value) is not int:
raise TypeError('Unknown type ' + value.__class__.__name__ + \
' for ' + key)
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
build_file):
# Iterate using an index so that new values can be assigned into the_list.
index = 0
while index < len(the_list):
item = the_list[index]
if type(item) is dict:
# Make a copy of the variables dict so that it won't influence anything
# outside of its own scope.
ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
elif type(item) is list:
ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
elif type(item) is str:
expanded = ExpandVariables(item, phase, variables, build_file)
if type(expanded) in (str, int):
the_list[index] = expanded
elif type(expanded) is list:
the_list[index:index+1] = expanded
index += len(expanded)
# index now identifies the next item to examine. Continue right now
# without falling into the index increment below.
continue
else:
raise ValueError(
'Variable expansion in this context permits strings and ' + \
'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
            str(index))
elif type(item) is not int:
raise TypeError('Unknown type ' + item.__class__.__name__ + \
                      ' at index ' + str(index))
index = index + 1
def BuildTargetsDict(data):
"""Builds a dict mapping fully-qualified target names to their target dicts.
|data| is a dict mapping loaded build files by pathname relative to the
current directory. Values in |data| are build file contents. For each
|data| value with a "targets" key, the value of the "targets" key is taken
as a list containing target dicts. Each target's fully-qualified name is
constructed from the pathname of the build file (|data| key) and its
"target_name" property. These fully-qualified names are used as the keys
in the returned dict. These keys provide access to the target dicts,
the dicts in the "targets" lists.
"""
targets = {}
for build_file in data['target_build_files']:
for target in data[build_file].get('targets', []):
target_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if target_name in targets:
raise GypError('Duplicate target definitions for ' + target_name)
targets[target_name] = target
return targets
def QualifyDependencies(targets):
"""Make dependency links fully-qualified relative to the current directory.
|targets| is a dict mapping fully-qualified target names to their target
dicts. For each target in this dict, keys known to contain dependency
links are examined, and any dependencies referenced will be rewritten
so that they are fully-qualified and relative to the current directory.
All rewritten dependencies are suitable for use as keys to |targets| or a
similar dict.
"""
all_dependency_sections = [dep + op
for dep in dependency_sections
for op in ('', '!', '/')]
for target, target_dict in targets.iteritems():
target_build_file = gyp.common.BuildFile(target)
toolset = target_dict['toolset']
for dependency_key in all_dependency_sections:
dependencies = target_dict.get(dependency_key, [])
for index in xrange(0, len(dependencies)):
dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
target_build_file, dependencies[index], toolset)
if not multiple_toolsets:
# Ignore toolset specification in the dependency if it is specified.
dep_toolset = toolset
dependency = gyp.common.QualifiedTarget(dep_file,
dep_target,
dep_toolset)
dependencies[index] = dependency
# Make sure anything appearing in a list other than "dependencies" also
# appears in the "dependencies" list.
if dependency_key != 'dependencies' and \
dependency not in target_dict['dependencies']:
raise GypError('Found ' + dependency + ' in ' + dependency_key +
' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
"""Expands dependencies specified as build_file:*.
For each target in |targets|, examines sections containing links to other
targets. If any such section contains a link of the form build_file:*, it
is taken as a wildcard link, and is expanded to list each target in
build_file. The |data| dict provides access to build file dicts.
Any target that does not wish to be included by wildcard can provide an
optional "suppress_wildcard" key in its target dict. When present and
true, a wildcard dependency link will not include such targets.
All dependency names, including the keys to |targets| and the values in each
dependency list, must be qualified when this function is called.
"""
for target, target_dict in targets.iteritems():
toolset = target_dict['toolset']
target_build_file = gyp.common.BuildFile(target)
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
# Loop this way instead of "for dependency in" or "for index in xrange"
# because the dependencies list will be modified within the loop body.
index = 0
while index < len(dependencies):
(dependency_build_file, dependency_target, dependency_toolset) = \
gyp.common.ParseQualifiedTarget(dependencies[index])
if dependency_target != '*' and dependency_toolset != '*':
# Not a wildcard. Keep it moving.
index = index + 1
continue
if dependency_build_file == target_build_file:
# It's an error for a target to depend on all other targets in
# the same file, because a target cannot depend on itself.
raise GypError('Found wildcard in ' + dependency_key + ' of ' +
target + ' referring to same build file')
# Take the wildcard out and adjust the index so that the next
# dependency in the list will be processed the next time through the
# loop.
del dependencies[index]
index = index - 1
# Loop through the targets in the other build file, adding them to
# this target's list of dependencies in place of the removed
# wildcard.
dependency_target_dicts = data[dependency_build_file]['targets']
for dependency_target_dict in dependency_target_dicts:
if int(dependency_target_dict.get('suppress_wildcard', False)):
continue
dependency_target_name = dependency_target_dict['target_name']
if (dependency_target != '*' and
dependency_target != dependency_target_name):
continue
dependency_target_toolset = dependency_target_dict['toolset']
if (dependency_toolset != '*' and
dependency_toolset != dependency_target_toolset):
continue
dependency = gyp.common.QualifiedTarget(dependency_build_file,
dependency_target_name,
dependency_target_toolset)
index = index + 1
dependencies.insert(index, dependency)
index = index + 1
def Unify(l):
"""Removes duplicate elements from l, keeping the first element."""
seen = {}
return [seen.setdefault(e, e) for e in l if e not in seen]
def RemoveDuplicateDependencies(targets):
"""Makes sure every dependency appears only once in all targets's dependency
lists."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
target_dict[dependency_key] = Unify(dependencies)
def Filter(l, item):
"""Removes item from l."""
res = {}
return [res.setdefault(e, e) for e in l if e != item]
def RemoveSelfDependencies(targets):
"""Remove self dependencies from targets that have the prune_self_dependency
variable set."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if t == target_name:
if targets[t].get('variables', {}).get('prune_self_dependency', 0):
target_dict[dependency_key] = Filter(dependencies, target_name)
def RemoveLinkDependenciesFromNoneTargets(targets):
"""Remove dependencies having the 'link_dependency' attribute from the 'none'
targets."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if target_dict.get('type', None) == 'none':
if targets[t].get('variables', {}).get('link_dependency', 0):
target_dict[dependency_key] = \
Filter(target_dict[dependency_key], t)
class DependencyGraphNode(object):
"""
Attributes:
ref: A reference to an object that this DependencyGraphNode represents.
dependencies: List of DependencyGraphNodes on which this one depends.
dependents: List of DependencyGraphNodes that depend on this one.
"""
class CircularException(GypError):
pass
def __init__(self, ref):
self.ref = ref
self.dependencies = []
self.dependents = []
def __repr__(self):
return '<DependencyGraphNode: %r>' % self.ref
def FlattenToList(self):
# flat_list is the sorted list of dependencies - actually, the list items
# are the "ref" attributes of DependencyGraphNodes. Every target will
# appear in flat_list after all of its dependencies, and before all of its
# dependents.
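    # For example (illustrative): if A depends on B and B depends on C, the
    # resulting order is [C, B, A].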
flat_list = OrderedSet()
# in_degree_zeros is the list of DependencyGraphNodes that have no
# dependencies not in flat_list. Initially, it is a copy of the children
# of this node, because when the graph was built, nodes with no
# dependencies were made implicit dependents of the root node.
in_degree_zeros = set(self.dependents[:])
while in_degree_zeros:
# Nodes in in_degree_zeros have no dependencies not in flat_list, so they
# can be appended to flat_list. Take these nodes out of in_degree_zeros
# as work progresses, so that the next node to process from the list can
# always be accessed at a consistent position.
node = in_degree_zeros.pop()
flat_list.add(node.ref)
# Look at dependents of the node just added to flat_list. Some of them
# may now belong in in_degree_zeros.
for node_dependent in node.dependents:
is_in_degree_zero = True
# TODO: We want to check through the
# node_dependent.dependencies list but if it's long and we
# always start at the beginning, then we get O(n^2) behaviour.
for node_dependent_dependency in node_dependent.dependencies:
if not node_dependent_dependency.ref in flat_list:
            # The dependent has one or more dependencies not in flat_list. There
# will be more chances to add it to flat_list when examining
# it again as a dependent of those other dependencies, provided
# that there are no cycles.
is_in_degree_zero = False
break
if is_in_degree_zero:
# All of the dependent's dependencies are already in flat_list. Add
# it to in_degree_zeros where it will be processed in a future
# iteration of the outer loop.
in_degree_zeros.add(node_dependent)
return list(flat_list)
def FindCycles(self):
"""
Returns a list of cycles in the graph, where each cycle is its own list.
"""
results = []
visited = set()
def Visit(node, path):
for child in node.dependents:
if child in path:
results.append([child] + path[:path.index(child) + 1])
elif not child in visited:
visited.add(child)
Visit(child, [child] + path)
visited.add(self)
Visit(self, [self])
return results
def DirectDependencies(self, dependencies=None):
"""Returns a list of just direct dependencies."""
if dependencies == None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref != None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
return dependencies
def _AddImportedDependencies(self, targets, dependencies=None):
"""Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings.
This method does not operate on self. Rather, it operates on the list
of dependencies in the |dependencies| argument. For each dependency in
that list, if any declares that it exports the settings of one of its
own dependencies, those dependencies whose settings are "passed through"
are added to the list. As new items are added to the list, they too will
be processed, so it is possible to import settings through multiple levels
of dependencies.
    This method is not terribly useful on its own; it depends on being
"primed" with a list of direct dependencies such as one provided by
DirectDependencies. DirectAndImportedDependencies is intended to be the
public entry point.
"""
if dependencies == None:
dependencies = []
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Add any dependencies whose settings should be imported to the list
# if not already present. Newly-added items will be checked for
# their own imports when the list iteration reaches them.
# Rather than simply appending new items, insert them after the
# dependency that exported them. This is done to more closely match
# the depth-first method used by DeepDependencies.
add_index = 1
for imported_dependency in \
dependency_dict.get('export_dependent_settings', []):
if imported_dependency not in dependencies:
dependencies.insert(index + add_index, imported_dependency)
add_index = add_index + 1
index = index + 1
return dependencies
def DirectAndImportedDependencies(self, targets, dependencies=None):
"""Returns a list of a target's direct dependencies and all indirect
dependencies that a dependency has advertised settings should be exported
through the dependency for.
"""
dependencies = self.DirectDependencies(dependencies)
return self._AddImportedDependencies(targets, dependencies)
def DeepDependencies(self, dependencies=None):
"""Returns an OrderedSet of all of a target's dependencies, recursively."""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref is None:
continue
if dependency.ref not in dependencies:
dependency.DeepDependencies(dependencies)
dependencies.add(dependency.ref)
return dependencies
def _LinkDependenciesInternal(self, targets, include_shared_libraries,
dependencies=None, initial=True):
"""Returns an OrderedSet of dependency targets that are linked
into this target.
This function has a split personality, depending on the setting of
|initial|. Outside callers should always leave |initial| at its default
setting.
When adding a target to the list of dependencies, this function will
recurse into itself with |initial| set to False, to collect dependencies
that are linked into the linkable target for which the list is being built.
If |include_shared_libraries| is False, the resulting dependencies will not
include shared_library targets that are linked into this target.
"""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
# Check for None, corresponding to the root node.
if self.ref is None:
return dependencies
# It's kind of sucky that |targets| has to be passed into this function,
# but that's presently the easiest way to access the target dicts so that
# this function can find target types.
if 'target_name' not in targets[self.ref]:
raise GypError("Missing 'target_name' field in target.")
if 'type' not in targets[self.ref]:
raise GypError("Missing 'type' field in target %s" %
targets[self.ref]['target_name'])
target_type = targets[self.ref]['type']
is_linkable = target_type in linkable_types
if initial and not is_linkable:
# If this is the first target being examined and it's not linkable,
# return an empty list of link dependencies, because the link
# dependencies are intended to apply to the target itself (initial is
# True) and this target won't be linked.
return dependencies
# Don't traverse 'none' targets if explicitly excluded.
if (target_type == 'none' and
not targets[self.ref].get('dependencies_traverse', True)):
dependencies.add(self.ref)
return dependencies
# Executables, mac kernel extensions and loadable modules are already fully
# and finally linked. Nothing else can be a link dependency of them, there
# can only be dependencies in the sense that a dependent target might run
# an executable or load the loadable_module.
if not initial and target_type in ('executable', 'loadable_module',
'mac_kernel_extension'):
return dependencies
# Shared libraries are already fully linked. They should only be included
# in |dependencies| when adjusting static library dependencies (in order to
# link against the shared_library's import lib), but should not be included
# in |dependencies| when propagating link_settings.
# The |include_shared_libraries| flag controls which of these two cases we
# are handling.
if (not initial and target_type == 'shared_library' and
not include_shared_libraries):
return dependencies
# The target is linkable, add it to the list of link dependencies.
if self.ref not in dependencies:
dependencies.add(self.ref)
if initial or not is_linkable:
# If this is a subsequent target and it's linkable, don't look any
# further for linkable dependencies, as they'll already be linked into
      # this linkable target. Always look at dependencies of the initial
# target, and always look at dependencies of non-linkables.
for dependency in self.dependencies:
dependency._LinkDependenciesInternal(targets,
include_shared_libraries,
dependencies, False)
return dependencies
def DependenciesForLinkSettings(self, targets):
"""
Returns a list of dependency targets whose link_settings should be merged
into this target.
"""
# TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
# link_settings are propagated. So for now, we will allow it, unless the
# 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
# False. Once chrome is fixed, we can remove this flag.
include_shared_libraries = \
targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
return self._LinkDependenciesInternal(targets, include_shared_libraries)
def DependenciesToLinkAgainst(self, targets):
"""
Returns a list of dependency targets that are linked into this target.
"""
return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
# Create a DependencyGraphNode for each target. Put it into a dict for easy
# access.
dependency_nodes = {}
for target, spec in targets.iteritems():
if target not in dependency_nodes:
dependency_nodes[target] = DependencyGraphNode(target)
# Set up the dependency links. Targets that have no dependencies are treated
# as dependent on root_node.
root_node = DependencyGraphNode(None)
for target, spec in targets.iteritems():
target_node = dependency_nodes[target]
target_build_file = gyp.common.BuildFile(target)
dependencies = spec.get('dependencies')
if not dependencies:
target_node.dependencies = [root_node]
root_node.dependents.append(target_node)
else:
for dependency in dependencies:
dependency_node = dependency_nodes.get(dependency)
if not dependency_node:
raise GypError("Dependency '%s' not found while "
"trying to load target %s" % (dependency, target))
target_node.dependencies.append(dependency_node)
dependency_node.dependents.append(target_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(targets):
if not root_node.dependents:
# If all targets have dependencies, add the first target as a dependent
# of root_node so that the cycle can be discovered from root_node.
target = targets.keys()[0]
target_node = dependency_nodes[target]
target_node.dependencies.append(root_node)
root_node.dependents.append(target_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in dependency graph detected:\n' + '\n'.join(cycles))
return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
# Create a DependencyGraphNode for each gyp file containing a target. Put
# it into a dict for easy access.
dependency_nodes = {}
for target in targets.iterkeys():
build_file = gyp.common.BuildFile(target)
if not build_file in dependency_nodes:
dependency_nodes[build_file] = DependencyGraphNode(build_file)
# Set up the dependency links.
for target, spec in targets.iteritems():
build_file = gyp.common.BuildFile(target)
build_file_node = dependency_nodes[build_file]
target_dependencies = spec.get('dependencies', [])
for dependency in target_dependencies:
try:
dependency_build_file = gyp.common.BuildFile(dependency)
except GypError, e:
gyp.common.ExceptionAppend(
e, 'while computing dependencies of .gyp file %s' % build_file)
raise
if dependency_build_file == build_file:
# A .gyp file is allowed to refer back to itself.
continue
dependency_node = dependency_nodes.get(dependency_build_file)
if not dependency_node:
raise GypError("Dependancy '%s' not found" % dependency_build_file)
if dependency_node not in build_file_node.dependencies:
build_file_node.dependencies.append(dependency_node)
dependency_node.dependents.append(build_file_node)
# Files that have no dependencies are treated as dependent on root_node.
root_node = DependencyGraphNode(None)
for build_file_node in dependency_nodes.itervalues():
if len(build_file_node.dependencies) == 0:
build_file_node.dependencies.append(root_node)
root_node.dependents.append(build_file_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(dependency_nodes):
if not root_node.dependents:
# If all files have dependencies, add the first file as a dependent
# of root_node so that the cycle can be discovered from root_node.
file_node = dependency_nodes.values()[0]
file_node.dependencies.append(root_node)
root_node.dependents.append(file_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
# key should be one of all_dependent_settings, direct_dependent_settings,
# or link_settings.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
if key == 'all_dependent_settings':
dependencies = dependency_nodes[target].DeepDependencies()
elif key == 'direct_dependent_settings':
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
elif key == 'link_settings':
dependencies = \
dependency_nodes[target].DependenciesForLinkSettings(targets)
else:
raise GypError("DoDependentSettings doesn't know how to determine "
'dependencies for ' + key)
for dependency in dependencies:
dependency_dict = targets[dependency]
if not key in dependency_dict:
continue
dependency_build_file = gyp.common.BuildFile(dependency)
MergeDicts(target_dict, dependency_dict[key],
build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
sort_dependencies):
# Recompute target "dependencies" properties. For each static library
# target, remove "dependencies" entries referring to other static libraries,
# unless the dependency has the "hard_dependency" attribute set. For each
# linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries)
  # if no such entry is already present.
for target in flat_list:
target_dict = targets[target]
target_type = target_dict['type']
if target_type == 'static_library':
if not 'dependencies' in target_dict:
continue
target_dict['dependencies_original'] = target_dict.get(
'dependencies', [])[:]
# A static library should not depend on another static library unless
# the dependency relationship is "hard," which should only be done when
# a dependent relies on some side effect other than just the build
# product, like a rule or action output. Further, if a target has a
# non-hard dependency, but that dependency exports a hard dependency,
# the non-hard dependency can safely be removed, but the exported hard
# dependency must be added to the target to keep the same dependency
# ordering.
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Remove every non-hard static library dependency and remove every
# non-static library dependency that isn't a direct dependency.
if (dependency_dict['type'] == 'static_library' and \
not dependency_dict.get('hard_dependency', False)) or \
(dependency_dict['type'] != 'static_library' and \
not dependency in target_dict['dependencies']):
# Take the dependency out of the list, and don't increment index
# because the next dependency to analyze will shift into the index
# formerly occupied by the one being removed.
del dependencies[index]
else:
index = index + 1
# Update the dependencies. If the dependencies list is empty, it's not
# needed, so unhook it.
if len(dependencies) > 0:
target_dict['dependencies'] = dependencies
else:
del target_dict['dependencies']
elif target_type in linkable_types:
# Get a list of dependency targets that should be linked into this
# target. Add them to the dependencies list if they're not already
# present.
link_dependencies = \
dependency_nodes[target].DependenciesToLinkAgainst(targets)
for dependency in link_dependencies:
if dependency == target:
continue
if not 'dependencies' in target_dict:
target_dict['dependencies'] = []
if not dependency in target_dict['dependencies']:
target_dict['dependencies'].append(dependency)
# Sort the dependencies list in the order from dependents to dependencies.
# e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
# Note: flat_list is already sorted in the order from dependencies to
# dependents.
if sort_dependencies and 'dependencies' in target_dict:
target_dict['dependencies'] = [dep for dep in reversed(flat_list)
if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
# If item is a relative path, it's relative to the build file dict that it's
# coming from. Fix it up to make it relative to the build file dict that
# it's going into.
# Exception: any |item| that begins with these special characters is
# returned without modification.
# / Used when a path is already absolute (shortcut optimization;
# such paths would be returned as absolute anyway)
# $ Used for build environment variables
# - Used for some build environment flags (such as -lapr-1 in a
# "libraries" section)
# < Used for our own variable and command expansions (see ExpandVariables)
# > Used for our own variable and command expansions (see ExpandVariables)
# ^ Used for our own variable and command expansions (see ExpandVariables)
#
# "/' Used when a value is quoted. If these are present, then we
# check the second character instead.
#
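  # For example (illustrative paths): MakePathRelative('a/a.gyp', 'b/b.gyp',
  # 'include/x.h') returns '../b/include/x.h', so the item still points at the
  # same file when interpreted relative to a/a.gyp's directory.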
if to_file == fro_file or exception_re.match(item):
return item
else:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
ret = os.path.normpath(os.path.join(
gyp.common.RelativePath(os.path.dirname(fro_file),
os.path.dirname(to_file)),
item)).replace('\\', '/')
if item[-1] == '/':
ret += '/'
return ret
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  # The Python documentation recommends that objects which do not support
  # hashing set __hash__ to None. Python library objects follow this rule.
is_hashable = lambda val: val.__hash__
# If x is hashable, returns whether x is in s. Else returns whether x is in l.
def is_in_set_or_list(x, s, l):
if is_hashable(x):
return x in s
return x in l
prepend_index = 0
# Make membership testing of hashables in |to| (in particular, strings)
# faster.
hashable_to_set = set(x for x in to if is_hashable(x))
for item in fro:
singleton = False
if type(item) in (str, int):
# The cheap and easy case.
if is_paths:
to_item = MakePathRelative(to_file, fro_file, item)
else:
to_item = item
if not (type(item) is str and item.startswith('-')):
        # Any string that doesn't begin with a "-" is a singleton - it can
        # only appear once in a list, which is enforced by the list merge
        # append or prepend.
singleton = True
elif type(item) is dict:
# Make a copy of the dictionary, continuing to look for paths to fix.
# The other intelligent aspects of merge processing won't apply because
# item is being merged into an empty dict.
to_item = {}
MergeDicts(to_item, item, to_file, fro_file)
elif type(item) is list:
# Recurse, making a copy of the list. If the list contains any
# descendant dicts, path fixing will occur. Note that here, custom
# values for is_paths and append are dropped; those are only to be
# applied to |to| and |fro|, not sublists of |fro|. append shouldn't
# matter anyway because the new |to_item| list is empty.
to_item = []
MergeLists(to_item, item, to_file, fro_file)
else:
raise TypeError(
'Attempt to merge list item of unsupported type ' + \
item.__class__.__name__)
if append:
# If appending a singleton that's already in the list, don't append.
# This ensures that the earliest occurrence of the item will stay put.
if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
to.append(to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
else:
# If prepending a singleton that's already in the list, remove the
# existing instance and proceed with the prepend. This ensures that the
# item appears at the earliest possible position in the list.
while singleton and to_item in to:
to.remove(to_item)
# Don't just insert everything at index 0. That would prepend the new
# items to the list in reverse order, which would be an unwelcome
# surprise.
to.insert(prepend_index, to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
prepend_index = prepend_index + 1
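# A minimal sketch of the merge semantics above, with hypothetical values.
# Starting from to = ['a', '-lfoo'] each time:
#
#   MergeLists(to, ['a', '-lfoo', 'b'], 'x.gyp', 'y.gyp')
#     -> to == ['a', '-lfoo', '-lfoo', 'b']
#        ('a' is a singleton already present, so it is not appended again;
#         '-lfoo' starts with '-', so duplicates are allowed; 'b' is new)
#
#   MergeLists(to, ['a', '-lfoo', 'b'], 'x.gyp', 'y.gyp', append=False)
#     -> to == ['a', '-lfoo', 'b', '-lfoo']
#        (prepended items keep the order of |fro|; the existing singleton 'a'
#         is removed and re-inserted at the front)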
def MergeDicts(to, fro, to_file, fro_file):
# I wanted to name the parameter "from" but it's a Python keyword...
for k, v in fro.iteritems():
# It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
# copy semantics. Something else may want to merge from the |fro| dict
# later, and having the same dict ref pointed to twice in the tree isn't
# what anyone wants considering that the dicts may subsequently be
# modified.
if k in to:
bad_merge = False
if type(v) in (str, int):
if type(to[k]) not in (str, int):
bad_merge = True
elif type(v) is not type(to[k]):
bad_merge = True
if bad_merge:
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[k].__class__.__name__ + \
' for key ' + k)
if type(v) in (str, int):
# Overwrite the existing value, if any. Cheap and easy.
is_path = IsPathSection(k)
if is_path:
to[k] = MakePathRelative(to_file, fro_file, v)
else:
to[k] = v
elif type(v) is dict:
# Recurse, guaranteeing copies will be made of objects that require it.
if not k in to:
to[k] = {}
MergeDicts(to[k], v, to_file, fro_file)
elif type(v) is list:
# Lists in dicts can be merged with different policies, depending on
# how the key in the "from" dict (k, the from-key) is written.
#
# If the from-key has ...the to-list will have this action
# this character appended:... applied when receiving the from-list:
# = replace
# + prepend
# ? set, only if to-list does not yet exist
# (none) append
#
# This logic is list-specific, but since it relies on the associated
# dict key, it's checked in this dict-oriented function.
ext = k[-1]
append = True
if ext == '=':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '?']
to[list_base] = []
elif ext == '+':
list_base = k[:-1]
lists_incompatible = [list_base + '=', list_base + '?']
append = False
elif ext == '?':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '=', list_base + '+']
else:
list_base = k
lists_incompatible = [list_base + '=', list_base + '?']
# Some combinations of merge policies appearing together are meaningless.
# It's stupid to replace and append simultaneously, for example. Append
# and prepend are the only policies that can coexist.
for list_incompatible in lists_incompatible:
if list_incompatible in fro:
raise GypError('Incompatible list policies ' + k + ' and ' +
list_incompatible)
if list_base in to:
if ext == '?':
# If the key ends in "?", the list will only be merged if it doesn't
# already exist.
continue
elif type(to[list_base]) is not list:
# This may not have been checked above if merging in a list with an
# extension character.
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[list_base].__class__.__name__ + \
' for key ' + list_base + '(' + k + ')')
else:
to[list_base] = []
# Call MergeLists, which will make copies of objects that require it.
      # MergeLists can recurse back into MergeDicts, although this will only be
      # to make copies of dicts (with paths fixed); there will be no
      # subsequent dict "merging" once entering a list because lists are
      # always replaced, appended to, or prepended to.
is_paths = IsPathSection(list_base)
MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
else:
raise TypeError(
'Attempt to merge dict value of unsupported type ' + \
v.__class__.__name__ + ' for key ' + k)
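# A minimal sketch of the key-suffix policies above, with hypothetical dicts:
#
#   to = {'defines': ['A']}
#   MergeDicts(to, {'defines+': ['B']}, 'x.gyp', 'y.gyp')  # prepend -> ['B', 'A']
#   MergeDicts(to, {'defines=': ['C']}, 'x.gyp', 'y.gyp')  # replace -> ['C']
#   MergeDicts(to, {'defines?': ['D']}, 'x.gyp', 'y.gyp')  # exists, so unchanged
#   MergeDicts(to, {'defines':  ['E']}, 'x.gyp', 'y.gyp')  # append  -> ['C', 'E']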
def MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, visited):
  # Skip if previously visited.
if configuration in visited:
return
# Look at this configuration.
configuration_dict = target_dict['configurations'][configuration]
# Merge in parents.
for parent in configuration_dict.get('inherit_from', []):
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, parent, visited + [configuration])
# Merge it into the new config.
MergeDicts(new_configuration_dict, configuration_dict,
build_file, build_file)
# Drop abstract.
if 'abstract' in new_configuration_dict:
del new_configuration_dict['abstract']
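# A minimal sketch with hypothetical configurations:
#
#   configurations = {'Base':  {'abstract': 1, 'defines': ['COMMON']},
#                     'Debug': {'inherit_from': ['Base'], 'defines': ['DEBUG']}}
#
# Merging 'Debug' recurses into 'Base' first, so the resulting configuration's
# 'defines' is ['COMMON', 'DEBUG'], and the inherited 'abstract' flag is
# dropped at the end.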
def SetUpConfigurations(target, target_dict):
# key_suffixes is a list of key suffixes that might appear on key names.
# These suffixes are handled in conditional evaluations (for =, +, and ?)
# and rules/exclude processing (for ! and /). Keys with these suffixes
# should be treated the same as keys without.
key_suffixes = ['=', '+', '?', '!', '/']
build_file = gyp.common.BuildFile(target)
# Provide a single configuration by default if none exists.
# TODO(mark): Signal an error if default_configurations exists but
# configurations does not.
if not 'configurations' in target_dict:
target_dict['configurations'] = {'Default': {}}
if not 'default_configuration' in target_dict:
concrete = [i for (i, config) in target_dict['configurations'].iteritems()
if not config.get('abstract')]
target_dict['default_configuration'] = sorted(concrete)[0]
merged_configurations = {}
configs = target_dict['configurations']
for (configuration, old_configuration_dict) in configs.iteritems():
# Skip abstract configurations (saves work only).
if old_configuration_dict.get('abstract'):
continue
# Configurations inherit (most) settings from the enclosing target scope.
# Get the inheritance relationship right by making a copy of the target
# dict.
new_configuration_dict = {}
for (key, target_val) in target_dict.iteritems():
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)
# Merge in configuration (with all its parents first).
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, [])
merged_configurations[configuration] = new_configuration_dict
# Put the new configurations back into the target dict as a configuration.
for configuration in merged_configurations.keys():
target_dict['configurations'][configuration] = (
merged_configurations[configuration])
# Now drop all the abstract ones.
for configuration in target_dict['configurations'].keys():
old_configuration_dict = target_dict['configurations'][configuration]
if old_configuration_dict.get('abstract'):
del target_dict['configurations'][configuration]
# Now that all of the target's configurations have been built, go through
# the target dict's keys and remove everything that's been moved into a
# "configurations" section.
delete_keys = []
for key in target_dict:
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
delete_keys.append(key)
for key in delete_keys:
del target_dict[key]
# Check the configurations to see if they contain invalid keys.
for configuration in target_dict['configurations'].keys():
configuration_dict = target_dict['configurations'][configuration]
for key in configuration_dict.keys():
if key in invalid_configuration_keys:
raise GypError('%s not allowed in the %s configuration, found in '
'target %s' % (key, configuration, target))
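# A minimal sketch of the reshaping above, with a hypothetical target dict and
# assuming 'defines' is not in non_configuration_keys:
#
#   {'target_name': 'x', 'type': 'none', 'defines': ['COMMON'],
#    'configurations': {'Debug': {'defines': ['DEBUG']}}}
#
# becomes
#
#   {'target_name': 'x', 'type': 'none', 'default_configuration': 'Debug',
#    'configurations': {'Debug': {'defines': ['COMMON', 'DEBUG']}}}
#
# i.e. the target-level 'defines' is copied into each concrete configuration
# and then deleted from the target itself.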
def ProcessListFiltersInDict(name, the_dict):
"""Process regular expression and exclusion-based filters on lists.
  An exclusion list is in a dict key named with a trailing "!", like
  "sources!". Every item in such a list is removed from the associated
  main list, which in this example would be "sources". Removed items are
  placed into a "sources_excluded" list in the dict.
Regular expression (regex) filters are contained in dict keys named with a
trailing "/", such as "sources/" to operate on the "sources" list. Regex
filters in a dict take the form:
'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
['include', '_mac\\.cc$'] ],
The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
_win.cc. The second filter then includes all files ending in _mac.cc that
are now or were once in the "sources" list. Items matching an "exclude"
filter are subject to the same processing as would occur if they were listed
by name in an exclusion list (ending in "!"). Items matching an "include"
filter are brought back into the main list if previously excluded by an
exclusion list or exclusion regex filter. Subsequent matching "exclude"
patterns can still cause items to be excluded after matching an "include".
"""
# Look through the dictionary for any lists whose keys end in "!" or "/".
# These are lists that will be treated as exclude lists and regular
# expression-based exclude/include lists. Collect the lists that are
# needed first, looking for the lists that they operate on, and assemble
  # them into |lists|. This is done in a separate loop up front, because
# the _included and _excluded keys need to be added to the_dict, and that
# can't be done while iterating through it.
lists = []
del_lists = []
for key, value in the_dict.iteritems():
operation = key[-1]
if operation != '!' and operation != '/':
continue
if type(value) is not list:
raise ValueError(name + ' key ' + key + ' must be list, not ' + \
value.__class__.__name__)
list_key = key[:-1]
if list_key not in the_dict:
# This happens when there's a list like "sources!" but no corresponding
# "sources" list. Since there's nothing for it to operate on, queue up
# the "sources!" list for deletion now.
del_lists.append(key)
continue
if type(the_dict[list_key]) is not list:
value = the_dict[list_key]
raise ValueError(name + ' key ' + list_key + \
' must be list, not ' + \
value.__class__.__name__ + ' when applying ' + \
{'!': 'exclusion', '/': 'regex'}[operation])
if not list_key in lists:
lists.append(list_key)
# Delete the lists that are known to be unneeded at this point.
for del_list in del_lists:
del the_dict[del_list]
for list_key in lists:
the_list = the_dict[list_key]
# Initialize the list_actions list, which is parallel to the_list. Each
# item in list_actions identifies whether the corresponding item in
# the_list should be excluded, unconditionally preserved (included), or
# whether no exclusion or inclusion has been applied. Items for which
# no exclusion or inclusion has been applied (yet) have value -1, items
# excluded have value 0, and items included have value 1. Includes and
# excludes override previous actions. All items in list_actions are
# initialized to -1 because no excludes or includes have been processed
# yet.
list_actions = list((-1,) * len(the_list))
exclude_key = list_key + '!'
if exclude_key in the_dict:
for exclude_item in the_dict[exclude_key]:
for index in xrange(0, len(the_list)):
if exclude_item == the_list[index]:
# This item matches the exclude_item, so set its action to 0
# (exclude).
list_actions[index] = 0
# The "whatever!" list is no longer needed, dump it.
del the_dict[exclude_key]
regex_key = list_key + '/'
if regex_key in the_dict:
for regex_item in the_dict[regex_key]:
[action, pattern] = regex_item
pattern_re = re.compile(pattern)
if action == 'exclude':
# This item matches an exclude regex, so set its value to 0 (exclude).
action_value = 0
elif action == 'include':
# This item matches an include regex, so set its value to 1 (include).
action_value = 1
else:
# This is an action that doesn't make any sense.
raise ValueError('Unrecognized action ' + action + ' in ' + name + \
' key ' + regex_key)
for index in xrange(0, len(the_list)):
list_item = the_list[index]
if list_actions[index] == action_value:
# Even if the regex matches, nothing will change so continue (regex
# searches are expensive).
continue
if pattern_re.search(list_item):
# Regular expression match.
list_actions[index] = action_value
# The "whatever/" list is no longer needed, dump it.
del the_dict[regex_key]
# Add excluded items to the excluded list.
#
# Note that exclude_key ("sources!") is different from excluded_key
# ("sources_excluded"). The exclude_key list is input and it was already
# processed and deleted; the excluded_key list is output and it's about
# to be created.
excluded_key = list_key + '_excluded'
if excluded_key in the_dict:
raise GypError(name + ' key ' + excluded_key +
' must not be present prior '
' to applying exclusion/regex filters for ' + list_key)
excluded_list = []
# Go backwards through the list_actions list so that as items are deleted,
# the indices of items that haven't been seen yet don't shift. That means
# that things need to be prepended to excluded_list to maintain them in the
# same order that they existed in the_list.
for index in xrange(len(list_actions) - 1, -1, -1):
if list_actions[index] == 0:
# Dump anything with action 0 (exclude). Keep anything with action 1
# (include) or -1 (no include or exclude seen for the item).
excluded_list.insert(0, the_list[index])
del the_list[index]
# If anything was excluded, put the excluded list into the_dict at
# excluded_key.
if len(excluded_list) > 0:
the_dict[excluded_key] = excluded_list
# Now recurse into subdicts and lists that may contain dicts.
for key, value in the_dict.iteritems():
if type(value) is dict:
ProcessListFiltersInDict(key, value)
elif type(value) is list:
ProcessListFiltersInList(key, value)
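# A minimal sketch of the end-to-end behaviour, with a hypothetical dict:
#
#   {'sources':  ['a_linux.cc', 'a_mac.cc', 'b.cc'],
#    'sources!': ['b.cc'],
#    'sources/': [['exclude', '_(linux|mac)\\.cc$'], ['include', '_mac\\.cc$']]}
#
# becomes, after ProcessListFiltersInDict('target', the_dict):
#
#   {'sources': ['a_mac.cc'],
#    'sources_excluded': ['a_linux.cc', 'b.cc']}
#
# 'b.cc' is dropped by the exclusion list, 'a_linux.cc' by the exclude regex,
# and 'a_mac.cc' survives because the later include regex re-adds it.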
def ProcessListFiltersInList(name, the_list):
for item in the_list:
if type(item) is dict:
ProcessListFiltersInDict(name, item)
elif type(item) is list:
ProcessListFiltersInList(name, item)
def ValidateTargetType(target, target_dict):
"""Ensures the 'type' field on the target is one of the known types.
Arguments:
target: string, name of target.
target_dict: dict, target spec.
Raises an exception on error.
"""
VALID_TARGET_TYPES = ('executable', 'loadable_module',
'static_library', 'shared_library',
'mac_kernel_extension', 'none')
target_type = target_dict.get('type', None)
if target_type not in VALID_TARGET_TYPES:
raise GypError("Target %s has an invalid target type '%s'. "
"Must be one of %s." %
(target, target_type, '/'.join(VALID_TARGET_TYPES)))
if (target_dict.get('standalone_static_library', 0) and
not target_type == 'static_library'):
raise GypError('Target %s has type %s but standalone_static_library flag is'
' only valid for static_library type.' % (target,
target_type))
def ValidateSourcesInTarget(target, target_dict, build_file,
duplicate_basename_check):
if not duplicate_basename_check:
return
if target_dict.get('type', None) != 'static_library':
return
sources = target_dict.get('sources', [])
basenames = {}
for source in sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
target + error + 'libtool on Mac cannot handle that. Use '
'--no-duplicate-basename-check to disable this validation.')
raise GypError('Duplicate basenames in sources section, see list above')
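# A minimal sketch of what trips the check above (hypothetical paths): a
# static_library whose 'sources' contain both 'foo/util.cc' and 'bar/util.cc'
# shares the compiled basename 'util', so the GypError is raised unless the
# duplicate-basename check is disabled.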
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
"""Ensures that the rules sections in target_dict are valid and consistent,
and determines which sources they apply to.
Arguments:
target: string, name of target.
target_dict: dict, target spec containing "rules" and "sources" lists.
extra_sources_for_rules: a list of keys to scan for rule matches in
addition to 'sources'.
"""
# Dicts to map between values found in rules' 'rule_name' and 'extension'
# keys and the rule dicts themselves.
rule_names = {}
rule_extensions = {}
rules = target_dict.get('rules', [])
for rule in rules:
# Make sure that there's no conflict among rule names and extensions.
rule_name = rule['rule_name']
if rule_name in rule_names:
raise GypError('rule %s exists in duplicate, target %s' %
(rule_name, target))
rule_names[rule_name] = rule
rule_extension = rule['extension']
if rule_extension.startswith('.'):
rule_extension = rule_extension[1:]
if rule_extension in rule_extensions:
raise GypError(('extension %s associated with multiple rules, ' +
'target %s rules %s and %s') %
(rule_extension, target,
rule_extensions[rule_extension]['rule_name'],
rule_name))
rule_extensions[rule_extension] = rule
# Make sure rule_sources isn't already there. It's going to be
# created below if needed.
if 'rule_sources' in rule:
raise GypError(
'rule_sources must not exist in input, target %s rule %s' %
(target, rule_name))
rule_sources = []
source_keys = ['sources']
source_keys.extend(extra_sources_for_rules)
for source_key in source_keys:
for source in target_dict.get(source_key, []):
(source_root, source_extension) = os.path.splitext(source)
if source_extension.startswith('.'):
source_extension = source_extension[1:]
if source_extension == rule_extension:
rule_sources.append(source)
if len(rule_sources) > 0:
rule['rule_sources'] = rule_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
target_name = target_dict.get('target_name')
run_as = target_dict.get('run_as')
if not run_as:
return
if type(run_as) is not dict:
raise GypError("The 'run_as' in target %s from file %s should be a "
"dictionary." %
(target_name, build_file))
action = run_as.get('action')
if not action:
raise GypError("The 'run_as' in target %s from file %s must have an "
"'action' section." %
(target_name, build_file))
if type(action) is not list:
raise GypError("The 'action' for 'run_as' in target %s from file %s "
"must be a list." %
(target_name, build_file))
working_directory = run_as.get('working_directory')
if working_directory and type(working_directory) is not str:
raise GypError("The 'working_directory' for 'run_as' in target %s "
"in file %s should be a string." %
(target_name, build_file))
environment = run_as.get('environment')
if environment and type(environment) is not dict:
raise GypError("The 'environment' for 'run_as' in target %s "
"in file %s should be a dictionary." %
(target_name, build_file))
def ValidateActionsInTarget(target, target_dict, build_file):
'''Validates the inputs to the actions in a target.'''
target_name = target_dict.get('target_name')
actions = target_dict.get('actions', [])
for action in actions:
action_name = action.get('action_name')
if not action_name:
raise GypError("Anonymous action in target %s. "
"An action must have an 'action_name' field." %
target_name)
inputs = action.get('inputs', None)
if inputs is None:
raise GypError('Action in target %s has no inputs.' % target_name)
action_command = action.get('action')
if action_command and not action_command[0]:
raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
"""Given dict the_dict, recursively converts all integers into strings.
"""
# Use items instead of iteritems because there's no need to try to look at
# reinserted keys and their associated values.
for k, v in the_dict.items():
if type(v) is int:
v = str(v)
the_dict[k] = v
elif type(v) is dict:
TurnIntIntoStrInDict(v)
elif type(v) is list:
TurnIntIntoStrInList(v)
if type(k) is int:
del the_dict[k]
the_dict[str(k)] = v
def TurnIntIntoStrInList(the_list):
"""Given list the_list, recursively converts all integers into strings.
"""
for index in xrange(0, len(the_list)):
item = the_list[index]
if type(item) is int:
the_list[index] = str(item)
elif type(item) is dict:
TurnIntIntoStrInDict(item)
elif type(item) is list:
TurnIntIntoStrInList(item)
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
data):
"""Return only the targets that are deep dependencies of |root_targets|."""
qualified_root_targets = []
for target in root_targets:
target = target.strip()
qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list)
if not qualified_targets:
raise GypError("Could not find target %s" % target)
qualified_root_targets.extend(qualified_targets)
wanted_targets = {}
for target in qualified_root_targets:
wanted_targets[target] = targets[target]
for dependency in dependency_nodes[target].DeepDependencies():
wanted_targets[dependency] = targets[dependency]
wanted_flat_list = [t for t in flat_list if t in wanted_targets]
# Prune unwanted targets from each build_file's data dict.
for build_file in data['target_build_files']:
if not 'targets' in data[build_file]:
continue
new_targets = []
for target in data[build_file]['targets']:
qualified_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if qualified_name in wanted_targets:
new_targets.append(target)
data[build_file]['targets'] = new_targets
return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
"""Verify that no two targets in the same directory share the same name.
Arguments:
targets: A list of targets in the form 'path/to/file.gyp:target_name'.
"""
# Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.
used = {}
for target in targets:
# Separate out 'path/to/file.gyp, 'target_name' from
# 'path/to/file.gyp:target_name'.
path, name = target.rsplit(':', 1)
# Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.
subdir, gyp = os.path.split(path)
# Use '.' for the current directory '', so that the error messages make
# more sense.
if not subdir:
subdir = '.'
# Prepare a key like 'path/to:target_name'.
key = subdir + ':' + name
if key in used:
# Complain if this target is already used.
raise GypError('Duplicate target name "%s" in directory "%s" used both '
'in "%s" and "%s".' % (name, subdir, gyp, used[key]))
used[key] = gyp
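# A minimal sketch with hypothetical targets:
#
#   VerifyNoCollidingTargets(['p/a.gyp:core', 'p/b.gyp:core'])
#
# Both entries map to the key 'p:core', so the second one raises the
# duplicate-target-name GypError above.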
def SetGeneratorGlobals(generator_input_info):
# Set up path_sections and non_configuration_keys with the default data plus
# the generator-specific data.
global path_sections
path_sections = set(base_path_sections)
path_sections.update(generator_input_info['path_sections'])
global non_configuration_keys
non_configuration_keys = base_non_configuration_keys[:]
non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
global multiple_toolsets
multiple_toolsets = generator_input_info[
'generator_supports_multiple_toolsets']
global generator_filelist_paths
generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
circular_check, duplicate_basename_check, parallel, root_targets):
SetGeneratorGlobals(generator_input_info)
  # A generator can have other lists (in addition to sources) processed
  # for rules.
extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
# Load build files. This loads every target-containing build file into
# the |data| dictionary such that the keys to |data| are build file names,
# and the values are the entire build file contents after "early" or "pre"
# processing has been done and includes have been resolved.
# NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
# well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
# track of the keys corresponding to "target" files.
data = {'target_build_files': set()}
# Normalize paths everywhere. This is important because paths will be
# used as keys to the data dict and for references between input files.
build_files = set(map(os.path.normpath, build_files))
if parallel:
LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info)
else:
aux_data = {}
for build_file in build_files:
try:
LoadTargetBuildFile(build_file, data, aux_data,
variables, includes, depth, check, True)
except Exception, e:
gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
raise
# Build a dict to access each target's subdict by qualified name.
targets = BuildTargetsDict(data)
# Fully qualify all dependency links.
QualifyDependencies(targets)
# Remove self-dependencies from targets that have 'prune_self_dependencies'
# set to 1.
RemoveSelfDependencies(targets)
# Expand dependencies specified as build_file:*.
ExpandWildcardDependencies(targets, data)
# Remove all dependencies marked as 'link_dependency' from the targets of
# type 'none'.
RemoveLinkDependenciesFromNoneTargets(targets)
# Apply exclude (!) and regex (/) list filters only for dependency_sections.
for target_name, target_dict in targets.iteritems():
tmp_dict = {}
for key_base in dependency_sections:
for op in ('', '!', '/'):
key = key_base + op
if key in target_dict:
tmp_dict[key] = target_dict[key]
del target_dict[key]
ProcessListFiltersInDict(target_name, tmp_dict)
# Write the results back to |target_dict|.
for key in tmp_dict:
target_dict[key] = tmp_dict[key]
# Make sure every dependency appears at most once.
RemoveDuplicateDependencies(targets)
if circular_check:
# Make sure that any targets in a.gyp don't contain dependencies in other
# .gyp files that further depend on a.gyp.
VerifyNoGYPFileCircularDependencies(targets)
[dependency_nodes, flat_list] = BuildDependencyList(targets)
if root_targets:
# Remove, from |targets| and |flat_list|, the targets that are not deep
# dependencies of the targets specified in |root_targets|.
targets, flat_list = PruneUnwantedTargets(
targets, flat_list, dependency_nodes, root_targets, data)
# Check that no two targets in the same directory have the same name.
VerifyNoCollidingTargets(flat_list)
# Handle dependent settings of various types.
for settings_type in ['all_dependent_settings',
'direct_dependent_settings',
'link_settings']:
DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
# Take out the dependent settings now that they've been published to all
# of the targets that require them.
for target in flat_list:
if settings_type in targets[target]:
del targets[target][settings_type]
# Make sure static libraries don't declare dependencies on other static
# libraries, but that linkables depend on all unlinked static libraries
# that they need so that their link steps will be correct.
gii = generator_input_info
if gii['generator_wants_static_library_dependencies_adjusted']:
AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
gii['generator_wants_sorted_dependencies'])
# Apply "post"/"late"/"target" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATE, variables, build_file)
# Move everything that can go into a "configurations" section into one.
for target in flat_list:
target_dict = targets[target]
SetUpConfigurations(target, target_dict)
# Apply exclude (!) and regex (/) list filters.
for target in flat_list:
target_dict = targets[target]
ProcessListFiltersInDict(target, target_dict)
# Apply "latelate" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATELATE, variables, build_file)
# Make sure that the rules make sense, and build up rule_sources lists as
# needed. Not all generators will need to use the rule_sources lists, but
# some may, and it seems best to build the list in a common spot.
# Also validate actions and run_as elements in targets.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ValidateTargetType(target, target_dict)
ValidateSourcesInTarget(target, target_dict, build_file,
duplicate_basename_check)
ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
ValidateRunAsInTarget(target, target_dict, build_file)
ValidateActionsInTarget(target, target_dict, build_file)
# Generators might not expect ints. Turn them into strs.
TurnIntIntoStrInDict(data)
# TODO(mark): Return |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
return [flat_list, targets, data]
| mit | 920,323,894,205,539,600 | 39.071108 | 84 | 0.654653 | false |
khchine5/xl | lino_xl/lib/ledger/fixtures/minimal_ledger.py | 1 | 8612 | # -*- coding: UTF-8 -*-
# Copyright 2012-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""
Creates minimal accounting demo data:
- a minimal accounts chart
- some journals
"""
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
from django.conf import settings
from lino.api import dd, rt, _
from lino_xl.lib.accounts.utils import DEBIT, CREDIT
from lino_xl.lib.ledger import choicelists as pcmn
from lino_xl.lib.accounts.choicelists import CommonAccounts, AccountTypes
from lino.utils import Cycler
#accounts = dd.resolve_app('accounts')
vat = dd.resolve_app('vat')
sales = dd.resolve_app('sales')
ledger = dd.resolve_app('ledger')
finan = dd.resolve_app('finan')
bevat = dd.resolve_app('bevat')
bevats = dd.resolve_app('bevats')
#~ partners = dd.resolve_app('partners')
current_group = None
def objects():
JournalGroups = rt.models.ledger.JournalGroups
Company = rt.models.contacts.Company
def group(ref, type, name):
global current_group
current_group = rt.models.accounts.Group(
ref=ref,
account_type=AccountTypes.get_by_name(type),
**dd.str2kw('name', name))
return current_group
def Group(ref, type, fr, de, en, et=None):
if et is None:
et = en
global current_group
current_group = rt.models.accounts.Group(
ref=ref,
account_type=AccountTypes.get_by_name(type),
**dd.babel_values('name', de=de, fr=fr, en=en, et=et))
return current_group
def Account(ca):
# kw.update(dd.babel_values('name', de=de, fr=fr, en=en, et=et))
return rt.models.accounts.Account(
group=current_group,
ref=ca.value,
type=ca)
# yield Group('10', 'capital', "Capital", "Kapital", "Capital", "Kapitaal")
yield group('10', 'capital', _("Capital"))
yield Group('40', 'assets',
"Créances et dettes commerciales",
"Forderungen aus Lieferungen und Leistungen",
"Commercial assets & liabilities")
yield CommonAccounts.customers.create_object(group=current_group)
yield CommonAccounts.suppliers.create_object(group=current_group)
# yield Group('45', 'assets', "TVA à payer",
# "Geschuldete MWSt", "VAT to pay", "Käibemaksukonto")
yield group('47', 'assets', _("Tax office"))
yield CommonAccounts.vat_due.create_object(group=current_group)
yield CommonAccounts.vat_returnable.create_object(group=current_group)
yield CommonAccounts.vat_deductible.create_object(group=current_group)
yield CommonAccounts.due_taxes.create_object(group=current_group)
yield CommonAccounts.tax_offices.create_object(group=current_group)
yield group('49', 'assets', _("Other assets & liabilities"))
yield CommonAccounts.pending_po.create_object(group=current_group)
yield CommonAccounts.waiting.create_object(group=current_group)
# PCMN 55
yield Group('55', 'assets',
"Institutions financières", "Finanzinstitute", "Banks")
yield CommonAccounts.best_bank.create_object(group=current_group)
yield CommonAccounts.cash.create_object(group=current_group)
# yield Group('58', 'assets',
# "Transactions en cours", "Laufende Transaktionen",
# "Running transactions")
# yield Account(PO_BESTBANK_ACCOUNT, 'bank_accounts',
# "Ordres de paiement Bestbank",
# "Zahlungsaufträge Bestbank",
# "Payment Orders Bestbank",
# "Maksekorraldused Parimpank", clearable=True)
yield Group('6', 'expenses', u"Charges", u"Aufwendungen", "Expenses", "Kulud")
kwargs = dict(purchases_allowed=True, group=current_group)
if dd.is_installed('ana'):
kwargs.update(needs_ana=True)
# ANA_ACCS = Cycler(rt.models.ana.Account.objects.all())
# if dd.is_installed('ana'):
# kwargs.update(ana_account=ANA_ACCS.pop())
yield CommonAccounts.purchase_of_goods.create_object(**kwargs)
# if dd.is_installed('ana'):
# kwargs.update(ana_account=ANA_ACCS.pop())
yield CommonAccounts.purchase_of_services.create_object(**kwargs)
# if dd.is_installed('ana'):
# del kwargs['ana_account']
yield CommonAccounts.purchase_of_investments.create_object(**kwargs)
yield Group('7', 'incomes', "Produits", "Erträge", "Revenues", "Tulud")
kwargs = dict(sales_allowed=True, group=current_group)
yield CommonAccounts.sales.create_object(**kwargs)
# if sales:
# settings.SITE.site_config.update(sales_account=obj)
# JOURNALS
kw = dict(journal_group=JournalGroups.sales)
if sales:
MODEL = sales.VatProductInvoice
else:
MODEL = vat.VatAccountInvoice
kw.update(trade_type='sales')
kw.update(ref="SLS", dc=DEBIT)
kw.update(printed_name=_("Invoice"))
kw.update(dd.str2kw('name', _("Sales invoices")))
yield MODEL.create_journal(**kw)
kw.update(ref="SLC", dc=CREDIT)
kw.update(dd.str2kw('name', _("Sales credit notes")))
kw.update(printed_name=_("Credit note"))
yield MODEL.create_journal(**kw)
kw.update(journal_group=JournalGroups.purchases)
kw.update(trade_type='purchases', ref="PRC")
kw.update(dd.str2kw('name', _("Purchase invoices")))
kw.update(dc=CREDIT)
if dd.is_installed('ana'):
yield rt.models.ana.AnaAccountInvoice.create_journal(**kw)
else:
yield vat.VatAccountInvoice.create_journal(**kw)
if finan:
bestbank = Company(
name="Bestbank",
country=dd.plugins.countries.get_my_country())
yield bestbank
kw = dict(journal_group=JournalGroups.financial)
kw.update(dd.str2kw('name', _("Bestbank Payment Orders")))
# kw.update(dd.babel_values(
# 'name', de="Zahlungsaufträge", fr="Ordres de paiement",
# en="Payment Orders", et="Maksekorraldused"))
kw.update(
trade_type='bank_po',
partner=bestbank,
account=CommonAccounts.pending_po.get_object(),
ref="PMO")
kw.update(dc=CREDIT)
yield finan.PaymentOrder.create_journal(**kw)
kw = dict(journal_group=JournalGroups.financial)
# kw.update(trade_type='')
kw.update(dc=DEBIT)
kw.update(account=CommonAccounts.cash.get_object(), ref="CSH")
kw.update(dd.str2kw('name', _("Cash")))
# kw = dd.babel_values(
# 'name', en="Cash",
# de="Kasse", fr="Caisse",
# et="Kassa")
yield finan.BankStatement.create_journal(**kw)
kw.update(dd.str2kw('name', _("Bestbank")))
kw.update(account=CommonAccounts.best_bank.get_object(), ref="BNK")
kw.update(dc=DEBIT)
yield finan.BankStatement.create_journal(**kw)
kw.update(dd.str2kw('name', _("Miscellaneous Journal Entries")))
# kw = dd.babel_values(
# 'name', en="Miscellaneous Journal Entries",
# de="Diverse Buchungen", fr="Opérations diverses",
# et="Muud operatsioonid")
kw.update(account=CommonAccounts.cash.get_object(), ref="MSC")
kw.update(dc=DEBIT)
yield finan.JournalEntry.create_journal(**kw)
for m in (bevat, bevats):
if not m:
continue
kw = dict(journal_group=JournalGroups.vat)
kw.update(trade_type='taxes')
kw.update(dd.str2kw('name', _("VAT declarations")))
kw.update(must_declare=False)
kw.update(account=CommonAccounts.due_taxes.get_object(),
ref=m.DEMO_JOURNAL_NAME, dc=CREDIT)
yield m.Declaration.create_journal(**kw)
payments = []
if finan:
payments += [finan.BankStatement, finan.JournalEntry,
finan.PaymentOrder]
MatchRule = rt.models.ledger.MatchRule
for jnl in ledger.Journal.objects.all():
if jnl.voucher_type.model in payments:
yield MatchRule(
journal=jnl,
account=CommonAccounts.customers.get_object())
yield MatchRule(
journal=jnl,
account=CommonAccounts.suppliers.get_object())
a = CommonAccounts.wages.get_object()
if a:
yield MatchRule(journal=jnl, account=a)
elif jnl.trade_type:
a = jnl.trade_type.get_main_account()
if a:
yield MatchRule(journal=jnl, account=a)
| bsd-2-clause | -6,016,008,150,314,607,000 | 34.262295 | 82 | 0.620293 | false |
aksareen/balrog | auslib/web/public/base.py | 1 | 4237 | import cgi
import connexion
import logging
import re
import auslib.web
from os import path
from connexion import request
from flask import make_response, send_from_directory, Response
from raven.contrib.flask import Sentry
from auslib.AUS import AUS
from auslib.web.api_validator import BalrogParameterValidator
from auslib.util.swagger import SpecBuilder
from auslib.errors import BadDataError
log = logging.getLogger(__name__)
AUS = AUS()
sentry = Sentry()
validator_map = {
'parameter': BalrogParameterValidator
}
connexion_app = connexion.App(__name__,
specification_dir='.',
validator_map=validator_map)
app = connexion_app.app
current_dir = path.dirname(__file__)
web_dir = path.dirname(auslib.web.__file__)
spec = SpecBuilder().add_spec(path.join(current_dir, 'api.yml'))\
.add_spec(path.join(web_dir, 'common/releases_spec.yml'))\
.add_spec(path.join(web_dir, 'common/rules_spec.yml'))
connexion_app.add_api(spec,
validate_responses=True,
strict_validation=True)
@app.after_request
def apply_security_headers(response):
# There's no use cases for content served by Balrog to load additional content
# nor be embedded elsewhere, so we apply a strict Content Security Policy.
# We also need to set X-Content-Type-Options to nosniff for Firefox to obey this.
# See https://bugzilla.mozilla.org/show_bug.cgi?id=1332829#c4 for background.
response.headers["Strict-Transport-Security"] = app.config.get("STRICT_TRANSPORT_SECURITY", "max-age=31536000;")
response.headers["X-Content-Type-Options"] = app.config.get("CONTENT_TYPE_OPTIONS", "nosniff")
if re.match("^/ui/", request.path):
# This enables swagger-ui to dynamically fetch and
# load the swagger specification JSON file containing API definition and examples.
response.headers['X-Frame-Options'] = 'SAMEORIGIN'
else:
response.headers["Content-Security-Policy"] = \
app.config.get("CONTENT_SECURITY_POLICY", "default-src 'none'; frame-ancestors 'none'")
return response
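# A minimal sketch of the headers this adds to a non-/ui/ response, assuming
# the default values used above (no overrides in app.config):
#
#   Strict-Transport-Security: max-age=31536000;
#   X-Content-Type-Options: nosniff
#   Content-Security-Policy: default-src 'none'; frame-ancestors 'none'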
@app.errorhandler(404)
def fourohfour(error):
"""We don't return 404s in AUS. Instead, we return empty XML files"""
response = make_response('<?xml version="1.0"?>\n<updates>\n</updates>')
response.mimetype = 'text/xml'
return response
@app.errorhandler(Exception)
def generic(error):
"""Deals with any unhandled exceptions. If the exception is not a
BadDataError, it will be sent to Sentry, and a 400 will be returned,
because BadDataErrors are considered to be the client's fault.
Otherwise, the error is just re-raised (which causes a 500)."""
# Escape exception messages before replying with them, because they may
# contain user input.
# See https://bugzilla.mozilla.org/show_bug.cgi?id=1332829 for background.
error.message = cgi.escape(error.message)
if isinstance(error, BadDataError):
return Response(status=400, mimetype="text/plain", response=error.message)
if sentry.client:
sentry.captureException()
return Response(status=500, mimetype="text/plain", response=error.message)
# Keeping static files endpoints here due to an issue when returning response for static files.
# Similar issue: https://github.com/zalando/connexion/issues/401
@app.route('/robots.txt')
def robots():
return send_from_directory(app.static_folder, "robots.txt")
@app.route('/contribute.json')
def contributejson():
return send_from_directory(app.static_folder, "contribute.json")
@app.before_request
def set_cache_control():
# By default, we want a cache that can be shared across requests from
# different users ("public").
# and a maximum age of 90 seconds, to keep our TTL low.
# We bumped this from 60s -> 90s in November, 2016.
setattr(app, 'cacheControl', app.config.get("CACHE_CONTROL", "public, max-age=90"))
@app.route('/debug/api.yml')
def get_yaml():
if app.config.get('SWAGGER_DEBUG', False):
import yaml
app_spec = yaml.dump(spec)
return Response(mimetype='text/plain', response=app_spec)
return Response(status=404)
| mpl-2.0 | -1,816,811,322,526,073,900 | 35.213675 | 116 | 0.695775 | false |
ales-erjavec/orange-bio | orangecontrib/bio/kegg/tests/test_api.py | 2 | 3591 | import unittest
import tempfile
import shutil
try:
from unittest import mock
except ImportError:
import backports.unittest_mock
backports.unittest_mock.install()
from unittest import mock
try:
from types import SimpleNamespace as namespace
except ImportError:
class namespace(object):
def __init__(self, **kwargs): self.__dict__.update(kwargs)
def __repr__(self):
contents = ",".join("{}={!r}".format(*it)
for it in sorted(self.__dict__.items()))
return "namespace(" + contents + ")"
import doctest
from orangecontrib.bio import kegg
from orangecontrib.bio.kegg import api as keggapi
from orangecontrib.bio.kegg import conf as keggconf
list_organism = """\
T01001\thsa\tHomo sapiens (human)\tEukaryotes;Animals;Vertebrates;Mammals
T00005\tsce\tSaccharomyces cerevisiae (budding yeast)\tEukaryotes;Fungi;Ascomycetes;Saccharomycetes
T00245\tddi\tDictyostelium discoideum (cellular slime mold)\tEukaryotes;Protists;Amoebozoa;Dictyostelium\
"""
list_pathway_hsa = """\
path:hsa00010\tGlycolysis / Gluconeogenesis - Homo sapiens (human)
path:hsa00020\tCitrate cycle (TCA cycle) - Homo sapiens (human)
path:hsa00030\tPentose phosphate pathway - Homo sapiens (human)\
"""
info_pathway = """\
pathway KEGG Pathway Database
path Release 81.0+/01-18, Jan 17
Kanehisa Laboratories
479,620 entries
"""
genome_T01001 = """\
ENTRY T01001 Complete Genome
NAME hsa, HUMAN, 9606
DEFINITION Homo sapiens (human)
ANNOTATION manual
TAXONOMY TAX:9606
LINEAGE Eukaryota; Metazoa; Chordata; Craniata; Vertebrata; Euteleostomi; Mammalia; Eutheria; Euarchontoglires; Primates; Haplorrhini; Catarrhini; Hominidae; Homo
DATA_SOURCE RefSeq (Assembly:GCF_000001405.31)
ORIGINAL_DB NCBI
OMIM
HGNC
HPRD
Ensembl
STATISTICS Number of protein genes: 20234
Number of RNA genes: 18981
///
"""
def mock_service():
s = namespace(
list=namespace(
organism=namespace(get=lambda: list_organism),
pathway=lambda org: {
"hsa": namespace(get=lambda: list_pathway_hsa)
}[org],
),
info=lambda db:
{"pathway": namespace(get=lambda: info_pathway)}[db],
get=lambda key: {
"genome:T01001": namespace(get=lambda: genome_T01001)
}[key],
)
return s
def mock_kegg_api():
api = keggapi.KeggApi()
api.service = mock_service()
return api
def load_tests(loader, tests, ignore):
def setUp(testcase):
# testcase._tmpdir = tempfile.TemporaryDirectory(prefix="kegg-tests")
testcase._tmpdir = tempfile.mkdtemp(prefix="kegg-tests")
testcase._old_cache_path = keggconf.params["cache.path"]
keggconf.params["cache.path"] = testcase._tmpdir
testcase._mock_ctx = mock.patch(
"orangecontrib.bio.kegg.api.web_service",
mock_service)
testcase._mock_ctx.__enter__()
s = keggapi.web_service()
assert isinstance(s, namespace)
def tearDown(testcase):
testcase._mock_ctx.__exit__(None, None, None)
keggconf.params["cache.path"] = testcase._old_cache_path
shutil.rmtree(testcase._tmpdir)
api = mock_kegg_api()
tests.addTests(
doctest.DocTestSuite(
keggapi, optionflags=doctest.ELLIPSIS,
extraglobs={"api": api},
setUp=setUp, tearDown=tearDown
)
)
return tests
| gpl-3.0 | -8,012,116,879,656,298,000 | 29.692308 | 166 | 0.63687 | false |
proxysh/Safejumper-for-Desktop | buildlinux/env32/lib/python2.7/site-packages/pip/_vendor/cachecontrol/heuristics.py | 490 | 4141 | import calendar
import time
from email.utils import formatdate, parsedate, parsedate_tz
from datetime import datetime, timedelta
TIME_FMT = "%a, %d %b %Y %H:%M:%S GMT"
def expire_after(delta, date=None):
date = date or datetime.now()
return date + delta
def datetime_to_header(dt):
return formatdate(calendar.timegm(dt.timetuple()))
class BaseHeuristic(object):
def warning(self, response):
"""
Return a valid 1xx warning header value describing the cache
adjustments.
        The response is provided to allow warnings like 113
        http://tools.ietf.org/html/rfc7234#section-5.5.4 where we need
        to explicitly say the response is over 24 hours old.
"""
return '110 - "Response is Stale"'
def update_headers(self, response):
"""Update the response headers with any new headers.
NOTE: This SHOULD always include some Warning header to
signify that the response was cached by the client, not
by way of the provided headers.
"""
return {}
def apply(self, response):
updated_headers = self.update_headers(response)
if updated_headers:
response.headers.update(updated_headers)
warning_header_value = self.warning(response)
if warning_header_value is not None:
response.headers.update({'Warning': warning_header_value})
return response
class OneDayCache(BaseHeuristic):
"""
    Cache the response by providing an Expires header 1 day in the
future.
"""
def update_headers(self, response):
headers = {}
if 'expires' not in response.headers:
date = parsedate(response.headers['date'])
expires = expire_after(timedelta(days=1),
date=datetime(*date[:6]))
headers['expires'] = datetime_to_header(expires)
headers['cache-control'] = 'public'
return headers
class ExpiresAfter(BaseHeuristic):
"""
Cache **all** requests for a defined time period.
"""
def __init__(self, **kw):
self.delta = timedelta(**kw)
def update_headers(self, response):
expires = expire_after(self.delta)
return {
'expires': datetime_to_header(expires),
'cache-control': 'public',
}
def warning(self, response):
tmpl = '110 - Automatically cached for %s. Response might be stale'
return tmpl % self.delta
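# A minimal usage sketch (assuming the CacheControl/requests integration this
# package is normally paired with; nothing in this module depends on it):
#
#   import requests
#   from cachecontrol import CacheControl
#   from cachecontrol.heuristics import ExpiresAfter
#
#   sess = CacheControl(requests.Session(), heuristic=ExpiresAfter(days=1))
#   sess.get('http://example.com/')  # cached for a day regardless of headers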
class LastModified(BaseHeuristic):
"""
If there is no Expires header already, fall back on Last-Modified
using the heuristic from
http://tools.ietf.org/html/rfc7234#section-4.2.2
to calculate a reasonable value.
Firefox also does something like this per
https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching_FAQ
http://lxr.mozilla.org/mozilla-release/source/netwerk/protocol/http/nsHttpResponseHead.cpp#397
Unlike mozilla we limit this to 24-hr.
"""
cacheable_by_default_statuses = set([
200, 203, 204, 206, 300, 301, 404, 405, 410, 414, 501
])
def update_headers(self, resp):
headers = resp.headers
if 'expires' in headers:
return {}
if 'cache-control' in headers and headers['cache-control'] != 'public':
return {}
if resp.status not in self.cacheable_by_default_statuses:
return {}
if 'date' not in headers or 'last-modified' not in headers:
return {}
date = calendar.timegm(parsedate_tz(headers['date']))
last_modified = parsedate(headers['last-modified'])
if date is None or last_modified is None:
return {}
now = time.time()
current_age = max(0, now - date)
delta = date - calendar.timegm(last_modified)
freshness_lifetime = max(0, min(delta / 10, 24 * 3600))
if freshness_lifetime <= current_age:
return {}
expires = date + freshness_lifetime
return {'expires': time.strftime(TIME_FMT, time.gmtime(expires))}
def warning(self, resp):
return None
| gpl-2.0 | -349,287,363,617,834,800 | 29.007246 | 98 | 0.616518 | false |
ssteo/moviepy | moviepy/video/fx/freeze_region.py | 13 | 1920 | from moviepy.decorators import apply_to_mask
from .crop import crop
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
#@apply_to_mask
def freeze_region(clip, t=0, region=None, outside_region=None, mask=None):
""" Freezes one region of the clip while the rest remains animated.
You can choose one of three methods by providing either `region`,
`outside_region`, or `mask`.
Parameters
-----------
t
      Time at which the region is frozen.
    region
      A tuple (x1, y1, x2, y2) defining the region of the screen (in pixels)
      which will be frozen. You can provide outside_region or mask instead.
    outside_region
      A tuple (x1, y1, x2, y2) defining the region of the screen (in pixels)
      which will be the only non-frozen region.
    mask
      If not None, will overlay a frozen version of the clip on the current clip,
      with the provided mask. In other words, the "visible" pixels in the mask
      indicate the frozen region in the final picture.
"""
if region is not None:
x1, y1, x2, y2 = region
freeze = (clip.fx(crop, *region)
.to_ImageClip(t=t)
.set_duration(clip.duration)
.set_position((x1,y1)))
return CompositeVideoClip([clip, freeze])
elif outside_region is not None:
x1, y1, x2, y2 = outside_region
animated_region = (clip.fx(crop, *outside_region)
.set_position((x1,y1)))
freeze = (clip.to_ImageClip(t=t)
.set_duration(clip.duration))
return CompositeVideoClip([freeze, animated_region])
elif mask is not None:
freeze = (clip.to_ImageClip(t=t)
.set_duration(clip.duration)
.set_mask(mask))
return CompositeVideoClip([clip, freeze])
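# A minimal usage sketch (hypothetical file name and coordinates):
#
#   from moviepy.editor import VideoFileClip
#   clip = VideoFileClip("video.mp4")
#   frozen = clip.fx(freeze_region, t=2, region=(0, 0, 200, 100))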
| mit | 9,020,259,678,958,154,000 | 33.285714 | 82 | 0.607813 | false |
hamzehd/edx-platform | common/djangoapps/student/tests/test_certificates.py | 8 | 7756 | """Tests for display of certificates on the student dashboard. """
import unittest
import ddt
import mock
from django.conf import settings
from django.core.urlresolvers import reverse
from mock import patch
from django.test.utils import override_settings
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from certificates.tests.factories import GeneratedCertificateFactory # pylint: disable=import-error
from certificates.api import get_certificate_url # pylint: disable=import-error
from course_modes.models import CourseMode
from student.models import LinkedInAddToProfileConfiguration
# pylint: disable=no-member
def _fake_is_request_in_microsite():
"""
Mocked version of microsite helper method to always return true
"""
return True
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class CertificateDisplayTest(ModuleStoreTestCase):
"""Tests display of certificates on the student dashboard. """
USERNAME = "test_user"
PASSWORD = "password"
DOWNLOAD_URL = "http://www.example.com/certificate.pdf"
def setUp(self):
super(CertificateDisplayTest, self).setUp()
self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
result = self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.assertTrue(result, msg="Could not log in")
self.course = CourseFactory()
self.course.certificates_display_behavior = "early_with_info"
self.update_course(self.course, self.user.username)
@ddt.data('verified', 'professional')
@patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False})
def test_display_verified_certificate(self, enrollment_mode):
self._create_certificate(enrollment_mode)
self._check_can_download_certificate()
@patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False})
def test_display_verified_certificate_no_id(self):
"""
Confirm that if we get a certificate with a no-id-professional mode
we still can download our certificate
"""
self._create_certificate(CourseMode.NO_ID_PROFESSIONAL_MODE)
self._check_can_download_certificate_no_id()
@ddt.data('verified', 'honor')
@override_settings(CERT_NAME_SHORT='Test_Certificate')
@patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
def test_display_download_certificate_button(self, enrollment_mode):
"""
        Tests that when CERTIFICATES_HTML_VIEW is True, the course has web
        certificates enabled via the cert_html_view_enabled setting, and no
        active certificate configuration is available, none of the download
        certificate buttons are visible.
"""
self.course.cert_html_view_enabled = True
self.course.save()
self.store.update_item(self.course, self.user.id)
self._create_certificate(enrollment_mode)
self._check_can_not_download_certificate()
@ddt.data('verified')
@override_settings(CERT_NAME_SHORT='Test_Certificate')
@patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
def test_linked_student_to_web_view_credential(self, enrollment_mode):
test_url = get_certificate_url(
user_id=self.user.id,
course_id=unicode(self.course.id)
)
self._create_certificate(enrollment_mode)
certificates = [
{
'id': 0,
'name': 'Test Name',
'description': 'Test Description',
'is_active': True,
'signatories': [],
'version': 1
}
]
self.course.certificates = {'certificates': certificates}
self.course.cert_html_view_enabled = True
self.course.save() # pylint: disable=no-member
self.store.update_item(self.course, self.user.id)
response = self.client.get(reverse('dashboard'))
self.assertContains(response, u'View Test_Certificate')
self.assertContains(response, test_url)
def test_post_to_linkedin_invisibility(self):
"""
Verifies that the post certificate to linked button
does not appear by default (when config is not set)
"""
self._create_certificate('honor')
# until we set up the configuration, the LinkedIn action
# button should not be visible
self._check_linkedin_visibility(False)
def test_post_to_linkedin_visibility(self):
"""
Verifies that the post certificate to linked button appears
as expected
"""
self._create_certificate('honor')
config = LinkedInAddToProfileConfiguration(
company_identifier='0_mC_o2MizqdtZEmkVXjH4eYwMj4DnkCWrZP_D9',
enabled=True
)
config.save()
# now we should see it
self._check_linkedin_visibility(True)
@mock.patch("microsite_configuration.microsite.is_request_in_microsite", _fake_is_request_in_microsite)
def test_post_to_linkedin_microsite(self):
"""
Verifies behavior for microsites which disables the post to LinkedIn
feature (for now)
"""
self._create_certificate('honor')
config = LinkedInAddToProfileConfiguration(
company_identifier='0_mC_o2MizqdtZEmkVXjH4eYwMj4DnkCWrZP_D9',
enabled=True
)
config.save()
# now we should not see it because we are in a microsite
self._check_linkedin_visibility(False)
def _check_linkedin_visibility(self, is_visible):
"""
Performs assertions on the Dashboard
"""
response = self.client.get(reverse('dashboard'))
if is_visible:
self.assertContains(response, u'Add Certificate to LinkedIn Profile')
else:
self.assertNotContains(response, u'Add Certificate to LinkedIn Profile')
def _create_certificate(self, enrollment_mode):
"""Simulate that the user has a generated certificate. """
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id, mode=enrollment_mode)
GeneratedCertificateFactory(
user=self.user,
course_id=self.course.id,
mode=enrollment_mode,
download_url=self.DOWNLOAD_URL,
status="downloadable",
grade=0.98,
)
def _check_can_download_certificate(self):
response = self.client.get(reverse('dashboard'))
self.assertContains(response, u'Download Your ID Verified')
self.assertContains(response, self.DOWNLOAD_URL)
def _check_can_download_certificate_no_id(self):
"""
Inspects the dashboard to see if a certificate for a non verified course enrollment
is present
"""
response = self.client.get(reverse('dashboard'))
self.assertContains(response, u'Download')
self.assertContains(response, u'(PDF)')
self.assertContains(response, self.DOWNLOAD_URL)
def _check_can_not_download_certificate(self):
"""
Make sure response does not have any of the download certificate buttons
"""
response = self.client.get(reverse('dashboard'))
self.assertNotContains(response, u'View Test_Certificate')
self.assertNotContains(response, u'Download Your Test_Certificate (PDF)')
self.assertNotContains(response, u'Download Test_Certificate (PDF)')
self.assertNotContains(response, self.DOWNLOAD_URL)
| agpl-3.0 | 6,241,288,495,151,705,000 | 37.78 | 107 | 0.667741 | false |
zkidkid/ssdb | deps/cpy/engine.py | 8 | 13607 | # encoding=utf-8
#################################
# Author: ideawu
# Link: http://www.ideawu.net/
#################################
import sys, os, shutil, datetime
import antlr3
import antlr3.tree
from ExprLexer import ExprLexer
from ExprParser import ExprParser
class CpyEngine:
found_files = set()
def find_imports(self, srcfile, base_dir):
#print ' file', srcfile
srcfile = os.path.realpath(srcfile)
if srcfile in self.found_files:
return set()
self.found_files.add(srcfile)
fp = open(srcfile, 'rt')
lines = fp.readlines()
fp.close()
imports = []
for line in lines:
if line.find('import') == -1:
continue
line = line.strip().strip(';');
ps = line.split();
if ps[0] != 'import':
continue
for p in ps[ 1 :]:
p = p.strip(',')
imports.append(p);
for p in imports:
#print 'import ' + p
self.find_files(p, base_dir);
return self.found_files
def find_files(self, member, base_dir):
ps = member.split('.')
last = ps.pop(-1)
path = base_dir + '/' + '/'.join(ps)
if last == '*':
if os.path.isdir(path):
fs = os.listdir(path)
for f in fs:
if f.endswith('.cpy'):
file = os.path.realpath(path + '/' + f)
self.find_imports(file, path)
else:
file = path + '/' + last + '.cpy'
if os.path.isfile(file):
self.find_imports(file, path)
def compile(self, srcfile, base_dir, output_dir):
srcfile = os.path.realpath(srcfile)
base_dir = os.path.realpath(base_dir)
output_dir = os.path.realpath(output_dir)
# files = self.find_imports(srcfile, base_dir)
# files.remove(srcfile)
# if len(files) > 0:
# files = list(files)
# files.sort()
# #print ' ' + '\n '.join(files)
#
# shead, stail = os.path.split(srcfile)
# slen = len(shead)
# for f in files:
# head, tail = os.path.split(f)
# rel_dir = head[slen :]
# self._compile(f, base_dir, output_dir + rel_dir)
dstfile = self._compile(srcfile, base_dir, output_dir)
return dstfile
def _compile(self, srcfile, base_dir, output_dir):
head, tail = os.path.split(srcfile)
dstfile = os.path.normpath(output_dir + '/' + tail.split('.')[0] + '.py')
if os.path.exists(dstfile):
src_mtime = os.path.getmtime(srcfile)
dst_mtime = os.path.getmtime(dstfile)
#print src_mtime, dst_mtime
if src_mtime < dst_mtime:
return dstfile
#print 'compile: %-30s=> %s' % (srcfile, dstfile)
#print 'compile: %-30s=> %s' % (srcfile[len(base_dir)+1:], dstfile[len(base_dir)+1:])
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(output_dir + '/__init__.py'):
fp = open(output_dir + '/__init__.py', 'w')
fp.close()
#fp = codecs.open(sys.argv[1], 'r', 'utf-8')
fp = open(srcfile, 'r')
char_stream = antlr3.ANTLRInputStream(fp)
lexer = ExprLexer(char_stream)
tokens = antlr3.CommonTokenStream(lexer)
parser = ExprParser(tokens)
r = parser.prog()
# this is the root of the AST
root = r.tree
#print (root.toStringTree())
#print '-------'
nodes = antlr3.tree.CommonTreeNodeStream(root)
nodes.setTokenStream(tokens)
from Eval import Eval
eval = Eval(nodes)
#######################################
cpy = CpyBuilder(dstfile, base_dir, output_dir)
eval.prog(cpy)
return dstfile
class CpyBuilder:
compiled_files = set()
def __init__(self, dstfile, base_dir, output_dir):
self.vars = -1
self.if_depth = 0
self.block_depth = 0
self.switch_expr_stack = []
self.switch_continue_stack = []
self.class_stack = []
self.class_names = [];
self.constructed = False;
self.base_dir = base_dir
self.output_dir = output_dir
self.fp = open(dstfile, 'w')
self.write('# encoding=utf-8\n')
self.write('# Generated by cpy\n');
self.write('# ' + datetime.datetime.now().isoformat(' ') + '\n');
self.write('import os, sys\n')
self.write('from sys import stdin, stdout\n\n')
def tmp_var(self, name = ''):
self.vars += 1
return '_cpy_%s_%d' %(name, self.vars)
def close(self):
self.fp.close()
def write(self, text):
text = text.encode('utf-8')
self.fp.write(text)
# debug
#sys.stdout.write(text)
def indent(self):
return '\t' * self.block_depth
def _compile_dir(self, rel_path):
mods = []
files = os.listdir(self.base_dir + '/' + rel_path)
for f in files:
if f.endswith('.cpy'):
mods.append(f[0: -4])
if f.endswith('.py'):
mods.append(f[0: -3])
self._compile(rel_path, f)
return mods
def _compile(self, rel_path, f):
base_dir = os.path.normpath(self.base_dir + '/' + rel_path)
srcfile = os.path.normpath(base_dir + '/' + f)
output_dir = os.path.normpath(self.output_dir + '/' + rel_path)
#print base_dir, output_dir, f, rel_path;
if f.endswith('.py'):
head, tail = os.path.split(f)
#print 'copy: %-30s=> %s' % (srcfile, output_dir + '/' + tail)
shutil.copy(srcfile, output_dir + '/' + tail)
elif f.endswith('.cpy'):
self.write('#### start cpy import ###\n');
self.write(self.indent())
self.write('from engine import CpyEngine\n')
self.write(self.indent())
self.write('cpy = CpyEngine()\n')
self.write(self.indent())
self.write('dstfile = cpy.compile(\'' + srcfile + '\', \'' + rel_path + '\', \'' + output_dir + '\')\n')
self.write('#### end cpy import ###\n');
if srcfile in self.compiled_files:
return
self.compiled_files.add(srcfile)
#e = CpyEngine()
#d = e.compile(srcfile, base_dir, output_dir)
def op_import(self, member, all):
ps = member.split('.')
package = []
while True:
if len(ps) == 0:
break
p = ps.pop(0)
package.append(p)
rel_path = '/'.join(package);
path = self.base_dir + '/' + rel_path
if os.path.isdir(path):
if len(ps) == 0:
mods = self._compile_dir(rel_path)
if all == '*':
for m in mods:
self.write(self.indent())
self.write('from %s import %s\n' %(member, m))
else:
self.write(self.indent())
self.write('import %s\n' % member)
break
elif os.path.isfile(path + '.cpy') or os.path.isfile(path + '.py'):
filename = os.path.basename(path)
rel_path = '/'.join(package[ : -1]);
if os.path.isfile(path + '.cpy'):
self._compile(rel_path, filename + '.cpy')
else:
self._compile(rel_path, filename + '.py')
if len(ps) == 0:
if all == '*':
self.write(self.indent())
self.write('from %s import *\n' % member)
else:
self.write(self.indent())
self.write('import %s\n' % member)
break
elif len(ps) == 1:
mod = '.'.join(package)
cls = ps[-1]
self.write(self.indent())
self.write('from %s import %s\n' %(mod, cls))
else:
# error
print ("Cpy error: invalid module '%s'" % member)
sys.exit(0)
break
else:
self.write(self.indent())
if all == '*':
self.write('from %s import *\n' % member)
else:
ps = member.split('.')
if len(ps) == 1:
self.write('import %s\n' % member)
else:
self.write('from %s import %s\n' %('.'.join(ps[0 : -1]), ps[-1]))
break
def block_enter(self):
self.block_depth += 1
self.write(self.indent() + 'pass\n')
def block_leave(self):
self.block_depth -= 1
def if_enter(self):
self.write('\n')
self.write(self.indent())
self.if_depth += 1
def if_leave(self):
self.if_depth -= 1
def op_if(self, expr):
self.write('if %s:\n' % expr)
def op_else(self):
self.write(self.indent() + 'else:\n')
def op_else_if(self):
self.write(self.indent() + 'el')
def stmt(self, text):
self.write(self.indent() + text + '\n')
def op_assign(self, id, val, op):
text = '%s %s %s' % (id, op, val)
return text
def op_inc(self, id):
return id + ' += 1';
def op_dec(self, id):
return id + ' -= 1';
def op_call(self, text):
self.write(self.indent() + text + '\n')
def op_print(self, text):
self.write(self.indent())
self.write('print %s\n' % text)
def op_printf(self, format, text):
self.write(self.indent())
if text == None:
self.write('sys.stdout.write(%s)\n' % (format))
else:
self.write('sys.stdout.write(%s %% (%s))\n' % (format, text))
def op_while(self, expr):
self.write('\n')
self.write(self.indent())
self.write('while %s:\n' % expr)
def op_do_while_enter(self):
self.write('\n')
self.write(self.indent())
self.write('while True:\n')
def op_do_while_leave(self, expr):
self.write('\n')
self.block_depth += 1
self.write(self.indent())
self.write('if %s:\n' % expr)
self.block_depth += 1
self.write(self.indent())
self.write('continue')
self.block_depth -= 1
self.write('\n')
self.write(self.indent())
self.write('break')
self.block_depth -= 1
def op_switch_enter(self, expr):
self.write('\n')
self.switch_expr_stack.append(expr)
var = '_continue_%d' % len(self.switch_expr_stack)
self.switch_continue_stack.append(var)
self.write(self.indent() + '# {{{ switch: ' + expr + '\n')
self.write(self.indent())
self.write(var + ' = False\n')
self.write(self.indent())
self.write('while True:\n')
self.block_depth += 1
def op_switch_leave(self):
self.write(self.indent() + 'break\n')
var = self.switch_continue_stack[-1]
self.write(self.indent())
self.write('if %s:\n' % var)
self.block_depth += 1
self.write(self.indent())
self.write('continue\n')
self.block_depth -= 1
self.block_depth -= 1
self.write(self.indent() + '# }}} switch\n\n')
self.switch_expr_stack.pop()
self.switch_continue_stack.pop()
def op_case_enter(self):
self.write(self.indent())
self.write('if False')
self.block_depth += 1
def op_case_test(self, expr):
self.write(' or ((%s) == %s)' % (self.switch_expr_stack[-1], expr))
def op_case(self):
self.write(':\n')
self.write(self.indent())
self.write('pass\n')
def op_case_leave(self):
self.block_depth -= 1
def op_break(self):
self.write(self.indent())
self.write('break\n')
def op_continue(self):
if self.switch_expr_stack:
var = self.switch_continue_stack[-1]
self.write(self.indent())
self.write(var + ' = True\n')
self.write(self.indent())
self.write('break\n')
else:
self.write(self.indent())
self.write('continue\n')
def op_return(self, expr):
self.write(self.indent())
if expr == None: expr = ''
self.write('return %s\n' % expr)
def op_default_enter(self):
self.write(self.indent() + '### default\n')
def op_default_leave(self):
pass
def op_function(self, id, params):
self.write('\n')
if len(self.class_stack) > 0:
# in class
if params == None or params == '':
params = 'this'
else:
params = 'this, ' + params
else:
if params == None:
params = ''
self.write(self.indent() + 'def ' + id + '(' + params + '):\n')
def op_foreach(self, expr, k, vals):
self.write('\n')
tmp_var_ref = self.tmp_var('r')
tmp_var_l = self.tmp_var('l')
tmp_var_k = self.tmp_var('k')
tmp_var_is_dict = self.tmp_var('b')
self.write(self.indent())
self.write('%s = %s = %s\n' %(tmp_var_ref, tmp_var_l, expr))
self.write(self.indent())
self.write('if type(%s).__name__ == \'dict\': %s=True; %s=%s.iterkeys()\n' %(tmp_var_ref, tmp_var_is_dict, tmp_var_l, tmp_var_ref))
self.write(self.indent())
self.write('else: %s=False;' %tmp_var_is_dict)
if k != None:
self.write('%s=-1' %k)
self.write('\n')
self.write(self.indent())
self.write('for %s in %s:\n' %(tmp_var_k, tmp_var_l))
if k == None:
self.block_depth += 1
self.write(self.indent())
self.write('if %s: %s=%s[%s]\n' %(tmp_var_is_dict, vals, tmp_var_ref, tmp_var_k))
self.write(self.indent())
self.write('else: %s=%s\n' %(vals, tmp_var_k))
self.block_depth -= 1
else:
self.block_depth += 1
self.write(self.indent())
self.write('if %s: %s=%s; %s=%s[%s]\n' %(tmp_var_is_dict, k, tmp_var_k, vals, tmp_var_ref, tmp_var_k))
self.write(self.indent())
self.write('else: %s += 1; %s=%s\n' %(k, vals, tmp_var_k))
self.block_depth -= 1
def op_throw(self, expr):
self.write(self.indent())
self.write('raise %s\n' % expr)
def op_try(self):
self.write(self.indent())
self.write('try:\n')
def op_catch(self, type, var):
self.write(self.indent())
if var == None:
self.write('except %s:\n' % type)
else:
self.write('except %s , %s:\n' %(type, var))
def op_finally(self):
self.write(self.indent())
self.write('finally:\n')
def op_class_enter(self, name, parent):
self.class_stack.append([])
self.class_names.append(name)
self.constructed = False;
self.parent = parent;
self.write(self.indent())
if parent == None:
self.write('class %s(object):\n' % name)
else:
self.write('class %s(%s):\n' % (name, parent))
self.block_depth += 1
self.write(self.indent())
self.write('pass\n')
def op_class_leave(self):
if not self.constructed:
self.op_construct('');
self.class_stack.pop()
self.class_names.pop()
self.write('\n')
self.block_depth -= 1
def op_var_def(self, is_static, id, val):
if is_static:
self.write(self.indent())
if val == None:
s = '%s = None' % id
else:
s = '%s = %s' % (id, val)
self.write(s)
else:
if val == None:
s = 'this.%s = None' % id
else:
s = 'this.%s = %s' % (id, val)
self.class_stack[-1].append(s)
def op_construct(self, params):
self.constructed = True;
self.write('\n')
self.op_function('__init__', params)
self.block_depth += 1
if self.parent and self.parent != 'object':
self.write(self.indent())
self.write('super(' + self.class_names[-1] + ', this).__init__(' + params + ')\n')
for s in self.class_stack[-1]:
self.write(self.indent())
self.write(s + '\n')
self.block_depth -= 1
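# A minimal usage sketch (the paths below are made up, not part of the original
# module): the engine compiles a .cpy source file into plain Python, resolving
# imports relative to a base directory.
#
#   engine = CpyEngine()
#   dstfile = engine.compile('./src/main.cpy', './src', './build')
#   # dstfile now points at ./build/main.py, importable as a normal module.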
| bsd-3-clause | -2,541,154,176,707,485,000 | 24.918095 | 133 | 0.59462 | false |
ktosiek/spacewalk | client/tools/rhncfg/actions/ModeControllerCreator.py | 2 | 2565 | #
# Copyright (c) 2008--2011 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import sys
import string
import ModeController
import Modes
class ModeControllerCreator:
    #If mode_list isn't set in the constructor, populate_list must be called before create_controller.
def __init__(self, mode_list=None):
self.mode_list = mode_list or []
#A reference to a class obj. This is the type of controller that will be returned by create_controller.
self.controller_class = ModeController.ModeController
#Sets the class that the controller will be instantiated as. The constructor for the class shouldn't have
#to take any parameters.
def set_controller_class(self, controller_class):
self.controller_class = controller_class
#Populates self.mode_list with concrete implementations of Modes.
def populate_list(self, mode_list):
self.mode_list = mode_list
#using the Modes in self.mode_list, create, populate, and return a ModeController
def create_controller(self):
controller = self.controller_class()
for m in self.mode_list:
controller.add_mode(m)
return controller
def get_controller_creator():
if string.find(sys.platform, 'sunos') > -1:
mode_list = [Modes.SolarisDeployMode(), Modes.SolarisDiffMode(), Modes.SolarisUploadMode(), Modes.SolarisMTimeUploadMode(), Modes.SolarisAllMode()]
else:
mode_list = [Modes.DeployMode(), Modes.DiffMode(), Modes.UploadMode(), Modes.MTimeUploadMode(), Modes.AllMode()]
controller = ModeControllerCreator(mode_list=mode_list)
controller.set_controller_class(ModeController.ConfigFilesModeController)
return controller
def get_run_controller_creator():
if string.find(sys.platform, 'sunos') > -1:
mode_list = [Modes.SolarisRunMode(), Modes.SolarisRunAllMode()]
else:
mode_list = [Modes.RunMode(), Modes.RunAllMode()]
controller = ModeControllerCreator(mode_list=mode_list)
return controller
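# Illustrative usage (not part of the original module): pick the mode set for
# the current platform and build the controller that manages those modes.
#
#   creator = get_controller_creator()
#   controller = creator.create_controller()
#   # 'controller' is a ConfigFilesModeController populated with the
#   # deploy/diff/upload modes appropriate for this platform.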
| gpl-2.0 | -7,244,102,569,133,604,000 | 39.078125 | 155 | 0.723587 | false |
WatanabeYasumasa/edx-platform | cms/urls.py | 6 | 6663 | from django.conf import settings
from django.conf.urls import patterns, include, url
from xmodule.modulestore import parsers
# There is a course creators admin table.
from ratelimitbackend import admin
admin.autodiscover()
urlpatterns = patterns('', # nopep8
url(r'^transcripts/upload$', 'contentstore.views.upload_transcripts', name='upload_transcripts'),
url(r'^transcripts/download$', 'contentstore.views.download_transcripts', name='download_transcripts'),
url(r'^transcripts/check$', 'contentstore.views.check_transcripts', name='check_transcripts'),
url(r'^transcripts/choose$', 'contentstore.views.choose_transcripts', name='choose_transcripts'),
url(r'^transcripts/replace$', 'contentstore.views.replace_transcripts', name='replace_transcripts'),
url(r'^transcripts/rename$', 'contentstore.views.rename_transcripts', name='rename_transcripts'),
url(r'^transcripts/save$', 'contentstore.views.save_transcripts', name='save_transcripts'),
url(r'^preview/xblock/(?P<usage_id>.*?)/handler/(?P<handler>[^/]*)(?:/(?P<suffix>.*))?$',
'contentstore.views.preview_handler', name='preview_handler'),
url(r'^xblock/(?P<usage_id>.*?)/handler/(?P<handler>[^/]*)(?:/(?P<suffix>.*))?$',
'contentstore.views.component_handler', name='component_handler'),
url(r'^xblock/resource/(?P<block_type>[^/]*)/(?P<uri>.*)$',
'contentstore.views.xblock.xblock_resource', name='xblock_resource_url'),
# temporary landing page for a course
url(r'^edge/(?P<org>[^/]+)/(?P<course>[^/]+)/course/(?P<coursename>[^/]+)$',
'contentstore.views.landing', name='landing'),
url(r'^not_found$', 'contentstore.views.not_found', name='not_found'),
url(r'^server_error$', 'contentstore.views.server_error', name='server_error'),
# temporary landing page for edge
url(r'^edge$', 'contentstore.views.edge', name='edge'),
# noop to squelch ajax errors
url(r'^event$', 'contentstore.views.event', name='event'),
url(r'^xmodule/', include('pipeline_js.urls')),
url(r'^heartbeat$', include('heartbeat.urls')),
url(r'^user_api/', include('user_api.urls')),
url(r'^lang_pref/', include('lang_pref.urls')),
)
# User creation and updating views
urlpatterns += patterns(
'',
url(r'^create_account$', 'student.views.create_account', name='create_account'),
url(r'^activate/(?P<key>[^/]*)$', 'student.views.activate_account', name='activate'),
# ajax view that actually does the work
url(r'^login_post$', 'student.views.login_user', name='login_post'),
url(r'^logout$', 'student.views.logout_user', name='logout'),
url(r'^embargo$', 'student.views.embargo', name="embargo"),
)
# restful api
urlpatterns += patterns(
'contentstore.views',
url(r'^$', 'howitworks', name='homepage'),
url(r'^howitworks$', 'howitworks'),
url(r'^signup$', 'signup', name='signup'),
url(r'^signin$', 'login_page', name='login'),
url(r'^request_course_creator$', 'request_course_creator'),
# (?ix) == ignore case and verbose (multiline regex)
url(r'(?ix)^course_team/{}(/)?(?P<email>.+)?$'.format(parsers.URL_RE_SOURCE), 'course_team_handler'),
url(r'(?ix)^course_info/{}$'.format(parsers.URL_RE_SOURCE), 'course_info_handler'),
url(
r'(?ix)^course_info_update/{}(/)?(?P<provided_id>\d+)?$'.format(parsers.URL_RE_SOURCE),
'course_info_update_handler'
),
url(r'(?ix)^course($|/){}$'.format(parsers.URL_RE_SOURCE), 'course_handler'),
url(r'(?ix)^subsection($|/){}$'.format(parsers.URL_RE_SOURCE), 'subsection_handler'),
url(r'(?ix)^unit($|/){}$'.format(parsers.URL_RE_SOURCE), 'unit_handler'),
url(r'(?ix)^container($|/){}$'.format(parsers.URL_RE_SOURCE), 'container_handler'),
url(r'(?ix)^checklists/{}(/)?(?P<checklist_index>\d+)?$'.format(parsers.URL_RE_SOURCE), 'checklists_handler'),
url(r'(?ix)^orphan/{}$'.format(parsers.URL_RE_SOURCE), 'orphan_handler'),
url(r'(?ix)^assets/{}(/)?(?P<asset_id>.+)?$'.format(parsers.URL_RE_SOURCE), 'assets_handler'),
url(r'(?ix)^import/{}$'.format(parsers.URL_RE_SOURCE), 'import_handler'),
url(r'(?ix)^import_status/{}/(?P<filename>.+)$'.format(parsers.URL_RE_SOURCE), 'import_status_handler'),
url(r'(?ix)^export/{}$'.format(parsers.URL_RE_SOURCE), 'export_handler'),
url(r'(?ix)^xblock/{}/(?P<view_name>[^/]+)$'.format(parsers.URL_RE_SOURCE), 'xblock_view_handler'),
url(r'(?ix)^xblock($|/){}$'.format(parsers.URL_RE_SOURCE), 'xblock_handler'),
url(r'(?ix)^tabs/{}$'.format(parsers.URL_RE_SOURCE), 'tabs_handler'),
url(r'(?ix)^settings/details/{}$'.format(parsers.URL_RE_SOURCE), 'settings_handler'),
url(r'(?ix)^settings/grading/{}(/)?(?P<grader_index>\d+)?$'.format(parsers.URL_RE_SOURCE), 'grading_handler'),
url(r'(?ix)^settings/advanced/{}$'.format(parsers.URL_RE_SOURCE), 'advanced_settings_handler'),
url(r'(?ix)^textbooks/{}$'.format(parsers.URL_RE_SOURCE), 'textbooks_list_handler'),
url(r'(?ix)^textbooks/{}/(?P<tid>\d[^/]*)$'.format(parsers.URL_RE_SOURCE), 'textbooks_detail_handler'),
)
js_info_dict = {
'domain': 'djangojs',
# No packages needed, we get LOCALE_PATHS anyway.
'packages': (),
}
urlpatterns += patterns('',
# Serve catalog of localized strings to be rendered by Javascript
url(r'^i18n.js$', 'django.views.i18n.javascript_catalog', js_info_dict),
)
if settings.FEATURES.get('ENABLE_EXPORT_GIT'):
urlpatterns += (url(r'^(?P<org>[^/]+)/(?P<course>[^/]+)/export_git/(?P<name>[^/]+)$',
'contentstore.views.export_git', name='export_git'),)
if settings.FEATURES.get('ENABLE_SERVICE_STATUS'):
urlpatterns += patterns('',
url(r'^status/', include('service_status.urls')),
)
if settings.FEATURES.get('AUTH_USE_CAS'):
urlpatterns += (
url(r'^cas-auth/login/$', 'external_auth.views.cas_login', name="cas-login"),
url(r'^cas-auth/logout/$', 'django_cas.views.logout', {'next_page': '/'}, name="cas-logout"),
)
urlpatterns += patterns('', url(r'^admin/', include(admin.site.urls)),)
# enable automatic login
if settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING'):
urlpatterns += (
url(r'^auto_auth$', 'student.views.auto_auth'),
)
if settings.DEBUG:
try:
from .urls_dev import urlpatterns as dev_urlpatterns
urlpatterns += dev_urlpatterns
except ImportError:
pass
# Custom error pages
# pylint: disable=C0103
handler404 = 'contentstore.views.render_404'
handler500 = 'contentstore.views.render_500'
# display error page templates, for testing purposes
urlpatterns += (
url(r'404', handler404),
url(r'500', handler500),
)
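# For illustration only: the named routes above can be reversed from views or
# tests, e.g.:
#
#   from django.core.urlresolvers import reverse
#   reverse('login')     # -> '/signin'
#   reverse('homepage')  # -> '/'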
| agpl-3.0 | 7,923,510,241,690,101,000 | 44.02027 | 114 | 0.642653 | false |
Lilykos/inspire-next | inspire/modules/classifier/config.py | 1 | 1143 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2015 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Config for classifier INSPIRE module."""
import os
from invenio.config import CFG_PREFIX
CLASSIFIER_MODEL_PATH = os.path.join(CFG_PREFIX, "var/data/classifier/models")
"""
The base path for classifier models used for predictions.
"""
| gpl-2.0 | -6,344,380,451,561,356,000 | 34.71875 | 78 | 0.755031 | false |
scramblingbalam/Alta_Real | twit_token.py | 1 | 1193 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 02 17:07:02 2017
@author: Colin Drayton
"""
import sys
import nltk
import unicodedata as uniD
import re
# This module uses Python 3.5
# A function that turns tweets into one-line tokenized strings.
# I wrote this for Doc2vec training, but it could be useful elsewhere too.
# the goal is that after y
def compiler(UNI_CATs=['So','Po','Pi'],URL=True,ATs=True,HASHs=True):
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
words = '\w+'
at_tags = '@\w+'
hash_tags = '#\w+'
words_plus ='[^\w\s]+'
expr = []
if URL:
expr.append(url_regex)
expr.append(words)
if ATs:
expr.append(at_tags)
if HASHs:
expr.append(hash_tags)
if UNI_CATs:
uni_cats = UNI_CATs
uni_cats_chars = [c for c in map(chr, range(sys.maxunicode + 1)) if uniD.category(c) in uni_cats]
uni_cats_expr = '[\\' + '\\'.join(uni_cats_chars)+"]"
expr.append(uni_cats_expr)
expr.append(words_plus)
return re.compile("|".join(expr))
def ize(string,regex=compiler()):
tokenizer = nltk.RegexpTokenizer(regex)
return tokenizer.tokenize(string)
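# Rough usage sketch (the tweet text is invented); with the default categories
# the tokenizer keeps URLs, @-mentions and #hashtags as single tokens:
#
#   tokens = ize(u"Check https://example.com #nlp @someone")
#   # -> ['Check', 'https://example.com', '#nlp', '@someone']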
| mit | -7,263,662,056,736,341,000 | 26.744186 | 105 | 0.598491 | false |
levibostian/myBlanky | googleAppEngine/google/appengine/tools/devappserver2/admin/datastore_viewer_test.py | 2 | 32568 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for devappserver2.admin.datastore_viewer."""
import datetime
import os
import unittest
import google
import mox
import webapp2
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore
from google.appengine.api import datastore_types
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import datastore_stub_util
from google.appengine.tools.devappserver2 import api_server
from google.appengine.tools.devappserver2.admin import admin_request_handler
from google.appengine.tools.devappserver2.admin import datastore_viewer
class PropertyNameToValuesTest(unittest.TestCase):
"""Tests for datastore_viewer._property_name_to_value(s)."""
def setUp(self):
self.app_id = 'myapp'
self.entity1 = datastore.Entity('Kind1', id=123, _app=self.app_id)
self.entity1['cat'] = 5
self.entity1['dog'] = 10
self.entity2 = datastore.Entity('Kind1', id=124, _app=self.app_id)
self.entity2['dog'] = 15
self.entity2['mouse'] = 'happy'
def test_property_name_to_values(self):
self.assertEqual({'cat': [5],
'dog': mox.SameElementsAs([10, 15]),
'mouse': ['happy']},
datastore_viewer._property_name_to_values([self.entity1,
self.entity2]))
def test_property_name_to_value(self):
self.assertEqual({'cat': 5,
'dog': mox.Func(lambda v: v in [10, 15]),
'mouse': 'happy'},
datastore_viewer._property_name_to_value([self.entity1,
self.entity2]))
class GetWriteOpsTest(unittest.TestCase):
"""Tests for DatastoreRequestHandler._get_write_ops."""
def setUp(self):
self.app_id = 'myapp'
os.environ['APPLICATION_ID'] = self.app_id
# Use a consistent replication strategy so the puts done in the test code
# are seen immediately by the queries under test.
consistent_policy = datastore_stub_util.MasterSlaveConsistencyPolicy()
api_server.test_setup_stubs(
app_id=self.app_id,
application_root=None, # Needed to allow index updates.
datastore_consistency=consistent_policy)
def test_no_properties(self):
entity = datastore.Entity('Yar', id=123, _app=self.app_id) # 2 writes.
self.assertEquals(
2, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
def test_indexed_properties_no_composite_indexes(self):
entity = datastore.Entity('Yar', id=123, _app=self.app_id) # 2 writes.
entity['p1'] = None # 2 writes.
entity['p2'] = None # 2 writes.
entity['p3'] = [1, 2, 3] # 6 writes.
self.assertEquals(
12, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
def test_unindexed_properties_no_composite_indexes(self):
entity = datastore.Entity('Yar', id=123, _app=self.app_id) # 2 writes.
entity['u1'] = None # 0 writes.
entity['u2'] = None # 0 writes.
entity['u3'] = [1, 2, 3] # 0 writes.
entity.set_unindexed_properties(('u1', 'u2', 'u3'))
# unindexed properties have no impact on cost
self.assertEquals(
2, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
def test_composite_index(self):
ci = datastore_pb.CompositeIndex()
ci.set_app_id(datastore_types.ResolveAppId(None))
ci.set_id(0)
ci.set_state(ci.WRITE_ONLY)
index = ci.mutable_definition()
index.set_ancestor(0)
index.set_entity_type('Yar')
prop = index.add_property()
prop.set_name('this')
prop.set_direction(prop.ASCENDING)
prop = index.add_property()
prop.set_name('that')
prop.set_direction(prop.DESCENDING)
stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
stub.CreateIndex(ci)
self.assertEquals(1, len(datastore.GetIndexes()))
# no properties, no composite indices.
entity = datastore.Entity('Yar', id=123, _app=self.app_id) # 2 writes.
# We only have the 2 built-in index writes because the entity doesn't have
# property values for any of the index properties.
self.assertEquals(
2, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
entity['this'] = 4
# Unindexed property so no additional writes
entity.set_unindexed_properties(('this',))
self.assertEquals(
2, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
entity['that'] = 4
# Unindexed property so no additional writes
entity.set_unindexed_properties(('this', 'that'))
self.assertEquals(
2, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
# no indexed property value on 'that'
entity.set_unindexed_properties(('that',))
# 2 writes for the entity.
# 2 writes for the single indexed property.
self.assertEquals(
4, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
# indexed property value on both 'this' and 'that'
entity.set_unindexed_properties(())
# 2 writes for the entity
# 4 writes for the indexed properties
# 1 writes for the composite index
self.assertEquals(
7, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
# now run tests with null property values
entity = datastore.Entity('Yar', id=123, _app=self.app_id) # 2 writes.
entity['this'] = None
# 2 for the entity
# 2 for the single indexed property
self.assertEquals(
4, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
entity['that'] = None
# 2 for the entity
# 4 for the indexed properties
# 1 for the composite index
self.assertEquals(
7, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
# now run tests with a repeated property
entity = datastore.Entity('Yar', id=123, _app=self.app_id) # 2 writes.
entity['this'] = [1, 2, 3]
# 2 for the entity
# 6 for the indexed properties
self.assertEquals(
8, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
entity['that'] = None
# 2 for the entity
# 8 for the indexed properties
# 3 for the Composite index
self.assertEquals(
13, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
entity['that'] = [4, 5]
# 2 for the entity
# 10 for the indexed properties
# 6 for the Composite index
self.assertEquals(
18, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
def test_composite_index_no_properties(self):
ci = datastore_pb.CompositeIndex()
ci.set_app_id(datastore_types.ResolveAppId(None))
ci.set_id(0)
ci.set_state(ci.WRITE_ONLY)
index = ci.mutable_definition()
index.set_ancestor(0)
index.set_entity_type('Yar')
stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
stub.CreateIndex(ci)
self.assertEquals(1, len(datastore.GetIndexes()))
# no properties, and composite index with no properties.
entity = datastore.Entity('Yar', id=123, _app=self.app_id) # 2 writes.
# We have the 2 built-in index writes, and one for the entity key in the
    # composite index despite the fact that there are no properties defined in
# the index.
self.assertEquals(
3, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
# now with a repeated property
entity = datastore.Entity('Yar', id=123, _app=self.app_id) # 2 writes.
entity['this'] = [1, 2, 3]
# 2 for the entity
# 6 for the indexed properties
# 1 for the composite index
self.assertEquals(
9, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
def test_composite_ancestor_index(self):
ci = datastore_pb.CompositeIndex()
ci.set_app_id(datastore_types.ResolveAppId(None))
ci.set_id(0)
ci.set_state(ci.WRITE_ONLY)
index = ci.mutable_definition()
index.set_ancestor(1)
index.set_entity_type('Yar')
prop = index.add_property()
prop.set_name('this')
prop.set_direction(prop.ASCENDING)
prop = index.add_property()
prop.set_name('that')
prop.set_direction(prop.DESCENDING)
stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
stub.CreateIndex(ci)
self.assertEquals(1, len(datastore.GetIndexes()))
entity = datastore.Entity('Yar', id=123, _app=self.app_id) # 2 writes.
entity['this'] = 4
entity['that'] = 4
# 2 for the entity
# 4 for the indexed properties
# 1 for the composite index
self.assertEquals(
7, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
# Now use the same entity but give it an ancestor
parent_entity = datastore.Entity('parent', id=123, _app=self.app_id)
entity = datastore.Entity(
'Yar',
parent=parent_entity.key(),
id=123,
_app=self.app_id) # 2 writes.
entity['this'] = 4
entity['that'] = 4
# 2 writes for the entity.
# 4 writes for the indexed properties.
# 2 writes for the composite indices.
self.assertEquals(
8, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
# Now use the same entity but give it 2 ancestors.
grandparent_entity = datastore.Entity(
'grandparent', id=123, _app=self.app_id)
parent_entity = datastore.Entity(
'parent', parent=grandparent_entity.key(), id=123, _app=self.app_id)
entity = datastore.Entity(
'Yar',
parent=parent_entity.key(),
id=123,
_app=self.app_id) # 2 writes.
entity['this'] = 4
entity['that'] = 4
# 2 writes for the entity.
# 4 writes for the indexed properties.
# 3 writes for the composite indices.
self.assertEquals(
9, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
# Now try it with a multi-value prop
entity['this'] = [None, None, None]
# 2 writes for the entity.
# 8 writes for the indexed properties.
# 9 writes for the composite indices.
self.assertEquals(
19, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
# Now try it with 2 multi-value props.
entity['that'] = [None, None]
# 2 writes for the entity.
# 10 writes for the indexed properties.
# 18 writes for the composite indices.
self.assertEquals(
30, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
def test_composite_ancestor_index_no_properties(self):
ci = datastore_pb.CompositeIndex()
ci.set_app_id(datastore_types.ResolveAppId(None))
ci.set_id(0)
ci.set_state(ci.WRITE_ONLY)
index = ci.mutable_definition()
index.set_ancestor(1)
index.set_entity_type('Yar')
stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
stub.CreateIndex(ci)
self.assertEquals(1, len(datastore.GetIndexes()))
entity = datastore.Entity('Yar', id=123, _app=self.app_id) # 2 writes.
entity['this'] = [None, None]
# 2 writes for the entity.
# 4 writes for the indexed properties.
# 1 writes for the composite index.
self.assertEquals(
7, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
# Now use the same entity but give it an ancestor
parent_entity = datastore.Entity('parent', id=123, _app=self.app_id)
entity = datastore.Entity(
'Yar',
parent=parent_entity.key(),
id=123,
_app=self.app_id) # 2 writes.
entity['this'] = [None, None]
# 2 writes for the entity.
# 4 writes for the indexed properties.
# 2 writes for the composite indices.
self.assertEquals(
8, datastore_viewer.DatastoreRequestHandler._get_write_ops(entity))
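# Taken together, the cases above exercise the write-op accounting rule used by
# _get_write_ops: 2 ops for the entity itself, 2 per indexed property value,
# and 1 per composite-index entry (ancestor indexes add one entry per ancestor
# in the path). By that rule, for example, an entity with one single-valued
# indexed property covered by one composite index costs 2 + 2 + 1 = 5 writes.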
class GetEntitiesTest(unittest.TestCase):
"""Tests for DatastoreRequestHandler._get_entities."""
def setUp(self):
self.app_id = 'myapp'
os.environ['APPLICATION_ID'] = self.app_id
# Use a consistent replication strategy so the puts done in the test code
# are seen immediately by the queries under test.
consistent_policy = datastore_stub_util.MasterSlaveConsistencyPolicy()
api_server.test_setup_stubs(
app_id=self.app_id,
datastore_consistency=consistent_policy)
self.entity1 = datastore.Entity('Kind1', id=123, _app=self.app_id)
self.entity1['intprop'] = 1
self.entity1['listprop'] = [7, 8, 9]
datastore.Put(self.entity1)
self.entity2 = datastore.Entity('Kind1', id=124, _app=self.app_id)
self.entity2['stringprop'] = 'value2'
self.entity2['listprop'] = [4, 5, 6]
datastore.Put(self.entity2)
self.entity3 = datastore.Entity('Kind1', id=125, _app=self.app_id)
self.entity3['intprop'] = 3
self.entity3['stringprop'] = 'value3'
self.entity3['listprop'] = [1, 2, 3]
datastore.Put(self.entity3)
self.entity4 = datastore.Entity('Kind1', id=126, _app=self.app_id)
self.entity4['intprop'] = 4
self.entity4['stringprop'] = 'value4'
self.entity4['listprop'] = [10, 11, 12]
datastore.Put(self.entity4)
def test_ascending_int_order(self):
entities, total = datastore_viewer._get_entities(kind='Kind1',
namespace='',
order='intprop',
start=0,
count=100)
self.assertEqual([self.entity1, self.entity3, self.entity4], entities)
self.assertEqual(3, total)
def test_decending_string_order(self):
entities, total = datastore_viewer._get_entities(kind='Kind1',
namespace='',
order='-stringprop',
start=0,
count=100)
self.assertEqual([self.entity4, self.entity3, self.entity2], entities)
self.assertEqual(3, total)
def test_start_and_count(self):
entities, total = datastore_viewer._get_entities(kind='Kind1',
namespace='',
order='listprop',
start=1,
count=2)
self.assertEqual([self.entity2, self.entity1], entities)
self.assertEqual(4, total)
class GetEntityTemplateDataTest(unittest.TestCase):
def setUp(self):
self.app_id = 'myapp'
os.environ['APPLICATION_ID'] = self.app_id
# Use a consistent replication strategy so the puts done in the test code
# are seen immediately by the queries under test.
consistent_policy = datastore_stub_util.MasterSlaveConsistencyPolicy()
api_server.test_setup_stubs(
app_id=self.app_id,
datastore_consistency=consistent_policy)
self.entity1 = datastore.Entity('Kind1', id=123, _app=self.app_id)
self.entity1['intprop'] = 1
self.entity1['listprop'] = [7, 8, 9]
datastore.Put(self.entity1)
self.entity2 = datastore.Entity('Kind1', id=124, _app=self.app_id)
self.entity2['stringprop'] = 'value2'
self.entity2['listprop'] = [4, 5, 6]
datastore.Put(self.entity2)
self.entity3 = datastore.Entity('Kind1', id=125, _app=self.app_id)
self.entity3['intprop'] = 3
self.entity3['listprop'] = [1, 2, 3]
datastore.Put(self.entity3)
self.entity4 = datastore.Entity('Kind1', id=126, _app=self.app_id)
self.entity4['intprop'] = 4
self.entity4['stringprop'] = 'value4'
self.entity4['listprop'] = [10, 11]
datastore.Put(self.entity4)
def test(self):
headers, entities, total_entities = (
datastore_viewer.DatastoreRequestHandler._get_entity_template_data(
request_uri='http://next/',
kind='Kind1',
namespace='',
order='intprop',
start=1))
self.assertEqual(
[{'name': 'intprop'}, {'name': 'listprop'}, {'name': 'stringprop'}],
headers)
self.assertEqual(
[{'attributes': [{'name': u'intprop',
'short_value': '3',
'value': '3'},
{'name': u'listprop',
'short_value': mox.Regex(r'\[1L?, 2L?, 3L?\]'),
'value': mox.Regex(r'\[1L?, 2L?, 3L?\]')},
{'name': u'stringprop',
'short_value': '',
'value': ''}],
'edit_uri': '/datastore/edit/{0}?next=http%3A//next/'.format(
self.entity3.key()),
'key': datastore_types.Key.from_path(u'Kind1', 125, _app=u'myapp'),
'key_id': 125,
'key_name': None,
'shortened_key': 'agVteWFw...',
'write_ops': 10},
{'attributes': [{'name': u'intprop',
'short_value': '4',
'value': '4'},
{'name': u'listprop',
'short_value': mox.Regex(r'\[10L?, 11L?\]'),
'value': mox.Regex(r'\[10L?, 11L?\]')},
{'name': u'stringprop',
'short_value': u'value4',
'value': u'value4'}],
'edit_uri': '/datastore/edit/{0}?next=http%3A//next/'.format(
self.entity4.key()),
'key': datastore_types.Key.from_path(u'Kind1', 126, _app=u'myapp'),
'key_id': 126,
'key_name': None,
'shortened_key': 'agVteWFw...',
'write_ops': 10}],
entities)
self.assertEqual(3, total_entities)
class DatastoreRequestHandlerGetTest(unittest.TestCase):
"""Tests for DatastoreRequestHandler.get."""
def setUp(self):
self.app_id = 'myapp'
os.environ['APPLICATION_ID'] = self.app_id
api_server.test_setup_stubs(app_id=self.app_id)
self.mox = mox.Mox()
self.mox.StubOutWithMock(admin_request_handler.AdminRequestHandler,
'render')
def tearDown(self):
self.mox.UnsetStubs()
def test_empty_request_and_empty_datastore(self):
request = webapp2.Request.blank('/datastore')
response = webapp2.Response()
handler = datastore_viewer.DatastoreRequestHandler(request, response)
handler.render('datastore_viewer.html',
{'entities': [],
'headers': [],
'kind': None,
'kinds': [],
'message': None,
'namespace': '',
'num_pages': 0,
'order': None,
'paging_base_url': '/datastore?',
'order_base_url': '/datastore?',
'page': 1,
'select_namespace_url': '/datastore?namespace=',
'show_namespace': False,
'start': 0,
'total_entities': 0})
self.mox.ReplayAll()
handler.get()
self.mox.VerifyAll()
def test_empty_request_and_populated_datastore(self):
entity = datastore.Entity('Kind1', id=123, _app=self.app_id)
entity['intprop'] = 1
entity['listprop'] = [7, 8, 9]
datastore.Put(entity)
request = webapp2.Request.blank('/datastore')
response = webapp2.Response()
handler = datastore_viewer.DatastoreRequestHandler(request, response)
self.mox.ReplayAll()
handler.get()
self.mox.VerifyAll()
self.assertEqual(302, response.status_int)
self.assertEqual('http://localhost/datastore?kind=Kind1',
response.location)
def test_kind_request_and_populated_datastore(self):
entity = datastore.Entity('Kind1', id=123, _app=self.app_id)
entity['intprop'] = 1
entity['listprop'] = [7, 8, 9]
datastore.Put(entity)
request = webapp2.Request.blank('/datastore?kind=Kind1')
response = webapp2.Response()
handler = datastore_viewer.DatastoreRequestHandler(request, response)
handler.render(
'datastore_viewer.html',
{'entities': mox.IgnoreArg(), # Tested with _get_entity_template_data.
'headers': mox.IgnoreArg(), # Tested with _get_entity_template_data.
'kind': 'Kind1',
'kinds': ['Kind1'],
'message': None,
'namespace': '',
'num_pages': 1,
'order': None,
'order_base_url': '/datastore?kind=Kind1',
'page': 1,
'paging_base_url': '/datastore?kind=Kind1',
'select_namespace_url': '/datastore?kind=Kind1&namespace=',
'show_namespace': False,
'start': 0,
'total_entities': 1})
self.mox.ReplayAll()
handler.get()
self.mox.VerifyAll()
def test_order_request(self):
entity = datastore.Entity('Kind1', id=123, _app=self.app_id)
entity['intprop'] = 1
entity['listprop'] = [7, 8, 9]
datastore.Put(entity)
request = webapp2.Request.blank(
'/datastore?kind=Kind1&order=intprop')
response = webapp2.Response()
handler = datastore_viewer.DatastoreRequestHandler(request, response)
handler.render(
'datastore_viewer.html',
{'entities': mox.IgnoreArg(), # Tested with _get_entity_template_data.
'headers': mox.IgnoreArg(), # Tested with _get_entity_template_data.
'kind': 'Kind1',
'kinds': ['Kind1'],
'message': None,
'namespace': '',
'num_pages': 1,
'order': 'intprop',
'order_base_url': '/datastore?kind=Kind1',
'page': 1,
'paging_base_url': '/datastore?kind=Kind1&order=intprop',
'select_namespace_url':
'/datastore?kind=Kind1&namespace=&order=intprop',
'show_namespace': False,
'start': 0,
'total_entities': 1})
self.mox.ReplayAll()
handler.get()
self.mox.VerifyAll()
def test_namespace_request(self):
entity = datastore.Entity('Kind1',
id=123,
_app=self.app_id,
_namespace='google')
entity['intprop'] = 1
entity['listprop'] = [7, 8, 9]
datastore.Put(entity)
request = webapp2.Request.blank(
'/datastore?kind=Kind1&namespace=google')
response = webapp2.Response()
handler = datastore_viewer.DatastoreRequestHandler(request, response)
handler.render(
'datastore_viewer.html',
{'entities': mox.IgnoreArg(), # Tested with _get_entity_template_data.
'headers': mox.IgnoreArg(), # Tested with _get_entity_template_data.
'kind': 'Kind1',
'kinds': ['Kind1'],
'message': None,
'namespace': 'google',
'num_pages': 1,
'order': None,
'order_base_url': '/datastore?kind=Kind1&namespace=google',
'page': 1,
'paging_base_url': '/datastore?kind=Kind1&namespace=google',
'select_namespace_url':
'/datastore?kind=Kind1&namespace=google',
'show_namespace': True,
'start': 0,
'total_entities': 1})
self.mox.ReplayAll()
handler.get()
self.mox.VerifyAll()
def test_page_request(self):
for i in range(1000):
entity = datastore.Entity('Kind1', id=i+1, _app=self.app_id)
entity['intprop'] = i
datastore.Put(entity)
request = webapp2.Request.blank(
'/datastore?kind=Kind1&page=3')
response = webapp2.Response()
handler = datastore_viewer.DatastoreRequestHandler(request, response)
handler.render(
'datastore_viewer.html',
{'entities': mox.IgnoreArg(), # Tested with _get_entity_template_data.
'headers': mox.IgnoreArg(), # Tested with _get_entity_template_data.
'kind': 'Kind1',
'kinds': ['Kind1'],
'message': None,
'namespace': '',
'num_pages': 50,
'order': None,
'order_base_url': '/datastore?kind=Kind1&page=3',
'page': 3,
'paging_base_url': '/datastore?kind=Kind1',
'select_namespace_url':
'/datastore?kind=Kind1&namespace=&page=3',
'show_namespace': False,
'start': 40,
'total_entities': 1000})
self.mox.ReplayAll()
handler.get()
self.mox.VerifyAll()
class DatastoreEditRequestHandlerTest(unittest.TestCase):
"""Tests for DatastoreEditRequestHandler."""
def setUp(self):
self.app_id = 'myapp'
os.environ['APPLICATION_ID'] = self.app_id
# Use a consistent replication strategy so that the test can use queries
# to verify that an entity was written.
consistent_policy = datastore_stub_util.MasterSlaveConsistencyPolicy()
api_server.test_setup_stubs(
app_id=self.app_id,
datastore_consistency=consistent_policy)
self.mox = mox.Mox()
self.mox.StubOutWithMock(admin_request_handler.AdminRequestHandler,
'render')
self.entity1 = datastore.Entity('Kind1', id=123, _app=self.app_id)
self.entity1['intprop'] = 1
self.entity1['listprop'] = [7, 8, 9]
self.entity1['dateprop'] = datastore_types._OverflowDateTime(2**60)
datastore.Put(self.entity1)
self.entity2 = datastore.Entity('Kind1', id=124, _app=self.app_id)
self.entity2['stringprop'] = 'value2'
self.entity2['listprop'] = [4, 5, 6]
datastore.Put(self.entity2)
self.entity3 = datastore.Entity('Kind1', id=125, _app=self.app_id)
self.entity3['intprop'] = 3
self.entity3['listprop'] = [1, 2, 3]
datastore.Put(self.entity3)
self.entity4 = datastore.Entity('Kind1', id=126, _app=self.app_id)
self.entity4['intprop'] = 4
self.entity4['stringprop'] = 'value4'
self.entity4['listprop'] = [10, 11]
datastore.Put(self.entity4)
self.entity5 = datastore.Entity('Kind1', id=127, _app=self.app_id)
self.entity5['intprop'] = 0
self.entity5['boolprop'] = False
self.entity5['stringprop'] = ''
self.entity5['floatprop'] = 0.0
datastore.Put(self.entity5)
def tearDown(self):
self.mox.UnsetStubs()
def test_get_no_entity_key_string(self):
request = webapp2.Request.blank(
'/datastore/edit?kind=Kind1&next=http://next/')
response = webapp2.Response()
handler = datastore_viewer.DatastoreEditRequestHandler(request, response)
handler.render(
'datastore_edit.html',
{'fields': [('boolprop',
'bool',
mox.Regex('^<select class="bool"(.|\n)*$')),
('dateprop',
'overflowdatetime',
mox.Regex('^<input class="overflowdatetime".*'
'value="".*$')),
('floatprop',
'float',
mox.Regex('^<input class="float".*value="".*$')),
('intprop',
'int',
mox.Regex('^<input class="int".*value="".*$')),
('listprop', 'list', ''),
('stringprop',
'string',
mox.Regex('^<input class="string".*$'))],
'key': None,
'key_id': None,
'key_name': None,
'kind': 'Kind1',
'namespace': '',
'next': 'http://next/',
'parent_key': None,
'parent_key_string': None})
self.mox.ReplayAll()
handler.get()
self.mox.VerifyAll()
def test_get_no_entity_key_string_and_no_entities_in_namespace(self):
request = webapp2.Request.blank(
'/datastore/edit?kind=Kind1&namespace=cat&next=http://next/')
response = webapp2.Response()
handler = datastore_viewer.DatastoreEditRequestHandler(request, response)
self.mox.ReplayAll()
handler.get()
self.mox.VerifyAll()
self.assertEqual(302, response.status_int)
self.assertRegexpMatches(
response.location,
r'/datastore\?kind=Kind1&message=Cannot+.*&namespace=cat')
def test_get_entity_string(self):
request = webapp2.Request.blank(
'/datastore/edit/%s?next=http://next/' % self.entity1.key())
response = webapp2.Response()
handler = datastore_viewer.DatastoreEditRequestHandler(request, response)
handler.render(
'datastore_edit.html',
{'fields': [('dateprop',
'overflowdatetime',
mox.Regex('^<input class="overflowdatetime".*'
'value="1152921504606846976".*$')),
('intprop',
'int',
mox.Regex('^<input class="int".*value="1".*$')),
('listprop', 'list', mox.Regex(r'\[7L?, 8L?, 9L?\]'))],
'key': str(self.entity1.key()),
'key_id': 123,
'key_name': None,
'kind': 'Kind1',
'namespace': '',
'next': 'http://next/',
'parent_key': None,
'parent_key_string': None})
self.mox.ReplayAll()
handler.get(str(self.entity1.key()))
self.mox.VerifyAll()
def test_get_entity_zero_props(self):
request = webapp2.Request.blank(
'/datastore/edit/%s?next=http://next/' % self.entity5.key())
response = webapp2.Response()
handler = datastore_viewer.DatastoreEditRequestHandler(request, response)
handler.render(
'datastore_edit.html',
{'fields': [('boolprop',
'bool',
mox.Regex('^<select class="bool"(.|\n)*$')),
('floatprop',
'float',
mox.Regex('^<input class="float".*value="0\.0".*$')),
('intprop',
'int',
mox.Regex('^<input class="int".*value="0".*$')),
('stringprop',
'string',
mox.Regex('^<input class="string".*value="".*$'))],
'key': str(self.entity5.key()),
'key_id': 127,
'key_name': None,
'kind': 'Kind1',
'namespace': '',
'next': 'http://next/',
'parent_key': None,
'parent_key_string': None})
self.mox.ReplayAll()
handler.get(str(self.entity5.key()))
self.mox.VerifyAll()
def test_post_no_entity_key_string(self):
request = webapp2.Request.blank(
'/datastore/edit',
POST={'kind': 'Kind1',
'overflowdatetime|dateprop': '2009-12-24 23:59:59',
'int|intprop': '123',
'string|stringprop': 'Hello',
'next': 'http://redirect/'})
response = webapp2.Response()
handler = datastore_viewer.DatastoreEditRequestHandler(request, response)
self.mox.ReplayAll()
handler.post()
self.mox.VerifyAll()
self.assertEqual(302, response.status_int)
self.assertEqual('http://redirect/', response.location)
# Check that the entity was added.
query = datastore.Query('Kind1')
query.update({'dateprop': datetime.datetime(2009, 12, 24, 23, 59, 59),
'intprop': 123,
'stringprop': 'Hello'})
self.assertEquals(1, query.Count())
def test_post_entity_key_string(self):
request = webapp2.Request.blank(
'/datastore/edit/%s' % self.entity4.key(),
POST={'overflowdatetime|dateprop': str(2**60),
'int|intprop': '123',
'string|stringprop': '',
'next': 'http://redirect/'})
response = webapp2.Response()
handler = datastore_viewer.DatastoreEditRequestHandler(request, response)
self.mox.ReplayAll()
handler.post(str(self.entity4.key()))
self.mox.VerifyAll()
self.assertEqual(302, response.status_int)
self.assertEqual('http://redirect/', response.location)
# Check that the entity was updated.
entity = datastore.Get(self.entity4.key())
self.assertEqual(2**60, entity['dateprop'])
self.assertEqual(123, entity['intprop'])
self.assertEqual([10, 11], entity['listprop'])
self.assertNotIn('stringprop', entity)
if __name__ == '__main__':
unittest.main()
| mit | -1,818,533,265,572,240,100 | 35.470325 | 79 | 0.594019 | false |
jnewland/home-assistant | homeassistant/components/cmus/media_player.py | 7 | 6748 | """Support for interacting with and controlling the cmus music player."""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST, SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_SET)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, STATE_OFF, STATE_PAUSED,
STATE_PLAYING)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'cmus'
DEFAULT_PORT = 3000
SUPPORT_CMUS = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_TURN_OFF | \
SUPPORT_TURN_ON | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
SUPPORT_PLAY_MEDIA | SUPPORT_SEEK | SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Inclusive(CONF_HOST, 'remote'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'remote'): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_entities, discover_info=None):
"""Set up the CMUS platform."""
from pycmus import exceptions
host = config.get(CONF_HOST)
password = config.get(CONF_PASSWORD)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
try:
cmus_remote = CmusDevice(host, password, port, name)
except exceptions.InvalidPassword:
_LOGGER.error("The provided password was rejected by cmus")
return False
add_entities([cmus_remote], True)
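# An example configuration.yaml entry for this platform (values are invented;
# host and password are only needed when controlling a remote cmus instance):
#
#   media_player:
#     - platform: cmus
#       host: 192.168.0.20
#       password: secret
#       name: bedroom_cmus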
class CmusDevice(MediaPlayerDevice):
"""Representation of a running cmus."""
# pylint: disable=no-member
def __init__(self, server, password, port, name):
"""Initialize the CMUS device."""
from pycmus import remote
if server:
self.cmus = remote.PyCmus(
server=server, password=password, port=port)
auto_name = 'cmus-{}'.format(server)
else:
self.cmus = remote.PyCmus()
auto_name = 'cmus-local'
self._name = name or auto_name
self.status = {}
def update(self):
"""Get the latest data and update the state."""
status = self.cmus.get_status_dict()
if not status:
_LOGGER.warning("Received no status from cmus")
else:
self.status = status
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the media state."""
if self.status.get('status') == 'playing':
return STATE_PLAYING
if self.status.get('status') == 'paused':
return STATE_PAUSED
return STATE_OFF
@property
def media_content_id(self):
"""Content ID of current playing media."""
return self.status.get('file')
@property
def content_type(self):
"""Content type of the current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self.status.get('duration')
@property
def media_title(self):
"""Title of current playing media."""
return self.status['tag'].get('title')
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return self.status['tag'].get('artist')
@property
def media_track(self):
"""Track number of current playing media, music track only."""
return self.status['tag'].get('tracknumber')
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self.status['tag'].get('album')
@property
def media_album_artist(self):
"""Album artist of current playing media, music track only."""
return self.status['tag'].get('albumartist')
@property
def volume_level(self):
"""Return the volume level."""
left = self.status['set'].get('vol_left')[0]
right = self.status['set'].get('vol_right')[0]
if left != right:
volume = float(left + right) / 2
else:
volume = left
return int(volume)/100
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_CMUS
def turn_off(self):
"""Service to send the CMUS the command to stop playing."""
self.cmus.player_stop()
def turn_on(self):
"""Service to send the CMUS the command to start playing."""
self.cmus.player_play()
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self.cmus.set_volume(int(volume * 100))
def volume_up(self):
"""Set the volume up."""
left = self.status['set'].get('vol_left')
right = self.status['set'].get('vol_right')
if left != right:
current_volume = float(left + right) / 2
else:
current_volume = left
if current_volume <= 100:
self.cmus.set_volume(int(current_volume) + 5)
def volume_down(self):
"""Set the volume down."""
left = self.status['set'].get('vol_left')
right = self.status['set'].get('vol_right')
if left != right:
current_volume = float(left + right) / 2
else:
current_volume = left
if current_volume <= 100:
self.cmus.set_volume(int(current_volume) - 5)
def play_media(self, media_type, media_id, **kwargs):
"""Send the play command."""
if media_type in [MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST]:
self.cmus.player_play_file(media_id)
else:
_LOGGER.error(
"Invalid media type %s. Only %s and %s are supported",
media_type, MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST)
def media_pause(self):
"""Send the pause command."""
self.cmus.player_pause()
def media_next_track(self):
"""Send next track command."""
self.cmus.player_next()
def media_previous_track(self):
"""Send next track command."""
self.cmus.player_prev()
def media_seek(self, position):
"""Send seek command."""
self.cmus.seek(position)
def media_play(self):
"""Send the play command."""
self.cmus.player_play()
def media_stop(self):
"""Send the stop command."""
self.cmus.stop()
| apache-2.0 | 9,002,669,516,285,687,000 | 30.53271 | 76 | 0.606846 | false |
czgu/metaHack | env/lib/python2.7/site-packages/PIL/ImageTransform.py | 71 | 2876 | #
# The Python Imaging Library.
# $Id$
#
# transform wrappers
#
# History:
# 2002-04-08 fl Created
#
# Copyright (c) 2002 by Secret Labs AB
# Copyright (c) 2002 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from PIL import Image
class Transform(Image.ImageTransformHandler):
def __init__(self, data):
self.data = data
def getdata(self):
return self.method, self.data
def transform(self, size, image, **options):
# can be overridden
method, data = self.getdata()
return image.transform(size, method, data, **options)
##
# Define an affine image transform.
# <p>
# This function takes a 6-tuple (<i>a, b, c, d, e, f</i>) which
# contain the first two rows from an affine transform matrix. For
# each pixel (<i>x, y</i>) in the output image, the new value is
# taken from a position (a <i>x</i> + b <i>y</i> + c,
# d <i>x</i> + e <i>y</i> + f) in the input image, rounded to
# nearest pixel.
# <p>
# This function can be used to scale, translate, rotate, and shear the
# original image.
#
# @def AffineTransform(matrix)
# @param matrix A 6-tuple (<i>a, b, c, d, e, f</i>) containing
# the first two rows from an affine transform matrix.
# @see Image#Image.transform
class AffineTransform(Transform):
method = Image.AFFINE
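# A short sketch of typical use (matrix values are illustrative): because each
# output pixel (x, y) samples the input at (a*x + b*y + c, d*x + e*y + f), the
# tuple below shifts the image content 10 pixels right and 20 pixels down.
#
#   shifted = im.transform(im.size, AffineTransform((1, 0, -10, 0, 1, -20)))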
##
# Define a transform to extract a subregion from an image.
# <p>
# Maps a rectangle (defined by two corners) from the image to a
# rectangle of the given size. The resulting image will contain
# data sampled from between the corners, such that (<i>x0, y0</i>)
# in the input image will end up at (0,0) in the output image,
# and (<i>x1, y1</i>) at <i>size</i>.
# <p>
# This method can be used to crop, stretch, shrink, or mirror an
# arbitrary rectangle in the current image. It is slightly slower than
# <b>crop</b>, but about as fast as a corresponding <b>resize</b>
# operation.
#
# @def ExtentTransform(bbox)
# @param bbox A 4-tuple (<i>x0, y0, x1, y1</i>) which specifies
# two points in the input image's coordinate system.
# @see Image#Image.transform
class ExtentTransform(Transform):
method = Image.EXTENT
##
# Define a quad image transform.
# <p>
# Maps a quadrilateral (a region defined by four corners) from the
# image to a rectangle of the given size.
#
# @def QuadTransform(xy)
# @param xy An 8-tuple (<i>x0, y0, x1, y1, x2, y2, x3, y3</i>) which
# contains the upper left, lower left, lower right, and upper right
# corner of the source quadrilateral.
# @see Image#Image.transform
class QuadTransform(Transform):
method = Image.QUAD
##
# Define a mesh image transform. A mesh transform consists of one
# or more individual quad transforms.
#
# @def MeshTransform(data)
# @param data A list of (bbox, quad) tuples.
# @see Image#Image.transform
class MeshTransform(Transform):
method = Image.MESH
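# A minimal usage sketch (illustrative only, not part of the original module):
# Image.transform() accepts the Transform subclasses above directly because
# they implement ImageTransformHandler.
if __name__ == "__main__":
    im = Image.new("L", (64, 64), 128)
    # AFFINE: every output pixel (x, y) samples the input at (x + 10, y),
    # so the visible content shifts 10 pixels to the left.
    shifted = im.transform(im.size, AffineTransform((1, 0, 10, 0, 1, 0)))
    # EXTENT: stretch the top-left 32x32 quadrant over the whole output.
    zoomed = im.transform(im.size, ExtentTransform((0, 0, 32, 32)))
    # QUAD: corners are given as upper left, lower left, lower right,
    # upper right; this particular quad is simply the full image.
    warped = im.transform(im.size, QuadTransform((0, 0, 0, 63, 63, 63, 63, 0)))
    print(shifted.size, zoomed.size, warped.size)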
| apache-2.0 | -6,154,573,690,946,047,000 | 26.92233 | 70 | 0.684631 | false |
171121130/SWI | venv/Lib/site-packages/sqlalchemy/ext/declarative/base.py | 5 | 25290 | # ext/declarative/base.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Internal implementation for declarative."""
from ...schema import Table, Column
from ...orm import mapper, class_mapper, synonym
from ...orm.interfaces import MapperProperty
from ...orm.properties import ColumnProperty, CompositeProperty
from ...orm.attributes import QueryableAttribute
from ...orm.base import _is_mapped_class
from ... import util, exc
from ...util import topological
from ...sql import expression
from ... import event
from . import clsregistry
import collections
import weakref
from sqlalchemy.orm import instrumentation
declared_attr = declarative_props = None
def _declared_mapping_info(cls):
# deferred mapping
if _DeferredMapperConfig.has_cls(cls):
return _DeferredMapperConfig.config_for_cls(cls)
# regular mapping
elif _is_mapped_class(cls):
return class_mapper(cls, configure=False)
else:
return None
def _resolve_for_abstract(cls):
if cls is object:
return None
if _get_immediate_cls_attr(cls, '__abstract__', strict=True):
for sup in cls.__bases__:
sup = _resolve_for_abstract(sup)
if sup is not None:
return sup
else:
return None
else:
return cls
def _get_immediate_cls_attr(cls, attrname, strict=False):
"""return an attribute of the class that is either present directly
    on the class, i.e. not on a superclass, or is from a superclass but
this superclass is a mixin, that is, not a descendant of
the declarative base.
This is used to detect attributes that indicate something about
a mapped class independently from any mapped classes that it may
inherit from.
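
    For example (an illustrative sketch, not code from this module)::

        class MyAbstractBase(Base):
            __abstract__ = True

        class MyModel(MyAbstractBase):
            __tablename__ = 'my_model'
            id = Column(Integer, primary_key=True)

    ``_get_immediate_cls_attr(MyAbstractBase, '__abstract__', strict=True)``
    returns True because the attribute is present directly on that class,
    while the same call against ``MyModel`` returns None: the attribute is
    only found on a superclass that is itself a declarative descendant, so
    it does not carry over to the subclass.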
"""
if not issubclass(cls, object):
return None
for base in cls.__mro__:
_is_declarative_inherits = hasattr(base, '_decl_class_registry')
if attrname in base.__dict__ and (
base is cls or
((base in cls.__bases__ if strict else True)
and not _is_declarative_inherits)
):
return getattr(base, attrname)
else:
return None
def _as_declarative(cls, classname, dict_):
global declared_attr, declarative_props
if declared_attr is None:
from .api import declared_attr
declarative_props = (declared_attr, util.classproperty)
if _get_immediate_cls_attr(cls, '__abstract__', strict=True):
return
_MapperConfig.setup_mapping(cls, classname, dict_)
class _MapperConfig(object):
@classmethod
def setup_mapping(cls, cls_, classname, dict_):
defer_map = _get_immediate_cls_attr(
cls_, '_sa_decl_prepare_nocascade', strict=True) or \
hasattr(cls_, '_sa_decl_prepare')
if defer_map:
cfg_cls = _DeferredMapperConfig
else:
cfg_cls = _MapperConfig
cfg_cls(cls_, classname, dict_)
def __init__(self, cls_, classname, dict_):
self.cls = cls_
# dict_ will be a dictproxy, which we can't write to, and we need to!
self.dict_ = dict(dict_)
self.classname = classname
self.mapped_table = None
self.properties = util.OrderedDict()
self.declared_columns = set()
self.column_copies = {}
self._setup_declared_events()
# temporary registry. While early 1.0 versions
# set up the ClassManager here, by API contract
# we can't do that until there's a mapper.
self.cls._sa_declared_attr_reg = {}
self._scan_attributes()
clsregistry.add_class(self.classname, self.cls)
self._extract_mappable_attributes()
self._extract_declared_columns()
self._setup_table()
self._setup_inheritance()
self._early_mapping()
def _early_mapping(self):
self.map()
def _setup_declared_events(self):
if _get_immediate_cls_attr(self.cls, '__declare_last__'):
@event.listens_for(mapper, "after_configured")
def after_configured():
self.cls.__declare_last__()
if _get_immediate_cls_attr(self.cls, '__declare_first__'):
@event.listens_for(mapper, "before_configured")
def before_configured():
self.cls.__declare_first__()
def _scan_attributes(self):
cls = self.cls
dict_ = self.dict_
column_copies = self.column_copies
mapper_args_fn = None
table_args = inherited_table_args = None
tablename = None
for base in cls.__mro__:
class_mapped = base is not cls and \
_declared_mapping_info(base) is not None and \
not _get_immediate_cls_attr(
base, '_sa_decl_prepare_nocascade', strict=True)
if not class_mapped and base is not cls:
self._produce_column_copies(base)
for name, obj in vars(base).items():
if name == '__mapper_args__':
if not mapper_args_fn and (
not class_mapped or
isinstance(obj, declarative_props)
):
# don't even invoke __mapper_args__ until
# after we've determined everything about the
# mapped table.
# make a copy of it so a class-level dictionary
# is not overwritten when we update column-based
# arguments.
mapper_args_fn = lambda: dict(cls.__mapper_args__)
elif name == '__tablename__':
if not tablename and (
not class_mapped or
isinstance(obj, declarative_props)
):
tablename = cls.__tablename__
elif name == '__table_args__':
if not table_args and (
not class_mapped or
isinstance(obj, declarative_props)
):
table_args = cls.__table_args__
if not isinstance(
table_args, (tuple, dict, type(None))):
raise exc.ArgumentError(
"__table_args__ value must be a tuple, "
"dict, or None")
if base is not cls:
inherited_table_args = True
elif class_mapped:
if isinstance(obj, declarative_props):
util.warn("Regular (i.e. not __special__) "
"attribute '%s.%s' uses @declared_attr, "
"but owning class %s is mapped - "
"not applying to subclass %s."
% (base.__name__, name, base, cls))
continue
elif base is not cls:
# we're a mixin, abstract base, or something that is
# acting like that for now.
if isinstance(obj, Column):
# already copied columns to the mapped class.
continue
elif isinstance(obj, MapperProperty):
raise exc.InvalidRequestError(
"Mapper properties (i.e. deferred,"
"column_property(), relationship(), etc.) must "
"be declared as @declared_attr callables "
"on declarative mixin classes.")
elif isinstance(obj, declarative_props):
oldclassprop = isinstance(obj, util.classproperty)
if not oldclassprop and obj._cascading:
dict_[name] = column_copies[obj] = \
ret = obj.__get__(obj, cls)
setattr(cls, name, ret)
else:
if oldclassprop:
util.warn_deprecated(
"Use of sqlalchemy.util.classproperty on "
"declarative classes is deprecated.")
dict_[name] = column_copies[obj] = \
ret = getattr(cls, name)
if isinstance(ret, (Column, MapperProperty)) and \
ret.doc is None:
ret.doc = obj.__doc__
if inherited_table_args and not tablename:
table_args = None
self.table_args = table_args
self.tablename = tablename
self.mapper_args_fn = mapper_args_fn
def _produce_column_copies(self, base):
cls = self.cls
dict_ = self.dict_
column_copies = self.column_copies
# copy mixin columns to the mapped class
for name, obj in vars(base).items():
if isinstance(obj, Column):
if getattr(cls, name) is not obj:
# if column has been overridden
# (like by the InstrumentedAttribute of the
# superclass), skip
continue
elif obj.foreign_keys:
raise exc.InvalidRequestError(
"Columns with foreign keys to other columns "
"must be declared as @declared_attr callables "
"on declarative mixin classes. ")
elif name not in dict_ and not (
'__table__' in dict_ and
(obj.name or name) in dict_['__table__'].c
):
column_copies[obj] = copy_ = obj.copy()
copy_._creation_order = obj._creation_order
setattr(cls, name, copy_)
dict_[name] = copy_
def _extract_mappable_attributes(self):
cls = self.cls
dict_ = self.dict_
our_stuff = self.properties
for k in list(dict_):
if k in ('__table__', '__tablename__', '__mapper_args__'):
continue
value = dict_[k]
if isinstance(value, declarative_props):
value = getattr(cls, k)
elif isinstance(value, QueryableAttribute) and \
value.class_ is not cls and \
value.key != k:
# detect a QueryableAttribute that's already mapped being
# assigned elsewhere in userland, turn into a synonym()
value = synonym(value.key)
setattr(cls, k, value)
if (isinstance(value, tuple) and len(value) == 1 and
isinstance(value[0], (Column, MapperProperty))):
util.warn("Ignoring declarative-like tuple value of attribute "
"%s: possibly a copy-and-paste error with a comma "
"left at the end of the line?" % k)
continue
elif not isinstance(value, (Column, MapperProperty)):
# using @declared_attr for some object that
# isn't Column/MapperProperty; remove from the dict_
# and place the evaluated value onto the class.
if not k.startswith('__'):
dict_.pop(k)
setattr(cls, k, value)
continue
# we expect to see the name 'metadata' in some valid cases;
# however at this point we see it's assigned to something trying
# to be mapped, so raise for that.
elif k == 'metadata':
raise exc.InvalidRequestError(
"Attribute name 'metadata' is reserved "
"for the MetaData instance when using a "
"declarative base class."
)
prop = clsregistry._deferred_relationship(cls, value)
our_stuff[k] = prop
def _extract_declared_columns(self):
our_stuff = self.properties
# set up attributes in the order they were created
our_stuff.sort(key=lambda key: our_stuff[key]._creation_order)
# extract columns from the class dict
declared_columns = self.declared_columns
name_to_prop_key = collections.defaultdict(set)
for key, c in list(our_stuff.items()):
if isinstance(c, (ColumnProperty, CompositeProperty)):
for col in c.columns:
if isinstance(col, Column) and \
col.table is None:
_undefer_column_name(key, col)
if not isinstance(c, CompositeProperty):
name_to_prop_key[col.name].add(key)
declared_columns.add(col)
elif isinstance(c, Column):
_undefer_column_name(key, c)
name_to_prop_key[c.name].add(key)
declared_columns.add(c)
# if the column is the same name as the key,
# remove it from the explicit properties dict.
# the normal rules for assigning column-based properties
# will take over, including precedence of columns
# in multi-column ColumnProperties.
if key == c.key:
del our_stuff[key]
for name, keys in name_to_prop_key.items():
if len(keys) > 1:
util.warn(
"On class %r, Column object %r named "
"directly multiple times, "
"only one will be used: %s. "
"Consider using orm.synonym instead" %
(self.classname, name, (", ".join(sorted(keys))))
)
def _setup_table(self):
cls = self.cls
tablename = self.tablename
table_args = self.table_args
dict_ = self.dict_
declared_columns = self.declared_columns
declared_columns = self.declared_columns = sorted(
declared_columns, key=lambda c: c._creation_order)
table = None
if hasattr(cls, '__table_cls__'):
table_cls = util.unbound_method_to_callable(cls.__table_cls__)
else:
table_cls = Table
if '__table__' not in dict_:
if tablename is not None:
args, table_kw = (), {}
if table_args:
if isinstance(table_args, dict):
table_kw = table_args
elif isinstance(table_args, tuple):
if isinstance(table_args[-1], dict):
args, table_kw = table_args[0:-1], table_args[-1]
else:
args = table_args
autoload = dict_.get('__autoload__')
if autoload:
table_kw['autoload'] = True
cls.__table__ = table = table_cls(
tablename, cls.metadata,
*(tuple(declared_columns) + tuple(args)),
**table_kw)
else:
table = cls.__table__
if declared_columns:
for c in declared_columns:
if not table.c.contains_column(c):
raise exc.ArgumentError(
"Can't add additional column %r when "
"specifying __table__" % c.key
)
self.local_table = table
def _setup_inheritance(self):
table = self.local_table
cls = self.cls
table_args = self.table_args
declared_columns = self.declared_columns
for c in cls.__bases__:
c = _resolve_for_abstract(c)
if c is None:
continue
if _declared_mapping_info(c) is not None and \
not _get_immediate_cls_attr(
c, '_sa_decl_prepare_nocascade', strict=True):
self.inherits = c
break
else:
self.inherits = None
if table is None and self.inherits is None and \
not _get_immediate_cls_attr(cls, '__no_table__'):
raise exc.InvalidRequestError(
"Class %r does not have a __table__ or __tablename__ "
"specified and does not inherit from an existing "
"table-mapped class." % cls
)
elif self.inherits:
inherited_mapper = _declared_mapping_info(self.inherits)
inherited_table = inherited_mapper.local_table
inherited_mapped_table = inherited_mapper.mapped_table
if table is None:
# single table inheritance.
# ensure no table args
if table_args:
raise exc.ArgumentError(
"Can't place __table_args__ on an inherited class "
"with no table."
)
# add any columns declared here to the inherited table.
for c in declared_columns:
if c.primary_key:
raise exc.ArgumentError(
"Can't place primary key columns on an inherited "
"class with no table."
)
if c.name in inherited_table.c:
if inherited_table.c[c.name] is c:
continue
raise exc.ArgumentError(
"Column '%s' on class %s conflicts with "
"existing column '%s'" %
(c, cls, inherited_table.c[c.name])
)
inherited_table.append_column(c)
if inherited_mapped_table is not None and \
inherited_mapped_table is not inherited_table:
inherited_mapped_table._refresh_for_new_column(c)
def _prepare_mapper_arguments(self):
properties = self.properties
if self.mapper_args_fn:
mapper_args = self.mapper_args_fn()
else:
mapper_args = {}
# make sure that column copies are used rather
# than the original columns from any mixins
for k in ('version_id_col', 'polymorphic_on',):
if k in mapper_args:
v = mapper_args[k]
mapper_args[k] = self.column_copies.get(v, v)
assert 'inherits' not in mapper_args, \
"Can't specify 'inherits' explicitly with declarative mappings"
if self.inherits:
mapper_args['inherits'] = self.inherits
if self.inherits and not mapper_args.get('concrete', False):
# single or joined inheritance
# exclude any cols on the inherited table which are
# not mapped on the parent class, to avoid
# mapping columns specific to sibling/nephew classes
inherited_mapper = _declared_mapping_info(self.inherits)
inherited_table = inherited_mapper.local_table
if 'exclude_properties' not in mapper_args:
mapper_args['exclude_properties'] = exclude_properties = \
set([c.key for c in inherited_table.c
if c not in inherited_mapper._columntoproperty])
exclude_properties.difference_update(
[c.key for c in self.declared_columns])
# look through columns in the current mapper that
# are keyed to a propname different than the colname
# (if names were the same, we'd have popped it out above,
# in which case the mapper makes this combination).
# See if the superclass has a similar column property.
# If so, join them together.
for k, col in list(properties.items()):
if not isinstance(col, expression.ColumnElement):
continue
if k in inherited_mapper._props:
p = inherited_mapper._props[k]
if isinstance(p, ColumnProperty):
# note here we place the subclass column
# first. See [ticket:1892] for background.
properties[k] = [col] + p.columns
result_mapper_args = mapper_args.copy()
result_mapper_args['properties'] = properties
self.mapper_args = result_mapper_args
def map(self):
self._prepare_mapper_arguments()
if hasattr(self.cls, '__mapper_cls__'):
mapper_cls = util.unbound_method_to_callable(
self.cls.__mapper_cls__)
else:
mapper_cls = mapper
self.cls.__mapper__ = mp_ = mapper_cls(
self.cls,
self.local_table,
**self.mapper_args
)
del self.cls._sa_declared_attr_reg
return mp_
class _DeferredMapperConfig(_MapperConfig):
_configs = util.OrderedDict()
def _early_mapping(self):
pass
@property
def cls(self):
return self._cls()
@cls.setter
def cls(self, class_):
self._cls = weakref.ref(class_, self._remove_config_cls)
self._configs[self._cls] = self
@classmethod
def _remove_config_cls(cls, ref):
cls._configs.pop(ref, None)
@classmethod
def has_cls(cls, class_):
# 2.6 fails on weakref if class_ is an old style class
return isinstance(class_, type) and \
weakref.ref(class_) in cls._configs
@classmethod
def config_for_cls(cls, class_):
return cls._configs[weakref.ref(class_)]
@classmethod
def classes_for_base(cls, base_cls, sort=True):
classes_for_base = [m for m in cls._configs.values()
if issubclass(m.cls, base_cls)]
if not sort:
return classes_for_base
all_m_by_cls = dict(
(m.cls, m)
for m in classes_for_base
)
tuples = []
for m_cls in all_m_by_cls:
tuples.extend(
(all_m_by_cls[base_cls], all_m_by_cls[m_cls])
for base_cls in m_cls.__bases__
if base_cls in all_m_by_cls
)
return list(
topological.sort(
tuples,
classes_for_base
)
)
def map(self):
self._configs.pop(self._cls, None)
return super(_DeferredMapperConfig, self).map()
def _add_attribute(cls, key, value):
"""add an attribute to an existing declarative class.
This runs through the logic to determine MapperProperty,
adds it to the Mapper, adds a column to the mapped Table, etc.
"""
if '__mapper__' in cls.__dict__:
if isinstance(value, Column):
_undefer_column_name(key, value)
cls.__table__.append_column(value)
cls.__mapper__.add_property(key, value)
elif isinstance(value, ColumnProperty):
for col in value.columns:
if isinstance(col, Column) and col.table is None:
_undefer_column_name(key, col)
cls.__table__.append_column(col)
cls.__mapper__.add_property(key, value)
elif isinstance(value, MapperProperty):
cls.__mapper__.add_property(
key,
clsregistry._deferred_relationship(cls, value)
)
elif isinstance(value, QueryableAttribute) and value.key != key:
# detect a QueryableAttribute that's already mapped being
# assigned elsewhere in userland, turn into a synonym()
value = synonym(value.key)
cls.__mapper__.add_property(
key,
clsregistry._deferred_relationship(cls, value)
)
else:
type.__setattr__(cls, key, value)
else:
type.__setattr__(cls, key, value)
def _declarative_constructor(self, **kwargs):
"""A simple constructor that allows initialization from kwargs.
Sets attributes on the constructed instance using the names and
values in ``kwargs``.
Only keys that are present as
attributes of the instance's class are allowed. These could be,
for example, any mapped columns or relationships.
"""
cls_ = type(self)
for k in kwargs:
if not hasattr(cls_, k):
raise TypeError(
"%r is an invalid keyword argument for %s" %
(k, cls_.__name__))
setattr(self, k, kwargs[k])
_declarative_constructor.__name__ = '__init__'
def _undefer_column_name(key, column):
if column.key is None:
column.key = key
if column.name is None:
column.name = key
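if __name__ == "__main__":
    # A minimal, illustrative sketch (run as
    # ``python -m sqlalchemy.ext.declarative.base``): the public declarative
    # API drives the machinery above.  Class creation runs _as_declarative(),
    # assigning an attribute on a mapped class runs _add_attribute(), and
    # instance construction uses _declarative_constructor().
    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))

    # Goes through _add_attribute(); _undefer_column_name() fills in the
    # column key/name from the attribute name.
    User.nickname = Column(String(30))

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    print(User(name='alice', nickname='al').nickname)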
| mit | -7,958,241,894,453,305,000 | 37.43465 | 79 | 0.520285 | false |
koomik/CouchPotatoServer | libs/tornado/auth.py | 16 | 59831 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This module contains implementations of various third-party
authentication schemes.
All the classes in this file are class mixins designed to be used with
the `tornado.web.RequestHandler` class. They are used in two ways:
* On a login handler, use methods such as ``authenticate_redirect()``,
``authorize_redirect()``, and ``get_authenticated_user()`` to
establish the user's identity and store authentication tokens to your
database and/or cookies.
* In non-login handlers, use methods such as ``facebook_request()``
or ``twitter_request()`` to use the authentication tokens to make
requests to the respective services.
They all take slightly different arguments because all these
services implement authentication and authorization slightly differently.
See the individual service classes below for complete documentation.
Example usage for Google OpenID::
class GoogleLoginHandler(tornado.web.RequestHandler,
tornado.auth.GoogleMixin):
@tornado.gen.coroutine
def get(self):
if self.get_argument("openid.mode", None):
user = yield self.get_authenticated_user()
# Save the user with e.g. set_secure_cookie()
else:
yield self.authenticate_redirect()
"""
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
import functools
import hashlib
import hmac
import time
import uuid
from tornado.concurrent import TracebackFuture, chain_future, return_future
from tornado import gen
from tornado import httpclient
from tornado import escape
from tornado.httputil import url_concat
from tornado.log import gen_log
from tornado.util import bytes_type, u, unicode_type, ArgReplacer
try:
import urlparse # py2
except ImportError:
import urllib.parse as urlparse # py3
try:
import urllib.parse as urllib_parse # py3
except ImportError:
import urllib as urllib_parse # py2
class AuthError(Exception):
pass
def _auth_future_to_callback(callback, future):
try:
result = future.result()
except AuthError as e:
gen_log.warning(str(e))
result = None
callback(result)
def _auth_return_future(f):
"""Similar to tornado.concurrent.return_future, but uses the auth
module's legacy callback interface.
Note that when using this decorator the ``callback`` parameter
inside the function will actually be a future.
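
    For example, a method decorated with this helper can be invoked either
    in coroutine style::

        user = yield handler.get_authenticated_user(...)

    or in the older callback style::

        handler.get_authenticated_user(..., callback=on_user)

    (the names here are only illustrative).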
"""
replacer = ArgReplacer(f, 'callback')
@functools.wraps(f)
def wrapper(*args, **kwargs):
future = TracebackFuture()
callback, args, kwargs = replacer.replace(future, args, kwargs)
if callback is not None:
future.add_done_callback(
functools.partial(_auth_future_to_callback, callback))
f(*args, **kwargs)
return future
return wrapper
class OpenIdMixin(object):
"""Abstract implementation of OpenID and Attribute Exchange.
See `GoogleMixin` below for a customized example (which also
includes OAuth support).
Class attributes:
* ``_OPENID_ENDPOINT``: the identity provider's URI.
"""
@return_future
def authenticate_redirect(self, callback_uri=None,
ax_attrs=["name", "email", "language", "username"],
callback=None):
"""Redirects to the authentication URL for this service.
After authentication, the service will redirect back to the given
callback URI with additional parameters including ``openid.mode``.
We request the given attributes for the authenticated user by
default (name, email, language, and username). If you don't need
all those attributes for your app, you can request fewer with
the ax_attrs keyword argument.
.. versionchanged:: 3.1
Returns a `.Future` and takes an optional callback. These are
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
"""
callback_uri = callback_uri or self.request.uri
args = self._openid_args(callback_uri, ax_attrs=ax_attrs)
self.redirect(self._OPENID_ENDPOINT + "?" + urllib_parse.urlencode(args))
callback()
@_auth_return_future
def get_authenticated_user(self, callback, http_client=None):
"""Fetches the authenticated user data upon redirect.
This method should be called by the handler that receives the
redirect from the `authenticate_redirect()` method (which is
often the same as the one that calls it; in that case you would
call `get_authenticated_user` if the ``openid.mode`` parameter
is present and `authenticate_redirect` if it is not).
The result of this method will generally be used to set a cookie.
"""
# Verify the OpenID response via direct request to the OP
args = dict((k, v[-1]) for k, v in self.request.arguments.items())
args["openid.mode"] = u("check_authentication")
url = self._OPENID_ENDPOINT
if http_client is None:
http_client = self.get_auth_http_client()
http_client.fetch(url, self.async_callback(
self._on_authentication_verified, callback),
method="POST", body=urllib_parse.urlencode(args))
def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None):
url = urlparse.urljoin(self.request.full_url(), callback_uri)
args = {
"openid.ns": "http://specs.openid.net/auth/2.0",
"openid.claimed_id":
"http://specs.openid.net/auth/2.0/identifier_select",
"openid.identity":
"http://specs.openid.net/auth/2.0/identifier_select",
"openid.return_to": url,
"openid.realm": urlparse.urljoin(url, '/'),
"openid.mode": "checkid_setup",
}
if ax_attrs:
args.update({
"openid.ns.ax": "http://openid.net/srv/ax/1.0",
"openid.ax.mode": "fetch_request",
})
ax_attrs = set(ax_attrs)
required = []
if "name" in ax_attrs:
ax_attrs -= set(["name", "firstname", "fullname", "lastname"])
required += ["firstname", "fullname", "lastname"]
args.update({
"openid.ax.type.firstname":
"http://axschema.org/namePerson/first",
"openid.ax.type.fullname":
"http://axschema.org/namePerson",
"openid.ax.type.lastname":
"http://axschema.org/namePerson/last",
})
known_attrs = {
"email": "http://axschema.org/contact/email",
"language": "http://axschema.org/pref/language",
"username": "http://axschema.org/namePerson/friendly",
}
for name in ax_attrs:
args["openid.ax.type." + name] = known_attrs[name]
required.append(name)
args["openid.ax.required"] = ",".join(required)
if oauth_scope:
args.update({
"openid.ns.oauth":
"http://specs.openid.net/extensions/oauth/1.0",
"openid.oauth.consumer": self.request.host.split(":")[0],
"openid.oauth.scope": oauth_scope,
})
return args
def _on_authentication_verified(self, future, response):
if response.error or b"is_valid:true" not in response.body:
future.set_exception(AuthError(
"Invalid OpenID response: %s" % (response.error or
response.body)))
return
# Make sure we got back at least an email from attribute exchange
ax_ns = None
for name in self.request.arguments:
if name.startswith("openid.ns.") and \
self.get_argument(name) == u("http://openid.net/srv/ax/1.0"):
ax_ns = name[10:]
break
def get_ax_arg(uri):
if not ax_ns:
return u("")
prefix = "openid." + ax_ns + ".type."
ax_name = None
for name in self.request.arguments.keys():
if self.get_argument(name) == uri and name.startswith(prefix):
part = name[len(prefix):]
ax_name = "openid." + ax_ns + ".value." + part
break
if not ax_name:
return u("")
return self.get_argument(ax_name, u(""))
email = get_ax_arg("http://axschema.org/contact/email")
name = get_ax_arg("http://axschema.org/namePerson")
first_name = get_ax_arg("http://axschema.org/namePerson/first")
last_name = get_ax_arg("http://axschema.org/namePerson/last")
username = get_ax_arg("http://axschema.org/namePerson/friendly")
locale = get_ax_arg("http://axschema.org/pref/language").lower()
user = dict()
name_parts = []
if first_name:
user["first_name"] = first_name
name_parts.append(first_name)
if last_name:
user["last_name"] = last_name
name_parts.append(last_name)
if name:
user["name"] = name
elif name_parts:
user["name"] = u(" ").join(name_parts)
elif email:
user["name"] = email.split("@")[0]
if email:
user["email"] = email
if locale:
user["locale"] = locale
if username:
user["username"] = username
claimed_id = self.get_argument("openid.claimed_id", None)
if claimed_id:
user["claimed_id"] = claimed_id
future.set_result(user)
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
"""
return httpclient.AsyncHTTPClient()
class OAuthMixin(object):
"""Abstract implementation of OAuth 1.0 and 1.0a.
See `TwitterMixin` and `FriendFeedMixin` below for example implementations,
or `GoogleMixin` for an OAuth/OpenID hybrid.
Class attributes:
* ``_OAUTH_AUTHORIZE_URL``: The service's OAuth authorization url.
* ``_OAUTH_ACCESS_TOKEN_URL``: The service's OAuth access token url.
* ``_OAUTH_VERSION``: May be either "1.0" or "1.0a".
* ``_OAUTH_NO_CALLBACKS``: Set this to True if the service requires
advance registration of callbacks.
Subclasses must also override the `_oauth_get_user_future` and
`_oauth_consumer_token` methods.
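
    A minimal subclass sketch (the service URLs, setting names, and the
    ``example_request`` helper shown here are illustrative only)::

        class ExampleOAuthMixin(tornado.auth.OAuthMixin):
            _OAUTH_REQUEST_TOKEN_URL = "https://example.com/oauth/request_token"
            _OAUTH_ACCESS_TOKEN_URL = "https://example.com/oauth/access_token"
            _OAUTH_AUTHORIZE_URL = "https://example.com/oauth/authorize"

            def _oauth_consumer_token(self):
                return dict(key=self.settings["example_consumer_key"],
                            secret=self.settings["example_consumer_secret"])

            @tornado.gen.coroutine
            def _oauth_get_user_future(self, access_token):
                user = yield self.example_request(
                    "/account/me", access_token=access_token)
                raise tornado.gen.Return(user)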
"""
@return_future
def authorize_redirect(self, callback_uri=None, extra_params=None,
http_client=None, callback=None):
"""Redirects the user to obtain OAuth authorization for this service.
The ``callback_uri`` may be omitted if you have previously
registered a callback URI with the third-party service. For
        some services (including Friendfeed), you must use a
previously-registered callback URI and cannot specify a
callback via this method.
This method sets a cookie called ``_oauth_request_token`` which is
subsequently used (and cleared) in `get_authenticated_user` for
security purposes.
Note that this method is asynchronous, although it calls
`.RequestHandler.finish` for you so it may not be necessary
to pass a callback or use the `.Future` it returns. However,
if this method is called from a function decorated with
`.gen.coroutine`, you must call it with ``yield`` to keep the
response from being closed prematurely.
.. versionchanged:: 3.1
Now returns a `.Future` and takes an optional callback, for
compatibility with `.gen.coroutine`.
"""
if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False):
raise Exception("This service does not support oauth_callback")
if http_client is None:
http_client = self.get_auth_http_client()
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
http_client.fetch(
self._oauth_request_token_url(callback_uri=callback_uri,
extra_params=extra_params),
self.async_callback(
self._on_request_token,
self._OAUTH_AUTHORIZE_URL,
callback_uri,
callback))
else:
http_client.fetch(
self._oauth_request_token_url(),
self.async_callback(
self._on_request_token, self._OAUTH_AUTHORIZE_URL,
callback_uri,
callback))
@_auth_return_future
def get_authenticated_user(self, callback, http_client=None):
"""Gets the OAuth authorized user and access token.
This method should be called from the handler for your
OAuth callback URL to complete the registration process. We run the
callback with the authenticated user dictionary. This dictionary
        will contain an ``access_token`` which can be used to make authorized
requests to this service on behalf of the user. The dictionary will
also contain other fields such as ``name``, depending on the service
used.
"""
future = callback
request_key = escape.utf8(self.get_argument("oauth_token"))
oauth_verifier = self.get_argument("oauth_verifier", None)
request_cookie = self.get_cookie("_oauth_request_token")
if not request_cookie:
future.set_exception(AuthError(
"Missing OAuth request token cookie"))
return
self.clear_cookie("_oauth_request_token")
cookie_key, cookie_secret = [base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")]
if cookie_key != request_key:
future.set_exception(AuthError(
"Request token does not match cookie"))
return
token = dict(key=cookie_key, secret=cookie_secret)
if oauth_verifier:
token["verifier"] = oauth_verifier
if http_client is None:
http_client = self.get_auth_http_client()
http_client.fetch(self._oauth_access_token_url(token),
self.async_callback(self._on_access_token, callback))
def _oauth_request_token_url(self, callback_uri=None, extra_params=None):
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_REQUEST_TOKEN_URL
args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version="1.0",
)
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
if callback_uri == "oob":
args["oauth_callback"] = "oob"
elif callback_uri:
args["oauth_callback"] = urlparse.urljoin(
self.request.full_url(), callback_uri)
if extra_params:
args.update(extra_params)
signature = _oauth10a_signature(consumer_token, "GET", url, args)
else:
signature = _oauth_signature(consumer_token, "GET", url, args)
args["oauth_signature"] = signature
return url + "?" + urllib_parse.urlencode(args)
def _on_request_token(self, authorize_url, callback_uri, callback,
response):
if response.error:
raise Exception("Could not get request token: %s" % response.error)
request_token = _oauth_parse_response(response.body)
data = (base64.b64encode(escape.utf8(request_token["key"])) + b"|" +
base64.b64encode(escape.utf8(request_token["secret"])))
self.set_cookie("_oauth_request_token", data)
args = dict(oauth_token=request_token["key"])
if callback_uri == "oob":
self.finish(authorize_url + "?" + urllib_parse.urlencode(args))
callback()
return
elif callback_uri:
args["oauth_callback"] = urlparse.urljoin(
self.request.full_url(), callback_uri)
self.redirect(authorize_url + "?" + urllib_parse.urlencode(args))
callback()
def _oauth_access_token_url(self, request_token):
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_ACCESS_TOKEN_URL
args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_token=escape.to_basestring(request_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version="1.0",
)
if "verifier" in request_token:
args["oauth_verifier"] = request_token["verifier"]
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
signature = _oauth10a_signature(consumer_token, "GET", url, args,
request_token)
else:
signature = _oauth_signature(consumer_token, "GET", url, args,
request_token)
args["oauth_signature"] = signature
return url + "?" + urllib_parse.urlencode(args)
def _on_access_token(self, future, response):
if response.error:
future.set_exception(AuthError("Could not fetch access token"))
return
access_token = _oauth_parse_response(response.body)
self._oauth_get_user_future(access_token).add_done_callback(
self.async_callback(self._on_oauth_get_user, access_token, future))
def _oauth_consumer_token(self):
"""Subclasses must override this to return their OAuth consumer keys.
The return value should be a `dict` with keys ``key`` and ``secret``.
"""
raise NotImplementedError()
@return_future
def _oauth_get_user_future(self, access_token, callback):
"""Subclasses must override this to get basic information about the
user.
Should return a `.Future` whose result is a dictionary
containing information about the user, which may have been
retrieved by using ``access_token`` to make a request to the
service.
The access token will be added to the returned dictionary to make
the result of `get_authenticated_user`.
For backwards compatibility, the callback-based ``_oauth_get_user``
method is also supported.
"""
# By default, call the old-style _oauth_get_user, but new code
# should override this method instead.
self._oauth_get_user(access_token, callback)
def _oauth_get_user(self, access_token, callback):
raise NotImplementedError()
def _on_oauth_get_user(self, access_token, future, user_future):
if user_future.exception() is not None:
future.set_exception(user_future.exception())
return
user = user_future.result()
if not user:
future.set_exception(AuthError("Error getting user"))
return
user["access_token"] = access_token
future.set_result(user)
def _oauth_request_parameters(self, url, access_token, parameters={},
method="GET"):
"""Returns the OAuth parameters as a dict for the given request.
parameters should include all POST arguments and query string arguments
that will be sent with the request.
"""
consumer_token = self._oauth_consumer_token()
base_args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_token=escape.to_basestring(access_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version="1.0",
)
args = {}
args.update(base_args)
args.update(parameters)
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
signature = _oauth10a_signature(consumer_token, method, url, args,
access_token)
else:
signature = _oauth_signature(consumer_token, method, url, args,
access_token)
base_args["oauth_signature"] = escape.to_basestring(signature)
return base_args
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
"""
return httpclient.AsyncHTTPClient()
class OAuth2Mixin(object):
"""Abstract implementation of OAuth 2.0.
See `FacebookGraphMixin` below for an example implementation.
Class attributes:
* ``_OAUTH_AUTHORIZE_URL``: The service's authorization url.
* ``_OAUTH_ACCESS_TOKEN_URL``: The service's access token url.
"""
@return_future
def authorize_redirect(self, redirect_uri=None, client_id=None,
client_secret=None, extra_params=None,
callback=None, scope=None, response_type="code"):
"""Redirects the user to obtain OAuth authorization for this service.
Some providers require that you register a redirect URL with
your application instead of passing one via this method. You
should call this method to log the user in, and then call
``get_authenticated_user`` in the handler for your
redirect URL to complete the authorization process.
.. versionchanged:: 3.1
Returns a `.Future` and takes an optional callback. These are
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
"""
args = {
"redirect_uri": redirect_uri,
"client_id": client_id,
"response_type": response_type
}
if extra_params:
args.update(extra_params)
if scope:
args['scope'] = ' '.join(scope)
self.redirect(
url_concat(self._OAUTH_AUTHORIZE_URL, args))
callback()
def _oauth_request_token_url(self, redirect_uri=None, client_id=None,
client_secret=None, code=None,
extra_params=None):
url = self._OAUTH_ACCESS_TOKEN_URL
args = dict(
redirect_uri=redirect_uri,
code=code,
client_id=client_id,
client_secret=client_secret,
)
if extra_params:
args.update(extra_params)
return url_concat(url, args)
class TwitterMixin(OAuthMixin):
"""Twitter OAuth authentication.
To authenticate with Twitter, register your application with
Twitter at http://twitter.com/apps. Then copy your Consumer Key
and Consumer Secret to the application
`~tornado.web.Application.settings` ``twitter_consumer_key`` and
``twitter_consumer_secret``. Use this mixin on the handler for the
URL you registered as your application's callback URL.
When your application is set up, you can use this mixin like this
to authenticate the user with Twitter and get access to their stream::
class TwitterLoginHandler(tornado.web.RequestHandler,
tornado.auth.TwitterMixin):
@tornado.gen.coroutine
def get(self):
if self.get_argument("oauth_token", None):
user = yield self.get_authenticated_user()
# Save the user using e.g. set_secure_cookie()
else:
yield self.authorize_redirect()
The user object returned by `~OAuthMixin.get_authenticated_user`
includes the attributes ``username``, ``name``, ``access_token``,
and all of the custom Twitter user attributes described at
https://dev.twitter.com/docs/api/1.1/get/users/show
"""
_OAUTH_REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token"
_OAUTH_ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token"
_OAUTH_AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize"
_OAUTH_AUTHENTICATE_URL = "https://api.twitter.com/oauth/authenticate"
_OAUTH_NO_CALLBACKS = False
_TWITTER_BASE_URL = "https://api.twitter.com/1.1"
@return_future
def authenticate_redirect(self, callback_uri=None, callback=None):
"""Just like `~OAuthMixin.authorize_redirect`, but
auto-redirects if authorized.
This is generally the right interface to use if you are using
        Twitter for single sign-on.
.. versionchanged:: 3.1
Now returns a `.Future` and takes an optional callback, for
compatibility with `.gen.coroutine`.
"""
http = self.get_auth_http_client()
http.fetch(self._oauth_request_token_url(callback_uri=callback_uri),
self.async_callback(
self._on_request_token, self._OAUTH_AUTHENTICATE_URL,
None, callback))
@_auth_return_future
def twitter_request(self, path, callback=None, access_token=None,
post_args=None, **args):
"""Fetches the given API path, e.g., ``statuses/user_timeline/btaylor``
The path should not include the format or API version number.
(we automatically use JSON format and API version 1).
If the request is a POST, ``post_args`` should be provided. Query
string arguments should be given as keyword arguments.
All the Twitter methods are documented at http://dev.twitter.com/
Many methods require an OAuth access token which you can
obtain through `~OAuthMixin.authorize_redirect` and
`~OAuthMixin.get_authenticated_user`. The user returned through that
process includes an 'access_token' attribute that can be used
to make authenticated requests via this method. Example
usage::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.TwitterMixin):
@tornado.web.authenticated
@tornado.gen.coroutine
def get(self):
new_entry = yield self.twitter_request(
"/statuses/update",
post_args={"status": "Testing Tornado Web Server"},
access_token=self.current_user["access_token"])
if not new_entry:
# Call failed; perhaps missing permission?
yield self.authorize_redirect()
return
self.finish("Posted a message!")
"""
if path.startswith('http:') or path.startswith('https:'):
# Raw urls are useful for e.g. search which doesn't follow the
# usual pattern: http://search.twitter.com/search.json
url = path
else:
url = self._TWITTER_BASE_URL + path + ".json"
# Add the OAuth resource request signature if we have credentials
if access_token:
all_args = {}
all_args.update(args)
all_args.update(post_args or {})
method = "POST" if post_args is not None else "GET"
oauth = self._oauth_request_parameters(
url, access_token, all_args, method=method)
args.update(oauth)
if args:
url += "?" + urllib_parse.urlencode(args)
http = self.get_auth_http_client()
http_callback = self.async_callback(self._on_twitter_request, callback)
if post_args is not None:
http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
callback=http_callback)
else:
http.fetch(url, callback=http_callback)
def _on_twitter_request(self, future, response):
if response.error:
future.set_exception(AuthError(
"Error response %s fetching %s" % (response.error,
response.request.url)))
return
future.set_result(escape.json_decode(response.body))
def _oauth_consumer_token(self):
self.require_setting("twitter_consumer_key", "Twitter OAuth")
self.require_setting("twitter_consumer_secret", "Twitter OAuth")
return dict(
key=self.settings["twitter_consumer_key"],
secret=self.settings["twitter_consumer_secret"])
@gen.coroutine
def _oauth_get_user_future(self, access_token):
user = yield self.twitter_request(
"/account/verify_credentials",
access_token=access_token)
if user:
user["username"] = user["screen_name"]
raise gen.Return(user)
class FriendFeedMixin(OAuthMixin):
"""FriendFeed OAuth authentication.
To authenticate with FriendFeed, register your application with
FriendFeed at http://friendfeed.com/api/applications. Then copy
your Consumer Key and Consumer Secret to the application
`~tornado.web.Application.settings` ``friendfeed_consumer_key``
and ``friendfeed_consumer_secret``. Use this mixin on the handler
for the URL you registered as your application's Callback URL.
When your application is set up, you can use this mixin like this
to authenticate the user with FriendFeed and get access to their feed::
class FriendFeedLoginHandler(tornado.web.RequestHandler,
tornado.auth.FriendFeedMixin):
@tornado.gen.coroutine
def get(self):
if self.get_argument("oauth_token", None):
user = yield self.get_authenticated_user()
# Save the user using e.g. set_secure_cookie()
else:
yield self.authorize_redirect()
The user object returned by `~OAuthMixin.get_authenticated_user()` includes the
attributes ``username``, ``name``, and ``description`` in addition to
``access_token``. You should save the access token with the user;
it is required to make requests on behalf of the user later with
`friendfeed_request()`.
"""
_OAUTH_VERSION = "1.0"
_OAUTH_REQUEST_TOKEN_URL = "https://friendfeed.com/account/oauth/request_token"
_OAUTH_ACCESS_TOKEN_URL = "https://friendfeed.com/account/oauth/access_token"
_OAUTH_AUTHORIZE_URL = "https://friendfeed.com/account/oauth/authorize"
_OAUTH_NO_CALLBACKS = True
_OAUTH_VERSION = "1.0"
@_auth_return_future
def friendfeed_request(self, path, callback, access_token=None,
post_args=None, **args):
"""Fetches the given relative API path, e.g., "/bret/friends"
If the request is a POST, ``post_args`` should be provided. Query
string arguments should be given as keyword arguments.
All the FriendFeed methods are documented at
http://friendfeed.com/api/documentation.
Many methods require an OAuth access token which you can
obtain through `~OAuthMixin.authorize_redirect` and
`~OAuthMixin.get_authenticated_user`. The user returned
through that process includes an ``access_token`` attribute that
can be used to make authenticated requests via this
method.
Example usage::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FriendFeedMixin):
@tornado.web.authenticated
@tornado.gen.coroutine
def get(self):
new_entry = yield self.friendfeed_request(
"/entry",
post_args={"body": "Testing Tornado Web Server"},
access_token=self.current_user["access_token"])
if not new_entry:
# Call failed; perhaps missing permission?
yield self.authorize_redirect()
return
self.finish("Posted a message!")
"""
# Add the OAuth resource request signature if we have credentials
url = "http://friendfeed-api.com/v2" + path
if access_token:
all_args = {}
all_args.update(args)
all_args.update(post_args or {})
method = "POST" if post_args is not None else "GET"
oauth = self._oauth_request_parameters(
url, access_token, all_args, method=method)
args.update(oauth)
if args:
url += "?" + urllib_parse.urlencode(args)
callback = self.async_callback(self._on_friendfeed_request, callback)
http = self.get_auth_http_client()
if post_args is not None:
http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback)
def _on_friendfeed_request(self, future, response):
if response.error:
future.set_exception(AuthError(
"Error response %s fetching %s" % (response.error,
response.request.url)))
return
future.set_result(escape.json_decode(response.body))
def _oauth_consumer_token(self):
self.require_setting("friendfeed_consumer_key", "FriendFeed OAuth")
self.require_setting("friendfeed_consumer_secret", "FriendFeed OAuth")
return dict(
key=self.settings["friendfeed_consumer_key"],
secret=self.settings["friendfeed_consumer_secret"])
@gen.coroutine
def _oauth_get_user_future(self, access_token, callback):
user = yield self.friendfeed_request(
"/feedinfo/" + access_token["username"],
include="id,name,description", access_token=access_token)
if user:
user["username"] = user["id"]
callback(user)
def _parse_user_response(self, callback, user):
if user:
user["username"] = user["id"]
callback(user)
class GoogleMixin(OpenIdMixin, OAuthMixin):
"""Google Open ID / OAuth authentication.
No application registration is necessary to use Google for
authentication or to access Google resources on behalf of a user.
Google implements both OpenID and OAuth in a hybrid mode. If you
just need the user's identity, use
`~OpenIdMixin.authenticate_redirect`. If you need to make
requests to Google on behalf of the user, use
`authorize_redirect`. On return, parse the response with
`~OpenIdMixin.get_authenticated_user`. We send a dict containing
the values for the user, including ``email``, ``name``, and
``locale``.
Example usage::
class GoogleLoginHandler(tornado.web.RequestHandler,
tornado.auth.GoogleMixin):
@tornado.gen.coroutine
def get(self):
if self.get_argument("openid.mode", None):
user = yield self.get_authenticated_user()
# Save the user with e.g. set_secure_cookie()
else:
yield self.authenticate_redirect()
"""
_OPENID_ENDPOINT = "https://www.google.com/accounts/o8/ud"
_OAUTH_ACCESS_TOKEN_URL = "https://www.google.com/accounts/OAuthGetAccessToken"
@return_future
def authorize_redirect(self, oauth_scope, callback_uri=None,
ax_attrs=["name", "email", "language", "username"],
callback=None):
"""Authenticates and authorizes for the given Google resource.
Some of the available resources which can be used in the ``oauth_scope``
argument are:
* Gmail Contacts - http://www.google.com/m8/feeds/
* Calendar - http://www.google.com/calendar/feeds/
* Finance - http://finance.google.com/finance/feeds/
You can authorize multiple resources by separating the resource
URLs with a space.
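
        For example (illustrative)::

            yield self.authorize_redirect(
                "http://www.google.com/m8/feeds/ "
                "http://www.google.com/calendar/feeds/")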
.. versionchanged:: 3.1
Returns a `.Future` and takes an optional callback. These are
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
"""
callback_uri = callback_uri or self.request.uri
args = self._openid_args(callback_uri, ax_attrs=ax_attrs,
oauth_scope=oauth_scope)
self.redirect(self._OPENID_ENDPOINT + "?" + urllib_parse.urlencode(args))
callback()
@_auth_return_future
def get_authenticated_user(self, callback):
"""Fetches the authenticated user data upon redirect."""
# Look to see if we are doing combined OpenID/OAuth
oauth_ns = ""
for name, values in self.request.arguments.items():
if name.startswith("openid.ns.") and \
values[-1] == b"http://specs.openid.net/extensions/oauth/1.0":
oauth_ns = name[10:]
break
token = self.get_argument("openid." + oauth_ns + ".request_token", "")
if token:
http = self.get_auth_http_client()
token = dict(key=token, secret="")
http.fetch(self._oauth_access_token_url(token),
self.async_callback(self._on_access_token, callback))
else:
chain_future(OpenIdMixin.get_authenticated_user(self),
callback)
def _oauth_consumer_token(self):
self.require_setting("google_consumer_key", "Google OAuth")
self.require_setting("google_consumer_secret", "Google OAuth")
return dict(
key=self.settings["google_consumer_key"],
secret=self.settings["google_consumer_secret"])
def _oauth_get_user_future(self, access_token):
return OpenIdMixin.get_authenticated_user(self)
class GoogleOAuth2Mixin(OAuth2Mixin):
"""Google authentication using OAuth2.
.. versionadded:: 3.2
"""
_OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/auth"
_OAUTH_ACCESS_TOKEN_URL = "https://accounts.google.com/o/oauth2/token"
_OAUTH_NO_CALLBACKS = False
_OAUTH_SETTINGS_KEY = 'google_oauth'
@_auth_return_future
def get_authenticated_user(self, redirect_uri, code, callback):
"""Handles the login for the Google user, returning a user object.
Example usage::
class GoogleOAuth2LoginHandler(LoginHandler,
tornado.auth.GoogleOAuth2Mixin):
@tornado.gen.coroutine
def get(self):
if self.get_argument('code', False):
user = yield self.get_authenticated_user(
redirect_uri='http://your.site.com/auth/google',
code=self.get_argument('code'))
# Save the user with e.g. set_secure_cookie
else:
yield self.authorize_redirect(
redirect_uri='http://your.site.com/auth/google',
client_id=self.settings['google_oauth']['key'],
scope=['profile', 'email'],
response_type='code',
extra_params={'approval_prompt': 'auto'})
"""
http = self.get_auth_http_client()
body = urllib_parse.urlencode({
"redirect_uri": redirect_uri,
"code": code,
"client_id": self.settings[self._OAUTH_SETTINGS_KEY]['key'],
"client_secret": self.settings[self._OAUTH_SETTINGS_KEY]['secret'],
"grant_type": "authorization_code",
})
http.fetch(self._OAUTH_ACCESS_TOKEN_URL,
self.async_callback(self._on_access_token, callback),
method="POST", headers={'Content-Type': 'application/x-www-form-urlencoded'}, body=body)
def _on_access_token(self, future, response):
"""Callback function for the exchange to the access token."""
if response.error:
future.set_exception(AuthError('Google auth error: %s' % str(response)))
return
args = escape.json_decode(response.body)
future.set_result(args)
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
"""
return httpclient.AsyncHTTPClient()
class FacebookMixin(object):
"""Facebook Connect authentication.
*Deprecated:* New applications should use `FacebookGraphMixin`
below instead of this class. This class does not support the
Future-based interface seen on other classes in this module.
To authenticate with Facebook, register your application with
Facebook at http://www.facebook.com/developers/apps.php. Then
copy your API Key and Application Secret to the application settings
``facebook_api_key`` and ``facebook_secret``.
When your application is set up, you can use this mixin like this
to authenticate the user with Facebook::
class FacebookHandler(tornado.web.RequestHandler,
tornado.auth.FacebookMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("session", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
                self.authenticate_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Facebook auth failed")
# Save the user using, e.g., set_secure_cookie()
The user object returned by `get_authenticated_user` includes the
attributes ``facebook_uid`` and ``name`` in addition to session attributes
like ``session_key``. You should save the session key with the user; it is
required to make requests on behalf of the user later with
`facebook_request`.
"""
@return_future
def authenticate_redirect(self, callback_uri=None, cancel_uri=None,
extended_permissions=None, callback=None):
"""Authenticates/installs this app for the current user.
.. versionchanged:: 3.1
Returns a `.Future` and takes an optional callback. These are
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
"""
self.require_setting("facebook_api_key", "Facebook Connect")
callback_uri = callback_uri or self.request.uri
args = {
"api_key": self.settings["facebook_api_key"],
"v": "1.0",
"fbconnect": "true",
"display": "page",
"next": urlparse.urljoin(self.request.full_url(), callback_uri),
"return_session": "true",
}
if cancel_uri:
args["cancel_url"] = urlparse.urljoin(
self.request.full_url(), cancel_uri)
if extended_permissions:
if isinstance(extended_permissions, (unicode_type, bytes_type)):
extended_permissions = [extended_permissions]
args["req_perms"] = ",".join(extended_permissions)
self.redirect("http://www.facebook.com/login.php?" +
urllib_parse.urlencode(args))
callback()
def authorize_redirect(self, extended_permissions, callback_uri=None,
cancel_uri=None, callback=None):
"""Redirects to an authorization request for the given FB resource.
The available resource names are listed at
http://wiki.developers.facebook.com/index.php/Extended_permission.
The most common resource types include:
* publish_stream
* read_stream
* email
* sms
extended_permissions can be a single permission name or a list of
names. To get the session secret and session key, call
get_authenticated_user() just as you would with
authenticate_redirect().
.. versionchanged:: 3.1
Returns a `.Future` and takes an optional callback. These are
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
"""
return self.authenticate_redirect(callback_uri, cancel_uri,
extended_permissions,
callback=callback)
def get_authenticated_user(self, callback):
"""Fetches the authenticated Facebook user.
The authenticated user includes the special Facebook attributes
'session_key' and 'facebook_uid' in addition to the standard
user attributes like 'name'.
"""
self.require_setting("facebook_api_key", "Facebook Connect")
session = escape.json_decode(self.get_argument("session"))
self.facebook_request(
method="facebook.users.getInfo",
callback=self.async_callback(
self._on_get_user_info, callback, session),
session_key=session["session_key"],
uids=session["uid"],
fields="uid,first_name,last_name,name,locale,pic_square,"
"profile_url,username")
def facebook_request(self, method, callback, **args):
"""Makes a Facebook API REST request.
We automatically include the Facebook API key and signature, but
        it is the caller's responsibility to include 'session_key' and any
other required arguments to the method.
The available Facebook methods are documented here:
http://wiki.developers.facebook.com/index.php/API
Here is an example for the stream.get() method::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
self.facebook_request(
method="stream.get",
callback=self.async_callback(self._on_stream),
session_key=self.current_user["session_key"])
def _on_stream(self, stream):
if stream is None:
# Not authorized to read the stream yet?
self.redirect(self.authorize_redirect("read_stream"))
return
self.render("stream.html", stream=stream)
"""
self.require_setting("facebook_api_key", "Facebook Connect")
self.require_setting("facebook_secret", "Facebook Connect")
if not method.startswith("facebook."):
method = "facebook." + method
args["api_key"] = self.settings["facebook_api_key"]
args["v"] = "1.0"
args["method"] = method
args["call_id"] = str(long(time.time() * 1e6))
args["format"] = "json"
args["sig"] = self._signature(args)
url = "http://api.facebook.com/restserver.php?" + \
urllib_parse.urlencode(args)
http = self.get_auth_http_client()
http.fetch(url, callback=self.async_callback(
self._parse_response, callback))
def _on_get_user_info(self, callback, session, users):
if users is None:
callback(None)
return
callback({
"name": users[0]["name"],
"first_name": users[0]["first_name"],
"last_name": users[0]["last_name"],
"uid": users[0]["uid"],
"locale": users[0]["locale"],
"pic_square": users[0]["pic_square"],
"profile_url": users[0]["profile_url"],
"username": users[0].get("username"),
"session_key": session["session_key"],
"session_expires": session.get("expires"),
})
def _parse_response(self, callback, response):
if response.error:
gen_log.warning("HTTP error from Facebook: %s", response.error)
callback(None)
return
try:
json = escape.json_decode(response.body)
except Exception:
gen_log.warning("Invalid JSON from Facebook: %r", response.body)
callback(None)
return
if isinstance(json, dict) and json.get("error_code"):
gen_log.warning("Facebook error: %d: %r", json["error_code"],
json.get("error_msg"))
callback(None)
return
callback(json)
def _signature(self, args):
parts = ["%s=%s" % (n, args[n]) for n in sorted(args.keys())]
body = "".join(parts) + self.settings["facebook_secret"]
if isinstance(body, unicode_type):
body = body.encode("utf-8")
return hashlib.md5(body).hexdigest()
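    # Illustrative note (not part of the original API): the legacy REST
    # signature above is the MD5 hex digest of the sorted "key=value" pairs
    # concatenated without separators, followed by the app secret. For
    # example, args {"api_key": "KEY", "v": "1.0"} with secret "SECRET"
    # would be signed over the string "api_key=KEYv=1.0SECRET".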
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
"""
return httpclient.AsyncHTTPClient()
class FacebookGraphMixin(OAuth2Mixin):
"""Facebook authentication using the new Graph API and OAuth2."""
_OAUTH_ACCESS_TOKEN_URL = "https://graph.facebook.com/oauth/access_token?"
_OAUTH_AUTHORIZE_URL = "https://www.facebook.com/dialog/oauth?"
_OAUTH_NO_CALLBACKS = False
_FACEBOOK_BASE_URL = "https://graph.facebook.com"
@_auth_return_future
def get_authenticated_user(self, redirect_uri, client_id, client_secret,
code, callback, extra_fields=None):
"""Handles the login for the Facebook user, returning a user object.
Example usage::
class FacebookGraphLoginHandler(LoginHandler, tornado.auth.FacebookGraphMixin):
@tornado.gen.coroutine
def get(self):
if self.get_argument("code", False):
user = yield self.get_authenticated_user(
redirect_uri='/auth/facebookgraph/',
client_id=self.settings["facebook_api_key"],
client_secret=self.settings["facebook_secret"],
code=self.get_argument("code"))
# Save the user with e.g. set_secure_cookie
else:
yield self.authorize_redirect(
redirect_uri='/auth/facebookgraph/',
client_id=self.settings["facebook_api_key"],
extra_params={"scope": "read_stream,offline_access"})
"""
http = self.get_auth_http_client()
args = {
"redirect_uri": redirect_uri,
"code": code,
"client_id": client_id,
"client_secret": client_secret,
}
fields = set(['id', 'name', 'first_name', 'last_name',
'locale', 'picture', 'link'])
if extra_fields:
fields.update(extra_fields)
http.fetch(self._oauth_request_token_url(**args),
self.async_callback(self._on_access_token, redirect_uri, client_id,
client_secret, callback, fields))
def _on_access_token(self, redirect_uri, client_id, client_secret,
future, fields, response):
if response.error:
future.set_exception(AuthError('Facebook auth error: %s' % str(response)))
return
args = escape.parse_qs_bytes(escape.native_str(response.body))
session = {
"access_token": args["access_token"][-1],
"expires": args.get("expires")
}
self.facebook_request(
path="/me",
callback=self.async_callback(
self._on_get_user_info, future, session, fields),
access_token=session["access_token"],
fields=",".join(fields)
)
def _on_get_user_info(self, future, session, fields, user):
if user is None:
future.set_result(None)
return
fieldmap = {}
for field in fields:
fieldmap[field] = user.get(field)
fieldmap.update({"access_token": session["access_token"], "session_expires": session.get("expires")})
future.set_result(fieldmap)
@_auth_return_future
def facebook_request(self, path, callback, access_token=None,
post_args=None, **args):
"""Fetches the given relative API path, e.g., "/btaylor/picture"
If the request is a POST, ``post_args`` should be provided. Query
string arguments should be given as keyword arguments.
An introduction to the Facebook Graph API can be found at
http://developers.facebook.com/docs/api
Many methods require an OAuth access token which you can
obtain through `~OAuth2Mixin.authorize_redirect` and
`get_authenticated_user`. The user returned through that
process includes an ``access_token`` attribute that can be
used to make authenticated requests via this method.
Example usage::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookGraphMixin):
@tornado.web.authenticated
@tornado.gen.coroutine
def get(self):
new_entry = yield self.facebook_request(
"/me/feed",
post_args={"message": "I am posting from my Tornado application!"},
access_token=self.current_user["access_token"])
if not new_entry:
# Call failed; perhaps missing permission?
yield self.authorize_redirect()
return
self.finish("Posted a message!")
The given path is relative to ``self._FACEBOOK_BASE_URL``,
by default "https://graph.facebook.com".
.. versionchanged:: 3.1
Added the ability to override ``self._FACEBOOK_BASE_URL``.
"""
url = self._FACEBOOK_BASE_URL + path
all_args = {}
if access_token:
all_args["access_token"] = access_token
all_args.update(args)
if all_args:
url += "?" + urllib_parse.urlencode(all_args)
callback = self.async_callback(self._on_facebook_request, callback)
http = self.get_auth_http_client()
if post_args is not None:
http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback)
def _on_facebook_request(self, future, response):
if response.error:
future.set_exception(AuthError("Error response %s fetching %s" %
(response.error, response.request.url)))
return
future.set_result(escape.json_decode(response.body))
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
"""
return httpclient.AsyncHTTPClient()
def _oauth_signature(consumer_token, method, url, parameters={}, token=None):
"""Calculates the HMAC-SHA1 OAuth signature for the given request.
See http://oauth.net/core/1.0/#signing_process
"""
parts = urlparse.urlparse(url)
scheme, netloc, path = parts[:3]
normalized_url = scheme.lower() + "://" + netloc.lower() + path
base_elems = []
base_elems.append(method.upper())
base_elems.append(normalized_url)
base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v)))
for k, v in sorted(parameters.items())))
base_string = "&".join(_oauth_escape(e) for e in base_elems)
key_elems = [escape.utf8(consumer_token["secret"])]
key_elems.append(escape.utf8(token["secret"] if token else ""))
key = b"&".join(key_elems)
hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
return binascii.b2a_base64(hash.digest())[:-1]
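# Hypothetical example of the base string built above: for method="GET",
# url="http://example.com/req" and parameters={"a": "1", "b": "2"} the
# signature base string is
#   "GET&http%3A%2F%2Fexample.com%2Freq&a%3D1%26b%3D2"
# and the HMAC key is "<consumer_secret>&<token_secret>" (empty token
# secret if no token is supplied).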
def _oauth10a_signature(consumer_token, method, url, parameters={}, token=None):
"""Calculates the HMAC-SHA1 OAuth 1.0a signature for the given request.
See http://oauth.net/core/1.0a/#signing_process
"""
parts = urlparse.urlparse(url)
scheme, netloc, path = parts[:3]
normalized_url = scheme.lower() + "://" + netloc.lower() + path
base_elems = []
base_elems.append(method.upper())
base_elems.append(normalized_url)
base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v)))
for k, v in sorted(parameters.items())))
base_string = "&".join(_oauth_escape(e) for e in base_elems)
key_elems = [escape.utf8(urllib_parse.quote(consumer_token["secret"], safe='~'))]
key_elems.append(escape.utf8(urllib_parse.quote(token["secret"], safe='~') if token else ""))
key = b"&".join(key_elems)
hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
return binascii.b2a_base64(hash.digest())[:-1]
def _oauth_escape(val):
if isinstance(val, unicode_type):
val = val.encode("utf-8")
return urllib_parse.quote(val, safe="~")
def _oauth_parse_response(body):
# I can't find an officially-defined encoding for oauth responses and
# have never seen anyone use non-ascii. Leave the response in a byte
# string for python 2, and use utf8 on python 3.
body = escape.native_str(body)
p = urlparse.parse_qs(body, keep_blank_values=False)
token = dict(key=p["oauth_token"][0], secret=p["oauth_token_secret"][0])
# Add the extra parameters the Provider included to the token
special = ("oauth_token", "oauth_token_secret")
token.update((k, p[k][0]) for k in p if k not in special)
return token
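# For reference (example values only): a provider response body such as
# "oauth_token=abc&oauth_token_secret=def&user_id=42" parses to
# {"key": "abc", "secret": "def", "user_id": "42"}.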
| gpl-3.0 | -1,946,024,237,020,586,000 | 40.636047 | 109 | 0.594207 | false |
cloudera/hue | desktop/core/ext-py/Django-1.11.29/django/conf/locale/az/formats.py | 58 | 1256 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j E Y, G:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y', # '25.10.06'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
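# Illustration (assuming localization and USE_THOUSAND_SEPARATOR are enabled):
# with the separators above, 1234.56 is rendered as "1 234,56", where the
# group separator is the non-breaking space defined by THOUSAND_SEPARATOR.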
| apache-2.0 | 7,719,322,486,259,934,000 | 34.885714 | 77 | 0.575637 | false |
sankha93/servo | tests/wpt/web-platform-tests/tools/wptserve/wptserve/ranges.py | 329 | 3003 | from utils import HTTPException
class RangeParser(object):
def __call__(self, header, file_size):
prefix = "bytes="
if not header.startswith(prefix):
raise HTTPException(416, message="Unrecognised range type %s" % (header,))
parts = header[len(prefix):].split(",")
ranges = []
for item in parts:
components = item.split("-")
if len(components) != 2:
raise HTTPException(416, "Bad range specifier %s" % (item))
data = []
for component in components:
if component == "":
data.append(None)
else:
try:
data.append(int(component))
except ValueError:
raise HTTPException(416, "Bad range specifier %s" % (item))
try:
ranges.append(Range(data[0], data[1], file_size))
except ValueError:
raise HTTPException(416, "Bad range specifier %s" % (item))
return self.coalesce_ranges(ranges, file_size)
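    # Worked example (illustrative): for header "bytes=0-499,500-999" and
    # file_size 1000, two Range objects are built and coalesce_ranges()
    # merges them into a single Range covering bytes 0..999.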
def coalesce_ranges(self, ranges, file_size):
rv = []
target = None
for current in reversed(sorted(ranges)):
if target is None:
target = current
else:
new = target.coalesce(current)
target = new[0]
if len(new) > 1:
rv.append(new[1])
rv.append(target)
return rv[::-1]
class Range(object):
def __init__(self, lower, upper, file_size):
self.file_size = file_size
self.lower, self.upper = self._abs(lower, upper)
if self.lower >= self.upper or self.lower >= self.file_size:
raise ValueError
def __repr__(self):
return "<Range %s-%s>" % (self.lower, self.upper)
def __lt__(self, other):
return self.lower < other.lower
def __gt__(self, other):
return self.lower > other.lower
def __eq__(self, other):
return self.lower == other.lower and self.upper == other.upper
def _abs(self, lower, upper):
if lower is None and upper is None:
lower, upper = 0, self.file_size
elif lower is None:
lower, upper = max(0, self.file_size - upper), self.file_size
elif upper is None:
lower, upper = lower, self.file_size
else:
lower, upper = lower, min(self.file_size, upper + 1)
return lower, upper
def coalesce(self, other):
assert self.file_size == other.file_size
if (self.upper < other.lower or self.lower > other.upper):
return sorted([self, other])
else:
return [Range(min(self.lower, other.lower),
max(self.upper, other.upper) - 1,
self.file_size)]
def header_value(self):
return "bytes %i-%i/%i" % (self.lower, self.upper - 1, self.file_size)
| mpl-2.0 | -6,042,647,247,678,426,000 | 32.366667 | 86 | 0.522811 | false |
sgzsh269/django | django/contrib/auth/__init__.py | 46 | 7785 | import inspect
import re
from django.apps import apps as django_apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.middleware.csrf import rotate_token
from django.utils.crypto import constant_time_compare
from django.utils.module_loading import import_string
from django.utils.translation import LANGUAGE_SESSION_KEY
from .signals import user_logged_in, user_logged_out, user_login_failed
SESSION_KEY = '_auth_user_id'
BACKEND_SESSION_KEY = '_auth_user_backend'
HASH_SESSION_KEY = '_auth_user_hash'
REDIRECT_FIELD_NAME = 'next'
def load_backend(path):
return import_string(path)()
def _get_backends(return_tuples=False):
backends = []
for backend_path in settings.AUTHENTICATION_BACKENDS:
backend = load_backend(backend_path)
backends.append((backend, backend_path) if return_tuples else backend)
if not backends:
raise ImproperlyConfigured(
'No authentication backends have been defined. Does '
'AUTHENTICATION_BACKENDS contain anything?'
)
return backends
def get_backends():
return _get_backends(return_tuples=False)
def _clean_credentials(credentials):
"""
Cleans a dictionary of credentials of potentially sensitive info before
sending to less secure functions.
Not comprehensive - intended for user_login_failed signal
"""
SENSITIVE_CREDENTIALS = re.compile('api|token|key|secret|password|signature', re.I)
CLEANSED_SUBSTITUTE = '********************'
for key in credentials:
if SENSITIVE_CREDENTIALS.search(key):
credentials[key] = CLEANSED_SUBSTITUTE
return credentials
def _get_user_session_key(request):
# This value in the session is always serialized to a string, so we need
# to convert it back to Python whenever we access it.
return get_user_model()._meta.pk.to_python(request.session[SESSION_KEY])
def authenticate(**credentials):
"""
If the given credentials are valid, return a User object.
"""
for backend, backend_path in _get_backends(return_tuples=True):
try:
inspect.getcallargs(backend.authenticate, **credentials)
except TypeError:
# This backend doesn't accept these credentials as arguments. Try the next one.
continue
try:
user = backend.authenticate(**credentials)
except PermissionDenied:
# This backend says to stop in our tracks - this user should not be allowed in at all.
break
if user is None:
continue
# Annotate the user object with the path of the backend.
user.backend = backend_path
return user
# The credentials supplied are invalid to all backends, fire signal
user_login_failed.send(sender=__name__, credentials=_clean_credentials(credentials))
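# Minimal sketch of a backend that authenticate() iterates over (illustrative
# only, not shipped with Django; lookup_user_by_token is a hypothetical helper):
#
#     class TokenBackend(object):
#         def authenticate(self, token=None):
#             return lookup_user_by_token(token)
#
#         def get_user(self, user_id):
#             return get_user_model().objects.filter(pk=user_id).first()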
def login(request, user, backend=None):
"""
Persist a user id and a backend in the request. This way a user doesn't
have to reauthenticate on every request. Note that data set during
the anonymous session is retained when the user logs in.
"""
session_auth_hash = ''
if user is None:
user = request.user
if hasattr(user, 'get_session_auth_hash'):
session_auth_hash = user.get_session_auth_hash()
if SESSION_KEY in request.session:
if _get_user_session_key(request) != user.pk or (
session_auth_hash and
not constant_time_compare(request.session.get(HASH_SESSION_KEY, ''), session_auth_hash)):
# To avoid reusing another user's session, create a new, empty
# session if the existing session corresponds to a different
# authenticated user.
request.session.flush()
else:
request.session.cycle_key()
try:
backend = backend or user.backend
except AttributeError:
backends = _get_backends(return_tuples=True)
if len(backends) == 1:
_, backend = backends[0]
else:
raise ValueError(
'You have multiple authentication backends configured and '
'therefore must provide the `backend` argument or set the '
'`backend` attribute on the user.'
)
request.session[SESSION_KEY] = user._meta.pk.value_to_string(user)
request.session[BACKEND_SESSION_KEY] = backend
request.session[HASH_SESSION_KEY] = session_auth_hash
if hasattr(request, 'user'):
request.user = user
rotate_token(request)
user_logged_in.send(sender=user.__class__, request=request, user=user)
def logout(request):
"""
Removes the authenticated user's ID from the request and flushes their
session data.
"""
# Dispatch the signal before the user is logged out so the receivers have a
# chance to find out *who* logged out.
user = getattr(request, 'user', None)
if hasattr(user, 'is_authenticated') and not user.is_authenticated:
user = None
user_logged_out.send(sender=user.__class__, request=request, user=user)
# remember language choice saved to session
language = request.session.get(LANGUAGE_SESSION_KEY)
request.session.flush()
if language is not None:
request.session[LANGUAGE_SESSION_KEY] = language
if hasattr(request, 'user'):
from django.contrib.auth.models import AnonymousUser
request.user = AnonymousUser()
def get_user_model():
"""
Returns the User model that is active in this project.
"""
try:
return django_apps.get_model(settings.AUTH_USER_MODEL)
except ValueError:
raise ImproperlyConfigured("AUTH_USER_MODEL must be of the form 'app_label.model_name'")
except LookupError:
raise ImproperlyConfigured(
"AUTH_USER_MODEL refers to model '%s' that has not been installed" % settings.AUTH_USER_MODEL
)
def get_user(request):
"""
Returns the user model instance associated with the given request session.
If no user is retrieved an instance of `AnonymousUser` is returned.
"""
from .models import AnonymousUser
user = None
try:
user_id = _get_user_session_key(request)
backend_path = request.session[BACKEND_SESSION_KEY]
except KeyError:
pass
else:
if backend_path in settings.AUTHENTICATION_BACKENDS:
backend = load_backend(backend_path)
user = backend.get_user(user_id)
# Verify the session
if hasattr(user, 'get_session_auth_hash'):
session_hash = request.session.get(HASH_SESSION_KEY)
session_hash_verified = session_hash and constant_time_compare(
session_hash,
user.get_session_auth_hash()
)
if not session_hash_verified:
request.session.flush()
user = None
return user or AnonymousUser()
def get_permission_codename(action, opts):
"""
Returns the codename of the permission for the specified action.
"""
return '%s_%s' % (action, opts.model_name)
def update_session_auth_hash(request, user):
"""
Updating a user's password logs out all sessions for the user.
This function takes the current request and the updated user object from
which the new session hash will be derived and updates the session hash
appropriately to prevent a password change from logging out the session
from which the password was changed.
"""
if hasattr(user, 'get_session_auth_hash') and request.user == user:
request.session[HASH_SESSION_KEY] = user.get_session_auth_hash()
default_app_config = 'django.contrib.auth.apps.AuthConfig'
| bsd-3-clause | -7,735,517,087,375,775,000 | 34.226244 | 105 | 0.662171 | false |
joelsmith/openshift-tools | openshift/installer/vendored/openshift-ansible-3.4.40/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py | 18 | 25244 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=line-too-long,invalid-name
"""For details on this module see DOCUMENTATION (below)"""
# router/registry cert grabbing
import subprocess
# etcd config file
import ConfigParser
# Expiration parsing
import datetime
# File path stuff
import os
# Config file parsing
import yaml
# Certificate loading
import OpenSSL.crypto
DOCUMENTATION = '''
---
module: openshift_cert_expiry
short_description: Check OpenShift Container Platform (OCP) and Kube certificate expirations on a cluster
description:
- The M(openshift_cert_expiry) module has two basic functions: to flag certificates which will expire in a set window of time from now, and to notify you about certificates which have already expired.
- When the module finishes, a summary of the examination is returned. Each certificate in the summary has a C(health) key with a value of one of the following:
- C(ok) - not expired, and outside of the expiration C(warning_days) window.
- C(warning) - not expired, but will expire between now and the C(warning_days) window.
- C(expired) - an expired certificate.
  - Certificate flagging follows this logic:
- If the expiration date is before now then the certificate is classified as C(expired).
  - The certificate's time to live (expiration date - now) is calculated; if that time window is less than C(warning_days) the certificate is classified as C(warning).
- All other conditions are classified as C(ok).
- The following keys are ALSO present in the certificate summary:
- C(cert_cn) - The common name of the certificate (additional CNs present in SAN extensions are omitted)
- C(days_remaining) - The number of days until the certificate expires.
- C(expiry) - The date the certificate expires on.
- C(path) - The full path to the certificate on the examined host.
version_added: "1.0"
options:
config_base:
description:
- Base path to OCP system settings.
required: false
default: /etc/origin
warning_days:
description:
- Flag certificates which will expire in C(warning_days) days from now.
required: false
default: 30
show_all:
description:
- Enable this option to show analysis of ALL certificates examined by this module.
- By default only certificates which have expired, or will expire within the C(warning_days) window will be reported.
required: false
default: false
author: "Tim Bielawa (@tbielawa) <[email protected]>"
'''
EXAMPLES = '''
# Default invocation, only notify about expired certificates or certificates which will expire within 30 days from now
- openshift_cert_expiry:
# Expand the warning window to show certificates expiring within a year from now
- openshift_cert_expiry: warning_days=365
# Show expired, soon to expire (now + 30 days), and all other certificates examined
- openshift_cert_expiry: show_all=true
'''
# We only need this for one thing, we don't care if it doesn't have
# that many public methods
#
# pylint: disable=too-few-public-methods
class FakeSecHead(object):
"""etcd does not begin their config file with an opening [section] as
required by the Python ConfigParser module. We hack around it by
slipping one in ourselves prior to parsing.
Source: Alex Martelli - http://stackoverflow.com/a/2819788/6490583
"""
def __init__(self, fp):
self.fp = fp
self.sechead = '[ETCD]\n'
def readline(self):
"""Make this look like a file-type object"""
if self.sechead:
try:
return self.sechead
finally:
self.sechead = None
else:
return self.fp.readline()
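# FakeSecHead is consumed further below (etcd section of main()) roughly as:
#
#     etcd_config = ConfigParser.ConfigParser()
#     etcd_config.readfp(FakeSecHead(open('/etc/etcd/etcd.conf')))
#     etcd_config.get('ETCD', 'ETCD_CA_FILE')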
######################################################################
def filter_paths(path_list):
"""`path_list` - A list of file paths to check. Only files which exist
will be returned
"""
return [p for p in path_list if os.path.exists(os.path.realpath(p))]
def load_and_handle_cert(cert_string, now, base64decode=False):
"""Load a certificate, split off the good parts, and return some
useful data
Params:
- `cert_string` (string) - a certificate loaded into a string object
- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against
- `base64decode` (bool) - run .decode('base64') on the input?
Returns:
A 3-tuple of the form: (certificate_common_name, certificate_expiry_date, certificate_time_remaining)
"""
if base64decode:
_cert_string = cert_string.decode('base-64')
else:
_cert_string = cert_string
cert_loaded = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM, _cert_string)
######################################################################
# Read all possible names from the cert
cert_subjects = []
for name, value in cert_loaded.get_subject().get_components():
cert_subjects.append('{}:{}'.format(name, value))
# To read SANs from a cert we must read the subjectAltName
# extension from the X509 Object. What makes this more difficult
# is that pyOpenSSL does not give extensions as a list, nor does
# it provide a count of all loaded extensions.
#
# Rather, extensions are REQUESTED by index. We must iterate over
# all extensions until we find the one called 'subjectAltName'. If
# we don't find that extension we'll eventually request an
# extension at an index where no extension exists (IndexError is
# raised). When that happens we know that the cert has no SANs so
# we break out of the loop.
i = 0
checked_all_extensions = False
while not checked_all_extensions:
try:
# Read the extension at index 'i'
ext = cert_loaded.get_extension(i)
except IndexError:
# We tried to read an extension but it isn't there, that
# means we ran out of extensions to check. Abort
san = None
checked_all_extensions = True
else:
# We were able to load the extension at index 'i'
if ext.get_short_name() == 'subjectAltName':
san = ext
checked_all_extensions = True
else:
# Try reading the next extension
i += 1
if san is not None:
# The X509Extension object for subjectAltName prints as a
# string with the alt names separated by a comma and a
# space. Split the string by ', ' and then add our new names
# to the list of existing names
cert_subjects.extend(str(san).split(', '))
cert_subject = ', '.join(cert_subjects)
######################################################################
# Grab the expiration date
cert_expiry = cert_loaded.get_notAfter()
cert_expiry_date = datetime.datetime.strptime(
cert_expiry,
# example get_notAfter() => 20180922170439Z
'%Y%m%d%H%M%SZ')
time_remaining = cert_expiry_date - now
return (cert_subject, cert_expiry_date, time_remaining)
def classify_cert(cert_meta, now, time_remaining, expire_window, cert_list):
"""Given metadata about a certificate under examination, classify it
into one of three categories, 'ok', 'warning', and 'expired'.
Params:
- `cert_meta` dict - A dict with certificate metadata. Required fields
include: 'cert_cn', 'path', 'expiry', 'days_remaining', 'health'.
- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against
- `time_remaining` (datetime.timedelta) - a timedelta for how long until the cert expires
- `expire_window` (datetime.timedelta) - a timedelta for how long the warning window is
- `cert_list` list - A list to shove the classified cert into
Return:
- `cert_list` - The updated list of classified certificates
"""
expiry_str = str(cert_meta['expiry'])
# Categorization
if cert_meta['expiry'] < now:
# This already expired, must NOTIFY
cert_meta['health'] = 'expired'
elif time_remaining < expire_window:
        # WARN about this upcoming expiration
cert_meta['health'] = 'warning'
else:
# Not expired or about to expire
cert_meta['health'] = 'ok'
cert_meta['expiry'] = expiry_str
cert_list.append(cert_meta)
return cert_list
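# Classification example (values for illustration): with warning_days=30, a
# certificate that expired yesterday is tagged 'expired', one expiring in 10
# days is 'warning', and one expiring in 90 days is 'ok'.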
def tabulate_summary(certificates, kubeconfigs, etcd_certs, router_certs, registry_certs):
"""Calculate the summary text for when the module finishes
running. This includes counts of each classification and what have
you.
Params:
- `certificates` (list of dicts) - Processed `expire_check_result`
dicts with filled in `health` keys for system certificates.
- `kubeconfigs` - as above for kubeconfigs
    - `etcd_certs` - as above for etcd certs
    - `router_certs` - as above for router certs
    - `registry_certs` - as above for registry certs
Return:
- `summary_results` (dict) - Counts of each cert type classification
and total items examined.
"""
items = certificates + kubeconfigs + etcd_certs + router_certs + registry_certs
summary_results = {
'system_certificates': len(certificates),
'kubeconfig_certificates': len(kubeconfigs),
'etcd_certificates': len(etcd_certs),
'router_certs': len(router_certs),
'registry_certs': len(registry_certs),
'total': len(items),
'ok': 0,
'warning': 0,
'expired': 0
}
summary_results['expired'] = len([c for c in items if c['health'] == 'expired'])
summary_results['warning'] = len([c for c in items if c['health'] == 'warning'])
summary_results['ok'] = len([c for c in items if c['health'] == 'ok'])
return summary_results
######################################################################
# This is our module MAIN function after all, so there's bound to be a
# lot of code bundled up into one block
#
# pylint: disable=too-many-locals,too-many-locals,too-many-statements,too-many-branches
def main():
"""This module examines certificates (in various forms) which compose
an OpenShift Container Platform cluster
"""
module = AnsibleModule(
argument_spec=dict(
config_base=dict(
required=False,
default="/etc/origin",
type='str'),
warning_days=dict(
required=False,
default=30,
type='int'),
show_all=dict(
required=False,
default=False,
type='bool')
),
supports_check_mode=True,
)
# Basic scaffolding for OpenShift specific certs
openshift_base_config_path = module.params['config_base']
openshift_master_config_path = os.path.normpath(
os.path.join(openshift_base_config_path, "master/master-config.yaml")
)
openshift_node_config_path = os.path.normpath(
os.path.join(openshift_base_config_path, "node/node-config.yaml")
)
openshift_cert_check_paths = [
openshift_master_config_path,
openshift_node_config_path,
]
# Paths for Kubeconfigs. Additional kubeconfigs are conditionally
# checked later in the code
master_kube_configs = ['admin', 'openshift-master',
'openshift-node', 'openshift-router',
'openshift-registry']
kubeconfig_paths = []
for m_kube_config in master_kube_configs:
kubeconfig_paths.append(
os.path.normpath(
os.path.join(openshift_base_config_path, "master/%s.kubeconfig" % m_kube_config)
)
)
# Validate some paths we have the ability to do ahead of time
openshift_cert_check_paths = filter_paths(openshift_cert_check_paths)
kubeconfig_paths = filter_paths(kubeconfig_paths)
# etcd, where do you hide your certs? Used when parsing etcd.conf
etcd_cert_params = [
"ETCD_CA_FILE",
"ETCD_CERT_FILE",
"ETCD_PEER_CA_FILE",
"ETCD_PEER_CERT_FILE",
]
# Expiry checking stuff
now = datetime.datetime.now()
# todo, catch exception for invalid input and return a fail_json
warning_days = int(module.params['warning_days'])
expire_window = datetime.timedelta(days=warning_days)
# Module stuff
#
# The results of our cert checking to return from the task call
check_results = {}
check_results['meta'] = {}
check_results['meta']['warning_days'] = warning_days
check_results['meta']['checked_at_time'] = str(now)
check_results['meta']['warn_before_date'] = str(now + expire_window)
check_results['meta']['show_all'] = str(module.params['show_all'])
# All the analyzed certs accumulate here
ocp_certs = []
######################################################################
# Sure, why not? Let's enable check mode.
if module.check_mode:
check_results['ocp_certs'] = []
module.exit_json(
check_results=check_results,
msg="Checked 0 total certificates. Expired/Warning/OK: 0/0/0. Warning window: %s days" % module.params['warning_days'],
rc=0,
changed=False
)
######################################################################
# Check for OpenShift Container Platform specific certs
######################################################################
for os_cert in filter_paths(openshift_cert_check_paths):
# Open up that config file and locate the cert and CA
with open(os_cert, 'r') as fp:
cert_meta = {}
cfg = yaml.load(fp)
# cert files are specified in parsed `fp` as relative to the path
# of the original config file. 'master-config.yaml' with certFile
# = 'foo.crt' implies that 'foo.crt' is in the same
# directory. certFile = '../foo.crt' is in the parent directory.
cfg_path = os.path.dirname(fp.name)
cert_meta['certFile'] = os.path.join(cfg_path, cfg['servingInfo']['certFile'])
cert_meta['clientCA'] = os.path.join(cfg_path, cfg['servingInfo']['clientCA'])
######################################################################
# Load the certificate and the CA, parse their expiration dates into
# datetime objects so we can manipulate them later
for _, v in cert_meta.iteritems():
with open(v, 'r') as fp:
cert = fp.read()
cert_subject, cert_expiry_date, time_remaining = load_and_handle_cert(cert, now)
expire_check_result = {
'cert_cn': cert_subject,
'path': fp.name,
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
}
classify_cert(expire_check_result, now, time_remaining, expire_window, ocp_certs)
######################################################################
# /Check for OpenShift Container Platform specific certs
######################################################################
######################################################################
# Check service Kubeconfigs
######################################################################
kubeconfigs = []
# There may be additional kubeconfigs to check, but their naming
# is less predictable than the ones we've already assembled.
try:
# Try to read the standard 'node-config.yaml' file to check if
# this host is a node.
with open(openshift_node_config_path, 'r') as fp:
cfg = yaml.load(fp)
# OK, the config file exists, therefore this is a
# node. Nodes have their own kubeconfig files to
# communicate with the master API. Let's read the relative
# path to that file from the node config.
node_masterKubeConfig = cfg['masterKubeConfig']
# As before, the path to the 'masterKubeConfig' file is
# relative to `fp`
cfg_path = os.path.dirname(fp.name)
node_kubeconfig = os.path.join(cfg_path, node_masterKubeConfig)
with open(node_kubeconfig, 'r') as fp:
# Read in the nodes kubeconfig file and grab the good stuff
cfg = yaml.load(fp)
c = cfg['users'][0]['user']['client-certificate-data']
(cert_subject,
cert_expiry_date,
time_remaining) = load_and_handle_cert(c, now, base64decode=True)
expire_check_result = {
'cert_cn': cert_subject,
'path': fp.name,
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
}
classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs)
except IOError:
# This is not a node
pass
for kube in filter_paths(kubeconfig_paths):
with open(kube, 'r') as fp:
# TODO: Maybe consider catching exceptions here?
cfg = yaml.load(fp)
# Per conversation, "the kubeconfigs you care about:
# admin, router, registry should all be single
# value". Following that advice we only grab the data for
# the user at index 0 in the 'users' list. There should
# not be more than one user.
c = cfg['users'][0]['user']['client-certificate-data']
(cert_subject,
cert_expiry_date,
time_remaining) = load_and_handle_cert(c, now, base64decode=True)
expire_check_result = {
'cert_cn': cert_subject,
'path': fp.name,
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
}
classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs)
######################################################################
# /Check service Kubeconfigs
######################################################################
######################################################################
# Check etcd certs
######################################################################
# Some values may be duplicated, make this a set for now so we
# unique them all
etcd_certs_to_check = set([])
etcd_certs = []
etcd_cert_params.append('dne')
try:
with open('/etc/etcd/etcd.conf', 'r') as fp:
etcd_config = ConfigParser.ConfigParser()
etcd_config.readfp(FakeSecHead(fp))
for param in etcd_cert_params:
try:
etcd_certs_to_check.add(etcd_config.get('ETCD', param))
except ConfigParser.NoOptionError:
# That parameter does not exist, oh well...
pass
except IOError:
# No etcd to see here, move along
pass
for etcd_cert in filter_paths(etcd_certs_to_check):
with open(etcd_cert, 'r') as fp:
c = fp.read()
(cert_subject,
cert_expiry_date,
time_remaining) = load_and_handle_cert(c, now)
expire_check_result = {
'cert_cn': cert_subject,
'path': fp.name,
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
}
classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs)
######################################################################
# /Check etcd certs
######################################################################
######################################################################
# Check router/registry certs
#
# These are saved as secrets in etcd. That means that we can not
# simply read a file to grab the data. Instead we're going to
# subprocess out to the 'oc get' command. On non-masters this
# command will fail, that is expected so we catch that exception.
######################################################################
router_certs = []
registry_certs = []
######################################################################
# First the router certs
try:
router_secrets_raw = subprocess.Popen('oc get secret router-certs -o yaml'.split(),
stdout=subprocess.PIPE)
router_ds = yaml.load(router_secrets_raw.communicate()[0])
router_c = router_ds['data']['tls.crt']
router_path = router_ds['metadata']['selfLink']
except TypeError:
# YAML couldn't load the result, this is not a master
pass
except OSError:
# The OC command doesn't exist here. Move along.
pass
else:
(cert_subject,
cert_expiry_date,
time_remaining) = load_and_handle_cert(router_c, now, base64decode=True)
expire_check_result = {
'cert_cn': cert_subject,
'path': router_path,
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
}
classify_cert(expire_check_result, now, time_remaining, expire_window, router_certs)
######################################################################
# Now for registry
try:
registry_secrets_raw = subprocess.Popen('oc get secret registry-certificates -o yaml'.split(),
stdout=subprocess.PIPE)
registry_ds = yaml.load(registry_secrets_raw.communicate()[0])
registry_c = registry_ds['data']['registry.crt']
registry_path = registry_ds['metadata']['selfLink']
except TypeError:
# YAML couldn't load the result, this is not a master
pass
except OSError:
# The OC command doesn't exist here. Move along.
pass
else:
(cert_subject,
cert_expiry_date,
time_remaining) = load_and_handle_cert(registry_c, now, base64decode=True)
expire_check_result = {
'cert_cn': cert_subject,
'path': registry_path,
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
}
classify_cert(expire_check_result, now, time_remaining, expire_window, registry_certs)
######################################################################
# /Check router/registry certs
######################################################################
res = tabulate_summary(ocp_certs, kubeconfigs, etcd_certs, router_certs, registry_certs)
msg = "Checked {count} total certificates. Expired/Warning/OK: {exp}/{warn}/{ok}. Warning window: {window} days".format(
count=res['total'],
exp=res['expired'],
warn=res['warning'],
ok=res['ok'],
window=int(module.params['warning_days']),
)
# By default we only return detailed information about expired or
# warning certificates. If show_all is true then we will print all
# the certificates examined.
if not module.params['show_all']:
check_results['ocp_certs'] = [crt for crt in ocp_certs if crt['health'] in ['expired', 'warning']]
check_results['kubeconfigs'] = [crt for crt in kubeconfigs if crt['health'] in ['expired', 'warning']]
check_results['etcd'] = [crt for crt in etcd_certs if crt['health'] in ['expired', 'warning']]
check_results['registry'] = [crt for crt in registry_certs if crt['health'] in ['expired', 'warning']]
check_results['router'] = [crt for crt in router_certs if crt['health'] in ['expired', 'warning']]
else:
check_results['ocp_certs'] = ocp_certs
check_results['kubeconfigs'] = kubeconfigs
check_results['etcd'] = etcd_certs
check_results['registry'] = registry_certs
check_results['router'] = router_certs
# Sort the final results to report in order of ascending safety
# time. That is to say, the certificates which will expire sooner
# will be at the front of the list and certificates which will
# expire later are at the end. Router and registry certs should be
# limited to just 1 result, so don't bother sorting those.
check_results['ocp_certs'] = sorted(check_results['ocp_certs'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining']))
check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining']))
check_results['etcd'] = sorted(check_results['etcd'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining']))
# This module will never change anything, but we might want to
# change the return code parameter if there is some catastrophic
# error we noticed earlier
module.exit_json(
check_results=check_results,
summary=res,
msg=msg,
rc=0,
changed=False
)
######################################################################
# It's just the way we do things in Ansible. So disable this warning
#
# pylint: disable=wrong-import-position,import-error
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| apache-2.0 | -7,811,337,554,961,977,000 | 38.629513 | 202 | 0.585921 | false |
nugget/home-assistant | homeassistant/components/light/demo.py | 7 | 4375 | """
Demo light platform that implements lights.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/demo/
"""
import random
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_HS_COLOR,
ATTR_WHITE_VALUE, SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, SUPPORT_EFFECT,
SUPPORT_COLOR, SUPPORT_WHITE_VALUE, Light)
LIGHT_COLORS = [
(56, 86),
(345, 75),
]
LIGHT_EFFECT_LIST = ['rainbow', 'none']
LIGHT_TEMPS = [240, 380]
SUPPORT_DEMO = (SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_EFFECT |
SUPPORT_COLOR | SUPPORT_WHITE_VALUE)
def setup_platform(hass, config, add_entities_callback, discovery_info=None):
"""Set up the demo light platform."""
add_entities_callback([
DemoLight(1, "Bed Light", False, True, effect_list=LIGHT_EFFECT_LIST,
effect=LIGHT_EFFECT_LIST[0]),
DemoLight(2, "Ceiling Lights", True, True,
LIGHT_COLORS[0], LIGHT_TEMPS[1]),
DemoLight(3, "Kitchen Lights", True, True,
LIGHT_COLORS[1], LIGHT_TEMPS[0])
])
class DemoLight(Light):
"""Representation of a demo light."""
def __init__(self, unique_id, name, state, available=False, hs_color=None,
ct=None, brightness=180, white=200, effect_list=None,
effect=None):
"""Initialize the light."""
self._unique_id = unique_id
self._name = name
self._state = state
self._hs_color = hs_color
self._ct = ct or random.choice(LIGHT_TEMPS)
self._brightness = brightness
self._white = white
self._effect_list = effect_list
self._effect = effect
self._available = True
@property
def should_poll(self) -> bool:
"""No polling needed for a demo light."""
return False
@property
def name(self) -> str:
"""Return the name of the light if any."""
return self._name
@property
def unique_id(self):
"""Return unique ID for light."""
return self._unique_id
@property
def available(self) -> bool:
"""Return availability."""
# This demo light is always available, but well-behaving components
# should implement this to inform Home Assistant accordingly.
return self._available
@property
def brightness(self) -> int:
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def hs_color(self) -> tuple:
"""Return the hs color value."""
return self._hs_color
@property
def color_temp(self) -> int:
"""Return the CT color temperature."""
return self._ct
@property
def white_value(self) -> int:
"""Return the white value of this light between 0..255."""
return self._white
@property
def effect_list(self) -> list:
"""Return the list of supported effects."""
return self._effect_list
@property
def effect(self) -> str:
"""Return the current effect."""
return self._effect
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return self._state
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_DEMO
def turn_on(self, **kwargs) -> None:
"""Turn the light on."""
self._state = True
if ATTR_HS_COLOR in kwargs:
self._hs_color = kwargs[ATTR_HS_COLOR]
if ATTR_COLOR_TEMP in kwargs:
self._ct = kwargs[ATTR_COLOR_TEMP]
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs[ATTR_BRIGHTNESS]
if ATTR_WHITE_VALUE in kwargs:
self._white = kwargs[ATTR_WHITE_VALUE]
if ATTR_EFFECT in kwargs:
self._effect = kwargs[ATTR_EFFECT]
# As we have disabled polling, we need to inform
# Home Assistant about updates in our state ourselves.
self.schedule_update_ha_state()
def turn_off(self, **kwargs) -> None:
"""Turn the light off."""
self._state = False
# As we have disabled polling, we need to inform
# Home Assistant about updates in our state ourselves.
self.schedule_update_ha_state()
| apache-2.0 | -879,086,031,955,375,000 | 28.560811 | 78 | 0.601143 | false |
Alwnikrotikz/smap-data | python/smap/drivers/obvius/bmo.py | 4 | 7648 | """
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Stephen Dawson-Haggerty <[email protected]>
"""
import sys
import re
import csv
import urllib
import datetime, time
import traceback
import sensordb
import auth
import obvius
from twisted.internet import reactor, threads, task
from twisted.internet.defer import DeferredSemaphore, Deferred
from smap import core
from smap.util import periodicSequentialCall
import smap.driver
import smap.iface.http.httputils as httputils
import smap.contrib.dtutil as dtutil
TIMEFMT = "%Y-%m-%d %H:%M:%S"
# to prevent killing their db, we make all driver instances acquire
# this semaphore before trying to download data
try:
active_reads
except NameError:
active_reads = DeferredSemaphore(3)
def make_field_idxs(type, header, location=None):
paths = [None]
map_ = sensordb.get_map(type, header=header, location=location)
for t in header[1:]:
paths.append(None)
for channel in map_['sensors'] + map_['meters']:
if t.strip().startswith(channel[0]):
paths[-1] = (channel[2], channel[3])
break
ddups = {}
for elt in paths:
if elt:
name = '-'.join(elt)
ddups[name] = ddups.get(name, 0) + 1
for k, v in ddups.iteritems():
if v > 1:
print "WARNING:", v, "matching channels for", k
print header
print paths
print ddups
return paths, map_
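# Roughly speaking (illustrative values): for a header like
# ['Time', 'Real Power (W)', 'Unmapped Column'] the returned `paths` is
# [None, ('<path>', '<point>'), None] - one entry per CSV column, holding the
# sMAP (path, point) tuple for columns that match a known channel in the map.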
class BMOLoader(smap.driver.SmapDriver):
def setup(self, opts):
self.url = opts['Url']
self.meter_type = opts['Metadata/Instrument/Model']
self.location = opts.get('Metadata/Location/Building', None)
self.rate = int(opts.get('Rate', 3600))
self.running = False
# if not sensordb.get_map(self.meter_type, ):
# raise SmapLoadError(self.meter_type + " is not a known obvius meter type")
self.push_hist = dtutil.now() - datetime.timedelta(hours=1)
self.added = False
self.set_metadata('/', {
'Extra/Driver' : 'smap.drivers.obvius.bmo.BMOLoader' })
print self.url, self.rate
def start(self):
# periodicSequentialCall(self.update).start(self.rate)
task.LoopingCall(self.update).start(self.rate)
def done(self, result):
self.running = False
active_reads.release()
def update(self, startdt=None, enddt=None):
if self.running:
return
self.startdt, self.enddt = startdt, enddt
self.running = True
d = active_reads.acquire()
# in the processing chain, we first open the page
d.addCallback(lambda _: threads.deferToThread(self.open_page))
# then read the first result set
d.addCallback(lambda _: threads.deferToThread(self.process))
# and add it to the outgoing data. this will chain additional
# processes and adds as necessary
d.addCallback(self.add)
# finally release the semaphore (even if we got an error)
d.addCallback(self.done)
# and consume the error
d.addErrback(self.done)
return d
def open_page(self):
if not self.startdt:
self.startdt = self.push_hist
if not self.enddt:
self.enddt = dtutil.now()
start, end = urllib.quote(dtutil.strftime_tz(self.startdt, TIMEFMT)), \
urllib.quote(dtutil.strftime_tz(self.enddt, TIMEFMT))
url = self.url % (start, end)
url += "&mnuStartMonth=%i&mnuStartDay=%i&mnuStartYear=%i" % \
(self.startdt.month,
self.startdt.day,
self.startdt.year)
url += "&mnuStartTime=%i%%3A%i" % (self.startdt.hour,
self.startdt.minute)
url += "&mnuEndMonth=%i&mnuEndDay=%i&mnuEndYear=%i" % \
(self.enddt.month,
self.enddt.day,
self.enddt.year)
url += "&mnuEndTime=%i%%3A%i" % (self.enddt.hour, self.enddt.minute)
print "loading", url
self.fp = httputils.load_http(url, as_fp=True, auth=auth.BMOAUTH)
if not self.fp:
raise core.SmapException("timeout!")
self.reader = csv.reader(self.fp, dialect='excel-tab')
header = self.reader.next()
if len(header) == 0:
print "Warning: no data from", self.url
raise core.SmapException("no data!")
try:
self.field_map, self.map = make_field_idxs(self.meter_type, header,
location=self.location)
except:
traceback.print_exc()
if not self.added:
self.added = True
for channel in self.map['sensors'] + self.map['meters']:
try:
self.add_timeseries('/%s/%s' % channel[2:4], channel[4], data_type='double')
self.set_metadata('/%s/%s' % channel[2:4], {
'Extra/ChannelName' : re.sub('\(.*\)', '', channel[0]).strip(),
})
except:
traceback.print_exc()
def process(self):
readcnt = 0
data = []
if self.reader == None:
return data
try:
for r in self.reader:
ts = dtutil.strptime_tz(r[0], TIMEFMT, tzstr='UTC')
if ts > self.push_hist:
self.push_hist = ts
ts = dtutil.dt2ts(ts)
data.append((ts, zip(self.field_map, r)))
readcnt += 1
if readcnt > 100:
return data
except Exception, e:
self.fp.close()
self.reader = None
raise e
self.fp.close()
self.reader = None
return data
def add(self, data):
if len(data) == 0:
return "DONE"
for ts, rec in data:
for descr, val in rec:
if descr == None: continue
try:
self._add('/' + '/'.join(descr), ts, float(val))
except ValueError:
pass
self.data = []
d = threads.deferToThread(self.process)
d.addCallback(self.add)
return d
| bsd-2-clause | 3,743,487,051,369,216,500 | 32.54386 | 96 | 0.592442 | false |
linuxmcu/ardupilot | Tools/autotest/quadplane.py | 29 | 4362 | # fly ArduPlane QuadPlane in SITL
from __future__ import print_function
import os
import pexpect
import shutil
from pymavlink import mavutil
from common import *
from pysim import util
# get location of scripts
testdir = os.path.dirname(os.path.realpath(__file__))
HOME_LOCATION = '-27.274439,151.290064,343,8.7'
MISSION = 'ArduPlane-Missions/Dalby-OBC2016.txt'
FENCE = 'ArduPlane-Missions/Dalby-OBC2016-fence.txt'
WIND = "0,180,0.2" # speed,direction,variance
homeloc = None
def fly_mission(mavproxy, mav, filename, fence, height_accuracy=-1):
"""Fly a mission from a file."""
print("Flying mission %s" % filename)
mavproxy.send('wp load %s\n' % filename)
mavproxy.expect('Flight plan received')
mavproxy.send('fence load %s\n' % fence)
mavproxy.send('wp list\n')
mavproxy.expect('Requesting [0-9]+ waypoints')
mavproxy.send('mode AUTO\n')
wait_mode(mav, 'AUTO')
if not wait_waypoint(mav, 1, 19, max_dist=60, timeout=1200):
return False
mavproxy.expect('DISARMED')
# wait for blood sample here
mavproxy.send('wp set 20\n')
mavproxy.send('arm throttle\n')
mavproxy.expect('ARMED')
if not wait_waypoint(mav, 20, 34, max_dist=60, timeout=1200):
return False
mavproxy.expect('DISARMED')
print("Mission OK")
return True
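# Note: fly_mission() flies waypoints 1-19, waits for the scripted disarm,
# then re-arms at waypoint 20 and completes waypoints 20-34 before the final
# disarm, matching the structure of the Dalby OBC2016 mission file.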
def fly_QuadPlane(binary, viewerip=None, use_map=False, valgrind=False, gdb=False):
"""Fly QuadPlane in SITL.
you can pass viewerip as an IP address to optionally send fg and
mavproxy packets too for local viewing of the flight in real time.
"""
global homeloc
options = '--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --streamrate=10'
if viewerip:
options += " --out=%s:14550" % viewerip
if use_map:
options += ' --map'
sitl = util.start_SITL(binary, model='quadplane', wipe=True, home=HOME_LOCATION, speedup=10,
defaults_file=os.path.join(testdir, 'default_params/quadplane.parm'), valgrind=valgrind, gdb=gdb)
mavproxy = util.start_MAVProxy_SITL('QuadPlane', options=options)
mavproxy.expect('Telemetry log: (\S+)')
logfile = mavproxy.match.group(1)
print("LOGFILE %s" % logfile)
buildlog = util.reltopdir("../buildlogs/QuadPlane-test.tlog")
print("buildlog=%s" % buildlog)
if os.path.exists(buildlog):
os.unlink(buildlog)
try:
os.link(logfile, buildlog)
except Exception:
pass
util.expect_setup_callback(mavproxy, expect_callback)
mavproxy.expect('Received [0-9]+ parameters')
expect_list_clear()
expect_list_extend([sitl, mavproxy])
print("Started simulator")
# get a mavlink connection going
try:
mav = mavutil.mavlink_connection('127.0.0.1:19550', robust_parsing=True)
except Exception as msg:
print("Failed to start mavlink connection on 127.0.0.1:19550" % msg)
raise
mav.message_hooks.append(message_hook)
mav.idle_hooks.append(idle_hook)
failed = False
e = 'None'
try:
print("Waiting for a heartbeat with mavlink protocol %s" % mav.WIRE_PROTOCOL_VERSION)
mav.wait_heartbeat()
print("Waiting for GPS fix")
mav.recv_match(condition='VFR_HUD.alt>10', blocking=True)
mav.wait_gps_fix()
while mav.location().alt < 10:
mav.wait_gps_fix()
homeloc = mav.location()
print("Home location: %s" % homeloc)
# wait for EKF and GPS checks to pass
wait_seconds(mav, 30)
mavproxy.send('arm throttle\n')
mavproxy.expect('ARMED')
if not fly_mission(mavproxy, mav,
os.path.join(testdir, "ArduPlane-Missions/Dalby-OBC2016.txt"),
os.path.join(testdir, "ArduPlane-Missions/Dalby-OBC2016-fence.txt")):
print("Failed mission")
failed = True
except pexpect.TIMEOUT as e:
print("Failed with timeout")
failed = True
mav.close()
util.pexpect_close(mavproxy)
util.pexpect_close(sitl)
valgrind_log = util.valgrind_log_filepath(binary=binary, model='quadplane')
if os.path.exists(valgrind_log):
os.chmod(valgrind_log, 0o644)
shutil.copy(valgrind_log, util.reltopdir("../buildlogs/QuadPlane-valgrind.log"))
if failed:
print("FAILED: %s" % e)
return False
return True
| gpl-3.0 | -7,951,532,745,462,743,000 | 31.311111 | 123 | 0.6442 | false |
willm/DDEXUI | ddex/party.py | 1 | 1146 | import xml.etree.cElementTree as ET
from DDEXUI.ddex.enum import enum
PartyType = enum(MessageSender=1, MessageRecipient=2)
class Party:
def __init__(self, party_id, name, party_type=PartyType.MessageSender):
self.party_id = party_id
self.name = name
self.party_type = party_type
def write(self):
party = ET.Element(PartyType.reverse_mapping[self.party_type])
        party_id = ET.SubElement(party, 'PartyId')
        party_id.text = self.party_id
        name = ET.SubElement(party, 'PartyName')
        full_name = ET.SubElement(name, 'FullName')
        full_name.text = self.name
return party
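    # write() returns an element shaped like (values illustrative):
    #   <MessageSender>
    #     <PartyId>PADPIDA0000000001</PartyId>
    #     <PartyName><FullName>Example Label</FullName></PartyName>
    #   </MessageSender>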
def __eq__(self, other):
if(isinstance(other, Party)):
return self.name == other.name and self.party_id == other.party_id and self.party_type == other.party_type
return NotImplemented
def __str__(self):
return str.join(":",[self.party_id,self.name,PartyType.reverse_mapping[self.party_type]])
def __ne__(self, other):
result = self.__eq__(other)
if(result is NotImplemented):
return result
return not result
| gpl-2.0 | 991,767,400,311,403,100 | 31.742857 | 118 | 0.628272 | false |
chenrenyi/flask | tests/test_regression.py | 144 | 2467 | # -*- coding: utf-8 -*-
"""
tests.regression
~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests regressions.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import os
import gc
import sys
import flask
import threading
from werkzeug.exceptions import NotFound
_gc_lock = threading.Lock()
class assert_no_leak(object):
    def __enter__(self):
        gc.disable()
        _gc_lock.acquire()
        loc = flask._request_ctx_stack._local
        # Force Python to track this dictionary at all times.
        # This is necessary since Python only starts tracking
        # dicts if they contain mutable objects. It's a horrible,
        # horrible hack but makes this kinda testable.
        loc.__storage__['FOOO'] = [1, 2, 3]
        gc.collect()
        self.old_objects = len(gc.get_objects())
    def __exit__(self, exc_type, exc_value, tb):
        if not hasattr(sys, 'getrefcount'):
            gc.collect()
        new_objects = len(gc.get_objects())
        if new_objects > self.old_objects:
            pytest.fail('Example code leaked')
        _gc_lock.release()
        gc.enable()
def test_memory_consumption():
    app = flask.Flask(__name__)
    @app.route('/')
    def index():
        return flask.render_template('simple_template.html', whiskey=42)
    def fire():
        with app.test_client() as c:
            rv = c.get('/')
            assert rv.status_code == 200
            assert rv.data == b'<h1>42</h1>'
    # Trigger caches
    fire()
    # This test only works on CPython 2.7.
    if sys.version_info >= (2, 7) and \
            not hasattr(sys, 'pypy_translation_info'):
        with assert_no_leak():
            for x in range(10):
                fire()
def test_safe_join_toplevel_pardir():
    from flask.helpers import safe_join
    with pytest.raises(NotFound):
        safe_join('/foo', '..')
def test_aborting():
    class Foo(Exception):
        whatever = 42
    app = flask.Flask(__name__)
    app.testing = True
    @app.errorhandler(Foo)
    def handle_foo(e):
        return str(e.whatever)
    @app.route('/')
    def index():
        raise flask.abort(flask.redirect(flask.url_for('test')))
    @app.route('/test')
    def test():
        raise Foo()
    with app.test_client() as c:
        rv = c.get('/')
        assert rv.headers['Location'] == 'http://localhost/test'
        rv = c.get('/test')
        assert rv.data == b'42'
| bsd-3-clause | 2,837,938,003,989,210,600 | 22.951456 | 72 | 0.566275 | false |
mariansoban/ardupilot | libraries/AP_HAL_ChibiOS/hwdef/scripts/dma_parse.py | 44 | 2704 | #!/usr/bin/env python
'''
extract DMA mapping tables from an STM32 datasheet
This assumes a csv file extracted from the datasheet using tabula:
https://github.com/tabulapdf/tabula
'''
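# usage sketch (the csv file name here is only an example):
#   ./dma_parse.py stm32f427_dma_table.csv > dma_map.py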
import sys, csv, os
def parse_dma_table(fname, table):
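    # the datasheet tables list streams 0-7 across the columns and channels down the rows;
    # a wrap back to a lower channel number means we have moved on to the next DMA controller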
    dma_num = 1
    csvt = csv.reader(open(fname, 'rb'))
    i = 0
    last_channel = -1
    for row in csvt:
        if len(row) > 1 and row[1].startswith('Channel '):
            row = row[1:]
        if not row[0].startswith('Channel '):
            continue
        channel = int(row[0].split(' ')[1])
        if channel < last_channel:
            dma_num += 1
        last_channel = channel
        for stream in range(8):
            s = row[stream+1]
            s = s.replace('_\r', '_')
            s = s.replace('\r_', '_')
            if s == '-':
                continue
            keys = s.split()
            for k in keys:
                brace = k.find('(')
                if brace != -1:
                    k = k[:brace]
                if k not in table:
                    table[k] = []
                table[k] += [(dma_num, stream, channel)]
def error(str):
    '''show an error and exit'''
    print("Error: " + str)
    sys.exit(1)
def check_full_table(table):
    '''check the table is not missing rows or columns
    we should have at least one entry in every row and one entry in every column of each dma table
    '''
    stream_mask = [0, 0]
    channel_mask = [0, 0]
    for k in table:
        for v in table[k]:
            (engine, stream, channel) = v
            if engine > 2 or engine < 1:
                error("Bad entry for %s: %s" % (k, v))
            stream_mask[engine-1] |= 1 << stream
            channel_mask[engine-1] |= 1 << channel
    for i in range(2):
        for c in range(8):
            if not ((1 << c) & channel_mask[i]):
                error("Missing channel %u for dma table %u" % (c, i))
            if not ((1 << c) & stream_mask[i]):
                error("Missing stream %u for dma table %u" % (c, i))
table = {}
if len(sys.argv) != 2:
    print("Error: expected a single CSV file argument")
    sys.exit(1)
parse_dma_table(sys.argv[1], table)
check_full_table(table)
sys.stdout.write("DMA_Map = {\n");
sys.stdout.write('\t# format is (DMA_TABLE, StreamNum, Channel)\n')
sys.stdout.write('\t# extracted from %s\n' % os.path.basename(sys.argv[1]))
for k in sorted(table.iterkeys()):
    s = '"%s"' % k
    sys.stdout.write('\t%-10s\t:\t[' % s)
    for i in range(len(table[k])):
        sys.stdout.write("(%u,%u,%u)" % (table[k][i][0], table[k][i][1], table[k][i][2]))
        if i < len(table[k])-1:
            sys.stdout.write(",")
    sys.stdout.write("],\n")
sys.stdout.write("}\n");
| gpl-3.0 | 5,826,547,215,438,189,000 | 29.382022 | 100 | 0.514423 | false |