repo_name (stringlengths 5-100) | path (stringlengths 4-299) | copies (stringclasses, 990 values) | size (stringlengths 4-7) | content (stringlengths 666-1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
PhonologicalCorpusTools/PyAnnotationGraph | polyglotdb/query/base/query.py | 1 | 15416 | from .results import BaseQueryResults
from .func import Count
from ..base.helper import key_for_cypher, value_for_cypher
class BaseQuery(object):
query_template = '''{match}
{where}
{optional_match}
{with}
{return}'''
delete_template = '''DETACH DELETE {alias}'''
aggregate_template = '''RETURN {aggregates}{order_by}'''
distinct_template = '''RETURN {columns}{order_by}{offset}{limit}'''
set_label_template = '''{alias} {value}'''
remove_label_template = '''{alias}{value}'''
set_property_template = '''{alias}.{attribute} = {value}'''
def __init__(self, corpus, to_find):
self.corpus = corpus
self.to_find = to_find
self._criterion = []
self._columns = []
self._hidden_columns = []
self._order_by = []
self._group_by = []
self._aggregate = []
self._preload = []
self._cache = []
self._delete = False
self._set_labels = []
self._remove_labels = []
self._set_properties = {}
self._limit = None
self._offset = None
self.call_back = None
self.stop_check = None
def cache(self):
raise NotImplementedError
def required_nodes(self):
ns = {self.to_find}
tf_type = type(self.to_find)
for c in self._criterion:
ns.update(x for x in c.nodes if type(x) is not tf_type)
for c in self._columns + self._hidden_columns + self._aggregate + self._preload + self._cache:
ns.update(x for x in c.nodes if type(x) is not tf_type and x.non_optional)
for c, _ in self._order_by:
ns.update(x for x in c.nodes if type(x) is not tf_type and x.non_optional)
return ns
def optional_nodes(self):
required_nodes = self.required_nodes()
ns = set()
tf_type = type(self.to_find)
for c in self._columns + self._aggregate + self._preload + self._cache:
ns.update(x for x in c.nodes if type(x) is not tf_type and x not in required_nodes)
for c, _ in self._order_by:
ns.update(x for x in c.nodes if type(x) is not tf_type and x not in required_nodes)
return sorted(ns)
def clear_columns(self):
"""
Remove any columns specified. The default columns for any query
are the id of the token and the label of the type.
"""
self._columns = []
return self
def offset(self, number):
self._offset = number
return self
def filter(self, *args):
"""
Apply one or more filters to a query.
"""
from .elements import EqualClauseElement
for a in args:
for c in self._criterion:
if isinstance(c, EqualClauseElement) and isinstance(a, EqualClauseElement) and \
c.attribute.node == a.attribute.node and c.attribute.label == a.attribute.label:
c.value = a.value
break
else:
self._criterion.append(a)
return self
def columns(self, *args):
"""
Add one or more additional columns to the results.
Columns should be :class:`~polyglotdb.query.base.Attribute` objects.
"""
column_set = set(self._columns)
for c in args:
if c in column_set:
continue
else:
self._columns.append(c)
# column_set.add(c) # FIXME failing tests
return self
def group_by(self, *args):
"""
Specify one or more fields for how aggregates should be grouped.
"""
self._group_by.extend(args)
return self
def order_by(self, field, descending=False):
"""
Specify how the results of the query should be ordered.
Parameters
----------
field : Attribute
Determines what the ordering should be based on
descending : bool, defaults to False
Whether the order should be descending
"""
self._order_by.append((field, descending))
return self
def to_csv(self, path):
"""
Same as ``all``, but the results of the query are output to the
specified path as a CSV file.
"""
results = self.all()
if self.stop_check is not None and self.stop_check():
return
results.to_csv(path)
def count(self):
"""
Returns the number of rows in the query.
"""
self._aggregate = [Count()]
cypher = self.cypher()
value = self.corpus.execute_cypher(cypher, **self.cypher_params())
self._aggregate = []
return value.single().values()[0]
def aggregate(self, *args):
"""
Aggregate the results of the query by a grouping factor or overall.
Not specifying a ``group_by`` in the query will result in a single
result for the aggregate from the whole query.
"""
self._aggregate.extend(args)
cypher = self.cypher()
value = self.corpus.execute_cypher(cypher, **self.cypher_params())
if self._group_by or any(not x.collapsing for x in self._aggregate):
return list(value)
elif len(self._aggregate) > 1:
return list(value)[0]
else:
return value.single().values()[0]
def preload(self, *args):
self._preload.extend(args)
return self
def limit(self, limit):
""" sets object limit to parameter limit """
self._limit = limit
return self
def to_json(self):
data = {'corpus_name': self.corpus.corpus_name,
'filters': [x.for_json() for x in self._criterion],
'columns': [x.for_json() for x in self._columns]}
return data
def cypher(self):
"""
Generates a Cypher statement based on the query.
"""
kwargs = {'match': '',
'optional_match': '',
'where': '',
'with': '',
'return': ''}
# generate initial match strings
match_strings = set()
withs = set()
nodes = self.required_nodes()
for node in nodes:
if node.has_subquery:
continue
match_strings.add(node.for_match())
withs.update(node.withs)
kwargs['match'] = 'MATCH ' + ',\n'.join(match_strings)
# generate main filters
properties = []
for c in self._criterion:
if c.in_subquery:
continue
properties.append(c.for_cypher())
if properties:
kwargs['where'] += 'WHERE ' + '\nAND '.join(properties)
optional_nodes = self.optional_nodes()
optional_match_strings = []
for node in optional_nodes:
if node.has_subquery:
continue
optional_match_strings.append(node.for_match())
withs.update(node.withs)
if optional_match_strings:
s = ''
for i, o in enumerate(optional_match_strings):
s += 'OPTIONAL MATCH ' + o + '\n'
kwargs['optional_match'] = s
# generate subqueries
with_statements = ['WITH ' + ', '.join(withs)]
for node in nodes:
if not node.has_subquery:
continue
statement = node.subquery(withs, self._criterion)
with_statements.append(statement)
withs.update(node.withs)
for node in optional_nodes:
if not node.has_subquery:
continue
statement = node.subquery(withs, self._criterion, optional=True)
with_statements.append(statement)
withs.update(node.withs)
kwargs['with'] = '\n'.join(with_statements)
kwargs['return'] = self.generate_return()
cypher = self.query_template.format(**kwargs)
return cypher
def create_subset(self, label):
self._set_labels.append(label)
self.corpus.execute_cypher(self.cypher(), **self.cypher_params())
self._set_labels = []
def remove_subset(self, label):
self._remove_labels.append(label)
self.corpus.execute_cypher(self.cypher(), **self.cypher_params())
self._remove_labels = []
def delete(self):
"""
Remove the results of a query from the graph. CAUTION: this is
irreversible.
"""
self._delete = True
self.corpus.execute_cypher(self.cypher(), **self.cypher_params())
def set_properties(self, **kwargs):
self._set_properties = {k: v for k,v in kwargs.items()}
print(self.cypher(), self.cypher_params())
self.corpus.execute_cypher(self.cypher(), **self.cypher_params())
self._set_properties = {}
def all(self):
return BaseQueryResults(self)
def get(self):
r = BaseQueryResults(self)
if len(r) > 1:
raise Exception("Can't use get on query with more than one result.")
return r[0]
def cypher_params(self):
from ..base.complex import ComplexClause
from ..base.elements import SubsetClauseElement, NotSubsetClauseElement
from ..base.attributes import NodeAttribute
params = {}
for c in self._criterion:
if isinstance(c, ComplexClause):
params.update(c.generate_params())
elif isinstance(c, (SubsetClauseElement, NotSubsetClauseElement)):
pass
else:
try:
if not isinstance(c.value, NodeAttribute):
params[c.cypher_value_string()[1:-1].replace('`', '')] = c.value
except AttributeError:
pass
return params
def generate_return(self):
"""
        Generates the final return statement for the query, delegating to
        whichever of the ``_generate_*_return`` methods matches the current
        state of the query object (delete, cache, set/remove, aggregate or
        distinct).
Returns
-------
str
cypher formatted string
"""
if self._delete:
statement = self._generate_delete_return()
elif self._cache:
statement = self._generate_cache_return()
elif self._set_properties:
statement = self._generate_set_properties_return()
elif self._set_labels:
statement = self._generate_set_labels_return()
elif self._remove_labels:
statement = self._generate_remove_labels_return()
elif self._aggregate:
statement = self._generate_aggregate_return()
else:
statement = self._generate_distinct_return()
return statement
def _generate_delete_return(self):
kwargs = {}
kwargs['alias'] = self.to_find.alias
return_statement = self.delete_template.format(**kwargs)
return return_statement
def _generate_cache_return(self):
properties = []
for c in self._cache:
kwargs = {'alias': c.node.cache_alias,
'attribute': c.output_alias,
'value': c.for_cypher()
}
if c.label == 'position':
kwargs['alias'] = self.to_find.alias
set_string = self.set_property_template.format(**kwargs)
properties.append(set_string)
return 'SET {}'.format(', '.join(properties))
def _generate_remove_labels_return(self):
remove_label_strings = []
kwargs = {}
kwargs['alias'] = self.to_find.alias
kwargs['value'] = ':' + ':'.join(map(key_for_cypher, self._remove_labels))
remove_label_strings.append(self.remove_label_template.format(**kwargs))
return_statement = ''
if remove_label_strings:
if return_statement:
return_statement += '\nWITH {alias}\n'.format(alias=self.to_find.alias)
return_statement += '\nREMOVE ' + ', '.join(remove_label_strings)
return return_statement
def _generate_set_properties_return(self):
set_strings = []
for k, v in self._set_properties.items():
if v is None:
v = 'NULL'
else:
v = value_for_cypher(v)
s = self.set_property_template.format(alias=self.to_find.alias, attribute=k, value=v)
set_strings.append(s)
return 'SET ' + ', '.join(set_strings)
def _generate_set_labels_return(self):
set_label_strings = []
kwargs = {}
kwargs['alias'] = self.to_find.alias
kwargs['value'] = ':' + ':'.join(map(key_for_cypher, self._set_labels))
set_label_strings.append(self.set_label_template.format(**kwargs))
return 'SET ' + ', '.join(set_label_strings)
def _generate_aggregate_return(self):
kwargs = {'order_by': self._generate_order_by(),
'limit': self._generate_limit()}
properties = []
for g in self._group_by:
properties.append(g.aliased_for_output())
if any(not x.collapsing for x in self._aggregate):
for c in self._columns:
properties.append(c.aliased_for_output())
if len(self._order_by) == 0 and len(self._group_by) > 0:
self._order_by.append((self._group_by[0], False))
for a in self._aggregate:
properties.append(a.aliased_for_output())
kwargs['aggregates'] = ', '.join(properties)
return self.aggregate_template.format(**kwargs)
def _generate_distinct_return(self):
kwargs = {'order_by': self._generate_order_by(),
'limit': self._generate_limit(),
'offset': self._generate_offset()}
properties = []
for c in self._columns + self._hidden_columns:
properties.append(c.aliased_for_output())
if not properties:
properties = self.to_find.withs
for a in self._preload:
properties.extend(a.withs)
kwargs['columns'] = ', '.join(properties)
return self.distinct_template.format(**kwargs)
def _generate_limit(self):
if self._limit is not None:
return '\nLIMIT {}'.format(self._limit)
return ''
def _generate_offset(self):
if self._offset is not None:
return '\nSKIP {}'.format(self._offset)
return ''
def _generate_order_by(self):
properties = []
for c in self._order_by:
ac_set = set(self._columns)
gb_set = set(self._group_by)
h_c = hash(c[0])
for col in ac_set:
if h_c == hash(col):
element = col.for_cypher()
break
else:
for col in gb_set:
if h_c == hash(col):
element = col.for_cypher()
break
else:
element = c[0].for_cypher()
# query.columns(c[0])
if c[1]:
element += ' DESC'
properties.append(element)
if properties:
return '\nORDER BY ' + ', '.join(properties)
return ''
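def example_phone_query(corpus, phone):
    """Illustrative sketch only, not part of the original module: shows how the
    chaining API of BaseQuery above is typically used. The ``corpus`` and
    ``phone`` objects (and their ``label``/``duration`` attributes) are assumed
    stand-ins for real polyglotdb annotation nodes."""
    q = BaseQuery(corpus, phone)
    # filter, project, order and limit through the fluent interface defined above
    q = q.filter(phone.label == 'aa')
    q = q.columns(phone.label, phone.duration)
    q = q.order_by(phone.duration, descending=True).limit(10)
    return q.cypher(), q.cypher_params()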
| mit | 7,027,644,700,880,472,000 | 32.733042 | 137 | 0.54411 | false |
tigersirvine/occtigerscricket | django/core/management/commands/reset.py | 78 | 2540 | from optparse import make_option
from django.core.management.base import AppCommand, CommandError
from django.core.management.color import no_style
from django.core.management.sql import sql_reset
from django.db import connections, transaction, DEFAULT_DB_ALIAS
class Command(AppCommand):
option_list = AppCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to reset. '
'Defaults to the "default" database.'),
)
help = "Executes ``sqlreset`` for the given app(s) in the current database."
args = '[appname ...]'
output_transaction = True
def handle_app(self, app, **options):
# This command breaks a lot and should be deprecated
import warnings
warnings.warn(
'This command has been deprecated. The command ``flush`` can be used to delete everything. You can also use ALTER TABLE or DROP TABLE statements manually.',
DeprecationWarning
)
using = options.get('database')
connection = connections[using]
app_name = app.__name__.split('.')[-2]
self.style = no_style()
sql_list = sql_reset(app, self.style, connection)
if options.get('interactive'):
confirm = raw_input("""
You have requested a database reset.
This will IRREVERSIBLY DESTROY any data for
the "%s" application in the database "%s".
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % (app_name, connection.settings_dict['NAME']))
else:
confirm = 'yes'
if confirm == 'yes':
try:
cursor = connection.cursor()
for sql in sql_list:
cursor.execute(sql)
except Exception, e:
transaction.rollback_unless_managed()
raise CommandError("""Error: %s couldn't be reset. Possible reasons:
* The database isn't running or isn't configured correctly.
* At least one of the database tables doesn't exist.
* The SQL was invalid.
Hint: Look at the output of 'django-admin.py sqlreset %s'. That's the SQL this command wasn't able to run.
The full error: %s""" % (app_name, app_name, e))
transaction.commit_unless_managed()
else:
print "Reset cancelled."
| bsd-3-clause | -446,280,103,695,910,100 | 39.967742 | 168 | 0.633858 | false |
sheadovas/tools | misc/plotter.py | 1 | 1780 | #!/usr/bin/python
# created by shead
import sys
import numpy as np
import matplotlib.pyplot as plt
import pylab
"""
USAGE
============
./plotter.py [log]
./plotter.py my_log.log
REQUIRED DEPENDENCIES
============
* Python2
* Matplot http://matplotlib.org/users/installing.html
FILE FORMAT
============
[iteration] [amount_of_cmp] [amount_of_swaps]
...
EXAMPLE FILE
============
10 1 2
20 30 121
"""
def load_data_from_file(filename, data_size, data_cmp, data_swp):
with open(filename, 'r') as f:
for line in f:
raw = line.split()
data_size.append(int(raw[0]))
data_cmp.append(int(raw[1]))
data_swp.append(int(raw[2]))
# func from docs
def autolabel(rects, ax):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2.0, 1.05*height,
'%d' % int(height),
ha='center', va='bottom')
def main(argv):
if len(argv) != 2:
print 'USAGE: plotter [path_to_log]'
sys.exit(1)
data_size = []
data_cmp = []
data_swp = []
load_data_from_file(argv[1], data_size, data_cmp, data_swp)
# plot
N = len(data_size)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, data_cmp, width, color='r')
rects2 = ax.bar(ind + width, data_swp, width, color='y')
# add some text for labels, title and axes ticks
ax.set_ylabel('Values')
title = argv[1].split('.')[0]
ax.set_title(title)
#ax.set_xticks(ind + width)
#x.set_xticklabels(data_size)
ax.legend((rects1[0], rects2[0]), ('cmp', 'swp'))
#autolabel(rects1, ax)
#autolabel(rects2, ax)
fname = '%s.png' % (title)
pylab.savefig(fname, dpi=333)
print 'Saved to %s' % fname
if __name__ == "__main__":
main(sys.argv) | mit | -7,161,643,909,423,687,000 | 15.64486 | 65 | 0.620225 | false |
compmech/meshless | meshless/espim/plate2d_add_k0s.py | 1 | 1946 | from meshless.espim.plate2d_add_k0s_cell_based import add_k0s as add_k0s_cell
from meshless.espim.plate2d_add_k0s_cell_based_no_smoothing import add_k0s as add_k0s_cell_no_smoothing
from meshless.espim.plate2d_add_k0s_edge_based import add_k0s as add_k0s_edge
def add_k0s(k0, mesh, prop_from_node, method='cell-based', alpha=0.08,
maxl_from_area=False):
"""Add the transverse shear stiffness to an existing consitutive stiffness
matrix
The transverse shear stiffness is computed using the Discrete Shear Gap
method, with a correction that uses parameter `alpha`
Parameters
----------
k0 : (N, N) array-like
Existing stiffness matrix. This object is modified in-place
mesh : :class:`pyNastran.bdf.BDF` object
The object must have the proper edge references as those returned by
:func:`.read_mesh` or :func:`.read_delaunay`
prop_from_node : bool
If the constitutive properties are assigned per node. Otherwise they
are considered assigned per element
method : str, optional
The smoothing method for the transverse shear
alpha : float, optional
Positive constant used in the correction applied to the transverse
shear stiffness
maxl_from_area : bool, optional
        If maxl, used in Lyly's formula, should be sqrt(area). It uses the
maximum edge length otherwise.
"""
#alpha between 0. and 0.6, according to studies of Lyly et al.
if method == 'cell-based':
return add_k0s_cell(k0, mesh, prop_from_node, alpha=alpha, maxl_from_area=maxl_from_area)
elif method == 'cell-based-no-smoothing':
return add_k0s_cell_no_smoothing(k0, mesh, prop_from_node, alpha=alpha, maxl_from_area=maxl_from_area)
elif method == 'edge-based':
return add_k0s_edge(k0, mesh, prop_from_node, alpha=alpha, maxl_from_area=maxl_from_area)
else:
raise ValueError('Invalid method')
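def _example_add_shear(k0, mesh):
    # Illustrative sketch only, not part of the original module: ``k0`` and
    # ``mesh`` are assumed to come from the rest of meshless (e.g. a stiffness
    # matrix and a mesh returned by read_mesh); the call simply exercises the
    # signature documented above with its default correction parameter.
    return add_k0s(k0, mesh, prop_from_node=False, method='cell-based',
                   alpha=0.08, maxl_from_area=False)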
| bsd-2-clause | -908,895,335,671,644,200 | 40.404255 | 110 | 0.692703 | false |
mrunge/horizon | openstack_dashboard/dashboards/admin/info/tables.py | 11 | 6531 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import template
from django.template import defaultfilters as filters
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from horizon.utils import filters as utils_filters
SERVICE_ENABLED = "enabled"
SERVICE_DISABLED = "disabled"
SERVICE_STATUS_DISPLAY_CHOICES = (
(SERVICE_ENABLED, _("Enabled")),
(SERVICE_DISABLED, _("Disabled")),
)
class ServiceFilterAction(tables.FilterAction):
filter_field = 'type'
def filter(self, table, services, filter_string):
q = filter_string.lower()
def comp(service):
attr = getattr(service, self.filter_field, '')
if attr is not None and q in attr.lower():
return True
return False
return filter(comp, services)
class SubServiceFilterAction(ServiceFilterAction):
filter_field = 'binary'
def get_stats(service):
return template.loader.render_to_string('admin/services/_stats.html',
{'service': service})
def get_status(service):
# if not configured in this region, neither option makes sense
if service.host:
return SERVICE_ENABLED if not service.disabled else SERVICE_DISABLED
return None
class ServicesTable(tables.DataTable):
id = tables.Column('id', hidden=True)
name = tables.Column("name", verbose_name=_('Name'))
service_type = tables.Column('__unicode__', verbose_name=_('Service'))
host = tables.Column('host', verbose_name=_('Host'))
status = tables.Column(get_status,
verbose_name=_('Status'),
status=True,
display_choices=SERVICE_STATUS_DISPLAY_CHOICES)
class Meta:
name = "services"
verbose_name = _("Services")
table_actions = (ServiceFilterAction,)
multi_select = False
status_columns = ["status"]
def get_available(zone):
return zone.zoneState['available']
def get_nova_agent_status(agent):
template_name = 'admin/info/_cell_status.html'
context = {
'status': agent.status,
'disabled_reason': agent.disabled_reason
}
return template.loader.render_to_string(template_name, context)
class NovaServicesTable(tables.DataTable):
binary = tables.Column("binary", verbose_name=_('Name'))
host = tables.Column('host', verbose_name=_('Host'))
zone = tables.Column('zone', verbose_name=_('Zone'))
status = tables.Column(get_nova_agent_status, verbose_name=_('Status'))
state = tables.Column('state', verbose_name=_('State'),
filters=(filters.title,))
updated_at = tables.Column('updated_at',
verbose_name=pgettext_lazy(
'Time since the last update',
u'Last Updated'),
filters=(utils_filters.parse_isotime,
filters.timesince))
def get_object_id(self, obj):
return "%s-%s-%s" % (obj.binary, obj.host, obj.zone)
class Meta:
name = "nova_services"
verbose_name = _("Compute Services")
table_actions = (SubServiceFilterAction,)
multi_select = False
class CinderServicesTable(tables.DataTable):
binary = tables.Column("binary", verbose_name=_('Name'))
host = tables.Column('host', verbose_name=_('Host'))
zone = tables.Column('zone', verbose_name=_('Zone'))
status = tables.Column('status', verbose_name=_('Status'),
filters=(filters.title, ))
state = tables.Column('state', verbose_name=_('State'),
filters=(filters.title, ))
updated_at = tables.Column('updated_at',
verbose_name=pgettext_lazy(
'Time since the last update',
u'Last Updated'),
filters=(utils_filters.parse_isotime,
filters.timesince))
def get_object_id(self, obj):
return "%s-%s-%s" % (obj.binary, obj.host, obj.zone)
class Meta:
name = "cinder_services"
verbose_name = _("Block Storage Services")
table_actions = (SubServiceFilterAction,)
multi_select = False
class NetworkAgentsFilterAction(tables.FilterAction):
def filter(self, table, agents, filter_string):
q = filter_string.lower()
def comp(agent):
if q in agent.agent_type.lower():
return True
return False
return filter(comp, agents)
def get_network_agent_status(agent):
if agent.admin_state_up:
return _('Enabled')
return _('Disabled')
def get_network_agent_state(agent):
if agent.alive:
return _('Up')
return _('Down')
class NetworkAgentsTable(tables.DataTable):
agent_type = tables.Column('agent_type', verbose_name=_('Type'))
binary = tables.Column("binary", verbose_name=_('Name'))
host = tables.Column('host', verbose_name=_('Host'))
status = tables.Column(get_network_agent_status, verbose_name=_('Status'))
state = tables.Column(get_network_agent_state, verbose_name=_('State'))
heartbeat_timestamp = tables.Column('heartbeat_timestamp',
verbose_name=pgettext_lazy(
'Time since the last update',
u'Last Updated'),
filters=(utils_filters.parse_isotime,
filters.timesince))
def get_object_id(self, obj):
return "%s-%s" % (obj.binary, obj.host)
class Meta:
name = "network_agents"
verbose_name = _("Network Agents")
table_actions = (NetworkAgentsFilterAction,)
multi_select = False
| apache-2.0 | -279,604,993,973,928,320 | 33.739362 | 78 | 0.593324 | false |
dcosentino/edx-platform | common/lib/xmodule/xmodule/modulestore/tests/test_modulestore_settings.py | 41 | 8713 | """
Tests for testing the modulestore settings migration code.
"""
import copy
import ddt
from tempfile import mkdtemp
from unittest import TestCase
from xmodule.modulestore.modulestore_settings import (
convert_module_store_setting_if_needed,
update_module_store_settings,
get_mixed_stores,
)
@ddt.ddt
class ModuleStoreSettingsMigration(TestCase):
"""
Tests for the migration code for the module store settings
"""
OLD_CONFIG = {
"default": {
"ENGINE": "xmodule.modulestore.xml.XMLModuleStore",
"OPTIONS": {
"data_dir": "directory",
"default_class": "xmodule.hidden_module.HiddenDescriptor",
},
"DOC_STORE_CONFIG": {},
}
}
OLD_CONFIG_WITH_DIRECT_MONGO = {
"default": {
"ENGINE": "xmodule.modulestore.mongo.MongoModuleStore",
"OPTIONS": {
"collection": "modulestore",
"db": "edxapp",
"default_class": "xmodule.hidden_module.HiddenDescriptor",
"fs_root": mkdtemp(),
"host": "localhost",
"password": "password",
"port": 27017,
"render_template": "edxmako.shortcuts.render_to_string",
"user": "edxapp"
},
"DOC_STORE_CONFIG": {},
}
}
OLD_MIXED_CONFIG_WITH_DICT = {
"default": {
"ENGINE": "xmodule.modulestore.mixed.MixedModuleStore",
"OPTIONS": {
"mappings": {},
"stores": {
"an_old_mongo_store": {
"DOC_STORE_CONFIG": {},
"ENGINE": "xmodule.modulestore.mongo.MongoModuleStore",
"OPTIONS": {
"collection": "modulestore",
"db": "test",
"default_class": "xmodule.hidden_module.HiddenDescriptor",
}
},
"default": {
"ENGINE": "the_default_store",
"OPTIONS": {
"option1": "value1",
"option2": "value2"
},
"DOC_STORE_CONFIG": {}
},
"xml": {
"ENGINE": "xmodule.modulestore.xml.XMLModuleStore",
"OPTIONS": {
"data_dir": "directory",
"default_class": "xmodule.hidden_module.HiddenDescriptor"
},
"DOC_STORE_CONFIG": {}
}
}
}
}
}
ALREADY_UPDATED_MIXED_CONFIG = {
'default': {
'ENGINE': 'xmodule.modulestore.mixed.MixedModuleStore',
'OPTIONS': {
'mappings': {},
'stores': [
{
'NAME': 'split',
'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore',
'DOC_STORE_CONFIG': {},
'OPTIONS': {
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'fs_root': "fs_root",
'render_template': 'edxmako.shortcuts.render_to_string',
}
},
{
'NAME': 'draft',
'ENGINE': 'xmodule.modulestore.mongo.draft.DraftModuleStore',
'DOC_STORE_CONFIG': {},
'OPTIONS': {
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'fs_root': "fs_root",
'render_template': 'edxmako.shortcuts.render_to_string',
}
},
]
}
}
}
def assertStoreValuesEqual(self, store_setting1, store_setting2):
"""
Tests whether the fields in the given store_settings are equal.
"""
store_fields = ["OPTIONS", "DOC_STORE_CONFIG"]
for field in store_fields:
self.assertEqual(store_setting1[field], store_setting2[field])
def assertMigrated(self, old_setting):
"""
Migrates the given setting and checks whether it correctly converted
to an ordered list of stores within Mixed.
"""
# pass a copy of the old setting since the migration modifies the given setting
new_mixed_setting = convert_module_store_setting_if_needed(copy.deepcopy(old_setting))
# check whether the configuration is encapsulated within Mixed.
self.assertEqual(new_mixed_setting["default"]["ENGINE"], "xmodule.modulestore.mixed.MixedModuleStore")
# check whether the stores are in an ordered list
new_stores = get_mixed_stores(new_mixed_setting)
self.assertIsInstance(new_stores, list)
return new_mixed_setting, new_stores[0]
def is_split_configured(self, mixed_setting):
"""
Tests whether the split module store is configured in the given setting.
"""
stores = get_mixed_stores(mixed_setting)
split_settings = [store for store in stores if store['ENGINE'].endswith('.DraftVersioningModuleStore')]
if len(split_settings):
# there should only be one setting for split
self.assertEquals(len(split_settings), 1)
# verify name
self.assertEquals(split_settings[0]['NAME'], 'split')
# verify split config settings equal those of mongo
self.assertStoreValuesEqual(
split_settings[0],
next((store for store in stores if 'DraftModuleStore' in store['ENGINE']), None)
)
return len(split_settings) > 0
def test_convert_into_mixed(self):
old_setting = self.OLD_CONFIG
new_mixed_setting, new_default_store_setting = self.assertMigrated(old_setting)
self.assertStoreValuesEqual(new_default_store_setting, old_setting["default"])
self.assertEqual(new_default_store_setting["ENGINE"], old_setting["default"]["ENGINE"])
self.assertFalse(self.is_split_configured(new_mixed_setting))
def test_convert_from_old_mongo_to_draft_store(self):
old_setting = self.OLD_CONFIG_WITH_DIRECT_MONGO
new_mixed_setting, new_default_store_setting = self.assertMigrated(old_setting)
self.assertStoreValuesEqual(new_default_store_setting, old_setting["default"])
self.assertEqual(new_default_store_setting["ENGINE"], "xmodule.modulestore.mongo.draft.DraftModuleStore")
self.assertTrue(self.is_split_configured(new_mixed_setting))
def test_convert_from_dict_to_list(self):
old_mixed_setting = self.OLD_MIXED_CONFIG_WITH_DICT
new_mixed_setting, new_default_store_setting = self.assertMigrated(old_mixed_setting)
self.assertEqual(new_default_store_setting["ENGINE"], "the_default_store")
self.assertTrue(self.is_split_configured(new_mixed_setting))
# exclude split when comparing old and new, since split was added as part of the migration
new_stores = [store for store in get_mixed_stores(new_mixed_setting) if store['NAME'] != 'split']
old_stores = get_mixed_stores(self.OLD_MIXED_CONFIG_WITH_DICT)
# compare each store configured in mixed
self.assertEqual(len(new_stores), len(old_stores))
for new_store in new_stores:
self.assertStoreValuesEqual(new_store, old_stores[new_store['NAME']])
def test_no_conversion(self):
# make sure there is no migration done on an already updated config
old_mixed_setting = self.ALREADY_UPDATED_MIXED_CONFIG
new_mixed_setting, new_default_store_setting = self.assertMigrated(old_mixed_setting)
self.assertTrue(self.is_split_configured(new_mixed_setting))
self.assertEquals(old_mixed_setting, new_mixed_setting)
@ddt.data('draft', 'split')
def test_update_settings(self, default_store):
mixed_setting = self.ALREADY_UPDATED_MIXED_CONFIG
update_module_store_settings(mixed_setting, default_store=default_store)
self.assertTrue(get_mixed_stores(mixed_setting)[0]['NAME'] == default_store)
def test_update_settings_error(self):
mixed_setting = self.ALREADY_UPDATED_MIXED_CONFIG
with self.assertRaises(Exception):
update_module_store_settings(mixed_setting, default_store='non-existent store')
| agpl-3.0 | 4,642,001,015,288,036,000 | 41.296117 | 113 | 0.555951 | false |
kunthar/hustle | test/test_pipeline.py | 3 | 6724 | import unittest
from hustle.core.column_fn import ip_ntoa
from hustle.core.pipeline import SelectPipe, _get_sort_range
from hustle.core.marble import Marble
from operator import itemgetter
EMP_FIELDS = ("+@2id", "+$name", "+%2hire_date", "+@4salary", "+@2department_id")
DEPT_FIELDS = ("+@2id", "+%2name", "+%2building", "+@2manager_id")
def first_items(items):
first = itemgetter(0)
return [first(item) for item in items]
def second_items(items):
first = itemgetter(1)
return [first(item) for item in items]
class TestPipeline(unittest.TestCase):
def setUp(self):
self.emp = Marble(name="employee",
fields=EMP_FIELDS)
self.dept = Marble(name="department",
fields=DEPT_FIELDS)
def test_get_key_names(self):
wheres = [(self.emp.salary > 25000), self.dept]
project = [self.emp.name, self.emp.salary, self.dept.building]
pipe = SelectPipe('server', wheres=wheres, project=project)
self.assertTupleEqual(('name', 'salary', None), tuple(first_items(pipe._get_key_names(project, ())[0])))
self.assertTupleEqual((None, None, 'building'), tuple(first_items(pipe._get_key_names(project, ())[1])))
join = [self.dept.id, self.emp.department_id]
pipe = SelectPipe('server', wheres=wheres, project=project, join=join)
self.assertTupleEqual(('department_id', 'name', 'salary', None), tuple(first_items(pipe._get_key_names(project, join)[0])))
self.assertTupleEqual(('id', None, None, 'building'), tuple(first_items(pipe._get_key_names(project, join)[1])))
project = [self.dept.building, self.emp.name, self.emp.salary]
pipe = SelectPipe('server', wheres=wheres, project=project, join=join)
self.assertTupleEqual(('department_id', None, 'name', 'salary'), tuple(first_items(pipe._get_key_names(project, join)[0])))
self.assertTupleEqual(('id', 'building', None, None), tuple(first_items(pipe._get_key_names(project, join)[1])))
def test_get_key_names_with_column_fn(self):
wheres = [(self.emp.salary > 25000), self.dept]
project = [self.emp.name, ip_ntoa(self.emp.salary), self.dept.building]
pipe = SelectPipe('server', wheres=wheres, project=project)
self.assertTupleEqual((None, None, None), tuple(second_items(pipe._get_key_names(project, ())[1])))
join = [self.dept.id, self.emp.department_id]
pipe = SelectPipe('server', wheres=wheres, project=project, join=join)
self.assertTupleEqual((None, None, None, None), tuple(second_items(pipe._get_key_names(project, join)[1])))
def test_get_sort_range(self):
project = [self.emp.name, self.emp.salary, self.dept.building]
order_by = []
# first case is with an empty order_by, it should sort by all columns
sort_range = _get_sort_range(2, project, order_by)
self.assertTupleEqual(tuple(sort_range), (2, 3, 4))
sort_range = _get_sort_range(0, project, order_by)
self.assertTupleEqual(tuple(sort_range), (0, 1, 2))
# test with a specified order_by, note that we should always be sorting all columns - the order_by
# just specifies the order. The unspecified columns are not in a defined order.
order_by = [self.emp.salary]
sort_range = _get_sort_range(2, project, order_by)
self.assertEqual(len(sort_range), 3)
self.assertEqual(sort_range[0], 3)
order_by = [self.dept.building, self.emp.name]
sort_range = _get_sort_range(1, project, order_by)
self.assertEqual(len(sort_range), 3)
self.assertTupleEqual(sort_range[:2], (3, 1))
def test_get_pipeline(self):
wheres = [(self.emp.salary > 25000), self.dept]
project = [self.emp.name, self.emp.salary, self.dept.building]
pipe = SelectPipe('server',
wheres=wheres,
project=project)
#(SPLIT, HustleStage('restrict-project',
# process=partial(process_restrict, jobobj=job),
# input_chain=[partial(hustle_stream, jobobj=job)]))
pipeline = pipe.pipeline
self.assertEqual(len(pipeline), 1)
self.assertEqual('split', pipeline[0][0])
self.assertEqual('restrict-select', pipeline[0][1].name)
order_by = [self.dept.building, self.emp.name]
pipe = SelectPipe('server',
wheres=wheres,
project=project,
order_by=order_by)
#(SPLIT, HustleStage('restrict-project',
# process=partial(process_restrict, jobobj=job),
# input_chain=[partial(hustle_stream, jobobj=job)])),
#(GROUP_LABEL, HustleStage('order',
# process=partial(process_order, jobobj=job, distinct=job.distinct),
# sort=sort_range))]
pipeline = pipe.pipeline
self.assertEqual(len(pipeline), 3)
self.assertEqual('split', pipeline[0][0])
self.assertEqual('group_node_label', pipeline[1][0])
self.assertEqual('order-combine', pipeline[1][1].name)
order_by = [self.dept.building, self.emp.name]
join = [self.dept.id, self.emp.department_id]
pipe = SelectPipe('server',
wheres=wheres,
project=project,
order_by=order_by,
join=join)
pipeline = pipe.pipeline
self.assertEqual(len(pipeline), 4)
self.assertEqual('split', pipeline[0][0])
self.assertEqual('group_label', pipeline[1][0])
self.assertEqual('join', pipeline[1][1].name)
self.assertEqual('group_all', pipeline[3][0])
self.assertEqual('order-reduce', pipeline[3][1].name)
def test_column_aliases_project(self):
wheres = [(self.emp.salary > 25000), self.dept]
project = [self.emp.name, self.emp.salary, self.dept.building, self.dept.name]
order_by = ['name', 'employee.salary', self.dept.building, 3]
join = [self.emp.name, self.dept.name]
pipe = SelectPipe('server',
wheres=wheres,
project=project,
order_by=order_by,
join=join)
self.assertEqual(len(pipe.order_by), 4)
self.assertEqual(pipe.order_by[0], self.emp.name)
self.assertEqual(pipe.order_by[1], self.emp.salary)
self.assertEqual(pipe.order_by[2], self.dept.building)
        self.assertEqual(pipe.order_by[3], self.dept.name)
| mit | -6,208,428,895,704,928,000 | 45.694444 | 131 | 0.594438 | false |
joshbruning/selenium | py/selenium/webdriver/phantomjs/service.py | 53 | 2587 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tempfile
from selenium.webdriver.common import service
class Service(service.Service):
"""
Object that manages the starting and stopping of PhantomJS / Ghostdriver
"""
def __init__(self, executable_path, port=0, service_args=None, log_path=None):
"""
Creates a new instance of the Service
:Args:
- executable_path : Path to PhantomJS binary
- port : Port the service is running on
- service_args : A List of other command line options to pass to PhantomJS
- log_path: Path for PhantomJS service to log to
"""
self.service_args = service_args
if self.service_args is None:
self.service_args = []
else:
self.service_args = service_args[:]
if not log_path:
log_path = "ghostdriver.log"
if not self._args_contain("--cookies-file="):
self._cookie_temp_file_handle, self._cookie_temp_file = tempfile.mkstemp()
self.service_args.append("--cookies-file=" + self._cookie_temp_file)
else:
self._cookie_temp_file = None
service.Service.__init__(self, executable_path, port=port, log_file=open(log_path, 'w'))
def _args_contain(self, arg):
return len(list(filter(lambda x: x.startswith(arg), self.service_args))) > 0
def command_line_args(self):
return self.service_args + ["--webdriver=%d" % self.port]
@property
def service_url(self):
"""
Gets the url of the GhostDriver Service
"""
return "http://localhost:%d/wd/hub" % self.port
def send_remote_shutdown_command(self):
if self._cookie_temp_file:
os.close(self._cookie_temp_file_handle)
os.remove(self._cookie_temp_file)
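def _example_start_service(executable_path='/usr/local/bin/phantomjs'):
    # Illustrative sketch only, not part of the original module: the binary
    # path and extra arguments are assumptions; start()/stop() come from the
    # selenium base service.Service class that the Service above inherits from.
    svc = Service(executable_path, port=8910,
                  service_args=['--ignore-ssl-errors=true'],
                  log_path='ghostdriver.log')
    svc.start()
    try:
        return svc.service_url
    finally:
        svc.stop()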
| apache-2.0 | 7,433,826,115,159,434,000 | 37.044118 | 96 | 0.657132 | false |
dushu1203/chromium.src | tools/perf/page_sets/simple_mobile_sites.py | 9 | 1794 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class SimplePage(page_module.Page):
def __init__(self, url, page_set):
super(SimplePage, self).__init__(
url=url,
page_set=page_set,
credentials_path='data/credentials.json')
self.archive_data_file = 'data/simple_mobile_sites.json'
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
# TODO(epenner): Remove this wait (http://crbug.com/366933)
action_runner.Wait(5)
class SimpleScrollPage(SimplePage):
def __init__(self, url, page_set):
super(SimpleScrollPage, self).__init__(url=url, page_set=page_set)
def RunPageInteractions(self, action_runner):
# Make the scroll longer to reduce noise.
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollPage(direction='down', speed_in_pixels_per_second=300)
interaction.End()
class SimpleMobileSitesPageSet(page_set_module.PageSet):
""" Simple mobile sites """
def __init__(self):
super(SimpleMobileSitesPageSet, self).__init__(
user_agent_type='tablet_10_inch',
archive_data_file='data/simple_mobile_sites.json',
bucket=page_set_module.PUBLIC_BUCKET)
scroll_page_list = [
# Why: Scrolls moderately complex pages (up to 60 layers)
'http://www.ebay.co.uk/',
'https://www.flickr.com/',
'http://www.apple.com/mac/',
'http://www.nyc.gov',
'http://m.nytimes.com/'
]
for url in scroll_page_list:
self.AddUserStory(SimpleScrollPage(url, self))
| bsd-3-clause | -7,571,003,238,398,807,000 | 31.618182 | 78 | 0.683946 | false |
maxdeliso/elevatorSim | Lib/test/test_iterlen.py | 59 | 7747 | """ Test Iterator Length Transparency
Some functions or methods which accept general iterable arguments have
optional, more efficient code paths if they know how many items to expect.
For instance, map(func, iterable), will pre-allocate the exact amount of
space required whenever the iterable can report its length.
The desired invariant is: len(it)==len(list(it)).
A complication is that an iterable and iterator can be the same object. To
maintain the invariant, an iterator needs to dynamically update its length.
For instance, an iterable such as range(10) always reports its length as ten,
but it=iter(range(10)) starts at ten, and then goes to nine after next(it).
Having this capability means that map() can ignore the distinction between
map(func, iterable) and map(func, iter(iterable)).
When the iterable is immutable, the implementation can straight-forwardly
report the original length minus the cumulative number of calls to next().
This is the case for tuples, range objects, and itertools.repeat().
Some containers become temporarily immutable during iteration. This includes
dicts, sets, and collections.deque. Their implementation is equally simple
though they need to permanently set their length to zero whenever there is
an attempt to iterate after a length mutation.
The situation is slightly more involved whenever an object allows length mutation
during iteration. Lists and sequence iterators are dynamically updatable.
So, if a list is extended during iteration, the iterator will continue through
the new items. If it shrinks to a point before the most recent iteration,
then no further items are available and the length is reported at zero.
Reversed objects can also be wrapped around mutable objects; however, any
appends after the current position are ignored. Any other approach leads
to confusion and possibly returning the same item more than once.
The iterators not listed above, such as enumerate and the other itertools,
are not length transparent because they have no way to distinguish between
iterables that report static length and iterators whose length changes with
each call (i.e. the difference between enumerate('abc') and
enumerate(iter('abc'))).
"""
import unittest
from test import support
from itertools import repeat
from collections import deque
from builtins import len as _len
n = 10
def len(obj):
try:
return _len(obj)
except TypeError:
try:
# note: this is an internal undocumented API,
# don't rely on it in your own programs
return obj.__length_hint__()
except AttributeError:
raise TypeError
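def _invariant_example():
    # Illustrative sketch, not part of the original test suite: demonstrates
    # the invariant len(it) == len(list(it)) shrinking as items are consumed,
    # using the len() helper above (which falls back to __length_hint__()).
    it = iter(range(5))
    assert len(it) == 5
    next(it)
    assert len(it) == 4
    assert len(it) == len(list(it))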
class TestInvariantWithoutMutations(unittest.TestCase):
def test_invariant(self):
it = self.it
for i in reversed(range(1, n+1)):
self.assertEqual(len(it), i)
next(it)
self.assertEqual(len(it), 0)
self.assertRaises(StopIteration, next, it)
self.assertEqual(len(it), 0)
class TestTemporarilyImmutable(TestInvariantWithoutMutations):
def test_immutable_during_iteration(self):
# objects such as deques, sets, and dictionaries enforce
# length immutability during iteration
it = self.it
self.assertEqual(len(it), n)
next(it)
self.assertEqual(len(it), n-1)
self.mutate()
self.assertRaises(RuntimeError, next, it)
self.assertEqual(len(it), 0)
## ------- Concrete Type Tests -------
class TestRepeat(TestInvariantWithoutMutations):
def setUp(self):
self.it = repeat(None, n)
def test_no_len_for_infinite_repeat(self):
# The repeat() object can also be infinite
self.assertRaises(TypeError, len, repeat(None))
class TestXrange(TestInvariantWithoutMutations):
def setUp(self):
self.it = iter(range(n))
class TestXrangeCustomReversed(TestInvariantWithoutMutations):
def setUp(self):
self.it = reversed(range(n))
class TestTuple(TestInvariantWithoutMutations):
def setUp(self):
self.it = iter(tuple(range(n)))
## ------- Types that should not be mutated during iteration -------
class TestDeque(TestTemporarilyImmutable):
def setUp(self):
d = deque(range(n))
self.it = iter(d)
self.mutate = d.pop
class TestDequeReversed(TestTemporarilyImmutable):
def setUp(self):
d = deque(range(n))
self.it = reversed(d)
self.mutate = d.pop
class TestDictKeys(TestTemporarilyImmutable):
def setUp(self):
d = dict.fromkeys(range(n))
self.it = iter(d)
self.mutate = d.popitem
class TestDictItems(TestTemporarilyImmutable):
def setUp(self):
d = dict.fromkeys(range(n))
self.it = iter(d.items())
self.mutate = d.popitem
class TestDictValues(TestTemporarilyImmutable):
def setUp(self):
d = dict.fromkeys(range(n))
self.it = iter(d.values())
self.mutate = d.popitem
class TestSet(TestTemporarilyImmutable):
def setUp(self):
d = set(range(n))
self.it = iter(d)
self.mutate = d.pop
## ------- Types that can mutate during iteration -------
class TestList(TestInvariantWithoutMutations):
def setUp(self):
self.it = iter(range(n))
def test_mutation(self):
d = list(range(n))
it = iter(d)
next(it)
next(it)
self.assertEqual(len(it), n-2)
d.append(n)
self.assertEqual(len(it), n-1) # grow with append
d[1:] = []
self.assertEqual(len(it), 0)
self.assertEqual(list(it), [])
d.extend(range(20))
self.assertEqual(len(it), 0)
class TestListReversed(TestInvariantWithoutMutations):
def setUp(self):
self.it = reversed(range(n))
def test_mutation(self):
d = list(range(n))
it = reversed(d)
next(it)
next(it)
self.assertEqual(len(it), n-2)
d.append(n)
self.assertEqual(len(it), n-2) # ignore append
d[1:] = []
self.assertEqual(len(it), 0)
self.assertEqual(list(it), []) # confirm invariant
d.extend(range(20))
self.assertEqual(len(it), 0)
## -- Check to make sure exceptions are not suppressed by __length_hint__()
class BadLen(object):
def __iter__(self): return iter(range(10))
def __len__(self):
raise RuntimeError('hello')
class BadLengthHint(object):
def __iter__(self): return iter(range(10))
def __length_hint__(self):
raise RuntimeError('hello')
class NoneLengthHint(object):
def __iter__(self): return iter(range(10))
def __length_hint__(self):
return None
class TestLengthHintExceptions(unittest.TestCase):
def test_issue1242657(self):
self.assertRaises(RuntimeError, list, BadLen())
self.assertRaises(RuntimeError, list, BadLengthHint())
self.assertRaises(RuntimeError, [].extend, BadLen())
self.assertRaises(RuntimeError, [].extend, BadLengthHint())
b = bytearray(range(10))
self.assertRaises(RuntimeError, b.extend, BadLen())
self.assertRaises(RuntimeError, b.extend, BadLengthHint())
def test_invalid_hint(self):
# Make sure an invalid result doesn't muck-up the works
self.assertEqual(list(NoneLengthHint()), list(range(10)))
def test_main():
unittests = [
TestRepeat,
TestXrange,
TestXrangeCustomReversed,
TestTuple,
TestDeque,
TestDequeReversed,
TestDictKeys,
TestDictItems,
TestDictValues,
TestSet,
TestList,
TestListReversed,
TestLengthHintExceptions,
]
support.run_unittest(*unittests)
if __name__ == "__main__":
test_main()
| bsd-2-clause | 7,146,434,548,694,675,000 | 29.864542 | 78 | 0.669291 | false |
jbrahy/capstone | bindings/python/test_skipdata.py | 31 | 2425 | #!/usr/bin/env python
# Capstone Python bindings, by Nguyen Anh Quynnh <[email protected]>
from __future__ import print_function
from capstone import *
import binascii
from xprint import to_x, to_hex, to_x_32
X86_CODE32 = b"\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00\x00\x91\x92"
RANDOM_CODE = b"\xed\x00\x00\x00\x00\x1a\x5a\x0f\x1f\xff\xc2\x09\x80\x00\x00\x00\x07\xf7\xeb\x2a\xff\xff\x7f\x57\xe3\x01\xff\xff\x7f\x57\xeb\x00\xf0\x00\x00\x24\xb2\x4f\x00\x78"
all_tests = (
(CS_ARCH_X86, CS_MODE_32, X86_CODE32, "X86 32 (Intel syntax)", 0),
(CS_ARCH_ARM, CS_MODE_ARM, RANDOM_CODE, "Arm", 0),
)
# ## Test cs_disasm_quick()
def test_cs_disasm_quick():
for (arch, mode, code, comment, syntax) in all_tests:
print('*' * 40)
print("Platform: %s" % comment)
print("Disasm:"),
print(to_hex(code))
for insn in cs_disasm_quick(arch, mode, code, 0x1000):
print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
print
# Sample callback for SKIPDATA option
def testcb(buffer, size, offset, userdata):
# always skip 2 bytes of data
return 2
# ## Test class Cs
def test_class():
for (arch, mode, code, comment, syntax) in all_tests:
print('*' * 16)
print("Platform: %s" %comment)
print("Code: %s" % to_hex(code))
print("Disasm:")
try:
md = Cs(arch, mode)
if syntax != 0:
md.syntax = syntax
md.skipdata = True
# Default "data" instruction's name is ".byte". To rename it to "db", just uncomment
# the code below.
# md.skipdata_setup = ("db", None, None)
# NOTE: This example ignores SKIPDATA's callback (first None) & user_data (second None)
# To customize the SKIPDATA callback, uncomment the line below.
# md.skipdata_setup = (".db", CS_SKIPDATA_CALLBACK(testcb), None)
for insn in md.disasm(code, 0x1000):
#bytes = binascii.hexlify(insn.bytes)
#print("0x%x:\t%s\t%s\t// hex-code: %s" %(insn.address, insn.mnemonic, insn.op_str, bytes))
print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
print("0x%x:" % (insn.address + insn.size))
print
except CsError as e:
print("ERROR: %s" % e)
if __name__ == '__main__':
test_class()
| bsd-3-clause | -1,326,846,960,444,039,000 | 31.77027 | 177 | 0.578969 | false |
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/magic/catalog/builder.py | 1 | 7693 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.catalogbuilder Contains the CatalogBuilder class.
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from ...core.basics.configurable import OldConfigurable
from ...core.tools import tables
# -----------------------------------------------------------------
class CatalogBuilder(OldConfigurable):
"""
This class ...
"""
def __init__(self, config=None):
"""
The constructor ...
:param config:
:return:
"""
# Call the constructor of the base class
super(CatalogBuilder, self).__init__(config, "magic")
# The image frame
self.frame = None
# References to the extractors
self.galaxy_extractor = None
self.star_extractor = None
self.trained_extractor = None
# The output catalogs
self.galactic_catalog = None
self.stellar_catalog = None
# -----------------------------------------------------------------
def run(self, frame, galaxy_extractor, star_extractor, trained_extractor):
"""
This function ...
:param frame:
:param galaxy_extractor:
:param star_extractor:
:param trained_extractor:
:return:
"""
# 1. Call the setup function
self.setup(frame, galaxy_extractor, star_extractor, trained_extractor)
# 2. Build the catalog
self.build()
# 3. Writing
self.write()
# -----------------------------------------------------------------
def clear(self):
"""
This function ...
:return:
"""
# Set attributes to None
self.frame = None
self.galaxy_extractor = None
self.star_extractor = None
self.trained_extractor = None
# -----------------------------------------------------------------
def setup(self, frame, galaxy_extractor, star_extractor, trained_extractor):
"""
This function ...
:param frame:
:param galaxy_extractor:
:param star_extractor:
:param trained_extractor:
:return:
"""
# Call the setup function of the base class
super(CatalogBuilder, self).setup()
# The frame
self.frame = frame
# References to the extractors
self.galaxy_extractor = galaxy_extractor
self.star_extractor = star_extractor
self.trained_extractor = trained_extractor
# -----------------------------------------------------------------
def build(self):
"""
This function ...
:return:
"""
# Build the galactic catalog
self.build_galactic_catalog()
# Build the stellar catalog
self.build_stellar_catalog()
# -----------------------------------------------------------------
def build_galactic_catalog(self):
"""
This function ...
:return:
"""
# Set galactic catalog (no merging with trained extractor (yet) and undetected galaxies are included anyway)
self.galactic_catalog = self.galaxy_extractor.catalog
# -----------------------------------------------------------------
def build_stellar_catalog(self):
"""
This function ...
:return:
"""
# Initialize columns
catalog_column = []
id_column = []
ra_column = []
dec_column = []
ra_error_column = []
dec_error_column = []
confidence_level_column = []
on_galaxy_column = []
original_id_column = []
# Append stars from the star extractor; loop over the stellar statistics
for i in range(len(self.star_extractor.statistics)):
# Get the index of this star in the input catalog used by the star extractor
index = self.star_extractor.statistics["Star index"][i]
# Skip undetected stars
if not self.star_extractor.statistics["Detected"][i]: continue
# Add the appropriate values in the columns
catalog_column.append(self.star_extractor.catalog["Catalog"][index] if not (hasattr(self.star_extractor.catalog["Catalog"], "mask") and self.star_extractor.catalog["Catalog"].mask[index]) else None)
id_column.append(self.star_extractor.catalog["Id"][index] if not (hasattr(self.star_extractor.catalog["Id"], "mask") and self.star_extractor.catalog["Id"].mask[index]) else None)
ra_column.append(self.star_extractor.catalog["Right ascension"][index])
dec_column.append(self.star_extractor.catalog["Declination"][index])
ra_error_column.append(self.star_extractor.catalog["Right ascension error"][index])
dec_error_column.append(self.star_extractor.catalog["Declination error"][index])
confidence_level_column.append(self.star_extractor.catalog["Confidence level"][index])
on_galaxy_column.append(self.star_extractor.catalog["On galaxy"][index])
original_id_column.append(None)
#position_error = 0.5 * self.frame.average_pixelscale.to("mas/pix").value # in mas !!
x_position_error = 0.5 * self.frame.pixelscale.x.to("mas/pix").value
y_position_error = 0.5 * self.frame.pixelscale.y.to("mas/pix").value
# Append stars from the trained extractor; loop over the stars found by the trained extractor
for star in self.trained_extractor.stars:
# Add the appropriate values in the columns
catalog_column.append(None)
id_column.append(None)
ra_column.append(star.position.ra.value)
dec_column.append(star.position.dec.value)
ra_error_column.append(x_position_error)
dec_error_column.append(y_position_error)
confidence_level_column.append(star.confidence_level)
on_galaxy_column.append(False)
original_id_column.append(None)
data = [catalog_column, id_column, ra_column, dec_column, ra_error_column, dec_error_column, confidence_level_column,
on_galaxy_column, original_id_column]
names = ['Catalog', 'Id', 'Right ascension', 'Declination', 'Right ascension error', 'Declination error', 'Confidence level',
'On galaxy', 'Original catalog and id']
# Create the merged stellar catalog
self.stellar_catalog = tables.new(data, names)
# -----------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Write the galactic catalog
self.write_galactic_catalog()
# Write the stellar catalog
self.write_stellar_catalog()
# -----------------------------------------------------------------
def write_galactic_catalog(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
def write_stellar_catalog(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
| mit | 1,450,032,999,793,707,000 | 30.917012 | 210 | 0.520671 | false |
foobarbazblarg/stayclean | stayclean-2017-march/participant.py | 60 | 1524 | import datetime
class Participant:
def __init__(self):
self.name = ""
self.isStillIn = True
self.hasCheckedIn = False
self.relapseDate = None
@property
def hasRelapsed(self):
return self.relapseDate is not None
def setFromLine(self, lineString):
# format of participants.txt line:
# name hasCheckedIn isStillIn
# e.g.:
# foobarbazblarg True True
words = lineString.split()
self.name = words[0]
self.hasCheckedIn = words[1] == 'True'
self.isStillIn = words[2] == 'True'
if len(words) >= 4:
self.relapseDate = datetime.datetime.strptime(words[3], "%Y.%m.%d").date()
def relapseNowIfNotAlready(self):
if self.isStillIn:
self.isStillIn = False
self.relapseDate = datetime.date.today()
def relapseDayOfWeekIndex(self):
if self.relapseDate:
return self.relapseDate.weekday()
else:
return None
def relapseDayOfWeekName(self):
        if self.relapseDayOfWeekIndex() is not None:
return {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}[self.relapseDayOfWeekIndex()]
else:
return None
def asLine(self):
answer = self.name + " " + str(self.hasCheckedIn) + " " + str(self.isStillIn)
if self.relapseDate:
answer += " "
answer += self.relapseDate.strftime("%Y.%m.%d")
return answer
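def _example_round_trip():
    # Illustrative sketch, not part of the original module: parse one line in
    # the participants.txt format shown above and serialize it back; the
    # participant name and date are made up.
    p = Participant()
    p.setFromLine("foobarbazblarg True True 2017.03.15")
    assert p.hasRelapsed
    assert p.relapseDayOfWeekName() == 'Wednesday'
    return p.asLine()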
| mit | 4,730,820,464,951,085,000 | 30.102041 | 148 | 0.576115 | false |
nskinkel/oppy | oppy/tests/integration/cell/test_fixedlen.py | 1 | 12689 | import struct
import unittest
from collections import OrderedDict
from oppy.cell.fixedlen import (
FixedLenCell,
Create2Cell,
Created2Cell,
CreatedFastCell,
CreatedCell,
CreateFastCell,
CreateCell,
DestroyCell,
EncryptedCell,
NetInfoCell,
PaddingCell,
)
from oppy.cell.util import TLVTriple
from oppy.tests.integration.cell.cellbase import FixedLenTestBase
CIRC_ID = 1
# Unit tests and constants for Create2Cell
CREATE2_CMD = 10
CREATE2_NTOR_HTYPE = 2
CREATE2_NTOR_HLEN = 84
CREATE2_NTOR_HDATA_DUMMY = "\x00" * CREATE2_NTOR_HLEN
create2_bytes_good = struct.pack(
"!HBHH{}s".format(CREATE2_NTOR_HLEN),
CIRC_ID, CREATE2_CMD,
CREATE2_NTOR_HTYPE, CREATE2_NTOR_HLEN, CREATE2_NTOR_HDATA_DUMMY,
)
create2_bytes_good_padded = FixedLenCell.padCellBytes(create2_bytes_good)
assert len(create2_bytes_good_padded) == 512
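
# Layout of the CREATE2 test bytes above (link protocol version 3):
#   2-byte circuit id | 1-byte command | 2-byte HTYPE | 2-byte HLEN | HLEN bytes of HDATA,
# zero-padded by FixedLenCell.padCellBytes to the fixed 512-byte cell length.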
create2_parse_bad_htype = struct.pack(
"!HBHH{}s".format(CREATE2_NTOR_HLEN),
CIRC_ID, CREATE2_CMD,
# ntor should be 2
1, CREATE2_NTOR_HLEN, CREATE2_NTOR_HDATA_DUMMY,
)
create2_parse_bad_htype = FixedLenCell.padCellBytes(create2_parse_bad_htype)
assert len(create2_parse_bad_htype) == 512
create2_parse_bad_hlen = struct.pack(
"!HBHH{}s".format(CREATE2_NTOR_HLEN),
CIRC_ID, CREATE2_CMD,
# hlen should be 84 for ntor
CREATE2_NTOR_HTYPE, 83, CREATE2_NTOR_HDATA_DUMMY,
)
create2_parse_bad_hlen = FixedLenCell.padCellBytes(create2_parse_bad_hlen)
assert len(create2_parse_bad_hlen) == 512
# htype should be 2 for ntor
create2_make_bad_htype = (CIRC_ID, 1, CREATE2_NTOR_HLEN,
CREATE2_NTOR_HDATA_DUMMY)
# htype should be int not str
create2_make_bad_htype_2 = (CIRC_ID, str(CREATE2_NTOR_HTYPE),
CREATE2_NTOR_HLEN,
CREATE2_NTOR_HDATA_DUMMY)
# hlen should be 84 for ntor
create2_make_bad_hlen = (CIRC_ID, CREATE2_NTOR_HTYPE, 83,
CREATE2_NTOR_HDATA_DUMMY)
# len(hdata) == hlen must be true
create2_make_bad_hdata = (CIRC_ID, CREATE2_NTOR_HTYPE, CREATE2_NTOR_HLEN,
"\x00")
class Create2CellTests(FixedLenTestBase, unittest.TestCase):
# NOTE: Twisted unfortunately does not support `setUpClass()`, so we
# do actually need to call this before every test
def setUp(self):
self.cell_constants = {
'cell-bytes-good': create2_bytes_good_padded,
'cell-type': Create2Cell,
'cell-bytes-good-nopadding': create2_bytes_good,
}
self.cell_header = OrderedDict()
self.cell_header['circ_id'] = CIRC_ID
self.cell_header['cmd'] = CREATE2_CMD
self.cell_header['link_version'] = 3
self.cell_attributes = OrderedDict()
self.cell_attributes['htype'] = CREATE2_NTOR_HTYPE
self.cell_attributes['hlen'] = CREATE2_NTOR_HLEN
self.cell_attributes['hdata'] = CREATE2_NTOR_HDATA_DUMMY
self.bad_parse_inputs = (create2_parse_bad_htype,
create2_parse_bad_hlen)
self.bad_make_inputs = (create2_make_bad_htype,
create2_make_bad_htype_2,
create2_make_bad_hlen,
create2_make_bad_hdata)
self.encrypted = False
# Unit tests and constants for Created2Cell
# we can reuse most of the values from Create2Cell for some constants
CREATED2_CMD = 11
created2_bytes_good = struct.pack(
"!HBH{}s".format(CREATE2_NTOR_HLEN),
CIRC_ID, CREATED2_CMD,
CREATE2_NTOR_HLEN, CREATE2_NTOR_HDATA_DUMMY,
)
created2_bytes_good_padded = FixedLenCell.padCellBytes(created2_bytes_good)
assert len(created2_bytes_good_padded) == 512
created2_parse_bad_hlen = struct.pack(
"!HBH{}s".format(CREATE2_NTOR_HLEN),
CIRC_ID, CREATE2_CMD,
# hlen should be 84 for ntor
83, CREATE2_NTOR_HDATA_DUMMY,
)
created2_parse_bad_hlen = FixedLenCell.padCellBytes(created2_parse_bad_hlen)
assert len(created2_parse_bad_hlen) == 512
# hlen should be 84 for ntor
created2_make_bad_hlen = (CIRC_ID, 83, CREATE2_NTOR_HDATA_DUMMY)
# len(hdata) == hlen must be true
created2_make_bad_hdata = (CIRC_ID, CREATE2_NTOR_HLEN, "\x00")
class Created2CellTests(FixedLenTestBase, unittest.TestCase):
def setUp(self):
self.cell_constants = {
'cell-bytes-good': created2_bytes_good_padded,
'cell-type': Created2Cell,
'cell-bytes-good-nopadding': created2_bytes_good,
}
self.cell_header = OrderedDict()
self.cell_header['circ_id'] = CIRC_ID
self.cell_header['cmd'] = CREATED2_CMD
self.cell_header['link_version'] = 3
self.cell_attributes = OrderedDict()
self.cell_attributes['hlen'] = CREATE2_NTOR_HLEN
self.cell_attributes['hdata'] = CREATE2_NTOR_HDATA_DUMMY
self.bad_parse_inputs = (created2_parse_bad_hlen,)
self.bad_make_inputs = (created2_make_bad_hlen,
created2_make_bad_hdata,)
self.encrypted = False
# for unimplemented cells, just verify they fail when we try to create them
class CreatedFastCellTests(unittest.TestCase):
def test_init_fail(self):
self.assertRaises(NotImplementedError, CreatedFastCell, 'dummy')
class CreatedCellTests(unittest.TestCase):
def test_init_fail(self):
self.assertRaises(NotImplementedError, CreatedCell, 'dummy')
class CreateFastCellTests(unittest.TestCase):
def test_init_fail(self):
self.assertRaises(NotImplementedError, CreateFastCell, 'dummy')
class CreateCellTests(unittest.TestCase):
def test_init_fail(self):
self.assertRaises(NotImplementedError, CreateCell, 'dummy')
# Unit tests and constants for DestroyCell
DESTROY_CMD = 4
destroy_bytes_good = struct.pack(
"!HBB",
CIRC_ID, DESTROY_CMD,
0,
)
destroy_bytes_good_padded = FixedLenCell.padCellBytes(destroy_bytes_good)
assert len(destroy_bytes_good_padded) == 512
destroy_parse_bad_reason = struct.pack(
"!HBB",
CIRC_ID, DESTROY_CMD,
# 13 is not a valid reason
13,
)
destroy_parse_bad_reason = FixedLenCell.padCellBytes(destroy_parse_bad_reason)
assert len(destroy_parse_bad_reason) == 512
destroy_make_bad_reason = (CIRC_ID, 13)
encrypted_bytes_good = struct.pack(
"!HBB",
CIRC_ID, DESTROY_CMD,
0,
)
destroy_bytes_good_padded = FixedLenCell.padCellBytes(destroy_bytes_good)
assert len(destroy_bytes_good_padded) == 512
class DestroyCellTests(FixedLenTestBase, unittest.TestCase):
def setUp(self):
self.cell_constants = {
'cell-bytes-good': destroy_bytes_good_padded,
'cell-type': DestroyCell,
'cell-bytes-good-nopadding': destroy_bytes_good,
}
self.cell_header = OrderedDict()
self.cell_header['circ_id'] = CIRC_ID
self.cell_header['cmd'] = DESTROY_CMD
self.cell_header['link_version'] = 3
self.cell_attributes = OrderedDict()
self.cell_attributes['reason'] = 0
self.bad_parse_inputs = (destroy_parse_bad_reason,)
self.bad_make_inputs = (destroy_make_bad_reason,)
self.encrypted = False
# Unit tests and constants for EncryptedCell
# since the payload of an encrypted cell prior to decryption is, from oppy's
# perspective, just a black box, the only type of "bad" payload data is
# a payload passed to "make()" that is too large for a relay cell
RELAY_CMD = 3
encrypted_bytes_good = struct.pack(
"!HB57s",
CIRC_ID, RELAY_CMD,
"\x00" * 509,
)
encrypted_bytes_good_padded = FixedLenCell.padCellBytes(encrypted_bytes_good)
assert len(encrypted_bytes_good_padded) == 512
encrypted_make_bad_payload_len_long = (CIRC_ID, "\x00" * 510)
encrypted_make_bad_payload_len_short = (CIRC_ID, "\x00" * 508)
class EncryptedCellTests(FixedLenTestBase, unittest.TestCase):
def setUp(self):
self.cell_constants = {
'cell-bytes-good': encrypted_bytes_good_padded,
'cell-type': EncryptedCell,
'cell-bytes-good-nopadding': encrypted_bytes_good,
}
self.cell_header = OrderedDict()
self.cell_header['circ_id'] = CIRC_ID
self.cell_header['cmd'] = RELAY_CMD
self.cell_header['link_version'] = 3
self.cell_attributes = {'enc_payload': "\x00" * 509, }
self.bad_parse_inputs = ()
self.bad_make_inputs = (encrypted_make_bad_payload_len_long,
encrypted_make_bad_payload_len_short,)
self.encrypted = True
def test_getBytes_trimmed(self):
# encrypted cells don't know what's in their payload, so
# "trimmed" arg doesn't make sense for them
pass
# NetInfoCell (IPv4 type/length/value) unittests and constant values
NETINFO_CMD = 8
# IPv4 type type/length/value
netinfo_bytes_good = struct.pack(
'!HBIBB4sBBB4s',
CIRC_ID, NETINFO_CMD,
0, 4, 4, "\x7f\x00\x00\x01", # 127.0.0.1
1, 4, 4, "\x7f\x00\x00\x01",
)
netinfo_bytes_good_padded = FixedLenCell.padCellBytes(netinfo_bytes_good)
assert len(netinfo_bytes_good_padded) == 512
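
# Layout of the NETINFO test bytes above:
#   2-byte circuit id | 1-byte command | 4-byte timestamp (zero here) |
#   the other OR's address as a type/length/value triple (type 4 = IPv4, length 4 bytes) |
#   a 1-byte address count followed by one type/length/value triple per local OR address.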
netinfo_parse_bad_num_addresses = netinfo_bytes_good_padded[:13]
netinfo_parse_bad_num_addresses += struct.pack('!B', 200)
netinfo_parse_bad_num_addresses += netinfo_bytes_good_padded[14:]
assert len(netinfo_parse_bad_num_addresses) == 512
netinfo_make_bad_num_addresses = (CIRC_ID, TLVTriple(u'127.0.0.1'),
[TLVTriple(u'127.0.0.1') for i in xrange(50)])
class NetInfoCellIPv4Tests(FixedLenTestBase, unittest.TestCase):
def setUp(self):
self.cell_constants = {
'cell-bytes-good': netinfo_bytes_good_padded,
'cell-type': NetInfoCell,
'cell-bytes-good-nopadding': netinfo_bytes_good,
}
self.cell_header = OrderedDict()
self.cell_header['circ_id'] = CIRC_ID
self.cell_header['cmd'] = NETINFO_CMD
self.cell_header['link_version'] = 3
self.cell_attributes = OrderedDict()
self.cell_attributes['other_or_address'] = TLVTriple(u'127.0.0.1')
self.cell_attributes['this_or_addresses'] = [TLVTriple(u'127.0.0.1')]
self.cell_attributes['timestamp'] = struct.pack('!I', 0)
self.bad_parse_inputs = (netinfo_parse_bad_num_addresses,)
self.bad_make_inputs = (netinfo_make_bad_num_addresses,)
self.encrypted = False
# IPv6 type type/length/value
netinfo_bytes_good_ipv6 = struct.pack(
'!HBIBB16sBBB16s',
CIRC_ID, NETINFO_CMD,
0, 6, 16, "\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x02\xb3\xff\xfe\x1e\x83)",
1, 6, 16, "\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x02\xb3\xff\xfe\x1e\x83)",
)
netinfo_bytes_good_padded_ipv6 = FixedLenCell.padCellBytes(netinfo_bytes_good_ipv6)
assert len(netinfo_bytes_good_padded_ipv6) == 512
class NetInfoCellIPv6Tests(FixedLenTestBase, unittest.TestCase):
def setUp(self):
self.cell_constants = {
'cell-bytes-good': netinfo_bytes_good_padded_ipv6,
'cell-type': NetInfoCell,
'cell-bytes-good-nopadding': netinfo_bytes_good_ipv6,
}
self.cell_header = OrderedDict()
self.cell_header['circ_id'] = CIRC_ID
self.cell_header['cmd'] = NETINFO_CMD
self.cell_header['link_version'] = 3
self.cell_attributes = OrderedDict()
self.cell_attributes['other_or_address'] = TLVTriple(u'fe80:0000:0000:0000:0202:b3ff:fe1e:8329')
self.cell_attributes['this_or_addresses'] = [TLVTriple(u'fe80:0000:0000:0000:0202:b3ff:fe1e:8329')]
self.cell_attributes['timestamp'] = struct.pack('!I', 0)
self.bad_parse_inputs = ()
self.bad_make_inputs = ()
self.encrypted = False
# PaddingCell unittests and constant values
PADDING_CMD = 0
padding_bytes_good = struct.pack(
'!HB509s',
CIRC_ID, PADDING_CMD,
"\x00" * 509,
)
padding_bytes_good_padded = padding_bytes_good
assert len(padding_bytes_good_padded) == 512
class PaddingCellTests(FixedLenTestBase, unittest.TestCase):
def setUp(self):
self.cell_constants = {
'cell-bytes-good': padding_bytes_good_padded,
'cell-type': PaddingCell,
'cell-bytes-good-nopadding': padding_bytes_good,
}
self.cell_header = OrderedDict()
self.cell_header['circ_id'] = CIRC_ID
self.cell_header['cmd'] = PADDING_CMD
self.cell_header['link_version'] = 3
# padding cells don't have any attributes, and they don't really
# have 'bad' inputs, as the payload must be ignored
self.cell_attributes = {}
self.bad_parse_inputs = ()
self.bad_make_inputs = ()
self.encrypted = False
| bsd-3-clause | -7,229,135,415,162,225,000 | 29.873479 | 107 | 0.651982 | false |
lnls-fac/apsuite | apsuite/commisslib/injsi_optimization.py | 1 | 3074 | """."""
import time as _time
import numpy as _np
from epics import PV
from apsuite.optimization import SimulAnneal
from siriuspy.devices import Tune, TuneCorr, CurrInfoSI
from ..utils import MeasBaseClass as _BaseClass, \
ParamsBaseClass as _ParamsBaseClass
class InjSIParams(_ParamsBaseClass):
"""."""
def __init__(self):
"""."""
super().__init__()
self.nr_iter = 10
self.nr_pulses = 5
self.max_delta_tunex = 1e-2
self.max_delta_tuney = 1e-2
self.wait_tunecorr = 1 # [s]
self.pulse_freq = 2 # [Hz]
def __str__(self):
"""."""
ftmp = '{0:15s} = {1:9.6f} {2:s}\n'.format
dtmp = '{0:15s} = {1:9d} {2:s}\n'.format
stg = dtmp('nr_iter', self.nr_iter, '')
stg += dtmp('nr_pulses', self.nr_pulses, '')
stg += ftmp('max_delta_tunex', self.max_delta_tunex, '')
stg += ftmp('max_delta_tuney', self.max_delta_tuney, '')
stg += ftmp('wait_tunecorr', self.wait_tunecorr, '[s]')
stg += ftmp('pulse_freq', self.pulse_freq, '[Hz]')
return stg
class TuneScanInjSI(SimulAnneal, _BaseClass):
"""."""
PV_INJECTION = 'AS-RaMO:TI-EVG:InjectionEvt-Sel'
def __init__(self, save=False):
"""."""
SimulAnneal.__init__(self, save=save)
_BaseClass.__init__(self)
self.devices = dict()
self.params = InjSIParams()
self.devices['tune'] = Tune(Tune.DEVICES.SI)
self.devices['tunecorr'] = TuneCorr(TuneCorr.DEVICES.SI)
self.devices['currinfo'] = CurrInfoSI()
self.devices['injection'] = PV(TuneScanInjSI.PV_INJECTION)
self.devices['tunecorr'].cmd_update_reference()
self.data['measure'] = dict()
self.data['measure']['tunex'] = []
self.data['measure']['tuney'] = []
self.data['measure']['injeff'] = []
def _inject(self):
self.devices['injection'].value = 1
def _apply_variation(self):
tunecorr = self.devices['tunecorr']
dnux, dnuy = self.position[0], self.position[1]
tunecorr.delta_tunex = dnux
tunecorr.delta_tuney = dnuy
tunecorr.cmd_apply_delta()
_time.sleep(self.params.wait_tunecorr)
def calc_obj_fun(self):
"""."""
tune = self.devices['tune']
self.data['measure']['tunex'].append(tune.tunex)
self.data['measure']['tuney'].append(tune.tuney)
self._apply_variation()
injeff = []
for _ in range(self.params.nr_pulses):
self._inject()
injeff.append(self.devices['currinfo'].injeff)
_time.sleep(1/self.params.pulse_freq)
self.data['measure']['injeff'].append(injeff)
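        # the optimizer minimizes the objective, so return the negative mean injection
        # efficiency in order to maximize it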
return - _np.mean(injeff)
def initialization(self):
"""."""
self.niter = self.params.nr_iter
self.position = _np.array([0, 0])
self.limits_upper = _np.array(
[self.params.max_delta_tunex, self.params.max_delta_tuney])
self.limits_lower = - self.limits_upper
self.deltas = self.limits_upper.copy()
| mit | -4,653,552,106,490,319,000 | 33.155556 | 71 | 0.56799 | false |
alessandro-aglietti/git-repo | subcmds/abandon.py | 48 | 2034 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from command import Command
from git_command import git
from progress import Progress
class Abandon(Command):
common = True
helpSummary = "Permanently abandon a development branch"
helpUsage = """
%prog <branchname> [<project>...]
This subcommand permanently abandons a development branch by
deleting it (and all its history) from your local repository.
It is equivalent to "git branch -D <branchname>".
"""
def Execute(self, opt, args):
if not args:
self.Usage()
nb = args[0]
if not git.check_ref_format('heads/%s' % nb):
print >>sys.stderr, "error: '%s' is not a valid name" % nb
sys.exit(1)
nb = args[0]
err = []
success = []
all_projects = self.GetProjects(args[1:])
pm = Progress('Abandon %s' % nb, len(all_projects))
for project in all_projects:
pm.update()
status = project.AbandonBranch(nb)
if status is not None:
if status:
success.append(project)
else:
err.append(project)
pm.end()
if err:
for p in err:
print >>sys.stderr,\
"error: %s/: cannot abandon %s" \
% (p.relpath, nb)
sys.exit(1)
elif not success:
print >>sys.stderr, 'error: no project has branch %s' % nb
sys.exit(1)
else:
print >>sys.stderr, 'Abandoned in %d project(s):\n %s' % (
len(success), '\n '.join(p.relpath for p in success))
| apache-2.0 | -6,098,478,468,732,730,000 | 28.057143 | 74 | 0.648476 | false |
apagac/cfme_tests | cfme/test_framework/config.py | 3 | 2560 | """
classes to manage the cfme test framework configuration
"""
import os
import warnings
import attr
import yaycl
class Configuration(object):
"""
holds the current configuration
"""
def __init__(self):
self.yaycl_config = None
def configure(self, config_dir, crypt_key_file=None):
"""
        do the deferred initial loading of the configuration
:param config_dir: path to the folder with configuration files
:param crypt_key_file: optional name of a file holding the key for encrypted
configuration files
:raises: AssertionError if called more than once
if the `utils.conf` api is removed, the loading can be transformed to eager loading
"""
assert self.yaycl_config is None
if crypt_key_file and os.path.exists(crypt_key_file):
self.yaycl_config = yaycl.Config(
config_dir=config_dir,
crypt_key_file=crypt_key_file)
else:
self.yaycl_config = yaycl.Config(config_dir=config_dir)
def get_config(self, name):
"""returns a yaycl config object
:param name: name of the configuration object
"""
if self.yaycl_config is None:
raise RuntimeError('cfme configuration was not initialized')
return getattr(self.yaycl_config, name)
@attr.s
class DeprecatedConfigWrapper(object):
"""
a wrapper that provides the old :code:``utils.conf`` api
"""
configuration = attr.ib()
_warn = attr.ib(default=False)
def __getattr__(self, key):
if self._warn:
warnings.warn(
'the configuration module {} will be deprecated'.format(key),
category=DeprecationWarning,
stacklevel=2,
)
return self.configuration.get_config(key)
@property
def runtime(self):
return self.configuration.runtime
def __getitem__(self, key):
if self._warn:
warnings.warn(
'the configuration module {} will be deprecated'.format(key),
category=DeprecationWarning,
stacklevel=2,
)
return self.configuration.get_config(key)
def __delitem__(self, key):
# used in bad logging
if self._warn:
warnings.warn('clearing configuration is bad', stacklevel=2)
del self.configuration.yaycl_config[key]
# for the initial usage we keep a global object
# later on we want to replace it
global_configuration = Configuration()
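
# Illustrative usage (paths and config names below are hypothetical, not part of this module):
#
#   global_configuration.configure(config_dir='/path/to/conf', crypt_key_file='/path/to/.key')
#   env_conf = global_configuration.get_config('env')
#
# DeprecatedConfigWrapper(global_configuration) then provides the old ``utils.conf``-style
# attribute/item access, optionally emitting DeprecationWarnings when its warn flag is set.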
| gpl-2.0 | 1,778,776,388,008,612,600 | 27.444444 | 91 | 0.615234 | false |
YuriyIlyin/ansible-modules-core | files/find.py | 109 | 11313 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Ruggero Marchei <[email protected]>
# (c) 2015, Brian Coca <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
import os
import stat
import fnmatch
import time
import re
import shutil
DOCUMENTATION = '''
---
module: find
author: Brian Coca (based on Ruggero Marchei's Tidy)
version_added: "2.0"
short_description: return a list of files based on specific criteria
requirements: []
description:
    - Return a list of files based on specific criteria. Multiple criteria are AND'd together.
options:
age:
required: false
default: null
description:
- Select files whose age is equal to or greater than the specified time.
Use a negative age to find files equal to or less than the specified time.
You can choose seconds, minutes, hours, days, or weeks by specifying the
first letter of any of those words (e.g., "1w").
patterns:
required: false
default: '*'
description:
- One or more (shell type) file glob patterns, which restrict the list of files to be returned to
those whose basenames match at least one of the patterns specified. Multiple patterns can be
specified using a list.
contains:
required: false
default: null
description:
- One or more re patterns which should be matched against the file content
paths:
required: true
aliases: [ "name" ]
description:
- List of paths to the file or directory to search. All paths must be fully qualified.
file_type:
required: false
description:
- Type of file to select
choices: [ "file", "directory" ]
default: "file"
recurse:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- If target is a directory, recursively descend into the directory looking for files.
size:
required: false
default: null
description:
- Select files whose size is equal to or greater than the specified size.
Use a negative size to find files equal to or less than the specified size.
Unqualified values are in bytes, but b, k, m, g, and t can be appended to specify
bytes, kilobytes, megabytes, gigabytes, and terabytes, respectively.
Size is not evaluated for directories.
age_stamp:
required: false
default: "mtime"
choices: [ "atime", "mtime", "ctime" ]
description:
- Choose the file property against which we compare age. Default is mtime.
hidden:
required: false
default: "False"
choices: [ True, False ]
description:
- Set this to true to include hidden files, otherwise they'll be ignored.
follow:
required: false
default: "False"
choices: [ True, False ]
description:
- Set this to true to follow symlinks in path for systems with python 2.6+
get_checksum:
required: false
default: "False"
choices: [ True, False ]
description:
- Set this to true to retrieve a file's sha1 checksum
'''
EXAMPLES = '''
# Recursively find /tmp files older than 2 days
- find: paths="/tmp" age="2d" recurse=yes
# Recursively find /tmp files older than 4 weeks and equal or greater than 1 megabyte
- find: paths="/tmp" age="4w" size="1m" recurse=yes
# Recursively find /var/tmp files with last access time greater than 3600 seconds
- find: paths="/var/tmp" age="3600" age_stamp=atime recurse=yes
# find /var/log files equal to or greater than 10 megabytes ending with .log or .log.gz
- find: paths="/var/log" patterns="*.log","*.log.gz" size="10m"
'''
RETURN = '''
files:
description: all matches found with the specified criteria (see stat module for full output of each dictionary)
returned: success
type: list of dictionaries
sample: [
{ path="/var/tmp/test1",
mode=0644,
...,
checksum=16fac7be61a6e4591a33ef4b729c5c3302307523
},
{ path="/var/tmp/test2",
...
},
]
matched:
description: number of matches
returned: success
type: string
sample: 14
examined:
description: number of filesystem objects looked at
returned: success
type: string
sample: 34
'''
def pfilter(f, patterns=None):
'''filter using glob patterns'''
if patterns is None:
return True
for p in patterns:
if fnmatch.fnmatch(f, p):
return True
return False
def agefilter(st, now, age, timestamp):
'''filter files older than age'''
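    # a non-negative age selects files at least abs(age) seconds old (per the chosen
    # timestamp); a negative age selects files no older than abs(age) seconds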
if age is None or \
(age >= 0 and now - st.__getattribute__("st_%s" % timestamp) >= abs(age)) or \
(age < 0 and now - st.__getattribute__("st_%s" % timestamp) <= abs(age)):
return True
return False
def sizefilter(st, size):
'''filter files greater than size'''
if size is None or \
(size >= 0 and st.st_size >= abs(size)) or \
(size < 0 and st.st_size <= abs(size)):
return True
return False
def contentfilter(fsname, pattern):
'''filter files which contain the given expression'''
if pattern is None: return True
try:
f = open(fsname)
prog = re.compile(pattern)
for line in f:
if prog.match (line):
f.close()
return True
f.close()
except:
pass
return False
def statinfo(st):
return {
'mode' : "%04o" % stat.S_IMODE(st.st_mode),
'isdir' : stat.S_ISDIR(st.st_mode),
'ischr' : stat.S_ISCHR(st.st_mode),
'isblk' : stat.S_ISBLK(st.st_mode),
'isreg' : stat.S_ISREG(st.st_mode),
'isfifo' : stat.S_ISFIFO(st.st_mode),
'islnk' : stat.S_ISLNK(st.st_mode),
'issock' : stat.S_ISSOCK(st.st_mode),
'uid' : st.st_uid,
'gid' : st.st_gid,
'size' : st.st_size,
'inode' : st.st_ino,
'dev' : st.st_dev,
'nlink' : st.st_nlink,
'atime' : st.st_atime,
'mtime' : st.st_mtime,
'ctime' : st.st_ctime,
'wusr' : bool(st.st_mode & stat.S_IWUSR),
'rusr' : bool(st.st_mode & stat.S_IRUSR),
'xusr' : bool(st.st_mode & stat.S_IXUSR),
'wgrp' : bool(st.st_mode & stat.S_IWGRP),
'rgrp' : bool(st.st_mode & stat.S_IRGRP),
'xgrp' : bool(st.st_mode & stat.S_IXGRP),
'woth' : bool(st.st_mode & stat.S_IWOTH),
'roth' : bool(st.st_mode & stat.S_IROTH),
'xoth' : bool(st.st_mode & stat.S_IXOTH),
'isuid' : bool(st.st_mode & stat.S_ISUID),
'isgid' : bool(st.st_mode & stat.S_ISGID),
}
def main():
module = AnsibleModule(
argument_spec = dict(
paths = dict(required=True, aliases=['name'], type='list'),
patterns = dict(default=['*'], type='list'),
contains = dict(default=None, type='str'),
file_type = dict(default="file", choices=['file', 'directory'], type='str'),
age = dict(default=None, type='str'),
age_stamp = dict(default="mtime", choices=['atime','mtime','ctime'], type='str'),
size = dict(default=None, type='str'),
recurse = dict(default='no', type='bool'),
hidden = dict(default="False", type='bool'),
follow = dict(default="False", type='bool'),
get_checksum = dict(default="False", type='bool'),
),
)
params = module.params
filelist = []
if params['age'] is None:
age = None
else:
# convert age to seconds:
m = re.match("^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
if m:
age = int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)
else:
module.fail_json(age=params['age'], msg="failed to process age")
if params['size'] is None:
size = None
else:
# convert size to bytes:
m = re.match("^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
bytes_per_unit = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}
if m:
size = int(m.group(1)) * bytes_per_unit.get(m.group(2), 1)
else:
module.fail_json(size=params['size'], msg="failed to process size")
now = time.time()
msg = ''
looked = 0
for npath in params['paths']:
if os.path.isdir(npath):
''' ignore followlinks for python version < 2.6 '''
for root,dirs,files in (sys.version_info < (2,6,0) and os.walk(npath)) or \
os.walk( npath, followlinks=params['follow']):
looked = looked + len(files) + len(dirs)
for fsobj in (files + dirs):
fsname=os.path.normpath(os.path.join(root, fsobj))
if os.path.basename(fsname).startswith('.') and not params['hidden']:
continue
st = os.stat(fsname)
r = {'path': fsname}
if stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
if pfilter(fsobj, params['patterns']) and agefilter(st, now, age, params['age_stamp']):
r.update(statinfo(st))
filelist.append(r)
elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
if pfilter(fsobj, params['patterns']) and \
agefilter(st, now, age, params['age_stamp']) and \
sizefilter(st, size) and \
contentfilter(fsname, params['contains']):
r.update(statinfo(st))
if params['get_checksum']:
r['checksum'] = module.sha1(fsname)
filelist.append(r)
if not params['recurse']:
break
else:
msg+="%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n"
matched = len(filelist)
module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | -3,340,732,131,754,177,500 | 33.809231 | 115 | 0.56024 | false |
ecolitan/fatics | src/timer.py | 1 | 2084 | # Copyright (C) 2010 Wil Mahan <[email protected]>
#
# This file is part of FatICS.
#
# FatICS is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FatICS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with FatICS. If not, see <http://www.gnu.org/licenses/>.
#
import time
from gettext import ngettext
import online
import game
import connection
from config import config
heartbeat_timeout = 5
def heartbeat():
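    # called periodically by the server's event loop (presumably every `heartbeat_timeout`
    # seconds, defined above) to enforce idle timeouts, ping zipseal clients and
    # auto-flag games whose player has run out of time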
# idle timeout
if config.idle_timeout:
now = time.time()
for u in online.online:
if (now - u.session.last_command_time > config.idle_timeout and
not u.is_admin() and
not u.has_title('TD')):
u.session.conn.idle_timeout(config.idle_timeout // 60)
# ping all zipseal clients
# I wonder if it would be better to spread out the pings in time,
# rather than sending a large number of ping requests all at once.
# However, this method is simple, and FICS timeseal 2 seems to do it
# this way (pinging all capable clients every 10 seconds).
for u in online.online:
if u.session.use_zipseal:
u.session.ping()
# forfeit games on time
for g in game.games.values():
if g.gtype == game.PLAYED and g.clock.is_ticking:
u = g.get_user_to_move()
opp = g.get_opp(u)
if opp.vars['autoflag']:
# TODO: send auto-flagging message a la original fics.
g.clock.check_flag(g, g.get_user_side(u))
connection.send_prompts()
# vim: expandtab tabstop=4 softtabstop=4 shiftwidth=4 smarttab autoindent
| agpl-3.0 | 2,757,826,052,728,467,500 | 34.322034 | 77 | 0.668906 | false |
codrut3/tensorflow | tensorflow/python/kernel_tests/garbage_collection_test.py | 82 | 2102 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests which set DEBUG_SAVEALL and assert no garbage was created.
This flag seems to be sticky, so these tests have been isolated for now.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
class NoReferenceCycleTests(test_util.TensorFlowTestCase):
@test_util.assert_no_garbage_created
def testEagerResourceVariables(self):
with context.eager_mode():
resource_variable_ops.ResourceVariable(1.0, name="a")
@test_util.assert_no_garbage_created
def testTensorArrays(self):
with context.eager_mode():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
if __name__ == "__main__":
test.main()
| apache-2.0 | 8,607,779,374,031,355,000 | 32.365079 | 80 | 0.674596 | false |
CloudServer/cinder | cinder/volume/drivers/lvm.py | 1 | 30310 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver for Linux servers running LVM.
"""
import math
import os
import socket
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import units
import six
from cinder.brick.local_dev import lvm as lvm
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder import utils
from cinder.volume import driver
from cinder.volume import utils as volutils
LOG = logging.getLogger(__name__)
# FIXME(jdg): We'll put the lvm_ prefix back on these when we
# move over to using this as the real LVM driver, for now we'll
# rename them so that the config generation utility doesn't barf
# on duplicate entries.
volume_opts = [
cfg.StrOpt('volume_group',
default='cinder-volumes',
help='Name for the VG that will contain exported volumes'),
cfg.IntOpt('lvm_mirrors',
default=0,
help='If >0, create LVs with multiple mirrors. Note that '
'this requires lvm_mirrors + 2 PVs with available space'),
cfg.StrOpt('lvm_type',
default='default',
choices=['default', 'thin'],
help='Type of LVM volumes to deploy'),
cfg.StrOpt('lvm_conf_file',
default='/etc/cinder/lvm.conf',
help='LVM conf file to use for the LVM driver in Cinder; '
'this setting is ignored if the specified file does '
'not exist (You can also specify \'None\' to not use '
'a conf file even if one exists).')
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class LVMVolumeDriver(driver.VolumeDriver):
"""Executes commands relating to Volumes."""
VERSION = '3.0.0'
def __init__(self, vg_obj=None, *args, **kwargs):
# Parent sets db, host, _execute and base config
super(LVMVolumeDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(volume_opts)
self.hostname = socket.gethostname()
self.vg = vg_obj
self.backend_name =\
self.configuration.safe_get('volume_backend_name') or 'LVM'
# Target Driver is what handles data-transport
# Transport specific code should NOT be in
# the driver (control path), this way
# different target drivers can be added (iscsi, FC etc)
target_driver = \
self.target_mapping[self.configuration.safe_get('iscsi_helper')]
LOG.debug('Attempting to initialize LVM driver with the '
'following target_driver: %s',
target_driver)
self.target_driver = importutils.import_object(
target_driver,
configuration=self.configuration,
db=self.db,
executor=self._execute)
self.protocol = self.target_driver.protocol
self.sparse_copy_volume = False
def _sizestr(self, size_in_g):
return '%sg' % size_in_g
def _volume_not_present(self, volume_name):
return self.vg.get_volume(volume_name) is None
def _delete_volume(self, volume, is_snapshot=False):
"""Deletes a logical volume."""
if self.configuration.volume_clear != 'none' and \
self.configuration.lvm_type != 'thin':
self._clear_volume(volume, is_snapshot)
name = volume['name']
if is_snapshot:
name = self._escape_snapshot(volume['name'])
self.vg.delete(name)
def _clear_volume(self, volume, is_snapshot=False):
# zero out old volumes to prevent data leaking between users
# TODO(ja): reclaiming space should be done lazy and low priority
if is_snapshot:
# if the volume to be cleared is a snapshot of another volume
# we need to clear out the volume using the -cow instead of the
# directly volume path. We need to skip this if we are using
# thin provisioned LVs.
# bug# lp1191812
dev_path = self.local_path(volume) + "-cow"
else:
dev_path = self.local_path(volume)
# TODO(jdg): Maybe we could optimize this for snaps by looking at
# the cow table and only overwriting what's necessary?
# for now we're still skipping on snaps due to hang issue
if not os.path.exists(dev_path):
msg = (_('Volume device file path %s does not exist.')
% dev_path)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
size_in_g = volume.get('volume_size') or volume.get('size')
if size_in_g is None:
msg = (_("Size for volume: %s not found, cannot secure delete.")
% volume['id'])
LOG.error(msg)
raise exception.InvalidParameterValue(msg)
# clear_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
vol_sz_in_meg = size_in_g * units.Ki
volutils.clear_volume(
vol_sz_in_meg, dev_path,
volume_clear=self.configuration.volume_clear,
volume_clear_size=self.configuration.volume_clear_size)
def _escape_snapshot(self, snapshot_name):
# Linux LVM reserves name that starts with snapshot, so that
# such volume name can't be created. Mangle it.
if not snapshot_name.startswith('snapshot'):
return snapshot_name
return '_' + snapshot_name
def _create_volume(self, name, size, lvm_type, mirror_count, vg=None):
vg_ref = self.vg
if vg is not None:
vg_ref = vg
vg_ref.create_volume(name, size, lvm_type, mirror_count)
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats")
if self.vg is None:
LOG.warning(_LW('Unable to update stats on non-initialized '
'Volume Group: %s'),
self.configuration.volume_group)
return
self.vg.update_volume_group_info()
data = {}
# Note(zhiteng): These information are driver/backend specific,
# each driver may define these values in its own config options
# or fetch from driver specific configuration file.
data["volume_backend_name"] = self.backend_name
data["vendor_name"] = 'Open Source'
data["driver_version"] = self.VERSION
data["storage_protocol"] = self.protocol
data["pools"] = []
total_capacity = 0
free_capacity = 0
if self.configuration.lvm_mirrors > 0:
total_capacity =\
self.vg.vg_mirror_size(self.configuration.lvm_mirrors)
free_capacity =\
self.vg.vg_mirror_free_space(self.configuration.lvm_mirrors)
provisioned_capacity = round(
float(total_capacity) - float(free_capacity), 2)
elif self.configuration.lvm_type == 'thin':
total_capacity = self.vg.vg_thin_pool_size
free_capacity = self.vg.vg_thin_pool_free_space
provisioned_capacity = self.vg.vg_provisioned_capacity
else:
total_capacity = self.vg.vg_size
free_capacity = self.vg.vg_free_space
provisioned_capacity = round(
float(total_capacity) - float(free_capacity), 2)
location_info = \
('LVMVolumeDriver:%(hostname)s:%(vg)s'
':%(lvm_type)s:%(lvm_mirrors)s' %
{'hostname': self.hostname,
'vg': self.configuration.volume_group,
'lvm_type': self.configuration.lvm_type,
'lvm_mirrors': self.configuration.lvm_mirrors})
thin_enabled = self.configuration.lvm_type == 'thin'
# Calculate the total volumes used by the VG group.
# This includes volumes and snapshots.
total_volumes = len(self.vg.get_volumes())
# Skip enabled_pools setting, treat the whole backend as one pool
# XXX FIXME if multipool support is added to LVM driver.
single_pool = {}
single_pool.update(dict(
pool_name=data["volume_backend_name"],
total_capacity_gb=total_capacity,
free_capacity_gb=free_capacity,
reserved_percentage=self.configuration.reserved_percentage,
location_info=location_info,
QoS_support=False,
provisioned_capacity_gb=provisioned_capacity,
max_over_subscription_ratio=(
self.configuration.max_over_subscription_ratio),
thin_provisioning_support=thin_enabled,
thick_provisioning_support=not thin_enabled,
total_volumes=total_volumes,
filter_function=self.get_filter_function(),
goodness_function=self.get_goodness_function(),
multiattach=True
))
data["pools"].append(single_pool)
self._stats = data
def check_for_setup_error(self):
"""Verify that requirements are in place to use LVM driver."""
if self.vg is None:
root_helper = utils.get_root_helper()
lvm_conf_file = self.configuration.lvm_conf_file
if lvm_conf_file.lower() == 'none':
lvm_conf_file = None
try:
self.vg = lvm.LVM(self.configuration.volume_group,
root_helper,
lvm_type=self.configuration.lvm_type,
executor=self._execute,
lvm_conf=lvm_conf_file)
except exception.VolumeGroupNotFound:
message = (_("Volume Group %s does not exist") %
self.configuration.volume_group)
raise exception.VolumeBackendAPIException(data=message)
vg_list = volutils.get_all_volume_groups(
self.configuration.volume_group)
vg_dict = \
next(vg for vg in vg_list if vg['name'] == self.vg.vg_name)
if vg_dict is None:
message = (_("Volume Group %s does not exist") %
self.configuration.volume_group)
raise exception.VolumeBackendAPIException(data=message)
if self.configuration.lvm_type == 'thin':
# Specific checks for using Thin provisioned LV's
if not volutils.supports_thin_provisioning():
message = _("Thin provisioning not supported "
"on this version of LVM.")
raise exception.VolumeBackendAPIException(data=message)
pool_name = "%s-pool" % self.configuration.volume_group
if self.vg.get_volume(pool_name) is None:
try:
self.vg.create_thin_pool(pool_name)
except processutils.ProcessExecutionError as exc:
exception_message = (_("Failed to create thin pool, "
"error message was: %s")
% six.text_type(exc.stderr))
raise exception.VolumeBackendAPIException(
data=exception_message)
# Enable sparse copy since lvm_type is 'thin'
self.sparse_copy_volume = True
def create_volume(self, volume):
"""Creates a logical volume."""
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
mirror_count)
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update from LVM for migrated volume.
This method should rename the back-end volume name(id) on the
destination host back to its original name(id) on the source host.
:param ctxt: The context used to run the method update_migrated_volume
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
:return model_update to update DB with any needed changes
"""
name_id = None
provider_location = None
if original_volume_status == 'available':
current_name = CONF.volume_name_template % new_volume['id']
original_volume_name = CONF.volume_name_template % volume['id']
try:
self.vg.rename_volume(current_name, original_volume_name)
except processutils.ProcessExecutionError:
LOG.error(_LE('Unable to rename the logical volume '
'for volume: %s'), volume['name'])
# If the rename fails, _name_id should be set to the new
# volume id and provider_location should be set to the
# one from the new volume as well.
name_id = new_volume['_name_id'] or new_volume['id']
provider_location = new_volume['provider_location']
else:
# The back-end will not be renamed.
name_id = new_volume['_name_id'] or new_volume['id']
provider_location = new_volume['provider_location']
return {'_name_id': name_id, 'provider_location': provider_location}
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
self.configuration.lvm_mirrors)
# Some configurations of LVM do not automatically activate
# ThinLVM snapshot LVs.
self.vg.activate_lv(snapshot['name'], is_snapshot=True)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
volutils.copy_volume(self.local_path(snapshot),
self.local_path(volume),
snapshot['volume_size'] * units.Ki,
self.configuration.volume_dd_blocksize,
execute=self._execute,
sparse=self.sparse_copy_volume)
def delete_volume(self, volume):
"""Deletes a logical volume."""
# NOTE(jdg): We don't need to explicitly call
# remove export here because we already did it
# in the manager before we got here.
if self._volume_not_present(volume['name']):
# If the volume isn't present, then don't attempt to delete
return True
if self.vg.lv_has_snapshot(volume['name']):
LOG.error(_LE('Unable to delete due to existing snapshot '
'for volume: %s'), volume['name'])
raise exception.VolumeIsBusy(volume_name=volume['name'])
self._delete_volume(volume)
LOG.info(_LI('Successfully deleted volume: %s'), volume['id'])
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self.vg.create_lv_snapshot(self._escape_snapshot(snapshot['name']),
snapshot['volume_name'],
self.configuration.lvm_type)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
# If the snapshot isn't present, then don't attempt to delete
LOG.warning(_LW("snapshot: %s not found, "
"skipping delete operations"), snapshot['name'])
LOG.info(_LI('Successfully deleted snapshot: %s'), snapshot['id'])
return True
# TODO(yamahata): zeroing out the whole snapshot triggers COW.
# it's quite slow.
self._delete_volume(snapshot, is_snapshot=True)
def local_path(self, volume, vg=None):
if vg is None:
vg = self.configuration.volume_group
# NOTE(vish): stops deprecation warning
escaped_group = vg.replace('-', '--')
escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')
return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
image_utils.fetch_to_raw(context,
image_service,
image_id,
self.local_path(volume),
self.configuration.volume_dd_blocksize,
size=volume['size'])
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
volume_name = src_vref['name']
temp_id = 'tmp-snap-%s' % volume['id']
temp_snapshot = {'volume_name': volume_name,
'size': src_vref['size'],
'volume_size': src_vref['size'],
'name': 'clone-snap-%s' % volume['id'],
'id': temp_id}
self.create_snapshot(temp_snapshot)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
try:
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
mirror_count)
self.vg.activate_lv(temp_snapshot['name'], is_snapshot=True)
volutils.copy_volume(
self.local_path(temp_snapshot),
self.local_path(volume),
src_vref['size'] * units.Ki,
self.configuration.volume_dd_blocksize,
execute=self._execute,
sparse=self.sparse_copy_volume)
finally:
self.delete_snapshot(temp_snapshot)
def clone_image(self, context, volume,
image_location, image_meta,
image_service):
return None, False
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup.volume_id)
temp_snapshot = None
previous_status = volume['previous_status']
if previous_status == 'in-use':
temp_snapshot = self._create_temp_snapshot(context, volume)
backup.temp_snapshot_id = temp_snapshot.id
backup.save()
volume_path = self.local_path(temp_snapshot)
else:
volume_path = self.local_path(volume)
try:
with utils.temporary_chown(volume_path):
with fileutils.file_open(volume_path) as volume_file:
backup_service.backup(backup, volume_file)
finally:
if temp_snapshot:
self._delete_snapshot(context, temp_snapshot)
backup.temp_snapshot_id = None
backup.save()
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
volume_path = self.local_path(volume)
with utils.temporary_chown(volume_path):
with fileutils.file_open(volume_path, 'wb') as volume_file:
backup_service.restore(backup, volume['id'], volume_file)
def get_volume_stats(self, refresh=False):
"""Get volume status.
If 'refresh' is True, run update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def extend_volume(self, volume, new_size):
"""Extend an existing volume's size."""
self.vg.extend_volume(volume['name'],
self._sizestr(new_size))
def manage_existing(self, volume, existing_ref):
"""Manages an existing LV.
Renames the LV to match the expected name for the volume.
Error checking done by manage_existing_get_size is not repeated.
"""
lv_name = existing_ref['source-name']
self.vg.get_volume(lv_name)
if volutils.check_already_managed_volume(self.db, lv_name):
raise exception.ManageExistingAlreadyManaged(volume_ref=lv_name)
# Attempt to rename the LV to match the OpenStack internal name.
try:
self.vg.rename_volume(lv_name, volume['name'])
except processutils.ProcessExecutionError as exc:
exception_message = (_("Failed to rename logical volume %(name)s, "
"error message was: %(err_msg)s")
% {'name': lv_name,
'err_msg': exc.stderr})
raise exception.VolumeBackendAPIException(
data=exception_message)
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of an existing LV for manage_existing.
existing_ref is a dictionary of the form:
{'source-name': <name of LV>}
"""
# Check that the reference is valid
if 'source-name' not in existing_ref:
reason = _('Reference must contain source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
lv_name = existing_ref['source-name']
lv = self.vg.get_volume(lv_name)
# Raise an exception if we didn't find a suitable LV.
if not lv:
kwargs = {'existing_ref': lv_name,
'reason': 'Specified logical volume does not exist.'}
raise exception.ManageExistingInvalidReference(**kwargs)
# LV size is returned in gigabytes. Attempt to parse size as a float
# and round up to the next integer.
try:
lv_size = int(math.ceil(float(lv['size'])))
except ValueError:
exception_message = (_("Failed to manage existing volume "
"%(name)s, because reported size %(size)s "
"was not a floating-point number.")
% {'name': lv_name,
'size': lv['size']})
raise exception.VolumeBackendAPIException(
data=exception_message)
return lv_size
def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0):
"""Optimize the migration if the destination is on the same server.
If the specified host is another back-end on the same server, and
the volume is not attached, we can do the migration locally without
going through iSCSI.
"""
false_ret = (False, None)
if volume['status'] != 'available':
return false_ret
if 'location_info' not in host['capabilities']:
return false_ret
info = host['capabilities']['location_info']
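        # location_info is expected to have the form published in _update_volume_stats:
        #   LVMVolumeDriver:<hostname>:<volume group>:<lvm_type>:<lvm_mirrors>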
try:
(dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\
info.split(':')
lvm_mirrors = int(lvm_mirrors)
except ValueError:
return false_ret
if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname):
return false_ret
if dest_vg != self.vg.vg_name:
vg_list = volutils.get_all_volume_groups()
try:
next(vg for vg in vg_list if vg['name'] == dest_vg)
except StopIteration:
LOG.error(_LE("Destination Volume Group %s does not exist"),
dest_vg)
return false_ret
helper = utils.get_root_helper()
lvm_conf_file = self.configuration.lvm_conf_file
if lvm_conf_file.lower() == 'none':
lvm_conf_file = None
dest_vg_ref = lvm.LVM(dest_vg, helper,
lvm_type=lvm_type,
executor=self._execute,
lvm_conf=lvm_conf_file)
self._create_volume(volume['name'],
self._sizestr(volume['size']),
lvm_type,
lvm_mirrors,
dest_vg_ref)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
size_in_mb = int(volume['size']) * units.Ki
volutils.copy_volume(self.local_path(volume),
self.local_path(volume, vg=dest_vg),
size_in_mb,
self.configuration.volume_dd_blocksize,
execute=self._execute,
sparse=self.sparse_copy_volume)
self._delete_volume(volume)
return (True, None)
else:
message = (_("Refusing to migrate volume ID: %(id)s. Please "
"check your configuration because source and "
"destination are the same Volume Group: %(name)s.") %
{'id': volume['id'], 'name': self.vg.vg_name})
LOG.exception(message)
raise exception.VolumeBackendAPIException(data=message)
def get_pool(self, volume):
return self.backend_name
# ####### Interface methods for DataPath (Target Driver) ########
def ensure_export(self, context, volume):
volume_path = "/dev/%s/%s" % (self.configuration.volume_group,
volume['name'])
model_update = \
self.target_driver.ensure_export(context, volume, volume_path)
return model_update
def create_export(self, context, volume, connector, vg=None):
if vg is None:
vg = self.configuration.volume_group
volume_path = "/dev/%s/%s" % (vg, volume['name'])
export_info = self.target_driver.create_export(
context,
volume,
volume_path)
return {'provider_location': export_info['location'],
'provider_auth': export_info['auth'], }
def remove_export(self, context, volume):
self.target_driver.remove_export(context, volume)
def initialize_connection(self, volume, connector):
return self.target_driver.initialize_connection(volume, connector)
def validate_connector(self, connector):
return self.target_driver.validate_connector(connector)
def terminate_connection(self, volume, connector, **kwargs):
return self.target_driver.terminate_connection(volume, connector,
**kwargs)
class LVMISCSIDriver(LVMVolumeDriver):
"""Empty class designation for LVMISCSI.
Since we've decoupled the inheritance of iSCSI and LVM we
don't really need this class any longer. We do however want
to keep it (at least for now) for back compat in driver naming.
"""
def __init__(self, *args, **kwargs):
super(LVMISCSIDriver, self).__init__(*args, **kwargs)
LOG.warning(_LW('LVMISCSIDriver is deprecated, you should '
'now just use LVMVolumeDriver and specify '
'iscsi_helper for the target driver you '
'wish to use.'))
class LVMISERDriver(LVMVolumeDriver):
"""Empty class designation for LVMISER.
Since we've decoupled the inheritance of data path in LVM we
don't really need this class any longer. We do however want
to keep it (at least for now) for back compat in driver naming.
"""
def __init__(self, *args, **kwargs):
super(LVMISERDriver, self).__init__(*args, **kwargs)
LOG.warning(_LW('LVMISERDriver is deprecated, you should '
'now just use LVMVolumeDriver and specify '
'iscsi_helper for the target driver you '
'wish to use. In order to enable iser, please '
'set iscsi_protocol with the value iser.'))
LOG.debug('Attempting to initialize LVM driver with the '
'following target_driver: '
'cinder.volume.targets.iser.ISERTgtAdm')
self.target_driver = importutils.import_object(
'cinder.volume.targets.iser.ISERTgtAdm',
configuration=self.configuration,
db=self.db,
executor=self._execute)
| apache-2.0 | -7,004,652,998,710,291,000 | 40.864641 | 79 | 0.57034 | false |
LukeHoersten/ansible | lib/ansible/inventory/host.py | 9 | 3582 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.inventory.group import Group
from ansible.utils.vars import combine_vars
__all__ = ['Host']
class Host:
''' a single ansible host '''
#__slots__ = [ 'name', 'vars', 'groups' ]
def __getstate__(self):
return self.serialize()
def __setstate__(self, data):
return self.deserialize(data)
def __eq__(self, other):
return self.name == other.name
def serialize(self):
groups = []
for group in self.groups:
groups.append(group.serialize())
return dict(
name=self.name,
vars=self.vars.copy(),
ipv4_address=self.ipv4_address,
ipv6_address=self.ipv6_address,
gathered_facts=self._gathered_facts,
groups=groups,
)
def deserialize(self, data):
self.__init__()
self.name = data.get('name')
self.vars = data.get('vars', dict())
self.ipv4_address = data.get('ipv4_address', '')
self.ipv6_address = data.get('ipv6_address', '')
groups = data.get('groups', [])
for group_data in groups:
g = Group()
g.deserialize(group_data)
self.groups.append(g)
def __init__(self, name=None, port=None):
self.name = name
self.vars = {}
self.groups = []
self.ipv4_address = name
self.ipv6_address = name
if port and port != C.DEFAULT_REMOTE_PORT:
self.set_variable('ansible_ssh_port', int(port))
self._gathered_facts = False
def __repr__(self):
return self.get_name()
def get_name(self):
return self.name
@property
def gathered_facts(self):
return self._gathered_facts
def set_gathered_facts(self, gathered):
self._gathered_facts = gathered
def add_group(self, group):
self.groups.append(group)
def set_variable(self, key, value):
self.vars[key]=value
def get_groups(self):
groups = {}
for g in self.groups:
groups[g.name] = g
ancestors = g.get_ancestors()
for a in ancestors:
groups[a.name] = a
return groups.values()
def get_vars(self):
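        # group vars are merged from the least to the most specific group (sorted by
        # depth), so child groups override their ancestors; host vars override all groups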
results = {}
groups = self.get_groups()
for group in sorted(groups, key=lambda g: g.depth):
results = combine_vars(results, group.get_vars())
results = combine_vars(results, self.vars)
results['inventory_hostname'] = self.name
results['inventory_hostname_short'] = self.name.split('.')[0]
results['group_names'] = sorted([ g.name for g in groups if g.name != 'all'])
return results
| gpl-3.0 | 4,942,403,477,836,256,000 | 27.428571 | 85 | 0.600782 | false |
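A minimal usage sketch of the Host/Group variable precedence implemented by get_vars() in the row above; this is not part of the Ansible source, and the host name, group name and variables are illustrative only:
from ansible.inventory.host import Host
from ansible.inventory.group import Group
host = Host(name='web1.example.com')
group = Group(name='webservers')
group.set_variable('http_port', 80)      # group-level default
host.add_group(group)
host.set_variable('http_port', 8080)     # host var overrides the group var
print(host.get_vars()['http_port'])      # -> 8080
print(host.get_vars()['group_names'])    # -> ['webservers']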
anedos/sqlalchemy-migrate-egg | migrate/changeset/databases/oracle.py | 6 | 3748 | """
Oracle database specific implementations of changeset classes.
"""
import sqlalchemy as sa
from sqlalchemy.databases import oracle as sa_base
from migrate import exceptions
from migrate.changeset import ansisql, SQLA_06
if not SQLA_06:
OracleSchemaGenerator = sa_base.OracleSchemaGenerator
else:
OracleSchemaGenerator = sa_base.OracleDDLCompiler
class OracleColumnGenerator(OracleSchemaGenerator, ansisql.ANSIColumnGenerator):
pass
class OracleColumnDropper(ansisql.ANSIColumnDropper):
pass
class OracleSchemaChanger(OracleSchemaGenerator, ansisql.ANSISchemaChanger):
def get_column_specification(self, column, **kwargs):
# Ignore the NOT NULL generated
override_nullable = kwargs.pop('override_nullable', None)
if override_nullable:
orig = column.nullable
column.nullable = True
ret = super(OracleSchemaChanger, self).get_column_specification(
column, **kwargs)
if override_nullable:
column.nullable = orig
return ret
def visit_column(self, delta):
keys = delta.keys()
if 'name' in keys:
self._run_subvisit(delta,
self._visit_column_name,
start_alter=False)
if len(set(('type', 'nullable', 'server_default')).intersection(keys)):
self._run_subvisit(delta,
self._visit_column_change,
start_alter=False)
def _visit_column_change(self, table, column, delta):
# Oracle cannot drop a default once created, but it can set it
# to null. We'll do that if default=None
# http://forums.oracle.com/forums/message.jspa?messageID=1273234#1273234
dropdefault_hack = (column.server_default is None \
and 'server_default' in delta.keys())
# Oracle apparently doesn't like it when we say "not null" if
# the column's already not null. Fudge it, so we don't need a
# new function
notnull_hack = ((not column.nullable) \
and ('nullable' not in delta.keys()))
# We need to specify NULL if we're removing a NOT NULL
# constraint
null_hack = (column.nullable and ('nullable' in delta.keys()))
if dropdefault_hack:
column.server_default = sa.PassiveDefault(sa.sql.null())
if notnull_hack:
column.nullable = True
colspec = self.get_column_specification(column,
override_nullable=null_hack)
if null_hack:
colspec += ' NULL'
if notnull_hack:
column.nullable = False
if dropdefault_hack:
column.server_default = None
self.start_alter_table(table)
self.append("MODIFY (")
self.append(colspec)
self.append(")")
class OracleConstraintCommon(object):
def get_constraint_name(self, cons):
# Oracle constraints can't guess their name like other DBs
if not cons.name:
raise exceptions.NotSupportedError(
"Oracle constraint names must be explicitly stated")
return cons.name
class OracleConstraintGenerator(OracleConstraintCommon,
ansisql.ANSIConstraintGenerator):
pass
class OracleConstraintDropper(OracleConstraintCommon,
ansisql.ANSIConstraintDropper):
pass
class OracleDialect(ansisql.ANSIDialect):
columngenerator = OracleColumnGenerator
columndropper = OracleColumnDropper
schemachanger = OracleSchemaChanger
constraintgenerator = OracleConstraintGenerator
constraintdropper = OracleConstraintDropper
| mit | 735,734,766,488,244,700 | 32.765766 | 80 | 0.633938 | false |
kevclarx/ansible | lib/ansible/module_utils/dellos9.py | 51 | 5696 | #
# (c) 2015 Peter Sprygada, <[email protected]>
# (c) 2017 Red Hat, Inc
#
# Copyright (c) 2016 Dell Inc.
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network_common import to_list, ComplexList
from ansible.module_utils.connection import exec_command
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
_DEVICE_CONFIGS = {}
WARNING_PROMPTS_RE = [
r"[\r\n]?\[confirm yes/no\]:\s?$",
r"[\r\n]?\[y/n\]:\s?$",
r"[\r\n]?\[yes/no\]:\s?$"
]
dellos9_argument_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
'timeout': dict(type='int'),
'provider': dict(type='dict'),
}
def check_args(module, warnings):
provider = module.params['provider'] or {}
for key in dellos9_argument_spec:
if key != 'provider' and module.params[key]:
warnings.append('argument %s has been deprecated and will be '
'removed in a future version' % key)
if provider:
for param in ('auth_pass', 'password'):
if provider.get(param):
module.no_log_values.update(return_values(provider[param]))
def get_config(module, flags=[]):
cmd = 'show running-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return _DEVICE_CONFIGS[cmd]
except KeyError:
rc, out, err = exec_command(module, cmd)
if rc != 0:
module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_or_strict'))
cfg = to_text(out, errors='surrogate_or_strict').strip()
_DEVICE_CONFIGS[cmd] = cfg
return cfg
def to_commands(module, commands):
spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
transform = ComplexList(spec, module)
return transform(commands)
def run_commands(module, commands, check_rc=True):
responses = list()
commands = to_commands(module, to_list(commands))
for cmd in commands:
cmd = module.jsonify(cmd)
rc, out, err = exec_command(module, cmd)
if check_rc and rc != 0:
module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), rc=rc)
responses.append(to_text(out, errors='surrogate_or_strict'))
return responses
def load_config(module, commands):
rc, out, err = exec_command(module, 'configure terminal')
if rc != 0:
module.fail_json(msg='unable to enter configuration mode', err=to_text(err, errors='surrogate_or_strict'))
for command in to_list(commands):
if command == 'end':
continue
cmd = {'command': command, 'prompt': WARNING_PROMPTS_RE, 'answer': 'yes'}
rc, out, err = exec_command(module, module.jsonify(cmd))
if rc != 0:
module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), command=command, rc=rc)
exec_command(module, 'end')
def get_sublevel_config(running_config, module):
contents = list()
current_config_contents = list()
running_config = NetworkConfig(contents=running_config, indent=1)
obj = running_config.get_object(module.params['parents'])
if obj:
contents = obj.children
contents[:0] = module.params['parents']
indent = 0
for c in contents:
if isinstance(c, str):
current_config_contents.append(c.rjust(len(c) + indent, ' '))
if isinstance(c, ConfigLine):
current_config_contents.append(c.raw)
indent = 1
sublevel_config = '\n'.join(current_config_contents)
return sublevel_config
| gpl-3.0 | 4,789,225,412,063,048,000 | 38.013699 | 120 | 0.670997 | false |
mpenning/exscript | src/Exscript/util/weakmethod.py | 7 | 3634 | # Copyright (C) 2007-2010 Samuel Abels.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Weak references to bound and unbound methods.
"""
import weakref
class DeadMethodCalled(Exception):
"""
Raised by L{WeakMethod} if it is called when the referenced object
is already dead.
"""
pass
class WeakMethod(object):
"""
Do not create this class directly; use L{ref()} instead.
"""
__slots__ = 'name', 'callback'
def __init__(self, name, callback):
"""
Constructor. Do not use directly, use L{ref()} instead.
"""
self.name = name
self.callback = callback
def _dead(self, ref):
if self.callback is not None:
self.callback(self)
def get_function(self):
"""
Returns the referenced method/function if it is still alive.
Returns None otherwise.
@rtype: callable|None
@return: The referenced function if it is still alive.
"""
raise NotImplementedError()
def isalive(self):
"""
Returns True if the referenced function is still alive, False
otherwise.
@rtype: bool
@return: Whether the referenced function is still alive.
"""
return self.get_function() is not None
def __call__(self, *args, **kwargs):
"""
Proxied to the underlying function or method. Raises L{DeadMethodCalled}
if the referenced function is dead.
@rtype: object
@return: Whatever the referenced function returned.
"""
method = self.get_function()
if method is None:
raise DeadMethodCalled('method called on dead object ' + self.name)
        return method(*args, **kwargs)
class _WeakMethodBound(WeakMethod):
__slots__ = 'name', 'callback', 'f', 'c'
def __init__(self, f, callback):
name = f.__self__.__class__.__name__ + '.' + f.__func__.__name__
WeakMethod.__init__(self, name, callback)
self.f = f.__func__
self.c = weakref.ref(f.__self__, self._dead)
def get_function(self):
cls = self.c()
if cls is None:
return None
return getattr(cls, self.f.__name__)
class _WeakMethodFree(WeakMethod):
__slots__ = 'name', 'callback', 'f'
def __init__(self, f, callback):
WeakMethod.__init__(self, f.__class__.__name__, callback)
self.f = weakref.ref(f, self._dead)
def get_function(self):
return self.f()
def ref(function, callback = None):
"""
Returns a weak reference to the given method or function.
If the callback argument is not None, it is called as soon
    as the referenced function is garbage-collected.
@type function: callable
@param function: The function to reference.
@type callback: callable
@param callback: Called when the function dies.
"""
try:
function.__func__
except AttributeError:
return _WeakMethodFree(function, callback)
return _WeakMethodBound(function, callback)
| gpl-2.0 | -2,197,405,746,789,327,600 | 30.059829 | 80 | 0.625206 | false |
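A brief usage sketch of the ref() helper in the row above (not part of the Exscript source); the Watcher class and on_dead callback are made-up names for illustration:
from Exscript.util.weakmethod import ref, DeadMethodCalled
class Watcher(object):
    def ping(self):
        print('ping')
def on_dead(weak_ref):
    print('%s is gone' % weak_ref.name)
w = Watcher()
weak_ping = ref(w.ping, on_dead)   # weak reference to the bound method
weak_ping()                        # prints 'ping' while w is alive
del w                              # on_dead() fires once w is collected
# calling weak_ping() now would raise DeadMethodCalled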
makermade/arm_android-21_arm-linux-androideabi-4.8 | lib/python2.7/distutils/tests/test_dist.py | 83 | 15708 | # -*- coding: utf8 -*-
"""Tests for distutils.dist."""
import os
import StringIO
import sys
import unittest
import warnings
import textwrap
from distutils.dist import Distribution, fix_help_options
from distutils.cmd import Command
import distutils.dist
from test.test_support import TESTFN, captured_stdout, run_unittest
from distutils.tests import support
class test_dist(Command):
"""Sample distutils extension command."""
user_options = [
("sample-option=", "S", "help text"),
]
def initialize_options(self):
self.sample_option = None
class TestDistribution(Distribution):
"""Distribution subclasses that avoids the default search for
configuration files.
The ._config_files attribute must be set before
.parse_config_files() is called.
"""
def find_config_files(self):
return self._config_files
class DistributionTestCase(support.TempdirManager,
support.LoggingSilencer,
support.EnvironGuard,
unittest.TestCase):
def setUp(self):
super(DistributionTestCase, self).setUp()
self.argv = sys.argv, sys.argv[:]
del sys.argv[1:]
def tearDown(self):
sys.argv = self.argv[0]
sys.argv[:] = self.argv[1]
super(DistributionTestCase, self).tearDown()
def create_distribution(self, configfiles=()):
d = TestDistribution()
d._config_files = configfiles
d.parse_config_files()
d.parse_command_line()
return d
def test_debug_mode(self):
with open(TESTFN, "w") as f:
f.write("[global]\n")
f.write("command_packages = foo.bar, splat")
files = [TESTFN]
sys.argv.append("build")
with captured_stdout() as stdout:
self.create_distribution(files)
stdout.seek(0)
self.assertEqual(stdout.read(), '')
distutils.dist.DEBUG = True
try:
with captured_stdout() as stdout:
self.create_distribution(files)
stdout.seek(0)
self.assertEqual(stdout.read(), '')
finally:
distutils.dist.DEBUG = False
def test_command_packages_unspecified(self):
sys.argv.append("build")
d = self.create_distribution()
self.assertEqual(d.get_command_packages(), ["distutils.command"])
def test_command_packages_cmdline(self):
from distutils.tests.test_dist import test_dist
sys.argv.extend(["--command-packages",
"foo.bar,distutils.tests",
"test_dist",
"-Ssometext",
])
d = self.create_distribution()
# let's actually try to load our test command:
self.assertEqual(d.get_command_packages(),
["distutils.command", "foo.bar", "distutils.tests"])
cmd = d.get_command_obj("test_dist")
self.assertIsInstance(cmd, test_dist)
self.assertEqual(cmd.sample_option, "sometext")
def test_command_packages_configfile(self):
sys.argv.append("build")
self.addCleanup(os.unlink, TESTFN)
f = open(TESTFN, "w")
try:
print >> f, "[global]"
print >> f, "command_packages = foo.bar, splat"
finally:
f.close()
d = self.create_distribution([TESTFN])
self.assertEqual(d.get_command_packages(),
["distutils.command", "foo.bar", "splat"])
# ensure command line overrides config:
sys.argv[1:] = ["--command-packages", "spork", "build"]
d = self.create_distribution([TESTFN])
self.assertEqual(d.get_command_packages(),
["distutils.command", "spork"])
# Setting --command-packages to '' should cause the default to
# be used even if a config file specified something else:
sys.argv[1:] = ["--command-packages", "", "build"]
d = self.create_distribution([TESTFN])
self.assertEqual(d.get_command_packages(), ["distutils.command"])
def test_write_pkg_file(self):
# Check DistributionMetadata handling of Unicode fields
tmp_dir = self.mkdtemp()
my_file = os.path.join(tmp_dir, 'f')
klass = Distribution
dist = klass(attrs={'author': u'Mister Café',
'name': 'my.package',
'maintainer': u'Café Junior',
'description': u'Café torréfié',
'long_description': u'Héhéhé'})
# let's make sure the file can be written
# with Unicode fields. they are encoded with
# PKG_INFO_ENCODING
dist.metadata.write_pkg_file(open(my_file, 'w'))
# regular ascii is of course always usable
dist = klass(attrs={'author': 'Mister Cafe',
'name': 'my.package',
'maintainer': 'Cafe Junior',
'description': 'Cafe torrefie',
'long_description': 'Hehehe'})
my_file2 = os.path.join(tmp_dir, 'f2')
dist.metadata.write_pkg_file(open(my_file2, 'w'))
def test_empty_options(self):
# an empty options dictionary should not stay in the
# list of attributes
# catching warnings
warns = []
def _warn(msg):
warns.append(msg)
self.addCleanup(setattr, warnings, 'warn', warnings.warn)
warnings.warn = _warn
dist = Distribution(attrs={'author': 'xxx', 'name': 'xxx',
'version': 'xxx', 'url': 'xxxx',
'options': {}})
self.assertEqual(len(warns), 0)
self.assertNotIn('options', dir(dist))
def test_finalize_options(self):
attrs = {'keywords': 'one,two',
'platforms': 'one,two'}
dist = Distribution(attrs=attrs)
dist.finalize_options()
# finalize_option splits platforms and keywords
self.assertEqual(dist.metadata.platforms, ['one', 'two'])
self.assertEqual(dist.metadata.keywords, ['one', 'two'])
def test_get_command_packages(self):
dist = Distribution()
self.assertEqual(dist.command_packages, None)
cmds = dist.get_command_packages()
self.assertEqual(cmds, ['distutils.command'])
self.assertEqual(dist.command_packages,
['distutils.command'])
dist.command_packages = 'one,two'
cmds = dist.get_command_packages()
self.assertEqual(cmds, ['distutils.command', 'one', 'two'])
def test_announce(self):
# make sure the level is known
dist = Distribution()
args = ('ok',)
kwargs = {'level': 'ok2'}
self.assertRaises(ValueError, dist.announce, args, kwargs)
def test_find_config_files_disable(self):
# Ticket #1180: Allow user to disable their home config file.
temp_home = self.mkdtemp()
if os.name == 'posix':
user_filename = os.path.join(temp_home, ".pydistutils.cfg")
else:
user_filename = os.path.join(temp_home, "pydistutils.cfg")
with open(user_filename, 'w') as f:
f.write('[distutils]\n')
def _expander(path):
return temp_home
old_expander = os.path.expanduser
os.path.expanduser = _expander
try:
d = distutils.dist.Distribution()
all_files = d.find_config_files()
d = distutils.dist.Distribution(attrs={'script_args':
['--no-user-cfg']})
files = d.find_config_files()
finally:
os.path.expanduser = old_expander
# make sure --no-user-cfg disables the user cfg file
self.assertEqual(len(all_files)-1, len(files))
class MetadataTestCase(support.TempdirManager, support.EnvironGuard,
unittest.TestCase):
def setUp(self):
super(MetadataTestCase, self).setUp()
self.argv = sys.argv, sys.argv[:]
def tearDown(self):
sys.argv = self.argv[0]
sys.argv[:] = self.argv[1]
super(MetadataTestCase, self).tearDown()
def test_classifier(self):
attrs = {'name': 'Boa', 'version': '3.0',
'classifiers': ['Programming Language :: Python :: 3']}
dist = Distribution(attrs)
meta = self.format_metadata(dist)
self.assertIn('Metadata-Version: 1.1', meta)
def test_download_url(self):
attrs = {'name': 'Boa', 'version': '3.0',
'download_url': 'http://example.org/boa'}
dist = Distribution(attrs)
meta = self.format_metadata(dist)
self.assertIn('Metadata-Version: 1.1', meta)
def test_long_description(self):
long_desc = textwrap.dedent("""\
example::
We start here
and continue here
and end here.""")
attrs = {"name": "package",
"version": "1.0",
"long_description": long_desc}
dist = Distribution(attrs)
meta = self.format_metadata(dist)
meta = meta.replace('\n' + 8 * ' ', '\n')
self.assertIn(long_desc, meta)
def test_simple_metadata(self):
attrs = {"name": "package",
"version": "1.0"}
dist = Distribution(attrs)
meta = self.format_metadata(dist)
self.assertIn("Metadata-Version: 1.0", meta)
self.assertNotIn("provides:", meta.lower())
self.assertNotIn("requires:", meta.lower())
self.assertNotIn("obsoletes:", meta.lower())
def test_provides(self):
attrs = {"name": "package",
"version": "1.0",
"provides": ["package", "package.sub"]}
dist = Distribution(attrs)
self.assertEqual(dist.metadata.get_provides(),
["package", "package.sub"])
self.assertEqual(dist.get_provides(),
["package", "package.sub"])
meta = self.format_metadata(dist)
self.assertIn("Metadata-Version: 1.1", meta)
self.assertNotIn("requires:", meta.lower())
self.assertNotIn("obsoletes:", meta.lower())
def test_provides_illegal(self):
self.assertRaises(ValueError, Distribution,
{"name": "package",
"version": "1.0",
"provides": ["my.pkg (splat)"]})
def test_requires(self):
attrs = {"name": "package",
"version": "1.0",
"requires": ["other", "another (==1.0)"]}
dist = Distribution(attrs)
self.assertEqual(dist.metadata.get_requires(),
["other", "another (==1.0)"])
self.assertEqual(dist.get_requires(),
["other", "another (==1.0)"])
meta = self.format_metadata(dist)
self.assertIn("Metadata-Version: 1.1", meta)
self.assertNotIn("provides:", meta.lower())
self.assertIn("Requires: other", meta)
self.assertIn("Requires: another (==1.0)", meta)
self.assertNotIn("obsoletes:", meta.lower())
def test_requires_illegal(self):
self.assertRaises(ValueError, Distribution,
{"name": "package",
"version": "1.0",
"requires": ["my.pkg (splat)"]})
def test_obsoletes(self):
attrs = {"name": "package",
"version": "1.0",
"obsoletes": ["other", "another (<1.0)"]}
dist = Distribution(attrs)
self.assertEqual(dist.metadata.get_obsoletes(),
["other", "another (<1.0)"])
self.assertEqual(dist.get_obsoletes(),
["other", "another (<1.0)"])
meta = self.format_metadata(dist)
self.assertIn("Metadata-Version: 1.1", meta)
self.assertNotIn("provides:", meta.lower())
self.assertNotIn("requires:", meta.lower())
self.assertIn("Obsoletes: other", meta)
self.assertIn("Obsoletes: another (<1.0)", meta)
def test_obsoletes_illegal(self):
self.assertRaises(ValueError, Distribution,
{"name": "package",
"version": "1.0",
"obsoletes": ["my.pkg (splat)"]})
def format_metadata(self, dist):
sio = StringIO.StringIO()
dist.metadata.write_pkg_file(sio)
return sio.getvalue()
def test_custom_pydistutils(self):
# fixes #2166
# make sure pydistutils.cfg is found
if os.name == 'posix':
user_filename = ".pydistutils.cfg"
else:
user_filename = "pydistutils.cfg"
temp_dir = self.mkdtemp()
user_filename = os.path.join(temp_dir, user_filename)
f = open(user_filename, 'w')
try:
f.write('.')
finally:
f.close()
try:
dist = Distribution()
# linux-style
if sys.platform in ('linux', 'darwin'):
os.environ['HOME'] = temp_dir
files = dist.find_config_files()
self.assertIn(user_filename, files)
# win32-style
if sys.platform == 'win32':
# home drive should be found
os.environ['HOME'] = temp_dir
files = dist.find_config_files()
self.assertIn(user_filename, files,
'%r not found in %r' % (user_filename, files))
finally:
os.remove(user_filename)
def test_fix_help_options(self):
help_tuples = [('a', 'b', 'c', 'd'), (1, 2, 3, 4)]
fancy_options = fix_help_options(help_tuples)
self.assertEqual(fancy_options[0], ('a', 'b', 'c'))
self.assertEqual(fancy_options[1], (1, 2, 3))
def test_show_help(self):
# smoke test, just makes sure some help is displayed
dist = Distribution()
sys.argv = []
dist.help = 1
dist.script_name = 'setup.py'
with captured_stdout() as s:
dist.parse_command_line()
output = [line for line in s.getvalue().split('\n')
if line.strip() != '']
self.assertTrue(output)
def test_read_metadata(self):
attrs = {"name": "package",
"version": "1.0",
"long_description": "desc",
"description": "xxx",
"download_url": "http://example.com",
"keywords": ['one', 'two'],
"requires": ['foo']}
dist = Distribution(attrs)
metadata = dist.metadata
# write it then reloads it
PKG_INFO = StringIO.StringIO()
metadata.write_pkg_file(PKG_INFO)
PKG_INFO.seek(0)
metadata.read_pkg_file(PKG_INFO)
self.assertEqual(metadata.name, "package")
self.assertEqual(metadata.version, "1.0")
self.assertEqual(metadata.description, "xxx")
self.assertEqual(metadata.download_url, 'http://example.com')
self.assertEqual(metadata.keywords, ['one', 'two'])
self.assertEqual(metadata.platforms, ['UNKNOWN'])
self.assertEqual(metadata.obsoletes, None)
self.assertEqual(metadata.requires, ['foo'])
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DistributionTestCase))
suite.addTest(unittest.makeSuite(MetadataTestCase))
return suite
if __name__ == "__main__":
run_unittest(test_suite())
| gpl-2.0 | 984,821,889,246,496,300 | 34.280899 | 77 | 0.545987 | false |
suneel0101/django-easyrest | setup.py | 1 | 1212 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright <2013> Suneel Chakravorty <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup
setup(name='django-easyrest',
version='0.0.2',
description='An ultra-lightweight read-only REST api framework for Django',
author='Suneel Chakravorty',
author_email='[email protected]',
url='https://github.com/suneel0101/django-restroom',
packages=['easyrest'],
install_requires=[
"django",
],
package_data={
'django-restroom': ['LICENSE', '*.md'],
})
| mit | 160,302,950,986,840,640 | 35.727273 | 81 | 0.69802 | false |
loic/django | django/conf/locale/pt/formats.py | 504 | 1717 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j \d\e F \d\e Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'j \d\e F \d\e Y à\s H:i'
YEAR_MONTH_FORMAT = r'F \d\e Y'
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y', # '2006-10-25', '25/10/2006', '25/10/06'
# '%d de %b de %Y', '%d de %b, %Y', # '25 de Out de 2006', '25 Out, 2006'
# '%d de %B de %Y', '%d de %B, %Y', # '25 de Outubro de 2006', '25 de Outubro, 2006'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause | 6,750,704,937,212,569,000 | 40.853659 | 90 | 0.538462 | false |
wangjun/simDownloader | simDHT.py | 5 | 7047 | #!/usr/bin/env python
# encoding: utf-8
import socket
import threading
from hashlib import sha1
from random import randint
from struct import unpack
from socket import inet_ntoa
from threading import Timer, Thread
from time import sleep
from collections import deque
from bencode import bencode, bdecode
from Queue import Queue
import simMetadata
BOOTSTRAP_NODES = (
("router.bittorrent.com", 6881),
("dht.transmissionbt.com", 6881),
("router.utorrent.com", 6881)
)
TID_LENGTH = 2
RE_JOIN_DHT_INTERVAL = 3
TOKEN_LENGTH = 2
def entropy(length):
return "".join(chr(randint(0, 255)) for _ in xrange(length))
def random_id():
h = sha1()
h.update(entropy(20))
return h.digest()
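# Note: find_node responses carry "compact node info": a concatenation of
# 26-byte entries, each holding a 20-byte node ID, a 4-byte IPv4 address and
# a 2-byte big-endian port, which decode_nodes() below unpacks.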
def decode_nodes(nodes):
n = []
length = len(nodes)
if (length % 26) != 0:
return n
for i in range(0, length, 26):
nid = nodes[i:i+20]
ip = inet_ntoa(nodes[i+20:i+24])
port = unpack("!H", nodes[i+24:i+26])[0]
n.append((nid, ip, port))
return n
def timer(t, f):
Timer(t, f).start()
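# get_neighbor() forges a node ID whose first `end` bytes match the target, so
# remote nodes treat this crawler as a close neighbour and keep talking to it.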
def get_neighbor(target, nid, end=10):
return target[:end]+nid[end:]
class KNode(object):
def __init__(self, nid, ip, port):
self.nid = nid
self.ip = ip
self.port = port
class DHTClient(Thread):
def __init__(self, max_node_qsize):
Thread.__init__(self)
self.setDaemon(True)
self.max_node_qsize = max_node_qsize
self.nid = random_id()
self.nodes = deque(maxlen=max_node_qsize)
def send_krpc(self, msg, address):
try:
self.ufd.sendto(bencode(msg), address)
except Exception:
pass
def send_find_node(self, address, nid=None):
nid = get_neighbor(nid, self.nid) if nid else self.nid
tid = entropy(TID_LENGTH)
msg = {
"t": tid,
"y": "q",
"q": "find_node",
"a": {
"id": nid,
"target": random_id()
}
}
self.send_krpc(msg, address)
def join_DHT(self):
for address in BOOTSTRAP_NODES:
self.send_find_node(address)
def re_join_DHT(self):
if len(self.nodes) == 0:
self.join_DHT()
timer(RE_JOIN_DHT_INTERVAL, self.re_join_DHT)
def auto_send_find_node(self):
wait = 1.0 / self.max_node_qsize
while True:
try:
node = self.nodes.popleft()
self.send_find_node((node.ip, node.port), node.nid)
except IndexError:
pass
sleep(wait)
def process_find_node_response(self, msg, address):
nodes = decode_nodes(msg["r"]["nodes"])
for node in nodes:
(nid, ip, port) = node
if len(nid) != 20: continue
if ip == self.bind_ip: continue
n = KNode(nid, ip, port)
self.nodes.append(n)
class DHTServer(DHTClient):
def __init__(self, master, bind_ip, bind_port, max_node_qsize):
DHTClient.__init__(self, max_node_qsize)
self.master = master
self.bind_ip = bind_ip
self.bind_port = bind_port
self.process_request_actions = {
"get_peers": self.on_get_peers_request,
"announce_peer": self.on_announce_peer_request,
}
self.ufd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.ufd.bind((self.bind_ip, self.bind_port))
timer(RE_JOIN_DHT_INTERVAL, self.re_join_DHT)
def run(self):
self.re_join_DHT()
while True:
try:
(data, address) = self.ufd.recvfrom(65536)
msg = bdecode(data)
self.on_message(msg, address)
except Exception:
pass
def on_message(self, msg, address):
try:
if msg["y"] == "r":
if msg["r"].has_key("nodes"):
self.process_find_node_response(msg, address)
elif msg["y"] == "q":
try:
self.process_request_actions[msg["q"]](msg, address)
except KeyError:
self.play_dead(msg, address)
except KeyError:
pass
def on_get_peers_request(self, msg, address):
try:
infohash = msg["a"]["info_hash"]
tid = msg["t"]
nid = msg["a"]["id"]
token = infohash[:TOKEN_LENGTH]
msg = {
"t": tid,
"y": "r",
"r": {
"id": get_neighbor(infohash, self.nid),
"nodes": "",
"token": token
}
}
self.send_krpc(msg, address)
except KeyError:
pass
def on_announce_peer_request(self, msg, address):
try:
infohash = msg["a"]["info_hash"]
token = msg["a"]["token"]
nid = msg["a"]["id"]
tid = msg["t"]
if infohash[:TOKEN_LENGTH] == token:
if msg["a"].has_key("implied_port ") and msg["a"]["implied_port "] != 0:
port = address[1]
else:
port = msg["a"]["port"]
self.master.log(infohash, (address[0], port))
except Exception:
print 'error'
pass
finally:
self.ok(msg, address)
def play_dead(self, msg, address):
try:
tid = msg["t"]
msg = {
"t": tid,
"y": "e",
"e": [202, "Server Error"]
}
self.send_krpc(msg, address)
except KeyError:
pass
def ok(self, msg, address):
try:
tid = msg["t"]
nid = msg["a"]["id"]
msg = {
"t": tid,
"y": "r",
"r": {
"id": get_neighbor(nid, self.nid)
}
}
self.send_krpc(msg, address)
except KeyError:
pass
class Master(Thread):
def __init__(self):
Thread.__init__(self)
self.setDaemon(True)
self.queue = Queue()
def run(self):
while True:
self.downloadMetadata()
def log(self, infohash, address=None):
self.queue.put([address, infohash])
def downloadMetadata(self):
        # spawn up to 100 threads to download metadata
for i in xrange(0, 100):
if self.queue.qsize() == 0:
sleep(1)
continue
announce = self.queue.get()
t = threading.Thread(target = simMetadata.download_metadata, args = (announce[0], announce[1]))
t.setDaemon(True)
t.start()
if __name__ == "__main__":
    # A bigger max_node_qsize uses more bandwidth and crawls faster.
master = Master()
master.start()
dht = DHTServer(master, "0.0.0.0", 6881, max_node_qsize=200)
dht.start()
dht.auto_send_find_node() | gpl-2.0 | 4,601,781,395,026,338,300 | 25.200743 | 107 | 0.496949 | false |
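A small, hypothetical round-trip check for the decode_nodes() helper in the row above (not part of simDHT.py); it assumes the file is importable as simDHT, packs one 26-byte compact entry by hand and decodes it back:
from socket import inet_aton
from struct import pack
from simDHT import decode_nodes
entry = "A" * 20 + inet_aton("1.2.3.4") + pack("!H", 6881)
print decode_nodes(entry)   # [('AAAAAAAAAAAAAAAAAAAA', '1.2.3.4', 6881)]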
unreal666/outwiker | src/outwiker/pages/search/htmlreport.py | 3 | 2955 | # -*- coding: UTF-8 -*-
import html
from outwiker.gui.guiconfig import GeneralGuiConfig
from outwiker.core.system import getOS
class HtmlReport (object):
"""
    Class for generating the HTML used to display the found pages
"""
def __init__(self, pages, searchPhrase, searchTags, application):
"""
        pages - list of found pages
        searchPhrase - the phrase being searched for
        searchTags - tags taking part in the search
"""
self.__pages = pages
self.__searchPhrase = searchPhrase
self.__searchTags = searchTags
self.__application = application
def generate(self):
"""
        Generate the report
"""
shell = u"""<html>
<head>
<meta http-equiv='Content-Type' content='text/html; charset=UTF-8'/>
</head>
<body>
<ol type='1'>
%s
</ol>
</body>
</html>"""
items = u""
for page in self.__pages:
items += self.generataPageView(page)
result = shell % items
return result
def generataPageView(self, page):
"""
        Return the view for a single page
"""
item = u'<b><a href="page://%s">%s</a></b>' % (
html.escape(page.subpath, True), page.title)
if page.parent.parent is not None:
item += u" (%s)" % page.parent.subpath
item += u"<br>" + self.generatePageInfo(page) + "<p></p>"
result = u"<li>%s</li>\n" % item
return result
def generatePageInfo(self, page):
tags = self.generatePageTags(page)
date = self.generateDate(page)
pageinfo = u"<font size='-1'>{tags}<br>{date}</font>".format(
tags=tags, date=date)
return pageinfo
def generateDate(self, page):
config = GeneralGuiConfig(self.__application.config)
dateStr = page.datetime.strftime(config.dateTimeFormat.value)
result = _(u"Last modified date: {0}").format(dateStr)
return result
def generatePageTags(self, page):
"""
        Create the list of tags for the page
"""
result = _(u"Tags: ")
for tag in page.tags:
result += self.generageTagView(tag) + u", "
if result.endswith(", "):
result = result[: -2]
return result
def generageTagView(self, tag):
"""
        Formatting for a single tag
"""
if tag in self.__searchTags:
style = u"font-weight: bold; background-color: rgb(255,255,36);"
return u"<span style='{style}'>{tag}</span>".format(style=style, tag=tag)
else:
return tag
| gpl-3.0 | -7,295,819,895,699,967,000 | 26.767677 | 85 | 0.534376 | false |
twitchyliquid64/misc-scripts | s3tool/boto/ec2/elb/__init__.py | 110 | 32520 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
This module provides an interface to the Elastic Compute Cloud (EC2)
load balancing service from AWS.
"""
from boto.connection import AWSQueryConnection
from boto.ec2.instanceinfo import InstanceInfo
from boto.ec2.elb.loadbalancer import LoadBalancer, LoadBalancerZones
from boto.ec2.elb.instancestate import InstanceState
from boto.ec2.elb.healthcheck import HealthCheck
from boto.regioninfo import RegionInfo, get_regions, load_regions
import boto
from boto.compat import six
RegionData = load_regions().get('elasticloadbalancing', {})
def regions():
"""
Get all available regions for the ELB service.
:rtype: list
:return: A list of :class:`boto.RegionInfo` instances
"""
return get_regions('elasticloadbalancing', connection_cls=ELBConnection)
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.ec2.elb.ELBConnection`.
:param str region_name: The name of the region to connect to.
:rtype: :class:`boto.ec2.ELBConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
class ELBConnection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'elb_version', '2012-06-01')
DefaultRegionName = boto.config.get('Boto', 'elb_region_name', 'us-east-1')
DefaultRegionEndpoint = boto.config.get(
'Boto', 'elb_region_endpoint',
'elasticloadbalancing.us-east-1.amazonaws.com')
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
security_token=None, validate_certs=True, profile_name=None):
"""
Init method to create a new connection to EC2 Load Balancing Service.
.. note:: The region argument is overridden by the region specified in
the boto configuration file.
"""
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
super(ELBConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
security_token,
validate_certs=validate_certs,
profile_name=profile_name)
def _required_auth_capability(self):
return ['hmac-v4']
def build_list_params(self, params, items, label):
if isinstance(items, six.string_types):
items = [items]
for index, item in enumerate(items):
params[label % (index + 1)] = item
def get_all_load_balancers(self, load_balancer_names=None, marker=None):
"""
Retrieve all load balancers associated with your account.
:type load_balancer_names: list
:keyword load_balancer_names: An optional list of load balancer names.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:rtype: :py:class:`boto.resultset.ResultSet`
:return: A ResultSet containing instances of
:class:`boto.ec2.elb.loadbalancer.LoadBalancer`
"""
params = {}
if load_balancer_names:
self.build_list_params(params, load_balancer_names,
'LoadBalancerNames.member.%d')
if marker:
params['Marker'] = marker
return self.get_list('DescribeLoadBalancers', params,
[('member', LoadBalancer)])
def create_load_balancer(self, name, zones, listeners=None, subnets=None,
security_groups=None, scheme='internet-facing',
complex_listeners=None):
"""
Create a new load balancer for your account. By default the load
balancer will be created in EC2. To create a load balancer inside a
VPC, parameter zones must be set to None and subnets must not be None.
The load balancer will be automatically created under the VPC that
contains the subnet(s) specified.
:type name: string
:param name: The mnemonic name associated with the new load balancer
:type zones: List of strings
:param zones: The names of the availability zone(s) to add.
:type listeners: List of tuples
:param listeners: Each tuple contains three or four values,
(LoadBalancerPortNumber, InstancePortNumber, Protocol,
[SSLCertificateId]) where LoadBalancerPortNumber and
InstancePortNumber are integer values between 1 and 65535,
            Protocol is a string containing either 'TCP', 'SSL', 'HTTP', or
            'HTTPS'; SSLCertificateID is the ARN of an AWS IAM
certificate, and must be specified when doing HTTPS.
:type subnets: list of strings
:param subnets: A list of subnet IDs in your VPC to attach to
your LoadBalancer.
:type security_groups: list of strings
:param security_groups: The security groups assigned to your
LoadBalancer within your VPC.
:type scheme: string
:param scheme: The type of a LoadBalancer. By default, Elastic
Load Balancing creates an internet-facing LoadBalancer with
a publicly resolvable DNS name, which resolves to public IP
addresses.
Specify the value internal for this option to create an
internal LoadBalancer with a DNS name that resolves to
private IP addresses.
This option is only available for LoadBalancers attached
to an Amazon VPC.
:type complex_listeners: List of tuples
:param complex_listeners: Each tuple contains four or five values,
(LoadBalancerPortNumber, InstancePortNumber, Protocol,
InstanceProtocol, SSLCertificateId).
Where:
- LoadBalancerPortNumber and InstancePortNumber are integer
values between 1 and 65535
- Protocol and InstanceProtocol is a string containing
either 'TCP',
'SSL', 'HTTP', or 'HTTPS'
- SSLCertificateId is the ARN of an SSL certificate loaded into
AWS IAM
:rtype: :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
:return: The newly created
:class:`boto.ec2.elb.loadbalancer.LoadBalancer`
"""
if not listeners and not complex_listeners:
# Must specify one of the two options
return None
params = {'LoadBalancerName': name,
'Scheme': scheme}
# Handle legacy listeners
if listeners:
for index, listener in enumerate(listeners):
i = index + 1
protocol = listener[2].upper()
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
if protocol == 'HTTPS' or protocol == 'SSL':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[3]
# Handle the full listeners
if complex_listeners:
for index, listener in enumerate(complex_listeners):
i = index + 1
protocol = listener[2].upper()
InstanceProtocol = listener[3].upper()
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
params['Listeners.member.%d.InstanceProtocol' % i] = listener[3]
if protocol == 'HTTPS' or protocol == 'SSL':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[4]
if zones:
self.build_list_params(params, zones, 'AvailabilityZones.member.%d')
if subnets:
self.build_list_params(params, subnets, 'Subnets.member.%d')
if security_groups:
self.build_list_params(params, security_groups,
'SecurityGroups.member.%d')
load_balancer = self.get_object('CreateLoadBalancer',
params, LoadBalancer)
load_balancer.name = name
load_balancer.listeners = listeners
load_balancer.availability_zones = zones
load_balancer.subnets = subnets
load_balancer.security_groups = security_groups
return load_balancer
def create_load_balancer_listeners(self, name, listeners=None,
complex_listeners=None):
"""
Creates a Listener (or group of listeners) for an existing
Load Balancer
:type name: string
:param name: The name of the load balancer to create the listeners for
:type listeners: List of tuples
:param listeners: Each tuple contains three or four values,
(LoadBalancerPortNumber, InstancePortNumber, Protocol,
[SSLCertificateId]) where LoadBalancerPortNumber and
InstancePortNumber are integer values between 1 and 65535,
            Protocol is a string containing either 'TCP', 'SSL', 'HTTP', or
            'HTTPS'; SSLCertificateID is the ARN of an AWS IAM
certificate, and must be specified when doing HTTPS.
:type complex_listeners: List of tuples
:param complex_listeners: Each tuple contains four or five values,
(LoadBalancerPortNumber, InstancePortNumber, Protocol,
InstanceProtocol, SSLCertificateId).
Where:
- LoadBalancerPortNumber and InstancePortNumber are integer
values between 1 and 65535
                - Protocol and InstanceProtocol are strings containing
either 'TCP',
'SSL', 'HTTP', or 'HTTPS'
- SSLCertificateId is the ARN of an SSL certificate loaded into
AWS IAM
:return: The status of the request
"""
if not listeners and not complex_listeners:
# Must specify one of the two options
return None
params = {'LoadBalancerName': name}
# Handle the simple listeners
if listeners:
for index, listener in enumerate(listeners):
i = index + 1
protocol = listener[2].upper()
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
if protocol == 'HTTPS' or protocol == 'SSL':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[3]
# Handle the full listeners
if complex_listeners:
for index, listener in enumerate(complex_listeners):
i = index + 1
protocol = listener[2].upper()
InstanceProtocol = listener[3].upper()
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
params['Listeners.member.%d.InstanceProtocol' % i] = listener[3]
if protocol == 'HTTPS' or protocol == 'SSL':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[4]
return self.get_status('CreateLoadBalancerListeners', params)
def delete_load_balancer(self, name):
"""
Delete a Load Balancer from your account.
:type name: string
:param name: The name of the Load Balancer to delete
"""
params = {'LoadBalancerName': name}
return self.get_status('DeleteLoadBalancer', params)
def delete_load_balancer_listeners(self, name, ports):
"""
Deletes a load balancer listener (or group of listeners)
:type name: string
:param name: The name of the load balancer to create the listeners for
:type ports: List int
:param ports: Each int represents the port on the ELB to be removed
:return: The status of the request
"""
params = {'LoadBalancerName': name}
for index, port in enumerate(ports):
params['LoadBalancerPorts.member.%d' % (index + 1)] = port
return self.get_status('DeleteLoadBalancerListeners', params)
def enable_availability_zones(self, load_balancer_name, zones_to_add):
"""
Add availability zones to an existing Load Balancer
All zones must be in the same region as the Load Balancer
Adding zones that are already registered with the Load Balancer
has no effect.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type zones: List of strings
:param zones: The name of the zone(s) to add.
:rtype: List of strings
:return: An updated list of zones for this Load Balancer.
"""
params = {'LoadBalancerName': load_balancer_name}
self.build_list_params(params, zones_to_add,
'AvailabilityZones.member.%d')
obj = self.get_object('EnableAvailabilityZonesForLoadBalancer',
params, LoadBalancerZones)
return obj.zones
def disable_availability_zones(self, load_balancer_name, zones_to_remove):
"""
Remove availability zones from an existing Load Balancer.
All zones must be in the same region as the Load Balancer.
Removing zones that are not registered with the Load Balancer
has no effect.
You cannot remove all zones from an Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type zones: List of strings
:param zones: The name of the zone(s) to remove.
:rtype: List of strings
:return: An updated list of zones for this Load Balancer.
"""
params = {'LoadBalancerName': load_balancer_name}
self.build_list_params(params, zones_to_remove,
'AvailabilityZones.member.%d')
obj = self.get_object('DisableAvailabilityZonesForLoadBalancer',
params, LoadBalancerZones)
return obj.zones
def modify_lb_attribute(self, load_balancer_name, attribute, value):
"""Changes an attribute of a Load Balancer
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type attribute: string
:param attribute: The attribute you wish to change.
* crossZoneLoadBalancing - Boolean (true)
* connectingSettings - :py:class:`ConnectionSettingAttribute` instance
* accessLog - :py:class:`AccessLogAttribute` instance
* connectionDraining - :py:class:`ConnectionDrainingAttribute` instance
:type value: string
:param value: The new value for the attribute
:rtype: bool
:return: Whether the operation succeeded or not
"""
bool_reqs = ('crosszoneloadbalancing',)
if attribute.lower() in bool_reqs:
if isinstance(value, bool):
if value:
value = 'true'
else:
value = 'false'
params = {'LoadBalancerName': load_balancer_name}
if attribute.lower() == 'crosszoneloadbalancing':
params['LoadBalancerAttributes.CrossZoneLoadBalancing.Enabled'
] = value
elif attribute.lower() == 'accesslog':
params['LoadBalancerAttributes.AccessLog.Enabled'] = \
value.enabled and 'true' or 'false'
params['LoadBalancerAttributes.AccessLog.S3BucketName'] = \
value.s3_bucket_name
params['LoadBalancerAttributes.AccessLog.S3BucketPrefix'] = \
value.s3_bucket_prefix
params['LoadBalancerAttributes.AccessLog.EmitInterval'] = \
value.emit_interval
elif attribute.lower() == 'connectiondraining':
params['LoadBalancerAttributes.ConnectionDraining.Enabled'] = \
value.enabled and 'true' or 'false'
params['LoadBalancerAttributes.ConnectionDraining.Timeout'] = \
value.timeout
elif attribute.lower() == 'connectingsettings':
params['LoadBalancerAttributes.ConnectionSettings.IdleTimeout'] = \
value.idle_timeout
else:
raise ValueError('InvalidAttribute', attribute)
return self.get_status('ModifyLoadBalancerAttributes', params,
verb='GET')
def get_all_lb_attributes(self, load_balancer_name):
"""Gets all Attributes of a Load Balancer
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:rtype: boto.ec2.elb.attribute.LbAttributes
:return: The attribute object of the ELB.
"""
from boto.ec2.elb.attributes import LbAttributes
params = {'LoadBalancerName': load_balancer_name}
return self.get_object('DescribeLoadBalancerAttributes',
params, LbAttributes)
def get_lb_attribute(self, load_balancer_name, attribute):
"""Gets an attribute of a Load Balancer
This will make an EC2 call for each method call.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type attribute: string
:param attribute: The attribute you wish to see.
* accessLog - :py:class:`AccessLogAttribute` instance
* crossZoneLoadBalancing - Boolean
* connectingSettings - :py:class:`ConnectionSettingAttribute` instance
* connectionDraining - :py:class:`ConnectionDrainingAttribute`
instance
:rtype: Attribute dependent
:return: The new value for the attribute
"""
attributes = self.get_all_lb_attributes(load_balancer_name)
if attribute.lower() == 'accesslog':
return attributes.access_log
if attribute.lower() == 'crosszoneloadbalancing':
return attributes.cross_zone_load_balancing.enabled
if attribute.lower() == 'connectiondraining':
return attributes.connection_draining
if attribute.lower() == 'connectingsettings':
return attributes.connecting_settings
return None
def register_instances(self, load_balancer_name, instances):
"""
Add new Instances to an existing Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type instances: List of strings
:param instances: The instance ID's of the EC2 instances to add.
:rtype: List of strings
:return: An updated list of instances for this Load Balancer.
"""
params = {'LoadBalancerName': load_balancer_name}
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
return self.get_list('RegisterInstancesWithLoadBalancer',
params, [('member', InstanceInfo)])
def deregister_instances(self, load_balancer_name, instances):
"""
Remove Instances from an existing Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type instances: List of strings
:param instances: The instance ID's of the EC2 instances to remove.
:rtype: List of strings
:return: An updated list of instances for this Load Balancer.
"""
params = {'LoadBalancerName': load_balancer_name}
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
return self.get_list('DeregisterInstancesFromLoadBalancer',
params, [('member', InstanceInfo)])
def describe_instance_health(self, load_balancer_name, instances=None):
"""
Get current state of all Instances registered to an Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type instances: List of strings
:param instances: The instance ID's of the EC2 instances
to return status for. If not provided,
the state of all instances will be returned.
:rtype: List of :class:`boto.ec2.elb.instancestate.InstanceState`
:return: list of state info for instances in this Load Balancer.
"""
params = {'LoadBalancerName': load_balancer_name}
if instances:
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
return self.get_list('DescribeInstanceHealth', params,
[('member', InstanceState)])
def configure_health_check(self, name, health_check):
"""
Define a health check for the EndPoints.
:type name: string
:param name: The mnemonic name associated with the load balancer
:type health_check: :class:`boto.ec2.elb.healthcheck.HealthCheck`
:param health_check: A HealthCheck object populated with the desired
values.
:rtype: :class:`boto.ec2.elb.healthcheck.HealthCheck`
:return: The updated :class:`boto.ec2.elb.healthcheck.HealthCheck`
"""
params = {'LoadBalancerName': name,
'HealthCheck.Timeout': health_check.timeout,
'HealthCheck.Target': health_check.target,
'HealthCheck.Interval': health_check.interval,
'HealthCheck.UnhealthyThreshold': health_check.unhealthy_threshold,
'HealthCheck.HealthyThreshold': health_check.healthy_threshold}
return self.get_object('ConfigureHealthCheck', params, HealthCheck)
def set_lb_listener_SSL_certificate(self, lb_name, lb_port,
ssl_certificate_id):
"""
Sets the certificate that terminates the specified listener's SSL
connections. The specified certificate replaces any prior certificate
that was used on the same LoadBalancer and port.
"""
params = {'LoadBalancerName': lb_name,
'LoadBalancerPort': lb_port,
'SSLCertificateId': ssl_certificate_id}
return self.get_status('SetLoadBalancerListenerSSLCertificate', params)
def create_app_cookie_stickiness_policy(self, name, lb_name, policy_name):
"""
Generates a stickiness policy with sticky session lifetimes that follow
that of an application-generated cookie. This policy can only be
associated with HTTP listeners.
This policy is similar to the policy created by
CreateLBCookieStickinessPolicy, except that the lifetime of the special
Elastic Load Balancing cookie follows the lifetime of the
application-generated cookie specified in the policy configuration. The
load balancer only inserts a new stickiness cookie when the application
response includes a new application cookie.
If the application cookie is explicitly removed or expires, the session
stops being sticky until a new application cookie is issued.
"""
params = {'CookieName': name,
'LoadBalancerName': lb_name,
'PolicyName': policy_name}
return self.get_status('CreateAppCookieStickinessPolicy', params)
def create_lb_cookie_stickiness_policy(self, cookie_expiration_period,
lb_name, policy_name):
"""
Generates a stickiness policy with sticky session lifetimes controlled
by the lifetime of the browser (user-agent) or a specified expiration
period. This policy can only be associated only with HTTP listeners.
When a load balancer implements this policy, the load balancer uses a
special cookie to track the backend server instance for each request.
When the load balancer receives a request, it first checks to see if
this cookie is present in the request. If so, the load balancer sends
the request to the application server specified in the cookie. If not,
the load balancer sends the request to a server that is chosen based on
the existing load balancing algorithm.
A cookie is inserted into the response for binding subsequent requests
from the same user to that server. The validity of the cookie is based
on the cookie expiration time, which is specified in the policy
configuration.
None may be passed for cookie_expiration_period.
"""
params = {'LoadBalancerName': lb_name,
'PolicyName': policy_name}
if cookie_expiration_period is not None:
params['CookieExpirationPeriod'] = cookie_expiration_period
return self.get_status('CreateLBCookieStickinessPolicy', params)
def create_lb_policy(self, lb_name, policy_name, policy_type,
policy_attributes):
"""
Creates a new policy that contains the necessary attributes
depending on the policy type. Policies are settings that are
saved for your load balancer and that can be applied to the
front-end listener, or the back-end application server.
"""
params = {'LoadBalancerName': lb_name,
'PolicyName': policy_name,
'PolicyTypeName': policy_type}
        if policy_attributes:
            for index, (name, value) in enumerate(six.iteritems(policy_attributes), 1):
                params['PolicyAttributes.member.%d.AttributeName' % index] = name
                params['PolicyAttributes.member.%d.AttributeValue' % index] = value
        else:
            params['PolicyAttributes'] = ''
return self.get_status('CreateLoadBalancerPolicy', params)
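    # Hedged example (comments only): the attribute names accepted here depend
    # on the policy type as defined by the ELB API, not by boto. A hypothetical
    # proxy-protocol policy for a back-end port might look like:
    #
    #   conn.create_lb_policy('my-lb', 'enable-proxy-protocol',
    #                         'ProxyProtocolPolicyType', {'ProxyProtocol': True})
    #   conn.set_lb_policies_of_backend_server('my-lb', 80,
    #                                          ['enable-proxy-protocol'])
    #
    # All names above are placeholders.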
def delete_lb_policy(self, lb_name, policy_name):
"""
Deletes a policy from the LoadBalancer. The specified policy must not
be enabled for any listeners.
"""
params = {'LoadBalancerName': lb_name,
'PolicyName': policy_name}
return self.get_status('DeleteLoadBalancerPolicy', params)
def set_lb_policies_of_listener(self, lb_name, lb_port, policies):
"""
Associates, updates, or disables a policy with a listener on the load
balancer. Currently only zero (0) or one (1) policy can be associated
with a listener.
"""
params = {'LoadBalancerName': lb_name,
'LoadBalancerPort': lb_port}
if len(policies):
self.build_list_params(params, policies, 'PolicyNames.member.%d')
else:
params['PolicyNames'] = ''
return self.get_status('SetLoadBalancerPoliciesOfListener', params)
def set_lb_policies_of_backend_server(self, lb_name, instance_port,
policies):
"""
Replaces the current set of policies associated with a port on which
the back-end server is listening with a new set of policies.
"""
params = {'LoadBalancerName': lb_name,
'InstancePort': instance_port}
if policies:
self.build_list_params(params, policies, 'PolicyNames.member.%d')
else:
params['PolicyNames'] = ''
return self.get_status('SetLoadBalancerPoliciesForBackendServer',
params)
def apply_security_groups_to_lb(self, name, security_groups):
"""
Associates one or more security groups with the load balancer.
The provided security groups will override any currently applied
security groups.
:type name: string
:param name: The name of the Load Balancer
:type security_groups: List of strings
:param security_groups: The name of the security group(s) to add.
:rtype: List of strings
:return: An updated list of security groups for this Load Balancer.
"""
params = {'LoadBalancerName': name}
self.build_list_params(params, security_groups,
'SecurityGroups.member.%d')
return self.get_list('ApplySecurityGroupsToLoadBalancer',
params, None)
def attach_lb_to_subnets(self, name, subnets):
"""
Attaches load balancer to one or more subnets.
Attaching subnets that are already registered with the
Load Balancer has no effect.
:type name: string
:param name: The name of the Load Balancer
:type subnets: List of strings
:param subnets: The name of the subnet(s) to add.
:rtype: List of strings
:return: An updated list of subnets for this Load Balancer.
"""
params = {'LoadBalancerName': name}
self.build_list_params(params, subnets,
'Subnets.member.%d')
return self.get_list('AttachLoadBalancerToSubnets',
params, None)
def detach_lb_from_subnets(self, name, subnets):
"""
Detaches load balancer from one or more subnets.
:type name: string
:param name: The name of the Load Balancer
:type subnets: List of strings
:param subnets: The name of the subnet(s) to detach.
:rtype: List of strings
:return: An updated list of subnets for this Load Balancer.
"""
params = {'LoadBalancerName': name}
self.build_list_params(params, subnets,
'Subnets.member.%d')
return self.get_list('DetachLoadBalancerFromSubnets',
params, None)
| mit | -5,693,845,129,955,167,000 | 41.902375 | 85 | 0.617589 | false |
pcameron/javafuse-read-only | fs/build/mx4j/tools/jython/jmxUtils.py | 7 | 5355 | """ Copyright (C) MX4J.
All rights reserved.
This software is distributed under the terms of the MX4J License version 1.0.
See the terms of the MX4J License in the documentation provided with this software.
author <a href="mailto:[email protected]">Carlos Quiroz</a>
version $Revision: 1.1 $
Adapted by Martin Fuzzey for testing use.
For this we need to communicate with a REMOTE server (the original code
always ran in the same process as the JMX server and was intended to be
used as helpers for python scripts in the python MBean).
"""
import sys,java
sys.add_package("javax.management")
sys.add_package("javax.management.loading");
sys.add_package("javax.management.modelmbean");
sys.add_package("javax.management.monitor");
sys.add_package("javax.management.openmbean");
sys.add_package("javax.management.relation");
sys.add_package("javax.management.remote");
sys.add_package("javax.management.remote.rmi");
sys.add_package("javax.management.timer");
from javax.management import *
from javax.management.loading import *
from javax.management.modelmbean import *
from javax.management.monitor import *
from javax.management.openmbean import *
from javax.management.relation import *
from javax.management.remote import *
from javax.management.remote.rmi import *
from javax.management.timer import *
class ServerConnection:
def __init__(self, connection) :
self.server = connection
def createProxy(self, objectname) :
"""
Creates a proxy for the named MBean in this server.
The objectname may either be an instance of javax.management.ObjectName
or a string
The MBeans attributes and methods may be then accessed directly as in :
proxy = server.createProxy("myDomain:myType=toto")
print "val=",proxy.val
proxy.doSomething()
"""
if (isinstance(objectname, ObjectName) == 0) :
objectname = ObjectName(objectname)
return Proxy(self.server, objectname)
def getMBeanNames(self, query="*:*"):
"""
Returns a list of all the available MBeans in the server. The optional
query parameter will filter the list by objectname
"""
names = []
for n in self.server.queryNames(ObjectName(query), None) :
names.append(n) ;# To python collection
return names
def getInstanceNames(self, classname, query="*:*"):
"""
Returns a list of all the available MBeans in the server which are instances
of classname. It accepts a query parameter to filter by objectname
"""
return [x for x in self.getMBeanNames(query) if self.server.isInstanceOf(x, classname)]
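# Hedged usage sketch (not part of the original helpers): when run remotely,
# a ServerConnection can wrap a standard javax.management.remote connection.
# Assuming a JMX agent is listening on localhost:9999:
#
#   url = JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:9999/jmxrmi")
#   connection = JMXConnectorFactory.connect(url).getMBeanServerConnection()
#   server = ServerConnection(connection)
#   for name in server.getMBeanNames("java.lang:*"):
#       print name
#
# The service URL and object-name query are illustrative placeholders.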
class OperationProxy:
def __init__(self, server, objectname, opInfo):
self.server = server
self.objectname = objectname
self.operation = opInfo.name
self.sig = []
for s in opInfo.signature :
self.sig.append(s.type)
def invoke(self, *args):
if (len(args) != len(self.sig)) :
raise "argument list / sig mismatch" + str(args) + str(self.sig)
# Manually map Boolean
nargs = []
for i in range(len(args)) :
arg = args[i]
if (self.sig[i] == "boolean") :
arg = java.lang.Boolean(arg)
nargs.append(arg)
return self.server.invoke(self.objectname, self.operation, nargs, self.sig)
class Proxy:
def __init__(self, server, objectname):
# Need the syntax below to avoid infinite recursion betweed setattr + getattr
self.__dict__["server"] = server
self.__dict__["objectname"] = objectname
info = self.server.getMBeanInfo(objectname)
for o in info.operations:
self.__dict__[o.name] = OperationProxy(self.server, objectname, o).invoke
# print "op:", o.name
def __getattr__(self, name):
return self.server.getAttribute(self.objectname, name)
def __setattr__(self, name, value):
from javax.management import Attribute
return self.server.setAttribute(self.objectname, Attribute(name, value))
def __repr__(self):
return "Proxy of MBean: %s " % (self.__dict__["objectname"], )
def invoke(self, name, arguments=None, types=None):
return self.server.invoke(self.objectname, name, arguments, types)
def addListener(self, l, filter=None, handback=None) :
self.server.addNotificationListener(self.objectname, l, filter, handback)
class proxy (Proxy): # For backwards compatibility
pass
def mbeans(query=None):
"""
Returns a list of all the available MBeans in the server. The optional
query parameter will filter the list by objectname
"""
if query:
return server.queryMBeans(ObjectName(query), None)
else:
return server.queryMBeans(None, None)
def instances(classname, query=None):
"""
Returns a list of all the available MBeans in the server which are instances
of classname. It accepts a query parameter to filter by objectname
"""
return [x for x in mbeans(query) if server.isInstanceOf(x.getObjectName(),classname)]
| gpl-3.0 | -7,534,343,453,667,322,000 | 35.447552 | 95 | 0.646685 | false |
baidu/Paddle | python/paddle/fluid/tests/unittests/test_py_reader_push_pop.py | 7 | 3749 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle.fluid as fluid
import numpy as np
from threading import Thread
def feed_data(feed_queue, inputs):
for in_data in inputs:
feed_queue.push(in_data)
class TestPyReader(unittest.TestCase):
def setUp(self):
self.capacity = 10
self.batch_size_min = 10
self.batch_size_max = 20
self.shapes = [(-1, 3, 2, 1), (-1, 1)]
self.lod_levels = [0, 0]
self.dtypes = ['float32', 'int64']
self.iterations = 20
def test_single_thread_main(self):
self.main(use_thread=False)
def test_multiple_thread_main(self):
self.main(use_thread=True)
def main(self, use_thread=False):
with fluid.program_guard(fluid.Program(), fluid.Program()):
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
executor = fluid.Executor(place)
data_file = fluid.layers.py_reader(
capacity=self.capacity,
dtypes=self.dtypes,
lod_levels=self.lod_levels,
shapes=self.shapes)
feed_queue = data_file.queue
read_out_data = fluid.layers.read_file(data_file)
self.inputs = []
for i in range(self.iterations):
in_data = fluid.LoDTensorArray()
batch_size = np.random.random_integers(self.batch_size_min,
self.batch_size_max)
for shape, dtype in zip(self.shapes, self.dtypes):
next_data = np.random.uniform(
low=0, high=1000,
size=(batch_size, ) + shape[1:]).astype(dtype)
in_data.append(
fluid.executor._as_lodtensor(next_data, place))
self.inputs.append(in_data)
executor.run(fluid.default_startup_program())
self.outputs = []
if use_thread:
thread = Thread(
target=feed_data, args=(feed_queue, self.inputs))
thread.start()
for in_data in self.inputs:
self.outputs.append(
executor.run(fetch_list=list(read_out_data)))
else:
for in_data in self.inputs:
feed_queue.push(in_data)
self.outputs.append(
executor.run(fetch_list=list(read_out_data)))
feed_queue.close()
self.validate()
def validate(self):
self.assertEqual(len(self.inputs), len(self.outputs))
for in_data_list, out_data_list in zip(self.inputs, self.outputs):
self.assertEqual(len(in_data_list), len(out_data_list))
in_data_list_np = [
np.array(in_lod_tensor) for in_lod_tensor in in_data_list
]
for in_data, out_data in zip(in_data_list_np, out_data_list):
self.assertTrue((in_data == out_data).all())
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -8,679,464,368,216,438,000 | 35.754902 | 75 | 0.568952 | false |
ronekko/chainer | tests/chainer_tests/functions_tests/math_tests/test_clip.py | 2 | 2885 | import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestClip(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
# Avoid values around x_min and x_max for stability of numerical
# gradient
for ind in numpy.ndindex(self.x.shape):
if -0.76 < self.x[ind] < -0.74:
self.x[ind] = -0.5
elif 0.74 < self.x[ind] < 0.76:
self.x[ind] = 0.5
self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.ggx = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.x_min = -0.75
self.x_max = 0.75
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = functions.clip(x, self.x_min, self.x_max)
self.assertEqual(y.data.dtype, self.dtype)
y_expect = self.x.copy()
for i in numpy.ndindex(self.x.shape):
if self.x[i] < self.x_min:
y_expect[i] = self.x_min
elif self.x[i] > self.x_max:
y_expect[i] = self.x_max
testing.assert_allclose(y_expect, y.data)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
def f(x):
return functions.clip(x, self.x_min, self.x_max)
gradient_check.check_backward(
f, x_data, y_grad, dtype=numpy.float64)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def check_double_backward(self, x_data, y_grad, gx_grad):
def f(x):
return functions.clip(x, self.x_min, self.x_max)
gradient_check.check_double_backward(
f, x_data, y_grad, gx_grad, dtype=numpy.float64, atol=1e-3)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.gy, self.ggx)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))
class TestClipInvalidInterval(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
def test_invalid_interval(self):
with self.assertRaises(AssertionError):
functions.clip(self.x, 1.0, -1.0)
testing.run_module(__name__, __file__)
| mit | 1,562,327,920,186,700,000 | 29.691489 | 77 | 0.606586 | false |
reinout/django | tests/forms_tests/field_tests/test_datefield.py | 23 | 8167 | from datetime import date, datetime
from django.forms import (
DateField, Form, HiddenInput, SelectDateWidget, ValidationError,
)
from django.test import SimpleTestCase, override_settings
from django.utils import translation
class GetDate(Form):
mydate = DateField(widget=SelectDateWidget)
class DateFieldTest(SimpleTestCase):
def test_form_field(self):
a = GetDate({'mydate_month': '4', 'mydate_day': '1', 'mydate_year': '2008'})
self.assertTrue(a.is_valid())
self.assertEqual(a.cleaned_data['mydate'], date(2008, 4, 1))
# As with any widget that implements get_value_from_datadict(), we must
# accept the input from the "as_hidden" rendering as well.
self.assertHTMLEqual(
a['mydate'].as_hidden(),
'<input type="hidden" name="mydate" value="2008-4-1" id="id_mydate" />',
)
b = GetDate({'mydate': '2008-4-1'})
self.assertTrue(b.is_valid())
self.assertEqual(b.cleaned_data['mydate'], date(2008, 4, 1))
# Invalid dates shouldn't be allowed
c = GetDate({'mydate_month': '2', 'mydate_day': '31', 'mydate_year': '2010'})
self.assertFalse(c.is_valid())
self.assertEqual(c.errors, {'mydate': ['Enter a valid date.']})
# label tag is correctly associated with month dropdown
d = GetDate({'mydate_month': '1', 'mydate_day': '1', 'mydate_year': '2010'})
self.assertIn('<label for="id_mydate_month">', d.as_p())
@override_settings(USE_L10N=True)
@translation.override('nl')
def test_l10n_date_changed(self):
"""
DateField.has_changed() with SelectDateWidget works with a localized
date format (#17165).
"""
# With Field.show_hidden_initial=False
b = GetDate({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '1',
}, initial={'mydate': date(2008, 4, 1)})
self.assertFalse(b.has_changed())
b = GetDate({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '2',
}, initial={'mydate': date(2008, 4, 1)})
self.assertTrue(b.has_changed())
# With Field.show_hidden_initial=True
class GetDateShowHiddenInitial(Form):
mydate = DateField(widget=SelectDateWidget, show_hidden_initial=True)
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '1',
'initial-mydate': HiddenInput().format_value(date(2008, 4, 1)),
}, initial={'mydate': date(2008, 4, 1)})
self.assertFalse(b.has_changed())
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '22',
'initial-mydate': HiddenInput().format_value(date(2008, 4, 1)),
}, initial={'mydate': date(2008, 4, 1)})
self.assertTrue(b.has_changed())
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '22',
'initial-mydate': HiddenInput().format_value(date(2008, 4, 1)),
}, initial={'mydate': date(2008, 4, 22)})
self.assertTrue(b.has_changed())
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '22',
'initial-mydate': HiddenInput().format_value(date(2008, 4, 22)),
}, initial={'mydate': date(2008, 4, 1)})
self.assertFalse(b.has_changed())
@override_settings(USE_L10N=True)
@translation.override('nl')
def test_l10n_invalid_date_in(self):
# Invalid dates shouldn't be allowed
a = GetDate({'mydate_month': '2', 'mydate_day': '31', 'mydate_year': '2010'})
self.assertFalse(a.is_valid())
# 'Geef een geldige datum op.' = 'Enter a valid date.'
self.assertEqual(a.errors, {'mydate': ['Voer een geldige datum in.']})
@override_settings(USE_L10N=True)
@translation.override('nl')
def test_form_label_association(self):
# label tag is correctly associated with first rendered dropdown
a = GetDate({'mydate_month': '1', 'mydate_day': '1', 'mydate_year': '2010'})
self.assertIn('<label for="id_mydate_day">', a.as_p())
def test_datefield_1(self):
f = DateField()
self.assertEqual(date(2006, 10, 25), f.clean(date(2006, 10, 25)))
self.assertEqual(date(2006, 10, 25), f.clean(datetime(2006, 10, 25, 14, 30)))
self.assertEqual(date(2006, 10, 25), f.clean(datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(date(2006, 10, 25), f.clean(datetime(2006, 10, 25, 14, 30, 59, 200)))
self.assertEqual(date(2006, 10, 25), f.clean('2006-10-25'))
self.assertEqual(date(2006, 10, 25), f.clean('10/25/2006'))
self.assertEqual(date(2006, 10, 25), f.clean('10/25/06'))
self.assertEqual(date(2006, 10, 25), f.clean('Oct 25 2006'))
self.assertEqual(date(2006, 10, 25), f.clean('October 25 2006'))
self.assertEqual(date(2006, 10, 25), f.clean('October 25, 2006'))
self.assertEqual(date(2006, 10, 25), f.clean('25 October 2006'))
self.assertEqual(date(2006, 10, 25), f.clean('25 October, 2006'))
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean('2006-4-31')
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean('200a-10-25')
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean('25/10/06')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
def test_datefield_2(self):
f = DateField(required=False)
self.assertIsNone(f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertIsNone(f.clean(''))
self.assertEqual('None', repr(f.clean('')))
def test_datefield_3(self):
f = DateField(input_formats=['%Y %m %d'])
self.assertEqual(date(2006, 10, 25), f.clean(date(2006, 10, 25)))
self.assertEqual(date(2006, 10, 25), f.clean(datetime(2006, 10, 25, 14, 30)))
self.assertEqual(date(2006, 10, 25), f.clean('2006 10 25'))
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean('2006-10-25')
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean('10/25/2006')
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean('10/25/06')
def test_datefield_4(self):
# Test whitespace stripping behavior (#5714)
f = DateField()
self.assertEqual(date(2006, 10, 25), f.clean(' 10/25/2006 '))
self.assertEqual(date(2006, 10, 25), f.clean(' 10/25/06 '))
self.assertEqual(date(2006, 10, 25), f.clean(' Oct 25 2006 '))
self.assertEqual(date(2006, 10, 25), f.clean(' October 25 2006 '))
self.assertEqual(date(2006, 10, 25), f.clean(' October 25, 2006 '))
self.assertEqual(date(2006, 10, 25), f.clean(' 25 October 2006 '))
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean(' ')
def test_datefield_5(self):
# Test null bytes (#18982)
f = DateField()
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean('a\x00b')
def test_datefield_changed(self):
format = '%d/%m/%Y'
f = DateField(input_formats=[format])
d = date(2007, 9, 17)
self.assertFalse(f.has_changed(d, '17/09/2007'))
def test_datefield_strptime(self):
"""field.strptime() doesn't raise a UnicodeEncodeError (#16123)"""
f = DateField()
try:
f.strptime('31 мая 2011', '%d-%b-%y')
except Exception as e:
# assertIsInstance or assertRaises cannot be used because UnicodeEncodeError
# is a subclass of ValueError
self.assertEqual(e.__class__, ValueError)
| bsd-3-clause | 2,449,196,353,411,058,000 | 42.195767 | 94 | 0.587457 | false |
VishvajitP/readthedocs.org | readthedocs/rtd_tests/tests/test_redirects.py | 20 | 10441 | from django.test import TestCase
from django.test.utils import override_settings
from django_dynamic_fixture import get
from django_dynamic_fixture import fixture
from readthedocs.builds.constants import LATEST
from readthedocs.projects.models import Project
from readthedocs.redirects.models import Redirect
import logging
class RedirectTests(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
logging.disable(logging.DEBUG)
self.client.login(username='eric', password='test')
self.client.post(
'/dashboard/import/',
{'repo_type': 'git', 'name': 'Pip',
'tags': 'big, fucking, monkey', 'default_branch': '',
'project_url': 'http://pip.rtfd.org',
'repo': 'https://github.com/fail/sauce',
'csrfmiddlewaretoken': '34af7c8a5ba84b84564403a280d9a9be',
'default_version': LATEST,
'privacy_level': 'public',
'version_privacy_level': 'public',
'description': 'wat',
'documentation_type': 'sphinx'})
pip = Project.objects.get(slug='pip')
pip.versions.create_latest()
def test_proper_url_no_slash(self):
r = self.client.get('/docs/pip')
# This is triggered by Django, so its a 301, basically just
# APPEND_SLASH
self.assertEqual(r.status_code, 301)
self.assertEqual(r['Location'], 'http://testserver/docs/pip/')
r = self.client.get(r['Location'])
self.assertEqual(r.status_code, 302)
r = self.client.get(r['Location'])
self.assertEqual(r.status_code, 200)
def test_proper_url(self):
r = self.client.get('/docs/pip/')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://testserver/docs/pip/en/latest/')
r = self.client.get(r['Location'])
self.assertEqual(r.status_code, 200)
def test_proper_url_with_lang_slug_only(self):
r = self.client.get('/docs/pip/en/')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://testserver/docs/pip/en/latest/')
r = self.client.get(r['Location'])
self.assertEqual(r.status_code, 200)
def test_proper_url_full(self):
r = self.client.get('/docs/pip/en/latest/')
self.assertEqual(r.status_code, 200)
def test_proper_url_full_with_filename(self):
r = self.client.get('/docs/pip/en/latest/test.html')
self.assertEqual(r.status_code, 200)
# Specific Page Redirects
def test_proper_page_on_main_site(self):
r = self.client.get('/docs/pip/page/test.html')
self.assertEqual(r.status_code, 302)
self.assertEqual(r['Location'],
'http://testserver/docs/pip/en/latest/test.html')
r = self.client.get(r['Location'])
self.assertEqual(r.status_code, 200)
def test_proper_url_with_version_slug_only(self):
r = self.client.get('/docs/pip/latest/')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://testserver/docs/pip/en/latest/')
r = self.client.get(r['Location'])
self.assertEqual(r.status_code, 200)
# If slug is neither valid lang nor valid version, it should 404.
# TODO: This should 404 directly, not redirect first
def test_improper_url_with_nonexistent_slug(self):
r = self.client.get('/docs/pip/nonexistent/')
self.assertEqual(r.status_code, 302)
r = self.client.get(r['Location'])
self.assertEqual(r.status_code, 404)
def test_improper_url_filename_only(self):
r = self.client.get('/docs/pip/test.html')
self.assertEqual(r.status_code, 404)
def test_improper_url_dir_file(self):
r = self.client.get('/docs/pip/nonexistent_dir/bogus.html')
self.assertEqual(r.status_code, 404)
def test_improper_url_dir_subdir_file(self):
r = self.client.get('/docs/pip/nonexistent_dir/subdir/bogus.html')
self.assertEqual(r.status_code, 404)
def test_improper_url_lang_file(self):
r = self.client.get('/docs/pip/en/bogus.html')
self.assertEqual(r.status_code, 404)
def test_improper_url_lang_subdir_file(self):
r = self.client.get('/docs/pip/en/nonexistent_dir/bogus.html')
self.assertEqual(r.status_code, 404)
    def test_improper_url_lang_dir_subdir_file(self):
r = self.client.get('/docs/pip/en/nonexistent_dir/subdir/bogus.html')
self.assertEqual(r.status_code, 404)
def test_improper_url_version_dir_file(self):
r = self.client.get('/docs/pip/latest/nonexistent_dir/bogus.html')
self.assertEqual(r.status_code, 404)
# Subdomains
def test_proper_subdomain(self):
r = self.client.get('/', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://pip.readthedocs.org/en/latest/')
def test_proper_subdomain_with_lang_slug_only(self):
r = self.client.get('/en/', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://pip.readthedocs.org/en/latest/')
def test_proper_subdomain_and_url(self):
r = self.client.get('/en/latest/', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 200)
def test_proper_subdomain_and_url_with_filename(self):
r = self.client.get(
'/en/latest/test.html', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 200)
# Specific Page Redirects
def test_proper_page_on_subdomain(self):
r = self.client.get('/page/test.html', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(r['Location'],
'http://pip.readthedocs.org/en/latest/test.html')
# When there's only a version slug, the redirect prepends the lang slug
def test_proper_subdomain_with_version_slug_only(self):
r = self.client.get('/1.4.1/', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(r['Location'],
'http://pip.readthedocs.org/en/1.4.1/')
def test_improper_subdomain_filename_only(self):
r = self.client.get('/test.html', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 404)
class RedirectUnderscoreTests(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
logging.disable(logging.DEBUG)
self.client.login(username='eric', password='test')
whatup = Project.objects.create(
slug='what_up', name='What Up Underscore')
# Test _ -> - slug lookup
def test_underscore_redirect(self):
r = self.client.get('/',
HTTP_HOST='what-up.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://what-up.readthedocs.org/en/latest/')
class RedirectAppTests(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.client.login(username='eric', password='test')
self.client.post(
'/dashboard/import/',
{'repo_type': 'git', 'name': 'Pip',
'tags': 'big, fucking, monkey', 'default_branch': '',
'project_url': 'http://pip.rtfd.org',
'repo': 'https://github.com/fail/sauce',
'csrfmiddlewaretoken': '34af7c8a5ba84b84564403a280d9a9be',
'default_version': LATEST,
'privacy_level': 'public',
'version_privacy_level': 'public',
'description': 'wat',
'documentation_type': 'sphinx'})
self.pip = Project.objects.get(slug='pip')
self.pip.versions.create_latest()
@override_settings(USE_SUBDOMAIN=True)
def test_redirect_root(self):
Redirect.objects.create(
project=self.pip, redirect_type='prefix', from_url='/woot/')
r = self.client.get('/woot/faq.html', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://pip.readthedocs.org/en/latest/faq.html')
@override_settings(USE_SUBDOMAIN=True)
def test_redirect_page(self):
Redirect.objects.create(
project=self.pip, redirect_type='page', from_url='/install.html', to_url='/tutorial/install.html')
r = self.client.get('/install.html', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://pip.readthedocs.org/en/latest/tutorial/install.html')
@override_settings(USE_SUBDOMAIN=True, PYTHON_MEDIA=True)
def test_redirect_html(self):
Redirect.objects.create(
project=self.pip, redirect_type='sphinx_html')
r = self.client.get('/en/latest/faq/', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://pip.readthedocs.org/en/latest/faq.html')
@override_settings(USE_SUBDOMAIN=True, PYTHON_MEDIA=True)
def test_redirect_htmldir(self):
Redirect.objects.create(
project=self.pip, redirect_type='sphinx_htmldir')
r = self.client.get('/en/latest/faq.html', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://pip.readthedocs.org/en/latest/faq/')
class RedirectBuildTests(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.project = get(Project,
slug='project-1',
documentation_type='sphinx',
conf_py_file='test_conf.py',
versions=[fixture()])
self.version = self.project.versions.all()[0]
def test_redirect_list(self):
r = self.client.get('/builds/project-1/')
self.assertEqual(r.status_code, 301)
self.assertEqual(r['Location'], 'http://testserver/projects/project-1/builds/')
def test_redirect_detail(self):
r = self.client.get('/builds/project-1/1337/')
self.assertEqual(r.status_code, 301)
self.assertEqual(r['Location'], 'http://testserver/projects/project-1/builds/1337/')
| mit | -3,972,338,867,987,097,600 | 39.468992 | 110 | 0.617182 | false |
isi-nlp/bolinas | extractor_synsem/extractor_synsem.py | 2 | 14687 | from common.exceptions import InvocationException
from common.hgraph.hgraph import Hgraph
from common.cfg import NonterminalLabel
from lib.tree import Tree
import re
from collections import defaultdict as ddict
import itertools
from parser.vo_rule import Rule
import sys
DEFAULT_COMPOSITION_DEPTH = 3
class ExtractorSynSem:
def __init__(self):
pass
@classmethod
def help(self):
"""
Returns SynSem help message.
"""
return 'Usage: python extract-synsem <nl_file> <mr_file> ' + \
'<alignment_file> <destination> [composition_depth (default %d)]' % \
DEFAULT_COMPOSITION_DEPTH
def main(self, *args):
"""
Extracts rules from the given training data, with an optional composition
depth specified.
"""
if len(args) < 4:
print self.help()
raise InvocationException()
nl_path, mr_path, align_path, destination_prefix = args[:4]
if len(args) == 4:
composition_depth = DEFAULT_COMPOSITION_DEPTH
elif len(args) == 5:
composition_depth = int(args[4])
else:
print self.help()
raise InvocationException()
self.extract_rules_corpus(nl_path, mr_path, align_path, destination_prefix,
composition_depth)
def extract_rules_corpus(self, nl_path, amr_path, alignment_path,
destination_prefix, composition_depth):
"""
Extract all rules from the corpus specified by the *_path arguments.
"""
syn_f = open(nl_path)
sem_f = open(amr_path)
align_f = open(alignment_path)
n_examples = count_lines(amr_path)
announce_interval = n_examples / 10
# load input data into examples list
examples = []
for example_i in range(n_examples):
syn_s = syn_f.readline().strip()
sem_s = sem_f.readline().strip()
align_s = align_f.readline().strip()
amr = Dag.from_string(sem_s)
tree = Tree(syn_s)
label_spans(tree)
align = get_alignments(align_s, amr)
examples.append((amr, tree, align))
# extract rules from data
rules = []
for example in examples:
example_rules = extract_rules(example[0], example[1], example[2],
composition_depth)
rules += example_rules
# assign ML weights by counting
grammar = collect_counts(rules)
Rule.write_to_file(grammar, destination_prefix)
def count_lines(filename):
"""
Counts the number of lines in the given file.
"""
n_lines = 0
with open(filename) as f:
for line in f:
n_lines += 1
return n_lines
def get_alignments(align_s, amr):
"""
Converts alignments into an actual mapping into edges of the AMR object.
"""
alignments = ddict(list)
align_s_parts = align_s.split()
for part in align_s_parts:
match = re.match(r'([^:]+):([^:]+:?[^:]+):([^:]+)-(\d+)', part)
head = match.group(1)
label = match.group(2)
tail = match.group(3)
index = int(match.group(4))
edge_l = [e for e in amr.triples() if
e[0] == head and \
e[1] == label and \
e[2] == (tail,)]
assert len(edge_l) == 1
alignments[edge_l[0]].append(index)
return dict(alignments)
def label_spans(tree, start=0):
"""
Labels each constituent with its corresponding sentence span (so that we can
distinguish constituents over different parts of the sentence with identical
  tree structure).
"""
end = start
if isinstance(tree, Tree):
for child in tree:
end = label_spans(child, end)
tree.span = (start, end)
return end
else:
return end + 1
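# Small worked example for label_spans (hypothetical parse, comments only):
# for Tree("(S (NP he) (VP runs))") the leaves "he" and "runs" occupy
# positions 0 and 1, so after label_spans(tree) we expect tree.span == (0, 2),
# tree[0].span == (0, 1) and tree[1].span == (1, 2).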
def minimal_aligned(constituents, tree_aligned):
"""
Finds frontier constituents.
"""
minimal_constituents = []
for key in constituents:
start,end,height = key
# ignore unaligned constituents
if len(tree_aligned[key]) == 0:
continue
# ignore constituents which have children with identical alignments
minimal = True
for key2 in constituents:
start2,end2,height2 = key2
if tree_aligned[key] == tree_aligned[key2] and start2 >= start and \
end2 <= end and height2 < height:
minimal = False
break
if not minimal:
continue
minimal_constituents.append(key)
return minimal_constituents
# HERE BE DRAGONS
# The following methods implement various searches through the AMR necessary to
# produce the heuristic attachment of unaligned edges described in the paper.
def amr_reachable_h(edges, amr, predicate, expander, seen=None):
if seen == None:
seen = set()
for e in edges:
if e in seen:
continue
if not predicate(e):
continue
seen.add(e)
amr_reachable_h(expander(e), amr, predicate, expander, seen)
return seen
def a_parents(edge, amr):
return amr.in_edges(edge[0])
def a_children(edge, amr):
for t in edge[2]:
return amr.out_edges(t)
def amr_reachable_forward(edges, amr, predicate):
return amr_reachable_h(edges, amr, predicate, lambda e: a_parents(e, amr))
def amr_reachable_backward(edges, amr, predicate):
return amr_reachable_h(edges, amr, predicate, lambda e: a_children(e, amr))
def amr_reachable_nothru_i(edge, amr, predicate, reachable, seen):
if edge in seen:
return
seen.add(edge)
if all(c in reachable for c in a_parents(edge, amr)):
for c in a_parents(edge, amr):
if all(p in reachable for p in a_children(edge, amr)):
amr_reachable_nothru_i(c, amr, predicate, reachable, seen)
if all(p in reachable for p in a_children(edge, amr)):
for p in a_children(edge, amr):
if all(c in reachable for c in a_parents(edge, amr)):
amr_reachable_nothru_i(p, amr, predicate, reachable, seen)
def amr_reachable_nothru(edges, amr, predicate=lambda e: True):
forward = amr_reachable_forward(edges, amr, predicate)
backward = amr_reachable_backward(edges, amr, predicate)
reachable = forward | backward
seen = set()
for edge in edges:
amr_reachable_nothru_i(edge, amr, predicate, reachable, seen)
return seen
def minimal_frontier(frontier):
"""
Extracts the minimal frontier set from the given frontier set.
"""
min_frontier = []
for f in frontier:
fstart, fend = f[0].span
minimal = True
for g in frontier:
gstart, gend = g[0].span
if gstart >= fstart and gend <= fend and g[0].height() < f[0].height():
minimal = False
break
if minimal:
min_frontier.append(f)
return min_frontier
def frontier_edges(amr, tree, alignments):
"""
Extracts the frontier set.
"""
frontier = []
constituents = {}
if isinstance(tree, Tree):
for constituent in tree.subtrees():
key = (constituent.span[0], constituent.span[1], constituent.height())
assert key not in constituents
constituents[key] = constituent
tree_aligned = ddict(set)
for edge in alignments:
for index in alignments[edge]:
for key in constituents:
start,end,height = key
if start <= index < end:
tree_aligned[key].add(index)
aligned_constituents = minimal_aligned(constituents, tree_aligned)
for key in aligned_constituents:
start,end,height = key
constituent = constituents[key]
aligned_edges = [e for e in alignments if all(start <= i < end for i in
alignments[e])]
if constituent == tree:
reachable_edges = amr.triples()
else:
reachable_edges = amr_reachable_nothru(aligned_edges, amr,
lambda e: e in aligned_edges or e not in alignments)
aligned_fragment = Dag.from_triples(reachable_edges)
if len(aligned_fragment.root_edges()) == 1:
frontier.append((constituent, aligned_fragment))
min_frontier = minimal_frontier(frontier)
min_frontier_sorted = sorted(min_frontier, key = lambda m:
len(list(m[0].subtrees())))
return min_frontier_sorted
def collapse_constituent(tree, constituent, label):
"""
Shortcut: replaces a constituent with a single nonterminal label.
"""
return replace_constituent(tree, constituent, str(label))
def replace_constituent(tree, constituent, new_constituent):
"""
Replaces one constituent in this tree with another.
"""
# We injected span, so the standard __eq__ check doesn't look for it
if tree == constituent and (not isinstance(tree, Tree) or tree.span ==
constituent.span):
return new_constituent
if not isinstance(tree, Tree):
return tree
n_tree = Tree(tree.node, [replace_constituent(subtree, constituent,
new_constituent) for subtree in tree])
n_tree.span = tree.span
return n_tree
def collapse_alignments(alignments, amr_fragment, new_triple):
"""
Adjusts alignments when replacing collapsing graph & tree fragments.
"""
new_alignments = {}
new_triple_alignment = []
for triple in alignments:
if triple in amr_fragment.triples():
new_triple_alignment += alignments[triple]
else:
new_alignments[triple] = alignments[triple]
new_triple_alignment = list(set(new_triple_alignment))
new_alignments[new_triple] = new_triple_alignment
return new_alignments
def make_rule(frontier_pair, amr, tree, align, next_index):
"""
Creates a new rule with the given parts, and collapses these parts in the
original graph and tree.
"""
constituent, amr_fragment = frontier_pair
outside_edges = [e for e in amr.triples() if e not in amr_fragment.triples()]
root_label = amr_fragment.root_edges()[0][1]
if isinstance(root_label, NonterminalLabel):
symbol = root_label.label
m = re.match(r'(.+)_(.+)_(\d+)', symbol)
role = m.group(1)
else:
if ':' in root_label:
role, concept = root_label.split(':')
else:
role = root_label
external_nodes = amr.find_external_nodes(amr_fragment)
if len(external_nodes) == 0:
external_nodes = [amr_fragment.find_leaves()[0]]
# WARNING: destructive. Unfortunately we can't make the change any earlier.
# TODO why?
amr_fragment.external_nodes = external_nodes
symbol = '%s_%s_%d' % (role, constituent.node, len(external_nodes))
label = NonterminalLabel(symbol, next_index)
new_triple = (amr_fragment.roots[0], label, tuple(external_nodes))
new_amr = amr.collapse_fragment(amr_fragment, label)
assert new_triple in new_amr.triples()
new_tree = collapse_constituent(tree, constituent, label)
new_alignments = collapse_alignments(align, amr_fragment, new_triple)
rule = Rule(0, symbol, 1, amr_fragment, constituent, original_index =
next_index)
return rule, new_amr, new_tree, new_alignments, next_index+1
def make_composed_rule(rule, cdict):
"""
Creates a composed rule by replacing every nonterminal in this rule's RHS with
the graph and tree fragment specified in cdict.
"""
for label, crule in cdict.items():
replacement_triple_l = [e for e in rule.amr.triples() if e[1] == label]
assert len(replacement_triple_l) == 1
replacement_fragment = Dag.from_triples(replacement_triple_l)
new_amr = rule.amr.replace_fragment(replacement_fragment, crule.amr)
new_tree = replace_constituent(rule.parse, str(label), crule.parse)
new_rule = Rule(rule.rule_id, rule.symbol, rule.weight, new_amr, new_tree,
original_index = rule.original_index)
rule = new_rule
return rule
def make_composed_rules(rules, max_depth):
"""
Finds all possible composed rules, up to the specified max depth.
"""
composed_rules = []
# add all base rules
for rule in rules:
composed_rules.append(rule)
# incrementally compose rules up to the max depth
for i in range(1, max_depth):
composed_rules_this_depth = []
# consider each rule...
for rule in rules:
nt_labels = [e[1] for e in rule.amr.triples() if isinstance(e[1],
NonterminalLabel)]
if len(nt_labels) == 0:
continue
# ...and try to replace its nonterminals with the fragments from other
# composed rules
# we cheat here by relying on the fact that nonterminal indices are
# never repeated in the induced derivation of a training example (so if a
# rule has original_index n, we are sure it can only replace the
# nonterminal with the same index)
composition_candidates = {}
for label in nt_labels:
composition_candidates[label] = []
for crule in composed_rules:
if crule.original_index != label.index:
continue
composition_candidates[label].append(crule)
# we have a set of possible substitutions (of varying depth) for each
# nonterminal; now we consider every possible way of combining them (the
# Cartesian product of all the candidate lists)
comp_cand_list = []
label_list = []
for label, comp_cand in composition_candidates.items():
label_list.append(label)
comp_cand_list.append(comp_cand)
compositions = itertools.product(*comp_cand_list)
compositions = list(compositions)
# now actually create the composed rules
for composition in compositions:
cdict = dict(zip(label_list, composition))
composed_rule = make_composed_rule(rule, cdict)
composed_rules_this_depth.append(composed_rule)
composed_rules += composed_rules_this_depth
return [rule.canonicalize_amr() for rule in composed_rules]
def extract_rules(amr, tree, align, composition_depth):
"""
Extracts all possible rules from the given tree-string pair.
"""
rules = []
frontier = frontier_edges(amr, tree, align)
next_index = 0
while frontier:
rule, amr, tree, align, next_index = make_rule(frontier[0], amr, tree,
align, next_index)
rules.append(rule)
frontier = frontier_edges(amr, tree, align)
composed_rules = make_composed_rules(rules, composition_depth)
return composed_rules
def collect_counts(rules):
"""
Collects counts of the number of times each rule is used in the training data
for the "observed derivation" ML estimate of rule weights.
"""
rule_mapper = {}
rule_counter = {}
rule_normalizer = ddict(lambda:0.0)
for rule in rules:
rule_key = '%s:::%s:::%s' % (rule.symbol, rule.amr, rule.parse)
rule_key = re.sub(r'\s+', ' ', rule_key)
rule_key = re.sub(r'\[\d+\]', '[D]', rule_key)
if rule_key not in rule_mapper:
rule_mapper[rule_key] = rule
rule_counter[rule_key] = 1
else:
rule_counter[rule_key] += 1
rule_normalizer[rule.symbol] += 1
grammar = {}
next_id = 0
for key in rule_mapper:
rule = rule_mapper[key]
count = rule_counter[key]
norm = rule_normalizer[rule.symbol]
g_rule = Rule(next_id, rule.symbol, float(count)/norm, rule.amr, rule.parse)
grammar[next_id] = g_rule
next_id += 1
return grammar
if __name__ == "__main__":
extractor = ExtractorSynSem()
extractor.main(sys.argv)
| mit | -7,124,554,008,113,863,000 | 30.382479 | 80 | 0.664261 | false |
apache/incubator-airflow | airflow/providers/cncf/kubernetes/sensors/spark_kubernetes.py | 5 | 4691 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Dict, Optional
from kubernetes import client
from airflow.exceptions import AirflowException
from airflow.providers.cncf.kubernetes.hooks.kubernetes import KubernetesHook
from airflow.sensors.base import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class SparkKubernetesSensor(BaseSensorOperator):
"""
Checks sparkApplication object in kubernetes cluster:
.. seealso::
For more detail about Spark Application Object have a look at the reference:
https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/blob/v1beta2-1.1.0-2.4.5/docs/api-docs.md#sparkapplication
:param application_name: spark Application resource name
:type application_name: str
:param namespace: the kubernetes namespace where the sparkApplication reside in
:type namespace: str
:param kubernetes_conn_id: the connection to Kubernetes cluster
:type kubernetes_conn_id: str
:param attach_log: determines whether logs for driver pod should be appended to the sensor log
:type attach_log: bool
"""
template_fields = ("application_name", "namespace")
FAILURE_STATES = ("FAILED", "UNKNOWN")
SUCCESS_STATES = ("COMPLETED",)
@apply_defaults
def __init__(
self,
*,
application_name: str,
attach_log: bool = False,
namespace: Optional[str] = None,
kubernetes_conn_id: str = "kubernetes_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.application_name = application_name
self.attach_log = attach_log
self.namespace = namespace
self.kubernetes_conn_id = kubernetes_conn_id
self.hook = KubernetesHook(conn_id=self.kubernetes_conn_id)
def _log_driver(self, application_state: str, response: dict) -> None:
if not self.attach_log:
return
status_info = response["status"]
if "driverInfo" not in status_info:
return
driver_info = status_info["driverInfo"]
if "podName" not in driver_info:
return
driver_pod_name = driver_info["podName"]
namespace = response["metadata"]["namespace"]
log_method = self.log.error if application_state in self.FAILURE_STATES else self.log.info
try:
log = ""
for line in self.hook.get_pod_logs(driver_pod_name, namespace=namespace):
log += line.decode()
log_method(log)
except client.rest.ApiException as e:
self.log.warning(
"Could not read logs for pod %s. It may have been disposed.\n"
"Make sure timeToLiveSeconds is set on your SparkApplication spec.\n"
"underlying exception: %s",
driver_pod_name,
e,
)
def poke(self, context: Dict) -> bool:
self.log.info("Poking: %s", self.application_name)
response = self.hook.get_custom_object(
group="sparkoperator.k8s.io",
version="v1beta2",
plural="sparkapplications",
name=self.application_name,
namespace=self.namespace,
)
try:
application_state = response["status"]["applicationState"]["state"]
except KeyError:
return False
if self.attach_log and application_state in self.FAILURE_STATES + self.SUCCESS_STATES:
self._log_driver(application_state, response)
if application_state in self.FAILURE_STATES:
raise AirflowException("Spark application failed with state: %s" % application_state)
elif application_state in self.SUCCESS_STATES:
self.log.info("Spark application ended successfully")
return True
else:
self.log.info("Spark application is still in state: %s", application_state)
return False
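# Hedged DAG usage sketch (illustrative only; the DAG and resource names are
# placeholders, and the SparkApplication 'spark-pi' is assumed to have been
# submitted separately, e.g. by a SparkKubernetesOperator task):
#
#   from airflow import DAG
#   from airflow.utils.dates import days_ago
#
#   with DAG("spark_pi_monitor", start_date=days_ago(1), schedule_interval=None) as dag:
#       monitor = SparkKubernetesSensor(
#           task_id="monitor_spark_pi",
#           application_name="spark-pi",
#           namespace="spark-jobs",
#           attach_log=True,
#       )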
| apache-2.0 | 4,549,095,830,007,906,000 | 39.791304 | 127 | 0.659774 | false |
mfm24/ChordViz | ChordViz.py | 1 | 20081 | # -*- coding: utf-8 -*-
"""
Created on Fri May 3 21:09:10 2013
@author: matt
# based on MyPlayer3_Callback (which is newer than MyPlayer3.py)
"""
from __future__ import division
import time, math, logging
import numpy as np
from threading import Lock, Thread
import itertools
# not sure I've added correct path in launchd.conf
# and export doesn't obviously work
import sys
sys.path.append('/Users/matt/Dropbox/personal/dev/PythonLibs/')
try:
from uidecorators import ui_decorators
use_ui = True
except ImportError:
    # a bit nasty. We'll create an object where all members
# return a decorator function returning a decorator that does nothing!
class FakeUIDec:
def __getattr__(self, name):
def no_wrap(*args, **kwargs):
def wrap_creator(func):
def w(*args, **kwargs):
func(*args, **kwargs)
return w
return wrap_creator
return no_wrap
ui_decorators = FakeUIDec()
use_ui=False
try:
import pyaudio
p = pyaudio.PyAudio()
has_pyaudio = True
except ImportError:
logging.warn("PyAudio not found! - Will not be able to output any audio!")
has_pyaudio = False
def play_waveform(w):
def callback(in_data, frame_count, time_info, status):
# this requests upto 1024 frames?
with w.datalock:
ndata = w.data
if ndata is not None:
return (np.hstack([ndata]*(frame_count//1024)), pyaudio.paContinue)
else:
return (None, pyaudio.paComplete)
if has_pyaudio:
# open stream using callback (3)
play_waveform.stream = p.open(format=pyaudio.paInt16,
channels=1,
rate=w.rate,
output=True,
frames_per_buffer=w.size,
stream_callback=callback)
play_waveform.stream = None
max_frequency = 22100 # we stop making notes above this
note_types = {
"PureTone": lambda harmonic: 1 if harmonic==0 else 0,
"Poisson0.5": lambda harmonic: poisson(0.5, harmonic),
"Poisson1": lambda harmonic: poisson(1, harmonic),
"Poisson2": lambda harmonic: poisson(2, harmonic),
"Poisson3": lambda harmonic: poisson(3, harmonic),
"Lorentz1": lambda harmonic: 1.0/(1.0+harmonic**2),
"Lorentz10": lambda harmonic: 10.0/(10.0+harmonic**2),
"Equal": lambda harmonic: 1,
"EqualOdd": lambda harmonic: 1 if harmonic%2==1 or harmonic==0 else 0,
"EqualEven": lambda harmonic: 1 if harmonic%2==0 else 0,
"OneOverX": lambda harmonic: 1.0/(harmonic+1.0)
}
equal_temperament_notes = [2 ** (x / 12.0) for x in range(12)]
just_intonation_notes = [1, 16 / 15., 9 / 8., 6 / 5., 5 / 4., 4 / 3., 45 / 32., 3 / 2., 8 / 5., 5 / 3., 16 / 9., 15 / 8.]
twelve_tone_names = ["I", "IIb", "II", "IIIb", "III", "IV", "IV#", "V", "VIb", "VI", "VIIb", "VII"]
class Waveform(object):
def __init__(self, size=1024*16, rate=44100):
self.size = size
self.rate = rate
self.data = np.zeros((size), dtype=np.int16)
self.datalock = Lock()
self.volume_amp = 0.1
self.form = lambda note: poisson(2, note)
self.notetype="Poisson1"
self.notefreq=440
self.on_notes_changed=[]
self._harmonics_slice = None
self.clear_notes()
def clear_notes(self):
self.notes = []
self()
def set_notes(self, notes):
self.clear_notes()
self.add_notes(notes)
self()
def add_notes(self, notes):
self.notes.append(list(notes))
self()
def __call__(self):
newdata = np.zeros((self.size), dtype=np.complex64)
for notegroup in self.notes:
for freq, mag in notegroup:
dphase=int (freq*self.size / self.rate )
logging.info("Adding note at pixel %s", dphase)
if dphase > len(newdata)/2:
continue # this is nyquist, can't go any higher
#let's scale mag by number of notes
newdata[dphase]=self.volume_amp*mag*32765/2
#make ft real
newdata[-dphase] = np.conj(newdata[dphase])
sqrtsumsq = math.sqrt((newdata**2).sum())
if sqrtsumsq:
newdata *= self.volume_amp * 2.0 * 32767.0 / sqrtsumsq
printimag = 0
if printimag:
complex_d=np.imag(np.fft.fft(newdata));
print "imag magnitude: ", np.sqrt(np.sum(complex_d**2))
newdata = np.asarray(np.real(np.fft.fft(newdata)), dtype=np.int16)
with self.datalock:
self.data = newdata
for f in self.on_notes_changed:
f()
def get_volume(self):
v = math.log(self.volume_amp, 10)*20
return v
@ui_decorators.slider(getfunc=get_volume, maximum=0, minimum=-50, scale=1)
def volume(self, value):
self.volume_amp = 10**(value/20.0)
self()
def get_note_type(self):
return self.notetype
@ui_decorators.combobox(
getfunc=get_note_type,
options=note_types.keys())
def note_type(self, t):
self.notetype = t
def get_harmonics_slice(self):
if self._harmonics_slice:
            return ",".join(str(x) for x in self._harmonics_slice)
else:
return ""
@ui_decorators.textbox(getfunc=get_harmonics_slice)
def harmonics_slice(self, n):
"""
Sets the harmonics to display
Should be either [start:]stop[:step]
or else a,b,c where a,b,c are indices to choose
"""
if n=="":
self._harmonics_slice = None
return
if ':' in n:
sc = [int(x or "0") for x in n.split(":")]
if len(sc)==1:
self._harmonics_slice = (None, sc[0], None)
elif len(sc) == 2:
self._harmonics_slice = (sc[0], sc[1], None)
else:
self._harmonics_slice = (sc[0], sc[1], sc[2])
else:
self._harmonics_slice = [int(x or "-1") for x in n.split(',')]
def get_root_frequency(self):
return self.notefreq
@ui_decorators.textbox(getfunc=get_root_frequency)
def root_frequency(self, val):
self.notefreq = float(val)
self()
def add_form(self, root):
if isinstance(self._harmonics_slice, list):
all_notes = list(notes_from_func(note_types[self.notetype], root))
notes = []
for i in self._harmonics_slice:
notes.append(all_notes[i])
else:
slice_args = self._harmonics_slice or (None,)
notes = itertools.islice(
notes_from_func(note_types[self.notetype], root),
*slice_args)
self.add_notes(notes)
@ui_decorators.button
def clear(self):
self.clear_notes()
@ui_decorators.button
def note_root(self):
self.add_form(self.notefreq)
self()
@ui_decorators.button
def note_major3rd(self):
self.add_form(self.notefreq*5.0/4.0)
self()
@ui_decorators.button
def note_fifth(self):
self.add_form(self.notefreq*6.0/4.0)
self()
@ui_decorators.button
def play_major_chord(self):
self.play_threaded_chord([self.notefreq,
self.notefreq*5.0/4.0,
self.notefreq*6.0/4.0])
@ui_decorators.button
def test(self):
self.play_threaded_chord([self.notefreq,
self.notefreq*7.0/8.0,
self.notefreq*6.0/4.0])
@ui_decorators.button
def play_minor_chord(self):
self.play_threaded_chord([self.notefreq,
self.notefreq*12.0/10.0,
self.notefreq*15.0/10.0])
@ui_decorators.button
def play_minor_chord_fifth(self):
self.play_threaded_chord([self.notefreq,
self.notefreq*4.0/3.0,
self.notefreq*8.0/5.0])
def play_threaded_chord(self, roots):
def run_through():
for i,n in enumerate(roots):
self.clear_notes()
[self.add_form([]) for t in range(i)]
self.add_form(n)
time.sleep(1.5)
self.clear_notes()
for n in roots:
self.add_form(n)
Thread(target=run_through).start()
# run in interactive shell and use set_notes to play?
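# A minimal sketch of that interactive use (assuming PyAudio and an output
# device are available):
#
#   w = Waveform()
#   play_waveform(w)          # opens the callback stream defined above
#   w.root_frequency("220")   # setters take the widget's string value
#   w.play_major_chord()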
def poisson(l, n):
return math.exp(-l)*l**n/math.factorial(n)
def notes_from_func(func, root):
for h in itertools.count():
mag = func(h)
# we cut off until we reach max_frequency
if root+root*h > max_frequency:
return
yield root+root*h, mag
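# Hedged illustration of notes_from_func (comments only): for a 440 Hz root
# with the "Poisson1" envelope, the first pairs yielded are approximately
# (440, 0.368), (880, 0.368), (1320, 0.184), since the magnitude of harmonic n
# is exp(-1) * 1**n / n!. For example:
#
#   list(itertools.islice(notes_from_func(note_types["Poisson1"], 440), 3))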
def cleanup():
if has_pyaudio:
play_waveform.stream.close()
p.terminate()
######################## UI Stuff ############################
# this could go in a separate file, but keeping it here for the
# moment
# creating a UI Options class for modifying the visualisation using
# out qt decorators
class UIOptions:
def __init__(self):
self._linear_freq_in_octaves = True
self.virtual_size = 1500,1500
self._inverse = True
self._show_just_notes = True
self._show_base_spiral = True
self._show_ET_notes = False # ET=equal temperament
def get_linear_freq_in_octaves(self):
return self._linear_freq_in_octaves
@ui_decorators.checkbox(getfunc=get_linear_freq_in_octaves)
def linear_freq_in_octaves(self, newval):
self._linear_freq_in_octaves = newval
notes_changed()
def get_show_base_spiral(self):
return self._show_base_spiral
@ui_decorators.checkbox(getfunc=get_show_base_spiral)
def show_base_spiral(self, newval):
self._show_base_spiral = newval
notes_changed()
def get_inverse(self):
return self._inverse
@ui_decorators.checkbox(getfunc=get_inverse)
def inverse(self, newval):
self._inverse = newval
notes_changed()
def get_show_just_notes(self):
return self._show_just_notes
@ui_decorators.checkbox(getfunc=get_show_just_notes)
def show_just_notes(self, newval):
self._show_just_notes = newval
notes_changed()
def get_show_ET_notes(self):
return self._show_ET_notes
@ui_decorators.checkbox(getfunc=get_show_ET_notes)
def show_ET_notes(self, newval):
self._show_ET_notes = newval
notes_changed()
def make_note_lines(root, named_notes, width, radius):
"""
For the dictionary named_notes, draws thin lines for each note
adding the key for the note to the SVG.
This way we can overlay scales on the diagrams.
"""
lines = []
for name, freq in named_notes.iteritems():
(x1, y1), theta = get_pos_theta_for_note(freq, root, 0, 0)
font_size = radius/16.0
lines.append(
'<line x1="{x1}" y1="{y1}" x2="{x2}" y2="{y2}" stroke-width="{width}"/>'.format(
x1=x1, x2=x1 + 2 * radius * math.sin(theta),
y1=y1, y2=y1 - 2 * radius * math.cos(theta),
width=width))
lines.append('<text x="{x}" y="{y}" font-size="{fs}">{text}</text>'.format(
x=x1 + radius * math.sin(theta),
y=y1 - radius * math.cos(theta),
text=name, fs=font_size))
return "\n".join(lines)
def get_pos_theta_for_note(f, root, root_radius, length):
"""
Return (x,y),theta where (x,y) is the starting position of the note
and theta is the angle the note should have
"""
# first, we calculate the octave and theta for the root
logf = math.log(f / root, 2)
note_fraction, octave = math.modf(logf)
if ui_opts.get_linear_freq_in_octaves():
note = (2**note_fraction - 1)
else:
note = note_fraction
theta = note * 2.0 * math.pi
centerx, centery = (x / 2 for x in ui_opts.virtual_size)
r = root_radius + (octave + note_fraction) * length
x = centerx + r * math.sin(theta)
y = centery - r * math.cos(theta)
return (x,y), theta
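# Worked example for get_pos_theta_for_note: with root=440, a note at 880 Hz is
# exactly one octave up, so note_fraction == 0 and theta == 0 -- same angle as
# the root, one ring (length) further out. A 660 Hz note (a 3:2 fifth) has
# note_fraction = log2(1.5) ~ 0.585; with linear_freq_in_octaves enabled (the
# default) note = 2**0.585 - 1 = 0.5 and theta = pi (180 deg), otherwise
# theta ~ 0.585 * 2*pi ~ 210 deg, matching the docstring discussion below.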
def make_spiral_lines_from_notes(root, notes,
length=75, root_radius=100,
stroke_width_scale=15):
"""
Is there a way to represent notes where octaves are still seperated but
we can see notes of the same pitch?
We could draw a spiral, where an octave is 360 degrees and on the next
ring out.
There's a similar idea here:
http://nastechservices.com/Spectrograms.html
    How should we represent a 3:2 ratio? If we just take log(x,2)*2*pi
then 3/2 is at 210deg (or 3.67rad). Is it worth making the scale linear,
and putting 3/2 at 180deg? We could also spiral so that 3/2f gets us to 180
deg then we stretch out the remaining part of the curve?
We'll try the linear for now.
It works, but not all 3/2 notes are 180deg from each other
(if the higher note is past the root, it's not)
    Is there a way to do this? Maybe not: e.g. if we put the 5th = 3r/2 opposite
    the root, then 3/2 * (3r/2) = 9r/4 is not an octave of the root, yet it
    would still need to sit 180deg from 3r/2, i.e. back at the root's angle.
"""
width_gamma = 0.2 # we use width^this as the width
centerx, centery = (x / 2 for x in ui_opts.virtual_size)
lines = []
for f, m in notes:
        # line width scales with the note's magnitude (gamma-compressed);
        # the position and angle come from get_pos_theta_for_note below
width = stroke_width_scale * math.pow(m, width_gamma)
(x1, y1), theta = get_pos_theta_for_note(f, root, root_radius, length)
x2 = x1 + 0.9 * length * math.sin(theta)
y2 = y1 - 0.9 * length * math.cos(theta)
lines.append('<line x1="{x1}" y1="{y1}" x2="{x2}" y2="{y2}" stroke-width="{width}"/>'.format(
x1=x1, x2=x2, y1=y1, y2=y2,
width=width))
return "\n".join(lines)
def make_spiral_octave_lines(root, length=75, root_radius=100, max_f=22100):
"""
Starting with the root note, draw the spiral on which
any higher frequency notes will sit. This way we can count
harmonics more easily
"""
width = 0.5
(x1, y1), _ = get_pos_theta_for_note(root, root, root_radius, length)
lines = []
step = int(root/50) or 1
for f in range(int(root), int(max_f), step):
(x2, y2), theta = get_pos_theta_for_note(f, root, root_radius, length)
lines.append('<line x1="{x1}" y1="{y1}" x2="{x2}" y2="{y2}" stroke-width="{width}"/>'.format(
x1=x1, x2=x2, y1=y1, y2=y2,
width=width))
x1, y1 = x2, y2
return "\n".join(lines)
rgb_colors = [0xFF0000, 0x00FF00, 0x0000FF]
cym_colors = [0x00FFFF, 0xFF00FF, 0xFFFF00]
white = 0xFFFFFFFF
black = 0xFF000000
# some QT specific stuff follows:
import PySide.QtCore
import PySide.QtGui
import PySide.QtSvg
def render_svg(svg, qp):
r = PySide.QtSvg.QSvgRenderer()
w,h = ui_opts.virtual_size
ret = '<svg xmlns="http://www.w3.org/2000/svg" version="1.1" width="{}" height="{}">'.format(w, h)
ret += svg
ret += "</svg>"
# print ret
r.load(PySide.QtCore.QByteArray(ret))
assert r.isValid()
r.render(qp)
def raw_svg_to_group(svg, color, extras=""):
ret = '<g stroke="#{0:06X}" fill="#{0:06X}" {1}>'.format(
color & 0xFFFFFF, extras)
ret += svg
ret += "</g>"
return ret
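# For example (illustrative): raw_svg_to_group('<rect width="10" height="10"/>', 0xFF0000)
# returns '<g stroke="#FF0000" fill="#FF0000" ><rect width="10" height="10"/></g>'.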
from uidecorators.qt_framework import Framework
def notes_changed(*args):
mode = "inverse" if ui_opts.get_inverse() else "normal"
qim = PySide.QtGui.QImage(d.widget().width(), d.widget().height(), PySide.QtGui.QImage.Format.Format_ARGB32)
qp = PySide.QtGui.QPainter(qim)
qp.setRenderHint(qp.Antialiasing)
qp.setRenderHint(qp.SmoothPixmapTransform)
if mode == "inverse":
#qim.fill(white)
qp.setCompositionMode(qp.CompositionMode.CompositionMode_Darken)
colors = cym_colors
default_foreground = black
default_background = white
mode = "darken"
else:
#qim.fill(black)
qp.setCompositionMode(qp.CompositionMode.CompositionMode_Lighten)
colors = rgb_colors
default_foreground = white
default_background = black
mode = "lighten"
default_foreground = 0x888888
root = w.get_root_frequency()
all_svgs=[]
num_octaves = math.log(max_frequency / root, 2)
# let's scale note height and width with number of octaves we're drawing
note_length = 400.0 / num_octaves
note_width = 500 / 2**num_octaves
# we'll set the background with a svg rect
svg = raw_svg_to_group('<rect width="1500" height="1500" />', default_background)
all_svgs.append(svg)
for check, notes in [(ui_opts.get_show_just_notes, just_intonation_notes),
(ui_opts.get_show_ET_notes, equal_temperament_notes)]:
if check():
overlay = make_note_lines(
root,
{i: f * root for i, f in zip(twelve_tone_names, notes)},
0.5, 600)
svg = raw_svg_to_group(overlay, default_foreground)
all_svgs.append(svg)
if ui_opts.get_show_base_spiral():
overlay = make_spiral_octave_lines(root, length=note_length)
svg = raw_svg_to_group(overlay, default_foreground)
all_svgs.append(svg)
theta = 0
width, height = ui_opts.virtual_size
for notegroup, col in zip(w.notes, colors):
notegrp_svg = make_spiral_lines_from_notes(
root, notegroup, length=note_length, stroke_width_scale=note_width)
notegrp_svg += '<circle r="{}" cx="{}" cy="{}"/>'.format(
width / 30.0, width / 10.0 + width / 45.0 * math.sin(theta),
width / 10.0 + width / 45.0 * math.cos(theta))
theta += math.pi*2.0/len(w.notes)
# convert to a svg group with some extra tags to make inkscape happy
svg = raw_svg_to_group(
notegrp_svg, col,
extras='inkscape:groupmode="layer" filter="url(#blend)"')
all_svgs.append(svg)
    # finally we'll render them all
for svg in all_svgs:
render_svg(svg, qp)
# try to save an inkscape compatible svg file.
# we can add a darken/lighten filter, and we need to add
# enable-background="new" to the svg header and the
# inkscape ns:
with open("out.svg", 'w') as f:
f.write('<svg xmlns="http://www.w3.org/2000/svg" '
'xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" '
'version="1.1" width="{}" height="{}" '
'enable-background="new">'.format(width, height))
f.write('<filter id="blend">'
'<feBlend in2="BackgroundImage" mode="{0}" />'
'</filter>'.format(mode))
f.write("\n".join(all_svgs))
f.write("</svg>")
d.widget().setPixmap(PySide.QtGui.QPixmap.fromImage(qim))
# qim.save("out.png", 'PNG')
    qp = None # drop the painter so the QPainter is destroyed before qim goes away
if __name__=="__main__":
w=Waveform()
play_waveform(w)
if use_ui:
ui_opts = UIOptions()
f = Framework()
f.get_main_window().resize(800,600)
d=PySide.QtGui.QDockWidget("Note Visualization")
d.setWidget(PySide.QtGui.QLabel())
f.get_main_window().addDockWidget(PySide.QtCore.Qt.RightDockWidgetArea, d)
# play notes is threaded, so we need to call notes_changed from the
# ui thread.
w.on_notes_changed.append(lambda: f.run_on_ui_thread(notes_changed))
f.display_widgets([f.get_obj_widget(w), f.get_obj_widget(ui_opts)])
f.close()
| mit | 4,261,677,554,815,853,000 | 34.667851 | 121 | 0.567053 | false |
pymedusa/SickRage | ext/boto/sdb/db/blob.py | 153 | 2437 | # Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.compat import six
class Blob(object):
"""Blob object"""
def __init__(self, value=None, file=None, id=None):
self._file = file
self.id = id
self.value = value
@property
def file(self):
from StringIO import StringIO
if self._file:
f = self._file
else:
f = StringIO(self.value)
return f
def __str__(self):
return six.text_type(self).encode('utf-8')
def __unicode__(self):
if hasattr(self.file, "get_contents_as_string"):
value = self.file.get_contents_as_string()
else:
value = self.file.getvalue()
if isinstance(value, six.text_type):
return value
else:
return value.decode('utf-8')
def read(self):
if hasattr(self.file, "get_contents_as_string"):
return self.file.get_contents_as_string()
else:
return self.file.read()
def readline(self):
return self.file.readline()
def next(self):
return next(self.file)
def __iter__(self):
return iter(self.file)
@property
def size(self):
if self._file:
return self._file.size
elif self.value:
return len(self.value)
else:
return 0
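# Minimal usage sketch (illustrative only, not part of boto itself):
#   b = Blob(value='hello world')
#   b.size                  # -> 11
#   b.read()                # -> 'hello world'
#   [line for line in b]    # iterates over the underlying file object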
| gpl-3.0 | 7,653,667,183,352,438,000 | 31.065789 | 74 | 0.640952 | false |
pymedusa/SickRage | ext/boto/ec2/__init__.py | 22 | 3094 | # Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
This module provides an interface to the Elastic Compute Cloud (EC2)
service from AWS.
"""
from boto.ec2.connection import EC2Connection
from boto.regioninfo import RegionInfo, get_regions, load_regions
from boto.regioninfo import connect
RegionData = load_regions().get('ec2', {})
def regions(**kw_params):
"""
Get all available regions for the EC2 service.
You may pass any of the arguments accepted by the EC2Connection
object's constructor as keyword arguments and they will be
passed along to the EC2Connection object.
:rtype: list
:return: A list of :class:`boto.ec2.regioninfo.RegionInfo`
"""
return get_regions('ec2', connection_cls=EC2Connection)
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.ec2.connection.EC2Connection`.
Any additional parameters after the region_name are passed on to
the connect method of the region object.
:type: str
:param region_name: The name of the region to connect to.
:rtype: :class:`boto.ec2.connection.EC2Connection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
if 'region' in kw_params and isinstance(kw_params['region'], RegionInfo)\
and region_name == kw_params['region'].name:
return EC2Connection(**kw_params)
return connect('ec2', region_name,
connection_cls=EC2Connection, **kw_params)
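# Typical usage (the region and credentials below are placeholders):
#   import boto.ec2
#   conn = boto.ec2.connect_to_region('us-west-2',
#                                     aws_access_key_id='<key>',
#                                     aws_secret_access_key='<secret>')
#   # conn is None if the region name is not recognised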
def get_region(region_name, **kw_params):
"""
Find and return a :class:`boto.ec2.regioninfo.RegionInfo` object
given a region name.
:type: str
:param: The name of the region.
:rtype: :class:`boto.ec2.regioninfo.RegionInfo`
:return: The RegionInfo object for the given region or None if
an invalid region name is provided.
"""
for region in regions(**kw_params):
if region.name == region_name:
return region
return None
| gpl-3.0 | -6,349,467,765,238,059,000 | 35.833333 | 77 | 0.712023 | false |
GunoH/intellij-community | python/helpers/py2only/docutils/parsers/rst/languages/de.py | 121 | 3465 | # -*- coding: utf-8 -*-
# $Id: de.py 7223 2011-11-21 16:43:06Z milde $
# Authors: Engelbert Gruber <[email protected]>;
# Lea Wiemann <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
German-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
'achtung': 'attention',
'vorsicht': 'caution',
'code': 'code',
'gefahr': 'danger',
'fehler': 'error',
'hinweis': 'hint',
'wichtig': 'important',
'notiz': 'note',
'tipp': 'tip',
'warnung': 'warning',
'ermahnung': 'admonition',
'kasten': 'sidebar',
'seitenkasten': 'sidebar',
'thema': 'topic',
'zeilen-block': 'line-block',
'parsed-literal (translation required)': 'parsed-literal',
'rubrik': 'rubric',
'epigraph': 'epigraph',
'highlights (translation required)': 'highlights',
u'pull-quote': 'pull-quote', # commonly used in German too
u'seitenansprache': 'pull-quote', # cf. http://www.typografie.info/2/wiki.php?title=Seitenansprache
'zusammengesetzt': 'compound',
'verbund': 'compound',
u'container': 'container',
#'fragen': 'questions',
'tabelle': 'table',
'csv-tabelle': 'csv-table',
'list-table (translation required)': 'list-table',
u'mathe': 'math',
u'formel': 'math',
'meta': 'meta',
#'imagemap': 'imagemap',
'bild': 'image',
'abbildung': 'figure',
u'unverändert': 'raw',
u'roh': 'raw',
u'einfügen': 'include',
'ersetzung': 'replace',
'ersetzen': 'replace',
'ersetze': 'replace',
'unicode': 'unicode',
'datum': 'date',
'klasse': 'class',
'rolle': 'role',
u'default-role (translation required)': 'default-role',
u'title (translation required)': 'title',
'inhalt': 'contents',
'kapitel-nummerierung': 'sectnum',
'abschnitts-nummerierung': 'sectnum',
    u'linkziel-fußnoten': 'target-notes',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',
    #u'fußnoten': 'footnotes',
#'zitate': 'citations',
}
"""German name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
u'abkürzung': 'abbreviation',
'akronym': 'acronym',
u'code': 'code',
'index': 'index',
'tiefgestellt': 'subscript',
'hochgestellt': 'superscript',
'titel-referenz': 'title-reference',
'pep-referenz': 'pep-reference',
'rfc-referenz': 'rfc-reference',
'betonung': 'emphasis',
'fett': 'strong',
u'wörtlich': 'literal',
u'mathe': 'math',
'benannte-referenz': 'named-reference',
'unbenannte-referenz': 'anonymous-reference',
    u'fußnoten-referenz': 'footnote-reference',
'zitat-referenz': 'citation-reference',
'ersetzungs-referenz': 'substitution-reference',
'ziel': 'target',
'uri-referenz': 'uri-reference',
u'unverändert': 'raw',
u'roh': 'raw',}
"""Mapping of German role names to canonical role names for interpreted text.
"""
| apache-2.0 | -5,421,123,507,015,214,000 | 32.563107 | 105 | 0.599364 | false |
openfun/edx-platform | common/djangoapps/pipeline_mako/__init__.py | 140 | 2444 | from edxmako.shortcuts import render_to_string
from pipeline.conf import settings
from pipeline.packager import Packager
from pipeline.utils import guess_type
from static_replace import try_staticfiles_lookup
def compressed_css(package_name, raw=False):
package = settings.PIPELINE_CSS.get(package_name, {})
if package:
package = {package_name: package}
packager = Packager(css_packages=package, js_packages={})
package = packager.package_for('css', package_name)
if settings.PIPELINE:
return render_css(package, package.output_filename, raw=raw)
else:
paths = packager.compile(package.paths)
return render_individual_css(package, paths, raw=raw)
def render_css(package, path, raw=False):
template_name = package.template_name or "mako/css.html"
context = package.extra_context
url = try_staticfiles_lookup(path)
if raw:
url += "?raw"
context.update({
'type': guess_type(path, 'text/css'),
'url': url,
})
return render_to_string(template_name, context)
def render_individual_css(package, paths, raw=False):
tags = [render_css(package, path, raw) for path in paths]
return '\n'.join(tags)
def compressed_js(package_name):
package = settings.PIPELINE_JS.get(package_name, {})
if package:
package = {package_name: package}
packager = Packager(css_packages={}, js_packages=package)
package = packager.package_for('js', package_name)
if settings.PIPELINE:
return render_js(package, package.output_filename)
else:
paths = packager.compile(package.paths)
templates = packager.pack_templates(package)
return render_individual_js(package, paths, templates)
def render_js(package, path):
template_name = package.template_name or "mako/js.html"
context = package.extra_context
context.update({
'type': guess_type(path, 'text/javascript'),
'url': try_staticfiles_lookup(path)
})
return render_to_string(template_name, context)
def render_inline_js(package, js):
context = package.extra_context
context.update({
'source': js
})
return render_to_string("mako/inline_js.html", context)
def render_individual_js(package, paths, templates=None):
tags = [render_js(package, js) for js in paths]
if templates:
tags.append(render_inline_js(package, templates))
return '\n'.join(tags)
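# Rough usage sketch (the package names below are hypothetical): these helpers
# are called from Mako templates to emit asset tags for a named pipeline package.
#   compressed_css('style-main')  # one <link> tag when settings.PIPELINE is on,
#                                 # otherwise one tag per source file
#   compressed_js('main-vendor')  # same idea for <script> tags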
| agpl-3.0 | 2,685,088,027,618,464,300 | 29.17284 | 68 | 0.677987 | false |
takeshineshiro/horizon | openstack_dashboard/dashboards/project/networks/tests.py | 6 | 99697 | # Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from django.utils.html import escape
import six
from horizon.workflows import views
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks.subnets import tables\
as subnets_tables
from openstack_dashboard.dashboards.project.networks import tables\
as networks_tables
from openstack_dashboard.dashboards.project.networks import workflows
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
INDEX_URL = reverse('horizon:project:networks:index')
def form_data_subnet(subnet,
name=None, cidr=None, ip_version=None,
gateway_ip='', enable_dhcp=None,
allocation_pools=None,
dns_nameservers=None,
host_routes=None):
def get_value(value, default):
return default if value is None else value
data = {}
data['subnet_name'] = get_value(name, subnet.name)
data['cidr'] = get_value(cidr, subnet.cidr)
data['ip_version'] = get_value(ip_version, subnet.ip_version)
gateway_ip = subnet.gateway_ip if gateway_ip == '' else gateway_ip
data['gateway_ip'] = gateway_ip or ''
data['no_gateway'] = (gateway_ip is None)
data['enable_dhcp'] = get_value(enable_dhcp, subnet.enable_dhcp)
if data['ip_version'] == 6:
data['ipv6_modes'] = subnet.ipv6_modes
pools = get_value(allocation_pools, subnet.allocation_pools)
data['allocation_pools'] = _str_allocation_pools(pools)
nameservers = get_value(dns_nameservers, subnet.dns_nameservers)
data['dns_nameservers'] = _str_dns_nameservers(nameservers)
routes = get_value(host_routes, subnet.host_routes)
data['host_routes'] = _str_host_routes(routes)
return data
def form_data_no_subnet():
return {'subnet_name': '',
'cidr': '',
'ip_version': 4,
'gateway_ip': '',
'no_gateway': False,
'enable_dhcp': True,
'allocation_pools': '',
'dns_nameservers': '',
'host_routes': ''}
def _str_allocation_pools(allocation_pools):
if isinstance(allocation_pools, str):
return allocation_pools
return '\n'.join(['%s,%s' % (pool['start'], pool['end'])
for pool in allocation_pools])
def _str_dns_nameservers(dns_nameservers):
if isinstance(dns_nameservers, str):
return dns_nameservers
return '\n'.join(dns_nameservers)
def _str_host_routes(host_routes):
if isinstance(host_routes, str):
return host_routes
return '\n'.join(['%s,%s' % (route['destination'], route['nexthop'])
for route in host_routes])
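# The helpers above encode list attributes the way the subnet forms expect
# (illustrative values):
#   _str_allocation_pools([{'start': '10.0.0.2', 'end': '10.0.0.254'}])
#       -> '10.0.0.2,10.0.0.254'
#   _str_host_routes([{'destination': '0.0.0.0/0', 'nexthop': '10.0.0.1'}])
#       -> '0.0.0.0/0,10.0.0.1'
#   _str_dns_nameservers(['8.8.8.8', '8.8.4.4'])
#       -> '8.8.8.8\n8.8.4.4'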
class NetworkTests(test.TestCase):
@test.create_stubs({api.neutron: ('network_list',),
quotas: ('tenant_quota_usages',)})
def test_index(self):
quota_data = self.quota_usages.first()
quota_data['networks']['available'] = 5
quota_data['subnets']['available'] = 5
api.neutron.network_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False).AndReturn(self.networks.list())
api.neutron.network_list(
IsA(http.HttpRequest),
shared=True).AndReturn([])
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/networks/index.html')
networks = res.context['networks_table'].data
self.assertItemsEqual(networks, self.networks.list())
@test.create_stubs({api.neutron: ('network_list',),
quotas: ('tenant_quota_usages',)})
def test_index_network_list_exception(self):
quota_data = self.quota_usages.first()
quota_data['networks']['available'] = 5
quota_data['subnets']['available'] = 5
api.neutron.network_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False).MultipleTimes().AndRaise(self.exceptions.neutron)
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/networks/index.html')
self.assertEqual(len(res.context['networks_table'].data), 0)
self.assertMessageCount(res, error=1)
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',),
quotas: ('tenant_quota_usages',)})
def test_network_detail(self):
self._test_network_detail()
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',),
quotas: ('tenant_quota_usages',)})
def test_network_detail_with_mac_learning(self):
self._test_network_detail(mac_learning=True)
def _test_network_detail(self, mac_learning=False):
quota_data = self.quota_usages.first()
quota_data['subnets']['available'] = 5
network_id = self.networks.first().id
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:detail',
args=[network_id]))
self.assertTemplateUsed(res, 'project/networks/detail.html')
subnets = res.context['subnets_table'].data
ports = res.context['ports_table'].data
self.assertItemsEqual(subnets, [self.subnets.first()])
self.assertItemsEqual(ports, [self.ports.first()])
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',)})
def test_network_detail_network_exception(self):
self._test_network_detail_network_exception()
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',)})
def test_network_detail_network_exception_with_mac_learning(self):
self._test_network_detail_network_exception(mac_learning=True)
def _test_network_detail_network_exception(self, mac_learning=False):
network_id = self.networks.first().id
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndRaise(self.exceptions.neutron)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:detail', args=[network_id])
res = self.client.get(url)
redir_url = INDEX_URL
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',),
quotas: ('tenant_quota_usages',)})
def test_network_detail_subnet_exception(self):
self._test_network_detail_subnet_exception()
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',),
quotas: ('tenant_quota_usages',)})
def test_network_detail_subnet_exception_with_mac_learning(self):
self._test_network_detail_subnet_exception(mac_learning=True)
def _test_network_detail_subnet_exception(self, mac_learning=False):
network_id = self.networks.first().id
quota_data = self.quota_usages.first()
quota_data['networks']['available'] = 5
quota_data['subnets']['available'] = 5
api.neutron.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id).\
AndRaise(self.exceptions.neutron)
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id).\
AndReturn([self.ports.first()])
# Called from SubnetTable
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:detail',
args=[network_id]))
self.assertTemplateUsed(res, 'project/networks/detail.html')
subnets = res.context['subnets_table'].data
ports = res.context['ports_table'].data
self.assertEqual(len(subnets), 0)
self.assertItemsEqual(ports, [self.ports.first()])
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',),
quotas: ('tenant_quota_usages',)})
def test_network_detail_port_exception(self):
self._test_network_detail_port_exception()
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',),
quotas: ('tenant_quota_usages',)})
def test_network_detail_port_exception_with_mac_learning(self):
self._test_network_detail_port_exception(mac_learning=True)
def _test_network_detail_port_exception(self, mac_learning=False):
network_id = self.networks.first().id
quota_data = self.quota_usages.first()
quota_data['subnets']['available'] = 5
api.neutron.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id).\
AndReturn([self.subnets.first()])
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id).\
AndRaise(self.exceptions.neutron)
# Called from SubnetTable
api.neutron.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:detail',
args=[network_id]))
self.assertTemplateUsed(res, 'project/networks/detail.html')
subnets = res.context['subnets_table'].data
ports = res.context['ports_table'].data
self.assertItemsEqual(subnets, [self.subnets.first()])
self.assertEqual(len(ports), 0)
@test.create_stubs({api.neutron: ('profile_list',)})
def test_network_create_get(self,
test_with_profile=False):
if test_with_profile:
net_profiles = self.net_profiles.list()
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:create')
res = self.client.get(url)
workflow = res.context['workflow']
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertEqual(workflow.name, workflows.CreateNetwork.name)
expected_objs = ['<CreateNetworkInfo: createnetworkinfoaction>',
'<CreateSubnetInfo: createsubnetinfoaction>',
'<CreateSubnetDetail: createsubnetdetailaction>']
self.assertQuerysetEqual(workflow.steps, expected_objs)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_get_with_profile(self):
self.test_network_create_get(test_with_profile=True)
@test.create_stubs({api.neutron: ('network_create',
'profile_list',)})
def test_network_create_post(self,
test_with_profile=False):
network = self.networks.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndReturn(network)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
# subnet
'with_subnet': False}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_no_subnet())
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_with_profile(self):
self.test_network_create_post(test_with_profile=True)
@test.create_stubs({api.neutron: ('network_create',
'subnet_create',
'profile_list',)})
def test_network_create_post_with_subnet(self,
test_with_profile=False,
test_with_ipv6=True):
network = self.networks.first()
subnet = self.subnets.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
subnet_params = {'network_id': network.id,
'name': subnet.name,
'cidr': subnet.cidr,
'ip_version': subnet.ip_version,
'gateway_ip': subnet.gateway_ip,
'enable_dhcp': subnet.enable_dhcp}
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
if not test_with_ipv6:
subnet.ip_version = 4
subnet_params['ip_version'] = subnet.ip_version
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndReturn(network)
api.neutron.subnet_create(IsA(http.HttpRequest),
**subnet_params).AndReturn(subnet)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_subnet(subnet, allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_with_subnet_w_profile(self):
self.test_network_create_post_with_subnet(test_with_profile=True)
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_ipv6': False})
def test_create_network_with_ipv6_disabled(self):
self.test_network_create_post_with_subnet(test_with_ipv6=False)
@test.create_stubs({api.neutron: ('network_create',
'profile_list',)})
def test_network_create_post_network_exception(self,
test_with_profile=False):
network = self.networks.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
# subnet
'with_subnet': False}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_no_subnet())
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_nw_exception_w_profile(self):
self.test_network_create_post_network_exception(
test_with_profile=True)
@test.create_stubs({api.neutron: ('network_create',
'profile_list')})
def test_network_create_post_with_subnet_network_exception(
self,
test_with_profile=False,
test_with_subnetpool=False,
):
network = self.networks.first()
subnet = self.subnets.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_subnet(subnet, allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_nw_create_post_w_subnet_nw_exception_w_profile(self):
self.test_network_create_post_with_subnet_network_exception(
test_with_profile=True)
@test.create_stubs({api.neutron: ('network_create',
'network_delete',
'subnet_create',
'profile_list',
'is_extension_supported',
'subnetpool_list',)})
def test_network_create_post_with_subnet_subnet_exception(
self,
test_with_profile=False,
):
network = self.networks.first()
subnet = self.subnets.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndReturn(network)
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp)\
.AndRaise(self.exceptions.neutron)
api.neutron.network_delete(IsA(http.HttpRequest),
network.id)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_subnet(subnet, allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_nw_create_post_w_subnet_subnet_exception_w_profile(self):
self.test_network_create_post_with_subnet_subnet_exception(
test_with_profile=True)
@test.create_stubs({api.neutron: ('profile_list',
'is_extension_supported',
'subnetpool_list',)})
def test_network_create_post_with_subnet_nocidr(self,
test_with_profile=False,
test_with_snpool=False):
network = self.networks.first()
subnet = self.subnets.first()
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
if test_with_snpool:
form_data['subnetpool_id'] = ''
form_data['prefixlen'] = ''
form_data.update(form_data_subnet(subnet, cidr='',
allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertContains(res, escape('Specify "Network Address", '
'"Address pool" or '
'clear "Create Subnet" checkbox.'))
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_nw_create_post_w_subnet_no_cidr_w_profile(self):
self.test_network_create_post_with_subnet_nocidr(
test_with_profile=True)
def test_network_create_post_with_subnet_nocidr_nosubnetpool(self):
self.test_network_create_post_with_subnet_nocidr(
test_with_snpool=True)
@test.create_stubs({api.neutron: ('profile_list',
'is_extension_supported',
'subnetpool_list',)})
def test_network_create_post_with_subnet_cidr_without_mask(
self,
test_with_profile=False,
test_with_subnetpool=False,
):
network = self.networks.first()
subnet = self.subnets.first()
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
if test_with_subnetpool:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
form_data['prefixlen'] = subnetpool.default_prefixlen
form_data.update(form_data_subnet(subnet, cidr='10.0.0.0',
allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
expected_msg = "The subnet in the Network Address is too small (/32)."
self.assertContains(res, expected_msg)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_nw_create_post_w_subnet_cidr_without_mask_w_profile(self):
self.test_network_create_post_with_subnet_cidr_without_mask(
test_with_profile=True)
def test_network_create_post_with_subnet_cidr_without_mask_w_snpool(self):
self.test_network_create_post_with_subnet_cidr_without_mask(
test_with_subnetpool=True)
@test.create_stubs({api.neutron: ('profile_list',
'is_extension_supported',
'subnetpool_list',)})
def test_network_create_post_with_subnet_cidr_inconsistent(
self,
test_with_profile=False,
test_with_subnetpool=False
):
network = self.networks.first()
subnet = self.subnets.first()
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
# dummy IPv6 address
cidr = '2001:0DB8:0:CD30:123:4567:89AB:CDEF/60'
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
if test_with_subnetpool:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
form_data['prefixlen'] = subnetpool.default_prefixlen
form_data.update(form_data_subnet(subnet, cidr=cidr,
allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
expected_msg = 'Network Address and IP version are inconsistent.'
self.assertContains(res, expected_msg)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_with_subnet_cidr_inconsistent_w_profile(self):
self.test_network_create_post_with_subnet_cidr_inconsistent(
test_with_profile=True)
def test_network_create_post_with_subnet_cidr_inconsistent_w_snpool(self):
self.test_network_create_post_with_subnet_cidr_inconsistent(
test_with_subnetpool=True)
@test.create_stubs({api.neutron: ('profile_list',
'is_extension_supported',
'subnetpool_list',)})
def test_network_create_post_with_subnet_gw_inconsistent(
self,
test_with_profile=False,
test_with_subnetpool=False,
):
network = self.networks.first()
subnet = self.subnets.first()
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
# dummy IPv6 address
gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
if test_with_subnetpool:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
form_data['prefixlen'] = subnetpool.default_prefixlen
form_data.update(form_data_subnet(subnet, gateway_ip=gateway_ip,
allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_with_subnet_gw_inconsistent_w_profile(self):
self.test_network_create_post_with_subnet_gw_inconsistent(
test_with_profile=True)
def test_network_create_post_with_subnet_gw_inconsistent_w_snpool(self):
self.test_network_create_post_with_subnet_gw_inconsistent(
test_with_subnetpool=True)
@test.create_stubs({api.neutron: ('network_get',)})
def test_network_update_get(self):
network = self.networks.first()
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:update', args=[network.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/networks/update.html')
@test.create_stubs({api.neutron: ('network_get',)})
def test_network_update_get_exception(self):
network = self.networks.first()
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:update', args=[network.id])
res = self.client.get(url)
redir_url = INDEX_URL
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_update',
'network_get',)})
def test_network_update_post(self):
network = self.networks.first()
api.neutron.network_update(IsA(http.HttpRequest), network.id,
name=network.name,
admin_state_up=network.admin_state_up)\
.AndReturn(network)
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
self.mox.ReplayAll()
form_data = {'network_id': network.id,
'name': network.name,
'admin_state': network.admin_state_up,
'tenant_id': network.tenant_id}
url = reverse('horizon:project:networks:update', args=[network.id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_update',
'network_get',)})
def test_network_update_post_exception(self):
network = self.networks.first()
api.neutron.network_update(IsA(http.HttpRequest), network.id,
name=network.name,
admin_state_up=network.admin_state_up)\
.AndRaise(self.exceptions.neutron)
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
self.mox.ReplayAll()
form_data = {'network_id': network.id,
'name': network.name,
'admin_state': network.admin_state_up,
'tenant_id': network.tenant_id}
url = reverse('horizon:project:networks:update', args=[network.id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_get',
'network_list',
'network_delete')})
def test_delete_network_no_subnet(self):
network = self.networks.first()
network.subnets = []
api.neutron.network_get(IsA(http.HttpRequest),
network.id,
expand_subnet=False)\
.AndReturn(network)
api.neutron.network_list(IsA(http.HttpRequest),
tenant_id=network.tenant_id,
shared=False)\
.AndReturn([network])
api.neutron.network_list(IsA(http.HttpRequest),
shared=True)\
.AndReturn([])
api.neutron.network_delete(IsA(http.HttpRequest), network.id)
self.mox.ReplayAll()
form_data = {'action': 'networks__delete__%s' % network.id}
res = self.client.post(INDEX_URL, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_get',
'network_list',
'network_delete',
'subnet_delete')})
def test_delete_network_with_subnet(self):
network = self.networks.first()
network.subnets = [subnet.id for subnet in network.subnets]
subnet_id = network.subnets[0]
api.neutron.network_get(IsA(http.HttpRequest),
network.id,
expand_subnet=False)\
.AndReturn(network)
api.neutron.network_list(IsA(http.HttpRequest),
tenant_id=network.tenant_id,
shared=False)\
.AndReturn([network])
api.neutron.network_list(IsA(http.HttpRequest), shared=True)\
.AndReturn([])
api.neutron.subnet_delete(IsA(http.HttpRequest), subnet_id)
api.neutron.network_delete(IsA(http.HttpRequest), network.id)
self.mox.ReplayAll()
form_data = {'action': 'networks__delete__%s' % network.id}
res = self.client.post(INDEX_URL, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_get',
'network_list',
'network_delete',
'subnet_delete')})
def test_delete_network_exception(self):
network = self.networks.first()
network.subnets = [subnet.id for subnet in network.subnets]
subnet_id = network.subnets[0]
api.neutron.network_get(IsA(http.HttpRequest),
network.id,
expand_subnet=False)\
.AndReturn(network)
api.neutron.network_list(IsA(http.HttpRequest),
tenant_id=network.tenant_id,
shared=False)\
.AndReturn([network])
api.neutron.network_list(IsA(http.HttpRequest),
shared=True)\
.AndReturn([])
api.neutron.subnet_delete(IsA(http.HttpRequest), subnet_id)
api.neutron.network_delete(IsA(http.HttpRequest), network.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'action': 'networks__delete__%s' % network.id}
res = self.client.post(INDEX_URL, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
class NetworkSubnetTests(test.TestCase):
@test.create_stubs({api.neutron: ('network_get',
'subnet_get',)})
def test_subnet_detail(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:subnets:detail',
args=[subnet.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/networks/subnets/detail.html')
self.assertEqual(res.context['subnet'].id, subnet.id)
@test.create_stubs({api.neutron: ('subnet_get',)})
def test_subnet_detail_exception(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:subnets:detail',
args=[subnet.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_get(self):
network = self.networks.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
self.mox.ReplayAll()
url = reverse('horizon:project:networks:addsubnet',
args=[network.id])
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_subnet_create_post(self, test_with_subnetpool=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp,
allocation_pools=subnet.allocation_pools)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_subnet_create_post_with_additional_attributes(self):
network = self.networks.list()[1]
subnet = self.subnets.list()[1]
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp,
allocation_pools=subnet.allocation_pools,
dns_nameservers=subnet.dns_nameservers,
host_routes=subnet.host_routes)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_subnet_create_post_with_additional_attributes_no_gateway(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=None,
enable_dhcp=subnet.enable_dhcp,
allocation_pools=subnet.allocation_pools)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet, gateway_ip=None)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_subnet_create_post_network_exception(self,
test_with_subnetpool=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {}
if test_with_subnetpool:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
form_data.update(form_data_subnet(subnet, allocation_pools=[]))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_subnet_create_post_network_exception_with_subnetpool(self):
self.test_subnet_create_post_network_exception(
test_with_subnetpool=True)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_subnet_create_post_subnet_exception(self,
test_with_subnetpool=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet, allocation_pools=[])
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_cidr_inconsistent(self,
test_with_subnetpool=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if test_with_subnetpool:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# dummy IPv6 address
cidr = '2001:0DB8:0:CD30:123:4567:89AB:CDEF/60'
form_data.update(form_data_subnet(subnet, cidr=cidr,
allocation_pools=[]))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
expected_msg = 'Network Address and IP version are inconsistent.'
self.assertFormErrors(res, 1, expected_msg)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
def test_subnet_create_post_cidr_inconsistent_with_subnetpool(self):
self.test_subnet_create_post_cidr_inconsistent(
test_with_subnetpool=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_gw_inconsistent(self,
test_with_subnetpool=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if test_with_subnetpool:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# dummy IPv6 address
gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
form_data.update(form_data_subnet(subnet, gateway_ip=gateway_ip,
allocation_pools=[]))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
def test_subnet_create_post_gw_inconsistent_with_subnetpool(self):
self.test_subnet_create_post_gw_inconsistent(test_with_subnetpool=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_invalid_pools_start_only(self,
test_w_snpool=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if test_w_snpool:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# Start only allocation_pools
allocation_pools = '10.0.0.2'
form_data.update(form_data_subnet(subnet,
allocation_pools=allocation_pools))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Start and end addresses must be specified '
'(value=%s)' % allocation_pools)
def test_subnet_create_post_invalid_pools_start_only_with_subnetpool(self):
self.test_subnet_create_post_invalid_pools_start_only(
test_w_snpool=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_invalid_pools_three_entries(self,
t_w_snpool=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if t_w_snpool:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# pool with three entries
allocation_pools = '10.0.0.2,10.0.0.3,10.0.0.4'
form_data.update(form_data_subnet(subnet,
allocation_pools=allocation_pools))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Start and end addresses must be specified '
'(value=%s)' % allocation_pools)
def test_subnet_create_post_invalid_pools_three_entries_w_subnetpool(self):
self.test_subnet_create_post_invalid_pools_three_entries(
t_w_snpool=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_invalid_pools_invalid_address(self,
t_w_snpl=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if t_w_snpl:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# end address is not a valid IP address
allocation_pools = '10.0.0.2,invalid_address'
form_data.update(form_data_subnet(subnet,
allocation_pools=allocation_pools))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'allocation_pools: Invalid IP address '
'(value=%s)' % allocation_pools.split(',')[1])
def test_subnet_create_post_invalid_pools_invalid_address_w_snpool(self):
self.test_subnet_create_post_invalid_pools_invalid_address(
t_w_snpl=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_invalid_pools_ip_network(self,
test_w_snpool=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if test_w_snpool:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# start address is CIDR
allocation_pools = '10.0.0.2/24,10.0.0.5'
form_data.update(form_data_subnet(subnet,
allocation_pools=allocation_pools))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'allocation_pools: Invalid IP address '
'(value=%s)' % allocation_pools.split(',')[0])
def test_subnet_create_post_invalid_pools_ip_network_with_subnetpool(self):
self.test_subnet_create_post_invalid_pools_ip_network(
test_w_snpool=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_invalid_pools_start_larger_than_end(self,
tsn=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if tsn:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# start address is larger than end address
allocation_pools = '10.0.0.254,10.0.0.2'
        form_data.update(form_data_subnet(subnet,
                                          allocation_pools=allocation_pools))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Start address is larger than end address '
'(value=%s)' % allocation_pools)
def test_subnet_create_post_invalid_pools_start_larger_than_end_tsn(self):
self.test_subnet_create_post_invalid_pools_start_larger_than_end(
tsn=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_invalid_nameservers(self,
test_w_subnetpool=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if test_w_subnetpool:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# invalid DNS server address
dns_nameservers = ['192.168.0.2', 'invalid_address']
form_data.update(form_data_subnet(subnet,
dns_nameservers=dns_nameservers,
allocation_pools=[]))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'dns_nameservers: Invalid IP address '
'(value=%s)' % dns_nameservers[1])
def test_subnet_create_post_invalid_nameservers_with_subnetpool(self):
self.test_subnet_create_post_invalid_nameservers(
test_w_subnetpool=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_invalid_routes_destination_only(self,
tsn=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if tsn:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
        # host_routes with only a destination CIDR (no nexthop)
host_routes = '192.168.0.0/24'
form_data.update(form_data_subnet(subnet,
allocation_pools=[],
host_routes=host_routes))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)' % host_routes)
def test_subnet_create_post_invalid_routes_destination_only_w_snpool(self):
self.test_subnet_create_post_invalid_routes_destination_only(
tsn=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_invalid_routes_three_entries(self,
tsn=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if tsn:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# host_route with three entries
host_routes = 'aaaa,bbbb,cccc'
form_data.update(form_data_subnet(subnet,
allocation_pools=[],
host_routes=host_routes))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)' % host_routes)
def test_subnet_create_post_invalid_routes_three_entries_with_tsn(self):
self.test_subnet_create_post_invalid_routes_three_entries(
tsn=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_invalid_routes_invalid_destination(self,
tsn=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if tsn:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# invalid destination network
host_routes = '172.16.0.0/64,10.0.0.253'
form_data.update(form_data_subnet(subnet,
host_routes=host_routes,
allocation_pools=[]))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'host_routes: Invalid IP address '
'(value=%s)' % host_routes.split(',')[0])
def test_subnet_create_post_invalid_routes_invalid_destination_tsn(self):
self.test_subnet_create_post_invalid_routes_invalid_destination(
tsn=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_invalid_routes_nexthop_ip_network(self,
tsn=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if tsn:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# nexthop is not an IP address
host_routes = '172.16.0.0/24,10.0.0.253/24'
form_data.update(form_data_subnet(subnet,
host_routes=host_routes,
allocation_pools=[]))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'host_routes: Invalid IP address '
'(value=%s)' % host_routes.split(',')[1])
def test_subnet_create_post_invalid_routes_nexthop_ip_network_tsn(self):
self.test_subnet_create_post_invalid_routes_nexthop_ip_network(
tsn=True)
@test.create_stubs({api.neutron: ('is_extension_supported',
'network_get',
'subnet_create',
'subnetpool_list',)})
def test_v6subnet_create_post(self):
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
network = self.networks.get(name="v6_net1")
subnet = self.subnets.get(name="v6_subnet1")
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(network)
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp,
allocation_pools=subnet.allocation_pools)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_v6subnet_create_post_with_slaac_attributes(self):
network = self.networks.get(name="v6_net2")
subnet = self.subnets.get(name="v6_subnet2")
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(network)
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp,
allocation_pools=subnet.allocation_pools,
ipv6_address_mode='slaac',
ipv6_ra_mode='slaac')\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
name=subnet.name,
enable_dhcp=subnet.enable_dhcp,
dns_nameservers=[],
host_routes=[])\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_with_gateway_ip(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
gateway_ip = '10.0.0.100'
api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
name=subnet.name,
gateway_ip=gateway_ip,
enable_dhcp=subnet.enable_dhcp,
dns_nameservers=[],
host_routes=[])\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet,
gateway_ip=gateway_ip,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_no_gateway(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
name=subnet.name,
gateway_ip=None,
enable_dhcp=subnet.enable_dhcp,
dns_nameservers=[],
host_routes=[])\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet,
gateway_ip=None,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_with_additional_attributes(self):
subnet = self.subnets.list()[1]
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
start = subnet.allocation_pools[0]['start']
end = subnet.allocation_pools[0]['end']
api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
name=subnet.name,
enable_dhcp=False,
dns_nameservers=subnet.dns_nameservers,
host_routes=subnet.host_routes,
allocation_pools=[{'start': start,
'end': end}])\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet,
enable_dhcp=False)
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_gw_inconsistent(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# dummy IPv6 address
gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
form_data = form_data_subnet(subnet, gateway_ip=gateway_ip,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_nameservers(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# invalid DNS server address
dns_nameservers = ['192.168.0.2', 'invalid_address']
form_data = form_data_subnet(subnet, dns_nameservers=dns_nameservers,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'dns_nameservers: Invalid IP address '
'(value=%s)' % dns_nameservers[1])
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_routes_destination_only(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
        # host_routes with only a destination CIDR (no nexthop)
host_routes = '192.168.0.0/24'
form_data = form_data_subnet(subnet,
allocation_pools=[],
host_routes=host_routes)
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)' % host_routes)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_routes_three_entries(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# host_route with three entries
host_routes = 'aaaa,bbbb,cccc'
form_data = form_data_subnet(subnet,
allocation_pools=[],
host_routes=host_routes)
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)' % host_routes)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_routes_invalid_destination(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# invalid destination network
host_routes = '172.16.0.0/64,10.0.0.253'
form_data = form_data_subnet(subnet,
host_routes=host_routes,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'host_routes: Invalid IP address '
'(value=%s)' % host_routes.split(',')[0])
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_routes_nexthop_ip_network(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# nexthop is not an IP address
host_routes = '172.16.0.0/24,10.0.0.253/24'
form_data = form_data_subnet(subnet,
host_routes=host_routes,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'host_routes: Invalid IP address '
'(value=%s)' % host_routes.split(',')[1])
@test.create_stubs({api.neutron: ('subnet_delete',
'subnet_list',
'network_get',
'port_list',
'is_extension_supported',)})
def test_subnet_delete(self):
self._test_subnet_delete()
@test.create_stubs({api.neutron: ('subnet_delete',
'subnet_list',
'network_get',
'port_list',
'is_extension_supported',)})
def test_subnet_delete_with_mac_learning(self):
self._test_subnet_delete(mac_learning=True)
def _test_subnet_delete(self, mac_learning=False):
subnet = self.subnets.first()
network_id = subnet.network_id
api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
# Called from SubnetTable
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
form_data = {'action': 'subnets__delete__%s' % subnet.id}
url = reverse('horizon:project:networks:detail',
args=[network_id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, url)
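    # Descriptive note (added for clarity): Horizon table actions are posted as
    # '<table-name>__<action-name>__<row-id>', so the form_data above,
    # 'subnets__delete__<id>', triggers the delete action on the subnets table
    # for that single row.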
@test.create_stubs({api.neutron: ('subnet_delete',
'subnet_list',
'network_get',
'port_list',
'is_extension_supported',)})
def test_subnet_delete_exception(self):
self._test_subnet_delete_exception()
@test.create_stubs({api.neutron: ('subnet_delete',
'subnet_list',
'network_get',
'port_list',
'is_extension_supported',)})
def test_subnet_delete_exception_with_mac_learning(self):
self._test_subnet_delete_exception(mac_learning=True)
def _test_subnet_delete_exception(self, mac_learning=False):
subnet = self.subnets.first()
network_id = subnet.network_id
api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)\
.AndRaise(self.exceptions.neutron)
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
# Called from SubnetTable
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
form_data = {'action': 'subnets__delete__%s' % subnet.id}
url = reverse('horizon:project:networks:detail',
args=[network_id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, url)
class NetworkPortTests(test.TestCase):
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',)})
def test_port_detail(self):
self._test_port_detail()
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',)})
def test_port_detail_with_mac_learning(self):
self._test_port_detail(mac_learning=True)
def _test_port_detail(self, mac_learning=False):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(self.ports.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:ports:detail',
args=[port.id]))
self.assertTemplateUsed(res, 'project/networks/ports/detail.html')
self.assertEqual(res.context['port'].id, port.id)
@test.create_stubs({api.neutron: ('port_get',)})
def test_port_detail_exception(self):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:ports:detail',
args=[port.id]))
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',)})
def test_port_update_get(self):
self._test_port_update_get()
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',)})
def test_port_update_get_with_mac_learning(self):
self._test_port_update_get(mac_learning=True)
def _test_port_update_get(self, mac_learning=False, binding=False):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest),
port.id)\
.AndReturn(port)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'binding')\
.AndReturn(binding)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:editport',
args=[port.network_id, port.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/networks/ports/update.html')
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',
'port_update')})
def test_port_update_post(self):
self._test_port_update_post()
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',
'port_update')})
def test_port_update_post_with_mac_learning(self):
self._test_port_update_post(mac_learning=True)
def _test_port_update_post(self, mac_learning=False, binding=False):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(port)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'binding')\
.AndReturn(binding)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
extension_kwargs = {}
if binding:
extension_kwargs['binding__vnic_type'] = port.binding__vnic_type
if mac_learning:
extension_kwargs['mac_learning_enabled'] = True
api.neutron.port_update(IsA(http.HttpRequest), port.id,
name=port.name,
admin_state_up=port.admin_state_up,
**extension_kwargs)\
.AndReturn(port)
self.mox.ReplayAll()
form_data = {'network_id': port.network_id,
'port_id': port.id,
'name': port.name,
'admin_state': port.admin_state_up}
if binding:
form_data['binding__vnic_type'] = port.binding__vnic_type
if mac_learning:
form_data['mac_state'] = True
url = reverse('horizon:project:networks:editport',
args=[port.network_id, port.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[port.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',
'port_update')})
def test_port_update_post_exception(self):
self._test_port_update_post_exception()
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',
'port_update')})
def test_port_update_post_exception_with_mac_learning(self):
self._test_port_update_post_exception(mac_learning=True)
def _test_port_update_post_exception(self, mac_learning=False,
binding=False):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(port)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'binding')\
.AndReturn(binding)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
extension_kwargs = {}
if binding:
extension_kwargs['binding__vnic_type'] = port.binding__vnic_type
if mac_learning:
extension_kwargs['mac_learning_enabled'] = True
api.neutron.port_update(IsA(http.HttpRequest), port.id,
name=port.name,
admin_state_up=port.admin_state_up,
**extension_kwargs)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'network_id': port.network_id,
'port_id': port.id,
'name': port.name,
'admin_state': port.admin_state_up}
if binding:
form_data['binding__vnic_type'] = port.binding__vnic_type
if mac_learning:
form_data['mac_state'] = True
url = reverse('horizon:project:networks:editport',
args=[port.network_id, port.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[port.network_id])
self.assertRedirectsNoFollow(res, redir_url)
class NetworkViewTests(test.TestCase):
def _test_create_button_disabled_when_quota_exceeded(
self, expected_string, network_quota=5, subnet_quota=5):
quota_data = self.quota_usages.first()
quota_data['networks']['available'] = network_quota
quota_data['subnets']['available'] = subnet_quota
api.neutron.network_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False).AndReturn(self.networks.list())
api.neutron.network_list(
IsA(http.HttpRequest),
shared=True).AndReturn([])
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/networks/index.html')
networks = res.context['networks_table'].data
self.assertItemsEqual(networks, self.networks.list())
self.assertContains(res, expected_string, html=True,
msg_prefix="The create button is not disabled")
@test.create_stubs({api.neutron: ('network_list',),
quotas: ('tenant_quota_usages',)})
def test_network_create_button_disabled_when_quota_exceeded_index(self):
create_link = networks_tables.CreateNetwork()
url = create_link.get_link_url()
classes = (list(create_link.get_default_classes())
+ list(create_link.classes))
link_name = "%s (%s)" % (six.text_type(create_link.verbose_name),
"Quota exceeded")
expected_string = "<a href='%s' title='%s' class='%s disabled' "\
"id='networks__action_create'>" \
"<span class='fa fa-plus'></span>%s</a>" \
% (url, link_name, " ".join(classes), link_name)
self._test_create_button_disabled_when_quota_exceeded(expected_string,
network_quota=0)
@test.create_stubs({api.neutron: ('network_list',),
quotas: ('tenant_quota_usages',)})
def test_subnet_create_button_disabled_when_quota_exceeded_index(self):
network_id = self.networks.first().id
create_link = networks_tables.CreateSubnet()
url = reverse(create_link.get_link_url(), args=[network_id])
classes = (list(create_link.get_default_classes())
+ list(create_link.classes))
link_name = "%s (%s)" % (six.text_type(create_link.verbose_name),
"Quota exceeded")
expected_string = "<a href='%s' class='%s disabled' "\
"id='networks__row_%s__action_subnet'>%s</a>" \
% (url, " ".join(classes), network_id, link_name)
self._test_create_button_disabled_when_quota_exceeded(expected_string,
subnet_quota=0)
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',),
quotas: ('tenant_quota_usages',)})
def test_subnet_create_button_disabled_when_quota_exceeded_detail(self):
network_id = self.networks.first().id
quota_data = self.quota_usages.first()
quota_data['subnets']['available'] = 0
api.neutron.network_get(
IsA(http.HttpRequest), network_id)\
.MultipleTimes().AndReturn(self.networks.first())
api.neutron.subnet_list(
IsA(http.HttpRequest), network_id=network_id)\
.AndReturn(self.subnets.list())
api.neutron.port_list(
IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'mac-learning')\
.AndReturn(False)
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:detail',
args=[network_id]))
self.assertTemplateUsed(res, 'project/networks/detail.html')
subnets = res.context['subnets_table'].data
self.assertItemsEqual(subnets, self.subnets.list())
class FakeTable(object):
kwargs = {'network_id': network_id}
create_link = subnets_tables.CreateSubnet()
create_link.table = FakeTable()
url = create_link.get_link_url()
classes = (list(create_link.get_default_classes())
+ list(create_link.classes))
link_name = "%s (%s)" % (six.text_type(create_link.verbose_name),
"Quota exceeded")
expected_string = "<a href='%s' title='%s' class='%s disabled' "\
"id='subnets__action_create'>" \
"<span class='fa fa-plus'></span>%s</a>" \
% (url, link_name, " ".join(classes), link_name)
self.assertContains(res, expected_string, html=True,
msg_prefix="The create button is not disabled")
| apache-2.0 | 6,345,434,674,920,206,000 | 43.191933 | 79 | 0.539053 | false |
cpg1111/kubernetes | cluster/juju/charms/trusty/kubernetes/unit_tests/lib/test_registrator.py | 96 | 2163 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from mock import MagicMock, patch
from path import Path
import pytest
import sys
d = Path('__file__').parent.abspath() / 'hooks'
sys.path.insert(0, d.abspath())
from lib.registrator import Registrator
class TestRegistrator():
def setup_method(self, method):
self.r = Registrator()
def test_data_type(self):
if type(self.r.data) is not dict:
pytest.fail("Invalid type")
@patch('json.loads')
@patch('httplib.HTTPConnection')
def test_register(self, httplibmock, jsonmock):
self.r.register('foo', 80, '/v1/test')
httplibmock.assert_called_with('foo', 80, timeout=12)
requestmock = httplibmock().request
requestmock.assert_called_with(
"POST", "/v1/test",
json.dumps(self.r.data),
{"Content-type": "application/json",
"Accept": "application/json"})
def test_command_succeeded(self):
response = MagicMock()
result = json.loads('{"status": "Failure", "kind": "Status", "code": 409, "apiVersion": "v1", "reason": "AlreadyExists", "details": {"kind": "node", "name": "10.200.147.200"}, "message": "node \\"10.200.147.200\\" already exists", "creationTimestamp": null}') # noqa
response.status = 200
self.r.command_succeeded(response, result)
response.status = 409
self.r.command_succeeded(response, result)
response.status = 500
with pytest.raises(RuntimeError):
self.r.command_succeeded(response, result)
| apache-2.0 | 398,718,075,530,056,960 | 35.05 | 275 | 0.662968 | false |
dubourg/openturns | python/test/t_DistFunc_noncentralchisquare.py | 8 | 2054 | #! /usr/bin/env python
from __future__ import print_function
from openturns import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
# NonCentralChiSquare related functions
# dNonCentralChiSquare
nuMin = 0.2
nuMax = 5.0
n1 = 5
lambdaMin = 0.2
lambdaMax = 5.0
n2 = 5
xMin = 0.1
xMax = 0.9
nX = 5
for i1 in range(n1):
nu = nuMin + (nuMax - nuMin) * i1 / (n1 - 1)
for i2 in range(n2):
lambda_ = lambdaMin + (lambdaMax - lambdaMin) * i2 / (n2 - 1)
for iX in range(nX):
x = xMin + (xMax - xMin) * iX / (nX - 1)
print("dNonCentralChiSquare(", nu, ", ", lambda_, ", %.12g" %
x, ")=%.6g" % DistFunc.dNonCentralChiSquare(nu, lambda_, x))
# pNonCentralChiSquare
nuMin = 0.2
nuMax = 5.0
n1 = 5
lambdaMin = 0.2
lambdaMax = 5.0
n2 = 5
xMin = 0.1
xMax = 0.9
nX = 5
for i1 in range(n1):
nu = nuMin + (nuMax - nuMin) * i1 / (n1 - 1)
for i2 in range(n2):
lambda_ = lambdaMin + (lambdaMax - lambdaMin) * i2 / (n2 - 1)
for iX in range(nX):
x = xMin + (xMax - xMin) * iX / (nX - 1)
print("pNonCentralChiSquare(", nu, ", ", lambda_, ", %.12g" % x, ")=%.6g" % DistFunc.pNonCentralChiSquare(
nu, lambda_, x), ", complementary=%.6g" % DistFunc.pNonCentralChiSquare(nu, lambda_, x, True))
# rNonCentralChiSquare
nuMin = 0.2
nuMax = 5.0
n1 = 5
lambdaMin = 0.2
lambdaMax = 5.0
n2 = 5
nR = 5
for i1 in range(n1):
nu = nuMin + (nuMax - nuMin) * i1 / (n1 - 1)
for i2 in range(n2):
lambda_ = lambdaMin + (lambdaMax - lambdaMin) * i2 / (n2 - 1)
for iR in range(nR):
print("rNonCentralChiSquare(", nu, ", ", lambda_, ")=%.6g" %
DistFunc.rNonCentralChiSquare(nu, lambda_))
except:
import sys
print("t_DistFunc_noncentralchisquare.py",
sys.exc_info()[0], sys.exc_info()[1])
| gpl-3.0 | -3,741,731,971,190,417,000 | 30.121212 | 123 | 0.510224 | false |
bopo/cookiecutter-django | {{cookiecutter.project_slug}}/config/settings/production.py | 2 | 11973 | # -*- coding: utf-8 -*-
"""
Production Configurations
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis for cache
{% if cookiecutter.use_sentry_for_error_reporting == 'y' %}
- Use sentry for error logging
{% endif %}
{% if cookiecutter.use_opbeat == 'y' %}
- Use opbeat for error reporting
{% endif %}
"""
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
{% if cookiecutter.use_sentry_for_error_reporting == 'y' %}
import logging
{% endif %}
from .base import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env('DJANGO_SECRET_KEY')
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
{%- if cookiecutter.use_sentry_for_error_reporting == 'y' %}
# raven sentry client
# See https://docs.sentry.io/clients/python/integrations/django/
INSTALLED_APPS += ['raven.contrib.django.raven_compat', ]
{% endif %}
{%- if cookiecutter.use_whitenoise == 'y' %}
# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.io/
WHITENOISE_MIDDLEWARE = ['whitenoise.middleware.WhiteNoiseMiddleware', ]
MIDDLEWARE = WHITENOISE_MIDDLEWARE + MIDDLEWARE
{% endif %}
{%- if cookiecutter.use_sentry_for_error_reporting == 'y' -%}
RAVEN_MIDDLEWARE = ['raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware']
MIDDLEWARE = RAVEN_MIDDLEWARE + MIDDLEWARE
{% endif %}
{%- if cookiecutter.use_opbeat == 'y' -%}
# opbeat integration
# See https://opbeat.com/languages/django/
INSTALLED_APPS += ['opbeat.contrib.django', ]
OPBEAT = {
'ORGANIZATION_ID': env('DJANGO_OPBEAT_ORGANIZATION_ID'),
'APP_ID': env('DJANGO_OPBEAT_APP_ID'),
'SECRET_TOKEN': env('DJANGO_OPBEAT_SECRET_TOKEN')
}
MIDDLEWARE = ['opbeat.contrib.django.middleware.OpbeatAPMMiddleware', ] + MIDDLEWARE
{% endif %}
# SECURITY CONFIGURATION
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security
# and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = 'DENY'
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['{{cookiecutter.domain_name}}', ])
# END SITE CONFIGURATION
INSTALLED_APPS += ['gunicorn', ]
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.io/en/latest/index.html
INSTALLED_APPS += ['storages', ]
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
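# Illustrative note (not part of the original template): AWS_EXPIRY above is
# 60 * 60 * 24 * 7 = 604800 seconds, so the Cache-Control header sent with
# uploaded objects reads 'max-age=604800, s-maxage=604800, must-revalidate',
# letting browsers and shared caches keep S3 assets for one week.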
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
{% if cookiecutter.use_whitenoise == 'y' -%}
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
{% else %}
# See:http://stackoverflow.com/questions/10390244/
from storages.backends.s3boto import S3BotoStorage
StaticRootS3BotoStorage = lambda: S3BotoStorage(location='static')
MediaRootS3BotoStorage = lambda: S3BotoStorage(location='media')
DEFAULT_FILE_STORAGE = 'config.settings.production.MediaRootS3BotoStorage'
MEDIA_URL = 'https://s3.amazonaws.com/%s/media/' % AWS_STORAGE_BUCKET_NAME
{%- endif %}
# Static Assets
# ------------------------
{% if cookiecutter.use_whitenoise == 'y' -%}
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
{% else %}
STATIC_URL = 'https://s3.amazonaws.com/%s/static/' % AWS_STORAGE_BUCKET_NAME
STATICFILES_STORAGE = 'config.settings.production.StaticRootS3BotoStorage'
# See: https://github.com/antonagestam/collectfast
# For Django 1.7+, 'collectfast' should come before
# 'django.contrib.staticfiles'
AWS_PRELOAD_METADATA = True
INSTALLED_APPS = ['collectfast', ] + INSTALLED_APPS
{%- endif %}
{% if cookiecutter.use_compressor == 'y'-%}
# COMPRESSOR
# ------------------------------------------------------------------------------
COMPRESS_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
COMPRESS_URL = STATIC_URL
COMPRESS_ENABLED = env.bool('COMPRESS_ENABLED', default=True)
{%- endif %}
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='{{cookiecutter.project_name}} <noreply@{{cookiecutter.domain_name}}>')
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[{{cookiecutter.project_name}}]')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# Anymail with Mailgun
INSTALLED_APPS += ['anymail', ]
ANYMAIL = {
'MAILGUN_API_KEY': env('DJANGO_MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_SENDER_DOMAIN')
}
EMAIL_BACKEND = 'anymail.backends.mailgun.MailgunBackend'
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
{% if cookiecutter.use_elasticbeanstalk_experimental.lower() == 'y' -%}
# Uses Amazon RDS for database hosting, which doesn't follow the Heroku-style spec
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': env('RDS_DB_NAME'),
'USER': env('RDS_USERNAME'),
'PASSWORD': env('RDS_PASSWORD'),
'HOST': env('RDS_HOSTNAME'),
'PORT': env('RDS_PORT'),
}
}
{% else %}
# Use the Heroku-style specification
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db('DATABASE_URL')
{%- endif %}
# CACHING
# ------------------------------------------------------------------------------
{% if cookiecutter.use_elasticbeanstalk_experimental.lower() == 'y' -%}
REDIS_LOCATION = 'redis://{}:{}/0'.format(
env('REDIS_ENDPOINT_ADDRESS'),
env('REDIS_PORT')
)
{% else %}
REDIS_LOCATION = '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0)
{%- endif %}
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': REDIS_LOCATION,
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'IGNORE_EXCEPTIONS': True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
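# Illustrative example with a made-up hostname: if REDIS_URL is
# 'redis://redis.example.com:6379', REDIS_LOCATION above resolves to
# 'redis://redis.example.com:6379/0', i.e. database 0 on that server.
# IGNORE_EXCEPTIONS keeps a Redis outage from taking the site down, mirroring
# memcached's failure behaviour.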
{% if cookiecutter.use_sentry_for_error_reporting == 'y' %}
# Sentry Configuration
SENTRY_DSN = env('DJANGO_SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry', ],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console', ],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console', ],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console', ],
'propagate': False,
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'sentry', ],
'propagate': False,
},
},
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),
'DSN': SENTRY_DSN
}
{% elif cookiecutter.use_sentry_for_error_reporting == 'n' %}
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false', ],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins', ],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins', ],
'propagate': True
}
}
}
{% endif %}
# Custom Admin URL, use {% raw %}{% url 'admin:index' %}{% endraw %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
| bsd-3-clause | -2,353,119,871,720,951,300 | 35.614679 | 117 | 0.602689 | false |
tukutela/Kay-Framework | kay/lib/werkzeug/contrib/atom.py | 25 | 14976 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.atom
~~~~~~~~~~~~~~~~~~~~~
This module provides a class called :class:`AtomFeed` which can be
used to generate feeds in the Atom syndication format (see :rfc:`4287`).
Example::
def atom_feed(request):
feed = AtomFeed("My Blog", feed_url=request.url,
url=request.host_url,
subtitle="My example blog for a feed test.")
for post in Post.query.limit(10).all():
feed.add(post.title, post.body, content_type='html',
author=post.author, url=post.url, id=post.uid,
updated=post.last_update, published=post.pub_date)
return feed.get_response()
:copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from werkzeug.utils import escape
from werkzeug.wrappers import BaseResponse
XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'
def _make_text_block(name, content, content_type=None):
"""Helper function for the builder that creates an XML text block."""
if content_type == 'xhtml':
return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % \
(name, XHTML_NAMESPACE, content, name)
if not content_type:
return u'<%s>%s</%s>\n' % (name, escape(content), name)
return u'<%s type="%s">%s</%s>\n' % (name, content_type,
escape(content), name)
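# Quick illustration of the helper above (examples only, not executed):
#   _make_text_block('title', 'Hello & welcome')
#   -> u'<title>Hello &amp; welcome</title>\n'
#   _make_text_block('summary', '<p>hi</p>', 'html')
#   -> u'<summary type="html">&lt;p&gt;hi&lt;/p&gt;</summary>\n'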
def format_iso8601(obj):
"""Format a datetime object for iso8601"""
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
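# For example, format_iso8601(datetime(2010, 5, 17, 8, 30, 0)) returns
# '2010-05-17T08:30:00Z'; the datetime is assumed to already be in UTC, since
# the trailing 'Z' is hard-coded.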
class AtomFeed(object):
"""A helper class that creates Atom feeds.
:param title: the title of the feed. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the feed (not the url *of* the feed)
:param id: a globally unique id for the feed. Must be an URI. If
not present the `feed_url` is used, but one of both is
required.
:param updated: the time the feed was modified the last time. Must
be a :class:`datetime.datetime` object. If not
present the latest entry's `updated` is used.
:param feed_url: the URL to the feed. Should be the URL that was
requested.
:param author: the author of the feed. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can be a list of (may be
mixed, too) strings and dicts, too, if there are
multiple authors. Required if not every entry has an
author element.
:param icon: an icon for the feed.
:param logo: a logo for the feed.
:param rights: copyright information for the feed.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param subtitle: a short description of the feed.
:param subtitle_type: the type attribute for the subtitle element.
One of ``'text'``, ``'html'``, ``'text'``
or ``'xhtml'``. Default is ``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param generator: the software that generated this feed. This must be
a tuple in the form ``(name, url, version)``. If
you don't want to specify one of them, set the item
to `None`.
:param entries: a list with the entries for the feed. Entries can also
be added later with :meth:`add`.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
"""
default_generator = ('Werkzeug', None, None)
def __init__(self, title=None, entries=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.url = kwargs.get('url')
self.feed_url = kwargs.get('feed_url', self.url)
self.id = kwargs.get('id', self.feed_url)
self.updated = kwargs.get('updated')
self.author = kwargs.get('author', ())
self.icon = kwargs.get('icon')
self.logo = kwargs.get('logo')
self.rights = kwargs.get('rights')
self.rights_type = kwargs.get('rights_type')
self.subtitle = kwargs.get('subtitle')
self.subtitle_type = kwargs.get('subtitle_type', 'text')
self.generator = kwargs.get('generator')
if self.generator is None:
self.generator = self.default_generator
self.links = kwargs.get('links', [])
self.entries = entries and list(entries) or []
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, (basestring, dict)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
for author in self.author:
if 'name' not in author:
raise TypeError('author must contain at least a name')
def add(self, *args, **kwargs):
"""Add a new entry to the feed. This function can either be called
with a :class:`FeedEntry` or some keyword and positional arguments
that are forwarded to the :class:`FeedEntry` constructor.
"""
if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
self.entries.append(args[0])
else:
kwargs['feed_url'] = self.feed_url
self.entries.append(FeedEntry(*args, **kwargs))
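    # Both call styles accepted by add(), sketched with made-up values:
    #   feed.add(FeedEntry('Post title', 'Body text', id='urn:example:1',
    #                      updated=datetime.utcnow()))
    #   feed.add('Post title', 'Body text', content_type='html',
    #            url='http://example.com/post/1', updated=datetime.utcnow())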
def __repr__(self):
return '<%s %r (%d entries)>' % (
self.__class__.__name__,
self.title,
len(self.entries)
)
def generate(self):
"""Return a generator that yields pieces of XML."""
# atom demands either an author element in every entry or a global one
if not self.author:
if False in map(lambda e: bool(e.author), self.entries):
                self.author = ({'name': u'Unknown author'},)
if not self.updated:
dates = sorted([entry.updated for entry in self.entries])
self.updated = dates and dates[-1] or datetime.utcnow()
yield u'<?xml version="1.0" encoding="utf-8"?>\n'
yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
yield ' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url, True)
if self.feed_url:
yield u' <link href="%s" rel="self" />\n' % \
escape(self.feed_url, True)
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(link[k], True)) for k in link)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield ' <email>%s</email>\n' % escape(author['email'])
yield ' </author>\n'
if self.subtitle:
yield ' ' + _make_text_block('subtitle', self.subtitle,
self.subtitle_type)
if self.icon:
yield u' <icon>%s</icon>\n' % escape(self.icon)
if self.logo:
yield u' <logo>%s</logo>\n' % escape(self.logo)
if self.rights:
yield ' ' + _make_text_block('rights', self.rights,
self.rights_type)
generator_name, generator_url, generator_version = self.generator
if generator_name or generator_url or generator_version:
tmp = [u' <generator']
if generator_url:
tmp.append(u' uri="%s"' % escape(generator_url, True))
if generator_version:
tmp.append(u' version="%s"' % escape(generator_version, True))
tmp.append(u'>%s</generator>\n' % escape(generator_name))
yield u''.join(tmp)
for entry in self.entries:
for line in entry.generate():
yield u' ' + line
yield u'</feed>\n'
def to_string(self):
"""Convert the feed into a string."""
return u''.join(self.generate())
def get_response(self):
"""Return a response object for the feed."""
return BaseResponse(self.to_string(), mimetype='application/atom+xml')
def __call__(self, environ, start_response):
"""Use the class as WSGI response object."""
return self.get_response()(environ, start_response)
def __unicode__(self):
return self.to_string()
def __str__(self):
return self.to_string().encode('utf-8')
class FeedEntry(object):
"""Represents a single entry in a feed.
:param title: the title of the entry. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param content: the content of the entry.
:param content_type: the type attribute for the content element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param summary: a summary of the entry's content.
:param summary_type: the type attribute for the summary element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the entry.
:param id: a globally unique id for the entry. Must be an URI. If
not present the URL is used, but one of both is required.
:param updated: the time the entry was modified the last time. Must
be a :class:`datetime.datetime` object. Required.
:param author: the author of the feed. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can be a list of (may be
mixed, too) strings and dicts, too, if there are
multiple authors. Required if not every entry has an
author element.
:param published: the time the entry was initially published. Must
be a :class:`datetime.datetime` object.
:param rights: copyright information for the entry.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param xml_base: The xml base (url) for this feed item. If not provided
it will default to the item url.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
"""
def __init__(self, title=None, content=None, feed_url=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.content = content
self.content_type = kwargs.get('content_type', 'html')
self.url = kwargs.get('url')
self.id = kwargs.get('id', self.url)
self.updated = kwargs.get('updated')
self.summary = kwargs.get('summary')
self.summary_type = kwargs.get('summary_type', 'html')
self.author = kwargs.get('author')
self.published = kwargs.get('published')
self.rights = kwargs.get('rights')
self.links = kwargs.get('links', [])
self.xml_base = kwargs.get('xml_base', feed_url)
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, (basestring, dict)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
if not self.updated:
raise ValueError('updated is required')
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.title
)
def generate(self):
"""Yields pieces of ATOM XML."""
base = ''
if self.xml_base:
base = ' xml:base="%s"' % escape(self.xml_base, True)
yield u'<entry%s>\n' % base
yield u' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.published:
yield u' <published>%s</published>\n' % \
format_iso8601(self.published)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield u' <email>%s</email>\n' % escape(author['email'])
yield u' </author>\n'
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(link[k], True)) for k in link)
if self.summary:
yield u' ' + _make_text_block('summary', self.summary,
self.summary_type)
if self.content:
yield u' ' + _make_text_block('content', self.content,
self.content_type)
yield u'</entry>\n'
def to_string(self):
"""Convert the feed item into a unicode object."""
return u''.join(self.generate())
def __unicode__(self):
return self.to_string()
def __str__(self):
return self.to_string().encode('utf-8')
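# Illustrative usage sketch (not part of the original module; the values and a
# ``from datetime import datetime`` import are assumed):
#     entry = FeedEntry(title='Hello world',
#                       content='<p>Hi there</p>',
#                       url='http://example.com/entries/1',
#                       updated=datetime.utcnow(),
#                       author='Jane Doe')
#     xml = entry.to_string()  # unicode ATOM <entry> fragment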
| bsd-3-clause | 2,204,160,742,799,888,600 | 42.661808 | 78 | 0.551482 | false |
fo2rist/infra-strike | backend/venv/Lib/site-packages/pip/_vendor/distlib/locators.py | 129 | 50493 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, string_types, build_opener,
HTTPRedirectHandler as BaseRedirectHandler,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata
from .util import (cached_property, parse_credentials, ensure_slash,
split_filename, get_project_data, parse_requirement,
parse_name_and_version, ServerProxy)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)
HASHER_HASH = re.compile('^(\w+)=([a-f0-9]+)')
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'https://pypi.python.org/pypi'
def get_all_distribution_names(url=None):
"""
Return all distribution names known by an index.
:param url: The URL of the index.
:return: A list of all known distribution names.
"""
if url is None:
url = DEFAULT_INDEX
client = ServerProxy(url, timeout=3.0)
return client.list_packages()
class RedirectHandler(BaseRedirectHandler):
"""
A class to work around a bug in some Python 3.2.x releases.
"""
# There's a bug in the base version for some 3.2.x
# (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
# returns e.g. /abc, it bails because it says the scheme ''
# is bogus, when actually it should use the request's
# URL for the scheme. See Python issue #13696.
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
newurl = None
for key in ('location', 'uri'):
if key in headers:
newurl = headers[key]
break
if newurl is None:
return
urlparts = urlparse(newurl)
if urlparts.scheme == '':
newurl = urljoin(req.get_full_url(), newurl)
if hasattr(headers, 'replace_header'):
headers.replace_header(key, newurl)
else:
headers[key] = newurl
return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
headers)
http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
"""
A base class for locators - things that locate distributions.
"""
source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
binary_extensions = ('.egg', '.exe', '.whl')
excluded_extensions = ('.pdf',)
# A list of tags indicating which wheels you want to match. The default
# value of None matches against the tags compatible with the running
# Python. If you want to match other values, set wheel_tags on a locator
# instance to a list of tuples (pyver, abi, arch) which you want to match.
wheel_tags = None
downloadable_extensions = source_extensions + ('.whl',)
def __init__(self, scheme='default'):
"""
Initialise an instance.
:param scheme: Because locators look for most recent versions, they
need to know the version scheme to use. This specifies
the current PEP-recommended scheme - use ``'legacy'``
if you need to support existing distributions on PyPI.
"""
self._cache = {}
self.scheme = scheme
# Because of bugs in some of the handlers on some of the platforms,
# we use our own opener rather than just using urlopen.
self.opener = build_opener(RedirectHandler())
# If get_project() is called from locate(), the matcher instance
# is set from the requirement passed to locate(). See issue #18 for
# why this can be useful to know.
self.matcher = None
def clear_cache(self):
self._cache.clear()
def _get_scheme(self):
return self._scheme
def _set_scheme(self, value):
self._scheme = value
scheme = property(_get_scheme, _set_scheme)
def _get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This should be implemented in subclasses.
If called from a locate() request, self.matcher will be set to a
matcher for the requirement to satisfy, otherwise it will be None.
"""
raise NotImplementedError('Please implement in the subclass')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Please implement in the subclass')
def get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This calls _get_project to do all the work, and just implements a caching layer on top.
"""
if self._cache is None:
result = self._get_project(name)
elif name in self._cache:
result = self._cache[name]
else:
result = self._get_project(name)
self._cache[name] = result
return result
def score_url(self, url):
"""
        Give a URL a score which can be used to choose preferred URLs
for a given project release.
"""
t = urlparse(url)
basename = posixpath.basename(t.path)
compatible = True
is_wheel = basename.endswith('.whl')
if is_wheel:
compatible = is_compatible(Wheel(basename), self.wheel_tags)
        return (t.scheme == 'https', 'pypi.python.org' in t.netloc,
is_wheel, compatible, basename)
def prefer_url(self, url1, url2):
"""
Choose one of two URLs where both are candidates for distribution
archives for the same version of a distribution (for example,
.tar.gz vs. zip).
The current implementation favours https:// URLs over http://, archives
from PyPI over those from other locations, wheel compatibility (if a
wheel) and then the archive name.
"""
result = url2
if url1:
s1 = self.score_url(url1)
s2 = self.score_url(url2)
if s1 > s2:
result = url1
if result != url2:
logger.debug('Not replacing %r with %r', url1, url2)
else:
logger.debug('Replacing %r with %r', url1, url2)
return result
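    # Illustrative example (not part of the original module; the URLs are
    # assumed). With the scoring documented above, an https archive hosted on
    # pypi.python.org is preferred over a plain http mirror:
    #     locator.prefer_url('http://mirror.example.com/foo-1.0.tar.gz',
    #                        'https://pypi.python.org/packages/source/f/foo/foo-1.0.tar.gz')
    #     # -> 'https://pypi.python.org/packages/source/f/foo/foo-1.0.tar.gz'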
def split_filename(self, filename, project_name):
"""
Attempt to split a filename in project name, version and Python version.
"""
return split_filename(filename, project_name)
def convert_url_to_download_info(self, url, project_name):
"""
See if a URL is a candidate for a download URL for a project (the URL
has typically been scraped from an HTML page).
If it is, a dictionary is returned with keys "name", "version",
"filename" and "url"; otherwise, None is returned.
"""
def same_project(name1, name2):
name1, name2 = name1.lower(), name2.lower()
if name1 == name2:
result = True
else:
# distribute replaces '-' by '_' in project names, so it
# can tell where the version starts in a filename.
result = name1.replace('_', '-') == name2.replace('_', '-')
return result
result = None
scheme, netloc, path, params, query, frag = urlparse(url)
if frag.lower().startswith('egg='):
logger.debug('%s: version hint in fragment: %r',
project_name, frag)
m = HASHER_HASH.match(frag)
if m:
algo, digest = m.groups()
else:
algo, digest = None, None
origpath = path
if path and path[-1] == '/':
path = path[:-1]
if path.endswith('.whl'):
try:
wheel = Wheel(path)
if is_compatible(wheel, self.wheel_tags):
if project_name is None:
include = True
else:
include = same_project(wheel.name, project_name)
if include:
result = {
'name': wheel.name,
'version': wheel.version,
'filename': wheel.filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
'python-version': ', '.join(
['.'.join(list(v[2:])) for v in wheel.pyver]),
}
except Exception as e:
logger.warning('invalid path for wheel: %s', path)
elif path.endswith(self.downloadable_extensions):
path = filename = posixpath.basename(path)
for ext in self.downloadable_extensions:
if path.endswith(ext):
path = path[:-len(ext)]
t = self.split_filename(path, project_name)
if not t:
logger.debug('No match for project/version: %s', path)
else:
name, version, pyver = t
if not project_name or same_project(project_name, name):
result = {
'name': name,
'version': version,
'filename': filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
#'packagetype': 'sdist',
}
if pyver:
result['python-version'] = pyver
break
if result and algo:
result['%s_digest' % algo] = digest
return result
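    # Illustrative example (assumed URL; not part of the original module):
    #     locator.convert_url_to_download_info(
    #         'https://pypi.python.org/packages/source/f/foo/foo-1.0.tar.gz', 'foo')
    # returns a dict roughly like
    #     {'name': 'foo', 'version': '1.0', 'filename': 'foo-1.0.tar.gz',
    #      'url': 'https://pypi.python.org/packages/source/f/foo/foo-1.0.tar.gz'}
    # and None for URLs that do not look like archives of the named project.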
def _get_digest(self, info):
"""
Get a digest from a dictionary by looking at keys of the form
'algo_digest'.
Returns a 2-tuple (algo, digest) if found, else None. Currently
looks only for SHA256, then MD5.
"""
result = None
for algo in ('sha256', 'md5'):
key = '%s_digest' % algo
if key in info:
result = (algo, info[key])
break
return result
def _update_version_data(self, result, info):
"""
Update a result dictionary (the final result from _get_project) with a
dictionary for a specific version, which typically holds information
gleaned from a filename or URL for an archive for the distribution.
"""
name = info.pop('name')
version = info.pop('version')
if version in result:
dist = result[version]
md = dist.metadata
else:
dist = make_dist(name, version, scheme=self.scheme)
md = dist.metadata
dist.digest = digest = self._get_digest(info)
url = info['url']
result['digests'][url] = digest
if md.source_url != info['url']:
md.source_url = self.prefer_url(md.source_url, url)
result['urls'].setdefault(version, set()).add(url)
dist.locator = self
result[version] = dist
def locate(self, requirement, prereleases=False):
"""
Find the most recent distribution which matches the given
requirement.
:param requirement: A requirement of the form 'foo (1.0)' or perhaps
'foo (>= 1.0, < 2.0, != 1.3)'
:param prereleases: If ``True``, allow pre-release versions
to be located. Otherwise, pre-release versions
are not returned.
:return: A :class:`Distribution` instance, or ``None`` if no such
distribution could be located.
"""
result = None
r = parse_requirement(requirement)
if r is None:
raise DistlibException('Not a valid requirement: %r' % requirement)
scheme = get_scheme(self.scheme)
self.matcher = matcher = scheme.matcher(r.requirement)
logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
versions = self.get_project(r.name)
if len(versions) > 2: # urls and digests keys are present
# sometimes, versions are invalid
slist = []
vcls = matcher.version_class
for k in versions:
if k in ('urls', 'digests'):
continue
try:
if not matcher.match(k):
logger.debug('%s did not match %r', matcher, k)
else:
if prereleases or not vcls(k).is_prerelease:
slist.append(k)
else:
logger.debug('skipping pre-release '
'version %s of %s', k, matcher.name)
except Exception: # pragma: no cover
logger.warning('error matching %s with %r', matcher, k)
pass # slist.append(k)
if len(slist) > 1:
slist = sorted(slist, key=scheme.key)
if slist:
logger.debug('sorted list: %s', slist)
version = slist[-1]
result = versions[version]
if result:
if r.extras:
result.extras = r.extras
result.download_urls = versions.get('urls', {}).get(version, set())
d = {}
sd = versions.get('digests', {})
for url in result.download_urls:
if url in sd:
d[url] = sd[url]
result.digests = d
self.matcher = None
return result
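# Illustrative use of Locator.locate() (not part of the original module; the
# requirement string is an assumed example). Any concrete subclass defined
# below can be used the same way:
#     dist = locator.locate('requests (>= 2.0, < 3.0)')
#     # -> the newest matching Distribution, or None if nothing satisfies it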
class PyPIRPCLocator(Locator):
"""
This locator uses XML-RPC to locate distributions. It therefore
cannot be used with simple mirrors (that only mirror file content).
"""
def __init__(self, url, **kwargs):
"""
Initialise an instance.
:param url: The URL to use for XML-RPC.
:param kwargs: Passed to the superclass constructor.
"""
super(PyPIRPCLocator, self).__init__(**kwargs)
self.base_url = url
self.client = ServerProxy(url, timeout=3.0)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
return set(self.client.list_packages())
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
versions = self.client.package_releases(name, True)
for v in versions:
urls = self.client.release_urls(name, v)
data = self.client.release_data(name, v)
metadata = Metadata(scheme=self.scheme)
metadata.name = data['name']
metadata.version = data['version']
metadata.license = data.get('license')
metadata.keywords = data.get('keywords', [])
metadata.summary = data.get('summary')
dist = Distribution(metadata)
if urls:
info = urls[0]
metadata.source_url = info['url']
dist.digest = self._get_digest(info)
dist.locator = self
result[v] = dist
for info in urls:
url = info['url']
digest = self._get_digest(info)
result['urls'].setdefault(v, set()).add(url)
result['digests'][url] = digest
return result
class PyPIJSONLocator(Locator):
"""
This locator uses PyPI's JSON interface. It's very limited in functionality
and probably not worth using.
"""
def __init__(self, url, **kwargs):
super(PyPIJSONLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
url = urljoin(self.base_url, '%s/json' % quote(name))
try:
resp = self.opener.open(url)
data = resp.read().decode() # for now
d = json.loads(data)
md = Metadata(scheme=self.scheme)
data = d['info']
md.name = data['name']
md.version = data['version']
md.license = data.get('license')
md.keywords = data.get('keywords', [])
md.summary = data.get('summary')
dist = Distribution(md)
dist.locator = self
urls = d['urls']
result[md.version] = dist
for info in d['urls']:
url = info['url']
dist.download_urls.add(url)
dist.digests[url] = self._get_digest(info)
result['urls'].setdefault(md.version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# Now get other releases
for version, infos in d['releases'].items():
if version == md.version:
continue # already done
omd = Metadata(scheme=self.scheme)
omd.name = md.name
omd.version = version
odist = Distribution(omd)
odist.locator = self
result[version] = odist
for info in infos:
url = info['url']
odist.download_urls.add(url)
odist.digests[url] = self._get_digest(info)
result['urls'].setdefault(version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# for info in urls:
# md.source_url = info['url']
# dist.digest = self._get_digest(info)
# dist.locator = self
# for info in urls:
# url = info['url']
# result['urls'].setdefault(md.version, set()).add(url)
# result['digests'][url] = self._get_digest(info)
except Exception as e:
logger.exception('JSON fetch failed: %s', e)
return result
class Page(object):
"""
This class represents a scraped HTML page.
"""
# The following slightly hairy-looking regex just looks for the contents of
# an anchor link, which has an attribute "href" either immediately preceded
# or immediately followed by a "rel" attribute. The attribute values can be
# declared with double quotes, single quotes or no quotes - which leads to
# the length of the expression.
_href = re.compile("""
(rel\s*=\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\s\n]*))\s+)?
href\s*=\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\s\n]*))
(\s+rel\s*=\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\s\n]*)))?
""", re.I | re.S | re.X)
_base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
def __init__(self, data, url):
"""
Initialise an instance with the Unicode page contents and the URL they
came from.
"""
self.data = data
self.base_url = self.url = url
m = self._base.search(self.data)
if m:
self.base_url = m.group(1)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
@cached_property
def links(self):
"""
Return the URLs of all the links on a page together with information
about their "rel" attribute, for determining which ones to treat as
downloads and which ones to queue for further scraping.
"""
def clean(url):
"Tidy up an URL."
scheme, netloc, path, params, query, frag = urlparse(url)
return urlunparse((scheme, netloc, quote(path),
params, query, frag))
result = set()
for match in self._href.finditer(self.data):
d = match.groupdict('')
rel = (d['rel1'] or d['rel2'] or d['rel3'] or
d['rel4'] or d['rel5'] or d['rel6'])
url = d['url1'] or d['url2'] or d['url3']
url = urljoin(self.base_url, url)
url = unescape(url)
url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
result.add((url, rel))
# We sort the result, hoping to bring the most recent versions
# to the front
result = sorted(result, key=lambda t: t[0], reverse=True)
return result
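# Illustrative example (not part of the original module; markup and base URL
# are assumed): for page data containing
#     <a href="foo-1.0.tar.gz" rel="download">foo 1.0</a>
# Page(data, 'https://example.com/simple/foo/').links would include
#     ('https://example.com/simple/foo/foo-1.0.tar.gz', 'download')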
class SimpleScrapingLocator(Locator):
"""
A locator which scrapes HTML pages to locate downloads for a distribution.
This runs multiple threads to do the I/O; performance is at least as good
as pip's PackageFinder, which works in an analogous fashion.
"""
# These are used to deal with various Content-Encoding schemes.
decoders = {
'deflate': zlib.decompress,
        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
'none': lambda b: b,
}
def __init__(self, url, timeout=None, num_workers=10, **kwargs):
"""
Initialise an instance.
:param url: The root URL to use for scraping.
:param timeout: The timeout, in seconds, to be applied to requests.
This defaults to ``None`` (no timeout specified).
        :param num_workers: The number of worker threads you want to do I/O.
                            This defaults to 10.
:param kwargs: Passed to the superclass.
"""
super(SimpleScrapingLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
self.timeout = timeout
self._page_cache = {}
self._seen = set()
self._to_fetch = queue.Queue()
self._bad_hosts = set()
self.skip_externals = False
self.num_workers = num_workers
self._lock = threading.RLock()
# See issue #45: we need to be resilient when the locator is used
# in a thread, e.g. with concurrent.futures. We can't use self._lock
# as it is for coordinating our internal threads - the ones created
# in _prepare_threads.
self._gplock = threading.RLock()
def _prepare_threads(self):
"""
Threads are created only when get_project is called, and terminate
before it returns. They are there primarily to parallelise I/O (i.e.
fetching web pages).
"""
self._threads = []
for i in range(self.num_workers):
t = threading.Thread(target=self._fetch)
t.setDaemon(True)
t.start()
self._threads.append(t)
def _wait_threads(self):
"""
Tell all the threads to terminate (by sending a sentinel value) and
wait for them to do so.
"""
# Note that you need two loops, since you can't say which
# thread will get each sentinel
for t in self._threads:
self._to_fetch.put(None) # sentinel
for t in self._threads:
t.join()
self._threads = []
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
with self._gplock:
self.result = result
self.project_name = name
url = urljoin(self.base_url, '%s/' % quote(name))
self._seen.clear()
self._page_cache.clear()
self._prepare_threads()
try:
logger.debug('Queueing %s', url)
self._to_fetch.put(url)
self._to_fetch.join()
finally:
self._wait_threads()
del self.result
return result
platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|'
r'win(32|-amd64)|macosx-?\d+)\b', re.I)
def _is_platform_dependent(self, url):
"""
        Does a URL refer to a platform-specific download?
"""
return self.platform_dependent.search(url)
def _process_download(self, url):
"""
        See if a URL is a suitable download for a project.
If it is, register information in the result dictionary (for
_get_project) about the specific version it's for.
Note that the return value isn't actually used other than as a boolean
value.
"""
if self._is_platform_dependent(url):
info = None
else:
info = self.convert_url_to_download_info(url, self.project_name)
logger.debug('process_download: %s -> %s', url, info)
if info:
with self._lock: # needed because self.result is shared
self._update_version_data(self.result, info)
return info
def _should_queue(self, link, referrer, rel):
"""
Determine whether a link URL from a referring page and with a
particular "rel" attribute should be queued for scraping.
"""
scheme, netloc, path, _, _, _ = urlparse(link)
if path.endswith(self.source_extensions + self.binary_extensions +
self.excluded_extensions):
result = False
elif self.skip_externals and not link.startswith(self.base_url):
result = False
elif not referrer.startswith(self.base_url):
result = False
elif rel not in ('homepage', 'download'):
result = False
elif scheme not in ('http', 'https', 'ftp'):
result = False
elif self._is_platform_dependent(link):
result = False
else:
host = netloc.split(':', 1)[0]
if host.lower() == 'localhost':
result = False
else:
result = True
logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
referrer, result)
return result
def _fetch(self):
"""
Get a URL to fetch from the work queue, get the HTML page, examine its
links for download candidates and candidates for further scraping.
This is a handy method to run in a thread.
"""
while True:
url = self._to_fetch.get()
try:
if url:
page = self.get_page(url)
if page is None: # e.g. after an error
continue
for link, rel in page.links:
if link not in self._seen:
self._seen.add(link)
if (not self._process_download(link) and
self._should_queue(link, url, rel)):
logger.debug('Queueing %s from %s', link, url)
self._to_fetch.put(link)
finally:
# always do this, to avoid hangs :-)
self._to_fetch.task_done()
if not url:
#logger.debug('Sentinel seen, quitting.')
break
def get_page(self, url):
"""
        Get the HTML for a URL, possibly from an in-memory cache.
XXX TODO Note: this cache is never actually cleared. It's assumed that
the data won't get stale over the lifetime of a locator instance (not
necessarily true for the default_locator).
"""
# http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
scheme, netloc, path, _, _, _ = urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
url = urljoin(ensure_slash(url), 'index.html')
if url in self._page_cache:
result = self._page_cache[url]
logger.debug('Returning %s from cache: %s', url, result)
else:
host = netloc.split(':', 1)[0]
result = None
if host in self._bad_hosts:
logger.debug('Skipping %s due to bad host %s', url, host)
else:
req = Request(url, headers={'Accept-encoding': 'identity'})
try:
logger.debug('Fetching %s', url)
resp = self.opener.open(req, timeout=self.timeout)
logger.debug('Fetched %s', url)
headers = resp.info()
content_type = headers.get('Content-Type', '')
if HTML_CONTENT_TYPE.match(content_type):
final_url = resp.geturl()
data = resp.read()
encoding = headers.get('Content-Encoding')
if encoding:
decoder = self.decoders[encoding] # fail if not found
data = decoder(data)
encoding = 'utf-8'
m = CHARSET.search(content_type)
if m:
encoding = m.group(1)
try:
data = data.decode(encoding)
except UnicodeError: # pragma: no cover
data = data.decode('latin-1') # fallback
result = Page(data, final_url)
self._page_cache[final_url] = result
except HTTPError as e:
if e.code != 404:
logger.exception('Fetch failed: %s: %s', url, e)
except URLError as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
with self._lock:
self._bad_hosts.add(host)
except Exception as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
finally:
self._page_cache[url] = result # even if None (failure)
return result
_distname_re = re.compile('<a href=[^>]*>([^<]+)<')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
page = self.get_page(self.base_url)
if not page:
raise DistlibException('Unable to get %s' % self.base_url)
for match in self._distname_re.finditer(page.data):
result.add(match.group(1))
return result
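# Illustrative usage sketch (network access and index URL assumed; not part of
# the original module):
#     locator = SimpleScrapingLocator('https://pypi.python.org/simple/',
#                                     timeout=3.0, num_workers=4)
#     versions = locator.get_project('beautifulsoup4')
#     # keys are version strings, plus the special 'urls' and 'digests' entries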
class DirectoryLocator(Locator):
"""
This class locates distributions in a directory tree.
"""
def __init__(self, path, **kwargs):
"""
Initialise an instance.
:param path: The root of the directory tree to search.
:param kwargs: Passed to the superclass constructor,
except for:
* recursive - if True (the default), subdirectories are
recursed into. If False, only the top-level directory
is searched,
"""
self.recursive = kwargs.pop('recursive', True)
super(DirectoryLocator, self).__init__(**kwargs)
path = os.path.abspath(path)
if not os.path.isdir(path): # pragma: no cover
raise DistlibException('Not a directory: %r' % path)
self.base_dir = path
def should_include(self, filename, parent):
"""
Should a filename be considered as a candidate for a distribution
archive? As well as the filename, the directory which contains it
is provided, though not used by the current implementation.
"""
return filename.endswith(self.downloadable_extensions)
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, name)
if info:
self._update_version_data(result, info)
if not self.recursive:
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, None)
if info:
result.add(info['name'])
if not self.recursive:
break
return result
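# Illustrative usage sketch (assumed local path; not part of the original
# module): scan a flat directory of sdists/wheels without recursing.
#     locator = DirectoryLocator('/path/to/wheelhouse', recursive=False)
#     dist = locator.locate('foo (== 1.0)')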
class JSONLocator(Locator):
"""
This locator uses special extended metadata (not available on PyPI) and is
the basis of performant dependency resolution in distlib. Other locators
require archive downloads before dependencies can be determined! As you
might imagine, that can be slow.
"""
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
data = get_project_data(name)
if data:
for info in data.get('files', []):
if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
continue
# We don't store summary in project metadata as it makes
# the data bigger for no benefit during dependency
# resolution
dist = make_dist(data['name'], info['version'],
summary=data.get('summary',
'Placeholder for summary'),
scheme=self.scheme)
md = dist.metadata
md.source_url = info['url']
# TODO SHA256 digest
if 'digest' in info and info['digest']:
dist.digest = ('md5', info['digest'])
md.dependencies = info.get('requirements', {})
dist.exports = info.get('exports', {})
result[dist.version] = dist
result['urls'].setdefault(dist.version, set()).add(info['url'])
return result
class DistPathLocator(Locator):
"""
This locator finds installed distributions in a path. It can be useful for
adding to an :class:`AggregatingLocator`.
"""
def __init__(self, distpath, **kwargs):
"""
Initialise an instance.
:param distpath: A :class:`DistributionPath` instance to search.
"""
super(DistPathLocator, self).__init__(**kwargs)
assert isinstance(distpath, DistributionPath)
self.distpath = distpath
def _get_project(self, name):
dist = self.distpath.get_distribution(name)
if dist is None:
result = {'urls': {}, 'digests': {}}
else:
result = {
dist.version: dist,
'urls': {dist.version: set([dist.source_url])},
'digests': {dist.version: set([None])}
}
return result
class AggregatingLocator(Locator):
"""
This class allows you to chain and/or merge a list of locators.
"""
def __init__(self, *locators, **kwargs):
"""
Initialise an instance.
:param locators: The list of locators to search.
:param kwargs: Passed to the superclass constructor,
except for:
* merge - if False (the default), the first successful
search from any of the locators is returned. If True,
the results from all locators are merged (this can be
slow).
"""
self.merge = kwargs.pop('merge', False)
self.locators = locators
super(AggregatingLocator, self).__init__(**kwargs)
def clear_cache(self):
super(AggregatingLocator, self).clear_cache()
for locator in self.locators:
locator.clear_cache()
def _set_scheme(self, value):
self._scheme = value
for locator in self.locators:
locator.scheme = value
scheme = property(Locator.scheme.fget, _set_scheme)
def _get_project(self, name):
result = {}
for locator in self.locators:
d = locator.get_project(name)
if d:
if self.merge:
files = result.get('urls', {})
digests = result.get('digests', {})
# next line could overwrite result['urls'], result['digests']
result.update(d)
df = result.get('urls')
if files and df:
for k, v in files.items():
if k in df:
df[k] |= v
else:
df[k] = v
dd = result.get('digests')
if digests and dd:
dd.update(digests)
else:
# See issue #18. If any dists are found and we're looking
# for specific constraints, we only return something if
# a match is found. For example, if a DirectoryLocator
# returns just foo (1.0) while we're looking for
# foo (>= 2.0), we'll pretend there was nothing there so
# that subsequent locators can be queried. Otherwise we
# would just return foo (1.0) which would then lead to a
# failure to find foo (>= 2.0), because other locators
# weren't searched. Note that this only matters when
# merge=False.
if self.matcher is None:
found = True
else:
found = False
for k in d:
if self.matcher.match(k):
found = True
break
if found:
result = d
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for locator in self.locators:
try:
result |= locator.get_distribution_names()
except NotImplementedError:
pass
return result
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
default_locator = AggregatingLocator(
JSONLocator(),
SimpleScrapingLocator('https://pypi.python.org/simple/',
timeout=3.0),
scheme='legacy')
locate = default_locator.locate
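# Illustrative use of the module-level shortcut (network access assumed; the
# exact import path depends on how distlib is installed or vendored):
#     from distlib.locators import locate
#     dist = locate('SQLAlchemy (>= 0.9)')
#     if dist is not None:
#         print(dist.name_and_version, dist.download_urls)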
NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
class DependencyFinder(object):
"""
Locate dependencies for distributions.
"""
def __init__(self, locator=None):
"""
Initialise an instance, using the specified locator
to locate distributions.
"""
self.locator = locator or default_locator
self.scheme = get_scheme(self.locator.scheme)
def add_distribution(self, dist):
"""
Add a distribution to the finder. This will update internal information
about who provides what.
:param dist: The distribution to add.
"""
logger.debug('adding distribution %s', dist)
name = dist.key
self.dists_by_name[name] = dist
self.dists[(name, dist.version)] = dist
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
self.provided.setdefault(name, set()).add((version, dist))
def remove_distribution(self, dist):
"""
Remove a distribution from the finder. This will update internal
information about who provides what.
:param dist: The distribution to remove.
"""
logger.debug('removing distribution %s', dist)
name = dist.key
del self.dists_by_name[name]
del self.dists[(name, dist.version)]
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
s = self.provided[name]
s.remove((version, dist))
if not s:
del self.provided[name]
def get_matcher(self, reqt):
"""
Get a version matcher for a requirement.
:param reqt: The requirement
:type reqt: str
:return: A version matcher (an instance of
:class:`distlib.version.Matcher`).
"""
try:
matcher = self.scheme.matcher(reqt)
except UnsupportedVersionError: # pragma: no cover
# XXX compat-mode if cannot read the version
name = reqt.split()[0]
matcher = self.scheme.matcher(name)
return matcher
def find_providers(self, reqt):
"""
Find the distributions which can fulfill a requirement.
:param reqt: The requirement.
:type reqt: str
:return: A set of distribution which can fulfill the requirement.
"""
matcher = self.get_matcher(reqt)
name = matcher.key # case-insensitive
result = set()
provided = self.provided
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
result.add(provider)
break
return result
def try_to_replace(self, provider, other, problems):
"""
Attempt to replace one provider with another. This is typically used
when resolving dependencies from multiple sources, e.g. A requires
(B >= 1.0) while C requires (B >= 1.1).
For successful replacement, ``provider`` must meet all the requirements
which ``other`` fulfills.
:param provider: The provider we are trying to replace with.
:param other: The provider we're trying to replace.
:param problems: If False is returned, this will contain what
problems prevented replacement. This is currently
a tuple of the literal string 'cantreplace',
``provider``, ``other`` and the set of requirements
that ``provider`` couldn't fulfill.
:return: True if we can replace ``other`` with ``provider``, else
False.
"""
rlist = self.reqts[other]
unmatched = set()
for s in rlist:
matcher = self.get_matcher(s)
if not matcher.match(provider.version):
unmatched.add(s)
if unmatched:
# can't replace other with provider
problems.add(('cantreplace', provider, other,
frozenset(unmatched)))
result = False
else:
# can replace other with provider
self.remove_distribution(other)
del self.reqts[other]
for s in rlist:
self.reqts.setdefault(provider, set()).add(s)
self.add_distribution(provider)
result = True
return result
def find(self, requirement, meta_extras=None, prereleases=False):
"""
Find a distribution and all distributions it depends on.
:param requirement: The requirement specifying the distribution to
find, or a Distribution instance.
:param meta_extras: A list of meta extras such as :test:, :build: and
so on.
:param prereleases: If ``True``, allow pre-release versions to be
returned - otherwise, don't return prereleases
unless they're all that's available.
Return a set of :class:`Distribution` instances and a set of
problems.
The distributions returned should be such that they have the
:attr:`required` attribute set to ``True`` if they were
from the ``requirement`` passed to ``find()``, and they have the
:attr:`build_time_dependency` attribute set to ``True`` unless they
are post-installation dependencies of the ``requirement``.
The problems should be a tuple consisting of the string
``'unsatisfied'`` and the requirement which couldn't be satisfied
by any distribution known to the locator.
"""
self.provided = {}
self.dists = {}
self.dists_by_name = {}
self.reqts = {}
meta_extras = set(meta_extras or [])
if ':*:' in meta_extras:
meta_extras.remove(':*:')
# :meta: and :run: are implicitly included
meta_extras |= set([':test:', ':build:', ':dev:'])
if isinstance(requirement, Distribution):
dist = odist = requirement
logger.debug('passed %s as requirement', odist)
else:
dist = odist = self.locator.locate(requirement,
prereleases=prereleases)
if dist is None:
raise DistlibException('Unable to locate %r' % requirement)
logger.debug('located %s', odist)
dist.requested = True
problems = set()
todo = set([dist])
install_dists = set([odist])
while todo:
dist = todo.pop()
name = dist.key # case-insensitive
if name not in self.dists_by_name:
self.add_distribution(dist)
else:
#import pdb; pdb.set_trace()
other = self.dists_by_name[name]
if other != dist:
self.try_to_replace(dist, other, problems)
ireqts = dist.run_requires | dist.meta_requires
sreqts = dist.build_requires
ereqts = set()
if dist in install_dists:
for key in ('test', 'build', 'dev'):
e = ':%s:' % key
if e in meta_extras:
ereqts |= getattr(dist, '%s_requires' % key)
all_reqts = ireqts | sreqts | ereqts
for r in all_reqts:
providers = self.find_providers(r)
if not providers:
logger.debug('No providers found for %r', r)
provider = self.locator.locate(r, prereleases=prereleases)
# If no provider is found and we didn't consider
# prereleases, consider them now.
if provider is None and not prereleases:
provider = self.locator.locate(r, prereleases=True)
if provider is None:
logger.debug('Cannot satisfy %r', r)
problems.add(('unsatisfied', r))
else:
n, v = provider.key, provider.version
if (n, v) not in self.dists:
todo.add(provider)
providers.add(provider)
if r in ireqts and dist in install_dists:
install_dists.add(provider)
logger.debug('Adding %s to install_dists',
provider.name_and_version)
for p in providers:
name = p.key
if name not in self.dists_by_name:
self.reqts.setdefault(p, set()).add(r)
else:
other = self.dists_by_name[name]
if other != p:
# see if other can be replaced by p
self.try_to_replace(p, other, problems)
dists = set(self.dists.values())
for dist in dists:
dist.build_time_dependency = dist not in install_dists
if dist.build_time_dependency:
logger.debug('%s is a build-time dependency only.',
dist.name_and_version)
logger.debug('find done for %s', odist)
return dists, problems
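# Illustrative usage sketch (network access assumed; not part of the original
# module):
#     finder = DependencyFinder()          # uses default_locator
#     dists, problems = finder.find('Flask (>= 0.10)')
#     runtime = [d for d in dists if not d.build_time_dependency]
#     # problems holds ('unsatisfied', requirement) tuples, if any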
| lgpl-3.0 | 4,091,003,129,755,938,300 | 38.946994 | 95 | 0.524489 | false |
crobby/sahara | sahara/tests/unit/plugins/cdh/test_config_helper.py | 11 | 1492 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh.v5_3_0 import config_helper as c_h
from sahara.tests.unit import base
from sahara.tests.unit.plugins.cdh import utils as ctu
class ConfigHelperTestCase(base.SaharaTestCase):
def test_is_swift_enabled(self):
cluster = ctu.get_fake_cluster(cluster_configs={})
self.assertTrue(c_h.is_swift_enabled(cluster))
cluster = ctu.get_fake_cluster(
cluster_configs={'general': {c_h.ENABLE_SWIFT.name: False}})
self.assertFalse(c_h.is_swift_enabled(cluster))
def test_get_swift_lib_url(self):
cluster = ctu.get_fake_cluster(cluster_configs={})
self.assertEqual(c_h.DEFAULT_SWIFT_LIB_URL,
c_h.get_swift_lib_url(cluster))
cluster = ctu.get_fake_cluster(
cluster_configs={'general': {c_h.SWIFT_LIB_URL.name: 'spam'}})
self.assertEqual('spam', c_h.get_swift_lib_url(cluster))
| apache-2.0 | -1,070,721,578,638,276,500 | 39.324324 | 74 | 0.69571 | false |
tsdgeos/snapcraft | snapcraft/tests/test_formatting_utils.py | 7 | 1952 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015, 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from snapcraft import formatting_utils
from snapcraft import tests
class HumanizeListTestCases(tests.TestCase):
def test_no_items(self):
items = []
output = formatting_utils.humanize_list(items, 'and')
self.assertEqual(output, '')
def test_one_item(self):
items = ['foo']
output = formatting_utils.humanize_list(items, 'and')
self.assertEqual(output, "'foo'")
def test_two_items(self):
items = ['foo', 'bar']
output = formatting_utils.humanize_list(items, 'and')
self.assertEqual(output, "'bar' and 'foo'",
"Expected 'bar' before 'foo' due to sorting")
def test_three_items(self):
items = ['foo', 'bar', 'baz']
output = formatting_utils.humanize_list(items, 'and')
self.assertEqual(output, "'bar', 'baz', and 'foo'")
def test_four_items(self):
items = ['foo', 'bar', 'baz', 'qux']
output = formatting_utils.humanize_list(items, 'and')
self.assertEqual(output, "'bar', 'baz', 'foo', and 'qux'")
def test_another_conjunction(self):
items = ['foo', 'bar', 'baz', 'qux']
output = formatting_utils.humanize_list(items, 'or')
self.assertEqual(output, "'bar', 'baz', 'foo', or 'qux'")
| gpl-3.0 | -4,966,179,277,543,992,000 | 36.538462 | 71 | 0.64293 | false |
nirvn/QGIS | tests/src/python/test_qgsvectorlayercache.py | 22 | 3911 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsVectorLayerCache.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '08/06/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
from qgis.PyQt.QtCore import QVariant, Qt
from qgis.PyQt.QtGui import QPainter
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import (QgsWkbTypes,
QgsVectorLayer,
QgsVectorLayerCache,
QgsRectangle,
QgsFeature,
QgsFeatureRequest,
QgsGeometry,
QgsPointXY,
QgsField,
QgsFields,
QgsCoordinateReferenceSystem,
QgsProject,
QgsPoint,
NULL)
from qgis.testing import start_app, unittest
from featuresourcetestbase import FeatureSourceTestCase
from utilities import unitTestDataPath
start_app()
class TestQgsVectorLayerCache(unittest.TestCase, FeatureSourceTestCase):
@classmethod
def getSource(cls):
cache = QgsVectorLayerCache(cls.vl, 100)
return cache
@classmethod
def setUpClass(cls):
"""Run before all tests"""
# Create test layer for FeatureSourceTestCase
cls.vl = QgsVectorLayer(
'Point?crs=epsg:4326&field=pk:integer&field=cnt:integer&field=name:string(0)&field=name2:string(0)&field=num_char:string&key=pk',
'test', 'memory')
assert (cls.vl.isValid())
f1 = QgsFeature(5)
f1.setAttributes([5, -200, NULL, 'NuLl', '5'])
f1.setGeometry(QgsGeometry.fromWkt('Point (-71.123 78.23)'))
f2 = QgsFeature(3)
f2.setAttributes([3, 300, 'Pear', 'PEaR', '3'])
f3 = QgsFeature(1)
f3.setAttributes([1, 100, 'Orange', 'oranGe', '1'])
f3.setGeometry(QgsGeometry.fromWkt('Point (-70.332 66.33)'))
f4 = QgsFeature(2)
f4.setAttributes([2, 200, 'Apple', 'Apple', '2'])
f4.setGeometry(QgsGeometry.fromWkt('Point (-68.2 70.8)'))
f5 = QgsFeature(4)
f5.setAttributes([4, 400, 'Honey', 'Honey', '4'])
f5.setGeometry(QgsGeometry.fromWkt('Point (-65.32 78.3)'))
assert cls.vl.dataProvider().addFeatures([f1, f2, f3, f4, f5])
cls.source = QgsVectorLayerCache(cls.vl, 100)
def testGetFeaturesSubsetAttributes2(self):
""" Override and skip this QgsFeatureSource test. We are using a memory provider, and it's actually more efficient for the memory provider to return
its features as direct copies (due to implicit sharing of QgsFeature)
"""
pass
def testGetFeaturesNoGeometry(self):
""" Override and skip this QgsFeatureSource test. We are using a memory provider, and it's actually more efficient for the memory provider to return
its features as direct copies (due to implicit sharing of QgsFeature)
"""
pass
def testUniqueValues(self):
""" Skip unique values test - not implemented by the cache (yet)
"""
pass
def testMinimumValue(self):
""" Skip min values test - not implemented by the cache (yet)
"""
pass
def testMaximumValue(self):
""" Skip max values test - not implemented by the cache (yet)
"""
pass
def testAllFeatureIds(self):
""" Skip allFeatureIds test - not implemented by the cache (yet)
"""
pass
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 8,751,182,244,045,306,000 | 32.715517 | 156 | 0.611864 | false |
esakellari/root | interpreter/llvm/src/tools/clang/www/builtins.py | 99 | 6633 | #!/usr/bin/env python
import sys, fileinput
err=0
# Giant associative set of builtin->intrinsic mappings where clang doesn't
# implement the builtin since the vector operation works by default.
repl_map = {
'__builtin_ia32_addps': '_mm_add_ps',
'__builtin_ia32_addsd': '_mm_add_sd',
'__builtin_ia32_addpd': '_mm_add_pd',
'__builtin_ia32_addss': '_mm_add_ss',
'__builtin_ia32_paddb128': '_mm_add_epi8',
'__builtin_ia32_paddw128': '_mm_add_epi16',
'__builtin_ia32_paddd128': '_mm_add_epi32',
'__builtin_ia32_paddq128': '_mm_add_epi64',
'__builtin_ia32_subps': '_mm_sub_ps',
'__builtin_ia32_subsd': '_mm_sub_sd',
'__builtin_ia32_subpd': '_mm_sub_pd',
'__builtin_ia32_subss': '_mm_sub_ss',
'__builtin_ia32_psubb128': '_mm_sub_epi8',
'__builtin_ia32_psubw128': '_mm_sub_epi16',
'__builtin_ia32_psubd128': '_mm_sub_epi32',
'__builtin_ia32_psubq128': '_mm_sub_epi64',
'__builtin_ia32_mulsd': '_mm_mul_sd',
'__builtin_ia32_mulpd': '_mm_mul_pd',
'__builtin_ia32_mulps': '_mm_mul_ps',
'__builtin_ia32_mulss': '_mm_mul_ss',
'__builtin_ia32_pmullw128': '_mm_mullo_epi16',
'__builtin_ia32_divsd': '_mm_div_sd',
'__builtin_ia32_divpd': '_mm_div_pd',
'__builtin_ia32_divps': '_mm_div_ps',
'__builtin_ia32_divss': '_mm_div_ss',
'__builtin_ia32_andpd': '_mm_and_pd',
'__builtin_ia32_andps': '_mm_and_ps',
'__builtin_ia32_pand128': '_mm_and_si128',
'__builtin_ia32_andnpd': '_mm_andnot_pd',
'__builtin_ia32_andnps': '_mm_andnot_ps',
'__builtin_ia32_pandn128': '_mm_andnot_si128',
'__builtin_ia32_orpd': '_mm_or_pd',
'__builtin_ia32_orps': '_mm_or_ps',
'__builtin_ia32_por128': '_mm_or_si128',
'__builtin_ia32_xorpd': '_mm_xor_pd',
'__builtin_ia32_xorps': '_mm_xor_ps',
'__builtin_ia32_pxor128': '_mm_xor_si128',
'__builtin_ia32_cvtps2dq': '_mm_cvtps_epi32',
'__builtin_ia32_cvtsd2ss': '_mm_cvtsd_ss',
'__builtin_ia32_cvtsi2sd': '_mm_cvtsi32_sd',
'__builtin_ia32_cvtss2sd': '_mm_cvtss_sd',
'__builtin_ia32_cvttsd2si': '_mm_cvttsd_si32',
'__builtin_ia32_vec_ext_v2df': '_mm_cvtsd_f64',
'__builtin_ia32_loadhpd': '_mm_loadh_pd',
'__builtin_ia32_loadlpd': '_mm_loadl_pd',
'__builtin_ia32_loadlv4si': '_mm_loadl_epi64',
'__builtin_ia32_cmpeqps': '_mm_cmpeq_ps',
'__builtin_ia32_cmpltps': '_mm_cmplt_ps',
'__builtin_ia32_cmpleps': '_mm_cmple_ps',
'__builtin_ia32_cmpgtps': '_mm_cmpgt_ps',
'__builtin_ia32_cmpgeps': '_mm_cmpge_ps',
'__builtin_ia32_cmpunordps': '_mm_cmpunord_ps',
'__builtin_ia32_cmpneqps': '_mm_cmpneq_ps',
'__builtin_ia32_cmpnltps': '_mm_cmpnlt_ps',
'__builtin_ia32_cmpnleps': '_mm_cmpnle_ps',
'__builtin_ia32_cmpngtps': '_mm_cmpngt_ps',
'__builtin_ia32_cmpordps': '_mm_cmpord_ps',
'__builtin_ia32_cmpeqss': '_mm_cmpeq_ss',
'__builtin_ia32_cmpltss': '_mm_cmplt_ss',
'__builtin_ia32_cmpless': '_mm_cmple_ss',
'__builtin_ia32_cmpunordss': '_mm_cmpunord_ss',
'__builtin_ia32_cmpneqss': '_mm_cmpneq_ss',
'__builtin_ia32_cmpnltss': '_mm_cmpnlt_ss',
'__builtin_ia32_cmpnless': '_mm_cmpnle_ss',
'__builtin_ia32_cmpngtss': '_mm_cmpngt_ss',
'__builtin_ia32_cmpngess': '_mm_cmpnge_ss',
'__builtin_ia32_cmpordss': '_mm_cmpord_ss',
'__builtin_ia32_movss': '_mm_move_ss',
'__builtin_ia32_movsd': '_mm_move_sd',
'__builtin_ia32_movhlps': '_mm_movehl_ps',
'__builtin_ia32_movlhps': '_mm_movelh_ps',
'__builtin_ia32_movqv4si': '_mm_move_epi64',
'__builtin_ia32_unpckhps': '_mm_unpackhi_ps',
'__builtin_ia32_unpckhpd': '_mm_unpackhi_pd',
'__builtin_ia32_punpckhbw128': '_mm_unpackhi_epi8',
'__builtin_ia32_punpckhwd128': '_mm_unpackhi_epi16',
'__builtin_ia32_punpckhdq128': '_mm_unpackhi_epi32',
'__builtin_ia32_punpckhqdq128': '_mm_unpackhi_epi64',
'__builtin_ia32_unpcklps': '_mm_unpacklo_ps',
'__builtin_ia32_unpcklpd': '_mm_unpacklo_pd',
'__builtin_ia32_punpcklbw128': '_mm_unpacklo_epi8',
'__builtin_ia32_punpcklwd128': '_mm_unpacklo_epi16',
'__builtin_ia32_punpckldq128': '_mm_unpacklo_epi32',
'__builtin_ia32_punpcklqdq128': '_mm_unpacklo_epi64',
'__builtin_ia32_cmpeqpd': '_mm_cmpeq_pd',
'__builtin_ia32_cmpltpd': '_mm_cmplt_pd',
'__builtin_ia32_cmplepd': '_mm_cmple_pd',
'__builtin_ia32_cmpgtpd': '_mm_cmpgt_pd',
'__builtin_ia32_cmpgepd': '_mm_cmpge_pd',
'__builtin_ia32_cmpunordpd': '_mm_cmpunord_pd',
'__builtin_ia32_cmpneqpd': '_mm_cmpneq_pd',
'__builtin_ia32_cmpnltpd': '_mm_cmpnlt_pd',
'__builtin_ia32_cmpnlepd': '_mm_cmpnle_pd',
'__builtin_ia32_cmpngtpd': '_mm_cmpngt_pd',
'__builtin_ia32_cmpngepd': '_mm_cmpnge_pd',
'__builtin_ia32_cmpordpd': '_mm_cmpord_pd',
'__builtin_ia32_cmpeqsd': '_mm_cmpeq_sd',
'__builtin_ia32_cmpltsd': '_mm_cmplt_sd',
'__builtin_ia32_cmplesd': '_mm_cmple_sd',
'__builtin_ia32_cmpunordsd': '_mm_cmpunord_sd',
'__builtin_ia32_cmpneqsd': '_mm_cmpneq_sd',
'__builtin_ia32_cmpnltsd': '_mm_cmpnlt_sd',
'__builtin_ia32_cmpnlesd': '_mm_cmpnle_sd',
'__builtin_ia32_cmpordsd': '_mm_cmpord_sd',
'__builtin_ia32_cvtsi642ss': '_mm_cvtsi64_ss',
'__builtin_ia32_cvttss2si64': '_mm_cvtss_si64',
'__builtin_ia32_shufps': '_mm_shuffle_ps',
'__builtin_ia32_shufpd': '_mm_shuffle_pd',
'__builtin_ia32_pshufhw': '_mm_shufflehi_epi16',
'__builtin_ia32_pshuflw': '_mm_shufflelo_epi16',
'__builtin_ia32_pshufd': '_mm_shuffle_epi32',
'__builtin_ia32_movshdup': '_mm_movehdup_ps',
'__builtin_ia32_movsldup': '_mm_moveldup_ps',
'__builtin_ia32_maxps': '_mm_max_ps',
'__builtin_ia32_pslldi128': '_mm_slli_epi32',
'__builtin_ia32_vec_set_v16qi': '_mm_insert_epi8',
'__builtin_ia32_vec_set_v8hi': '_mm_insert_epi16',
'__builtin_ia32_vec_set_v4si': '_mm_insert_epi32',
'__builtin_ia32_vec_set_v2di': '_mm_insert_epi64',
'__builtin_ia32_vec_set_v4hi': '_mm_insert_pi16',
'__builtin_ia32_vec_ext_v16qi': '_mm_extract_epi8',
'__builtin_ia32_vec_ext_v8hi': '_mm_extract_epi16',
'__builtin_ia32_vec_ext_v4si': '_mm_extract_epi32',
'__builtin_ia32_vec_ext_v2di': '_mm_extract_epi64',
'__builtin_ia32_vec_ext_v4hi': '_mm_extract_pi16',
'__builtin_ia32_vec_ext_v4sf': '_mm_extract_ps'
}
# Special unhandled cases:
# __builtin_ia32_vec_ext_*(__P, idx) -> _mm_store_sd/_mm_storeh_pd
# depending on index. No abstract insert/extract for these oddly.
unhandled = [
'__builtin_ia32_vec_ext_v2df',
'__builtin_ia32_vec_ext_v2si',
]
def report_repl(builtin, repl):
sys.stderr.write("%s:%d: x86 builtin %s used, replaced with %s\n" % (fileinput.filename(), fileinput.filelineno(), builtin, repl))
def report_cant(builtin):
sys.stderr.write("%s:%d: x86 builtin %s used, too many replacements\n" % (fileinput.filename(), fileinput.filelineno(), builtin))
for line in fileinput.input(inplace=1):
for builtin, repl in repl_map.iteritems():
if builtin in line:
line = line.replace(builtin, repl)
report_repl(builtin, repl)
for unh in unhandled:
if unh in line:
report_cant(unh)
sys.stdout.write(line)
sys.exit(err)
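# Typical invocation (assumed): list the source files to rewrite on the command
# line; fileinput.input(inplace=1) edits them in place and the replacement
# notes are written to stderr, e.g.
#     python builtins.py foo_intrin.c bar_intrin.c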
| lgpl-2.1 | -152,825,105,743,407,040 | 39.693252 | 132 | 0.662295 | false |
hamiltont/CouchPotatoServer | couchpotato/core/notifications/email/__init__.py | 7 | 1913 | from .main import Email
def start():
return Email()
config = [{
'name': 'email',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'email',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'from',
'label': 'Send e-mail from',
},
{
'name': 'to',
'label': 'Send e-mail to',
},
{
'name': 'smtp_server',
'label': 'SMTP server',
},
{ 'name': 'smtp_port',
'label': 'SMTP server port',
'default': '25',
'type': 'int',
},
{
'name': 'ssl',
'label': 'Enable SSL',
'default': 0,
'type': 'bool',
},
{
'name': 'starttls',
'label': 'Enable StartTLS',
'default': 0,
'type': 'bool',
},
{
'name': 'smtp_user',
'label': 'SMTP user',
},
{
'name': 'smtp_pass',
'label': 'SMTP password',
'type': 'password',
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
},
],
}
],
}]
| gpl-3.0 | 4,988,467,638,644,959,000 | 27.552239 | 79 | 0.263983 | false |
pleaseproject/python-for-android | python3-alpha/python3-src/Lib/test/test_structseq.py | 57 | 4010 | import os
import time
import unittest
from test import support
class StructSeqTest(unittest.TestCase):
def test_tuple(self):
t = time.gmtime()
self.assertIsInstance(t, tuple)
astuple = tuple(t)
self.assertEqual(len(t), len(astuple))
self.assertEqual(t, astuple)
# Check that slicing works the same way; at one point, slicing t[i:j] with
# 0 < i < j could produce NULLs in the result.
for i in range(-len(t), len(t)):
self.assertEqual(t[i:], astuple[i:])
for j in range(-len(t), len(t)):
self.assertEqual(t[i:j], astuple[i:j])
for j in range(-len(t), len(t)):
self.assertEqual(t[:j], astuple[:j])
self.assertRaises(IndexError, t.__getitem__, -len(t)-1)
self.assertRaises(IndexError, t.__getitem__, len(t))
for i in range(-len(t), len(t)-1):
self.assertEqual(t[i], astuple[i])
def test_repr(self):
t = time.gmtime()
self.assertTrue(repr(t))
t = time.gmtime(0)
self.assertEqual(repr(t),
"time.struct_time(tm_year=1970, tm_mon=1, tm_mday=1, tm_hour=0, "
"tm_min=0, tm_sec=0, tm_wday=3, tm_yday=1, tm_isdst=0)")
# os.stat() gives a complicated struct sequence.
st = os.stat(__file__)
rep = repr(st)
self.assertTrue(rep.startswith(os.name + ".stat_result"))
self.assertIn("st_mode=", rep)
self.assertIn("st_ino=", rep)
self.assertIn("st_dev=", rep)
def test_concat(self):
t1 = time.gmtime()
t2 = t1 + tuple(t1)
for i in range(len(t1)):
self.assertEqual(t2[i], t2[i+len(t1)])
def test_repeat(self):
t1 = time.gmtime()
t2 = 3 * t1
for i in range(len(t1)):
self.assertEqual(t2[i], t2[i+len(t1)])
self.assertEqual(t2[i], t2[i+2*len(t1)])
def test_contains(self):
t1 = time.gmtime()
for item in t1:
self.assertIn(item, t1)
self.assertNotIn(-42, t1)
def test_hash(self):
t1 = time.gmtime()
self.assertEqual(hash(t1), hash(tuple(t1)))
def test_cmp(self):
t1 = time.gmtime()
t2 = type(t1)(t1)
self.assertEqual(t1, t2)
self.assertTrue(not (t1 < t2))
self.assertTrue(t1 <= t2)
self.assertTrue(not (t1 > t2))
self.assertTrue(t1 >= t2)
self.assertTrue(not (t1 != t2))
def test_fields(self):
t = time.gmtime()
self.assertEqual(len(t), t.n_fields)
self.assertEqual(t.n_fields, t.n_sequence_fields+t.n_unnamed_fields)
def test_constructor(self):
t = time.struct_time
self.assertRaises(TypeError, t)
self.assertRaises(TypeError, t, None)
self.assertRaises(TypeError, t, "123")
self.assertRaises(TypeError, t, "123", dict={})
self.assertRaises(TypeError, t, "123456789", dict=None)
s = "123456789"
self.assertEqual("".join(t(s)), s)
def test_eviltuple(self):
class Exc(Exception):
pass
        # Devious code could crash structseqs' constructors
class C:
def __getitem__(self, i):
raise Exc
def __len__(self):
return 9
self.assertRaises(Exc, time.struct_time, C())
def test_reduce(self):
t = time.gmtime()
x = t.__reduce__()
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
t = time.gmtime()
L = list(t)
indices = (0, None, 1, 3, 19, 300, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
self.assertEqual(list(t[start:stop:step]),
L[start:stop:step])
def test_main():
support.run_unittest(StructSeqTest)
if __name__ == "__main__":
test_main()
| apache-2.0 | -5,098,176,806,424,302,000 | 30.085271 | 82 | 0.540399 | false |
dgjustice/ansible | lib/ansible/modules/cloud/ovirt/ovirt_permissions_facts.py | 13 | 4468 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt_permissions_facts
short_description: Retrieve facts about one or more oVirt permissions
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt permissions."
notes:
- "This module creates a new top-level C(ovirt_permissions) fact, which
contains a list of permissions."
options:
user_name:
description:
            - "Username of the user to manage. In most LDAPs it's I(uid) of the user, but in Active Directory you must specify I(UPN) of the user."
group_name:
description:
- "Name of the the group to manage."
authz_name:
description:
- "Authorization provider of the user/group. In previous versions of oVirt known as domain."
required: true
aliases: ['domain']
namespace:
description:
- "Namespace of the authorization provider, where user/group resides."
required: false
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all permissions of user with username C(john):
- ovirt_permissions_facts:
user_name: john
authz_name: example.com-authz
- debug:
var: ovirt_permissions
'''
RETURN = '''
ovirt_permissions:
description: "List of dictionaries describing the permissions. Permission attribues are mapped to dictionary keys,
all permissions attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/permission."
returned: On success.
type: list
'''
import traceback
try:
import ovirtsdk4 as sdk
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_link_name,
ovirt_facts_full_argument_spec,
search_by_name,
)
def _permissions_service(connection, module):
if module.params['user_name']:
service = connection.system_service().users_service()
entity = search_by_name(service, module.params['user_name'])
else:
service = connection.system_service().groups_service()
entity = search_by_name(service, module.params['group_name'])
if entity is None:
raise Exception("User/Group wasn't found.")
return service.service(entity.id).permissions_service()
def main():
argument_spec = ovirt_facts_full_argument_spec(
authz_name=dict(required=True, aliases=['domain']),
        user_name=dict(default=None),
group_name=dict(default=None),
namespace=dict(default=None),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
connection = create_connection(module.params.pop('auth'))
permissions_service = _permissions_service(connection, module)
permissions = []
for p in permissions_service.list():
newperm = dict()
for key, value in p.__dict__.items():
if value and isinstance(value, sdk.Struct):
newperm[key[1:]] = get_link_name(connection, value)
permissions.append(newperm)
module.exit_json(
changed=False,
ansible_facts=dict(ovirt_permissions=permissions),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=False)
if __name__ == '__main__':
main()
| gpl-3.0 | 815,344,931,991,338,400 | 31.143885 | 151 | 0.666517 | false |
karlcow/ymir | ymir/utils/make_post.py | 1 | 5023 | from datetime import datetime
from glob import glob
import locale
from pprint import pprint
import re
import sys
from textwrap import dedent
import mistune
from PIL import Image
ROOT = '/Users/karl/Sites/la-grange.net'
INDENTATION = re.compile(r'\n\s{2,}')
META = re.compile(r'^(\w+):([^\n]*)\n')
PATH = re.compile(r'^.*(\d{4})/(\d{2})/(\d{2})/.*')
TEMPLATE = """date: {date}
prev: {prev}
title: {title}
url: {url}
style: /2019/style
"""
class GrangeRenderer(mistune.HTMLRenderer):
"""Adjusted renderer for La Grange."""
def get_img_size(self, image_path):
"""extract width and height of an image."""
full_path = ROOT + image_path
try:
with Image.open(full_path) as im:
return im.size
except FileNotFoundError as e:
print('TOFIX: Image file path incorrect')
sys.exit(f' {e}')
def image(self, src, alt="", title=None):
width, height = self.get_img_size(src)
if title:
return dedent(f"""
<figure>
<img src="{src}"
alt="{alt}"
width="{width}" height="{height}" />
<figcaption>{title}</figcaption>
</figure>
""")
else:
s = f'<img src="{src}" alt="{alt}" width="{width}" height="{height}" />' # noqa
return s
def paragraph(self, text):
# In case of a figure, we do not want the (non-standard) paragraph.
# david larlet's code idea
if text.strip().startswith("<figure>"):
return text
return f"<p>{text}</p>\n"
def parse(text):
"""Parse the given text into metadata and strip it for a Markdown parser.
:param text: text to be parsed
"""
rv = {}
m = META.match(text)
while m:
key = m.group(1)
value = m.group(2)
value = INDENTATION.sub('\n', value.strip())
if not value:
sys.exit("ERROR: Some meta are missing")
rv[key] = value
text = text[len(m.group(0)):]
m = META.match(text)
return rv, text.lstrip()
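# Illustrative behaviour of parse() (the draft header below is hypothetical):
#   parse("date: 2020-01-01\ntitle: Example\n\nBody text")
#   -> ({'date': '2020-01-01', 'title': 'Example'}, 'Body text')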
def get_draft(entry_path):
"""Read the draft.
It returns a tuple with:
- meta: dict
- text: str
"""
try:
with open(entry_path) as entry:
text = entry.read()
except FileNotFoundError as e:
print('TOFIX: draft file path incorrect')
sys.exit(f' {e}')
else:
return parse(text)
def add_id(html_text):
"""Post process to add certain ids."""
# Add id to links section
html_text = html_text.replace(
'<h2>sur le bord du chemin</h2>',
'<h2 id="links">sur le bord du chemin</h2>')
return html_text
def main():
"""Main workflow."""
locale.setlocale(locale.LC_ALL, 'fr_FR')
import argparse
from pathlib import Path
parser = argparse.ArgumentParser()
parser.add_argument("file_path", type=Path)
p = parser.parse_args()
entry_path = p.file_path
template_path = f'{ROOT}/2019/12/04/article_tmpl.html'
with open(template_path) as tmpfile:
blog_tmp = tmpfile.read()
# Read the draft post
meta, markdown_text = get_draft(entry_path)
pprint(meta)
prev_url = meta['prev']
# Read the previous blog entry
with open(ROOT + prev_url + '.html') as prev_entry:
from bs4 import BeautifulSoup
text_prev = prev_entry.read()
htmldata = BeautifulSoup(text_prev, features="lxml")
prev_title = htmldata.find('title').text
prev_title = prev_title.replace(' - Carnets Web de La Grange', '')
# Meta extraction
# Created
created_timestamp = '{datestr}T23:59:59+09:00'.format(datestr=meta['date'])
d = datetime.fromisoformat(meta['date'])
day = d.day
day_path = f"{d:%d}"
year = d.year
month = f"{d:%m}"
month_name = f"{d:%B}"
# special rendering
renderer = GrangeRenderer()
markdown = mistune.create_markdown(
renderer=renderer, plugins=['strikethrough'], escape=False)
html_text = markdown(markdown_text)
# Post processing of markdown text
html_text = add_id(html_text)
# metadata
metadata = {
'title': meta['title'],
'created_timestamp': created_timestamp,
'day': day,
'year': year,
'month': month,
'month_name': month_name,
'updated_timestamp': created_timestamp,
'updated': meta['date'],
'prev_url': meta['prev'],
'prev_title': prev_title,
'post_text': html_text,
'day_path': day_path,
'url': meta['url'],
'stylepath': meta['style'],
}
blog_post = blog_tmp.format(**metadata)
dest = ROOT + '/{year}/{month}/{day_path}/{url}.html'.format(**metadata)
print(dest)
with open(dest, 'w') as blogpost:
blogpost.write(blog_post)
def extract_date(path):
full_date = PATH.match(path)
return '-'.join(full_date.groups())
if __name__ == "__main__":
main()
| mit | -107,159,644,344,080,420 | 27.867816 | 92 | 0.565598 | false |
alsrgv/tensorflow | tensorflow/contrib/distributions/python/ops/vector_laplace_linear_operator.py | 21 | 11027 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Vectorized Laplace distribution class, directly using LinearOperator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import laplace
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.util import deprecation
__all__ = [
"VectorLaplaceLinearOperator"
]
_mvn_sample_note = """
`value` is a batch vector with compatible shape if `value` is a `Tensor` whose
shape can be broadcast up to either:
```python
self.batch_shape + self.event_shape
```
or
```python
[M1, ..., Mm] + self.batch_shape + self.event_shape
```
"""
class VectorLaplaceLinearOperator(
transformed_distribution.TransformedDistribution):
"""The vectorization of the Laplace distribution on `R^k`.
The vector laplace distribution is defined over `R^k`, and parameterized by
a (batch of) length-`k` `loc` vector (the means) and a (batch of) `k x k`
`scale` matrix: `covariance = 2 * scale @ scale.T`, where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = exp(-||y||_1) / Z,
y = inv(scale) @ (x - loc),
Z = 2**k |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
  * `||y||_1` denotes the `l1` norm of `y`, `sum_i |y_i|`.
The VectorLaplace distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X = (X_1, ..., X_k), each X_i ~ Laplace(loc=0, scale=1)
Y = (Y_1, ...,Y_k) = scale @ X + loc
```
#### About `VectorLaplace` and `Vector` distributions in TensorFlow.
The `VectorLaplace` is a non-standard distribution that has useful properties.
The marginals `Y_1, ..., Y_k` are *not* Laplace random variables, due to
the fact that the sum of Laplace random variables is not Laplace.
Instead, `Y` is a vector whose components are linear combinations of Laplace
random variables. Thus, `Y` lives in the vector space generated by `vectors`
of Laplace distributions. This allows the user to decide the mean and
covariance (by setting `loc` and `scale`), while preserving some properties of
the Laplace distribution. In particular, the tails of `Y_i` will be (up to
polynomial factors) exponentially decaying.
To see this last statement, note that the pdf of `Y_i` is the convolution of
the pdf of `k` independent Laplace random variables. One can then show by
induction that distributions with exponential (up to polynomial factors) tails
are closed under convolution.
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3-variate VectorLaplace with some desired covariance.
mu = [1., 2, 3]
cov = [[ 0.36, 0.12, 0.06],
[ 0.12, 0.29, -0.13],
[ 0.06, -0.13, 0.26]]
scale = tf.cholesky(cov)
# ==> [[ 0.6, 0. , 0. ],
# [ 0.2, 0.5, 0. ],
# [ 0.1, -0.3, 0.4]])
# Divide scale by sqrt(2) so that the final covariance will be what we want.
vla = tfd.VectorLaplaceLinearOperator(
loc=mu,
scale=tf.linalg.LinearOperatorLowerTriangular(scale / tf.sqrt(2.)))
# Covariance agrees with cholesky(cov) parameterization.
vla.covariance().eval()
# ==> [[ 0.36, 0.12, 0.06],
# [ 0.12, 0.29, -0.13],
# [ 0.06, -0.13, 0.26]]
  # Compute the pdf of an `R^3` observation; return a scalar.
vla.prob([-1., 0, 1]).eval() # shape: []
# Initialize a 2-batch of 3-variate Vector Laplace's.
mu = [[1., 2, 3],
[11, 22, 33]] # shape: [2, 3]
scale_diag = [[1., 2, 3],
[0.5, 1, 1.5]] # shape: [2, 3]
vla = tfd.VectorLaplaceLinearOperator(
loc=mu,
scale=tf.linalg.LinearOperatorDiag(scale_diag))
# Compute the pdf of two `R^3` observations; return a length-2 vector.
x = [[-0.9, 0, 0.1],
[-10, 0, 9]] # shape: [2, 3]
vla.prob(x).eval() # shape: [2]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale=None,
validate_args=False,
allow_nan_stats=True,
name="VectorLaplaceLinearOperator"):
"""Construct Vector Laplace distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = 2 * scale @ scale.T`.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale: Instance of `LinearOperator` with same `dtype` as `loc` and shape
`[B1, ..., Bb, k, k]`.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
ValueError: if `scale` is unspecified.
TypeError: if not `scale.dtype.is_floating`
"""
parameters = dict(locals())
if scale is None:
raise ValueError("Missing required `scale` parameter.")
if not scale.dtype.is_floating:
raise TypeError("`scale` parameter must have floating-point dtype.")
with ops.name_scope(name, values=[loc] + scale.graph_parents):
# Since expand_dims doesn't preserve constant-ness, we obtain the
# non-dynamic value if possible.
loc = ops.convert_to_tensor(loc, name="loc") if loc is not None else loc
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
super(VectorLaplaceLinearOperator, self).__init__(
distribution=laplace.Laplace(
loc=array_ops.zeros([], dtype=scale.dtype),
scale=array_ops.ones([], dtype=scale.dtype)),
bijector=bijectors.AffineLinearOperator(
shift=loc, scale=scale, validate_args=validate_args),
batch_shape=batch_shape,
event_shape=event_shape,
validate_args=validate_args,
name=name)
self._parameters = parameters
@property
def loc(self):
"""The `loc` `Tensor` in `Y = scale @ X + loc`."""
return self.bijector.shift
@property
def scale(self):
"""The `scale` `LinearOperator` in `Y = scale @ X + loc`."""
return self.bijector.scale
@distribution_util.AppendDocstring(_mvn_sample_note)
def _log_prob(self, x):
return super(VectorLaplaceLinearOperator, self)._log_prob(x)
@distribution_util.AppendDocstring(_mvn_sample_note)
def _prob(self, x):
return super(VectorLaplaceLinearOperator, self)._prob(x)
def _mean(self):
shape = self.batch_shape.concatenate(self.event_shape)
has_static_shape = shape.is_fully_defined()
if not has_static_shape:
shape = array_ops.concat([
self.batch_shape_tensor(),
self.event_shape_tensor(),
], 0)
if self.loc is None:
return array_ops.zeros(shape, self.dtype)
if has_static_shape and shape == self.loc.get_shape():
return array_ops.identity(self.loc)
# Add dummy tensor of zeros to broadcast. This is only necessary if shape
# != self.loc.shape, but we could not determine if this is the case.
return array_ops.identity(self.loc) + array_ops.zeros(shape, self.dtype)
def _covariance(self):
# Let
# W = (w1,...,wk), with wj ~ iid Laplace(0, 1).
# Then this distribution is
# X = loc + LW,
# and since E[X] = loc,
# Cov(X) = E[LW W^T L^T] = L E[W W^T] L^T.
# Since E[wi wj] = 0 if i != j, and 2 if i == j, we have
# Cov(X) = 2 LL^T
if distribution_util.is_diagonal_scale(self.scale):
return 2. * array_ops.matrix_diag(math_ops.square(self.scale.diag_part()))
else:
return 2. * self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)
def _variance(self):
if distribution_util.is_diagonal_scale(self.scale):
return 2. * math_ops.square(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return array_ops.matrix_diag_part(
2. * self.scale.matmul(self.scale.to_dense()))
else:
return 2. * array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True))
def _stddev(self):
if distribution_util.is_diagonal_scale(self.scale):
return np.sqrt(2) * math_ops.abs(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return np.sqrt(2) * math_ops.sqrt(array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense())))
else:
return np.sqrt(2) * math_ops.sqrt(array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)))
def _mode(self):
return self._mean()
| apache-2.0 | 851,180,430,821,488,500 | 35.392739 | 80 | 0.64995 | false |
xsynergy510x/android_external_chromium_org | tools/telemetry/telemetry/page/page_set_archive_info.py | 26 | 6409 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
import re
import shutil
import tempfile
from telemetry.util import cloud_storage
class PageSetArchiveInfo(object):
def __init__(self, file_path, data, ignore_archive=False):
self._file_path = file_path
self._base_dir = os.path.dirname(file_path)
# Ensure directory exists.
if not os.path.exists(self._base_dir):
os.makedirs(self._base_dir)
# Download all .wpr files.
if not ignore_archive:
for archive_path in data['archives']:
archive_path = self._WprFileNameToPath(archive_path)
try:
cloud_storage.GetIfChanged(archive_path)
except (cloud_storage.CredentialsError, cloud_storage.PermissionError):
if os.path.exists(archive_path):
# If the archive exists, assume the user recorded their own and
# simply warn.
logging.warning('Need credentials to update WPR archive: %s',
archive_path)
# Map from the relative path (as it appears in the metadata file) of the
# .wpr file to a list of page names it supports.
self._wpr_file_to_page_names = data['archives']
# Map from the page name to a relative path (as it appears in the metadata
# file) of the .wpr file.
self._page_name_to_wpr_file = dict()
# Find out the wpr file names for each page.
for wpr_file in data['archives']:
page_names = data['archives'][wpr_file]
for page_name in page_names:
self._page_name_to_wpr_file[page_name] = wpr_file
self.temp_target_wpr_file_path = None
@classmethod
def FromFile(cls, file_path, ignore_archive=False):
if os.path.exists(file_path):
with open(file_path, 'r') as f:
data = json.load(f)
return cls(file_path, data, ignore_archive=ignore_archive)
return cls(file_path, {'archives': {}}, ignore_archive=ignore_archive)
def WprFilePathForPage(self, page):
if self.temp_target_wpr_file_path:
return self.temp_target_wpr_file_path
wpr_file = self._page_name_to_wpr_file.get(page.display_name, None)
if wpr_file is None:
# Some old page sets always use the URL to identify a page rather than the
# display_name, so try to look for that.
wpr_file = self._page_name_to_wpr_file.get(page.url, None)
if wpr_file:
return self._WprFileNameToPath(wpr_file)
return None
def AddNewTemporaryRecording(self, temp_wpr_file_path=None):
if temp_wpr_file_path is None:
temp_wpr_file_handle, temp_wpr_file_path = tempfile.mkstemp()
os.close(temp_wpr_file_handle)
self.temp_target_wpr_file_path = temp_wpr_file_path
def AddRecordedPages(self, pages):
if not pages:
os.remove(self.temp_target_wpr_file_path)
return
(target_wpr_file, target_wpr_file_path) = self._NextWprFileName()
for page in pages:
self._SetWprFileForPage(page.display_name, target_wpr_file)
shutil.move(self.temp_target_wpr_file_path, target_wpr_file_path)
# Update the hash file.
with open(target_wpr_file_path + '.sha1', 'wb') as f:
f.write(cloud_storage.CalculateHash(target_wpr_file_path))
f.flush()
self._WriteToFile()
self._DeleteAbandonedWprFiles()
def _DeleteAbandonedWprFiles(self):
# Update the metadata so that the abandoned wpr files don't have empty page
# name arrays.
abandoned_wpr_files = self._AbandonedWprFiles()
for wpr_file in abandoned_wpr_files:
del self._wpr_file_to_page_names[wpr_file]
# Don't fail if we're unable to delete some of the files.
wpr_file_path = self._WprFileNameToPath(wpr_file)
try:
os.remove(wpr_file_path)
except Exception:
logging.warning('Failed to delete file: %s' % wpr_file_path)
def _AbandonedWprFiles(self):
abandoned_wpr_files = []
for wpr_file, page_names in self._wpr_file_to_page_names.iteritems():
if not page_names:
abandoned_wpr_files.append(wpr_file)
return abandoned_wpr_files
def _WriteToFile(self):
"""Writes the metadata into the file passed as constructor parameter."""
metadata = dict()
metadata['description'] = (
'Describes the Web Page Replay archives for a page set. Don\'t edit by '
'hand! Use record_wpr for updating.')
metadata['archives'] = self._wpr_file_to_page_names.copy()
# Don't write data for abandoned archives.
abandoned_wpr_files = self._AbandonedWprFiles()
for wpr_file in abandoned_wpr_files:
del metadata['archives'][wpr_file]
with open(self._file_path, 'w') as f:
json.dump(metadata, f, indent=4)
f.flush()
def _WprFileNameToPath(self, wpr_file):
return os.path.abspath(os.path.join(self._base_dir, wpr_file))
def _NextWprFileName(self):
"""Creates a new file name for a wpr archive file."""
# The names are of the format "some_thing_number.wpr". Read the numbers.
highest_number = -1
base = None
for wpr_file in self._wpr_file_to_page_names:
match = re.match(r'(?P<BASE>.*)_(?P<NUMBER>[0-9]+)\.wpr', wpr_file)
if not match:
raise Exception('Illegal wpr file name ' + wpr_file)
highest_number = max(int(match.groupdict()['NUMBER']), highest_number)
if base and match.groupdict()['BASE'] != base:
raise Exception('Illegal wpr file name ' + wpr_file +
', doesn\'t begin with ' + base)
base = match.groupdict()['BASE']
if not base:
# If we're creating a completely new info file, use the base name of the
# page set file.
base = os.path.splitext(os.path.basename(self._file_path))[0]
new_filename = '%s_%03d.wpr' % (base, highest_number + 1)
return new_filename, self._WprFileNameToPath(new_filename)
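  # _NextWprFileName example (archive names here are hypothetical): if the
  # metadata already lists "foo_000.wpr" and "foo_001.wpr", this returns
  # "foo_002.wpr" together with its absolute path.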
def _SetWprFileForPage(self, page_name, wpr_file):
"""For modifying the metadata when we're going to record a new archive."""
old_wpr_file = self._page_name_to_wpr_file.get(page_name, None)
if old_wpr_file:
self._wpr_file_to_page_names[old_wpr_file].remove(page_name)
self._page_name_to_wpr_file[page_name] = wpr_file
if wpr_file not in self._wpr_file_to_page_names:
self._wpr_file_to_page_names[wpr_file] = []
self._wpr_file_to_page_names[wpr_file].append(page_name)
| bsd-3-clause | -2,591,528,340,399,150,600 | 38.319018 | 80 | 0.662194 | false |
kzky/python-online-machine-learning-library | pa/passive_aggressive_2.py | 1 | 6472 | import numpy as np
import scipy as sp
import logging as logger
import time
import pylab as pl
from collections import defaultdict
from sklearn.metrics import confusion_matrix
class PassiveAggressiveII(object):
"""
Passive Aggressive-II algorithm: squared hinge loss PA.
References:
- http://jmlr.org/papers/volume7/crammer06a/crammer06a.pdf
This model is only applied to binary classification.
"""
def __init__(self, fname, delimiter = " ", C = 1, n_scan = 10):
"""
model initialization.
"""
logger.basicConfig(level=logger.DEBUG)
logger.info("init starts")
        self.n_scan = n_scan
self.data = defaultdict()
self.model = defaultdict()
self.cache = defaultdict()
self._load(fname, delimiter)
self._init_model(C)
logger.info("init finished")
def _load(self, fname, delimiter = " "):
"""
Load data set specified with filename.
data format must be as follows (space-separated file as default),
l_1 x_11 x_12 x_13 ... x_1m
l_2 x_21 x_22 ... x_2m
...
l_n x_n1 x_n2 ... x_nm
l_i must be {1, -1} because of binary classifier.
Arguments:
- `fname`: file name.
- `delimiter`: delimiter of a file.
"""
logger.info("load data starts")
# load data
self.data["data"] = np.loadtxt(fname, delimiter = delimiter)
self.data["n_sample"] = self.data["data"].shape[0]
self.data["f_dim"] = self.data["data"].shape[1] - 1
# binalize
self._binalize(self.data["data"])
# normalize
self.normalize(self.data["data"][:, 1:])
logger.info("load data finished")
def _binalize(self, data):
"""
Binalize label of data.
Arguments:
- `data`: dataset.
"""
logger.info("init starts")
# binary check
labels = data[:, 0]
classes = np.unique(labels)
if classes.size != 2:
print "label must be a binary value."
exit(1)
        # convert binary labels to {1, -1}
for i in xrange(labels.size):
if labels[i] == classes[0]:
labels[i] = 1
else:
labels[i] = -1
# set classes
self.data["classes"] = classes
logger.info("init finished")
def normalize(self, samples):
"""
        normalize samples, such that sqrt(x^2) = 1
Arguments:
- `samples`: dataset without labels.
"""
logger.info("normalize starts")
for i in xrange(0, self.data["n_sample"]):
samples[i, :] = self._normalize(samples[i, :])
logger.info("normalize finished")
def _normalize(self, sample):
norm = np.sqrt(sample.dot(sample))
sample = sample/norm
return sample
def _init_model(self, C):
"""
Initialize model.
"""
logger.info("init model starts")
self.model["w"] = np.ndarray(self.data["f_dim"] + 1) # model paremter
self.model["C"] = C # aggressive parameter
logger.info("init model finished")
def _learn(self, ):
"""
Learn internally.
"""
def _update(self, label, sample, margin):
"""
Update model parameter internally.
update rule is as follows,
w = w + y (1 - m)/(||x||_2^2 + C) * x
Arguments:
- `label`: label = {1, -1}
- `sample`: sample, or feature vector
"""
# add bias
sample = self._add_bias(sample)
norm = sample.dot(sample)
w = self.model["w"] + label * (1 - margin)/(norm + self.model["C"]) * sample
self.model["w"] = w
def _predict_value(self, sample):
"""
predict value of \w^T * x
Arguments:
- `sample`:
"""
return self.model["w"].dot(self._add_bias(sample))
def _add_bias(self, sample):
return np.hstack((sample, 1))
def learn(self, ):
"""
Learn.
"""
logger.info("learn starts")
data = self.data["data"]
# learn
for i in xrange(0, self.n_scan):
for i in xrange(0, self.data["n_sample"]):
sample = data[i, 1:]
label = data[i, 0]
pred_val = self._predict_value(sample)
margin = label * pred_val
if margin < 1:
self._update(label, sample, margin)
logger.info("learn finished")
def predict(self, sample):
"""
predict {1, -1} base on \w^T * x
Arguments:
- `sample`:
"""
pred_val = self._predict_value(sample)
self.cache["pred_val"] = pred_val
if pred_val >=0:
return 1
else:
return -1
def update(self, label, sample):
"""
update model.
Arguments:
- `sample`: sample, or feature vector
- `pred_val`: predicted value i.e., w^T * sample
"""
margin = label * self.model["pred_val"]
if margin < 1:
_update(label, sample, margin)
@classmethod
def examplify(cls, fname, delimiter = " ", C = 1 , n_scan = 3):
"""
Example of how to use
"""
# learn
st = time.time()
model = PassiveAggressiveII(fname, delimiter, C , n_scan)
model.learn()
et = time.time()
print "learning time: %f[s]" % (et - st)
# predict (after learning)
data = np.loadtxt(fname, delimiter = " ")
model._binalize(data)
n_sample = data.shape[0]
y_label = data[:, 0]
y_pred = np.ndarray(n_sample)
for i in xrange(0, n_sample):
sample = data[i, 1:]
y_pred[i] = model.predict(sample)
# show result
cm = confusion_matrix(y_label, y_pred)
print cm
print "accurary: %d [%%]" % (np.sum(cm.diagonal()) * 100.0/np.sum(cm))
if __name__ == '__main__':
fname = "/home/kzk/datasets/uci_csv/liver.csv"
#fname = "/home/kzk/datasets/uci_csv/ad.csv"
print "dataset is", fname
PassiveAggressiveII.examplify(fname, delimiter = " ", C = 1, n_scan = 100)
| bsd-3-clause | -8,372,055,500,977,262,000 | 27.13913 | 87 | 0.502936 | false |
divergentdave/inspectors-general | inspectors/nea.py | 2 | 6711 | #!/usr/bin/env python
import datetime
import logging
import os
from urllib.parse import urljoin
from utils import utils, inspector, admin
# http://arts.gov/oig
archive = 2005
# options:
# standard since/year options for a year range to fetch from.
# report_id: only bother to process a single report
#
# Notes for IG's web team:
# - Fix MISSING_IDS
AUDIT_REPORTS_URL = "http://arts.gov/oig/reports/audits"
SPECIAL_REVIEWS_URL = "http://arts.gov/oig/reports/specials"
SEMIANNUAL_REPORTS_URL = "http://arts.gov/oig/reports/semi-annual"
PEER_REVIEWS_URL = "http://arts.gov/oig/reports/external-peer-reviews"
FISMA_REPORTS_URL = "http://arts.gov/inspector-general/reports/internal-reviews"
REPORT_URLS = {
"audit": AUDIT_REPORTS_URL,
"evaluation": SPECIAL_REVIEWS_URL,
"semiannual_report": SEMIANNUAL_REPORTS_URL,
"peer_review": PEER_REVIEWS_URL,
"fisma": FISMA_REPORTS_URL,
}
MISSING_IDS = [
"EA-perimeter-security-test-reload",
]
REPORT_PUBLISHED_MAP = {
"2013-Peer-Review": datetime.datetime(2013, 12, 13),
"2010-Peer-Review": datetime.datetime(2010, 8, 30),
"2007-Peer-Review": datetime.datetime(2007, 3, 28),
"mississippi-limited-audit-revised": datetime.datetime(2015, 11, 3),
"maaf-final-report": datetime.datetime(2015, 5, 6),
"louisiana-final-audit": datetime.datetime(2014, 12, 22),
"DCCAH-Final-Report": datetime.datetime(2013, 9, 23),
"MN-State-Arts-Board-LSA": datetime.datetime(2013, 3, 15),
"MTG-LS-redacted": datetime.datetime(2013, 3, 1),
"AMW-LSA-Final-Report": datetime.datetime(2013, 1, 11),
"APAP-LSA-Report-080312": datetime.datetime(2012, 8, 3),
"Illinois-Arts-Council-Report": datetime.datetime(2012, 4, 4),
"American-Samoa": datetime.datetime(2011, 7, 15),
"MSAC_Report_1": datetime.datetime(2011, 7, 25),
"Family-Resources-Evaluation-Report": datetime.datetime(2009, 10, 30),
"Virginia-Commission": datetime.datetime(2009, 8, 12),
"Wisconsin-Arts-Board-Final-Report": datetime.datetime(2009, 6, 15),
"PCA-Final-Report_0": datetime.datetime(2009, 4, 3),
"hrac-final-debarment-report-5-13-2015": datetime.datetime(2015, 5, 13),
"northwest-heritage-resources-final-report": datetime.datetime(2014, 11, 19),
"2015-confluences-final-report": datetime.datetime(2014, 10, 20),
"State-Education-Agency-DIrectors-SCE-07-14": datetime.datetime(2014, 7, 16),
"Academy-of-American-Poets-SCE-7-14": datetime.datetime(2014, 7, 10),
"Lincoln-Center-Final-SCE": datetime.datetime(2014, 5, 28),
"American-Documentary-SCE-14-02": datetime.datetime(2014, 5, 19),
"BRIC-Arts-SCE-3-25-14": datetime.datetime(2014, 3, 25),
"Philadelphia-Orchestra-Association": datetime.datetime(2013, 3, 27),
"Greater-Philadelphia-Alliance": datetime.datetime(2013, 2, 7),
"FA-Report-NFT-Redacted": datetime.datetime(2013, 8, 28),
"mtg-report-disposition-closeout-11-14": datetime.datetime(2013, 6, 5),
"AFTA": datetime.datetime(2012, 9, 4),
"SAH": datetime.datetime(2012, 7, 9),
"APAP-Evaluation": datetime.datetime(2012, 6, 20),
"DCASE": datetime.datetime(2012, 5, 1),
"NBM": datetime.datetime(2011, 10, 24),
"BSO": datetime.datetime(2011, 9, 7),
"DSOHSCE": datetime.datetime(2010, 8, 5),
"Mosaic": datetime.datetime(2010, 4, 30),
"UMS": datetime.datetime(2010, 1, 28),
"gulf-coast-youth-choirs": datetime.datetime(2009, 9, 30),
"michigan-opera-theater": datetime.datetime(2009, 9, 30),
"Florida-Orchestra-Report": datetime.datetime(2009, 9, 28),
"artsandculturalaffairsweb": datetime.datetime(2009, 9, 23),
"Sphinx-Organization": datetime.datetime(2009, 9, 23),
"VirginIslandEvaluationReport": datetime.datetime(2009, 3, 25),
"WoodlandPatternEvaluationReport": datetime.datetime(2008, 10, 8),
"VSAEvaluationReport": datetime.datetime(2008, 10, 7),
"TricklockEvaluationReport": datetime.datetime(2008, 10, 6),
"LosReyesEvaluationReport": datetime.datetime(2008, 10, 2),
"MusicTheatreGroup-Redacted-2008": datetime.datetime(2007, 11, 21),
"LS-16-02-NASAA-Final-Report": datetime.datetime(2016, 2, 29),
"Letter-of-Comment-NEA-01-27-17": datetime.datetime(2017, 1, 27),
"Art-21-Report-SCE-17-01": datetime.datetime(2017, 7, 27),
}
def run(options):
year_range = inspector.year_range(options, archive)
only_report_id = options.get('report_id')
# Pull the reports
for report_type, url in sorted(REPORT_URLS.items()):
doc = utils.beautifulsoup_from_url(url)
results = doc.select("div.field-item li")
if not results:
results = doc.select("div.field-item tr")
if not results:
raise inspector.NoReportsFoundError("National Endowment for the Arts (%s)" % report_type)
for result in results:
report = report_from(result, url, report_type, year_range)
if report:
# debugging convenience: can limit to single report
if only_report_id and (report['report_id'] != only_report_id):
continue
inspector.save_report(report)
def report_from(result, landing_url, report_type, year_range):
link = result.find("a")
if not link:
return
title = link.text
report_url = urljoin(landing_url, link.get('href'))
report_filename = report_url.split("/")[-1]
report_id, _ = os.path.splitext(report_filename)
published_on = None
try:
published_on_text = result.select("td")[1].text.strip()
published_on = datetime.datetime.strptime(published_on_text, '%m/%d/%y')
except (ValueError, IndexError):
pass
try:
published_on_text = result.select("td")[1].text.strip()
published_on = datetime.datetime.strptime(published_on_text, '%m/%d/%Y')
except (ValueError, IndexError):
pass
if not published_on:
try:
published_on_text = title.split("-")[-1].split("–")[-1].strip()
published_on = datetime.datetime.strptime(published_on_text, '%B %d, %Y')
except ValueError:
pass
if not published_on:
if report_id in REPORT_PUBLISHED_MAP:
published_on = REPORT_PUBLISHED_MAP[report_id]
if not published_on:
admin.log_no_date("nea", report_id, title, report_url)
return
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
report = {
'inspector': 'nea',
'inspector_url': 'http://arts.gov/oig',
'agency': 'nea',
'agency_name': 'National Endowment for the Arts',
'type': report_type,
'landing_url': landing_url,
'report_id': report_id,
'url': report_url,
'title': title,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
if report_id in MISSING_IDS:
report['unreleased'] = True
report['missing'] = True
report['url'] = None
return report
utils.run(run) if (__name__ == "__main__") else None
| cc0-1.0 | 6,132,775,661,354,363,000 | 36.480447 | 95 | 0.688031 | false |
kiith-sa/QGIS | python/plugins/processing/admintools/httplib2/__init__.py | 43 | 70449 | # -*- coding: utf-8 -*-
"""
***************************************************************************
__init__.py
---------------------
Date : November 2006
Copyright : (C) 2012 by Joe Gregorio
Email : joe at bitworking dot org
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio ([email protected])"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer ([email protected])",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "0.7.6"
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import urllib
import base64
import os
import copy
import calendar
import time
import random
import errno
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
# prior to Python 2.5, these were separate modules
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
from httplib2 import socks
except ImportError:
try:
import socks
except ImportError:
socks = None
# Build the appropriate socket wrapper for ssl
try:
import ssl # python 2.6
ssl_SSLError = ssl.SSLError
def _ssl_wrap_socket(sock, key_file, cert_file,
disable_validation, ca_certs):
if disable_validation:
cert_reqs = ssl.CERT_NONE
else:
cert_reqs = ssl.CERT_REQUIRED
# We should be specifying SSL version 3 or TLS v1, but the ssl module
# doesn't expose the necessary knobs. So we need to go with the default
# of SSLv23.
return ssl.wrap_socket(sock, keyfile=key_file, certfile=cert_file,
cert_reqs=cert_reqs, ca_certs=ca_certs)
except (AttributeError, ImportError):
ssl_SSLError = None
def _ssl_wrap_socket(sock, key_file, cert_file,
disable_validation, ca_certs):
if not disable_validation:
raise CertificateValidationUnsupported(
"SSL certificate validation is not supported without "
"the ssl module installed. To avoid this error, install "
"the ssl module, or explicity disable validation.")
ssl_sock = socket.ssl(sock, key_file, cert_file)
return httplib.FakeSocket(sock, ssl_sock)
if sys.version_info >= (2,3):
from iri2uri import iri2uri
else:
def iri2uri(uri):
return uri
def has_timeout(timeout): # python 2.6
if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'):
return (timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT)
return (timeout is not None)
__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error',
'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent',
'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError',
'debuglevel', 'ProxiesUnavailableError']
# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0
# A request will be tried 'RETRIES' times if it fails at the socket/connection level.
RETRIES = 2
# Python 2.3 support
if sys.version_info < (2,4):
def sorted(seq):
seq.sort()
return seq
# Python 2.3 support
def HTTPResponse__getheaders(self):
"""Return list of (header, value) tuples."""
if self.msg is None:
raise httplib.ResponseNotReady()
return self.msg.items()
if not hasattr(httplib.HTTPResponse, 'getheaders'):
httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass
# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
def __init__(self, desc, response, content):
self.response = response
self.content = content
HttpLib2Error.__init__(self, desc)
class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
class RedirectLimit(HttpLib2ErrorWithResponse): pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class MalformedHeader(HttpLib2Error): pass
class RelativeURIError(HttpLib2Error): pass
class ServerNotFoundError(HttpLib2Error): pass
class ProxiesUnavailableError(HttpLib2Error): pass
class CertificateValidationUnsupported(HttpLib2Error): pass
class SSLHandshakeError(HttpLib2Error): pass
class NotSupportedOnThisPlatform(HttpLib2Error): pass
class CertificateHostnameMismatch(SSLHandshakeError):
def __init__(self, desc, host, cert):
HttpLib2Error.__init__(self, desc)
self.host = host
self.cert = cert
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5
# Default CA certificates file bundled with httplib2.
CA_CERTS = os.path.join(
os.path.dirname(os.path.abspath(__file__ )), "cacerts.txt")
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
hopbyhop = list(HOP_BY_HOP)
hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')])
return [header for header in response.keys() if header not in hopbyhop]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
"""Parses a URI using the regex given in Appendix B of RFC 3986.
(scheme, authority, path, query, fragment) = parse_uri(uri)
"""
groups = URI.match(uri).groups()
return (groups[1], groups[3], groups[4], groups[6], groups[8])
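# For example (illustrative URI):
#   parse_uri("http://example.com/a/b?x=1#frag")
#   -> ('http', 'example.com', '/a/b', 'x=1', 'frag')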
def urlnorm(uri):
(scheme, authority, path, query, fragment) = parse_uri(uri)
if not scheme or not authority:
raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
authority = authority.lower()
scheme = scheme.lower()
if not path:
path = "/"
# Could do syntax based normalization of the URI before
# computing the digest. See Section 6.2.2 of Std 66.
request_uri = query and "?".join([path, query]) or path
scheme = scheme.lower()
defrag_uri = scheme + "://" + authority + request_uri
return scheme, authority, request_uri, defrag_uri
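# e.g. urlnorm("HTTP://Example.COM") (illustrative) returns
#   ('http', 'example.com', '/', 'http://example.com/')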
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')
re_slash = re.compile(r'[?/:|]+')
def safename(filename):
"""Return a filename suitable for the cache.
Strips dangerous and common characters to create a filename we
can use to store the cache in.
"""
try:
if re_url_scheme.match(filename):
if isinstance(filename,str):
filename = filename.decode('utf-8')
filename = filename.encode('idna')
else:
filename = filename.encode('idna')
except UnicodeError:
pass
if isinstance(filename,unicode):
filename=filename.encode('utf-8')
filemd5 = _md5(filename).hexdigest()
filename = re_url_scheme.sub("", filename)
filename = re_slash.sub(",", filename)
# limit length of filename
if len(filename)>200:
filename=filename[:200]
return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
    return dict([ (key.lower(), NORMALIZE_SPACE.sub(' ', value).strip()) for (key, value) in headers.iteritems()])
def _parse_cache_control(headers):
retval = {}
if headers.has_key('cache-control'):
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on, useful for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
"""Returns a dictionary of dictionaries, one dict
per auth_scheme."""
retval = {}
if headers.has_key(headername):
try:
authenticate = headers[headername].strip()
www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
while authenticate:
# Break off the scheme at the beginning of the line
if headername == 'authentication-info':
(auth_scheme, the_rest) = ('digest', authenticate)
else:
(auth_scheme, the_rest) = authenticate.split(" ", 1)
# Now loop over all the key value pairs that come after the scheme,
# being careful not to roll into the next scheme
match = www_auth.search(the_rest)
auth_params = {}
while match:
if match and len(match.groups()) == 3:
(key, value, the_rest) = match.groups()
auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
match = www_auth.search(the_rest)
retval[auth_scheme.lower()] = auth_params
authenticate = the_rest.strip()
except ValueError:
raise MalformedHeader("WWW-Authenticate")
return retval
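# Illustrative result: a header value of 'Basic realm="me"' parses to
#   {'basic': {'realm': 'me'}}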
def _entry_disposition(response_headers, request_headers):
"""Determine freshness from the Date, Expires and Cache-Control headers.
We don't handle the following:
1. Cache-Control: max-stale
2. Age: headers are not used in the calculations.
    Note that this algorithm is simpler than you might think
because we are operating as a private (non-shared) cache.
This lets us ignore 's-maxage'. We can also ignore
'proxy-invalidate' since we aren't a proxy.
We will never return a stale document as
fresh as a design decision, and thus the non-implementation
of 'max-stale'. This also lets us safely ignore 'must-revalidate'
since we operate as if every server has sent 'must-revalidate'.
Since we are private we get to ignore both 'public' and
'private' parameters. We also ignore 'no-transform' since
we don't do any transformations.
The 'no-store' parameter is handled at a higher level.
So the only Cache-Control parameters we look at are:
no-cache
only-if-cached
max-age
min-fresh
"""
retval = "STALE"
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1:
retval = "TRANSPARENT"
if 'cache-control' not in request_headers:
request_headers['cache-control'] = 'no-cache'
elif cc.has_key('no-cache'):
retval = "TRANSPARENT"
elif cc_response.has_key('no-cache'):
retval = "STALE"
elif cc.has_key('only-if-cached'):
retval = "FRESH"
elif response_headers.has_key('date'):
date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
now = time.time()
current_age = max(0, now - date)
if cc_response.has_key('max-age'):
try:
freshness_lifetime = int(cc_response['max-age'])
except ValueError:
freshness_lifetime = 0
elif response_headers.has_key('expires'):
expires = email.Utils.parsedate_tz(response_headers['expires'])
if None == expires:
freshness_lifetime = 0
else:
freshness_lifetime = max(0, calendar.timegm(expires) - date)
else:
freshness_lifetime = 0
if cc.has_key('max-age'):
try:
freshness_lifetime = int(cc['max-age'])
except ValueError:
freshness_lifetime = 0
if cc.has_key('min-fresh'):
try:
min_fresh = int(cc['min-fresh'])
except ValueError:
min_fresh = 0
current_age += min_fresh
if freshness_lifetime > current_age:
retval = "FRESH"
return retval
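# Example (illustrative): with response headers carrying a current 'date' and
# 'cache-control: max-age=3600', and no relevant request cache headers, a
# response younger than an hour is classified as "FRESH".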
def _decompressContent(response, new_content):
content = new_content
try:
encoding = response.get('content-encoding', None)
if encoding in ['gzip', 'deflate']:
if encoding == 'gzip':
content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
if encoding == 'deflate':
content = zlib.decompress(content)
response['content-length'] = str(len(content))
# Record the historical presence of the encoding in a way the won't interfere.
response['-content-encoding'] = response['content-encoding']
del response['content-encoding']
except IOError:
content = ""
raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
if cachekey:
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if cc.has_key('no-store') or cc_response.has_key('no-store'):
cache.delete(cachekey)
else:
info = email.Message.Message()
for key, value in response_headers.iteritems():
if key not in ['status','content-encoding','transfer-encoding']:
info[key] = value
# Add annotations to the cache to indicate what headers
# are variant for this request.
vary = response_headers.get('vary', None)
if vary:
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
try:
info[key] = request_headers[header]
except KeyError:
pass
status = response_headers.status
if status == 304:
status = 200
status_header = 'status: %d\r\n' % status
header_str = info.as_string()
header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
text = "".join([status_header, header_str, content])
cache.set(cachekey, text)
def _cnonce():
dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest()
return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
return base64.b64encode(_sha("%s%s%s" % (cnonce, iso_now, password)).digest()).strip()
# For credentials we need two things, first
# a pool of credential to try (not necesarily tied to BAsic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
class Authentication(object):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
self.path = path
self.host = host
self.credentials = credentials
self.http = http
def depth(self, request_uri):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return request_uri[len(self.path):].count("/")
def inscope(self, host, request_uri):
# XXX Should we normalize the request_uri?
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return (host == self.host) and path.startswith(self.path)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header. Over-ride this in sub-classes."""
pass
def response(self, response, content):
"""Gives us a chance to update with new nonces
or such returned from the last authorized response.
Over-rise this in sub-classes if necessary.
Return TRUE is the request is to be retried, for
example Digest may return stale=true.
"""
return False
class BasicAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'Basic ' + base64.b64encode("%s:%s" % self.credentials).strip()
class DigestAuthentication(Authentication):
"""Only do qop='auth' and MD5, since that
is all Apache currently implements"""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['digest']
qop = self.challenge.get('qop', 'auth')
self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
if self.challenge['qop'] is None:
raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
if self.challenge['algorithm'] != 'MD5':
raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
self.challenge['nc'] = 1
def request(self, method, request_uri, headers, content, cnonce = None):
"""Modify the request headers"""
H = lambda x: _md5(x).hexdigest()
KD = lambda s, d: H("%s:%s" % (s, d))
A2 = "".join([method, ":", request_uri])
self.challenge['cnonce'] = cnonce or _cnonce()
request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'],
'%08x' % self.challenge['nc'],
self.challenge['cnonce'],
self.challenge['qop'], H(A2)
))
headers['authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['nonce'],
request_uri,
self.challenge['algorithm'],
request_digest,
self.challenge['qop'],
self.challenge['nc'],
self.challenge['cnonce'],
)
if self.challenge.get('opaque'):
headers['authorization'] += ', opaque="%s"' % self.challenge['opaque']
self.challenge['nc'] += 1
def response(self, response, content):
if not response.has_key('authentication-info'):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
if 'true' == challenge.get('stale'):
self.challenge['nonce'] = challenge['nonce']
self.challenge['nc'] = 1
return True
else:
updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})
if updated_challenge.has_key('nextnonce'):
self.challenge['nonce'] = updated_challenge['nextnonce']
self.challenge['nc'] = 1
return False
class HmacDigestAuthentication(Authentication):
"""Adapted from Robert Sayre's code and DigestAuthentication above."""
__author__ = "Thomas Broyer ([email protected])"
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['hmacdigest']
# TODO: self.challenge['domain']
self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
if self.challenge['reason'] not in ['unauthorized', 'integrity']:
self.challenge['reason'] = 'unauthorized'
self.challenge['salt'] = self.challenge.get('salt', '')
if not self.challenge.get('snonce'):
raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
if self.challenge['algorithm'] == 'HMAC-MD5':
self.hashmod = _md5
else:
self.hashmod = _sha
if self.challenge['pw-algorithm'] == 'MD5':
self.pwhashmod = _md5
else:
self.pwhashmod = _sha
self.key = "".join([self.credentials[0], ":",
self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
":", self.challenge['realm']
])
self.key = self.pwhashmod.new(self.key).hexdigest().lower()
def request(self, method, request_uri, headers, content):
"""Modify the request headers"""
keys = _get_end2end_headers(headers)
keylist = "".join(["%s " % k for k in keys])
headers_val = "".join([headers[k] for k in keys])
created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
cnonce = _cnonce()
request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
headers['authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['snonce'],
cnonce,
request_uri,
created,
request_digest,
keylist,
)
def response(self, response, content):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
if challenge.get('reason') in ['integrity', 'stale']:
return True
return False
class WsseAuthentication(Authentication):
"""This is thinly tested and should not be relied upon.
At this time there isn't any third party server to test against.
Blogger and TypePad implemented this algorithm at one point
but Blogger has since switched to Basic over HTTPS and
TypePad has implemented it wrong, by never issuing a 401
challenge but instead requiring your client to telepathically know that
their endpoint is expecting WSSE profile="UsernameToken"."""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'WSSE profile="UsernameToken"'
iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
cnonce = _cnonce()
password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
self.credentials[0],
password_digest,
cnonce,
iso_now)
class GoogleLoginAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
from urllib import urlencode
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
service = challenge['googlelogin'].get('service', 'xapi')
        # Blogger actually returns the service in the challenge
# For the rest we guess based on the URI
if service == 'xapi' and request_uri.find("calendar") > 0:
service = "cl"
# No point in guessing Base or Spreadsheet
#elif request_uri.find("spreadsheets") > 0:
# service = "wise"
auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
lines = content.split('\n')
d = dict([tuple(line.split("=", 1)) for line in lines if line])
if resp.status == 403:
self.Auth = ""
else:
self.Auth = d['Auth']
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
AUTH_SCHEME_CLASSES = {
"basic": BasicAuthentication,
"wsse": WsseAuthentication,
"digest": DigestAuthentication,
"hmacdigest": HmacDigestAuthentication,
"googlelogin": GoogleLoginAuthentication
}
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
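# Illustrative usage sketch for the FileCache class defined below (comments
# only, not executed). FileCache is normally created for you by passing a
# directory name to Http(cache="..."), but it can be driven directly; the
# directory name and key shown here are placeholders.
#
#   cache = FileCache(".cache")
#   cache.set("http://example.org/", "cached response bytes")
#   cache.get("http://example.org/")     # -> "cached response bytes"
#   cache.delete("http://example.org/")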
class FileCache(object):
"""Uses a local directory as a store for cached files.
Not really safe to use if multiple threads or processes are going to
be running on the same cache.
"""
def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
self.cache = cache
self.safe = safe
if not os.path.exists(cache):
os.makedirs(self.cache)
def get(self, key):
retval = None
cacheFullPath = os.path.join(self.cache, self.safe(key))
try:
f = file(cacheFullPath, "rb")
retval = f.read()
f.close()
except IOError:
pass
return retval
def set(self, key, value):
cacheFullPath = os.path.join(self.cache, self.safe(key))
f = file(cacheFullPath, "wb")
f.write(value)
f.close()
def delete(self, key):
cacheFullPath = os.path.join(self.cache, self.safe(key))
if os.path.exists(cacheFullPath):
os.remove(cacheFullPath)
class Credentials(object):
def __init__(self):
self.credentials = []
def add(self, name, password, domain=""):
self.credentials.append((domain.lower(), name, password))
def clear(self):
self.credentials = []
def iter(self, domain):
for (cdomain, name, password) in self.credentials:
if cdomain == "" or domain == cdomain:
yield (name, password)
class KeyCerts(Credentials):
"""Identical to Credentials except that
name/password are mapped to key/cert."""
pass
class AllHosts(object):
pass
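# Illustrative usage sketch for the ProxyInfo class defined below (comments
# only, not executed). The proxy host, port and credentials are placeholders.
#
#   pi = ProxyInfo.from_environment()          # reads http_proxy / no_proxy
#   pi = ProxyInfo.from_url("http://user:secret@proxy.local:3128")
#   # -> proxy_host='proxy.local', proxy_port=3128,
#   #    proxy_user='user', proxy_pass='secret'
#
# A ProxyInfo instance (or a callable such as ProxyInfo.from_environment, or
# None) is what Http() accepts for its proxy_info argument.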
class ProxyInfo(object):
"""Collect information required to use a proxy."""
bypass_hosts = ()
def __init__(self, proxy_type, proxy_host, proxy_port,
proxy_rdns=None, proxy_user=None, proxy_pass=None):
"""The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX
constants. For example:
p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP,
proxy_host='localhost', proxy_port=8000)
"""
self.proxy_type = proxy_type
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.proxy_rdns = proxy_rdns
self.proxy_user = proxy_user
self.proxy_pass = proxy_pass
def astuple(self):
return (self.proxy_type, self.proxy_host, self.proxy_port,
self.proxy_rdns, self.proxy_user, self.proxy_pass)
def isgood(self):
return (self.proxy_host != None) and (self.proxy_port != None)
@classmethod
def from_environment(cls, method='http'):
"""
Read proxy info from the environment variables.
"""
if method not in ['http', 'https']:
return
env_var = method + '_proxy'
url = os.environ.get(env_var, os.environ.get(env_var.upper()))
if not url:
return
pi = cls.from_url(url, method)
no_proxy = os.environ.get('no_proxy', os.environ.get('NO_PROXY', ''))
bypass_hosts = []
if no_proxy:
bypass_hosts = no_proxy.split(',')
# special case, no_proxy=* means all hosts bypassed
if no_proxy == '*':
bypass_hosts = AllHosts
pi.bypass_hosts = bypass_hosts
return pi
@classmethod
def from_url(cls, url, method='http'):
"""
Construct a ProxyInfo from a URL (such as http_proxy env var)
"""
url = urlparse.urlparse(url)
username = None
password = None
port = None
if '@' in url[1]:
ident, host_port = url[1].split('@', 1)
if ':' in ident:
username, password = ident.split(':', 1)
else:
password = ident
else:
host_port = url[1]
if ':' in host_port:
host, port = host_port.split(':', 1)
else:
host = host_port
if port:
port = int(port)
else:
port = dict(https=443, http=80)[method]
proxy_type = 3 # socks.PROXY_TYPE_HTTP
return cls(
proxy_type = proxy_type,
proxy_host = host,
proxy_port = port,
proxy_user = username or None,
proxy_pass = password or None,
)
def applies_to(self, hostname):
return not self.bypass_host(hostname)
def bypass_host(self, hostname):
"""Has this host been excluded from the proxy config"""
if self.bypass_hosts is AllHosts:
return True
bypass = False
for domain in self.bypass_hosts:
if hostname.endswith(domain):
bypass = True
return bypass
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
"""
HTTPConnection subclass that supports timeouts
All timeouts are in seconds. If None is passed for timeout then
Python's default timeout for sockets will be used. See for example
the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
"""
def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
httplib.HTTPConnection.__init__(self, host, port, strict)
self.timeout = timeout
self.proxy_info = proxy_info
def connect(self):
"""Connect to the host and port specified in __init__."""
# Mostly verbatim from httplib.py.
if self.proxy_info and socks is None:
raise ProxiesUnavailableError(
'Proxy support missing but proxy use was requested!')
msg = "getaddrinfo returns an empty list"
if self.proxy_info and self.proxy_info.isgood():
use_proxy = True
proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple()
else:
use_proxy = False
if use_proxy and proxy_rdns:
host = proxy_host
port = proxy_port
else:
host = self.host
port = self.port
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
if use_proxy:
self.sock = socks.socksocket(af, socktype, proto)
self.sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)
else:
self.sock = socket.socket(af, socktype, proto)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# Different from httplib: support timeouts.
if has_timeout(self.timeout):
self.sock.settimeout(self.timeout)
# End of difference from httplib.
if self.debuglevel > 0:
print "connect: (%s, %s) ************" % (self.host, self.port)
if use_proxy:
print "proxy: %s ************" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
self.sock.connect((self.host, self.port) + sa[2:])
except socket.error, msg:
if self.debuglevel > 0:
print "connect fail: (%s, %s)" % (self.host, self.port)
if use_proxy:
print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
"""
This class allows communication via SSL.
All timeouts are in seconds. If None is passed for timeout then
Python's default timeout for sockets will be used. See for example
the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
"""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None,
ca_certs=None, disable_ssl_certificate_validation=False):
httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file,
cert_file=cert_file, strict=strict)
self.timeout = timeout
self.proxy_info = proxy_info
if ca_certs is None:
ca_certs = CA_CERTS
self.ca_certs = ca_certs
self.disable_ssl_certificate_validation = \
disable_ssl_certificate_validation
# The following two methods were adapted from https_wrapper.py, released
# with the Google Appengine SDK at
# http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py
# under the following license:
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def _GetValidHostsForCert(self, cert):
"""Returns a list of valid host globs for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
Returns:
list: A list of valid host globs.
"""
if 'subjectAltName' in cert:
return [x[1] for x in cert['subjectAltName']
if x[0].lower() == 'dns']
else:
return [x[0][1] for x in cert['subject']
if x[0][0].lower() == 'commonname']
def _ValidateCertificateHostname(self, cert, hostname):
"""Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate.
"""
hosts = self._GetValidHostsForCert(cert)
for host in hosts:
host_re = host.replace('.', '\.').replace('*', '[^.]*')
if re.search('^%s$' % (host_re,), hostname, re.I):
return True
return False
def connect(self):
"Connect to a host on a given (SSL) port."
msg = "getaddrinfo returns an empty list"
if self.proxy_info and self.proxy_info.isgood():
use_proxy = True
proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple()
else:
use_proxy = False
if use_proxy and proxy_rdns:
host = proxy_host
port = proxy_port
else:
host = self.host
port = self.port
for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo(
host, port, 0, socket.SOCK_STREAM):
try:
if use_proxy:
sock = socks.socksocket(family, socktype, proto)
sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)
else:
sock = socket.socket(family, socktype, proto)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if has_timeout(self.timeout):
sock.settimeout(self.timeout)
sock.connect((self.host, self.port))
                self.sock = _ssl_wrap_socket(
sock, self.key_file, self.cert_file,
self.disable_ssl_certificate_validation, self.ca_certs)
if self.debuglevel > 0:
print "connect: (%s, %s)" % (self.host, self.port)
if use_proxy:
print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
if not self.disable_ssl_certificate_validation:
cert = self.sock.getpeercert()
                    hostname = self.host.split(':')[0]
if not self._ValidateCertificateHostname(cert, hostname):
raise CertificateHostnameMismatch(
'Server presented certificate that does not match '
'host %s: %s' % (hostname, cert), hostname, cert)
except ssl_SSLError, e:
if sock:
sock.close()
if self.sock:
self.sock.close()
self.sock = None
# Unfortunately the ssl module doesn't seem to provide any way
# to get at more detailed error information, in particular
# whether the error is due to certificate validation or
# something else (such as SSL protocol mismatch).
if e.errno == ssl.SSL_ERROR_SSL:
raise SSLHandshakeError(e)
else:
raise
except (socket.timeout, socket.gaierror):
raise
except socket.error, msg:
if self.debuglevel > 0:
print "connect fail: (%s, %s)" % (self.host, self.port)
if use_proxy:
print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
SCHEME_TO_CONNECTION = {
'http': HTTPConnectionWithTimeout,
'https': HTTPSConnectionWithTimeout
}
# Use a different connection object for Google App Engine
try:
from google.appengine.api import apiproxy_stub_map
if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None:
raise ImportError # Bail out; we're not actually running on App Engine.
from google.appengine.api.urlfetch import fetch
from google.appengine.api.urlfetch import InvalidURLError
from google.appengine.api.urlfetch import DownloadError
from google.appengine.api.urlfetch import ResponseTooLargeError
from google.appengine.api.urlfetch import SSLCertificateError
class ResponseDict(dict):
"""Is a dictionary that also has a read() method, so
    that it can pass itself off as an httplib.HTTPResponse()."""
def read(self):
pass
class AppEngineHttpConnection(object):
"""Emulates an httplib.HTTPConnection object, but actually uses the Google
App Engine urlfetch library. This allows the timeout to be properly used on
Google App Engine, and avoids using httplib, which on Google App Engine is
just another wrapper around urlfetch.
"""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None, ca_certs=None,
disable_ssl_certificate_validation=False):
self.host = host
self.port = port
self.timeout = timeout
if key_file or cert_file or proxy_info or ca_certs:
raise NotSupportedOnThisPlatform()
self.response = None
self.scheme = 'http'
self.validate_certificate = not disable_ssl_certificate_validation
self.sock = True
def request(self, method, url, body, headers):
# Calculate the absolute URI, which fetch requires
netloc = self.host
if self.port:
netloc = '%s:%s' % (self.host, self.port)
absolute_uri = '%s://%s%s' % (self.scheme, netloc, url)
try:
try: # 'body' can be a stream.
body = body.read()
except AttributeError:
pass
response = fetch(absolute_uri, payload=body, method=method,
headers=headers, allow_truncated=False, follow_redirects=False,
deadline=self.timeout,
validate_certificate=self.validate_certificate)
self.response = ResponseDict(response.headers)
self.response['status'] = str(response.status_code)
self.response['reason'] = httplib.responses.get(response.status_code, 'Ok')
self.response.status = response.status_code
setattr(self.response, 'read', lambda : response.content)
# Make sure the exceptions raised match the exceptions expected.
except InvalidURLError:
raise socket.gaierror('')
except (DownloadError, ResponseTooLargeError, SSLCertificateError):
raise httplib.HTTPException()
def getresponse(self):
if self.response:
return self.response
else:
raise httplib.HTTPException()
def set_debuglevel(self, level):
pass
def connect(self):
pass
def close(self):
pass
class AppEngineHttpsConnection(AppEngineHttpConnection):
"""Same as AppEngineHttpConnection, but for HTTPS URIs."""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None, ca_certs=None,
disable_ssl_certificate_validation=False):
AppEngineHttpConnection.__init__(self, host, port, key_file, cert_file,
strict, timeout, proxy_info, ca_certs, disable_ssl_certificate_validation)
self.scheme = 'https'
  # Update the connection classes to use the Google App Engine specific ones.
SCHEME_TO_CONNECTION = {
'http': AppEngineHttpConnection,
'https': AppEngineHttpsConnection
}
except ImportError:
pass
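# Illustrative usage sketch for the Http class defined below (comments only,
# not executed); the URL is a placeholder.
#
#   h = Http(".cache")                     # responses cached under ./.cache
#   resp, content = h.request("http://example.org/", "GET")
#   print resp.status, resp['content-type']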
class Http(object):
"""An HTTP client that handles:
- all methods
- caching
- ETags
- compression,
- HTTPS
- Basic
- Digest
- WSSE
and more.
"""
def __init__(self, cache=None, timeout=None,
proxy_info=ProxyInfo.from_environment,
ca_certs=None, disable_ssl_certificate_validation=False):
"""If 'cache' is a string then it is used as a directory name for
a disk cache. Otherwise it must be an object that supports the
same interface as FileCache.
All timeouts are in seconds. If None is passed for timeout
then Python's default timeout for sockets will be used. See
for example the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
`proxy_info` may be:
- a callable that takes the http scheme ('http' or 'https') and
returns a ProxyInfo instance per request. By default, uses
ProxyInfo.from_environment.
- a ProxyInfo instance (static proxy config).
- None (proxy disabled).
ca_certs is the path of a file containing root CA certificates for SSL
server certificate validation. By default, a CA cert file bundled with
httplib2 is used.
If disable_ssl_certificate_validation is true, SSL cert validation will
not be performed.
"""
self.proxy_info = proxy_info
self.ca_certs = ca_certs
self.disable_ssl_certificate_validation = \
disable_ssl_certificate_validation
# Map domain name to an httplib connection
self.connections = {}
# The location of the cache, for now a directory
# where cached responses are held.
if cache and isinstance(cache, basestring):
self.cache = FileCache(cache)
else:
self.cache = cache
# Name/password
self.credentials = Credentials()
# Key/cert
self.certificates = KeyCerts()
# authorization objects
self.authorizations = []
# If set to False then no redirects are followed, even safe ones.
self.follow_redirects = True
# Which HTTP methods do we apply optimistic concurrency to, i.e.
# which methods get an "if-match:" etag header added to them.
self.optimistic_concurrency_methods = ["PUT", "PATCH"]
# If 'follow_redirects' is True, and this is set to True then
        # all redirects are followed, including unsafe ones.
self.follow_all_redirects = False
self.ignore_etag = False
self.force_exception_to_status_code = False
self.timeout = timeout
# Keep Authorization: headers on a redirect.
self.forward_authorization_headers = False
def _auth_from_challenge(self, host, request_uri, headers, response, content):
"""A generator that creates Authorization objects
that can be applied to requests.
"""
challenges = _parse_www_authenticate(response, 'www-authenticate')
for cred in self.credentials.iter(host):
for scheme in AUTH_SCHEME_ORDER:
if challenges.has_key(scheme):
yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
def add_credentials(self, name, password, domain=""):
"""Add a name and password that will be used
any time a request requires authentication."""
self.credentials.add(name, password, domain)
def add_certificate(self, key, cert, domain):
"""Add a key and cert that will be used
any time a request requires authentication."""
self.certificates.add(key, cert, domain)
def clear_credentials(self):
"""Remove all the names and passwords
that are used for authentication"""
self.credentials.clear()
self.authorizations = []
def _conn_request(self, conn, request_uri, method, body, headers):
for i in range(RETRIES):
try:
if conn.sock is None:
conn.connect()
conn.request(method, request_uri, body, headers)
except socket.timeout:
raise
except socket.gaierror:
conn.close()
raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
except ssl_SSLError:
conn.close()
raise
except socket.error, e:
err = 0
if hasattr(e, 'args'):
err = getattr(e, 'args')[0]
else:
err = e.errno
if err == errno.ECONNREFUSED: # Connection refused
raise
except httplib.HTTPException:
# Just because the server closed the connection doesn't apparently mean
# that the server didn't send a response.
if conn.sock is None:
if i < RETRIES-1:
conn.close()
conn.connect()
continue
else:
conn.close()
raise
if i < RETRIES-1:
conn.close()
conn.connect()
continue
try:
response = conn.getresponse()
except (socket.error, httplib.HTTPException):
if i < RETRIES-1:
conn.close()
conn.connect()
continue
else:
raise
else:
content = ""
if method == "HEAD":
conn.close()
else:
content = response.read()
response = Response(response)
if method != "HEAD":
content = _decompressContent(response, content)
break
return (response, content)
def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
"""Do the actual request using the connection object
and also follow one level of redirects if necessary"""
auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
auth = auths and sorted(auths)[0][1] or None
if auth:
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers)
if auth:
if auth.response(response, body):
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers )
response._stale_digest = 1
if response.status == 401:
for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
authorization.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers, )
if response.status != 401:
self.authorizations.append(authorization)
authorization.response(response, body)
break
if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
# Pick out the location header and basically start from the beginning
# remembering first to strip the ETag header and decrement our 'depth'
if redirections:
if not response.has_key('location') and response.status != 300:
raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
# Fix-up relative redirects (which violate an RFC 2616 MUST)
if response.has_key('location'):
location = response['location']
(scheme, authority, path, query, fragment) = parse_uri(location)
if authority == None:
response['location'] = urlparse.urljoin(absolute_uri, location)
if response.status == 301 and method in ["GET", "HEAD"]:
response['-x-permanent-redirect-url'] = response['location']
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
if headers.has_key('if-none-match'):
del headers['if-none-match']
if headers.has_key('if-modified-since'):
del headers['if-modified-since']
if 'authorization' in headers and not self.forward_authorization_headers:
del headers['authorization']
if response.has_key('location'):
location = response['location']
old_response = copy.deepcopy(response)
if not old_response.has_key('content-location'):
old_response['content-location'] = absolute_uri
redirect_method = method
if response.status in [302, 303]:
redirect_method = "GET"
body = None
(response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
response.previous = old_response
else:
                        raise RedirectLimit("Redirected more times than redirection_limit allows.", response, content)
elif response.status in [200, 203] and method in ["GET", "HEAD"]:
# Don't cache 206's since we aren't going to handle byte range requests
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
return (response, content)
def _normalize_headers(self, headers):
return _normalize_headers(headers)
# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.
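    # Illustrative sketch of a call to the request() method below (comments
    # only, not executed): a POST through an Http instance 'h'; the URL and
    # form body are placeholders.
    #
    #   resp, content = h.request(
    #       "http://example.org/api", "POST",
    #       body=urllib.urlencode({'q': 'value'}),
    #       headers={'content-type': 'application/x-www-form-urlencoded'})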
def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
""" Performs a single HTTP request.
The 'uri' is the URI of the HTTP resource and can begin
with either 'http' or 'https'. The value of 'uri' must be an absolute URI.
The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc.
There is no restriction on the methods allowed.
The 'body' is the entity body to be sent with the request. It is a string
object.
Any extra headers that are to be sent with the request should be provided in the
'headers' dictionary.
        The maximum number of redirects to follow before raising an
        exception is 'redirections'. The default is 5.
The return value is a tuple of (response, content), the first
        being an instance of the 'Response' class, the second being
a string that contains the response entity body.
"""
try:
if headers is None:
headers = {}
else:
headers = self._normalize_headers(headers)
if not headers.has_key('user-agent'):
headers['user-agent'] = "Python-httplib2/%s (gzip)" % __version__
uri = iri2uri(uri)
(scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
domain_port = authority.split(":")[0:2]
if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
scheme = 'https'
authority = domain_port[0]
proxy_info = self._get_proxy_info(scheme, authority)
conn_key = scheme+":"+authority
if conn_key in self.connections:
conn = self.connections[conn_key]
else:
if not connection_type:
connection_type = SCHEME_TO_CONNECTION[scheme]
certs = list(self.certificates.iter(authority))
if scheme == 'https':
if certs:
conn = self.connections[conn_key] = connection_type(
authority, key_file=certs[0][0],
cert_file=certs[0][1], timeout=self.timeout,
proxy_info=proxy_info,
ca_certs=self.ca_certs,
disable_ssl_certificate_validation=
self.disable_ssl_certificate_validation)
else:
conn = self.connections[conn_key] = connection_type(
authority, timeout=self.timeout,
proxy_info=proxy_info,
ca_certs=self.ca_certs,
disable_ssl_certificate_validation=
self.disable_ssl_certificate_validation)
else:
conn = self.connections[conn_key] = connection_type(
authority, timeout=self.timeout,
proxy_info=proxy_info)
conn.set_debuglevel(debuglevel)
if 'range' not in headers and 'accept-encoding' not in headers:
headers['accept-encoding'] = 'gzip, deflate'
info = email.Message.Message()
cached_value = None
if self.cache:
cachekey = defrag_uri
cached_value = self.cache.get(cachekey)
if cached_value:
# info = email.message_from_string(cached_value)
#
# Need to replace the line above with the kludge below
# to fix the non-existent bug not fixed in this
# bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
try:
info, content = cached_value.split('\r\n\r\n', 1)
feedparser = email.FeedParser.FeedParser()
feedparser.feed(info)
info = feedparser.close()
feedparser._parse = None
except (IndexError, ValueError):
self.cache.delete(cachekey)
cachekey = None
cached_value = None
else:
cachekey = None
if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
# http://www.w3.org/1999/04/Editing/
headers['if-match'] = info['etag']
if method not in ["GET", "HEAD"] and self.cache and cachekey:
# RFC 2616 Section 13.10
self.cache.delete(cachekey)
# Check the vary header in the cache to see if this request
# matches what varies in the cache.
if method in ['GET', 'HEAD'] and 'vary' in info:
vary = info['vary']
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
value = info[key]
if headers.get(header, None) != value:
cached_value = None
break
if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
if info.has_key('-x-permanent-redirect-url'):
# Should cached permanent redirects be counted in our redirection count? For now, yes.
if redirections <= 0:
                        raise RedirectLimit("Redirected more times than redirection_limit allows.", {}, "")
(response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
response.previous = Response(info)
response.previous.fromcache = True
else:
# Determine our course of action:
# Is the cached entry fresh or stale?
# Has the client requested a non-cached response?
#
# There seems to be three possible answers:
# 1. [FRESH] Return the cache entry w/o doing a GET
# 2. [STALE] Do the GET (but add in cache validators if available)
# 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
entry_disposition = _entry_disposition(info, headers)
if entry_disposition == "FRESH":
if not cached_value:
info['status'] = '504'
content = ""
response = Response(info)
if cached_value:
response.fromcache = True
return (response, content)
if entry_disposition == "STALE":
if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
headers['if-none-match'] = info['etag']
if info.has_key('last-modified') and not 'last-modified' in headers:
headers['if-modified-since'] = info['last-modified']
elif entry_disposition == "TRANSPARENT":
pass
(response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
if response.status == 304 and method == "GET":
# Rewrite the cache entry with the new end-to-end headers
# Take all headers that are in response
# and overwrite their values in info.
# unless they are hop-by-hop, or are listed in the connection header.
for key in _get_end2end_headers(response):
info[key] = response[key]
merged_response = Response(info)
if hasattr(response, "_stale_digest"):
merged_response._stale_digest = response._stale_digest
_updateCache(headers, merged_response, content, self.cache, cachekey)
response = merged_response
response.status = 200
response.fromcache = True
elif response.status == 200:
content = new_content
else:
self.cache.delete(cachekey)
content = new_content
else:
cc = _parse_cache_control(headers)
if cc.has_key('only-if-cached'):
info['status'] = '504'
response = Response(info)
content = ""
else:
(response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
except Exception, e:
if self.force_exception_to_status_code:
if isinstance(e, HttpLib2ErrorWithResponse):
response = e.response
content = e.content
response.status = 500
response.reason = str(e)
elif isinstance(e, socket.timeout):
content = "Request Timeout"
response = Response( {
"content-type": "text/plain",
"status": "408",
"content-length": len(content)
})
response.reason = "Request Timeout"
else:
content = str(e)
response = Response( {
"content-type": "text/plain",
"status": "400",
"content-length": len(content)
})
response.reason = "Bad Request"
else:
raise
return (response, content)
def _get_proxy_info(self, scheme, authority):
"""Return a ProxyInfo instance (or None) based on the scheme
and authority.
"""
hostname, port = urllib.splitport(authority)
proxy_info = self.proxy_info
if callable(proxy_info):
proxy_info = proxy_info(scheme)
if (hasattr(proxy_info, 'applies_to')
and not proxy_info.applies_to(hostname)):
proxy_info = None
return proxy_info
class Response(dict):
"""An object more like email.Message than httplib.HTTPResponse."""
"""Is this response from our local cache"""
fromcache = False
"""HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """
version = 11
"Status code returned by server. "
status = 200
"""Reason phrase returned by server."""
reason = "Ok"
previous = None
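    # Illustrative sketch (comments only, not executed): a Response doubles as
    # a dict of lower-cased response headers in addition to the attributes
    # above, e.g.:
    #
    #   resp.status            # e.g. 200
    #   resp['content-type']   # header lookup
    #   resp.fromcache         # True when served from the local cache
    #   resp.previous          # Response for the prior hop of a redirect chain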
def __init__(self, info):
# info is either an email.Message or
# an httplib.HTTPResponse object.
if isinstance(info, httplib.HTTPResponse):
for key, value in info.getheaders():
self[key.lower()] = value
self.status = info.status
self['status'] = str(self.status)
self.reason = info.reason
self.version = info.version
elif isinstance(info, email.Message.Message):
for key, value in info.items():
self[key.lower()] = value
self.status = int(self['status'])
else:
for key, value in info.iteritems():
self[key.lower()] = value
self.status = int(self.get('status', self.status))
self.reason = self.get('reason', self.reason)
def __getattr__(self, name):
if name == 'dict':
return self
else:
raise AttributeError, name
| gpl-2.0 | -188,720,780,688,926,820 | 40.562832 | 235 | 0.579398 | false |
pulilab/django-collectform | docs/conf.py | 1 | 8195 | # -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import collectform
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-collectform'
copyright = u'2014, Viktor Nagy'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = collectform.__version__
# The full version, including alpha/beta/rc tags.
release = collectform.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-collectformdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-collectform.tex', u'django-collectform Documentation',
u'Viktor Nagy', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-collectform', u'django-collectform Documentation',
[u'Viktor Nagy'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-collectform', u'django-collectform Documentation',
u'Viktor Nagy', 'django-collectform', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-3-clause | -3,650,898,579,533,927,400 | 31.26378 | 80 | 0.708359 | false |
0x90sled/catapult | third_party/beautifulsoup4/scripts/demonstrate_parser_differences.py | 73 | 2976 | """Demonstrate how different parsers parse the same markup.
Beautiful Soup can use any of a number of different parsers. Every
parser should behave more or less the same on valid markup, and
Beautiful Soup's unit tests make sure this is the case. But every
parser handles invalid markup differently. Even different versions of
the same parser handle invalid markup differently. So instead of unit
tests I've created this educational demonstration script.
The file demonstration_markup.txt contains many lines of HTML. This
script tests each line of markup against every parser you have
installed, and prints out how each parser sees that markup. This may
help you choose a parser, or understand why Beautiful Soup presents
your document the way it does.
"""
import os
import sys
from bs4 import BeautifulSoup
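# A minimal illustration of the kind of divergence this script surfaces
# (comments only; output is indicative and depends on installed parser versions):
#
#   BeautifulSoup("<p>unclosed", "html.parser")  # -> <p>unclosed</p>
#   BeautifulSoup("<p>unclosed", "lxml")  # -> <html><body><p>unclosed</p></body></html>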
parsers = ['html.parser']
try:
from bs4.builder import _lxml
parsers.append('lxml')
except ImportError, e:
pass
try:
from bs4.builder import _html5lib
parsers.append('html5lib')
except ImportError, e:
pass
class Demonstration(object):
def __init__(self, markup):
self.results = {}
self.markup = markup
def run_against(self, *parser_names):
uniform_results = True
previous_output = None
for parser in parser_names:
try:
soup = BeautifulSoup(self.markup, parser)
if markup.startswith("<div>"):
# Extract the interesting part
output = soup.div
else:
output = soup
except Exception, e:
output = "[EXCEPTION] %s" % str(e)
self.results[parser] = output
if previous_output is None:
previous_output = output
elif previous_output != output:
uniform_results = False
return uniform_results
def dump(self):
print "%s: %s" % ("Markup".rjust(13), self.markup.encode("utf8"))
for parser, output in self.results.items():
print "%s: %s" % (parser.rjust(13), output.encode("utf8"))
different_results = []
uniform_results = []
print "= Testing the following parsers: %s =" % ", ".join(parsers)
print
input_file = sys.stdin
if sys.stdin.isatty():
for filename in [
"demonstration_markup.txt",
os.path.join("scripts", "demonstration_markup.txt")]:
if os.path.exists(filename):
input_file = open(filename)
for markup in input_file:
demo = Demonstration(markup.decode("utf8").strip().replace("\\n", "\n"))
is_uniform = demo.run_against(*parsers)
if is_uniform:
uniform_results.append(demo)
else:
different_results.append(demo)
print "== Markup that's handled the same in every parser =="
print
for demo in uniform_results:
demo.dump()
print
print "== Markup that's not handled the same in every parser =="
print
for demo in different_results:
demo.dump()
print
| bsd-3-clause | -5,007,886,513,142,338,000 | 30.326316 | 76 | 0.646841 | false |
rlaverde/scorecard_cps | performance_indicators_project/performance_indicators_project/urls.py | 1 | 2366 | from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', TemplateView.as_view(template_name='index.html'), name='home'),
url(r'^config$', TemplateView.as_view(template_name='edit_models.html'), name='editmodels'),
url(r'^map/$', TemplateView.as_view(template_name='strategic_map.html'), name='map'),
url(r'^accounts/', include('accounts.urls', namespace='accounts')),
url(r'^perspectives/', include('perspectives.urls', namespace='perspectives')),
url(r'^targets/', include('perspectives.urls_target', namespace='targets')),
url(r'^initiatives/', include('perspectives.urls_initiative', namespace='initiatives')),
url(r'^resources/', include('perspectives.urls_resource', namespace='resources')),
url(r'^incharge/', include('perspectives.urls_incharge', namespace='incharge')),
url(r'^committee/', include('perspectives.urls_committee', namespace='committees')),
url(r'^indicators/', include('indicators.urls', namespace='indicators')),
url(r'^main_indicators/', include('indicators.urls_main_indicator', namespace='main_indicators')),
url(r'^parameters/', include('indicators.urls_parameter', namespace='parameters')),
url(r'^periods/', include('periods.urls', namespace='periods')),
url(r'^reports/', include('periods.urls_report', namespace='reports')),
# Examples:
# url(r'^$', 'performance_indicators_project.views.home', name='home'),
# url(r'^performance_indicators_project/', include('performance_indicators_project.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
# Uncomment the next line to serve media files in dev.
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
import debug_toolbar
urlpatterns += patterns('',
url(r'^__debug__/', include(debug_toolbar.urls)),
)
| gpl-3.0 | -5,547,789,848,119,740,000 | 45.392157 | 106 | 0.684277 | false |
AlMikFox3/Ciphers | VignereCipher/vignere_cipher.py | 1 | 1867 | import time
import random
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def encrypt():
fname = input('Enter the name/path of the file to be encrypted : ')
f = open(fname, 'r')
msg = f.read()
f.close()
#key = input ('Enter Security Key (character string) for encryption :')
key = ''
kl = random.randint(10,17)
for i in range (0,kl):
n = random.randint(0,25)
key+= LETTERS[n]
keyf = ((str(time.ctime())).replace(' ','_')).replace(':','_')
f = open('key' + keyf,'w')
f.write(str(key))
print ('key_'+keyf+" generated....")
enc = ''
keyindex = 0
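	# Vigenere step: shift each letter forward by the alphabet index of the
	# current key letter (mod 26); non-letters are copied through unchanged
	# and do not advance the key position.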
for symbol in msg :
num = LETTERS.find(symbol.upper())
if num != -1 :
num += LETTERS.find(key[keyindex])
num = num % len(LETTERS)
enc += LETTERS[num]
keyindex += 1
if (keyindex == len(key)):
keyindex = 0
else :
enc += symbol
keyf = ((str(time.ctime())).replace(' ','_')).replace(':','_')
f = open('enc' + keyf,'w')
f.write(str(enc))
f.close()
print ('ENCRYPTION SUCCESSFUL ! enc_'+keyf+" generated....")
def decrypt():
fname = input('Enter the name/path of the file to be decrypted : ')
f = open(fname, 'r')
msg = f.read()
f.close()
key = input ('Enter Security Key (character string) for decryption :')
enc = ''
keyindex = 0
for symbol in msg :
num = LETTERS.find(symbol.upper())
if num != -1 :
num -= LETTERS.find(key[keyindex])
num = num % len(LETTERS)
enc += LETTERS[num]
keyindex += 1
if (keyindex == len(key)):
keyindex = 0
else :
enc += symbol
keyf = ((str(time.ctime())).replace(' ','_')).replace(':','_')
f = open('dec' + keyf,'w')
f.write(str(enc))
f.close()
print ('DECRYPTION SUCCESSFUL ! dec_'+keyf+" generated....")
mode = input ('E/D ? - ')
if(mode == 'E' or mode == 'e'):
encrypt()
elif (mode == 'D' or mode == 'd'):
decrypt() | mit | -3,632,193,124,106,009,000 | 21.961538 | 72 | 0.557043 | false |
lamdnhan/osf.io | website/addons/github/views/config.py | 37 | 2946 | # -*- coding: utf-8 -*-
import httplib as http
from flask import request
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from website.project.decorators import must_have_permission
from website.project.decorators import must_not_be_registration
from website.project.decorators import must_have_addon
from ..api import GitHub
@must_be_logged_in
def github_set_user_config(**kwargs):
return {}
@must_have_permission('write')
@must_have_addon('github', 'node')
@must_not_be_registration
def github_set_config(**kwargs):
auth = kwargs['auth']
user = auth.user
node_settings = kwargs['node_addon']
node = node_settings.owner
user_settings = node_settings.user_settings
# If authorized, only owner can change settings
if user_settings and user_settings.owner != user:
raise HTTPError(http.BAD_REQUEST)
# Parse request
github_user_name = request.json.get('github_user', '')
github_repo_name = request.json.get('github_repo', '')
# Verify that repo exists and that user can access
connection = GitHub.from_settings(user_settings)
repo = connection.repo(github_user_name, github_repo_name)
if repo is None:
if user_settings:
message = (
'Cannot access repo. Either the repo does not exist '
'or your account does not have permission to view it.'
)
else:
message = (
'Cannot access repo.'
)
return {'message': message}, http.BAD_REQUEST
if not github_user_name or not github_repo_name:
raise HTTPError(http.BAD_REQUEST)
changed = (
github_user_name != node_settings.user or
github_repo_name != node_settings.repo
)
# Update hooks
if changed:
# Delete existing hook, if any
node_settings.delete_hook()
# Update node settings
node_settings.user = github_user_name
node_settings.repo = github_repo_name
# Log repo select
node.add_log(
action='github_repo_linked',
params={
'project': node.parent_id,
'node': node._id,
'github': {
'user': github_user_name,
'repo': github_repo_name,
}
},
auth=auth,
)
# Add new hook
if node_settings.user and node_settings.repo:
node_settings.add_hook(save=False)
node_settings.save()
return {}
@must_have_permission('write')
@must_have_addon('github', 'node')
def github_set_privacy(**kwargs):
github = kwargs['node_addon']
private = request.form.get('private')
if private is None:
raise HTTPError(http.BAD_REQUEST)
connection = GitHub.from_settings(github.user_settings)
connection.set_privacy(github.user, github.repo, private)
| apache-2.0 | 5,729,601,981,306,867,000 | 25.781818 | 70 | 0.61575 | false |
vk-brain/sketal | plugins/outsource/outsource_emotions_detector.py | 2 | 4569 | from handler.base_plugin import CommandPlugin
import aiohttp, json, time
class EmotionsDetectorPlugin(CommandPlugin):
__slots__ = ("key", "dirt", "clean_time", "requests_amount", "time_delta")
def __init__(self, *commands, prefixes=None, strict=False, key=None, time_delta=60, requests_amount=15):
"""Answers with results of detecting emotions on sent image."""
if not key:
            raise AttributeError("You didn't specify a key! You can get it here: https://azure.microsoft.com/ru-ru/services/cognitive-services/face/")
if not commands:
commands = ("эмоции",)
super().__init__(*commands, prefixes=prefixes, strict=strict)
self.key = key
self.dirt = 0
self.time_delta = time_delta
self.clean_time = time.time() + time_delta
self.requests_amount = requests_amount
        self.description = [f"Emotion detector",
                            f"{self.command_example()} - detect emotions on a face."]
async def process_message(self, msg):
if self.dirt >= self.requests_amount:
if time.time() >= self.clean_time:
self.dirt = 0
self.clean_time = time.time() + self.time_delta
else:
                return await msg.answer('Request limit reached! Try again in a minute or two.')
photo = False
for k, v in msg.brief_attaches.items():
if '_type' in k and v == "photo":
photo = True
break
if not photo:
            return await msg.answer('You did not send a photo!')
attach = (await msg.get_full_attaches())[0]
if not attach.url:
            return await msg.answer('You did not send a photo!')
uri_base = 'https://westcentralus.api.cognitive.microsoft.com'
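        # Azure Cognitive Services Face API v1.0 'detect' endpoint; the request
        # below asks for the age, gender and emotion attributes of each detected face.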
# Request headers.
headers = {'Content-Type': 'application/json', 'Ocp-Apim-Subscription-Key': self.key}
params = {'returnFaceId': 'true', 'returnFaceLandmarks': 'false', 'returnFaceAttributes': 'age,gender,emotion'}
body = {'url': attach.url}
try: # Execute the REST API call and get the response.
self.dirt += 1
async with aiohttp.ClientSession() as sess:
async with sess.post(uri_base + '/face/v1.0/detect', data=None, json=body, headers=headers, params=params) as resp:
response = await resp.text()
parsed = json.loads(response)
answer = ""
for i, e in enumerate(parsed):
age = e["faceAttributes"]["age"]
                sex = "female" if e["faceAttributes"]['gender'] == "female" else "male"
fear = e["faceAttributes"]["emotion"]["fear"]
anger = e["faceAttributes"]["emotion"]["anger"]
contempt = e["faceAttributes"]["emotion"]["contempt"]
disgust = e["faceAttributes"]["emotion"]["disgust"]
happiness = e["faceAttributes"]["emotion"]["happiness"]
neutral = e["faceAttributes"]["emotion"]["neutral"]
sadness = e["faceAttributes"]["emotion"]["sadness"]
surprise = e["faceAttributes"]["emotion"]["surprise"]
                answer += f"Photo analysis (face #{i + 1})\n💁♂️Age: {age}\n👫Gender: {sex}\n😵Fear: {fear}\n😤Anger: {anger}\n" \
                          f"😐Contempt: {contempt}\n🤢Disgust: {disgust}\n🙂Happiness: {happiness}\n" \
                          f"😶Neutral: {neutral}\n😔Sadness: {sadness}\n😯Surprise: {surprise}\n\n"
if not answer:
raise ValueError("No answer")
return await msg.answer(answer)
except TypeError:
            return await msg.answer(chat_id=msg.chat_id, message="Oops! My access key has probably stopped working.")
except ValueError:
pass
except Exception as e:
import traceback
traceback.print_exc()
            await msg.answer(chat_id=msg.chat_id, message="Could not detect a face in the photo")
| mit | -1,500,214,486,721,104,000 | 40.792079 | 150 | 0.551528 | false |
joerg84/arangodb | 3rdParty/V8/v5.7.0.0/tools/gyp/test/mac/gyptest-app.py | 34 | 4074 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that app bundles are built correctly.
"""
import TestGyp
import TestMac
import os
import plistlib
import subprocess
import sys
if sys.platform in ('darwin', 'win32'):
print "This test is currently disabled: https://crbug.com/483696."
sys.exit(0)
def CheckFileXMLPropertyList(file):
output = subprocess.check_output(['file', file])
# The double space after XML is intentional.
  if not 'XML  document text' in output:
    print 'File: Expected XML  document text, got %s' % output
test.fail_test()
def ExpectEq(expected, actual):
if expected != actual:
print >>sys.stderr, 'Expected "%s", got "%s"' % (expected, actual)
test.fail_test()
def ls(path):
'''Returns a list of all files in a directory, relative to the directory.'''
result = []
for dirpath, _, files in os.walk(path):
for f in files:
result.append(os.path.join(dirpath, f)[len(path) + 1:])
return result
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
test.run_gyp('test.gyp', chdir='app-bundle')
test.build('test.gyp', test.ALL, chdir='app-bundle')
# Binary
test.built_file_must_exist('Test App Gyp.app/Contents/MacOS/Test App Gyp',
chdir='app-bundle')
# Info.plist
info_plist = test.built_file_path('Test App Gyp.app/Contents/Info.plist',
chdir='app-bundle')
test.must_exist(info_plist)
test.must_contain(info_plist, 'com.google.Test-App-Gyp') # Variable expansion
test.must_not_contain(info_plist, '${MACOSX_DEPLOYMENT_TARGET}');
CheckFileXMLPropertyList(info_plist)
if test.format != 'make':
# TODO: Synthesized plist entries aren't hooked up in the make generator.
machine = subprocess.check_output(['sw_vers', '-buildVersion']).rstrip('\n')
plist = plistlib.readPlist(info_plist)
ExpectEq(machine, plist['BuildMachineOSBuild'])
# Prior to Xcode 5.0.0, SDKROOT (and thus DTSDKName) was only defined if
# set in the Xcode project file. Starting with that version, it is always
# defined.
expected = ''
if TestMac.Xcode.Version() >= '0500':
version = TestMac.Xcode.SDKVersion()
expected = 'macosx' + version
ExpectEq(expected, plist['DTSDKName'])
sdkbuild = TestMac.Xcode.SDKBuild()
if not sdkbuild:
# Above command doesn't work in Xcode 4.2.
sdkbuild = plist['BuildMachineOSBuild']
ExpectEq(sdkbuild, plist['DTSDKBuild'])
ExpectEq(TestMac.Xcode.Version(), plist['DTXcode'])
ExpectEq(TestMac.Xcode.Build(), plist['DTXcodeBuild'])
# Resources
strings_files = ['InfoPlist.strings', 'utf-16be.strings', 'utf-16le.strings']
for f in strings_files:
strings = test.built_file_path(
os.path.join('Test App Gyp.app/Contents/Resources/English.lproj', f),
chdir='app-bundle')
test.must_exist(strings)
    # Xcode writes UTF-16LE with BOM.
contents = open(strings, 'rb').read()
if not contents.startswith('\xff\xfe' + '/* Localized'.encode('utf-16le')):
test.fail_test()
test.built_file_must_exist(
'Test App Gyp.app/Contents/Resources/English.lproj/MainMenu.nib',
chdir='app-bundle')
# Packaging
test.built_file_must_exist('Test App Gyp.app/Contents/PkgInfo',
chdir='app-bundle')
test.built_file_must_match('Test App Gyp.app/Contents/PkgInfo', 'APPLause',
chdir='app-bundle')
# Check that no other files get added to the bundle.
if set(ls(test.built_file_path('Test App Gyp.app', chdir='app-bundle'))) != \
set(['Contents/MacOS/Test App Gyp',
'Contents/Info.plist',
'Contents/Resources/English.lproj/MainMenu.nib',
'Contents/PkgInfo',
] +
[os.path.join('Contents/Resources/English.lproj', f)
for f in strings_files]):
test.fail_test()
test.pass_test()
| apache-2.0 | -6,283,517,442,637,507,000 | 32.95 | 80 | 0.65513 | false |
georgestarcher/TA-SyncKVStore | bin/ta_synckvstore/solnlib/packages/splunklib/searchcommands/search_command.py | 4 | 38354 | # coding=utf-8
#
# Copyright © 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
# Absolute imports
from ..client import Service
from collections import namedtuple
try:
from collections import OrderedDict # must be python 2.7
except ImportError:
from ..ordereddict import OrderedDict
from copy import deepcopy
from cStringIO import StringIO
from itertools import chain, ifilter, imap, islice, izip
from logging import _levelNames, getLevelName, getLogger
try:
from shutil import make_archive
except ImportError:
# Used for recording, skip on python 2.6
pass
from time import time
from urllib import unquote
from urlparse import urlsplit
from warnings import warn
from xml.etree import ElementTree
import os
import sys
import re
import csv
import tempfile
import traceback
# Relative imports
from . internals import (
CommandLineParser,
CsvDialect,
InputHeader,
Message,
MetadataDecoder,
MetadataEncoder,
ObjectView,
Recorder,
RecordWriterV1,
RecordWriterV2,
json_encode_string)
from . import Boolean, Option, environment
# ----------------------------------------------------------------------------------------------------------------------
# P1 [ ] TODO: Log these issues against ChunkedExternProcessor
#
# 1. Implement requires_preop configuration setting.
# This configuration setting is currently rejected by ChunkedExternProcessor.
#
# 2. Rename type=events as type=eventing for symmetry with type=reporting and type=streaming
# Eventing commands process records on the events pipeline. This change effects ChunkedExternProcessor.cpp,
# eventing_command.py, and generating_command.py.
#
# 3. For consistency with SCPV1, commands.conf should not require filename setting when chunked = true
# The SCPV1 processor uses <stanza-name>.py as the default filename. The ChunkedExternProcessor should do the same.
# P1 [ ] TODO: Verify that ChunkedExternProcessor complains if a streaming_preop has a type other than 'streaming'
# It once looked like sending type='reporting' for the streaming_preop was accepted.
# ----------------------------------------------------------------------------------------------------------------------
# P2 [ ] TODO: Consider bumping None formatting up to Option.Item.__str__
class SearchCommand(object):
""" Represents a custom search command.
"""
def __init__(self):
# Variables that may be used, but not altered by derived classes
class_name = self.__class__.__name__
self._logger, self._logging_configuration = getLogger(class_name), environment.logging_configuration
# Variables backing option/property values
self._configuration = self.ConfigurationSettings(self)
self._input_header = InputHeader()
self._fieldnames = None
self._finished = None
self._metadata = None
self._options = None
self._protocol_version = None
self._search_results_info = None
self._service = None
# Internal variables
self._default_logging_level = self._logger.level
self._record_writer = None
self._records = None
def __str__(self):
text = ' '.join(chain((type(self).name, str(self.options)), [] if self.fieldnames is None else self.fieldnames))
return text
# region Options
@Option
def logging_configuration(self):
""" **Syntax:** logging_configuration=<path>
**Description:** Loads an alternative logging configuration file for
a command invocation. The logging configuration file must be in Python
ConfigParser-format. Path names are relative to the app root directory.
"""
return self._logging_configuration
@logging_configuration.setter
def logging_configuration(self, value):
self._logger, self._logging_configuration = environment.configure_logging(self.__class__.__name__, value)
@Option
def logging_level(self):
""" **Syntax:** logging_level=[CRITICAL|ERROR|WARNING|INFO|DEBUG|NOTSET]
**Description:** Sets the threshold for the logger of this command invocation. Logging messages less severe than
`logging_level` will be ignored.
"""
return getLevelName(self._logger.getEffectiveLevel())
@logging_level.setter
def logging_level(self, value):
if value is None:
value = self._default_logging_level
if isinstance(value, (bytes, unicode)):
try:
level = _levelNames[value.upper()]
except KeyError:
raise ValueError('Unrecognized logging level: {}'.format(value))
else:
try:
level = int(value)
except ValueError:
raise ValueError('Unrecognized logging level: {}'.format(value))
self._logger.setLevel(level)
record = Option(doc='''
        **Syntax:** record=<bool>
**Description:** When `true`, records the interaction between the command and splunkd. Defaults to `false`.
''', default=False, validate=Boolean())
show_configuration = Option(doc='''
**Syntax:** show_configuration=<bool>
**Description:** When `true`, reports command configuration as an informational message. Defaults to `false`.
''', default=False, validate=Boolean())
# endregion
# region Properties
@property
def configuration(self):
""" Returns the configuration settings for this command.
"""
return self._configuration
@property
def fieldnames(self):
""" Returns the fieldnames specified as argument to this command.
"""
return self._fieldnames
@fieldnames.setter
def fieldnames(self, value):
self._fieldnames = value
@property
def input_header(self):
""" Returns the input header for this command.
:return: The input header for this command.
:rtype: InputHeader
"""
warn(
'SearchCommand.input_header is deprecated and will be removed in a future release. '
'Please use SearchCommand.metadata instead.', DeprecationWarning, 2)
return self._input_header
@property
def logger(self):
""" Returns the logger for this command.
:return: The logger for this command.
:rtype:
"""
return self._logger
@property
def metadata(self):
return self._metadata
@property
def options(self):
""" Returns the options specified as argument to this command.
"""
if self._options is None:
self._options = Option.View(self)
return self._options
@property
def protocol_version(self):
return self._protocol_version
@property
def search_results_info(self):
""" Returns the search results info for this command invocation.
The search results info object is created from the search results info file associated with the command
invocation.
        :return: Search results info, or :const:`None` if the search results info file associated with the command
        invocation is inaccessible.
:rtype: SearchResultsInfo or NoneType
"""
if self._search_results_info is not None:
return self._search_results_info
if self._protocol_version == 1:
try:
path = self._input_header['infoPath']
except KeyError:
return None
else:
assert self._protocol_version == 2
try:
dispatch_dir = self._metadata.searchinfo.dispatch_dir
except AttributeError:
return None
path = os.path.join(dispatch_dir, 'info.csv')
try:
with open(path, 'rb') as f:
reader = csv.reader(f, dialect=CsvDialect)
fields = reader.next()
values = reader.next()
except IOError as error:
if error.errno == 2:
self.logger.error('Search results info file {} does not exist.'.format(json_encode_string(path)))
return
raise
def convert_field(field):
return (field[1:] if field[0] == '_' else field).replace('.', '_')
decode = MetadataDecoder().decode
def convert_value(value):
try:
return decode(value) if len(value) > 0 else value
except ValueError:
return value
info = ObjectView(dict(imap(lambda (f, v): (convert_field(f), convert_value(v)), izip(fields, values))))
try:
count_map = info.countMap
except AttributeError:
pass
else:
count_map = count_map.split(';')
n = len(count_map)
info.countMap = dict(izip(islice(count_map, 0, n, 2), islice(count_map, 1, n, 2)))
try:
msg_type = info.msgType
msg_text = info.msg
except AttributeError:
pass
else:
messages = ifilter(lambda (t, m): t or m, izip(msg_type.split('\n'), msg_text.split('\n')))
info.msg = [Message(message) for message in messages]
del info.msgType
try:
info.vix_families = ElementTree.fromstring(info.vix_families)
except AttributeError:
pass
self._search_results_info = info
return info
@property
def service(self):
""" Returns a Splunk service object for this command invocation or None.
The service object is created from the Splunkd URI and authentication token passed to the command invocation in
the search results info file. This data is not passed to a command invocation by default. You must request it by
specifying this pair of configuration settings in commands.conf:
.. code-block:: python
enableheader = true
requires_srinfo = true
The :code:`enableheader` setting is :code:`true` by default. Hence, you need not set it. The
:code:`requires_srinfo` setting is false by default. Hence, you must set it.
:return: :class:`splunklib.client.Service`, if :code:`enableheader` and :code:`requires_srinfo` are both
:code:`true`. Otherwise, if either :code:`enableheader` or :code:`requires_srinfo` are :code:`false`, a value
of :code:`None` is returned.
"""
if self._service is not None:
return self._service
metadata = self._metadata
if metadata is None:
return None
try:
searchinfo = self._metadata.searchinfo
except AttributeError:
return None
splunkd_uri = searchinfo.splunkd_uri
if splunkd_uri is None:
return None
uri = urlsplit(splunkd_uri, allow_fragments=False)
self._service = Service(
scheme=uri.scheme, host=uri.hostname, port=uri.port, app=searchinfo.app, token=searchinfo.session_key)
return self._service
# endregion
# region Methods
def error_exit(self, error, message=None):
self.write_error(error.message if message is None else message)
self.logger.error('Abnormal exit: %s', error)
exit(1)
def finish(self):
""" Flushes the output buffer and signals that this command has finished processing data.
:return: :const:`None`
"""
self._record_writer.flush(finished=True)
def flush(self):
""" Flushes the output buffer.
:return: :const:`None`
"""
self._record_writer.flush(partial=True)
def prepare(self):
""" Prepare for execution.
This method should be overridden in search command classes that wish to examine and update their configuration
or option settings prior to execution. It is called during the getinfo exchange before command metadata is sent
to splunkd.
:return: :const:`None`
:rtype: NoneType
"""
pass
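        # A subclass might, for example, validate option combinations here
        # (option names below are hypothetical):
        #     def prepare(self):
        #         if self.min_value > self.max_value:
        #             raise ValueError('min_value must not exceed max_value')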
def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout):
""" Process data.
:param argv: Command line arguments.
:type argv: list or tuple
:param ifile: Input data file.
:type ifile: file
:param ofile: Output data file.
:type ofile: file
:return: :const:`None`
:rtype: NoneType
"""
if len(argv) > 1:
self._process_protocol_v1(argv, ifile, ofile)
else:
self._process_protocol_v2(argv, ifile, ofile)
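        # Protocol selection: under SCP v1 splunkd passes '__GETINFO__' or
        # '__EXECUTE__' plus the search arguments on the command line, so
        # len(argv) > 1; under SCP v2 ("chunked") the command line is empty and
        # everything, including metadata, arrives on the input stream.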
def _map_input_header(self):
metadata = self._metadata
searchinfo = metadata.searchinfo
self._input_header.update(
allowStream=None,
infoPath=os.path.join(searchinfo.dispatch_dir, 'info.csv'),
keywords=None,
preview=metadata.preview,
realtime=searchinfo.earliest_time != 0 and searchinfo.latest_time != 0,
search=searchinfo.search,
sid=searchinfo.sid,
splunkVersion=searchinfo.splunk_version,
truncated=None)
def _map_metadata(self, argv):
source = SearchCommand._MetadataSource(argv, self._input_header, self.search_results_info)
def _map(metadata_map):
metadata = {}
for name, value in metadata_map.iteritems():
if isinstance(value, dict):
value = _map(value)
else:
transform, extract = value
if extract is None:
value = None
else:
value = extract(source)
if not (value is None or transform is None):
value = transform(value)
metadata[name] = value
return ObjectView(metadata)
self._metadata = _map(SearchCommand._metadata_map)
_metadata_map = {
'action':
(lambda v: 'getinfo' if v == '__GETINFO__' else 'execute' if v == '__EXECUTE__' else None, lambda s: s.argv[1]),
'preview':
(bool, lambda s: s.input_header.get('preview')),
'searchinfo': {
'app':
(lambda v: v.ppc_app, lambda s: s.search_results_info),
'args':
(None, lambda s: s.argv),
'dispatch_dir':
(os.path.dirname, lambda s: s.input_header.get('infoPath')),
'earliest_time':
(lambda v: float(v.rt_earliest) if len(v.rt_earliest) > 0 else 0.0, lambda s: s.search_results_info),
'latest_time':
(lambda v: float(v.rt_latest) if len(v.rt_latest) > 0 else 0.0, lambda s: s.search_results_info),
'owner':
(None, None),
'raw_args':
(None, lambda s: s.argv),
'search':
(unquote, lambda s: s.input_header.get('search')),
'session_key':
(lambda v: v.auth_token, lambda s: s.search_results_info),
'sid':
(None, lambda s: s.input_header.get('sid')),
'splunk_version':
(None, lambda s: s.input_header.get('splunkVersion')),
'splunkd_uri':
(lambda v: v.splunkd_uri, lambda s: s.search_results_info),
'username':
(lambda v: v.ppc_user, lambda s: s.search_results_info)}}
_MetadataSource = namedtuple(b'Source', (b'argv', b'input_header', b'search_results_info'))
def _prepare_protocol_v1(self, argv, ifile, ofile):
debug = environment.splunklib_logger.debug
# Provide as much context as possible in advance of parsing the command line and preparing for execution
self._input_header.read(ifile)
self._protocol_version = 1
self._map_metadata(argv)
debug(' metadata=%r, input_header=%r', self._metadata, self._input_header)
try:
tempfile.tempdir = self._metadata.searchinfo.dispatch_dir
except AttributeError:
raise RuntimeError('{}.metadata.searchinfo.dispatch_dir is undefined'.format(self.__class__.__name__))
debug(' tempfile.tempdir=%r', tempfile.tempdir)
CommandLineParser.parse(self, argv[2:])
self.prepare()
if self.record:
self.record = False
record_argv = [argv[0], argv[1], str(self._options), ' '.join(self.fieldnames)]
ifile, ofile = self._prepare_recording(record_argv, ifile, ofile)
self._record_writer.ofile = ofile
ifile.record(str(self._input_header), '\n\n')
if self.show_configuration:
self.write_info(self.name + ' command configuration: ' + str(self._configuration))
return ifile # wrapped, if self.record is True
def _prepare_recording(self, argv, ifile, ofile):
# Create the recordings directory, if it doesn't already exist
recordings = os.path.join(environment.splunk_home, 'var', 'run', 'splunklib.searchcommands', 'recordings')
if not os.path.isdir(recordings):
os.makedirs(recordings)
# Create input/output recorders from ifile and ofile
recording = os.path.join(recordings, self.__class__.__name__ + '-' + repr(time()) + '.' + self._metadata.action)
ifile = Recorder(recording + '.input', ifile)
ofile = Recorder(recording + '.output', ofile)
# Archive the dispatch directory--if it exists--so that it can be used as a baseline in mocks)
dispatch_dir = self._metadata.searchinfo.dispatch_dir
if dispatch_dir is not None: # __GETINFO__ action does not include a dispatch_dir
root_dir, base_dir = os.path.split(dispatch_dir)
make_archive(recording + '.dispatch_dir', 'gztar', root_dir, base_dir, logger=self.logger)
# Save a splunk command line because it is useful for developing tests
with open(recording + '.splunk_cmd', 'wb') as f:
f.write('splunk cmd python '.encode())
f.write(os.path.basename(argv[0]).encode())
for arg in islice(argv, 1, len(argv)):
f.write(' '.encode())
f.write(arg.encode())
return ifile, ofile
def _process_protocol_v1(self, argv, ifile, ofile):
debug = environment.splunklib_logger.debug
class_name = self.__class__.__name__
debug('%s.process started under protocol_version=1', class_name)
self._record_writer = RecordWriterV1(ofile)
# noinspection PyBroadException
try:
if argv[1] == '__GETINFO__':
debug('Writing configuration settings')
ifile = self._prepare_protocol_v1(argv, ifile, ofile)
self._record_writer.write_record(dict(
(n, ','.join(v) if isinstance(v, (list, tuple)) else v) for n, v in self._configuration.iteritems()))
self.finish()
elif argv[1] == '__EXECUTE__':
debug('Executing')
ifile = self._prepare_protocol_v1(argv, ifile, ofile)
self._records = self._records_protocol_v1
self._metadata.action = 'execute'
self._execute(ifile, None)
else:
message = (
'Command {0} appears to be statically configured for search command protocol version 1 and static '
'configuration is unsupported by splunklib.searchcommands. Please ensure that '
'default/commands.conf contains this stanza:\n'
'[{0}]\n'
'filename = {1}\n'
'enableheader = true\n'
'outputheader = true\n'
'requires_srinfo = true\n'
'supports_getinfo = true\n'
'supports_multivalues = true\n'
'supports_rawargs = true'.format(self.name, os.path.basename(argv[0])))
raise RuntimeError(message)
except (SyntaxError, ValueError) as error:
self.write_error(unicode(error))
self.flush()
exit(0)
except SystemExit:
self.flush()
raise
except:
self._report_unexpected_error()
self.flush()
exit(1)
debug('%s.process finished under protocol_version=1', class_name)
def _process_protocol_v2(self, argv, ifile, ofile):
""" Processes records on the `input stream optionally writing records to the output stream.
:param ifile: Input file object.
:type ifile: file or InputType
:param ofile: Output file object.
:type ofile: file or OutputType
:return: :const:`None`
"""
debug = environment.splunklib_logger.debug
class_name = self.__class__.__name__
debug('%s.process started under protocol_version=2', class_name)
self._protocol_version = 2
# Read search command metadata from splunkd
# noinspection PyBroadException
try:
debug('Reading metadata')
metadata, body = self._read_chunk(ifile)
action = getattr(metadata, 'action', None)
if action != 'getinfo':
raise RuntimeError('Expected getinfo action, not {}'.format(action))
if len(body) > 0:
raise RuntimeError('Did not expect data for getinfo action')
self._metadata = deepcopy(metadata)
searchinfo = self._metadata.searchinfo
searchinfo.earliest_time = float(searchinfo.earliest_time)
searchinfo.latest_time = float(searchinfo.latest_time)
searchinfo.search = unquote(searchinfo.search)
self._map_input_header()
debug(' metadata=%r, input_header=%r', self._metadata, self._input_header)
try:
tempfile.tempdir = self._metadata.searchinfo.dispatch_dir
except AttributeError:
                raise RuntimeError('{}.metadata.searchinfo.dispatch_dir is undefined'.format(class_name))
debug(' tempfile.tempdir=%r', tempfile.tempdir)
except:
self._record_writer = RecordWriterV2(ofile)
self._report_unexpected_error()
self.finish()
exit(1)
# Write search command configuration for consumption by splunkd
# noinspection PyBroadException
try:
self._record_writer = RecordWriterV2(ofile, getattr(self._metadata, 'maxresultrows', None))
self.fieldnames = []
self.options.reset()
args = self.metadata.searchinfo.args
error_count = 0
debug('Parsing arguments')
if args and type(args) == list:
for arg in args:
result = arg.split('=', 1)
if len(result) == 1:
self.fieldnames.append(result[0])
else:
name, value = result
try:
option = self.options[name]
except KeyError:
self.write_error('Unrecognized option: {}={}'.format(name, value))
error_count += 1
continue
try:
option.value = value
except ValueError:
self.write_error('Illegal value: {}={}'.format(name, value))
error_count += 1
continue
missing = self.options.get_missing()
if missing is not None:
if len(missing) == 1:
self.write_error('A value for "{}" is required'.format(missing[0]))
else:
self.write_error('Values for these required options are missing: {}'.format(', '.join(missing)))
error_count += 1
if error_count > 0:
exit(1)
debug(' command: %s', unicode(self))
debug('Preparing for execution')
self.prepare()
if self.record:
ifile, ofile = self._prepare_recording(argv, ifile, ofile)
self._record_writer.ofile = ofile
# Record the metadata that initiated this command after removing the record option from args/raw_args
info = self._metadata.searchinfo
for attr in 'args', 'raw_args':
setattr(info, attr, [arg for arg in getattr(info, attr) if not arg.startswith('record=')])
metadata = MetadataEncoder().encode(self._metadata)
ifile.record('chunked 1.0,', unicode(len(metadata)), ',0\n', metadata)
if self.show_configuration:
self.write_info(self.name + ' command configuration: ' + str(self._configuration))
debug(' command configuration: %s', self._configuration)
except SystemExit:
self._record_writer.write_metadata(self._configuration)
self.finish()
raise
except:
self._record_writer.write_metadata(self._configuration)
self._report_unexpected_error()
self.finish()
exit(1)
self._record_writer.write_metadata(self._configuration)
# Execute search command on data passing through the pipeline
# noinspection PyBroadException
try:
debug('Executing under protocol_version=2')
self._records = self._records_protocol_v2
self._metadata.action = 'execute'
self._execute(ifile, None)
except SystemExit:
self.finish()
raise
except:
self._report_unexpected_error()
self.finish()
exit(1)
debug('%s.process completed', class_name)
def write_debug(self, message, *args):
self._record_writer.write_message('DEBUG', message, *args)
def write_error(self, message, *args):
self._record_writer.write_message('ERROR', message, *args)
def write_fatal(self, message, *args):
self._record_writer.write_message('FATAL', message, *args)
def write_info(self, message, *args):
self._record_writer.write_message('INFO', message, *args)
def write_warning(self, message, *args):
self._record_writer.write_message('WARN', message, *args)
def write_metric(self, name, value):
""" Writes a metric that will be added to the search inspector.
:param name: Name of the metric.
:type name: basestring
:param value: A 4-tuple containing the value of metric :param:`name` where
value[0] = Elapsed seconds or :const:`None`.
value[1] = Number of invocations or :const:`None`.
value[2] = Input count or :const:`None`.
value[3] = Output count or :const:`None`.
        The :data:`SearchMetric` type provides a convenient encapsulation of :param:`value`.
:return: :const:`None`.
"""
self._record_writer.write_metric(name, value)
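        # Illustrative only (metric name and numbers are hypothetical):
        #     self.write_metric('example.stream', SearchMetric(
        #         elapsed_seconds=0.42, invocation_count=1,
        #         input_count=1000, output_count=1000))
        # SearchMetric is the namedtuple defined at module level below.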
# P2 [ ] TODO: Support custom inspector values
@staticmethod
def _decode_list(mv):
return [match.replace('$$', '$') for match in SearchCommand._encoded_value.findall(mv)]
_encoded_value = re.compile(r'\$(?P<item>(?:\$\$|[^$])*)\$(?:;|$)') # matches a single value in an encoded list
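    # For example, the encoded multivalue '$foo$;$bar$$baz$' decodes to
    # ['foo', 'bar$baz']: each value is wrapped in '$', values are separated
    # by ';', and a literal '$' inside a value is escaped as '$$'.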
def _execute(self, ifile, process):
""" Default processing loop
:param ifile: Input file object.
:type ifile: file
:param process: Bound method to call in processing loop.
:type process: instancemethod
:return: :const:`None`.
:rtype: NoneType
"""
self._record_writer.write_records(process(self._records(ifile)))
self.finish()
@staticmethod
def _read_chunk(ifile):
# noinspection PyBroadException
try:
header = ifile.readline()
except Exception as error:
raise RuntimeError('Failed to read transport header: {}'.format(error))
if not header:
return None
match = SearchCommand._header.match(header)
if match is None:
raise RuntimeError('Failed to parse transport header: {}'.format(header))
metadata_length, body_length = match.groups()
metadata_length = int(metadata_length)
body_length = int(body_length)
try:
metadata = ifile.read(metadata_length)
except Exception as error:
raise RuntimeError('Failed to read metadata of length {}: {}'.format(metadata_length, error))
decoder = MetadataDecoder()
try:
metadata = decoder.decode(metadata)
except Exception as error:
raise RuntimeError('Failed to parse metadata of length {}: {}'.format(metadata_length, error))
# if body_length <= 0:
# return metadata, ''
try:
body = ifile.read(body_length)
except Exception as error:
raise RuntimeError('Failed to read body of length {}: {}'.format(body_length, error))
return metadata, body
_header = re.compile(r'chunked\s+1.0\s*,\s*(\d+)\s*,\s*(\d+)\s*\n')
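    # A chunk on the wire therefore looks like (lengths here are illustrative):
    #     chunked 1.0,27,125\n<27 bytes of JSON metadata><125 bytes of CSV body>
    # _read_chunk parses the header line, then reads the two payloads.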
def _records_protocol_v1(self, ifile):
reader = csv.reader(ifile, dialect=CsvDialect)
try:
fieldnames = reader.next()
except StopIteration:
return
mv_fieldnames = dict([(name, name[len('__mv_'):]) for name in fieldnames if name.startswith('__mv_')])
if len(mv_fieldnames) == 0:
for values in reader:
yield OrderedDict(izip(fieldnames, values))
return
for values in reader:
record = OrderedDict()
for fieldname, value in izip(fieldnames, values):
if fieldname.startswith('__mv_'):
if len(value) > 0:
record[mv_fieldnames[fieldname]] = self._decode_list(value)
elif fieldname not in record:
record[fieldname] = value
yield record
def _records_protocol_v2(self, ifile):
while True:
result = self._read_chunk(ifile)
if not result:
return
metadata, body = result
action = getattr(metadata, 'action', None)
if action != 'execute':
raise RuntimeError('Expected execute action, not {}'.format(action))
finished = getattr(metadata, 'finished', False)
self._record_writer.is_flushed = False
if len(body) > 0:
reader = csv.reader(StringIO(body), dialect=CsvDialect)
try:
fieldnames = reader.next()
except StopIteration:
return
mv_fieldnames = dict([(name, name[len('__mv_'):]) for name in fieldnames if name.startswith('__mv_')])
if len(mv_fieldnames) == 0:
for values in reader:
yield OrderedDict(izip(fieldnames, values))
else:
for values in reader:
record = OrderedDict()
for fieldname, value in izip(fieldnames, values):
if fieldname.startswith('__mv_'):
if len(value) > 0:
record[mv_fieldnames[fieldname]] = self._decode_list(value)
elif fieldname not in record:
record[fieldname] = value
yield record
if finished:
return
self.flush()
def _report_unexpected_error(self):
error_type, error, tb = sys.exc_info()
origin = tb
while origin.tb_next is not None:
origin = origin.tb_next
filename = origin.tb_frame.f_code.co_filename
lineno = origin.tb_lineno
message = '{0} at "{1}", line {2:d} : {3}'.format(error_type.__name__, filename, lineno, error)
environment.splunklib_logger.error(message + '\nTraceback:\n' + ''.join(traceback.format_tb(tb)))
self.write_error(message)
# endregion
# region Types
class ConfigurationSettings(object):
""" Represents the configuration settings common to all :class:`SearchCommand` classes.
"""
def __init__(self, command):
self.command = command
def __repr__(self):
""" Converts the value of this instance to its string representation.
The value of this ConfigurationSettings instance is represented as a string of comma-separated
:code:`(name, value)` pairs.
:return: String representation of this instance
"""
definitions = type(self).configuration_setting_definitions
settings = imap(
lambda setting: repr((setting.name, setting.__get__(self), setting.supporting_protocols)), definitions)
return '[' + ', '.join(settings) + ']'
def __str__(self):
""" Converts the value of this instance to its string representation.
The value of this ConfigurationSettings instance is represented as a string of comma-separated
:code:`name=value` pairs. Items with values of :const:`None` are filtered from the list.
:return: String representation of this instance
"""
text = ', '.join(imap(lambda (name, value): name + '=' + json_encode_string(unicode(value)), self.iteritems()))
return text
# region Methods
@classmethod
def fix_up(cls, command_class):
""" Adjusts and checks this class and its search command class.
Derived classes typically override this method. It is used by the :decorator:`Configuration` decorator to
fix up the :class:`SearchCommand` class it adorns. This method is overridden by :class:`EventingCommand`,
:class:`GeneratingCommand`, :class:`ReportingCommand`, and :class:`StreamingCommand`, the base types for
all other search commands.
:param command_class: Command class targeted by this class
"""
return
def iteritems(self):
definitions = type(self).configuration_setting_definitions
version = self.command.protocol_version
return ifilter(
lambda (name, value): value is not None, imap(
lambda setting: (setting.name, setting.__get__(self)), ifilter(
lambda setting: setting.is_supported_by_protocol(version), definitions)))
pass # endregion
pass # endregion
SearchMetric = namedtuple(b'SearchMetric', (b'elapsed_seconds', b'invocation_count', b'input_count', b'output_count'))
def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys.stdout, module_name=None):
""" Instantiates and executes a search command class
This function implements a `conditional script stanza <https://docs.python.org/2/library/__main__.html>`_ based on the value of
:code:`module_name`::
if module_name is None or module_name == '__main__':
# execute command
Call this function at module scope with :code:`module_name=__name__`, if you would like your module to act as either
a reusable module or a standalone program. Otherwise, if you wish this function to unconditionally instantiate and
execute :code:`command_class`, pass :const:`None` as the value of :code:`module_name`.
:param command_class: Search command class to instantiate and execute.
:type command_class: type
:param argv: List of arguments to the command.
:type argv: list or tuple
:param input_file: File from which the command will read data.
:type input_file: :code:`file`
:param output_file: File to which the command will write data.
:type output_file: :code:`file`
:param module_name: Name of the module calling :code:`dispatch` or :const:`None`.
:type module_name: :code:`basestring`
:returns: :const:`None`
**Example**
.. code-block:: python
:linenos:
#!/usr/bin/env python
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option, validators
@Configuration()
class SomeStreamingCommand(StreamingCommand):
...
def stream(records):
...
dispatch(SomeStreamingCommand, module_name=__name__)
Dispatches the :code:`SomeStreamingCommand`, if and only if :code:`__name__` is equal to :code:`'__main__'`.
**Example**
.. code-block:: python
:linenos:
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option, validators
@Configuration()
class SomeStreamingCommand(StreamingCommand):
...
def stream(records):
...
dispatch(SomeStreamingCommand)
Unconditionally dispatches :code:`SomeStreamingCommand`.
"""
assert issubclass(command_class, SearchCommand)
if module_name is None or module_name == '__main__':
command_class().process(argv, input_file, output_file)
| mit | -3,616,265,967,801,299,500 | 33.771532 | 131 | 0.58457 | false |
seankelly/buildbot | master/buildbot/test/fake/state.py | 10 | 1108 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
class State(object):
"""
A simple class you can use to keep track of state throughout
a test. Just assign whatever you want to its attributes. Its
constructor provides a shortcut to setting initial values for
attributes
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
| gpl-2.0 | -6,415,961,869,295,347,000 | 35.933333 | 79 | 0.737365 | false |
SatoshiNXSimudrone/sl4a-damon-clone | python-modules/pybluez/examples/advanced/inquiry-with-rssi.py | 20 | 5076 | # performs a simple device inquiry, followed by a remote name request of each
# discovered device
import os
import sys
import struct
import _bluetooth as bluez
def printpacket(pkt):
for c in pkt:
sys.stdout.write("%02x " % struct.unpack("B",c)[0])
print
def read_inquiry_mode(sock):
"""returns the current mode, or -1 on failure"""
# save current filter
old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
# Setup socket filter to receive only events related to the
# read_inquiry_mode command
flt = bluez.hci_filter_new()
opcode = bluez.cmd_opcode_pack(bluez.OGF_HOST_CTL,
bluez.OCF_READ_INQUIRY_MODE)
bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
bluez.hci_filter_set_event(flt, bluez.EVT_CMD_COMPLETE);
bluez.hci_filter_set_opcode(flt, opcode)
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
# first read the current inquiry mode.
bluez.hci_send_cmd(sock, bluez.OGF_HOST_CTL,
bluez.OCF_READ_INQUIRY_MODE )
pkt = sock.recv(255)
status,mode = struct.unpack("xxxxxxBB", pkt)
if status != 0: mode = -1
# restore old filter
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
return mode
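# Inquiry modes defined by the Bluetooth specification: 0 = standard inquiry
# result, 1 = inquiry result with RSSI, 2 = inquiry result with RSSI or an
# extended inquiry result. This script switches the adapter to mode 1 below.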
def write_inquiry_mode(sock, mode):
"""returns 0 on success, -1 on failure"""
# save current filter
old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
# Setup socket filter to receive only events related to the
# write_inquiry_mode command
flt = bluez.hci_filter_new()
opcode = bluez.cmd_opcode_pack(bluez.OGF_HOST_CTL,
bluez.OCF_WRITE_INQUIRY_MODE)
bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
bluez.hci_filter_set_event(flt, bluez.EVT_CMD_COMPLETE);
bluez.hci_filter_set_opcode(flt, opcode)
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
# send the command!
bluez.hci_send_cmd(sock, bluez.OGF_HOST_CTL,
bluez.OCF_WRITE_INQUIRY_MODE, struct.pack("B", mode) )
pkt = sock.recv(255)
status = struct.unpack("xxxxxxB", pkt)[0]
# restore old filter
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
if status != 0: return -1
return 0
def device_inquiry_with_with_rssi(sock):
# save current filter
old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
# perform a device inquiry on bluetooth device #0
    # The inquiry should last duration * 1.28 seconds (4 * 1.28 = 5.12 s here)
# before the inquiry is performed, bluez should flush its cache of
# previously discovered devices
flt = bluez.hci_filter_new()
bluez.hci_filter_all_events(flt)
bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
duration = 4
max_responses = 255
cmd_pkt = struct.pack("BBBBB", 0x33, 0x8b, 0x9e, duration, max_responses)
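    # 0x33, 0x8b, 0x9e is the General Inquiry Access Code (GIAC, LAP 0x9E8B33)
    # packed least-significant byte first, followed by the inquiry duration
    # (in units of 1.28 s) and the maximum number of responses.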
bluez.hci_send_cmd(sock, bluez.OGF_LINK_CTL, bluez.OCF_INQUIRY, cmd_pkt)
results = []
done = False
while not done:
pkt = sock.recv(255)
ptype, event, plen = struct.unpack("BBB", pkt[:3])
if event == bluez.EVT_INQUIRY_RESULT_WITH_RSSI:
pkt = pkt[3:]
nrsp = struct.unpack("B", pkt[0])[0]
for i in range(nrsp):
addr = bluez.ba2str( pkt[1+6*i:1+6*i+6] )
rssi = struct.unpack("b", pkt[1+13*nrsp+i])[0]
results.append( ( addr, rssi ) )
print "[%s] RSSI: [%d]" % (addr, rssi)
elif event == bluez.EVT_INQUIRY_COMPLETE:
done = True
elif event == bluez.EVT_CMD_STATUS:
status, ncmd, opcode = struct.unpack("BBH", pkt[3:7])
if status != 0:
print "uh oh..."
printpacket(pkt[3:7])
done = True
elif event == bluez.EVT_INQUIRY_RESULT:
pkt = pkt[3:]
nrsp = struct.unpack("B", pkt[0])[0]
for i in range(nrsp):
addr = bluez.ba2str( pkt[1+6*i:1+6*i+6] )
results.append( ( addr, -1 ) )
print "[%s] (no RRSI)" % addr
else:
print "unrecognized packet type 0x%02x" % ptype
print "event ", event
# restore old filter
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
return results
dev_id = 0
try:
sock = bluez.hci_open_dev(dev_id)
except:
print "error accessing bluetooth device..."
sys.exit(1)
try:
mode = read_inquiry_mode(sock)
except Exception, e:
print "error reading inquiry mode. "
print "Are you sure this a bluetooth 1.2 device?"
print e
sys.exit(1)
print "current inquiry mode is %d" % mode
if mode != 1:
print "writing inquiry mode..."
try:
result = write_inquiry_mode(sock, 1)
except Exception, e:
print "error writing inquiry mode. Are you sure you're root?"
print e
sys.exit(1)
if result != 0:
print "error while setting inquiry mode"
print "result: %d" % result
device_inquiry_with_with_rssi(sock)
| apache-2.0 | -5,797,278,975,011,760,000 | 31.538462 | 77 | 0.61643 | false |
james4424/nest-simulator | pynest/nest/tests/test_sp/test_sp_manager.py | 8 | 4250 | # -*- coding: utf-8 -*-
#
# test_sp_manager.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import unittest
from .utils import extract_dict_a_from_b
__author__ = 'naveau'
class TestStructuralPlasticityManager(unittest.TestCase):
def setUp(self):
nest.ResetKernel()
nest.set_verbosity('M_INFO')
self.exclude_synapse_model = [
'stdp_dopamine_synapse',
'stdp_dopamine_synapse_lbl',
'stdp_dopamine_synapse_hpc',
'stdp_dopamine_synapse_hpc_lbl',
'gap_junction',
'gap_junction_lbl'
]
def test_register_synapses(self):
for syn_model in nest.Models('synapses'):
if syn_model not in self.exclude_synapse_model:
nest.ResetKernel()
nest.SetDefaults(syn_model, {'delay': 0.5})
syn_dict = {
'model': syn_model,
'pre_synaptic_element': 'SE1',
'post_synaptic_element': 'SE2'
}
nest.SetKernelStatus({
'min_delay': 0.1,
'max_delay': 1.0,
'structural_plasticity_synapses': {'syn1': syn_dict}
})
kernel_status = nest.GetKernelStatus(
'structural_plasticity_synapses')
self.assertIn('syn1', kernel_status)
self.assertEqual(kernel_status['syn1'], extract_dict_a_from_b(
kernel_status['syn1'], syn_dict))
def test_min_max_delay_using_default_delay(self):
nest.ResetKernel()
delay = 1.0
syn_model = 'static_synapse'
nest.SetStructuralPlasticityStatus(
{
'structural_plasticity_synapses': {
'syn1': {
'model': syn_model,
'pre_synaptic_element': 'SE1',
'post_synaptic_element': 'SE2',
}
}
}
)
self.assertLessEqual(nest.GetKernelStatus('min_delay'), delay)
self.assertGreaterEqual(nest.GetKernelStatus('max_delay'), delay)
def test_synapse_creation(self):
for syn_model in nest.Models('synapses'):
if syn_model not in self.exclude_synapse_model:
nest.ResetKernel()
syn_dict = {
'model': syn_model,
'pre_synaptic_element': 'SE1',
'post_synaptic_element': 'SE2'
}
nest.SetStructuralPlasticityStatus({
'structural_plasticity_synapses': {'syn1': syn_dict}
})
neurons = nest.Create('iaf_neuron', 2, {
'synaptic_elements': {
'SE1': {'z': 10.0, 'growth_rate': 0.0},
'SE2': {'z': 10.0, 'growth_rate': 0.0}
}
})
nest.EnableStructuralPlasticity()
nest.Simulate(10.0)
status = nest.GetStatus(neurons, 'synaptic_elements')
for st_neuron in status:
self.assertEqual(10, st_neuron['SE1']['z_connected'])
self.assertEqual(10, st_neuron['SE2']['z_connected'])
self.assertEqual(
20, len(nest.GetConnections(neurons, neurons, syn_model)))
break
def suite():
test_suite = unittest.makeSuite(TestStructuralPlasticityManager, 'test')
return test_suite
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -6,459,008,892,099,325,000 | 35.016949 | 78 | 0.534588 | false |
tonybaloney/st2contrib | packs/networking_utils/actions/is_valid_ip.py | 4 | 2527 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ipaddress
from st2actions.runners.pythonrunner import Action
class IsValidIpAction(Action):
def run(self, ip_address, no_loopback=False, only_v4=False, only_v6=False):
"""
Is this a valid IP address?
Args:
ip_address: The IP address to validate.
no_loopback: Raise an exception for Loopback addresses.
only_v4: Raise an exception for IPv6 addresses.
only_v6: Raise an exception for IPv4 addresses.
Raises:
ValueError: On invalid IP, loopback or when requesting only v4/v6
be considered valid.
Returns:
dict: With extra information about the IP address.
"""
# As ipaddress is a backport from Python 3.3+ it errors if the
# ip address is a string and not unicode.
ip_obj = ipaddress.ip_address(unicode(ip_address))
results = {'version': ip_obj.version,
'is_private': ip_obj.is_private,
'is_link_local': ip_obj.is_link_local,
'is_unspecified': ip_obj.is_unspecified,
'reverse_pointer': ip_obj.reverse_pointer,
'is_multicast': ip_obj.is_multicast,
'is_reserved': ip_obj.is_reserved,
'is_loopback': ip_obj.is_loopback}
if only_v6 and ip_obj.version == 4:
raise ValueError("Valid IPv4 address, but IPv6 is required.")
elif only_v4 and ip_obj.version == 6:
raise ValueError("Valid IPv6 address, but IPv4 is required.")
if no_loopback and ip_obj.is_loopback:
raise ValueError("Address is a IPv{} loopback address".format(
ip_obj.version))
return results
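# Illustrative call (parameter values are hypothetical; in StackStorm the
# action runner supplies them from the action parameters):
#     IsValidIpAction().run(ip_address='8.8.8.8', only_v4=True)
#     -> {'version': 4, 'is_loopback': False, ...}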
| apache-2.0 | 5,450,980,193,425,089 | 39.758065 | 79 | 0.645429 | false |
XDestination/mongo-connector | mongo_connector/doc_managers/doc_manager_simulator.py | 2 | 6559 | # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class to serve as proxy for the target engine for testing.
Receives documents from the oplog worker threads and indexes them
into the backend.
Please look at the Solr and ElasticSearch doc manager classes for a sample
implementation with real systems.
"""
from threading import RLock
from mongo_connector import constants
from mongo_connector.errors import OperationFailed
from mongo_connector.doc_managers.doc_manager_base import DocManagerBase
from mongo_connector.compat import u
class DocumentStore(dict):
def __init__(self):
self._lock = RLock()
def __getitem__(self, key):
with self._lock:
return super(DocumentStore, self).__getitem__(key)
def __setitem__(self, key, value):
with self._lock:
return super(DocumentStore, self).__setitem__(key, value)
def __iter__(self):
def __myiter__():
with self._lock:
for item in super(DocumentStore, self).__iter__():
yield item
return __myiter__()
class Entry(object):
def __init__(self, doc, ns, ts):
self.doc, self.ns, self.ts = doc, ns, ts
self._id = self.doc['_id']
@property
def meta_dict(self):
return {'_id': self._id, 'ns': self.ns, '_ts': self.ts}
@property
def merged_dict(self):
d = self.doc.copy()
d.update(**self.meta_dict)
return d
def update(self, ns, ts):
self.ns, self.ts = ns, ts
class DocManager(DocManagerBase):
"""BackendSimulator emulates both a target DocManager and a server.
The DocManager class creates a connection to the backend engine and
adds/removes documents, and in the case of rollback, searches for them.
The reason for storing id/doc pairs as opposed to doc's is so that multiple
updates to the same doc reflect the most up to date version as opposed to
multiple, slightly different versions of a doc.
"""
def __init__(self, url=None, unique_key='_id',
auto_commit_interval=None,
chunk_size=constants.DEFAULT_MAX_BULK, **kwargs):
"""Creates a dictionary to hold document id keys mapped to the
documents as values.
"""
self.unique_key = unique_key
self.auto_commit_interval = auto_commit_interval
self.doc_dict = DocumentStore()
self.url = url
self.chunk_size = chunk_size
self.kwargs = kwargs
def stop(self):
"""Stops any running threads in the DocManager.
"""
pass
def update(self, document_id, update_spec, namespace, timestamp):
"""Apply updates given in update_spec to the document whose id
matches that of doc.
"""
document = self.doc_dict[document_id].doc
updated = self.apply_update(document, update_spec)
if "_id" in updated:
updated.pop("_id")
updated[self.unique_key] = document_id
self.upsert(updated, namespace, timestamp)
return updated
def upsert(self, doc, namespace, timestamp):
"""Adds a document to the doc dict.
"""
# Allow exceptions to be triggered (for testing purposes)
if doc.get('_upsert_exception'):
raise Exception("upsert exception")
doc_id = doc["_id"]
self.doc_dict[doc_id] = Entry(doc=doc, ns=namespace, ts=timestamp)
def insert_file(self, f, namespace, timestamp):
"""Inserts a file to the doc dict.
"""
doc = f.get_metadata()
doc['content'] = f.read()
self.doc_dict[f._id] = Entry(doc=doc, ns=namespace, ts=timestamp)
def remove(self, document_id, namespace, timestamp):
"""Removes the document from the doc dict.
"""
try:
entry = self.doc_dict[document_id]
entry.doc = None
entry.update(namespace, timestamp)
except KeyError:
raise OperationFailed("Document does not exist: %s"
% u(document_id))
def search(self, start_ts, end_ts):
"""Searches through all documents and finds all documents that were
modified or deleted within the range.
Since we have very few documents in the doc dict when this is called,
linear search is fine. This method is only used by rollbacks to query
all the documents in the target engine within a certain timestamp
window. The input will be two longs (converted from Bson timestamp)
which specify the time range. The start_ts refers to the timestamp
of the last oplog entry after a rollback. The end_ts is the timestamp
of the last document committed to the backend.
"""
for _id in self.doc_dict:
entry = self.doc_dict[_id]
if entry.ts <= end_ts or entry.ts >= start_ts:
yield entry.meta_dict
def commit(self):
"""Simply passes since we're not using an engine that needs commiting.
"""
pass
def get_last_doc(self):
"""Searches through the doc dict to find the document that was
modified or deleted most recently."""
return max(self.doc_dict.values(), key=lambda x: x.ts).meta_dict
def handle_command(self, command_doc, namespace, timestamp):
pass
def _search(self):
"""Returns all documents in the doc dict.
This function is not a part of the DocManager API, and is only used
to simulate searching all documents from a backend.
"""
results = []
for _id in self.doc_dict:
entry = self.doc_dict[_id]
if entry.doc is not None:
results.append(entry.merged_dict)
return results
def _delete(self):
"""Deletes all documents.
This function is not a part of the DocManager API, and is only used
to simulate deleting all documents from a backend.
"""
self.doc_dict = {}
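# Rough usage sketch (values are illustrative), mirroring how the tests drive
# this simulator:
#     dm = DocManager()
#     dm.upsert({'_id': 1, 'title': 'example'}, 'test.collection', 5767898765)
#     dm._search()  # -> [{'_id': 1, 'title': 'example', 'ns': 'test.collection', '_ts': 5767898765}]
#     dm.remove(1, 'test.collection', 5767898766)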
| apache-2.0 | 2,279,154,945,056,189,200 | 32.984456 | 79 | 0.626467 | false |
shakamunyi/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/gaussian_test.py | 5 | 5286 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow as tf
class GaussianTest(tf.test.TestCase):
def testGaussianLogPDF(self):
with tf.Session():
batch_size = 6
mu = tf.constant([3.0] * batch_size)
sigma = tf.constant([math.sqrt(10.0)] * batch_size)
mu_v = 3.0
sigma_v = np.sqrt(10.0)
x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
gaussian = tf.contrib.distributions.Gaussian(mu=mu, sigma=sigma)
expected_log_pdf = np.log(
1 / np.sqrt(2 * np.pi) / sigma_v
* np.exp(-1.0 / (2 * sigma_v**2) * (x - mu_v)**2))
log_pdf = gaussian.log_pdf(x)
self.assertAllClose(expected_log_pdf, log_pdf.eval())
pdf = gaussian.pdf(x)
self.assertAllClose(np.exp(expected_log_pdf), pdf.eval())
def testGaussianLogPDFMultidimensional(self):
with tf.Session():
batch_size = 6
mu = tf.constant([[3.0, -3.0]] * batch_size)
sigma = tf.constant([[math.sqrt(10.0), math.sqrt(15.0)]] * batch_size)
mu_v = np.array([3.0, -3.0])
sigma_v = np.array([np.sqrt(10.0), np.sqrt(15.0)])
x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
gaussian = tf.contrib.distributions.Gaussian(mu=mu, sigma=sigma)
expected_log_pdf = np.log(
1 / np.sqrt(2 * np.pi) / sigma_v
* np.exp(-1.0 / (2 * sigma_v**2) * (x - mu_v)**2))
log_pdf = gaussian.log_pdf(x)
log_pdf_values = log_pdf.eval()
self.assertEqual(log_pdf.get_shape(), (6, 2))
self.assertAllClose(expected_log_pdf, log_pdf_values)
pdf = gaussian.pdf(x)
pdf_values = pdf.eval()
self.assertEqual(pdf.get_shape(), (6, 2))
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testGaussianCDF(self):
with tf.Session():
batch_size = 6
mu = tf.constant([3.0] * batch_size)
sigma = tf.constant([math.sqrt(10.0)] * batch_size)
mu_v = 3.0
sigma_v = np.sqrt(10.0)
x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
gaussian = tf.contrib.distributions.Gaussian(mu=mu, sigma=sigma)
erf_fn = np.vectorize(math.erf)
# From Wikipedia
expected_cdf = 0.5 * (1.0 + erf_fn((x - mu_v)/(sigma_v*np.sqrt(2))))
cdf = gaussian.cdf(x)
self.assertAllClose(expected_cdf, cdf.eval())
def testGaussianEntropy(self):
with tf.Session():
mu_v = np.array([1.0, 1.0, 1.0])
sigma_v = np.array([[1.0, 2.0, 3.0]]).T
gaussian = tf.contrib.distributions.Gaussian(mu=mu_v, sigma=sigma_v)
sigma_broadcast = mu_v * sigma_v
expected_entropy = 0.5 * np.log(2*np.pi*np.exp(1)*sigma_broadcast**2)
self.assertAllClose(expected_entropy, gaussian.entropy().eval())
def testGaussianSample(self):
with tf.Session():
mu = tf.constant(3.0)
sigma = tf.constant(math.sqrt(10.0))
mu_v = 3.0
sigma_v = np.sqrt(10.0)
n = tf.constant(100000)
gaussian = tf.contrib.distributions.Gaussian(mu=mu, sigma=sigma)
samples = gaussian.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(sample_values.shape, (100000,))
self.assertAllClose(sample_values.mean(), mu_v, atol=1e-2)
self.assertAllClose(sample_values.std(), sigma_v, atol=1e-1)
def testGaussianSampleMultiDimensional(self):
with tf.Session():
batch_size = 2
mu = tf.constant([[3.0, -3.0]] * batch_size)
sigma = tf.constant([[math.sqrt(10.0), math.sqrt(15.0)]] * batch_size)
mu_v = [3.0, -3.0]
sigma_v = [np.sqrt(10.0), np.sqrt(15.0)]
n = tf.constant(100000)
gaussian = tf.contrib.distributions.Gaussian(mu=mu, sigma=sigma)
samples = gaussian.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), (100000, batch_size, 2))
self.assertAllClose(sample_values[:, 0, 0].mean(), mu_v[0], atol=1e-2)
self.assertAllClose(sample_values[:, 0, 0].std(), sigma_v[0], atol=1e-1)
self.assertAllClose(sample_values[:, 0, 1].mean(), mu_v[1], atol=1e-2)
self.assertAllClose(sample_values[:, 0, 1].std(), sigma_v[1], atol=1e-1)
def testNegativeSigmaFails(self):
with tf.Session():
gaussian = tf.contrib.distributions.Gaussian(
mu=[1.],
sigma=[-5.],
name='G')
with self.assertRaisesOpError(
r'should contain only positive values'):
gaussian.mean.eval()
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | -8,212,078,274,667,906,000 | 36.489362 | 80 | 0.608778 | false |
digimarc/django | django/contrib/admin/migrations/0001_initial.py | 142 | 1657 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.admin.models
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '__first__'),
]
operations = [
migrations.CreateModel(
name='LogEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('action_time', models.DateTimeField(auto_now=True, verbose_name='action time')),
('object_id', models.TextField(null=True, verbose_name='object id', blank=True)),
('object_repr', models.CharField(max_length=200, verbose_name='object repr')),
('action_flag', models.PositiveSmallIntegerField(verbose_name='action flag')),
('change_message', models.TextField(verbose_name='change message', blank=True)),
('content_type', models.ForeignKey(to_field='id', blank=True, to='contenttypes.ContentType', null=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-action_time',),
'db_table': 'django_admin_log',
'verbose_name': 'log entry',
'verbose_name_plural': 'log entries',
},
bases=(models.Model,),
managers=[
('objects', django.contrib.admin.models.LogEntryManager()),
],
),
]
| bsd-3-clause | 8,316,047,165,614,899,000 | 40.425 | 121 | 0.573929 | false |
KevinFasusi/supplychainpy | supplychainpy/_helpers/_config_file_paths.py | 1 | 2886 | # Copyright (c) 2015-2016, The Authors and Contributors
# <see AUTHORS file>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from supplychainpy._helpers._pickle_config import deserialise_config, serialise_config
APP_DIR = os.path.dirname(__file__, )
REL_PATH_GENETIC_ALGORITHM = '../sample_data/population_genome.txt'
REL_PATH_DASH = 'dash.pickle'
REL_PATH_ARCHIVE = '../../_archive/'
REL_PATH_CSV_MANAGEMENT_CONFIG = '../_pickled/csv_management_config.pickle'
REL_PATH_APPLICATION_CONFIG = '../_pickled/application_config.pickle'
REL_PATH_PICKLE = '../_pickled/'
ABS_FILE_PATH_DASH = os.path.abspath(os.path.join(APP_DIR, '../_pickled/', REL_PATH_DASH))
ABS_FILE_PATH_APPLICATION_CONFIG = os.path.abspath(os.path.join(APP_DIR, '../_pickled/', REL_PATH_APPLICATION_CONFIG))
ABS_FILE_PATH_CSV_MANAGEMENT_CONFIG = os.path.abspath(os.path.join(APP_DIR, REL_PATH_CSV_MANAGEMENT_CONFIG))
ABS_FILE_PATH_ARCHIVE = os.path.abspath(os.path.join(APP_DIR, REL_PATH_ARCHIVE))
ABS_FILE_GENETIC_ALGORITHM = os.path.abspath(os.path.join(APP_DIR, REL_PATH_ARCHIVE))
ABS_FILE_PICKLE = os.path.abspath(os.path.join(APP_DIR, REL_PATH_PICKLE))
def main():
print(ABS_FILE_PICKLE)
#config = deserialise_config(ABS_FILE_PATH_APPLICATION_CONFIG)
#config['file']= 'data4.csv'
#serialise_config(config, ABS_FILE_PATH_APPLICATION_CONFIG)
#print(deserialise_config(ABS_FILE_PATH_APPLICATION_CONFIG))
if __name__ == '__main__':
main()
| bsd-3-clause | 5,789,370,260,308,640,000 | 53.45283 | 118 | 0.761954 | false |
manasi24/tempest | tempest/api_schema/response/compute/v2_1/services.py | 23 | 2372 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
list_services = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'services': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'id': {'type': ['integer', 'string'],
'pattern': '^[a-zA-Z!]*@[0-9]+$'},
'zone': {'type': 'string'},
'host': {'type': 'string'},
'state': {'type': 'string'},
'binary': {'type': 'string'},
'status': {'type': 'string'},
'updated_at': {'type': ['string', 'null']},
'disabled_reason': {'type': ['string', 'null']}
},
'additionalProperties': False,
'required': ['id', 'zone', 'host', 'state', 'binary',
'status', 'updated_at', 'disabled_reason']
}
}
},
'additionalProperties': False,
'required': ['services']
}
}
enable_service = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'service': {
'type': 'object',
'properties': {
'status': {'type': 'string'},
'binary': {'type': 'string'},
'host': {'type': 'string'}
},
'additionalProperties': False,
'required': ['status', 'binary', 'host']
}
},
'additionalProperties': False,
'required': ['service']
}
}
| apache-2.0 | -3,718,721,133,452,647,400 | 35.492308 | 78 | 0.447302 | false |
cryptofun/honey | share/seeds/generate-seeds.py | 1 | 4185 | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
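# Illustrative example (values assumed, not part of the original script):
# parse_spec('1.2.3.4:8333', 8333) maps the IPv4 address into the IPv4-in-IPv6
# prefix, returning (pchIPv4 + bytearray([1, 2, 3, 4]), 8333); process_nodes()
# below then formats such a pair as one "{{0x..,...}, port}" entry.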
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef H_CHAINPARAMSSEEDS\n')
g.write('#define H_CHAINPARAMSSEEDS\n')
g.write('// List of fixed seed nodes for the honey network\n')
g.write('// AUTOGENERATED by contrib/devtools/generate-seeds.py\n\n')
g.write('// Each line contains a 16-byte IPv6 address and a port.\n')
    g.write('// IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 15714)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 25714)
g.write('#endif\n')
if __name__ == '__main__':
main()
| mit | 2,950,685,174,326,540,300 | 30.946565 | 98 | 0.579211 | false |
githubutilities/LeetCode | Python/invert-binary-tree.py | 3 | 2242 | # Time: O(n)
# Space: O(h)
#
# Invert a binary tree.
#
# 4
# / \
# 2 7
# / \ / \
# 1 3 6 9
# to
# 4
# / \
# 7 2
# / \ / \
# 9 6 3 1
#
# Time: O(n)
# Space: O(w), where w is the maximum number of nodes on any level (the width of the tree).
# BFS solution.
import collections
class Queue:
def __init__(self):
self.data = collections.deque()
def push(self, x):
self.data.append(x)
def peek(self):
return self.data[0]
def pop(self):
return self.data.popleft()
def size(self):
return len(self.data)
def empty(self):
return len(self.data) == 0
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param {TreeNode} root
# @return {TreeNode}
def invertTree(self, root):
if root is not None:
nodes = Queue()
nodes.push(root)
while not nodes.empty():
node = nodes.pop()
node.left, node.right = node.right, node.left
if node.left is not None:
nodes.push(node.left)
if node.right is not None:
nodes.push(node.right)
return root
# Time: O(n)
# Space: O(h)
# Stack solution.
class Solution2:
# @param {TreeNode} root
# @return {TreeNode}
def invertTree(self, root):
if root is not None:
nodes = []
nodes.append(root)
while nodes:
node = nodes.pop()
node.left, node.right = node.right, node.left
if node.left is not None:
nodes.append(node.left)
if node.right is not None:
nodes.append(node.right)
return root
# Time: O(n)
# Space: O(h)
# DFS, Recursive solution.
class Solution3:
# @param {TreeNode} root
# @return {TreeNode}
def invertTree(self, root):
if root is not None:
root.left, root.right = self.invertTree(root.right), \
self.invertTree(root.left)
return root
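# Illustrative usage (added example, not part of the original solutions;
# defines a minimal TreeNode matching the commented-out definition above).
if __name__ == "__main__":
    class TreeNode:
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    root = TreeNode(4)
    root.left, root.right = TreeNode(2), TreeNode(7)
    inverted = Solution3().invertTree(root)
    assert inverted.left.val == 7 and inverted.right.val == 2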
| mit | -2,893,924,365,451,776,000 | 22.354167 | 66 | 0.484835 | false |
RachellCalhoun/craftsite | crafts/migrations/0001_initial.py | 1 | 1079 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CraftPost',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('title', models.CharField(max_length=200)),
('text', models.TextField(null=True, blank=True)),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(null=True, blank=True)),
('photo', models.ImageField(null=True, upload_to='', blank=True)),
('link', models.URLField(null=True, blank=True)),
('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
| gpl-3.0 | 3,635,434,343,512,694,000 | 36.206897 | 114 | 0.600556 | false |
pombreda/formalchemy | formalchemy/tests/__init__.py | 2 | 13869 | # -*- coding: utf-8 -*-
import os
import glob
import logging
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
from BeautifulSoup import BeautifulSoup # required for html prettification
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.ext.declarative import declarative_base
logging.getLogger('sqlalchemy').setLevel(logging.ERROR)
from formalchemy.fields import Field, SelectFieldRenderer, FieldRenderer, TextFieldRenderer, EscapingReadonlyRenderer
import formalchemy.fatypes as types
def ls(*args):
dirname = os.path.dirname(__file__)
args = list(args)
args.append('*')
files = glob.glob(os.path.join(dirname, *args))
files.sort()
for f in files:
if os.path.isdir(f):
print 'D %s' % os.path.basename(f)
else:
print '- %s' % os.path.basename(f)
def cat(*args):
filename = os.path.join(os.path.dirname(__file__), *args)
print open(filename).read()
engine = create_engine('sqlite://')
Session = scoped_session(sessionmaker(autoflush=False, bind=engine))
Base = declarative_base(engine, mapper=Session.mapper)
class One(Base):
__tablename__ = 'ones'
id = Column(Integer, primary_key=True)
class Two(Base):
__tablename__ = 'twos'
id = Column(Integer, primary_key=True)
foo = Column(Integer, default='133', nullable=True)
class TwoInterval(Base):
__tablename__ = 'two_interval'
id = Column(Integer, primary_key=True)
foo = Column(Interval, nullable=False)
class TwoFloat(Base):
__tablename__ = 'two_floats'
id = Column(Integer, primary_key=True)
foo = Column(Float, nullable=False)
from decimal import Decimal
class TwoNumeric(Base):
__tablename__ = 'two_numerics'
id = Column(Integer, primary_key=True)
foo = Column(Numeric, nullable=True)
class Three(Base):
__tablename__ = 'threes'
id = Column(Integer, primary_key=True)
foo = Column(Text, nullable=True)
bar = Column(Text, nullable=True)
class CheckBox(Base):
__tablename__ = 'checkboxes'
id = Column(Integer, primary_key=True)
field = Column(Boolean, nullable=False)
class PrimaryKeys(Base):
__tablename__ = 'primary_keys'
id = Column(Integer, primary_key=True)
id2 = Column(String(10), primary_key=True)
field = Column(String(10), nullable=False)
class Binaries(Base):
__tablename__ = 'binaries'
id = Column(Integer, primary_key=True)
file = Column(LargeBinary, nullable=True)
class ConflictNames(Base):
__tablename__ = 'conflict_names'
id = Column(Integer, primary_key=True)
model = Column(String, nullable=True)
data = Column(String, nullable=True)
session = Column(String, nullable=True)
vertices = Table('vertices', Base.metadata,
Column('id', Integer, primary_key=True),
Column('x1', Integer),
Column('y1', Integer),
Column('x2', Integer),
Column('y2', Integer),
)
class Point(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __composite_values__(self):
return [self.x, self.y]
def __eq__(self, other):
return other.x == self.x and other.y == self.y
def __ne__(self, other):
return not self.__eq__(other)
class Vertex(object):
pass
Session.mapper(Vertex, vertices, properties={
'start':composite(Point, vertices.c.x1, vertices.c.y1),
'end':composite(Point, vertices.c.x2, vertices.c.y2)
})
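# Illustrative (assumed) use of the composite columns mapped above:
#   v = Vertex(); v.start = Point(1, 2); v.end = Point(3, 4)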
class PointFieldRenderer(FieldRenderer):
def render(self, **kwargs):
from formalchemy import helpers as h
data = self.field.parent.data
x_name = self.name + '-x'
y_name = self.name + '-y'
x_value = (data is not None and x_name in data) and data[x_name] or str(self.field.value and self.field.value.x or '')
y_value = (data is not None and y_name in data) and data[y_name] or str(self.field.value and self.field.value.y or '')
return h.text_field(x_name, value=x_value) + h.text_field(y_name, value=y_value)
def deserialize(self):
data = self.field.parent.data.getone(self.name + '-x'), self.field.parent.data.getone(self.name + '-y')
return Point(*[int(i) for i in data])
# todo? test a CustomBoolean, using a TypeDecorator --
# http://www.sqlalchemy.org/docs/04/types.html#types_custom
# probably need to add _renderer attr and check
# isinstance(getattr(myclass, '_renderer', type(myclass)), Boolean)
# since the custom class shouldn't really inherit from Boolean
properties = Table('properties', Base.metadata,
Column('id', Integer, primary_key=True),
Column('a', Integer))
class Property(Base):
__table__ = properties
foo = column_property(properties.c.a.label('foo'))
# bar = column_property(properties.c.a) # TODO
class Recursive(Base):
__tablename__ = 'recursives'
id = Column(Integer, primary_key=True)
foo = Column(Text, nullable=True)
parent_id = Column(Integer, ForeignKey("recursives.id"))
parent = relation('Recursive', primaryjoin=parent_id==id, uselist=False, remote_side=parent_id)
class Synonym(Base):
__tablename__ = 'synonyms'
id = Column(Integer, primary_key=True)
_foo = Column(Text, nullable=True)
def _set_foo(self, foo):
self._foo = "SOMEFOO " + foo
def _get_foo(self):
return self._foo
foo = synonym('_foo', descriptor=property(_get_foo, _set_foo))
class OTOChild(Base):
__tablename__ = 'one_to_one_child'
id = Column(Integer, primary_key=True)
baz = Column(Text, nullable=False)
def __unicode__(self):
return self.baz
def __repr__(self):
return '<OTOChild %s>' % self.baz
class OTOParent(Base):
__tablename__ = 'one_to_one_parent'
id = Column(Integer, primary_key=True)
oto_child_id = Column(Integer, ForeignKey('one_to_one_child.id'), nullable=False)
child = relation(OTOChild, uselist=False)
class Order(Base):
__tablename__ = 'orders'
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
quantity = Column(Integer, nullable=False)
def __unicode__(self):
return 'Quantity: %s' % self.quantity
def __repr__(self):
return '<Order for user %s: %s>' % (self.user_id, self.quantity)
class OptionalOrder(Base): # the user is optional, not the order
__tablename__ = 'optional_orders'
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('users.id'))
quantity = Column(Integer)
user = relation('User')
def __unicode__(self):
return 'Quantity: %s' % self.quantity
def __repr__(self):
return '<OptionalOrder for user %s: %s>' % (self.user_id, self.quantity)
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
email = Column(Unicode(40), unique=True, nullable=False)
password = Column(Unicode(20), nullable=False)
name = Column(Unicode(30))
orders = relation(Order, backref='user', order_by='Order.quantity')
orders_dl = dynamic_loader(Order)
def __unicode__(self):
return self.name
def __repr__(self):
return '<User %s>' % self.name
def __html__(self):
return '<a href="mailto:%s">%s</a>' % (self.email, self.name)
class NaturalOrder(Base):
__tablename__ = 'natural_orders'
id = Column(Integer, primary_key=True)
user_email = Column(String, ForeignKey('natural_users.email'), nullable=False)
quantity = Column(Integer, nullable=False)
def __repr__(self):
return 'Quantity: %s' % self.quantity
class NaturalUser(Base):
__tablename__ = 'natural_users'
email = Column(Unicode(40), primary_key=True)
password = Column(Unicode(20), nullable=False)
name = Column(Unicode(30))
orders = relation(NaturalOrder, backref='user')
def __repr__(self):
return self.name
class Function(Base):
__tablename__ = 'functions'
foo = Column(TIMESTAMP, primary_key=True, default=func.current_timestamp())
# test property order for non-declarative mapper
addresses = Table('email_addresses', Base.metadata,
Column('address_id', Integer, Sequence('address_id_seq', optional=True), primary_key = True),
Column('address', String(40)),
)
users2 = Table('users2', Base.metadata,
Column('user_id', Integer, Sequence('user_id_seq', optional=True), primary_key = True),
Column('address_id', Integer, ForeignKey(addresses.c.address_id)),
Column('name', String(40), nullable=False)
)
class Address(object): pass
class User2(object): pass
mapper(Address, addresses)
mapper(User2, users2, properties={'address': relation(Address)})
class Manual(object):
a = Field()
b = Field(type=types.Integer).dropdown([('one', 1), ('two', 2)], multiple=True)
d = Field().textarea((80, 10))
class OrderUser(Base):
__tablename__ = 'order_users'
user_id = Column(Integer, ForeignKey('users.id'), primary_key=True)
order_id = Column(Integer, ForeignKey('orders.id'), primary_key=True)
user = relation(User)
order = relation(Order)
def __repr__(self):
return 'OrderUser(%s, %s)' % (self.user_id, self.order_id)
class OrderUserTag(Base):
__table__ = Table('order_user_tags', Base.metadata,
Column('id', Integer, primary_key=True),
Column('user_id', Integer, nullable=False),
Column('order_id', Integer, nullable=False),
Column('tag', String, nullable=False),
ForeignKeyConstraint(['user_id', 'order_id'], ['order_users.user_id', 'order_users.order_id']))
order_user = relation(OrderUser)
class Order__User(Base):
__table__ = join(Order.__table__, User.__table__).alias('__orders__users')
class Aliases(Base):
__tablename__ = 'table_with_aliases'
id = Column(Integer, primary_key=True)
text = Column('row_text', Text)
Base.metadata.create_all()
session = Session()
primary1 = PrimaryKeys(id=1, id2='22', field='value1')
primary2 = PrimaryKeys(id=1, id2='33', field='value2')
parent = OTOParent()
parent.child = OTOChild(baz='baz')
bill = User(email='[email protected]',
password='1234',
name='Bill')
john = User(email='[email protected]',
password='5678',
name='John')
order1 = Order(user=bill, quantity=10)
order2 = Order(user=john, quantity=5)
order3 = Order(user=john, quantity=6)
nbill = NaturalUser(email='[email protected]',
password='1234',
name='Natural Bill')
njohn = NaturalUser(email='[email protected]',
password='5678',
name='Natural John')
norder1 = NaturalOrder(user=nbill, quantity=10)
norder2 = NaturalOrder(user=njohn, quantity=5)
orderuser1 = OrderUser(user_id=1, order_id=1)
orderuser2 = OrderUser(user_id=1, order_id=2)
conflict_names = ConflictNames(data='data', model='model', session='session')
session.commit()
from formalchemy import config
from formalchemy.forms import FieldSet as DefaultFieldSet
from formalchemy.tables import Grid as DefaultGrid
from formalchemy.fields import Field
from formalchemy import templates
from formalchemy.validators import ValidationError
if templates.HAS_MAKO:
if not isinstance(config.engine, templates.MakoEngine):
raise ValueError('MakoEngine is not the default engine: %s' % config.engine)
else:
raise ImportError('mako is required for testing')
def pretty_html(html):
if isinstance(html, unicode):
html = html.encode('utf-8')
soup = BeautifulSoup(str(html))
return soup.prettify().strip()
class FieldSet(DefaultFieldSet):
def render(self, lang=None):
if self.readonly:
html = pretty_html(DefaultFieldSet.render(self))
for name, engine in templates.engines.items():
if isinstance(engine, config.engine.__class__):
continue
html_engine = pretty_html(engine('fieldset_readonly', fieldset=self))
assert html == html_engine, (name, html, html_engine)
return html
html = pretty_html(DefaultFieldSet.render(self))
for name, engine in templates.engines.items():
if isinstance(engine, config.engine.__class__):
continue
html_engine = pretty_html(engine('fieldset', fieldset=self))
assert html == html_engine, (name, html, html_engine)
return html
class Grid(DefaultGrid):
def render(self, lang=None):
if self.readonly:
html = pretty_html(DefaultGrid.render(self))
for name, engine in templates.engines.items():
if isinstance(engine, config.engine.__class__):
continue
html_engine = pretty_html(engine('grid_readonly', collection=self))
assert html == html_engine, (name, html, html_engine)
return html
html = pretty_html(DefaultGrid.render(self))
for name, engine in templates.engines.items():
if isinstance(engine, config.engine.__class__):
continue
html_engine = pretty_html(engine('grid', collection=self))
assert html == html_engine, (name, html, html_engine)
return html
original_renderers = FieldSet.default_renderers.copy()
def configure_and_render(fs, **options):
fs.configure(**options)
return fs.render()
if not hasattr(__builtins__, 'sorted'):
# 2.3 support
def sorted(L, key=lambda a: a):
L = list(L)
L.sort(lambda a, b: cmp(key(a), key(b)))
return L
class ImgRenderer(TextFieldRenderer):
def render(self, *args, **kwargs):
return '<img src="%s">' % self.value
import fake_module
fake_module.__dict__.update({
'fs': FieldSet(User),
})
import sys
sys.modules['library'] = fake_module
| mit | -8,906,925,635,919,196,000 | 33.329208 | 126 | 0.641791 | false |
da4089/simplefix | simplefix/parser.py | 1 | 8483 | #! /usr/bin/env python
########################################################################
# SimpleFIX
# Copyright (C) 2016-2020, David Arnold.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
########################################################################
from .constants import EQUALS_BYTE, SOH_BYTE
from .message import FixMessage, fix_val
from .data import RAW_DATA_TAGS, RAW_LEN_TAGS
# By default, messages are terminated by the Checksum (10) tag.
DEFAULT_STOP_TAG = 10
class FixParser(object): # skipcq: PYL-R0205
"""FIX protocol message parser.
This class translates FIX application messages in raw (wire)
format into instance of the FixMessage class.
It does not perform any validation of the fields, their presence
or absence in a particular message, the data types of fields, or
the values of enumerations.
It is suitable for streaming processing, accumulating byte data
from a network connection, and returning complete messages as they
are delivered, potentially in multiple fragments."""
def __init__(self):
"""Constructor."""
# Internal buffer used to accumulate message data.
self.buf = b""
# Parsed "tag=value" pairs, removed from the buffer, but not
# yet returned as a message.
self.pairs = []
# Copy raw field length tags.
self.raw_len_tags = RAW_LEN_TAGS[:]
# Copy raw field data tags.
self.raw_data_tags = RAW_DATA_TAGS[:]
# Parsed length of data field.
self.raw_len = 0
# Stop tag (default).
self.stop_tag = DEFAULT_STOP_TAG
# Stop character (optional).
self.stop_char = None
def add_raw(self, length_tag, value_tag):
"""Define the tags used for a private raw data field.
:param length_tag: tag number of length field.
:param value_tag: tag number of value field.
Data fields are not terminated by the SOH character as is usual for
FIX, but instead have a second, preceding field that specifies the
length of the value in bytes. The parser is initialised with all the
data fields defined in FIX.5.0, but if your application uses private
data fields, you can add them here, and the parser will process them
correctly. """
self.raw_len_tags.append(length_tag)
self.raw_data_tags.append(value_tag)
def remove_raw(self, length_tag, value_tag):
"""Remove the tags for a data type field.
:param length_tag: tag number of the length field.
:param value_tag: tag number of the value field.
You can remove either private or standard data field definitions in
case a particular application uses them for a field of a different
type. """
self.raw_len_tags.remove(length_tag)
self.raw_data_tags.remove(value_tag)
def reset(self):
"""Reset the internal parser state.
This will discard any appended buffer content, and any fields
parsed so far."""
self.buf = b""
self.pairs = []
self.raw_len = 0
def set_message_terminator(self, tag=None, char=None):
"""Set the end-of-message detection scheme.
:param tag: FIX tag number of terminating field. Default is 10.
:param char: Alternative, terminating character.
By default, messages are terminated by the FIX Checksum (10)
field. This can be changed to use a different tag, or a reserved
character using this function.
Note that only one of 'tag' or 'char' should be set, using a
named parameter."""
if tag is not None and char is not None:
raise ValueError("Only supply one of 'tag' or 'char'.")
if tag is not None:
self.stop_tag = tag
self.stop_char = None
else:
self.stop_tag = None
bs = char.encode() if type(char) is str else char
self.stop_char = bs[0]
def append_buffer(self, buf):
"""Append a byte string to the parser buffer.
:param buf: byte string to append.
The parser maintains an internal buffer of bytes to be parsed.
As raw data is read, it can be appended to this buffer. Each
call to get_message() will try to remove the bytes of a
complete messages from the head of the buffer."""
self.buf += fix_val(buf)
def get_buffer(self):
"""Return a reference to the internal buffer."""
return self.buf
def get_message(self):
"""Process the accumulated buffer and return the first message.
If the buffer starts with FIX fields other than BeginString
(8), these are discarded until the start of a message is
found.
If no BeginString (8) field is found, this function returns
None. Similarly, if (after a BeginString) no Checksum (10)
field is found, the function returns None.
Otherwise, it returns a simplefix.FixMessage instance
initialised with the fields from the first complete message
found in the buffer."""
# Break buffer into tag=value pairs.
start = 0
point = 0
in_tag = True
tag = 0
while point < len(self.buf):
c = self.buf[point]
if in_tag and c == EQUALS_BYTE:
tag_string = self.buf[start:point]
point += 1
tag = int(tag_string)
if tag in self.raw_data_tags and self.raw_len > 0:
if self.raw_len > len(self.buf) - point:
break
value = self.buf[point:point + self.raw_len]
self.pairs.append((tag, value))
self.buf = self.buf[point + self.raw_len + 1:]
point = 0
self.raw_len = 0
start = point
else:
in_tag = False
start = point
elif c == self.stop_char:
if start != point:
value = self.buf[start:point]
self.pairs.append((tag, value))
self.buf = self.buf[point + 1:]
else:
self.buf = self.buf[1:]
break
elif c == SOH_BYTE:
value = self.buf[start:point]
self.pairs.append((tag, value))
self.buf = self.buf[point + 1:]
if tag == self.stop_tag:
break
start = 0
point = -1
in_tag = True
if tag in self.raw_len_tags:
self.raw_len = int(value)
point += 1
# Check first pair is FIX BeginString.
while self.pairs and self.pairs[0][0] != 8:
# Discard pairs until we find the beginning of a message.
self.pairs.pop(0)
if len(self.pairs) == 0:
return None
# Look for checksum.
if self.stop_tag is not None:
if self.pairs[-1][0] != self.stop_tag:
return None
# Found checksum, so we have a complete message.
m = FixMessage()
for tag, value in self.pairs:
m.append_pair(tag, value)
self.pairs = []
return m
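# Minimal usage sketch (illustrative; the FIX bytes below are assumed and are
# not part of the original module):
if __name__ == "__main__":
    parser = FixParser()
    parser.append_buffer(b"8=FIX.4.2\x019=5\x0135=0\x0110=161\x01")
    print(parser.get_message())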
########################################################################
| mit | -5,796,780,153,357,733,000 | 33.909465 | 79 | 0.585878 | false |
hoangminhitvn/flask | flask/lib/python2.7/site-packages/werkzeug/_compat.py | 448 | 6184 | import sys
import operator
import functools
try:
import builtins
except ImportError:
import __builtin__ as builtins
PY2 = sys.version_info[0] == 2
_identity = lambda x: x
if PY2:
unichr = unichr
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
int_to_byte = chr
iterkeys = lambda d, *args, **kwargs: d.iterkeys(*args, **kwargs)
itervalues = lambda d, *args, **kwargs: d.itervalues(*args, **kwargs)
iteritems = lambda d, *args, **kwargs: d.iteritems(*args, **kwargs)
iterlists = lambda d, *args, **kwargs: d.iterlists(*args, **kwargs)
iterlistvalues = lambda d, *args, **kwargs: d.iterlistvalues(*args, **kwargs)
iter_bytes = lambda x: iter(x)
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
def fix_tuple_repr(obj):
def __repr__(self):
cls = self.__class__
return '%s(%s)' % (cls.__name__, ', '.join(
'%s=%r' % (field, self[index])
for index, field in enumerate(cls._fields)
))
obj.__repr__ = __repr__
return obj
def implements_iterator(cls):
cls.next = cls.__next__
del cls.__next__
return cls
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
def native_string_result(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs).encode('utf-8')
return functools.update_wrapper(wrapper, func)
def implements_bool(cls):
cls.__nonzero__ = cls.__bool__
del cls.__bool__
return cls
from itertools import imap, izip, ifilter
range_type = xrange
from StringIO import StringIO
from cStringIO import StringIO as BytesIO
NativeStringIO = BytesIO
def make_literal_wrapper(reference):
return lambda x: x
def normalize_string_tuple(tup):
"""Normalizes a string tuple to a common type. Following Python 2
rules, upgrades to unicode are implicit.
"""
if any(isinstance(x, text_type) for x in tup):
return tuple(to_unicode(x) for x in tup)
return tup
def try_coerce_native(s):
"""Try to coerce a unicode string to native if possible. Otherwise,
leave it as unicode.
"""
try:
return str(s)
except UnicodeError:
return s
wsgi_get_bytes = _identity
def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
return s.decode(charset, errors)
def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
if isinstance(s, bytes):
return s
return s.encode(charset, errors)
def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Expected bytes')
def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None or isinstance(x, str):
return x
return x.encode(charset, errors)
else:
unichr = chr
text_type = str
string_types = (str, )
integer_types = (int, )
iterkeys = lambda d, *args, **kwargs: iter(d.keys(*args, **kwargs))
itervalues = lambda d, *args, **kwargs: iter(d.values(*args, **kwargs))
iteritems = lambda d, *args, **kwargs: iter(d.items(*args, **kwargs))
iterlists = lambda d, *args, **kwargs: iter(d.lists(*args, **kwargs))
iterlistvalues = lambda d, *args, **kwargs: iter(d.listvalues(*args, **kwargs))
int_to_byte = operator.methodcaller('to_bytes', 1, 'big')
def iter_bytes(b):
return map(int_to_byte, b)
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
fix_tuple_repr = _identity
implements_iterator = _identity
implements_to_string = _identity
implements_bool = _identity
native_string_result = _identity
imap = map
izip = zip
ifilter = filter
range_type = range
from io import StringIO, BytesIO
NativeStringIO = StringIO
def make_literal_wrapper(reference):
if isinstance(reference, text_type):
return lambda x: x
return lambda x: x.encode('latin1')
def normalize_string_tuple(tup):
"""Ensures that all types in the tuple are either strings
or bytes.
"""
tupiter = iter(tup)
is_text = isinstance(next(tupiter, None), text_type)
for arg in tupiter:
if isinstance(arg, text_type) != is_text:
raise TypeError('Cannot mix str and bytes arguments (got %s)'
% repr(tup))
return tup
try_coerce_native = _identity
def wsgi_get_bytes(s):
return s.encode('latin1')
def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
return s.encode('latin1').decode(charset, errors)
def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
if isinstance(s, bytes):
return s.decode('latin1', errors)
return s.encode(charset).decode('latin1', errors)
def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Expected bytes')
def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None or isinstance(x, str):
return x
return x.decode(charset, errors)
def to_unicode(x, charset=sys.getdefaultencoding(), errors='strict',
allow_none_charset=False):
if x is None:
return None
if not isinstance(x, bytes):
return text_type(x)
if charset is None and allow_none_charset:
return x
return x.decode(charset, errors)
| bsd-3-clause | 4,384,212,287,664,493,000 | 29.613861 | 83 | 0.594276 | false |
sogelink/ansible | lib/ansible/modules/network/layer3/net_vrf.py | 96 | 1854 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: net_vrf
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage VRFs on network devices
description:
- This module provides declarative management of VRFs
on network devices.
options:
name:
description:
- Name of the VRF.
interfaces:
description:
- List of interfaces the VRF should be configured on.
aggregate:
description: List of VRFs definitions
purge:
description:
- Purge VRFs not defined in the I(aggregate) parameter.
default: no
state:
description:
- State of the VRF configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: Create VRF named MANAGEMENT
net_vrf:
name: MANAGEMENT
- name: remove VRF named MANAGEMENT
net_vrf:
name: MANAGEMENT
state: absent
- name: Create aggregate of VRFs with purge
net_vrf:
aggregate:
- { name: test4, rd: "1:204" }
- { name: test5, rd: "1:205" }
state: present
purge: yes
- name: Delete aggregate of VRFs
net_vrf:
aggregate:
- name: test2
- name: test3
- name: test4
- name: test5
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- vrf definition MANAGEMENT
"""
| gpl-3.0 | -6,503,136,363,621,961,000 | 22.175 | 93 | 0.648328 | false |
AOSP-S4-KK/platform_external_chromium_org | chrome/common/extensions/docs/server2/api_data_source_test.py | 23 | 10290 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import unittest
from api_data_source import (_JSCModel,
_FormatValue,
_GetEventByNameFromEvents)
from branch_utility import ChannelInfo
from extensions_paths import EXTENSIONS
from file_system import FileNotFoundError
from future import Future
from object_store_creator import ObjectStoreCreator
from reference_resolver import ReferenceResolver
from server_instance import ServerInstance
from test_data.canned_data import (CANNED_API_FILE_SYSTEM_DATA, CANNED_BRANCHES)
from test_data.api_data_source.canned_trunk_fs import CANNED_TRUNK_FS_DATA
from test_file_system import TestFileSystem
from third_party.json_schema_compiler.memoize import memoize
def _MakeLink(href, text):
return '<a href="%s">%s</a>' % (href, text)
def _GetType(dict_, name):
for type_ in dict_['types']:
if type_['name'] == name:
return type_
class _FakeAvailabilityFinder(object):
def GetApiAvailability(self, version):
return ChannelInfo('stable', '396', 5)
class _FakeHostFileSystemProvider(object):
def __init__(self, file_system_data):
self._file_system_data = file_system_data
def GetTrunk(self):
return self.GetBranch('trunk')
@memoize
def GetBranch(self, branch):
return TestFileSystem(self._file_system_data[str(branch)])
class _FakeSamplesDataSource(object):
def Create(self, request):
return {}
# Sad irony :(
class _FakeAPIDataSource(object):
def __init__(self, json_data):
self._json = json_data
def Create(self, *args, **kwargs):
return self
def get(self, key, disable_refs=False):
if key not in self._json:
raise FileNotFoundError(key)
return self._json[key]
class _FakeAPIModels(object):
def __init__(self, names):
self._names = names
def GetNames(self):
return self._names
class _FakeTemplateCache(object):
def GetFromFile(self, key):
return Future(value='handlebar %s' % key)
class APIDataSourceTest(unittest.TestCase):
def setUp(self):
self._base_path = os.path.join(sys.path[0], 'test_data', 'test_json')
server_instance = ServerInstance.ForTest(
TestFileSystem(CANNED_TRUNK_FS_DATA, relative_to=EXTENSIONS))
self._json_cache = server_instance.compiled_fs_factory.ForJson(
server_instance.host_file_system_provider.GetTrunk())
self._api_models = server_instance.api_models
# Used for testGetApiAvailability() so that valid-ish data is processed.
server_instance = ServerInstance.ForTest(
file_system_provider=_FakeHostFileSystemProvider(
CANNED_API_FILE_SYSTEM_DATA))
self._avail_api_models = server_instance.api_models
self._avail_json_cache = server_instance.compiled_fs_factory.ForJson(
server_instance.host_file_system_provider.GetTrunk())
self._avail_finder = server_instance.availability_finder
def _ReadLocalFile(self, filename):
with open(os.path.join(self._base_path, filename), 'r') as f:
return f.read()
def _CreateRefResolver(self, filename):
test_data = self._LoadJSON(filename)
return ReferenceResolver.Factory(_FakeAPIDataSource(test_data),
_FakeAPIModels(test_data),
ObjectStoreCreator.ForTest()).Create()
def _LoadJSON(self, filename):
return json.loads(self._ReadLocalFile(filename))
def testCreateId(self):
dict_ = _JSCModel('tester',
self._api_models,
self._CreateRefResolver('test_file_data_source.json'),
False,
_FakeAvailabilityFinder(),
self._json_cache,
_FakeTemplateCache(),
None).ToDict()
self.assertEquals('type-TypeA', dict_['types'][0]['id'])
self.assertEquals('property-TypeA-b',
dict_['types'][0]['properties'][0]['id'])
self.assertEquals('method-get', dict_['functions'][0]['id'])
self.assertEquals('event-EventA', dict_['events'][0]['id'])
# TODO(kalman): re-enable this when we have a rebase option.
def DISABLED_testToDict(self):
expected_json = self._LoadJSON('expected_tester.json')
dict_ = _JSCModel('tester',
self._api_models,
False,
self._CreateRefResolver('test_file_data_source.json'),
_FakeAvailabilityFinder(),
self._json_cache,
_FakeTemplateCache(),
None).ToDict()
self.assertEquals(expected_json, dict_)
def testFormatValue(self):
self.assertEquals('1,234,567', _FormatValue(1234567))
self.assertEquals('67', _FormatValue(67))
self.assertEquals('234,567', _FormatValue(234567))
def testFormatDescription(self):
dict_ = _JSCModel('ref_test',
self._api_models,
self._CreateRefResolver('ref_test_data_source.json'),
False,
_FakeAvailabilityFinder(),
self._json_cache,
_FakeTemplateCache(),
None).ToDict()
self.assertEquals(_MakeLink('ref_test.html#type-type2', 'type2'),
_GetType(dict_, 'type1')['description'])
self.assertEquals(
'A %s, or %s' % (_MakeLink('ref_test.html#type-type3', 'type3'),
_MakeLink('ref_test.html#type-type2', 'type2')),
_GetType(dict_, 'type2')['description'])
self.assertEquals(
'%s != %s' % (_MakeLink('other.html#type-type2', 'other.type2'),
_MakeLink('ref_test.html#type-type2', 'type2')),
_GetType(dict_, 'type3')['description'])
def testGetApiAvailability(self):
api_availabilities = {
'bluetooth': ChannelInfo('dev', CANNED_BRANCHES[28], 28),
'contextMenus': ChannelInfo('trunk', CANNED_BRANCHES['trunk'], 'trunk'),
'jsonStableAPI': ChannelInfo('stable', CANNED_BRANCHES[20], 20),
'idle': ChannelInfo('stable', CANNED_BRANCHES[5], 5),
'input.ime': ChannelInfo('stable', CANNED_BRANCHES[18], 18),
'tabs': ChannelInfo('stable', CANNED_BRANCHES[18], 18)
}
for api_name, availability in api_availabilities.iteritems():
model = _JSCModel(api_name,
self._avail_api_models,
None,
True,
self._avail_finder,
self._avail_json_cache,
_FakeTemplateCache(),
None)
self.assertEquals(availability, model._GetApiAvailability())
def testGetIntroList(self):
model = _JSCModel('tester',
self._api_models,
self._CreateRefResolver('test_file_data_source.json'),
False,
_FakeAvailabilityFinder(),
self._json_cache,
_FakeTemplateCache(),
None)
expected_list = [
{ 'title': 'Description',
'content': [
{ 'text': 'a test api' }
]
},
{ 'title': 'Availability',
'content': [
{ 'partial': 'handlebar chrome/common/extensions/docs/' +
'templates/private/intro_tables/stable_message.html',
'version': 5
}
]
},
{ 'title': 'Permissions',
'content': [
{ 'class': 'override',
'text': '"tester"'
},
{ 'text': 'is an API for testing things.' }
]
},
{ 'title': 'Manifest',
'content': [
{ 'class': 'code',
'text': '"tester": {...}'
}
]
},
{ 'title': 'Learn More',
'content': [
{ 'link': 'https://tester.test.com/welcome.html',
'text': 'Welcome!'
}
]
}
]
self.assertEquals(model._GetIntroTableList(), expected_list)
def testGetEventByNameFromEvents(self):
events = {}
# Missing 'types' completely.
self.assertRaises(AssertionError, _GetEventByNameFromEvents, events)
events['types'] = []
# No type 'Event' defined.
self.assertRaises(AssertionError, _GetEventByNameFromEvents, events)
events['types'].append({ 'name': 'Event',
'functions': []})
add_rules = { "name": "addRules" }
events['types'][0]['functions'].append(add_rules)
self.assertEqual(add_rules,
_GetEventByNameFromEvents(events)['addRules'])
events['types'][0]['functions'].append(add_rules)
# Duplicates are an error.
self.assertRaises(AssertionError, _GetEventByNameFromEvents, events)
def _FakeLoadAddRulesSchema(self):
events = self._LoadJSON('add_rules_def_test.json')
return _GetEventByNameFromEvents(events)
def testAddRules(self):
dict_ = _JSCModel('add_rules_tester',
self._api_models,
self._CreateRefResolver('test_file_data_source.json'),
False,
_FakeAvailabilityFinder(),
self._json_cache,
_FakeTemplateCache(),
self._FakeLoadAddRulesSchema).ToDict()
# Check that the first event has the addRulesFunction defined.
self.assertEquals('add_rules_tester', dict_['name'])
self.assertEquals('rules', dict_['events'][0]['name'])
self.assertEquals('notable_name_to_check_for',
dict_['events'][0]['byName']['addRules'][
'parameters'][0]['name'])
# Check that the second event has addListener defined.
self.assertEquals('noRules', dict_['events'][1]['name'])
self.assertEquals('add_rules_tester', dict_['name'])
self.assertEquals('noRules', dict_['events'][1]['name'])
self.assertEquals('callback',
dict_['events'][0]['byName']['addListener'][
'parameters'][0]['name'])
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 2,553,276,532,594,382,000 | 33.646465 | 80 | 0.583771 | false |
jhawkesworth/ansible | lib/ansible/plugins/terminal/slxos.py | 177 | 1962 | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.errors import AnsibleConnectionFailure
from ansible.plugins.terminal import TerminalBase
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"([\r\n]|(\x1b\[\?7h))[\w\+\-\.:\/\[\]]+(?:\([^\)]+\)){0,3}(?:[>#]) ?$")
]
terminal_stderr_re = [
re.compile(br"% ?Error"),
# re.compile(br"^% \w+", re.M),
re.compile(br"% ?Bad secret"),
re.compile(br"[\r\n%] Bad passwords"),
re.compile(br"invalid input", re.I),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"[^\r\n]+ not found"),
re.compile(br"'[^']' +returned error code: ?\d+"),
re.compile(br"Bad mask", re.I),
re.compile(br"% ?(\S+) ?overlaps with ?(\S+)", re.I),
re.compile(br"[%\S] ?Informational: ?[\s]+", re.I),
re.compile(br"syntax error: unknown argument.", re.I)
]
def on_open_shell(self):
try:
self._exec_cli_command(u'terminal length 0')
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
| gpl-3.0 | 7,101,720,302,800,315,000 | 35.333333 | 93 | 0.638124 | false |
LaoZhongGu/kbengine | kbe/res/scripts/common/Lib/ctypes/test/test_memfunctions.py | 50 | 3265 | import sys
import unittest
from ctypes import *
class MemFunctionsTest(unittest.TestCase):
## def test_overflow(self):
## # string_at and wstring_at must use the Python calling
## # convention (which acquires the GIL and checks the Python
## # error flag). Provoke an error and catch it; see also issue
## # #3554: <http://bugs.python.org/issue3554>
## self.assertRaises((OverflowError, MemoryError, SystemError),
## lambda: wstring_at(u"foo", sys.maxint - 1))
## self.assertRaises((OverflowError, MemoryError, SystemError),
## lambda: string_at("foo", sys.maxint - 1))
def test_memmove(self):
# large buffers apparently increase the chance that the memory
# is allocated in high address space.
a = create_string_buffer(1000000)
p = b"Hello, World"
result = memmove(a, p, len(p))
self.assertEqual(a.value, b"Hello, World")
self.assertEqual(string_at(result), b"Hello, World")
self.assertEqual(string_at(result, 5), b"Hello")
self.assertEqual(string_at(result, 16), b"Hello, World\0\0\0\0")
self.assertEqual(string_at(result, 0), b"")
def test_memset(self):
a = create_string_buffer(1000000)
result = memset(a, ord('x'), 16)
self.assertEqual(a.value, b"xxxxxxxxxxxxxxxx")
self.assertEqual(string_at(result), b"xxxxxxxxxxxxxxxx")
self.assertEqual(string_at(a), b"xxxxxxxxxxxxxxxx")
self.assertEqual(string_at(a, 20), b"xxxxxxxxxxxxxxxx\0\0\0\0")
def test_cast(self):
a = (c_ubyte * 32)(*map(ord, "abcdef"))
self.assertEqual(cast(a, c_char_p).value, b"abcdef")
self.assertEqual(cast(a, POINTER(c_byte))[:7],
[97, 98, 99, 100, 101, 102, 0])
self.assertEqual(cast(a, POINTER(c_byte))[:7:],
[97, 98, 99, 100, 101, 102, 0])
self.assertEqual(cast(a, POINTER(c_byte))[6:-1:-1],
[0, 102, 101, 100, 99, 98, 97])
self.assertEqual(cast(a, POINTER(c_byte))[:7:2],
[97, 99, 101, 0])
self.assertEqual(cast(a, POINTER(c_byte))[:7:7],
[97])
def test_string_at(self):
s = string_at(b"foo bar")
# XXX The following may be wrong, depending on how Python
# manages string instances
self.assertEqual(2, sys.getrefcount(s))
self.assertTrue(s, "foo bar")
self.assertEqual(string_at(b"foo bar", 7), b"foo bar")
self.assertEqual(string_at(b"foo bar", 3), b"foo")
try:
create_unicode_buffer
except NameError:
pass
else:
def test_wstring_at(self):
p = create_unicode_buffer("Hello, World")
a = create_unicode_buffer(1000000)
result = memmove(a, p, len(p) * sizeof(c_wchar))
self.assertEqual(a.value, "Hello, World")
self.assertEqual(wstring_at(a), "Hello, World")
self.assertEqual(wstring_at(a, 5), "Hello")
self.assertEqual(wstring_at(a, 16), "Hello, World\0\0\0\0")
self.assertEqual(wstring_at(a, 0), "")
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 | 6,215,118,023,051,450,000 | 40.329114 | 72 | 0.569678 | false |
kawamon/hue | desktop/core/ext-py/eventlet-0.24.1/tests/manual/greenio_memtest.py | 10 | 1872 | import eventlet
from eventlet import greenio
import os
__test__ = False
_proc_status = '/proc/%d/status' % os.getpid()
_scale = {'kB': 1024.0, 'mB': 1024.0 * 1024.0,
'KB': 1024.0, 'MB': 1024.0 * 1024.0}
def _VmB(VmKey):
'''Private.
'''
global _proc_status, _scale
# get pseudo file /proc/<pid>/status
try:
t = open(_proc_status)
v = t.read()
t.close()
except:
return 0.0 # non-Linux?
# get VmKey line e.g. 'VmRSS: 9999 kB\n ...'
i = v.index(VmKey)
v = v[i:].split(None, 3) # whitespace
if len(v) < 3:
return 0.0 # invalid format?
# convert Vm value to bytes
return float(v[1]) * _scale[v[2]]
def memory(since=0.0):
'''Return memory usage in bytes.
'''
return _VmB('VmSize:') - since
def resident(since=0.0):
'''Return resident memory usage in bytes.
'''
return _VmB('VmRSS:') - since
def stacksize(since=0.0):
'''Return stack size in bytes.
'''
return _VmB('VmStk:') - since
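# Illustrative use of the helpers above (assumed pattern, not in the original):
#   baseline = memory()
#   ... allocate something ...
#   grew_by = memory(baseline)  # passing the baseline returns the delta in bytes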
def test_pipe_writes_large_messages():
r, w = os.pipe()
r = greenio.GreenPipe(r)
w = greenio.GreenPipe(w, 'w')
large_message = b"".join([1024 * chr(i) for i in range(65)])
def writer():
w.write(large_message)
w.close()
gt = eventlet.spawn(writer)
for i in range(65):
buf = r.read(1024)
expected = 1024 * chr(i)
if buf != expected:
print(
"expected=%r..%r, found=%r..%r iter=%d"
% (expected[:4], expected[-4:], buf[:4], buf[-4:], i))
gt.wait()
if __name__ == "__main__":
_iter = 1
while True:
test_pipe_writes_large_messages()
_iter += 1
if _iter % 10 == 0:
print("_iter = %d, VmSize: %d, VmRSS = %d, VmStk = %d" %
(_iter, memory(), resident(), stacksize()))
| apache-2.0 | -7,237,980,189,321,866,000 | 21.554217 | 70 | 0.516026 | false |
unseenlaser/python-for-android | python3-alpha/python3-src/Lib/email/test/test_email_codecs.py | 50 | 3394 | # Copyright (C) 2002-2006 Python Software Foundation
# Contact: [email protected]
# email package unit tests for (optional) Asian codecs
import unittest
from test.support import run_unittest
from email.test.test_email import TestEmailBase
from email.charset import Charset
from email.header import Header, decode_header
from email.message import Message
# We're compatible with Python 2.3, but it doesn't have the built-in Asian
# codecs, so we have to skip all these tests.
try:
str(b'foo', 'euc-jp')
except LookupError:
raise unittest.SkipTest
class TestEmailAsianCodecs(TestEmailBase):
def test_japanese_codecs(self):
eq = self.ndiffAssertEqual
jcode = "euc-jp"
gcode = "iso-8859-1"
j = Charset(jcode)
g = Charset(gcode)
h = Header("Hello World!")
jhello = str(b'\xa5\xcf\xa5\xed\xa1\xbc\xa5\xef\xa1\xbc'
b'\xa5\xeb\xa5\xc9\xa1\xaa', jcode)
ghello = str(b'Gr\xfc\xdf Gott!', gcode)
h.append(jhello, j)
h.append(ghello, g)
# BAW: This used to -- and maybe should -- fold the two iso-8859-1
# chunks into a single encoded word. However it doesn't violate the
# standard to have them as two encoded chunks and maybe it's
# reasonable <wink> for each .append() call to result in a separate
# encoded word.
eq(h.encode(), """\
Hello World! =?iso-2022-jp?b?GyRCJU8lbSE8JW8hPCVrJUkhKhsoQg==?=
=?iso-8859-1?q?Gr=FC=DF_Gott!?=""")
eq(decode_header(h.encode()),
[(b'Hello World!', None),
(b'\x1b$B%O%m!<%o!<%k%I!*\x1b(B', 'iso-2022-jp'),
(b'Gr\xfc\xdf Gott!', gcode)])
subject_bytes = (b'test-ja \xa4\xd8\xc5\xea\xb9\xc6\xa4\xb5'
b'\xa4\xec\xa4\xbf\xa5\xe1\xa1\xbc\xa5\xeb\xa4\xcf\xbb\xca\xb2'
b'\xf1\xbc\xd4\xa4\xce\xbe\xb5\xc7\xa7\xa4\xf2\xc2\xd4\xa4\xc3'
b'\xa4\xc6\xa4\xa4\xa4\xde\xa4\xb9')
subject = str(subject_bytes, jcode)
h = Header(subject, j, header_name="Subject")
# test a very long header
enc = h.encode()
# TK: splitting point may differ by codec design and/or Header encoding
eq(enc , """\
=?iso-2022-jp?b?dGVzdC1qYSAbJEIkWEVqOUYkNSRsJD8lYSE8JWskTztKGyhC?=
=?iso-2022-jp?b?GyRCMnE8VCROPjVHJyRyQlQkQyRGJCQkXiQ5GyhC?=""")
# TK: full decode comparison
eq(str(h).encode(jcode), subject_bytes)
def test_payload_encoding_utf8(self):
jhello = str(b'\xa5\xcf\xa5\xed\xa1\xbc\xa5\xef\xa1\xbc'
b'\xa5\xeb\xa5\xc9\xa1\xaa', 'euc-jp')
msg = Message()
msg.set_payload(jhello, 'utf-8')
ustr = msg.get_payload(decode=True).decode(msg.get_content_charset())
self.assertEqual(jhello, ustr)
def test_payload_encoding(self):
jcode = 'euc-jp'
jhello = str(b'\xa5\xcf\xa5\xed\xa1\xbc\xa5\xef\xa1\xbc'
b'\xa5\xeb\xa5\xc9\xa1\xaa', jcode)
msg = Message()
msg.set_payload(jhello, jcode)
ustr = msg.get_payload(decode=True).decode(msg.get_content_charset())
self.assertEqual(jhello, ustr)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestEmailAsianCodecs))
return suite
def test_main():
run_unittest(TestEmailAsianCodecs)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| apache-2.0 | 7,749,739,476,233,436,000 | 34.354167 | 79 | 0.625221 | false |
ryra/Personal | 3.secsif/All/cryptopals-solutions-master/set2/12/AES_128.py | 4 | 1929 | #! /usr/bin/env python
from Crypto.Cipher import AES
from binascii import a2b_base64
def pkcs_7_pad(data, final_len = None):
if final_len == None:
final_len = (len(data)/16 + 1)*16
padding_len = final_len - len(data)
return data + chr(padding_len)*padding_len
def pkcs_7_unpad(data):
padding_len = ord(data[len(data)-1])
for i in range(len(data)-padding_len,len(data)):
if ord(data[i]) != padding_len:
return data
return data[:-padding_len]
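# Example (illustrative, not in the original): pkcs_7_pad('YELLOW SUBMARINE', 20)
# returns 'YELLOW SUBMARINE\x04\x04\x04\x04'; pkcs_7_unpad() strips it back off.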
def AES_128_ECB_encrypt(data, key, pad = False):
cipher = AES.new(key, AES.MODE_ECB)
if pad:
data = pkcs_7_pad(data)
return cipher.encrypt(data)
def AES_128_ECB_decrypt(data, key, unpad = False):
cipher = AES.new(key, AES.MODE_ECB)
decr = cipher.decrypt(data)
if unpad:
decr = pkcs_7_unpad(decr)
return decr
def xor_data(A, B):
return ''.join(chr(ord(A[i])^ord(B[i])) for i in range(len(A)))
def AES_128_CBC_encrypt(data, key, iv):
data = pkcs_7_pad(data)
block_count = len(data)/16
encrypted_data = ''
prev_block = iv
for b in range(block_count):
cur_block = data[b*16:(b+1)*16]
encrypted_block = AES_128_ECB_encrypt(xor_data(cur_block, prev_block), key)
encrypted_data += encrypted_block
prev_block = encrypted_block
return encrypted_data
def AES_128_CBC_decrypt(data, key, iv):
block_count = len(data)/16
decrypted_data = ''
prev_block = iv
for b in range(block_count):
cur_block = data[b*16:(b+1)*16]
decrypted_block = AES_128_ECB_decrypt(cur_block, key)
decrypted_data += xor_data(decrypted_block, prev_block)
prev_block = cur_block
return pkcs_7_unpad(decrypted_data)
if __name__ == '__main__':
text = 'abcdefghijklmnopqrstuvwxyz!'
key = 'abcdef1234567890'
iv = '128348347dhrughdf'
if AES_128_CBC_decrypt(AES_128_CBC_encrypt(text, key, iv), key, iv) == text:
print "[+] CBC decrypt(encrypt(text))==text test passed"
else:
print "[-] CBC test failed"
| agpl-3.0 | -8,326,404,363,105,725,000 | 28.227273 | 79 | 0.657854 | false |
sathnaga/avocado-vt | selftests/unit/test_installer.py | 8 | 2175 | #!/usr/bin/python
import unittest
import os
import sys
# simple magic for using scripts within a source tree
basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if os.path.isdir(os.path.join(basedir, 'virttest')):
sys.path.append(basedir)
from virttest import installer
from virttest import cartesian_config
class installer_test(unittest.TestCase):
def setUp(self):
self.registry = installer.InstallerRegistry()
def test_register_get_installer(self):
install_mode = 'custom_install_mode'
virt_type = 'custom_virt_type'
class CustomVirtInstaller:
pass
self.registry.register(install_mode, CustomVirtInstaller, virt_type)
klass = self.registry.get_installer(install_mode, virt_type)
self.assertTrue(klass is CustomVirtInstaller)
def test_register_get_installer_default(self):
install_mode = 'base_install_mode'
class BaseVirtInstaller:
pass
self.registry.register(install_mode, BaseVirtInstaller)
klass = self.registry.get_installer(install_mode,
get_default_virt=True)
self.assertTrue(klass is BaseVirtInstaller)
klass = self.registry.get_installer(install_mode,
virt=None,
get_default_virt=True)
self.assertTrue(klass is BaseVirtInstaller)
def test_make_installer(self):
config = """install_mode = test_install_mode
vm_type = test"""
class Installer:
def __init__(self, mode, name, test, params):
pass
installer.INSTALLER_REGISTRY.register('test_install_mode',
Installer,
'test')
config_parser = cartesian_config.Parser()
config_parser.parse_string(config)
params = next(config_parser.get_dicts())
instance = installer.make_installer("test_install_mode_test", params)
self.assertTrue(isinstance(instance, Installer))
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -8,299,031,636,746,632,000 | 30.071429 | 77 | 0.605057 | false |
FedoraScientific/salome-smesh | src/SMESH_SWIG/ex11_grid3partition.py | 1 | 3248 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2014 CEA/DEN, EDF R&D, OPEN CASCADE
#
# Copyright (C) 2003-2007 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
# CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : [email protected]
#
# =======================================
#
import salome
salome.salome_init()
import GEOM
from salome.geom import geomBuilder
geompy = geomBuilder.New(salome.myStudy)
import SMESH, SALOMEDS
from salome.smesh import smeshBuilder
smesh = smeshBuilder.New(salome.myStudy)
# Geometry
# ========
# grid compound of 3 x 3 elements
# an element is a compound of 3 concentric cylinders
# an element is centered in a square of the grid
# the smallest cylinder is a hole
# prism the grid, and mesh it in a hexahedral way
# Values
# ------
g_x = 0
g_y = 0
g_z = 0
g_arete = 50
g_hauteur = 30
g_rayon1 = 20
g_rayon2 = 30
g_rayon3 = 40
g_grid = 3
g_trim = 1000
# Element
# -------
e_boite = geompy.MakeBox(g_x-g_arete, g_y-g_hauteur, g_z-g_arete, g_x+g_arete, g_y+g_hauteur, g_z+g_arete)
e_hauteur = 2*g_hauteur
e_centre = geompy.MakeVertex(g_x, g_y-g_hauteur, g_z)
e_dir = geompy.MakeVectorDXDYDZ(0, 1, 0)
e_cyl1 = geompy.MakeCylinder(e_centre, e_dir, g_rayon3, e_hauteur)
e_blo1 = geompy.MakeCut(e_boite, e_cyl1)
e_cyl2 = geompy.MakeCylinder(e_centre, e_dir, g_rayon2, e_hauteur)
e_blo2 = geompy.MakeCut(e_cyl1, e_cyl2)
e_cyl3 = geompy.MakeCylinder(e_centre, e_dir, g_rayon1, e_hauteur)
e_blo3 = geompy.MakeCut(e_cyl2, e_cyl3)
# Partition and repair
# --------------------
p_tools = []
p_tools.append(geompy.MakePlane(e_centre, geompy.MakeVectorDXDYDZ( 1, 0, 1), g_trim))
p_tools.append(geompy.MakePlane(e_centre, geompy.MakeVectorDXDYDZ(-1, 0, 1), g_trim))
p_part = geompy.MakePartition([e_blo1, e_blo2, e_blo3], p_tools, [], [], geompy.ShapeType["SOLID"])
p_element = geompy.RemoveExtraEdges(p_part, doUnionFaces=True)
# Grid and glue
# -------------
grid = geompy.MakeMultiTranslation2D(p_element, geompy.MakeVectorDXDYDZ(1, 0, 0), 2*g_arete, g_grid, geompy.MakeVectorDXDYDZ(0, 0, 1), 2*g_arete, g_grid)
piece = geompy.MakeGlueFaces(grid, 1e-5)
# Add in study
# ------------
piece_id = geompy.addToStudy(piece, "ex11_grid3partition")
# Meshing
# =======
# Create a hexahedral mesh
# ------------------------
hexa = smesh.Mesh(piece, "ex11_grid3partition:hexa")
algo = hexa.Segment()
algo.NumberOfSegments(3)
hexa.Quadrangle()
hexa.Hexahedron()
# Mesh calculus
# -------------
hexa.Compute()
| lgpl-2.1 | -4,072,128,741,821,675,000 | 24.777778 | 153 | 0.684421 | false |
neumerance/deploy | openstack_dashboard/dashboards/project/firewalls/views.py | 4 | 10365 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: KC Wang, Big Switch Networks
import logging
import re
from django.core.urlresolvers import reverse_lazy # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tabs
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.firewalls \
import forms as fw_forms
from openstack_dashboard.dashboards.project.firewalls \
import tabs as fw_tabs
from openstack_dashboard.dashboards.project.firewalls \
import workflows as fw_workflows
InsertRuleToPolicy = fw_forms.InsertRuleToPolicy
RemoveRuleFromPolicy = fw_forms.RemoveRuleFromPolicy
UpdateFirewall = fw_forms.UpdateFirewall
UpdatePolicy = fw_forms.UpdatePolicy
UpdateRule = fw_forms.UpdateRule
FirewallDetailsTabs = fw_tabs.FirewallDetailsTabs
FirewallTabs = fw_tabs.FirewallTabs
PolicyDetailsTabs = fw_tabs.PolicyDetailsTabs
RuleDetailsTabs = fw_tabs.RuleDetailsTabs
AddFirewall = fw_workflows.AddFirewall
AddPolicy = fw_workflows.AddPolicy
AddRule = fw_workflows.AddRule
LOG = logging.getLogger(__name__)
class IndexView(tabs.TabView):
tab_group_class = (FirewallTabs)
template_name = 'project/firewalls/details_tabs.html'
def post(self, request, *args, **kwargs):
obj_ids = request.POST.getlist('object_ids')
action = request.POST['action']
obj_type = re.search('.delete([a-z]+)', action).group(1)
if not obj_ids:
obj_ids.append(re.search('([0-9a-z-]+)$', action).group(1))
if obj_type == 'rule':
for obj_id in obj_ids:
try:
api.fwaas.rule_delete(request, obj_id)
messages.success(request, 'Deleted rule %s' % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete rule. %s' % e))
if obj_type == 'policy':
for obj_id in obj_ids:
try:
api.fwaas.policy_delete(request, obj_id)
messages.success(request, 'Deleted policy %s' % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete policy. %s' % e))
if obj_type == 'firewall':
for obj_id in obj_ids:
try:
api.fwaas.firewall_delete(request, obj_id)
messages.success(request, 'Deleted firewall %s' % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete firewall. %s' % e))
return self.get(request, *args, **kwargs)
class AddRuleView(workflows.WorkflowView):
workflow_class = AddRule
template_name = "project/firewalls/addrule.html"
class AddPolicyView(workflows.WorkflowView):
workflow_class = AddPolicy
template_name = "project/firewalls/addpolicy.html"
class AddFirewallView(workflows.WorkflowView):
workflow_class = AddFirewall
template_name = "project/firewalls/addfirewall.html"
class RuleDetailsView(tabs.TabView):
tab_group_class = (RuleDetailsTabs)
template_name = 'project/firewalls/details_tabs.html'
class PolicyDetailsView(tabs.TabView):
tab_group_class = (PolicyDetailsTabs)
template_name = 'project/firewalls/details_tabs.html'
class FirewallDetailsView(tabs.TabView):
tab_group_class = (FirewallDetailsTabs)
template_name = 'project/firewalls/details_tabs.html'
class UpdateRuleView(forms.ModalFormView):
form_class = UpdateRule
template_name = "project/firewalls/updaterule.html"
context_object_name = 'rule'
success_url = reverse_lazy("horizon:project:firewalls:index")
def get_context_data(self, **kwargs):
context = super(UpdateRuleView, self).get_context_data(**kwargs)
context['rule_id'] = self.kwargs['rule_id']
obj = self._get_object()
if obj:
context['name'] = obj.name
return context
def _get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
rule_id = self.kwargs['rule_id']
try:
self._object = api.fwaas.rule_get(self.request, rule_id)
self._object.set_id_as_name_if_empty()
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve rule details.')
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
rule = self._get_object()
initial = rule.get_dict()
return initial
class UpdatePolicyView(forms.ModalFormView):
form_class = UpdatePolicy
template_name = "project/firewalls/updatepolicy.html"
context_object_name = 'policy'
success_url = reverse_lazy("horizon:project:firewalls:index")
def get_context_data(self, **kwargs):
context = super(UpdatePolicyView, self).get_context_data(**kwargs)
context["policy_id"] = self.kwargs['policy_id']
obj = self._get_object()
if obj:
context['name'] = obj.name
return context
def _get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
policy_id = self.kwargs['policy_id']
try:
self._object = api.fwaas.policy_get(self.request, policy_id)
self._object.set_id_as_name_if_empty()
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
policy = self._get_object()
initial = policy.get_dict()
return initial
class UpdateFirewallView(forms.ModalFormView):
form_class = UpdateFirewall
template_name = "project/firewalls/updatefirewall.html"
context_object_name = 'firewall'
success_url = reverse_lazy("horizon:project:firewalls:index")
def get_context_data(self, **kwargs):
context = super(UpdateFirewallView, self).get_context_data(**kwargs)
context["firewall_id"] = self.kwargs['firewall_id']
obj = self._get_object()
if obj:
context['name'] = obj.name
return context
def _get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
firewall_id = self.kwargs['firewall_id']
try:
self._object = api.fwaas.firewall_get(self.request,
firewall_id)
self._object.set_id_as_name_if_empty()
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve firewall details.')
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
firewall = self._get_object()
initial = firewall.get_dict()
return initial
class InsertRuleToPolicyView(forms.ModalFormView):
form_class = InsertRuleToPolicy
template_name = "project/firewalls/insert_rule_to_policy.html"
context_object_name = 'policy'
success_url = reverse_lazy("horizon:project:firewalls:index")
def get_context_data(self, **kwargs):
context = super(InsertRuleToPolicyView,
self).get_context_data(**kwargs)
context["policy_id"] = self.kwargs['policy_id']
obj = self._get_object()
if obj:
context['name'] = obj.name
return context
def _get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
policy_id = self.kwargs['policy_id']
try:
self._object = api.fwaas.policy_get(self.request, policy_id)
self._object.set_id_as_name_if_empty()
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
policy = self._get_object()
initial = policy.get_dict()
initial['policy_id'] = initial['id']
return initial
class RemoveRuleFromPolicyView(forms.ModalFormView):
form_class = RemoveRuleFromPolicy
template_name = "project/firewalls/remove_rule_from_policy.html"
context_object_name = 'policy'
success_url = reverse_lazy("horizon:project:firewalls:index")
def get_context_data(self, **kwargs):
context = super(RemoveRuleFromPolicyView,
self).get_context_data(**kwargs)
context["policy_id"] = self.kwargs['policy_id']
obj = self._get_object()
if obj:
context['name'] = obj.name
return context
def _get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
policy_id = self.kwargs['policy_id']
try:
self._object = api.fwaas.policy_get(self.request, policy_id)
self._object.set_id_as_name_if_empty()
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
policy = self._get_object()
initial = policy.get_dict()
initial['policy_id'] = initial['id']
return initial
| apache-2.0 | 2,162,242,117,870,831,900 | 35.368421 | 78 | 0.619103 | false |
windskyer/nova | nova/api/openstack/compute/legacy_v2/contrib/instance_usage_audit_log.py | 66 | 5751 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_config import cfg
import webob.exc
from nova.api.openstack import extensions
from nova import compute
from nova import context as nova_context
from nova.i18n import _
from nova import utils
CONF = cfg.CONF
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
authorize = extensions.extension_authorizer('compute',
'instance_usage_audit_log')
class InstanceUsageAuditLogController(object):
def __init__(self):
self.host_api = compute.HostAPI()
def index(self, req):
context = req.environ['nova.context']
authorize(context)
task_log = self._get_audit_task_logs(context)
return {'instance_usage_audit_logs': task_log}
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
if '.' in id:
before_date = datetime.datetime.strptime(str(id),
"%Y-%m-%d %H:%M:%S.%f")
else:
before_date = datetime.datetime.strptime(str(id),
"%Y-%m-%d %H:%M:%S")
except ValueError:
msg = _("Invalid timestamp for date %s") % id
raise webob.exc.HTTPBadRequest(explanation=msg)
task_log = self._get_audit_task_logs(context,
before=before_date)
return {'instance_usage_audit_log': task_log}
def _get_audit_task_logs(self, context, begin=None, end=None,
before=None):
"""Returns a full log for all instance usage audit tasks on all
computes.
:param begin: datetime beginning of audit period to get logs for,
Defaults to the beginning of the most recently completed
audit period prior to the 'before' date.
:param end: datetime ending of audit period to get logs for,
Defaults to the ending of the most recently completed
audit period prior to the 'before' date.
:param before: By default we look for the audit period most recently
completed before this datetime. Has no effect if both begin and end
are specified.
"""
        # NOTE(alex_xu): back-compatible with db layer hard-coded admin
        # permission checks.
nova_context.require_admin_context(context)
defbegin, defend = utils.last_completed_audit_period(before=before)
if begin is None:
begin = defbegin
if end is None:
end = defend
task_logs = self.host_api.task_log_get_all(context,
"instance_usage_audit",
begin, end)
        # We do it this way to include disabled compute services,
# which can have instances on them. (mdragon)
filters = {'topic': CONF.compute_topic}
services = self.host_api.service_get_all(context, filters=filters)
hosts = set(serv['host'] for serv in services)
seen_hosts = set()
done_hosts = set()
running_hosts = set()
total_errors = 0
total_items = 0
for tlog in task_logs:
seen_hosts.add(tlog['host'])
if tlog['state'] == "DONE":
done_hosts.add(tlog['host'])
if tlog['state'] == "RUNNING":
running_hosts.add(tlog['host'])
total_errors += tlog['errors']
total_items += tlog['task_items']
log = {tl['host']: dict(state=tl['state'],
instances=tl['task_items'],
errors=tl['errors'],
message=tl['message'])
for tl in task_logs}
missing_hosts = hosts - seen_hosts
overall_status = "%s hosts done. %s errors." % (
'ALL' if len(done_hosts) == len(hosts)
else "%s of %s" % (len(done_hosts), len(hosts)),
total_errors)
return dict(period_beginning=str(begin),
period_ending=str(end),
num_hosts=len(hosts),
num_hosts_done=len(done_hosts),
num_hosts_running=len(running_hosts),
num_hosts_not_run=len(missing_hosts),
hosts_not_run=list(missing_hosts),
total_instances=total_items,
total_errors=total_errors,
overall_status=overall_status,
log=log)
class Instance_usage_audit_log(extensions.ExtensionDescriptor):
"""Admin-only Task Log Monitoring."""
name = "OSInstanceUsageAuditLog"
alias = "os-instance_usage_audit_log"
namespace = "http://docs.openstack.org/ext/services/api/v1.1"
updated = "2012-07-06T01:00:00Z"
def get_resources(self):
ext = extensions.ResourceExtension('os-instance_usage_audit_log',
InstanceUsageAuditLogController())
return [ext]
| gpl-2.0 | -2,199,623,927,029,462,800 | 40.078571 | 79 | 0.565989 | false |
zamattiac/osf.io | framework/flask/__init__.py | 22 | 2444 | # -*- coding: utf-8 -*-
import os
from flask import (Flask, request, jsonify, render_template, # noqa
render_template_string, Blueprint, send_file, abort, make_response,
redirect as flask_redirect, url_for, send_from_directory, current_app
)
import furl
from website import settings
# Create app
app = Flask(
__name__,
static_folder=settings.STATIC_FOLDER,
static_url_path=settings.STATIC_URL_PATH,
)
# Pull debug mode from settings
app.debug = settings.DEBUG_MODE
app.config['SENTRY_TAGS'] = {'App': 'web'}
app.config['SENTRY_RELEASE'] = settings.VERSION
# Set up static routing for addons
# TODO: Handle this in nginx
addon_base_path = os.path.abspath('website/addons')
@app.route('/static/addons/<addon>/<path:filename>')
def addon_static(addon, filename):
addon_path = os.path.join(addon_base_path, addon, 'static')
return send_from_directory(addon_path, filename)
def add_handler(app, handler_name, func, key=None):
"""Add handler to Flask application if handler has not already been added.
Used to avoid attaching the same handlers more than once, e.g. when setting
up multiple applications during testing.
:param app: Flask app
:param handler_name: Name of handler type, e.g. 'before_request'
:param func: Handler function to attach
:param key: Blueprint name
"""
handler_adder = getattr(app, handler_name)
handler_funcs_name = '{0}_funcs'.format(handler_name)
handler_funcs = getattr(app, handler_funcs_name)
if func not in handler_funcs.get(key, []):
handler_adder(func)
def add_handlers(app, handlers, key=None):
"""Add multiple handlers to application.
:param app: Flask application
:param handlers: Mapping from handler names to handler functions
"""
for handler_name, func in handlers.iteritems():
add_handler(app, handler_name, func, key=key)
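# Illustrative usage (hypothetical handler, not part of this module): because
# add_handler consults Flask's registered handler functions first, repeating
# the call during test setup does not attach a second copy of the handler.
#
#     def load_current_user():
#         pass  # e.g. look up the session user and stash it on flask.g
#
#     add_handlers(app, {'before_request': load_current_user})
#     add_handlers(app, {'before_request': load_current_user})  # no-op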
def redirect(location, code=302):
"""Redirect the client to a desired location. Behaves the same
as Flask's :func:`flask.redirect` function with an awareness of
OSF view-only links.
IMPORTANT: This function should always be used instead of
flask.redirect to ensure the correct behavior of view-only
links.
"""
view_only = request.args.get('view_only', '')
if view_only:
url = furl.furl(location)
url.args['view_only'] = view_only
location = url.url
return flask_redirect(location, code=code)
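# Illustrative example (made-up paths): for a request carrying
# ?view_only=somekey, redirect('/project/abc12/files/') sends the client to
# /project/abc12/files/?view_only=somekey, whereas flask.redirect would drop
# the view-only key and could break anonymous access.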
| apache-2.0 | -2,965,007,858,645,296,000 | 30.333333 | 79 | 0.695172 | false |
potsmaster/cinder | cinder/tests/unit/keymgr/test_key.py | 18 | 1908 | # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test cases for the key classes.
"""
import array
from cinder.keymgr import key
from cinder import test
class KeyTestCase(test.TestCase):
def _create_key(self):
raise NotImplementedError()
def setUp(self):
super(KeyTestCase, self).setUp()
self.key = self._create_key()
class SymmetricKeyTestCase(KeyTestCase):
def _create_key(self):
return key.SymmetricKey(self.algorithm, self.encoded)
def setUp(self):
self.algorithm = 'AES'
self.encoded = array.array('B', ('0' * 64).decode('hex')).tolist()
super(SymmetricKeyTestCase, self).setUp()
def test_get_algorithm(self):
self.assertEqual(self.algorithm, self.key.get_algorithm())
def test_get_format(self):
self.assertEqual('RAW', self.key.get_format())
def test_get_encoded(self):
self.assertEqual(self.encoded, self.key.get_encoded())
def test___eq__(self):
self.assertTrue(self.key == self.key)
self.assertFalse(self.key is None)
self.assertFalse(None == self.key)
def test___ne__(self):
self.assertFalse(self.key != self.key)
self.assertTrue(self.key is not None)
self.assertTrue(None != self.key)
| apache-2.0 | 6,872,491,349,620,654,000 | 27.477612 | 78 | 0.671384 | false |
arpan-chavda/rh_app | libs/venus/planet/vendor/html5lib/sanitizer.py | 3 | 14430 | import re
from xml.sax.saxutils import escape, unescape
from tokenizer import HTMLTokenizer
from constants import tokenTypes
class HTMLSanitizerMixin(object):
""" sanitization of XHTML+MathML+SVG and of inline style attributes."""
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figure', 'footer', 'font', 'form', 'header', 'h1', 'h2', 'h3', 'h4',
'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins', 'keygen', 'kbd',
'label', 'legend', 'li', 'm', 'map', 'menu', 'meter', 'multicol',
'nav', 'nextid', 'ol', 'output', 'optgroup', 'option', 'p', 'pre',
'progress', 'q', 's', 'samp', 'section', 'select', 'small', 'sound',
'source', 'spacer', 'span', 'strike', 'strong', 'sub', 'sup', 'table',
'tbody', 'td', 'textarea', 'time', 'tfoot', 'th', 'thead', 'tr', 'tt',
'u', 'ul', 'var', 'video']
mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none']
svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
'optimum', 'pattern', 'ping', 'point-size', 'prompt', 'pqg',
'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
'width', 'wrap', 'xml:lang']
mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
'xlink:type', 'xmlns', 'xmlns:xlink']
svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
'fill-opacity', 'fill-rule', 'font-family', 'font-size',
'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
'opacity', 'orient', 'origin', 'overline-position',
'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
'transform', 'type', 'u1', 'u2', 'underline-position',
'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
'y1', 'y2', 'zoomAndPan']
attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc',
'xlink:href', 'xml:base']
svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
'mask', 'stroke']
svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
'set', 'use']
acceptable_css_properties = ['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width']
acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow']
acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity']
acceptable_protocols = [ 'ed2k', 'ftp', 'http', 'https', 'irc',
'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
'ssh', 'sftp', 'rtsp', 'afs' ]
# subclasses may define their own versions of these constants
allowed_elements = acceptable_elements + mathml_elements + svg_elements
allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes
allowed_css_properties = acceptable_css_properties
allowed_css_keywords = acceptable_css_keywords
allowed_svg_properties = acceptable_svg_properties
allowed_protocols = acceptable_protocols
    # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
    # stripping out all attributes not in ALLOWED_ATTRIBUTES. Style attributes
    # are parsed, and a restricted set, specified by ALLOWED_CSS_PROPERTIES and
    # ALLOWED_CSS_KEYWORDS, is allowed through. Attributes in ATTR_VAL_IS_URI
    # are scanned, and only URI schemes specified in ALLOWED_PROTOCOLS are
    # allowed.
    #
    # sanitize_html('<script> do_nasty_stuff() </script>')
    #  => &lt;script> do_nasty_stuff() &lt;/script>
    # sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
    #  => <a>Click here for $100</a>
def sanitize_token(self, token):
# accommodate filters which use token_type differently
token_type = token["type"]
if token_type in tokenTypes.keys():
token_type = tokenTypes[token_type]
if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
tokenTypes["EmptyTag"]):
if token["name"] in self.allowed_elements:
if token.has_key("data"):
attrs = dict([(name,val) for name,val in
token["data"][::-1]
if name in self.allowed_attributes])
for attr in self.attr_val_is_uri:
if not attrs.has_key(attr):
continue
val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
unescape(attrs[attr])).lower()
#remove replacement characters from unescaped characters
val_unescaped = val_unescaped.replace(u"\ufffd", "")
if (re.match("^[a-z0-9][-+.a-z0-9]*:",val_unescaped) and
(val_unescaped.split(':')[0] not in
self.allowed_protocols)):
del attrs[attr]
for attr in self.svg_attr_val_allows_ref:
if attr in attrs:
attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
' ',
unescape(attrs[attr]))
if (token["name"] in self.svg_allow_local_href and
'xlink:href' in attrs and re.search('^\s*[^#\s].*',
attrs['xlink:href'])):
del attrs['xlink:href']
if attrs.has_key('style'):
attrs['style'] = self.sanitize_css(attrs['style'])
token["data"] = [[name,val] for name,val in attrs.items()]
return token
else:
if token_type == tokenTypes["EndTag"]:
token["data"] = "</%s>" % token["name"]
elif token["data"]:
attrs = ''.join([' %s="%s"' % (k,escape(v)) for k,v in token["data"]])
token["data"] = "<%s%s>" % (token["name"],attrs)
else:
token["data"] = "<%s>" % token["name"]
if token.get("selfClosing"):
token["data"]=token["data"][:-1] + "/>"
if token["type"] in tokenTypes.keys():
token["type"] = "Characters"
else:
token["type"] = tokenTypes["Characters"]
del token["name"]
return token
elif token_type == tokenTypes["Comment"]:
pass
else:
return token
def sanitize_css(self, style):
# disallow urls
style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return ''
if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style): return ''
clean = []
for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
if not value: continue
if prop.lower() in self.allowed_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background','border','margin',
'padding']:
for keyword in value.split():
if not keyword in self.acceptable_css_keywords and \
not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$",keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif prop.lower() in self.allowed_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
lowercaseElementName=False, lowercaseAttrName=False):
#Change case matching defaults as we only output lowercase html anyway
#This solution doesn't seem ideal...
HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
lowercaseElementName, lowercaseAttrName)
def __iter__(self):
for token in HTMLTokenizer.__iter__(self):
token = self.sanitize_token(token)
if token:
yield token
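# Illustrative sketch (not upstream documentation): the sanitizer can be
# iterated directly, yielding tokens with disallowed markup escaped and
# disallowed attributes stripped; in practice it is usually supplied as the
# ``tokenizer`` argument when constructing an html5lib parser.
#
#     for token in HTMLSanitizer('<b onclick="evil()">hi</b><script>x</script>'):
#         print token
#
# Similarly, HTMLSanitizerMixin.sanitize_css (called on an instance) reduces
# 'color: red; background: url(javascript:evil)' to 'color: red;', since
# url() values and properties outside the allowed list are dropped.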
| gpl-3.0 | -4,867,528,023,247,370,000 | 54.930233 | 139 | 0.530631 | false |
aeroevan/snakebite | test/commandlineparser_test.py | 2 | 41119 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import unittest2
import os
import pwd
import json
import sys
import traceback
from mock import MagicMock, patch, mock_open
from snakebite.config import HDFSConfig
from snakebite.commandlineparser import Commands, CommandLineParser
from snakebite.namenode import Namenode
from config_test import ConfigTest
class CommandLineParserTest(unittest2.TestCase):
def setUp(self):
self.parser = CommandLineParser()
self.default_dir = os.path.join("/user", pwd.getpwuid(os.getuid())[0])
def test_general_options(self):
parser = self.parser
output = parser.parse('ls some_folder'.split())
self.assertFalse(output.debug)
self.assertFalse(output.human)
self.assertFalse(output.json)
self.assertEqual(output.namenode, None)
self.assertEqual(output.port, None)
#each option
output = parser.parse('-D ls some_folder'.split())
self.assertTrue(output.debug)
output = parser.parse('--debug ls some_folder'.split())
self.assertTrue(output.debug)
output = parser.parse('-j ls some_folder'.split())
self.assertTrue(output.json)
output = parser.parse('--json ls some_folder'.split())
self.assertTrue(output.json)
output = parser.parse('-n namenode_fqdn ls some_folder'.split()) # what are typical values for namenodes?
self.assertEqual(output.namenode, "namenode_fqdn")
output = parser.parse('--namenode namenode_fqdn ls some_folder'.split())
self.assertEqual(output.namenode, "namenode_fqdn")
output = parser.parse('-p 1234 ls some_folder'.split())
self.assertEqual(output.port, 1234)
output = parser.parse('--port 1234 ls some_folder'.split())
self.assertEqual(output.port, 1234)
output = parser.parse('-V 4 ls some_folder'.split())
self.assertEqual(output.version, 4)
output = parser.parse('--version 4 ls some_folder'.split())
self.assertEqual(output.version, 4)
#all options
output = parser.parse('-D -j -n namenode_fqdn -p 1234 -V 4 ls some_folder'.split())
self.assertTrue(output.debug)
self.assertTrue(output.json)
self.assertEqual(output.namenode, "namenode_fqdn")
self.assertEqual(output.port, 1234)
self.assertEqual(output.version, 4)
#options in illegal position
with self.assertRaises(SystemExit):
parser.parse('ls -D some_folder'.split())
with self.assertRaises(SystemExit):
parser.parse('ls some_folder -D'.split())
def test_ls(self):
parser = self.parser
#no dir
output = parser.parse('ls'.split())
self.assertEqual(output.command, 'ls')
self.assertEqual(output.dir, [self.default_dir])
#one dir
output = parser.parse('ls some_dir'.split())
self.assertEqual(output.dir, ['some_dir'])
#multiple dirs
output = parser.parse('ls dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
#specific commands
output = parser.parse('ls -d -R -s -h some_dir'.split())
self.assertTrue(output.directory)
self.assertTrue(output.recurse)
self.assertTrue(output.summary)
self.assertTrue(output.human)
self.assertEqual(output.dir, ['some_dir'])
#multiple slashes
output = parser.parse('ls ///dir1 //dir2 /dir3'.split())
self.assertEqual(output.dir, ['///dir1', '//dir2', '/dir3'])
def test_mkdir(self):
parser = self.parser
#no dir
with self.assertRaises(SystemExit):
parser.parse('mkdir'.split())
#one dir
output = parser.parse('mkdir some_dir'.split())
self.assertEqual(output.command, 'mkdir')
self.assertEqual(output.dir, ['some_dir'])
#multiple dirs
output = parser.parse('mkdir dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
def test_mkdirp(self):
parser = self.parser
#no dir
with self.assertRaises(SystemExit):
parser.parse('mkdirp'.split())
#one dir
output = parser.parse('mkdirp some_dir'.split())
self.assertEqual(output.command, 'mkdirp')
self.assertEqual(output.dir, ['some_dir'])
#multiple dirs
output = parser.parse('mkdirp dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
def test_chown(self):
parser = self.parser
#no dir and/or no owner
with self.assertRaises(SystemExit):
parser.parse('chown'.split())
with self.assertRaises(SystemExit):
parser.parse('chown owner_or_dir'.split())
#one dir
output = parser.parse('chown root some_dir'.split())
self.assertEqual(output.command, 'chown')
self.assertEqual(output.dir, ['some_dir'])
self.assertEqual(output.single_arg, 'root')
#multiple dirs
output = parser.parse('chown root dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
self.assertEqual(output.single_arg, 'root')
#recursive
output = parser.parse('chown -R root some_dir'.split())
self.assertTrue(output.recurse)
def test_chmod(self):
parser = self.parser
#no dir and/or no mode
with self.assertRaises(SystemExit):
parser.parse('chmod'.split())
with self.assertRaises(SystemExit):
parser.parse('chmod mode_or_dir'.split())
#one dir
output = parser.parse('chmod 664 some_dir'.split())
self.assertEqual(output.command, 'chmod')
self.assertEqual(output.dir, ['some_dir'])
self.assertEqual(output.single_int_arg, 664)
#wrong type for mode argument
with self.assertRaises(SystemExit):
parser.parse('chmod not_an_int some_dir'.split())
#multiple dirs
output = parser.parse('chmod 664 dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
self.assertEqual(output.single_int_arg, 664)
#recursive
output = parser.parse('chmod -R 664 some_dir'.split())
self.assertTrue(output.recurse)
def test_chgrp(self):
parser = self.parser
#no dir and/or no group
with self.assertRaises(SystemExit):
parser.parse('chgrp'.split())
with self.assertRaises(SystemExit):
parser.parse('chgrp group_or_dir'.split())
#one dir
output = parser.parse('chgrp group some_dir'.split())
self.assertEqual(output.command, 'chgrp')
self.assertEqual(output.dir, ['some_dir'])
self.assertEqual(output.single_arg, 'group')
#multiple dirs
output = parser.parse('chgrp group dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
self.assertEqual(output.single_arg, 'group')
#recursive
output = parser.parse('chgrp -R group some_dir'.split())
self.assertTrue(output.recurse)
def test_count(self):
parser = self.parser
#no dir
output = parser.parse('count'.split())
self.assertEqual(output.command, 'count')
self.assertEqual(output.dir, [self.default_dir])
#one dir
output = parser.parse('count some_dir'.split())
self.assertEqual(output.dir, ['some_dir'])
#multiple dirs
output = parser.parse('count dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
# Human output
output = parser.parse('count -h dir1 dir2 dir3'.split())
self.assertTrue(output.human)
def test_df(self):
parser = self.parser
#no dir
output = parser.parse('df'.split())
self.assertEqual(output.command, 'df')
# Human output
output = parser.parse('df -h'.split())
self.assertEqual(output.command, 'df')
self.assertTrue(output.human)
with self.assertRaises(SystemExit):
parser.parse('df some_additional_argument'.split())
def test_du(self):
parser = self.parser
#no dir
output = parser.parse('du'.split())
self.assertEqual(output.command, 'du')
self.assertEqual(output.dir, [self.default_dir])
#one dir
output = parser.parse('du some_dir'.split())
self.assertEqual(output.dir, ['some_dir'])
#multiple dirs
output = parser.parse('du dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
#summary
output = parser.parse('du -s some_dir'.split())
self.assertTrue(output.summary)
#human
output = parser.parse('du -h some_dir'.split())
self.assertTrue(output.human)
def test_mv(self):
parser = self.parser
#no source and/or no destination
with self.assertRaises(SystemExit):
parser.parse('mv'.split())
with self.assertRaises(SystemExit):
parser.parse('mv src_or_dest'.split())
#one source
output = parser.parse('mv source some_dest'.split())
self.assertEqual(output.command, 'mv')
self.assertEqual(output.dir, ['source'])
self.assertEqual(output.single_arg, 'some_dest')
#multiple sources
output = parser.parse('mv source1 source2 source3 some_dest'.split())
self.assertEqual(output.dir, ['source1', 'source2', 'source3'])
self.assertEqual(output.single_arg, 'some_dest')
def test_rm(self):
parser = self.parser
#no dir and/or no group
with self.assertRaises(SystemExit):
parser.parse('rm'.split())
#one dir
output = parser.parse('rm some_dir'.split())
self.assertEqual(output.command, 'rm')
self.assertEqual(output.dir, ['some_dir'])
#multiple dirs
output = parser.parse('rm dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
#recursive
output = parser.parse('rm -R some_dir'.split())
self.assertTrue(output.recurse)
#skiptrash
output = parser.parse('rm -S some_dir'.split())
self.assertTrue(output.skiptrash)
#skiptrash
output = parser.parse('rm --skiptrash some_dir'.split())
self.assertTrue(output.skiptrash)
#usetrash
output = parser.parse('rm -T some_dir'.split())
self.assertTrue(output.usetrash)
#usetrash
output =parser.parse('rm --usetrash some_dir'.split())
self.assertTrue(output.usetrash)
#usetrash & skiptrash
output = parser.parse('rm --usetrash --skiptrash some_dir'.split())
self.assertTrue(output.usetrash)
self.assertTrue(output.skiptrash)
def test_touchz(self):
parser = self.parser
#no dir and/or no group
with self.assertRaises(SystemExit):
parser.parse('touchz'.split())
#one dir
output = parser.parse('touchz some_dir'.split())
self.assertEqual(output.command, 'touchz')
self.assertEqual(output.dir, ['some_dir'])
#multiple dirs
output = parser.parse('touchz dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
def test_serverdefaults(self):
parser = self.parser
#no arg
output = parser.parse('serverdefaults'.split())
self.assertEqual(output.command, 'serverdefaults')
#too many args
with self.assertRaises(SystemExit):
parser.parse('serverdefaults some_additional_argument'.split())
def test_rmdir(self):
parser = self.parser
#no dir and/or no group
with self.assertRaises(SystemExit):
parser.parse('rmdir'.split())
#one dir
output = parser.parse('rmdir some_dir'.split())
self.assertEqual(output.command, 'rmdir')
self.assertEqual(output.dir, ['some_dir'])
#multiple dirs
output = parser.parse('rmdir dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
def test_setrep(self):
parser = self.parser
#no dir and/or no replication factor
with self.assertRaises(SystemExit):
parser.parse('setrep'.split())
with self.assertRaises(SystemExit):
parser.parse('setrep some_dir'.split())
with self.assertRaises(SystemExit):
parser.parse('setrep 3'.split())
#one dir
output = parser.parse('setrep 3 some_dir'.split())
self.assertEqual(output.command, 'setrep')
self.assertEqual(output.dir, ['some_dir'])
self.assertEqual(output.single_int_arg, 3)
#wrong type for mode argument
with self.assertRaises(SystemExit):
parser.parse('setrep not_an_int some_dir'.split())
#multiple dirs
output = parser.parse('setrep 3 dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
self.assertEqual(output.single_int_arg, 3)
#recursive
output = parser.parse('setrep -R 3 some_dir'.split())
self.assertTrue(output.recurse)
def test_usage(self):
parser = self.parser
#no command
output = parser.parse('usage'.split())
self.assertEqual(output.command, 'usage')
#one dir
output = parser.parse('usage some_cmd'.split())
self.assertEqual(output.command, 'usage')
self.assertEqual(output.arg, ['some_cmd'])
#multiple dirs
output = parser.parse('usage cmd1 cmd2 cmd3'.split())
self.assertEqual(output.arg, ['cmd1', 'cmd2', 'cmd3'])
def test_stat(self):
parser = self.parser
#no dir
with self.assertRaises(SystemExit):
parser.parse('stat'.split())
#one dir
output = parser.parse('stat some_dir'.split())
self.assertEqual(output.command, 'stat')
self.assertEqual(output.dir, ['some_dir'])
#multiple dirs
output = parser.parse('stat dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
def test_test(self):
parser = self.parser
#no dir
with self.assertRaises(SystemExit):
parser.parse('test'.split())
#one dir
output = parser.parse('test some_dir'.split())
self.assertEqual(output.command, 'test')
self.assertEqual(output.single_arg, 'some_dir')
#multiple dirs
with self.assertRaises(SystemExit):
parser.parse('test dir1 dir2 dir3'.split())
#specific commands
output = parser.parse('test -d -z -e some_dir'.split())
self.assertTrue(output.directory)
self.assertTrue(output.zero)
self.assertTrue(output.exists)
self.assertEqual(output.single_arg, 'some_dir')
def test_cat(self):
parser = self.parser
#no path
with self.assertRaises(SystemExit):
parser.parse('cat'.split())
#one path
output = parser.parse('cat some_file'.split())
self.assertEqual(output.command, 'cat')
self.assertEqual(output.dir, ['some_file'])
#multiple paths
output = parser.parse('cat dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
#specific commands
output = parser.parse('cat -checkcrc dir1 dir2'.split())
self.assertEqual(output.checkcrc, True)
def test_copyFromLocal(self):
parser = self.parser
#no dir
with self.assertRaises(SystemExit):
parser.parse('copyFromLocal'.split())
#one dir
with self.assertRaises(SystemExit):
parser.parse('copyFromLocal some_dir'.split())
#two dirs
output = parser.parse('copyFromLocal dir1 dir2'.split())
self.assertEqual(output.dir, ['dir1'])
self.assertEqual(output.single_arg, 'dir2')
def test_copyToLocal(self):
parser = self.parser
#no dir
with self.assertRaises(SystemExit):
parser.parse('copyToLocal'.split())
#one dir
with self.assertRaises(SystemExit):
parser.parse('copyToLocal some_dir'.split())
#two dirs
output = parser.parse('copyToLocal dir1 dir2'.split())
self.assertEqual(output.dir, ['dir1'])
self.assertEqual(output.single_arg, 'dir2')
self.assertEqual(output.checkcrc, False)
#specific commands
output = parser.parse('copyToLocal -checkcrc dir1 dir2'.split())
self.assertEqual(output.checkcrc, True)
def test_cp(self):
parser = self.parser
#no dir
with self.assertRaises(SystemExit):
parser.parse('cp'.split())
#one dir
with self.assertRaises(SystemExit):
parser.parse('cp some_dir'.split())
#multiple dirs
output = parser.parse('cp dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2'])
self.assertEqual(output.single_arg, 'dir3')
def test_get(self):
parser = self.parser
#no dir
with self.assertRaises(SystemExit):
parser.parse('get'.split())
#one dir
with self.assertRaises(SystemExit):
parser.parse('get some_dir'.split())
#multiple dirs
output = parser.parse('get dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2'])
self.assertEqual(output.single_arg, 'dir3')
#specific commands
output = parser.parse('get -checkcrc dir1 dir2'.split())
self.assertEqual(output.checkcrc, True)
def test_getmerge(self):
parser = self.parser
#no dir
with self.assertRaises(SystemExit):
parser.parse('getmerge'.split())
#one dir
with self.assertRaises(SystemExit):
parser.parse('getmerge some_dir'.split())
#two dirs
output = parser.parse('getmerge dir1 dir2'.split())
self.assertEqual(output.src_dst[0], 'dir1')
self.assertEqual(output.src_dst[1], 'dir2')
#multiple dirs
with self.assertRaises(SystemExit):
parser.parse('getmerge dir1 dir2 dir3'.split())
# def test_put(self):
# parser = self.parser
# #no dir
# with self.assertRaises(SystemExit):
# parser.parse('put'.split())
# #one dir
# with self.assertRaises(SystemExit):
# parser.parse('put some_dir'.split())
# #multiple dirs
# output = parser.parse('put dir1 dir2 dir3'.split())
# self.assertEqual(output.dir, ['dir1', 'dir2'])
# self.assertEqual(output.single_arg, 'dir3')
def test_tail(self):
parser = self.parser
#no dir
with self.assertRaises(SystemExit):
parser.parse('tail'.split())
#one dir
output = parser.parse('tail some_dir'.split())
self.assertEqual(output.single_arg, 'some_dir')
#multiple dirs
with self.assertRaises(SystemExit):
parser.parse('tail dir1 dir2'.split())
#specific commands
output = parser.parse('tail -f some_dir'.split())
self.assertTrue(output.append)
def test_text(self):
parser = self.parser
#no path
with self.assertRaises(SystemExit):
parser.parse('text'.split())
#one path
output = parser.parse('text some_file'.split())
self.assertEqual(output.command, 'text')
self.assertEqual(output.dir, ['some_file'])
#multiple paths
output = parser.parse('text dir1 dir2 dir3'.split())
self.assertEqual(output.dir, ['dir1', 'dir2', 'dir3'])
#specific commands
output = parser.parse('text -checkcrc dir1 dir2'.split())
self.assertEqual(output.checkcrc, True)
class MockParseArgs(object):
# dir is a list of directories
def __init__(self, dir=[],
single_arg=None,
command=None,
namenode=None,
port=None,
usetrash=False,
skiptrash=False):
self.dir = dir
self.single_arg = single_arg
self.command = command
self.namenode = namenode
self.port = port
self.usetrash = usetrash
self.skiptrash = skiptrash
def __contains__(self, b):
return b in self.__dict__
class CommandLineParserInternalConfigTest(unittest2.TestCase):
def setUp(self):
self.parser = CommandLineParser()
self.default_dir = os.path.join("/user", pwd.getpwuid(os.getuid())[0])
def assert_namenode_spec(self, host, port, version=None):
self.assertEqual(self.parser.args.namenode, host)
self.assertEqual(self.parser.args.port, port)
if version:
self.assertEqual(self.parser.args.version, version)
def assert_namenodes_spec(self, host, port, version=None):
for namenode in self.parser.namenodes:
try:
self.assertEqual(namenode.host, host)
self.assertEqual(namenode.port, port)
if version:
self.assertEqual(namenode.version, version)
except AssertionError:
continue
# There was no AssertError -> we found our NN
return
self.fail("NN not found in namenodes")
def test_cl_config_conflicted(self):
self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070/user/rav",
"hdfs://foobar2:50070/user/rav"])
with self.assertRaises(SystemExit):
self.parser.read_config()
self.parser.args = MockParseArgs(dir=["hdfs://foobar:50071/user/rav",
"hdfs://foobar:50070/user/rav"])
with self.assertRaises(SystemExit):
self.parser.read_config()
self.parser.args = MockParseArgs(dir=["hdfs://foobar:50072/user/rav",
"hdfs://foobar2:50070/user/rav"])
with self.assertRaises(SystemExit):
self.parser.read_config()
self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070/user/rav",
"hdfs://foobar:50070/user/rav"],
single_arg="hdfs://foobar2:50070/user/rav",
command="mv")
with self.assertRaises(SystemExit):
self.parser.read_config()
def test_cl_config_simple(self):
self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070/user/rav",
"hdfs://foobar:50070/user/rav2"])
self.parser.read_config()
self.assert_namenode_spec("foobar", 50070)
self.assert_namenodes_spec("foobar", 50070)
self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070/user/rav",
"hdfs://foobar:50070/user/rav2"],
single_arg="hdfs://foobar:50070/user/rav",
command="mv")
self.parser.read_config()
self.assert_namenode_spec("foobar", 50070)
self.assert_namenodes_spec("foobar", 50070)
def test_cl_config_slash_madness_check_scheme(self):
self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070///user//rav",
"hdfs://foobar:50070/user/////rav2"])
self.parser.read_config()
self.assert_namenode_spec("foobar", 50070)
self.assert_namenodes_spec("foobar", 50070)
self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070/user////rav",
"hdfs://foobar:50070////user/rav2"],
single_arg="hdfs://foobar:50070/////user/rav",
command="mv")
self.parser.read_config()
self.assert_namenode_spec("foobar", 50070)
self.assert_namenodes_spec("foobar", 50070)
def test_cl_config_slash_madness_full_check(self):
self.parser.args = MockParseArgs(dir=["hdfs://foobar/user////rav",
"hdfs://foobar////user/rav2"],
single_arg="hdfs://foobar/////user/rav",
command="mv")
self.parser.init()
self.assert_namenode_spec("foobar", Namenode.DEFAULT_PORT)
self.assert_namenodes_spec("foobar", Namenode.DEFAULT_PORT)
self.assertIn("/user////rav", self.parser.args.dir)
self.assertIn("////user/rav2", self.parser.args.dir)
self.assertEqual(self.parser.args.single_arg, "/////user/rav")
def test_cl_config_reduce_paths(self):
self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070/user/rav",
"hdfs://foobar:50070/user/rav2"],
single_arg="hdfs://foobar:50070/user/rav3",
command="mv")
self.parser.init()
self.assert_namenode_spec("foobar", 50070)
self.assertIn("/user/rav", self.parser.args.dir)
self.assertIn("/user/rav2", self.parser.args.dir)
self.assertEqual(self.parser.args.single_arg, "/user/rav3")
def test_cl_config_test_single_arg_hdfs_paths(self):
self.parser.args = MockParseArgs(single_arg="hdfs://foobar:50070/user/rav3",
command="test")
self.parser.init()
self.assert_namenode_spec("foobar", 50070)
self.assertEqual(self.parser.args.single_arg, "/user/rav3")
def test_cl_config_tail_single_arg_hdfs_paths(self):
self.parser.args = MockParseArgs(single_arg="hdfs://foobar:50070/user/rav3",
command="tail")
self.parser.init()
self.assert_namenode_spec("foobar", 50070)
self.assertEqual(self.parser.args.single_arg, "/user/rav3")
def test_cl_config_mv_single_arg_hdfs_paths(self):
self.parser.args = MockParseArgs(single_arg="hdfs://foobar:50070/user/rav3",
command="mv")
self.parser.init()
self.assert_namenode_spec("foobar", 50070)
self.assertEqual(self.parser.args.single_arg, "/user/rav3")
import snakebite.config
@patch.object(snakebite.config.HDFSConfig, 'get_external_config')
@patch("snakebite.commandlineparser.CommandLineParser._read_config_snakebiterc", return_value=None)
def test_config_no_config(self, config_mock, read_config_mock):
hadoop_home = None
config_mock.return_value = []
if os.environ.get("HADOOP_HOME"):
hadoop_home = os.environ["HADOOP_HOME"]
del os.environ["HADOOP_HOME"]
self.parser.args = MockParseArgs()
with self.assertRaises(SystemExit):
self.parser.read_config()
if hadoop_home:
os.environ["HADOOP_HOME"] = hadoop_home
self.assert_namenode_spec(None, None)
valid_snake_one_rc = {"namenode": "foobar", "version": 9, "port": 54310}
valid_snake_ha_rc = [{"namenode": "foobar", "version": 9, "port": 54310},
{"namenode": "foobar2", "version": 9, "port": 54310}]
invalid_snake_rc = "hdfs://foobar:54310"
@patch("os.path.exists")
def test_read_config_snakebiterc_one_valid(self, exists_mock):
m = mock_open(read_data=json.dumps(self.valid_snake_one_rc))
with patch("snakebite.commandlineparser.open", m, create=True):
self.parser.args = MockParseArgs()
self.parser.read_config()
self.assert_namenodes_spec("foobar", 54310, 9)
self.assertEquals(self.parser.args.usetrash, HDFSConfig.use_trash)
@patch("os.path.exists")
def test_read_config_snakebiterc_ha_valid(self, exists_mock):
m = mock_open(read_data=json.dumps(self.valid_snake_ha_rc))
with patch("snakebite.commandlineparser.open", m, create=True):
self.parser.args = MockParseArgs()
self.parser.read_config()
self.assert_namenodes_spec("foobar", 54310, 9)
self.assert_namenodes_spec("foobar2", 54310, 9)
self.assertEquals(self.parser.args.usetrash, HDFSConfig.use_trash)
@patch("os.path.exists")
def test_read_config_snakebiterc_invalid(self, exists_mock):
m = mock_open(read_data=json.dumps(self.invalid_snake_rc))
with patch("snakebite.commandlineparser.open", m, create=True):
self.parser.args = MockParseArgs()
with self.assertRaises(SystemExit):
self.parser.read_config()
valid_snake_noport_one_rc = {"namenode": "foobar", "version": 11}
valid_snake_noport_ha_rc = [{"namenode": "foobar", "version": 100},
{"namenode": "foobar2", "version": 100}]
@patch("os.path.exists")
def test_read_config_snakebiterc_noport_one_valid(self, exists_mock):
m = mock_open(read_data=json.dumps(self.valid_snake_noport_one_rc))
with patch("snakebite.commandlineparser.open", m, create=True):
self.parser.args = MockParseArgs()
self.parser.read_config()
self.assert_namenodes_spec("foobar", Namenode.DEFAULT_PORT, 11)
self.assertEquals(self.parser.args.usetrash, HDFSConfig.use_trash)
@patch("os.path.exists")
def test_read_config_snakebiterc_noport_ha_valid(self, exists_mock):
m = mock_open(read_data=json.dumps(self.valid_snake_noport_ha_rc))
with patch("snakebite.commandlineparser.open", m, create=True):
self.parser.args = MockParseArgs()
self.parser.read_config()
self.assert_namenodes_spec("foobar", Namenode.DEFAULT_PORT, 100)
self.assert_namenodes_spec("foobar2", Namenode.DEFAULT_PORT, 100)
self.assertEquals(self.parser.args.usetrash, HDFSConfig.use_trash)
valid_snake_noport_nov_one_rc = {"namenode": "foobar"}
valid_snake_noport_nov_ha_rc = [{"namenode": "foobar"},
{"namenode": "foobar2"}]
@patch("os.path.exists")
def test_read_config_snakebiterc_noport_nov_one_valid(self, exists_mock):
m = mock_open(read_data=json.dumps(self.valid_snake_noport_nov_one_rc))
with patch("snakebite.commandlineparser.open", m, create=True):
self.parser.args = MockParseArgs()
self.parser.read_config()
self.assert_namenodes_spec("foobar", Namenode.DEFAULT_PORT, Namenode.DEFAULT_VERSION)
self.assertEquals(self.parser.args.usetrash, HDFSConfig.use_trash)
@patch("os.path.exists")
def test_read_config_snakebiterc_noport_nov_ha_valid(self, exists_mock):
m = mock_open(read_data=json.dumps(self.valid_snake_noport_nov_ha_rc))
with patch("snakebite.commandlineparser.open", m, create=True):
self.parser.args = MockParseArgs()
self.parser.read_config()
self.assert_namenodes_spec("foobar", Namenode.DEFAULT_PORT, Namenode.DEFAULT_VERSION)
self.assert_namenodes_spec("foobar2", Namenode.DEFAULT_PORT, Namenode.DEFAULT_VERSION)
self.assertEquals(self.parser.args.usetrash, HDFSConfig.use_trash)
valid_snake_noport_mix_rc = [{"namenode": "foobar", "version": 100},
{"namenode": "foobar2", "port": 66}]
@patch("os.path.exists")
def test_read_config_snakebiterc_noport_mix_valid(self, exists_mock):
m = mock_open(read_data=json.dumps(self.valid_snake_noport_mix_rc))
with patch("snakebite.commandlineparser.open", m, create=True):
self.parser.args = MockParseArgs()
self.parser.read_config()
self.assert_namenodes_spec("foobar", Namenode.DEFAULT_PORT, 100)
self.assert_namenodes_spec("foobar2", 66, Namenode.DEFAULT_VERSION)
self.assertEquals(self.parser.args.usetrash, HDFSConfig.use_trash)
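    # Version-2 config format fixtures: a top-level dict carrying "config_version",
    # "use_trash", and a "namenodes" list of {"host", "port", "version"} entries;
    # omitted port/version values again fall back to the Namenode defaults.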
valid_snake_one_rc_v2 = {
"config_version": 2,
"use_trash": False,
"namenodes": [
{"host": "foobar3", "version": 9, "port": 54310}
]
}
valid_snake_ha_rc_v2 = {
"config_version": 2,
"use_trash": True,
"namenodes": [
{"host": "foobar4", "version": 9, "port": 54310},
{"host": "foobar5", "version": 9, "port": 54310}
]
}
invalid_snake_rc_v2 = "hdfs://foobar:54310"
@patch("os.path.exists")
def test_read_config_snakebiterc_one_valid_v2(self, exists_mock):
m = mock_open(read_data=json.dumps(self.valid_snake_one_rc_v2))
with patch("snakebite.commandlineparser.open", m, create=True):
self.parser.args = MockParseArgs()
self.parser.read_config()
self.assertFalse(self.parser.args.usetrash)
self.assert_namenodes_spec("foobar3", 54310, 9)
@patch("os.path.exists")
def test_read_config_snakebiterc_ha_valid_v2(self, exists_mock):
m = mock_open(read_data=json.dumps(self.valid_snake_ha_rc_v2))
with patch("snakebite.commandlineparser.open", m, create=True):
self.parser.args = MockParseArgs()
self.parser.read_config()
self.assertTrue(self.parser.args.usetrash)
self.assert_namenodes_spec("foobar4", 54310, 9)
self.assert_namenodes_spec("foobar5", 54310, 9)
@patch("os.path.exists")
def test_read_config_snakebiterc_invalid_v2(self, exists_mock):
m = mock_open(read_data=json.dumps(self.invalid_snake_rc_v2))
with patch("snakebite.commandlineparser.open", m, create=True):
self.parser.args = MockParseArgs()
with self.assertRaises(SystemExit):
self.parser.read_config()
valid_snake_noport_one_rc_v2 = {
"config_version": 2,
"use_trash": False,
"namenodes": [
{"host": "foobar3", "version": 9}
]
}
valid_snake_mix_ha_rc_v2 = {
"config_version": 2,
"use_trash": True,
"namenodes": [
{"host": "foobar4", "version": 100},
{"host": "foobar5", "port": 54310}
]
}
@patch("os.path.exists")
def test_read_config_snakebiterc_noport_one_valid_v2(self, exists_mock):
m = mock_open(read_data=json.dumps(self.valid_snake_noport_one_rc_v2))
with patch("snakebite.commandlineparser.open", m, create=True):
self.parser.args = MockParseArgs()
self.parser.read_config()
self.assertFalse(self.parser.args.usetrash)
self.assert_namenodes_spec("foobar3", Namenode.DEFAULT_PORT, 9)
@patch("os.path.exists")
def test_read_config_snakebiterc_mix_ha_valid_v2(self, exists_mock):
m = mock_open(read_data=json.dumps(self.valid_snake_mix_ha_rc_v2))
with patch("snakebite.commandlineparser.open", m, create=True):
self.parser.args = MockParseArgs()
self.parser.read_config()
self.assertTrue(self.parser.args.usetrash)
self.assert_namenodes_spec("foobar4", Namenode.DEFAULT_PORT, 100)
self.assert_namenodes_spec("foobar5", 54310, Namenode.DEFAULT_VERSION)
def test_cl_default_port(self):
self.parser.args = MockParseArgs(dir=["hdfs://foobar/user/rav"],
single_arg="hdfs://foobar/user/rav",
command="mv")
self.parser.read_config()
self.assert_namenode_spec("foobar", Namenode.DEFAULT_PORT)
def test_cl_trash_setting_preserved_after_cl_config(self):
# no snakebiterc
# read config from CL
self.parser.args = MockParseArgs(dir=["hdfs://foobar:50070/user/rav"],
skiptrash=True,
command="rm")
self.parser.read_config()
self.assert_namenode_spec("foobar", 50070)
self.assert_namenodes_spec("foobar", 50070)
self.assertEquals(self.parser.args.skiptrash, True)
def _revert_hdfs_try_paths(self):
# Make sure HDFSConfig is in vanilla state
HDFSConfig.use_trash = False
HDFSConfig.hdfs_try_paths = ConfigTest.original_hdfs_try_path
HDFSConfig.core_try_paths = ConfigTest.original_core_try_path
@patch("os.path.exists")
def test_cl_trash_setting_preserved_after_snakebiterc_one_valid(self, exists_mock):
m = mock_open(read_data=json.dumps(self.valid_snake_one_rc))
with patch("snakebite.commandlineparser.open", m, create=True):
self.parser.args = MockParseArgs(usetrash=True)
self.parser.read_config()
self.assert_namenodes_spec("foobar", 54310, 9)
self.assertTrue(self.parser.args.usetrash)
@patch('os.environ.get')
def test_cl_usetrash_setting_preserved_after_external_nontrash_config(self, environ_get):
environ_get.return_value = False
# no snakebiterc
# read external config (hdfs-site, core-site)
self.parser.args = MockParseArgs(dir=["/user/rav/test"],
usetrash=True,
command="rm")
try:
HDFSConfig.core_try_paths = (ConfigTest.get_config_path('ha-core-site.xml'),)
HDFSConfig.hdfs_try_paths = (ConfigTest.get_config_path('ha-noport-hdfs-site.xml'),)
self.parser.init()
self.assertTrue(self.parser.args.usetrash)
self.assertTrue(self.parser.client.use_trash)
finally:
self._revert_hdfs_try_paths()
@patch('os.environ.get')
def test_cl_skiptrash_setting_preserved_after_external_nontrash_config(self, environ_get):
environ_get.return_value = False
# no snakebiterc
# read external config (hdfs-site, core-site)
self.parser.args = MockParseArgs(dir=["/user/rav/test"],
skiptrash=True,
usetrash=True,
command="rm")
try:
HDFSConfig.core_try_paths = (ConfigTest.get_config_path('ha-core-site.xml'),)
HDFSConfig.hdfs_try_paths = (ConfigTest.get_config_path('ha-noport-hdfs-site.xml'),)
self.parser.init()
self.assertTrue(self.parser.args.skiptrash)
self.assertTrue(self.parser.args.usetrash)
self.assertFalse(self.parser.client.use_trash)
finally:
self._revert_hdfs_try_paths()
class CommandLineParserExecuteTest(unittest2.TestCase):
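    # Regression test: re-raising inside execute() must preserve the original
    # traceback so the failing subboom() frame still appears in the report.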
def test_execute_does_not_swallow_tracebacks(self):
with patch.dict(Commands.methods, clear=True):
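            # Python 2: im_func retrieves the plain function behind the unbound
            # "command" method so it can be applied as a decorator here.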
@CommandLineParser.command.im_func()
def boom(*args, **kwargs):
def subboom():
raise IndexError("Boom!")
subboom()
parser = CommandLineParser()
parser.parse(["boom"])
try:
parser.execute()
except IndexError:
_, _, exc_traceback = sys.exc_info()
self.assertIn(
"subboom()\n",
traceback.format_exc(),
msg="Lost some stack frames when re-raising!",
)
else:
self.fail("execute() should have raised an IndexError!")
| apache-2.0 | 6,454,102,346,488,941,000 | 36.551598 | 114 | 0.582821 | false |