repo_name (stringlengths 5-100) | path (stringlengths 4-299) | copies (stringclasses, 990 values) | size (stringlengths 4-7) | content (stringlengths 666-1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
stuckj/dupeguru | core_pe/tests/cache_test.py | 1 | 4134 | # Created By: Virgil Dupras
# Created On: 2006/09/14
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
import logging
from pytest import raises, skip
from hscommon.testutil import eq_
try:
from ..cache import Cache, colors_to_string, string_to_colors
except ImportError:
skip("Can't import the cache module, probably hasn't been compiled.")
class TestCasecolors_to_string:
def test_no_color(self):
eq_('',colors_to_string([]))
def test_single_color(self):
eq_('000000',colors_to_string([(0,0,0)]))
eq_('010101',colors_to_string([(1,1,1)]))
eq_('0a141e',colors_to_string([(10,20,30)]))
def test_two_colors(self):
eq_('000102030405',colors_to_string([(0,1,2),(3,4,5)]))
class TestCasestring_to_colors:
def test_empty(self):
eq_([],string_to_colors(''))
def test_single_color(self):
eq_([(0,0,0)],string_to_colors('000000'))
eq_([(2,3,4)],string_to_colors('020304'))
eq_([(10,20,30)],string_to_colors('0a141e'))
def test_two_colors(self):
eq_([(10,20,30),(40,50,60)],string_to_colors('0a141e28323c'))
def test_incomplete_color(self):
# don't return anything if it's not a complete color
eq_([],string_to_colors('102'))
class TestCaseCache:
def test_empty(self):
c = Cache()
eq_(0,len(c))
with raises(KeyError):
c['foo']
def test_set_then_retrieve_blocks(self):
c = Cache()
b = [(0,0,0),(1,2,3)]
c['foo'] = b
eq_(b,c['foo'])
def test_delitem(self):
c = Cache()
c['foo'] = ''
del c['foo']
assert 'foo' not in c
with raises(KeyError):
del c['foo']
def test_persistance(self, tmpdir):
DBNAME = tmpdir.join('hstest.db')
c = Cache(str(DBNAME))
c['foo'] = [(1,2,3)]
del c
c = Cache(str(DBNAME))
eq_([(1,2,3)],c['foo'])
def test_filter(self):
c = Cache()
c['foo'] = ''
c['bar'] = ''
c['baz'] = ''
c.filter(lambda p:p != 'bar') #only 'bar' is removed
eq_(2,len(c))
assert 'foo' in c
assert 'baz' in c
assert 'bar' not in c
def test_clear(self):
c = Cache()
c['foo'] = ''
c['bar'] = ''
c['baz'] = ''
c.clear()
eq_(0,len(c))
assert 'foo' not in c
assert 'baz' not in c
assert 'bar' not in c
def test_corrupted_db(self, tmpdir, monkeypatch):
# If we don't do this monkeypatching, we get a weird exception about trying to flush a
# closed file. I've tried setting logging level and stuff, but nothing worked. So, there we
# go, a dirty monkeypatch.
monkeypatch.setattr(logging, 'warning', lambda *args, **kw: None)
dbname = str(tmpdir.join('foo.db'))
fp = open(dbname, 'w')
fp.write('invalid sqlite content')
fp.close()
c = Cache(dbname) # should not raise a DatabaseError
c['foo'] = [(1, 2, 3)]
del c
c = Cache(dbname)
eq_(c['foo'], [(1, 2, 3)])
def test_by_id(self):
# it's possible to use the cache by referring to the files by their row_id
c = Cache()
b = [(0,0,0),(1,2,3)]
c['foo'] = b
foo_id = c.get_id('foo')
eq_(c[foo_id], b)
class TestCaseCacheSQLEscape:
def test_contains(self):
c = Cache()
assert "foo'bar" not in c
def test_getitem(self):
c = Cache()
with raises(KeyError):
c["foo'bar"]
def test_setitem(self):
c = Cache()
c["foo'bar"] = []
def test_delitem(self):
c = Cache()
c["foo'bar"] = []
try:
del c["foo'bar"]
except KeyError:
assert False
| gpl-3.0 | -8,547,917,785,342,214,000 | 27.510345 | 99 | 0.52806 | false |
geekboxzone/mmallow_prebuilts_gcc_darwin-x86_x86_x86_64-linux-android-4.9 | share/gdb/python/gdb/command/type_printers.py | 126 | 4424 | # Type printer commands.
# Copyright (C) 2010-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import copy
import gdb
"""GDB commands for working with type-printers."""
class InfoTypePrinter(gdb.Command):
"""GDB command to list all registered type-printers.
Usage: info type-printers
"""
def __init__ (self):
super(InfoTypePrinter, self).__init__("info type-printers",
gdb.COMMAND_DATA)
def list_type_printers(self, type_printers):
"""Print a list of type printers."""
# A potential enhancement is to provide an option to list printers in
# "lookup order" (i.e. unsorted).
sorted_type_printers = sorted (copy.copy(type_printers),
key = lambda x: x.name)
for printer in sorted_type_printers:
if printer.enabled:
enabled = ''
else:
enabled = " [disabled]"
print (" %s%s" % (printer.name, enabled))
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
sep = ''
for objfile in gdb.objfiles():
if objfile.type_printers:
print ("%sType printers for %s:" % (sep, objfile.name))
self.list_type_printers(objfile.type_printers)
sep = '\n'
if gdb.current_progspace().type_printers:
print ("%sType printers for program space:" % sep)
self.list_type_printers(gdb.current_progspace().type_printers)
sep = '\n'
if gdb.type_printers:
print ("%sGlobal type printers:" % sep)
self.list_type_printers(gdb.type_printers)
class _EnableOrDisableCommand(gdb.Command):
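    """Shared base class; `setting` is the boolean written to `printer.enabled`."""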
def __init__(self, setting, name):
super(_EnableOrDisableCommand, self).__init__(name, gdb.COMMAND_DATA)
self.setting = setting
def set_some(self, name, printers):
result = False
for p in printers:
if name == p.name:
p.enabled = self.setting
result = True
return result
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
for name in arg.split():
ok = False
for objfile in gdb.objfiles():
if self.set_some(name, objfile.type_printers):
ok = True
if self.set_some(name, gdb.current_progspace().type_printers):
ok = True
if self.set_some(name, gdb.type_printers):
ok = True
if not ok:
print ("No type printer named '%s'" % name)
def add_some(self, result, word, printers):
for p in printers:
if p.name.startswith(word):
result.append(p.name)
def complete(self, text, word):
result = []
for objfile in gdb.objfiles():
self.add_some(result, word, objfile.type_printers)
self.add_some(result, word, gdb.current_progspace().type_printers)
self.add_some(result, word, gdb.type_printers)
return result
class EnableTypePrinter(_EnableOrDisableCommand):
"""GDB command to enable the specified type printer.
Usage: enable type-printer NAME
NAME is the name of the type-printer.
"""
def __init__(self):
super(EnableTypePrinter, self).__init__(True, "enable type-printer")
class DisableTypePrinter(_EnableOrDisableCommand):
"""GDB command to disable the specified type-printer.
Usage: disable type-printer NAME
NAME is the name of the type-printer.
"""
def __init__(self):
super(DisableTypePrinter, self).__init__(False, "disable type-printer")
InfoTypePrinter()
EnableTypePrinter()
DisableTypePrinter()
| gpl-2.0 | -8,285,277,077,054,738,000 | 34.392 | 79 | 0.605561 | false |
symmetricapi/django-symmetric | symmetric/management/generatemodels.py | 1 | 11905 | from importlib import import_module
from optparse import make_option
import os
from django.apps import apps
from django.conf import settings
from django.core.management.base import CommandError
from django.db.models.fields import NOT_PROVIDED, TimeField, DateField
from django.db.models.fields.related import ForeignKey
from django.template import Template, Context
import symmetric.management.overrides
from symmetric.functions import _ApiModel, underscore_to_camel_case
from symmetric.management.functions import get_base_classes, get_base_models, get_base_model, get_field, has_field
from symmetric.management.translate import translate_code
from symmetric.models import get_related_model
from symmetric.views import ApiAction, ApiRequirement, BasicApiView, api_view
get_model = apps.get_model
class GenerateModelsCommand(object):
option_list = (
make_option(
'--prefix',
type='string',
dest='prefix',
default='',
help='Prefix to add to each class name and file name.',
),
make_option(
'--dest',
type='string',
dest='dest',
help='Output the all detected models from api endpoints and render them into this destination directory.',
),
make_option(
'--exclude',
type='string',
dest='exclude',
action='append',
help='Do not output anything for the models specified.',
),
make_option(
'--indent',
dest='indent',
type='int',
default=2,
help='Each tab should instead indent with this number of spaces or 0 for hard tabs.',
),
)
def get_include_related_models(self, model):
related_models = set()
if hasattr(model, 'API') and hasattr(model.API, 'include_related'):
include_related = model.API.include_related
for field in model._meta.fields:
if field.name in include_related:
related_models.add(get_related_model(field))
related_models |= self.get_include_related_models(get_related_model(field))
return related_models
def post_render(self, output):
if self.indent:
return output.replace('\t', ' ' * self.indent)
return output
def base_extra_context(self, model, api_model):
has_date = False
has_bool = False
datetime_fields = []
primary_field = None
if api_model.id_field:
primary_field = api_model.id_field[1]
base = get_base_model(model)
base_name = None
if base:
base_name = base.__name__
for decoded_name, encoded_name, encode, decode in api_model.fields:
if has_field(base, decoded_name):
continue
field = get_field(model, decoded_name)
field_type = field.__class__.__name__
if field_type == 'DateTimeField' or field_type == 'DateField':
has_date = True
datetime_fields.append((encoded_name, encoded_name[0].upper() + encoded_name[1:]))
elif field_type == 'BooleanField':
has_bool = True
if not primary_field and field.primary_key:
primary_field = encoded_name
return {'prefix': self.prefix, 'base_name': base_name, 'name': model.__name__, 'name_lower': model.__name__[0].lower() + model.__name__[1:], 'has_date': has_date, 'has_bool': has_bool, 'primary_field': primary_field, 'datetime_fields': datetime_fields}
def perform_mapping(self, mapping, format_context):
if callable(mapping):
# callable method
return mapping(format_context)
elif isinstance(mapping, Template):
# django template
return mapping.render(Context(format_context, autoescape=False))
else:
# normal python string formatting
return mapping.format(**format_context)
def get_context(self, model):
api_model = _ApiModel(model)
context = self.base_extra_context(model, api_model)
if hasattr(self, 'extra_context'):
context.update(self.extra_context(model, api_model))
# Loop over the mappings
for mapping_name in self.mappings:
mapping = self.mappings[mapping_name]
write_only = False
if isinstance(mapping, dict):
write_only = mapping.get('WriteOnly', False)
lines = []
for decoded_name, encoded_name, encode, decode in api_model.fields:
field = get_field(model, decoded_name)
# Skip any field that is not directly on model and is not the primary id field (which could be on the base too)
if field.model is not model and encoded_name != context['primary_field']:
continue
# Skip any ptr field to base models
if decoded_name.endswith('_ptr_id'):
continue
include_related = hasattr(model, 'API') and hasattr(model.API, 'include_related') and field.name in model.API.include_related
included_readonly = False
included_obj_name = ''
if write_only and encoded_name not in api_model.encoded_fields:
# Skip readonly fields, but make an exception for included foreign keys, see Included Objects in the documentation
if isinstance(field, ForeignKey) and include_related:
included_obj_name = encoded_name
encoded_name += 'Id' if self.camelcase else '_id'
included_readonly = True
else:
continue
line = None
classes = [field.__class__] + get_base_classes(field.__class__)
for cls in classes:
field_type = cls.__name__
if callable(mapping):
line = mapping(model, encoded_name, field)
elif mapping.has_key(field_type):
format_context = {'name': encoded_name, 'null': field.null}
if field.default is not NOT_PROVIDED and not isinstance(field, (TimeField, DateField)):
# Only supply default values for non-date/time fields, it will be easier to just add these after manually
format_context['default'] = field.default
if include_related:
format_context['included'] = True
format_context['included_readonly'] = included_readonly
format_context['included_obj_name'] = included_obj_name
format_context['included_name'] = get_related_model(field).__name__
line = self.perform_mapping(mapping[field_type], format_context)
if line is not None:
break
if line is None:
raise CommandError("No such mapping for %s in %s." % (field_type, mapping_name))
elif line:
lines += line.split('\n')
context[mapping_name] = lines
# Translate the api properties
if hasattr(self, 'property_declarations') or hasattr(self, 'property_implementations'):
decl_lines = []
impl_lines = []
property_transformer = getattr(self, 'property_transformer', None)
for name in model.__dict__:
attr = model.__dict__[name]
if type(attr) is property and attr.fget and hasattr(attr.fget, 'api_code'):
if getattr(attr.fget, 'api_translations', None) and attr.fget.api_translations.has_key(self.lang):
code = attr.fget.api_translations[self.lang]
else:
code = translate_code(attr.fget.api_code, self.lang, (property_transformer(model) if property_transformer else None))
format_context = {'name': name if not self.camelcase else underscore_to_camel_case(name), 'type': self.property_types[attr.fget.api_type], 'code': code}
format_context['name_upper'] = format_context['name'][0].upper() + format_context['name'][1:]
if hasattr(self, 'property_declarations'):
line = self.perform_mapping(self.property_declarations, format_context)
decl_lines += line.split('\n')
if hasattr(self, 'property_implementations'):
line = self.perform_mapping(self.property_implementations, format_context)
impl_lines += line.split('\n')
if decl_lines:
context['property_declarations'] = decl_lines
if impl_lines:
context['property_implementations'] = impl_lines
return context
def enum_patterns(self, patterns):
for pattern in patterns:
if pattern.callback:
if isinstance(pattern.callback, (api_view, BasicApiView)) and pattern.callback.model:
self.models.add(pattern.callback.model)
self.models |= self.get_include_related_models(pattern.callback.model)
else:
self.enum_patterns(pattern.url_patterns)
def expand_mappings(self, field, *expanded_fields):
for mapping in self.mappings.values():
for key, value in mapping.items():
if key == field:
for expanded_field in expanded_fields:
if not mapping.has_key(expanded_field):
mapping[expanded_field] = mapping[field]
break
def render(self, *args, **options):
self.camelcase = getattr(settings, 'API_CAMELCASE', True)
self.prefix = options['prefix']
self.indent = options['indent']
if not hasattr(self, 'templates'):
raise CommandError('No templates set!')
if options and options['dest']:
try:
os.makedirs(options['dest'])
except:
print 'Warning: Overwriting any contents in %s' % options['dest']
self.models = set()
module = import_module(settings.ROOT_URLCONF)
self.enum_patterns(module.urlpatterns)
# Add any base models to the set
base_models = set()
for model in self.models:
base_models |= set(get_base_models(model))
self.models |= base_models
for model in self.models:
if options['exclude'] and model.__name__ in options['exclude']:
continue
context = self.get_context(model)
for i in range(len(self.templates)):
template = self.templates[i]
template_extension = self.template_extensions[i]
path = os.path.join(options['dest'], '%s%s.%s' % (self.prefix, model.__name__, template_extension))
print 'Rendering %s' % path
with open(path, 'w') as f:
f.write(self.post_render(template.render(Context(context, autoescape=False))))
elif args:
for model_name in args:
model = model_name.split('.')
model = get_model(model[0], model[1])
context = self.get_context(model)
for template in self.templates:
print self.post_render(template.render(Context(context, autoescape=False)))
else:
raise CommandError("No model or destination directory specified.")
| mit | 2,578,567,297,031,787,000 | 47.394309 | 260 | 0.563377 | false |
annarev/tensorflow | tensorflow/python/summary/writer/writer.py | 11 | 16949 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides an API for generating Event protocol buffers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import time
import warnings
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import plugin_asset
from tensorflow.python.summary.writer.event_file_writer import EventFileWriter
from tensorflow.python.summary.writer.event_file_writer_v2 import EventFileWriterV2
from tensorflow.python.util.tf_export import tf_export
_PLUGINS_DIR = "plugins"
class SummaryToEventTransformer(object):
"""Abstractly implements the SummaryWriter API.
This API basically implements a number of endpoints (add_summary,
add_session_log, etc). The endpoints all generate an event protobuf, which is
passed to the contained event_writer.
"""
def __init__(self, event_writer, graph=None, graph_def=None):
"""Creates a `SummaryWriter` and an event file.
On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).
TensorBoard will pick the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:
```python
...create a graph...
# Launch the graph in a session.
sess = tf.compat.v1.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.compat.v1.summary.FileWriter(<some-directory>, sess.graph)
```
Args:
event_writer: An EventWriter. Implements add_event and get_logdir.
graph: A `Graph` object, such as `sess.graph`.
graph_def: DEPRECATED: Use the `graph` argument instead.
"""
self.event_writer = event_writer
# For storing used tags for session.run() outputs.
self._session_run_tags = {}
if graph is not None or graph_def is not None:
# Calling it with both graph and graph_def for backward compatibility.
self.add_graph(graph=graph, graph_def=graph_def)
# Also export the meta_graph_def in this case.
# graph may itself be a graph_def due to positional arguments
maybe_graph_as_def = (graph.as_graph_def(add_shapes=True)
if isinstance(graph, ops.Graph) else graph)
self.add_meta_graph(
meta_graph.create_meta_graph_def(graph_def=graph_def or
maybe_graph_as_def))
# This set contains tags of Summary Values that have been encountered
# already. The motivation here is that the SummaryWriter only keeps the
# metadata property (which is a SummaryMetadata proto) of the first Summary
# Value encountered for each tag. The SummaryWriter strips away the
# SummaryMetadata for all subsequent Summary Values with tags seen
# previously. This saves space.
self._seen_summary_tags = set()
def add_summary(self, summary, global_step=None):
"""Adds a `Summary` protocol buffer to the event file.
This method wraps the provided summary in an `Event` protocol buffer
and adds it to the event file.
You can pass the result of evaluating any summary op, using
`tf.Session.run` or
`tf.Tensor.eval`, to this
function. Alternatively, you can pass a `tf.compat.v1.Summary` protocol
buffer that you populate with your own data. The latter is
commonly done to report evaluation results in event files.
Args:
summary: A `Summary` protocol buffer, optionally serialized as a string.
global_step: Number. Optional global step value to record with the
summary.
"""
if isinstance(summary, bytes):
summ = summary_pb2.Summary()
summ.ParseFromString(summary)
summary = summ
# We strip metadata from values with tags that we have seen before in order
# to save space - we just store the metadata on the first value with a
# specific tag.
for value in summary.value:
if not value.metadata:
continue
if value.tag in self._seen_summary_tags:
# This tag has been encountered before. Strip the metadata.
value.ClearField("metadata")
continue
# We encounter a value with a tag we have not encountered previously. And
# it has metadata. Remember to strip metadata from future values with this
# tag string.
self._seen_summary_tags.add(value.tag)
event = event_pb2.Event(summary=summary)
self._add_event(event, global_step)
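  # Illustrative use (assuming an existing `sess`, a merged summary op `merged`
  # and a `writer` built from this class):
  #   summ = sess.run(merged)            # serialized Summary proto (bytes)
  #   writer.add_summary(summ, global_step=step)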
def add_session_log(self, session_log, global_step=None):
"""Adds a `SessionLog` protocol buffer to the event file.
This method wraps the provided session in an `Event` protocol buffer
and adds it to the event file.
Args:
session_log: A `SessionLog` protocol buffer.
global_step: Number. Optional global step value to record with the
summary.
"""
event = event_pb2.Event(session_log=session_log)
self._add_event(event, global_step)
def _add_graph_def(self, graph_def, global_step=None):
graph_bytes = graph_def.SerializeToString()
event = event_pb2.Event(graph_def=graph_bytes)
self._add_event(event, global_step)
def add_graph(self, graph, global_step=None, graph_def=None):
"""Adds a `Graph` to the event file.
The graph described by the protocol buffer will be displayed by
TensorBoard. Most users pass a graph in the constructor instead.
Args:
graph: A `Graph` object, such as `sess.graph`.
global_step: Number. Optional global step counter to record with the
graph.
graph_def: DEPRECATED. Use the `graph` parameter instead.
Raises:
ValueError: If both graph and graph_def are passed to the method.
"""
if graph is not None and graph_def is not None:
raise ValueError("Please pass only graph, or graph_def (deprecated), "
"but not both.")
if isinstance(graph, ops.Graph) or isinstance(graph_def, ops.Graph):
# The user passed a `Graph`.
# Check if the user passed it via the graph or the graph_def argument and
# correct for that.
if not isinstance(graph, ops.Graph):
logging.warning("When passing a `Graph` object, please use the `graph`"
" named argument instead of `graph_def`.")
graph = graph_def
# Serialize the graph with additional info.
true_graph_def = graph.as_graph_def(add_shapes=True)
self._write_plugin_assets(graph)
elif (isinstance(graph, graph_pb2.GraphDef) or
isinstance(graph_def, graph_pb2.GraphDef)):
# The user passed a `GraphDef`.
logging.warning("Passing a `GraphDef` to the SummaryWriter is deprecated."
" Pass a `Graph` object instead, such as `sess.graph`.")
# Check if the user passed it via the graph or the graph_def argument and
# correct for that.
if isinstance(graph, graph_pb2.GraphDef):
true_graph_def = graph
else:
true_graph_def = graph_def
else:
# The user passed neither `Graph`, nor `GraphDef`.
raise TypeError("The passed graph must be an instance of `Graph` "
"or the deprecated `GraphDef`")
# Finally, add the graph_def to the summary writer.
self._add_graph_def(true_graph_def, global_step)
def _write_plugin_assets(self, graph):
plugin_assets = plugin_asset.get_all_plugin_assets(graph)
logdir = self.event_writer.get_logdir()
for asset_container in plugin_assets:
plugin_name = asset_container.plugin_name
plugin_dir = os.path.join(logdir, _PLUGINS_DIR, plugin_name)
gfile.MakeDirs(plugin_dir)
assets = asset_container.assets()
for (asset_name, content) in assets.items():
asset_path = os.path.join(plugin_dir, asset_name)
with gfile.Open(asset_path, "w") as f:
f.write(content)
def add_meta_graph(self, meta_graph_def, global_step=None):
"""Adds a `MetaGraphDef` to the event file.
The `MetaGraphDef` allows running the given graph via
`saver.import_meta_graph()`.
Args:
meta_graph_def: A `MetaGraphDef` object, often as returned by
`saver.export_meta_graph()`.
global_step: Number. Optional global step counter to record with the
graph.
Raises:
TypeError: If both `meta_graph_def` is not an instance of `MetaGraphDef`.
"""
if not isinstance(meta_graph_def, meta_graph_pb2.MetaGraphDef):
raise TypeError("meta_graph_def must be type MetaGraphDef, saw type: %s" %
type(meta_graph_def))
meta_graph_bytes = meta_graph_def.SerializeToString()
event = event_pb2.Event(meta_graph_def=meta_graph_bytes)
self._add_event(event, global_step)
def add_run_metadata(self, run_metadata, tag, global_step=None):
"""Adds a metadata information for a single session.run() call.
Args:
run_metadata: A `RunMetadata` protobuf object.
tag: The tag name for this metadata.
global_step: Number. Optional global step counter to record with the
StepStats.
Raises:
ValueError: If the provided tag was already used for this type of event.
"""
if tag in self._session_run_tags:
raise ValueError("The provided tag was already used for this event type")
self._session_run_tags[tag] = True
tagged_metadata = event_pb2.TaggedRunMetadata()
tagged_metadata.tag = tag
# Store the `RunMetadata` object as bytes in order to have postponed
# (lazy) deserialization when used later.
tagged_metadata.run_metadata = run_metadata.SerializeToString()
event = event_pb2.Event(tagged_run_metadata=tagged_metadata)
self._add_event(event, global_step)
def _add_event(self, event, step):
event.wall_time = time.time()
if step is not None:
event.step = int(step)
self.event_writer.add_event(event)
@tf_export(v1=["summary.FileWriter"])
class FileWriter(SummaryToEventTransformer):
"""Writes `Summary` protocol buffers to event files.
The `FileWriter` class provides a mechanism to create an event file in a
given directory and add summaries and events to it. The class updates the
file contents asynchronously. This allows a training program to call methods
to add data to the file directly from the training loop, without slowing down
training.
When constructed with a `tf.compat.v1.Session` parameter, a `FileWriter`
instead forms a compatibility layer over new graph-based summaries
to facilitate the use of new summary writing with
pre-existing code that expects a `FileWriter` instance.
This class is not thread-safe.
"""
def __init__(self,
logdir,
graph=None,
max_queue=10,
flush_secs=120,
graph_def=None,
filename_suffix=None,
session=None):
"""Creates a `FileWriter`, optionally shared within the given session.
Typically, constructing a file writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).
TensorBoard will pick the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:
```python
...create a graph...
# Launch the graph in a session.
sess = tf.compat.v1.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.compat.v1.summary.FileWriter(<some-directory>, sess.graph)
```
The `session` argument to the constructor makes the returned `FileWriter` a
compatibility layer over new graph-based summaries (`tf.summary`).
Crucially, this means the underlying writer resource and events file will
be shared with any other `FileWriter` using the same `session` and `logdir`.
In either case, ops will be added to `session.graph` to control the
underlying file writer resource.
Args:
logdir: A string. Directory where event file will be written.
graph: A `Graph` object, such as `sess.graph`.
max_queue: Integer. Size of the queue for pending events and summaries.
flush_secs: Number. How often, in seconds, to flush the
pending events and summaries to disk.
graph_def: DEPRECATED: Use the `graph` argument instead.
filename_suffix: A string. Every event file's name is suffixed with
`suffix`.
session: A `tf.compat.v1.Session` object. See details above.
Raises:
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
`v1.summary.FileWriter` is not compatible with eager execution.
To write TensorBoard summaries under eager execution,
use `tf.summary.create_file_writer` or
a `with v1.Graph().as_default():` context.
@end_compatibility
"""
if context.executing_eagerly():
raise RuntimeError(
"v1.summary.FileWriter is not compatible with eager execution. "
"Use `tf.summary.create_file_writer`,"
"or a `with v1.Graph().as_default():` context")
if session is not None:
event_writer = EventFileWriterV2(
session, logdir, max_queue, flush_secs, filename_suffix)
else:
event_writer = EventFileWriter(logdir, max_queue, flush_secs,
filename_suffix)
self._closed = False
super(FileWriter, self).__init__(event_writer, graph, graph_def)
def __enter__(self):
"""Make usable with "with" statement."""
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Make usable with "with" statement."""
self.close()
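  # The context-manager support above allows, for example:
  #   with tf.compat.v1.summary.FileWriter(logdir) as writer:
  #     writer.add_summary(summ, step)
  # which guarantees close() (and therefore a final flush) on exit.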
def get_logdir(self):
"""Returns the directory where event file will be written."""
return self.event_writer.get_logdir()
def _warn_if_event_writer_is_closed(self):
if self._closed:
warnings.warn("Attempting to use a closed FileWriter. "
"The operation will be a noop unless the FileWriter "
"is explicitly reopened.")
def _add_event(self, event, step):
self._warn_if_event_writer_is_closed()
super(FileWriter, self)._add_event(event, step)
def add_event(self, event):
"""Adds an event to the event file.
Args:
event: An `Event` protocol buffer.
"""
self._warn_if_event_writer_is_closed()
self.event_writer.add_event(event)
def flush(self):
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to
disk.
"""
# Flushing a closed EventFileWriterV2 raises an exception. It is,
# however, a noop for EventFileWriter.
self._warn_if_event_writer_is_closed()
self.event_writer.flush()
def close(self):
"""Flushes the event file to disk and close the file.
Call this method when you do not need the summary writer anymore.
"""
self.event_writer.close()
self._closed = True
def reopen(self):
"""Reopens the EventFileWriter.
Can be called after `close()` to add more events in the same directory.
The events will go into a new events file.
Does nothing if the EventFileWriter was not closed.
"""
self.event_writer.reopen()
self._closed = False
| apache-2.0 | -2,873,367,120,335,768,600 | 38.233796 | 83 | 0.681043 | false |
mxOBS/deb-pkg_trusty_chromium-browser | tools/memory_inspector/memory_inspector/classification/rules.py | 109 | 5039 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module defines the core structure of the classification rules.
This module does NOT specify how the rules filter the data: this responsibility
belongs to the concrete classifiers, which have to override the Rule class herein
defined and know how to do the math.
This module, instead, defines the format of the rules and the way they are
encoded and loaded (in a python-style dictionary file).
Rules are organized in a tree, where the root is always represented by a 'Total'
node, and the leaves are arbitrarily defined by the user, according to the
following principles:
- Order of sibling rules matters: what is caught by a rule will not be caught
by the next ones, but it is propagated to its children rules if any.
- Every non-leaf node X gets an implicit extra-children named X-other. This
catch-all child catches everything (within the parent rule scope) that is
not caught by the other siblings. This is to guarantee that, when doing the
math (the aggregation), at any level, the sum of the values in the leaves
matches the value of their parent.
The format of a rule dictionary is the following:
[
{
'name': 'Name of the rule',
'filter-X': 'The embedder will know how to interpret this value and will use
it to filter the data'
'filter-Y': 'Idem'
children: [
{
'name': 'Name of the sub-rule 1'
... and so on recursively ,
},
]
},
]
And a typical resulting rule tree looks like this:
                       +----------------------+
                       |        Total         |
                       |----------------------|
         +-------------+      Match all.      +-------------+
         |             +----------+-----------+             |
         |                        |                         |
   +-----v-----+            +-----v-----+             +------v----+
   |    Foo    |            |    Bar    |             |Total-other|
   |-----------|            |-----------|             |-----------|
   |File: foo* |      +-----+File: bar* +-----+       | Match all |
   +-----------+      |     +-----------+     |       +-----------+
                      |                       |
               +------v------+         +------v----+
               | Bar::Coffee |         | Bar-other |
               |-------------|         |-----------|
               |File: bar*cof|         | Match all |
               +-------------+         +-----------+
"""
import ast
def Load(content, rule_builder):
"""Construct a rule tree from a python-style dict representation.
Args:
content: a string containing the dict (i.e. content of the rule file).
rule_builder: a method which takes two arguments (rule_name, filters_dict)
and returns a subclass of |Rule|. |filters_dict| is a dict of the keys
(filter-foo, filter-bar in the example above) for the rule node.
"""
rules_dict = ast.literal_eval(content)
root = Rule('Total')
_MakeRuleNodeFromDictNode(root, rules_dict, rule_builder)
return root
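# Illustrative sketch of driving Load() (hypothetical rule_builder; the real
# builders live in the concrete classifiers and return Rule subclasses whose
# Match() does the actual filtering):
#
#   def build_rule(name, filters):
#     rule = Rule(name)
#     rule.filters = filters            # e.g. {'file': 'foo*'}
#     return rule
#
#   root = Load("[{'name': 'Foo', 'file': 'foo*'}]", build_rule)
#   # root.name == 'Total'; children: 'Foo' plus the implicit 'Total-other'.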
class Rule(object):
""" An abstract class representing a rule node in the rules tree.
Embedders must override the Match method when deriving this class.
"""
def __init__(self, name):
self.name = name
self.children = []
def Match(self, _): # pylint: disable=R0201
""" The rationale of this default implementation is modeling the root
('Total') and the catch-all (*-other) rules that every |RuleTree| must have,
regardless of the embedder-specific children rules. This is to guarantee
that the totals match at any level of the tree.
"""
return True
def AppendChild(self, child_rule):
assert(isinstance(child_rule, Rule))
duplicates = filter(lambda x: x.name == child_rule.name, self.children)
assert(not duplicates), 'Duplicate rule ' + child_rule.name
self.children.append(child_rule)
def _MakeRuleNodeFromDictNode(rule_node, dict_nodes, rule_builder):
"""Recursive rule tree builder for traversing the rule dict."""
for dict_node in dict_nodes:
assert('name' in dict_node)
# Extract the filter keys (e.g., mmap-file, mmap-prot) that will be passed
# to the |rule_builder|
filter_keys = set(dict_node.keys()) - set(('name', 'children'))
filters = dict((k, dict_node[k]) for k in filter_keys)
child_rule = rule_builder(dict_node['name'], filters)
rule_node.AppendChild(child_rule)
dict_children = dict_node.get('children', {})
_MakeRuleNodeFromDictNode(child_rule, dict_children, rule_builder)
# If the rule_node isn't a leaf, add the 'name-other' catch-all sibling to
# catch all the entries that matched this node but none of its children.
if len(rule_node.children):
    rule_node.AppendChild(Rule(rule_node.name + '-other'))
| bsd-3-clause | -4,886,667,247,460,531,000 | 41.352941 | 80 | 0.57432 | false |
hogarthj/ansible | hacking/fix_test_syntax.py | 135 | 3563 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2017, Matt Martz <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Purpose:
# The purpose of this script is to convert uses of tests as filters to proper jinja test syntax
# as part of https://github.com/ansible/proposals/issues/83
# Notes:
# This script is imperfect, but was close enough to "fix" all integration tests
# with the exception of:
#
# 1. One file needed manual remediation, where \\\\ was ultimately replace with \\ in 8 locations.
# 2. Multiple filter pipeline is unsupported. Example:
# var|string|search('foo')
# Which should be converted to:
# var|string is search('foo')
import argparse
import os
import re
from ansible.plugins.test import core, files, mathstuff
TESTS = list(core.TestModule().tests().keys()) + list(files.TestModule().tests().keys()) + list(mathstuff.TestModule().tests().keys())
TEST_MAP = {
'version_compare': 'version',
'is_dir': 'directory',
'is_file': 'file',
'is_link': 'link',
'is_abs': 'abs',
'is_same_file': 'same_file',
'is_mount': 'mount',
'issubset': 'subset',
'issuperset': 'superset',
'isnan': 'nan',
'succeeded': 'successful',
'success': 'successful',
'change': 'changed',
'skip': 'skipped',
}
FILTER_RE = re.compile(r'((.+?)\s*([\w \.\'"]+)(\s*)\|(\s*)(\w+))')
NOT_RE = re.compile(r'( ?)not ')
ASSERT_SPACE_RE = re.compile(r'- ([\'"])\s+')
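# FILTER_RE captures '<prefix> <operand>|<filter>' uses (e.g. "when: result|changed"),
# and NOT_RE strips a leading 'not ' from the operand, so the loop below can
# rewrite them as '<operand> is <test>' / '<operand> is not <test>'.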
parser = argparse.ArgumentParser()
parser.add_argument(
'path',
help='Path to a directory that will be recursively walked. All .yml and .yaml files will be evaluated '
'and uses of tests as filters will be conveted to proper jinja test syntax files to have test syntax '
'fixed'
)
args = parser.parse_args()
for root, dirs, filenames in os.walk(args.path):
for name in filenames:
if os.path.splitext(name)[1] not in ('.yml', '.yaml'):
continue
path = os.path.join(root, name)
print(path)
with open(path) as f:
text = f.read()
for match in FILTER_RE.findall(text):
filter_name = match[5]
is_not = match[2].strip(' "\'').startswith('not ')
try:
test_name = TEST_MAP[filter_name]
except KeyError:
test_name = filter_name
if test_name not in TESTS:
continue
if is_not:
before = NOT_RE.sub(r'\1', match[2]).rstrip()
text = re.sub(
re.escape(match[0]),
'%s %s is not %s' % (match[1], before, test_name,),
text
)
else:
text = re.sub(
re.escape(match[0]),
'%s %s is %s' % (match[1], match[2].rstrip(), test_name,),
text
)
with open(path, 'w+') as f:
f.write(text)
| gpl-3.0 | 2,247,590,909,063,366,100 | 30.530973 | 134 | 0.588268 | false |
liberorbis/libernext | env/lib/python2.7/site-packages/pip/exceptions.py | 123 | 1125 | """Exceptions used throughout package"""
from __future__ import absolute_import
class PipError(Exception):
"""Base pip exception"""
class InstallationError(PipError):
"""General exception during installation"""
class UninstallationError(PipError):
"""General exception during uninstallation"""
class DistributionNotFound(InstallationError):
"""Raised when a distribution cannot be found to satisfy a requirement"""
class BestVersionAlreadyInstalled(PipError):
"""Raised when the most up-to-date version of a package is already
installed. """
class BadCommand(PipError):
"""Raised when virtualenv or a command is not found"""
class CommandError(PipError):
"""Raised when there is an error in command-line arguments"""
class PreviousBuildDirError(PipError):
"""Raised when there's a previous conflicting build directory"""
class HashMismatch(InstallationError):
"""Distribution file hash values don't match."""
class InvalidWheelFilename(InstallationError):
"""Invalid wheel filename."""
class UnsupportedWheel(InstallationError):
"""Unsupported wheel."""
| gpl-2.0 | -457,518,654,800,270,100 | 22.93617 | 77 | 0.740444 | false |
ketjow4/NOV | Lib/site-packages/scipy/stats/info.py | 55 | 8282 | """
Statistical Functions
=====================
This module contains a large number of probability distributions as
well as a growing library of statistical functions.
Each included distribution is an instance of the class rv_continous.
For each given name the following methods are available. See docstring
for rv_continuous for more information
:rvs:
random variates with the distribution
:pdf:
probability density function
:cdf:
cumulative distribution function
:sf:
survival function (1.0 - cdf)
:ppf:
percent-point function (inverse of cdf)
:isf:
inverse survival function
:stats:
mean, variance, and optionally skew and kurtosis
Calling the instance as a function returns a frozen pdf whose shape,
location, and scale parameters are fixed.
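For example (an illustrative session):

    >>> from scipy.stats import norm, gamma
    >>> norm.cdf(0.0)                        # standard normal CDF at 0 -> 0.5
    >>> rv = gamma(2.0, loc=0.0, scale=3.0)  # a "frozen" gamma distribution
    >>> rv.mean(), rv.std()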
Distributions
---------------
The distributions available with the above methods are:
Continuous (Total == 81 distributions)
---------------------------------------
.. autosummary::
:toctree: generated/
norm Normal (Gaussian)
alpha Alpha
anglit Anglit
arcsine Arcsine
beta Beta
betaprime Beta Prime
bradford Bradford
burr Burr
cauchy Cauchy
chi Chi
chi2 Chi-squared
cosine Cosine
dgamma Double Gamma
dweibull Double Weibull
erlang Erlang
expon Exponential
exponweib Exponentiated Weibull
exponpow Exponential Power
f F (Snecdor F)
fatiguelife Fatigue Life (Birnbaum-Sanders)
fisk Fisk
foldcauchy Folded Cauchy
foldnorm Folded Normal
frechet_r Frechet Right Sided, Extreme Value Type II (Extreme LB) or weibull_min
frechet_l Frechet Left Sided, Weibull_max
genlogistic Generalized Logistic
genpareto Generalized Pareto
genexpon Generalized Exponential
genextreme Generalized Extreme Value
gausshyper Gauss Hypergeometric
gamma Gamma
gengamma Generalized gamma
genhalflogistic Generalized Half Logistic
gompertz Gompertz (Truncated Gumbel)
gumbel_r Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I
gumbel_l Left Sided Gumbel, etc.
halfcauchy Half Cauchy
halflogistic Half Logistic
halfnorm Half Normal
hypsecant Hyperbolic Secant
invgamma Inverse Gamma
invnorm Inverse Normal
invgauss Inverse Gaussian
invweibull Inverse Weibull
johnsonsb Johnson SB
johnsonsu Johnson SU
ksone Kolmogorov-Smirnov one-sided (no stats)
kstwobign Kolmogorov-Smirnov two-sided test for Large N (no stats)
laplace Laplace
logistic Logistic
loggamma Log-Gamma
loglaplace Log-Laplace (Log Double Exponential)
lognorm Log-Normal
gilbrat Gilbrat
lomax Lomax (Pareto of the second kind)
maxwell Maxwell
mielke Mielke's Beta-Kappa
nakagami Nakagami
ncx2 Non-central chi-squared
ncf Non-central F
nct Non-central Student's T
pareto Pareto
powerlaw Power-function
powerlognorm Power log normal
powernorm Power normal
rdist R distribution
reciprocal Reciprocal
rayleigh Rayleigh
rice Rice
recipinvgauss Reciprocal Inverse Gaussian
semicircular Semicircular
t Student's T
triang Triangular
truncexpon Truncated Exponential
truncnorm Truncated Normal
tukeylambda Tukey-Lambda
uniform Uniform
vonmises Von-Mises (Circular)
wald Wald
weibull_min Minimum Weibull (see Frechet)
weibull_max Maximum Weibull (see Frechet)
wrapcauchy Wrapped Cauchy
=============== ==============================================================
Discrete (Total == 10 distributions)
==============================================================================
binom Binomial
bernoulli Bernoulli
nbinom Negative Binomial
geom Geometric
hypergeom Hypergeometric
logser Logarithmic (Log-Series, Series)
poisson Poisson
planck Planck (Discrete Exponential)
boltzmann Boltzmann (Truncated Discrete Exponential)
randint Discrete Uniform
zipf Zipf
dlaplace Discrete Laplacian
=============== ==============================================================
Statistical Functions (adapted from Gary Strangman)
-----------------------------------------------------
================= ==============================================================
gmean Geometric mean
hmean Harmonic mean
mean Arithmetic mean
cmedian Computed median
median Median
mode Modal value
tmean Truncated arithmetic mean
tvar Truncated variance
tmin _
tmax _
tstd _
tsem _
moment Central moment
variation Coefficient of variation
skew Skewness
kurtosis Fisher or Pearson kurtosis
describe Descriptive statistics
skewtest _
kurtosistest _
normaltest _
================= ==============================================================
================= ==============================================================
itemfreq _
scoreatpercentile _
percentileofscore _
histogram2 _
histogram _
cumfreq _
relfreq _
================= ==============================================================
================= ==============================================================
obrientransform _
signaltonoise _
bayes_mvs _
sem _
zmap _
================= ==============================================================
================= ==============================================================
threshold _
trimboth _
trim1 _
================= ==============================================================
================= ==============================================================
f_oneway _
paired _
pearsonr _
spearmanr _
pointbiserialr _
kendalltau _
linregress _
================= ==============================================================
================= ==============================================================
ttest_1samp _
ttest_ind _
ttest_rel _
kstest _
chisquare _
ks_2samp _
mannwhitneyu _
tiecorrect _
ranksums _
wilcoxon _
kruskal _
friedmanchisquare _
================= ==============================================================
================= ==============================================================
ansari _
bartlett _
levene _
shapiro _
anderson _
binom_test _
fligner _
mood _
oneway _
================= ==============================================================
================= ==============================================================
glm _
================= ==============================================================
================= ==============================================================
Plot-tests
================================================================================
probplot _
ppcc_max _
ppcc_plot _
================= ==============================================================
For many more stat related functions install the software R and the
interface package rpy.
"""
postpone_import = 1
global_symbols = ['find_repeats']
depends = ['linalg','special']
ignore = False # importing stats causes a segfault
| gpl-3.0 | -6,142,414,574,027,203,000 | 31.735178 | 91 | 0.452427 | false |
Vutshi/qutip | qutip/tests/test_subsystem_apply.py | 1 | 5219 | # This file is part of QuTiP.
#
# QuTiP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# QuTiP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with QuTiP. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2011 and later, Paul D. Nation & Robert J. Johansson
#
###########################################################################
from numpy.linalg import norm
from numpy.testing import assert_, run_module_suite
from qutip.random_objects import rand_dm, rand_unitary, rand_kraus_map
from qutip.subsystem_apply import subsystem_apply
from qutip.superop_reps import kraus_to_super
from qutip.superoperator import mat2vec, vec2mat
from qutip.tensor import tensor
from qutip.qobj import Qobj
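# subsystem_apply(rho, op, mask) applies `op` only to the subsystems whose entry
# in `mask` is True; reference=True selects the slow reference implementation
# that the efficient numerics are checked against in the tests below.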
class TestSubsystemApply(object):
"""
A test class for the QuTiP function for applying superoperators to
subsystems.
The four tests below determine whether efficient numerics, naive numerics
and semi-analytic results are identical.
"""
def test_SimpleSingleApply(self):
"""
Non-composite system, operator on Hilbert space.
"""
rho_3 = rand_dm(3)
single_op = rand_unitary(3)
analytic_result = single_op * rho_3 * single_op.dag()
naive_result = subsystem_apply(rho_3, single_op, [True],
reference=True)
efficient_result = subsystem_apply(rho_3, single_op, [True])
naive_diff = (analytic_result - naive_result).data.todense()
efficient_diff = (efficient_result - analytic_result).data.todense()
assert_(norm(naive_diff) < 1e-12 and norm(efficient_diff) < 1e-12)
def test_SimpleSuperApply(self):
"""
Non-composite system, operator on Liouville space.
"""
rho_3 = rand_dm(3)
superop = kraus_to_super(rand_kraus_map(3))
analytic_result = vec2mat(superop.data.todense() *
mat2vec(rho_3.data.todense()))
naive_result = subsystem_apply(rho_3, superop, [True],
reference=True)
naive_diff = (analytic_result - naive_result).data.todense()
assert_(norm(naive_diff) < 1e-12)
efficient_result = subsystem_apply(rho_3, superop, [True])
efficient_diff = (efficient_result - analytic_result).data.todense()
assert_(norm(efficient_diff) < 1e-12)
def test_ComplexSingleApply(self):
"""
Composite system, operator on Hilbert space.
"""
rho_list = list(map(rand_dm, [2, 3, 2, 3, 2]))
rho_input = tensor(rho_list)
single_op = rand_unitary(3)
analytic_result = rho_list
analytic_result[1] = single_op * analytic_result[1] * single_op.dag()
analytic_result[3] = single_op * analytic_result[3] * single_op.dag()
analytic_result = tensor(analytic_result)
naive_result = subsystem_apply(rho_input, single_op,
[False, True, False, True, False],
reference=True)
naive_diff = (analytic_result - naive_result).data.todense()
assert_(norm(naive_diff) < 1e-12)
efficient_result = subsystem_apply(rho_input, single_op,
[False, True, False, True, False])
efficient_diff = (efficient_result - analytic_result).data.todense()
assert_(norm(efficient_diff) < 1e-12)
def test_ComplexSuperApply(self):
"""
Superoperator: Efficient numerics and reference return same result,
        acting on a composite system
"""
rho_list = list(map(rand_dm, [2, 3, 2, 3, 2]))
rho_input = tensor(rho_list)
superop = kraus_to_super(rand_kraus_map(3))
analytic_result = rho_list
analytic_result[1] = Qobj(vec2mat(superop.data.todense() *
mat2vec(analytic_result[1].data.todense())))
analytic_result[3] = Qobj(vec2mat(superop.data.todense() *
mat2vec(analytic_result[3].data.todense())))
analytic_result = tensor(analytic_result)
naive_result = subsystem_apply(rho_input, superop,
[False, True, False, True, False],
reference=True)
naive_diff = (analytic_result - naive_result).data.todense()
assert_(norm(naive_diff) < 1e-12)
efficient_result = subsystem_apply(rho_input, superop,
[False, True, False, True, False])
efficient_diff = (efficient_result - analytic_result).data.todense()
assert_(norm(efficient_diff) < 1e-12)
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 | 3,885,156,561,271,169,000 | 41.08871 | 78 | 0.597241 | false |
jgcaaprom/android_external_chromium_org | components/test/data/password_manager/run_tests.py | 43 | 4038 | # -*- coding: utf-8 -*-
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This file allows the bots to be easily configure and run the tests."""
import argparse
import os
import tempfile
from environment import Environment
import tests
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Password Manager automated tests runner help.")
parser.add_argument(
"--chrome-path", action="store", dest="chrome_path",
help="Set the chrome path (required).", nargs=1, required=True)
parser.add_argument(
"--chromedriver-path", action="store", dest="chromedriver_path",
help="Set the chromedriver path (required).", nargs=1, required=True)
parser.add_argument(
"--profile-path", action="store", dest="profile_path",
help="Set the profile path (required). You just need to choose a "
"temporary empty folder. If the folder is not empty all its content "
"is going to be removed.",
nargs=1, required=True)
parser.add_argument(
"--passwords-path", action="store", dest="passwords_path",
help="Set the usernames/passwords path (required).", nargs=1,
required=True)
parser.add_argument("--save-path", action="store", nargs=1, dest="save_path",
help="Write the results in a file.", required=True)
args = parser.parse_args()
environment = Environment('', '', '', None, False)
tests.Tests(environment)
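  # tests.Tests() registers the individual website tests on |environment|; each
  # one is then run below by invoking tests.py in a separate, timed-out process.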
xml = open(args.save_path[0],"w")
xml.write("<xml>")
try:
results = tempfile.NamedTemporaryFile(
dir=os.path.join(tempfile.gettempdir()), delete=False)
results_path = results.name
results.close()
full_path = os.path.realpath(__file__)
tests_dir = os.path.dirname(full_path)
tests_path = os.path.join(tests_dir, "tests.py")
for websitetest in environment.websitetests:
# The tests can be flaky. This is why we try to rerun up to 3 times.
for x in range(0, 3):
# TODO(rchtara): Using "pkill" is just temporary until a better,
# platform-independent solution is found.
os.system("pkill chrome")
try:
os.remove(results_path)
except Exception:
pass
# TODO(rchtara): Using "timeout is just temporary until a better,
# platform-independent solution is found.
# The website test runs in two passes, each pass has an internal
# timeout of 200s for waiting (see |remaining_time_to_wait| and
# Wait() in websitetest.py). Accounting for some more time spent on
# the non-waiting execution, 300 seconds should be the upper bound on
# the runtime of one pass, thus 600 seconds for the whole test.
os.system("timeout 600 python %s %s --chrome-path %s "
"--chromedriver-path %s --passwords-path %s --profile-path %s "
"--save-path %s" %
(tests_path, websitetest.name, args.chrome_path[0],
args.chromedriver_path[0], args.passwords_path[0],
args.profile_path[0], results_path))
if os.path.isfile(results_path):
results = open(results_path, "r")
count = 0 # Count the number of successful tests.
for line in results:
xml.write(line)
count += line.count("successful='True'")
results.close()
# There is only two tests running for every website: the prompt and
# the normal test. If both of the tests were successful, the tests
# would be stopped for the current website.
if count == 2:
break
else:
xml.write("<result><test name='%s' type='prompt' successful='false'>"
"</test><test name='%s' type='normal' successful='false'></test>"
"</result>" % (websitetest.name, websitetest.name))
finally:
try:
os.remove(results_path)
except Exception:
pass
xml.write("</xml>")
xml.close()
| bsd-3-clause | -3,363,233,355,291,991,000 | 39.38 | 80 | 0.631253 | false |
manazhao/tf_recsys | tensorflow/contrib/predictor/saved_model_predictor_test.py | 93 | 6114 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for predictor.saved_model_predictor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.predictor import saved_model_predictor
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_def_utils
KEYS_AND_OPS = (('sum', lambda x, y: x + y),
('product', lambda x, y: x * y,),
('difference', lambda x, y: x - y))
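# Each key names a signature exported by the test model; the paired lambda
# mirrors the arithmetic that signature computes, giving the expected output.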
MODEL_DIR_NAME = 'contrib/predictor/test_export_dir'
class SavedModelPredictorTest(test.TestCase):
@classmethod
def setUpClass(cls):
# Load a saved model exported from the arithmetic `Estimator`.
# See `testing_common.py`.
cls._export_dir = test.test_src_dir_path(MODEL_DIR_NAME)
def testDefault(self):
"""Test prediction with default signature."""
np.random.seed(1111)
x = np.random.rand()
y = np.random.rand()
predictor = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir)
output = predictor({'x': x, 'y': y})['outputs']
self.assertAlmostEqual(output, x + y, places=3)
def testSpecifiedSignatureKey(self):
"""Test prediction with spedicified signature key."""
np.random.seed(1234)
for signature_def_key, op in KEYS_AND_OPS:
x = np.random.rand()
y = np.random.rand()
expected_output = op(x, y)
predictor = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir,
signature_def_key=signature_def_key)
output_tensor_name = predictor.fetch_tensors['outputs'].name
self.assertRegexpMatches(
output_tensor_name,
signature_def_key,
msg='Unexpected fetch tensor.')
output = predictor({'x': x, 'y': y})['outputs']
self.assertAlmostEqual(
expected_output, output, places=3,
msg='Failed for signature "{}." '
'Got output {} for x = {} and y = {}'.format(
signature_def_key, output, x, y))
def testSpecifiedSignature(self):
"""Test prediction with spedicified signature definition."""
np.random.seed(4444)
for key, op in KEYS_AND_OPS:
x = np.random.rand()
y = np.random.rand()
expected_output = op(x, y)
inputs = {
'x': meta_graph_pb2.TensorInfo(
name='inputs/x:0',
dtype=types_pb2.DT_FLOAT,
tensor_shape=tensor_shape_pb2.TensorShapeProto()),
'y': meta_graph_pb2.TensorInfo(
name='inputs/y:0',
dtype=types_pb2.DT_FLOAT,
tensor_shape=tensor_shape_pb2.TensorShapeProto())}
outputs = {
key: meta_graph_pb2.TensorInfo(
name='outputs/{}:0'.format(key),
dtype=types_pb2.DT_FLOAT,
tensor_shape=tensor_shape_pb2.TensorShapeProto())}
signature_def = signature_def_utils.build_signature_def(
inputs=inputs,
outputs=outputs,
method_name='tensorflow/serving/regress')
predictor = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir,
signature_def=signature_def)
output_tensor_name = predictor.fetch_tensors[key].name
self.assertRegexpMatches(
output_tensor_name,
key,
msg='Unexpected fetch tensor.')
output = predictor({'x': x, 'y': y})[key]
self.assertAlmostEqual(
expected_output, output, places=3,
msg='Failed for signature "{}". '
'Got output {} for x = {} and y = {}'.format(key, output, x, y))
def testSpecifiedTensors(self):
"""Test prediction with spedicified `Tensor`s."""
np.random.seed(987)
for key, op in KEYS_AND_OPS:
x = np.random.rand()
y = np.random.rand()
expected_output = op(x, y)
input_names = {'x': 'inputs/x:0',
'y': 'inputs/y:0'}
output_names = {key: 'outputs/{}:0'.format(key)}
predictor = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir,
input_names=input_names,
output_names=output_names)
output_tensor_name = predictor.fetch_tensors[key].name
self.assertRegexpMatches(
output_tensor_name,
key,
msg='Unexpected fetch tensor.')
output = predictor({'x': x, 'y': y})[key]
self.assertAlmostEqual(
expected_output, output, places=3,
msg='Failed for signature "{}". '
'Got output {} for x = {} and y = {}'.format(key, output, x, y))
def testBadTagsFail(self):
"""Test that predictor construction fails for bad tags."""
bad_tags_regex = ('.* could not be found in SavedModel')
with self.assertRaisesRegexp(RuntimeError, bad_tags_regex):
_ = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir,
tags=('zomg, bad, tags'))
def testSpecifiedGraph(self):
"""Test that the predictor remembers a specified `Graph`."""
g = ops.Graph()
predictor = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir,
graph=g)
self.assertEqual(predictor.graph, g)
if __name__ == '__main__':
test.main()
| apache-2.0 | -7,811,639,115,508,005,000 | 34.964706 | 80 | 0.626431 | false |
zahodi/ansible | test/units/vars/test_variable_manager.py | 32 | 14137 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from collections import defaultdict
from ansible.compat.six import iteritems
from ansible.compat.six.moves import builtins
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import MagicMock, mock_open, patch
from ansible.inventory import Inventory
from ansible.playbook.play import Play
from units.mock.loader import DictDataLoader
from units.mock.path import mock_unfrackpath_noop
from ansible.vars import VariableManager
class TestVariableManager(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_basic_manager(self):
fake_loader = DictDataLoader({})
v = VariableManager()
vars = v.get_vars(loader=fake_loader, use_cache=False)
#FIXME: not sure why we remove all and only test playbook_dir
for remove in ['omit', 'vars', 'ansible_version', 'ansible_check_mode', 'ansible_playbook_python']:
if remove in vars:
del vars[remove]
self.assertEqual(vars, dict(playbook_dir='.'))
def test_variable_manager_extra_vars(self):
fake_loader = DictDataLoader({})
extra_vars = dict(a=1, b=2, c=3)
v = VariableManager()
v.extra_vars = extra_vars
vars = v.get_vars(loader=fake_loader, use_cache=False)
for (key, val) in iteritems(extra_vars):
self.assertEqual(vars.get(key), val)
self.assertIsNot(v.extra_vars, extra_vars)
def test_variable_manager_host_vars_file(self):
fake_loader = DictDataLoader({
"host_vars/hostname1.yml": """
foo: bar
""",
"other_path/host_vars/hostname1.yml": """
foo: bam
baa: bat
""",
"host_vars/host.name.yml": """
host_with_dots: true
""",
})
v = VariableManager()
v.add_host_vars_file("host_vars/hostname1.yml", loader=fake_loader)
v.add_host_vars_file("other_path/host_vars/hostname1.yml", loader=fake_loader)
self.assertIn("hostname1", v._host_vars_files)
self.assertEqual(v._host_vars_files["hostname1"], [dict(foo="bar"), dict(foo="bam", baa="bat")])
mock_host = MagicMock()
mock_host.get_name.return_value = "hostname1"
mock_host.get_vars.return_value = dict()
mock_host.get_groups.return_value = ()
mock_host.get_group_vars.return_value = dict()
self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host, use_cache=False).get("foo"), "bam")
self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host, use_cache=False).get("baa"), "bat")
v.add_host_vars_file("host_vars/host.name", loader=fake_loader)
self.assertEqual(v._host_vars_files["host.name"], [dict(host_with_dots=True)])
def test_variable_manager_group_vars_file(self):
fake_loader = DictDataLoader({
"group_vars/all.yml": """
foo: bar
""",
"group_vars/somegroup.yml": """
bam: baz
""",
"other_path/group_vars/somegroup.yml": """
baa: bat
""",
"group_vars/some.group.yml": """
group_with_dots: true
""",
})
v = VariableManager()
v.add_group_vars_file("group_vars/all.yml", loader=fake_loader)
v.add_group_vars_file("group_vars/somegroup.yml", loader=fake_loader)
v.add_group_vars_file("other_path/group_vars/somegroup.yml", loader=fake_loader)
self.assertIn("somegroup", v._group_vars_files)
self.assertEqual(v._group_vars_files["all"], [dict(foo="bar")])
self.assertEqual(v._group_vars_files["somegroup"], [dict(bam="baz"), dict(baa="bat")])
mock_group = MagicMock()
mock_group.name = "somegroup"
mock_group.get_ancestors.return_value = ()
mock_group.get_vars.return_value = dict()
mock_host = MagicMock()
mock_host.get_name.return_value = "hostname1"
mock_host.get_vars.return_value = dict()
mock_host.get_groups.return_value = (mock_group,)
mock_host.get_group_vars.return_value = dict()
vars = v.get_vars(loader=fake_loader, host=mock_host, use_cache=False)
self.assertEqual(vars.get("foo"), "bar")
self.assertEqual(vars.get("baa"), "bat")
v.add_group_vars_file("group_vars/some.group", loader=fake_loader)
self.assertEqual(v._group_vars_files["some.group"], [dict(group_with_dots=True)])
def test_variable_manager_play_vars(self):
fake_loader = DictDataLoader({})
mock_play = MagicMock()
mock_play.get_vars.return_value = dict(foo="bar")
mock_play.get_roles.return_value = []
mock_play.get_vars_files.return_value = []
v = VariableManager()
self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play, use_cache=False).get("foo"), "bar")
def test_variable_manager_play_vars_files(self):
fake_loader = DictDataLoader({
"/path/to/somefile.yml": """
foo: bar
"""
})
mock_play = MagicMock()
mock_play.get_vars.return_value = dict()
mock_play.get_roles.return_value = []
mock_play.get_vars_files.return_value = ['/path/to/somefile.yml']
v = VariableManager()
self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play, use_cache=False).get("foo"), "bar")
def test_variable_manager_task_vars(self):
fake_loader = DictDataLoader({})
mock_task = MagicMock()
mock_task._role = None
mock_task.loop = None
mock_task.get_vars.return_value = dict(foo="bar")
mock_task.get_include_params.return_value = dict()
v = VariableManager()
self.assertEqual(v.get_vars(loader=fake_loader, task=mock_task, use_cache=False).get("foo"), "bar")
@patch.object(Inventory, 'basedir')
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_variable_manager_precedence(self, mock_basedir):
'''
Tests complex variations and combinations of get_vars() with different
objects to modify the context under which variables are merged.
'''
v = VariableManager()
v._fact_cache = defaultdict(dict)
inventory1_filedata = """
[group2:children]
group1
[group1]
host1 host_var=host_var_from_inventory_host1
[group1:vars]
group_var = group_var_from_inventory_group1
[group2:vars]
group_var = group_var_from_inventory_group2
"""
fake_loader = DictDataLoader({
# inventory1
'/etc/ansible/inventory1': inventory1_filedata,
# role defaults_only1
'/etc/ansible/roles/defaults_only1/defaults/main.yml': """
default_var: "default_var_from_defaults_only1"
host_var: "host_var_from_defaults_only1"
group_var: "group_var_from_defaults_only1"
group_var_all: "group_var_all_from_defaults_only1"
extra_var: "extra_var_from_defaults_only1"
""",
'/etc/ansible/roles/defaults_only1/tasks/main.yml': """
- debug: msg="here i am"
""",
# role defaults_only2
'/etc/ansible/roles/defaults_only2/defaults/main.yml': """
default_var: "default_var_from_defaults_only2"
host_var: "host_var_from_defaults_only2"
group_var: "group_var_from_defaults_only2"
group_var_all: "group_var_all_from_defaults_only2"
extra_var: "extra_var_from_defaults_only2"
""",
})
mock_basedir.return_value = './'
with patch.object(builtins, 'open', mock_open(read_data=inventory1_filedata)):
inv1 = Inventory(loader=fake_loader, variable_manager=v, host_list='/etc/ansible/inventory1')
inv1.set_playbook_basedir('./')
play1 = Play.load(dict(
hosts=['all'],
roles=['defaults_only1', 'defaults_only2'],
), loader=fake_loader, variable_manager=v)
# first we assert that the defaults as viewed as a whole are the merged results
# of the defaults from each role, with the last role defined "winning" when
# there is a variable naming conflict
res = v.get_vars(loader=fake_loader, play=play1)
self.assertEqual(res['default_var'], 'default_var_from_defaults_only2')
# next, we assert that when vars are viewed from the context of a task within a
# role, that task will see its own role defaults before any other role's
blocks = play1.compile()
task = blocks[1].block[0]
res = v.get_vars(loader=fake_loader, play=play1, task=task)
self.assertEqual(res['default_var'], 'default_var_from_defaults_only1')
        # next we assert the precedence of inventory variables
v.set_inventory(inv1)
h1 = inv1.get_host('host1')
res = v.get_vars(loader=fake_loader, play=play1, host=h1)
self.assertEqual(res['group_var'], 'group_var_from_inventory_group1')
self.assertEqual(res['host_var'], 'host_var_from_inventory_host1')
# next we test with group_vars/ files loaded
fake_loader.push("/etc/ansible/group_vars/all", """
group_var_all: group_var_all_from_group_vars_all
""")
fake_loader.push("/etc/ansible/group_vars/group1", """
group_var: group_var_from_group_vars_group1
""")
fake_loader.push("/etc/ansible/group_vars/group3", """
# this is a dummy, which should not be used anywhere
group_var: group_var_from_group_vars_group3
""")
fake_loader.push("/etc/ansible/host_vars/host1", """
host_var: host_var_from_host_vars_host1
""")
fake_loader.push("group_vars/group1", """
playbook_group_var: playbook_group_var
""")
fake_loader.push("host_vars/host1", """
playbook_host_var: playbook_host_var
""")
v.add_group_vars_file("/etc/ansible/group_vars/all", loader=fake_loader)
v.add_group_vars_file("/etc/ansible/group_vars/group1", loader=fake_loader)
v.add_group_vars_file("/etc/ansible/group_vars/group2", loader=fake_loader)
v.add_group_vars_file("group_vars/group1", loader=fake_loader)
v.add_host_vars_file("/etc/ansible/host_vars/host1", loader=fake_loader)
v.add_host_vars_file("host_vars/host1", loader=fake_loader)
res = v.get_vars(loader=fake_loader, play=play1, host=h1)
self.assertEqual(res['group_var'], 'group_var_from_group_vars_group1')
self.assertEqual(res['group_var_all'], 'group_var_all_from_group_vars_all')
self.assertEqual(res['playbook_group_var'], 'playbook_group_var')
self.assertEqual(res['host_var'], 'host_var_from_host_vars_host1')
self.assertEqual(res['playbook_host_var'], 'playbook_host_var')
# add in the fact cache
v._fact_cache['host1'] = dict(fact_cache_var="fact_cache_var_from_fact_cache")
res = v.get_vars(loader=fake_loader, play=play1, host=h1)
self.assertEqual(res['fact_cache_var'], 'fact_cache_var_from_fact_cache')
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_variable_manager_role_vars_dependencies(self):
'''
Tests vars from role dependencies with duplicate dependencies.
'''
v = VariableManager()
v._fact_cache = defaultdict(dict)
fake_loader = DictDataLoader({
# role common-role
'/etc/ansible/roles/common-role/tasks/main.yml': """
- debug: msg="{{role_var}}"
""",
# We do not need allow_duplicates: yes for this role
# because eliminating duplicates is done by the execution
# strategy, which we do not test here.
# role role1
'/etc/ansible/roles/role1/vars/main.yml': """
role_var: "role_var_from_role1"
""",
'/etc/ansible/roles/role1/meta/main.yml': """
dependencies:
- { role: common-role }
""",
# role role2
'/etc/ansible/roles/role2/vars/main.yml': """
role_var: "role_var_from_role2"
""",
'/etc/ansible/roles/role2/meta/main.yml': """
dependencies:
- { role: common-role }
""",
})
play1 = Play.load(dict(
hosts=['all'],
roles=['role1', 'role2'],
), loader=fake_loader, variable_manager=v)
# The task defined by common-role exists twice because role1
# and role2 depend on common-role. Check that the tasks see
# different values of role_var.
blocks = play1.compile()
task = blocks[1].block[0]
res = v.get_vars(loader=fake_loader, play=play1, task=task)
self.assertEqual(res['role_var'], 'role_var_from_role1')
task = blocks[2].block[0]
res = v.get_vars(loader=fake_loader, play=play1, task=task)
self.assertEqual(res['role_var'], 'role_var_from_role2')
| gpl-3.0 | -1,478,598,134,294,481,400 | 38.488827 | 107 | 0.606918 | false |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/io/stata.py | 7 | 82769 | """
Module contains tools for processing Stata files into DataFrames
The StataReader below was originally written by Joe Presbrey as part of PyDTA.
It has been extended and improved by Skipper Seabold from the Statsmodels
project who also developed the StataWriter and was finally added to pandas in
a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
http://www.statsmodels.org/devel/
"""
import numpy as np
import sys
import struct
from dateutil.relativedelta import relativedelta
from pandas.types.common import (is_categorical_dtype, is_datetime64_dtype,
_ensure_object)
from pandas.core.base import StringMixin
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.series import Series
import datetime
from pandas import compat, to_timedelta, to_datetime, isnull, DatetimeIndex
from pandas.compat import lrange, lmap, lzip, text_type, string_types, range, \
zip, BytesIO
from pandas.util.decorators import Appender
import pandas as pd
from pandas.io.common import get_filepath_or_buffer, BaseIterator
from pandas.lib import max_len_string_array, infer_dtype
from pandas.tslib import NaT, Timestamp
_version_error = ("Version of given Stata file is not 104, 105, 108, "
"111 (Stata 7SE), 113 (Stata 8/9), 114 (Stata 10/11), "
"115 (Stata 12), 117 (Stata 13), or 118 (Stata 14)")
_statafile_processing_params1 = """\
convert_dates : boolean, defaults to True
Convert date variables to DataFrame time values
convert_categoricals : boolean, defaults to True
Read value labels and convert columns to Categorical/Factor variables"""
_encoding_params = """\
encoding : string, None or encoding
Encoding used to parse the files. None defaults to iso-8859-1."""
_statafile_processing_params2 = """\
index : identifier of index column
identifier of column that should be used as index of the DataFrame
convert_missing : boolean, defaults to False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nans.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : boolean, defaults to True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64)
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns
order_categoricals : boolean, defaults to True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines"""
_iterator_params = """\
iterator : boolean, default False
Return StataReader object"""
_read_stata_doc = """Read Stata file into DataFrame
Parameters
----------
filepath_or_buffer : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
%s
Returns
-------
DataFrame or StataReader
Examples
--------
Read a Stata dta file:
>>> df = pandas.read_stata('filename.dta')
Read a Stata dta file in 10,000 line chunks:
>>> itr = pandas.read_stata('filename.dta', chunksize=10000)
>>> for chunk in itr:
>>> do_something(chunk)
""" % (_statafile_processing_params1, _encoding_params,
_statafile_processing_params2, _chunksize_params,
_iterator_params)
_data_method_doc = """Reads observations from Stata file, converting them into a dataframe
This is a legacy method. Use `read` in new code.
Parameters
----------
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_read_method_doc = """\
Reads observations from Stata file, converting them into a dataframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_stata_reader_doc = """\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
""" % (_statafile_processing_params1, _statafile_processing_params2,
_encoding_params, _chunksize_params)
@Appender(_read_stata_doc)
def read_stata(filepath_or_buffer, convert_dates=True,
convert_categoricals=True, encoding=None, index=None,
convert_missing=False, preserve_dtypes=True, columns=None,
order_categoricals=True, chunksize=None, iterator=False):
reader = StataReader(filepath_or_buffer,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals,
index=index, convert_missing=convert_missing,
preserve_dtypes=preserve_dtypes,
columns=columns,
order_categoricals=order_categoricals,
chunksize=chunksize, encoding=encoding)
if iterator or chunksize:
data = reader
else:
data = reader.read()
reader.close()
return data
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
stata_epoch = datetime.datetime(1960, 1, 1)
def _stata_elapsed_date_to_datetime_vec(dates, fmt):
"""
Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
        The format to convert to. Can be tc, td, tw, tm, tq, th, or ty
    Returns
-------
converted : Series
The converted dates
Examples
--------
>>> import pandas as pd
>>> dates = pd.Series([52])
>>> _stata_elapsed_date_to_datetime_vec(dates , "%tw")
0 1961-01-01
dtype: datetime64[ns]
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
    half-yearly date - th
        half-years since 1960h1
    yearly date - ty
        years since 0000
If you don't have pandas with datetime support, then you can't do
milliseconds accurately.
"""
MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days
MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
def convert_year_month_safe(year, month):
"""
Convert year and month to datetimes, using pandas vectorized versions
        when the date range falls within the range supported by pandas.
        Otherwise it falls back to a slower but more robust method using
        datetime.
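        A minimal sketch (in-range years take the vectorized path; the exact
        Series repr may differ by pandas version):
        >>> convert_year_month_safe(Series([1961]), Series([2]))
        0   1961-02-01
        dtype: datetime64[ns]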
"""
if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
return to_datetime(100 * year + month, format='%Y%m')
else:
index = getattr(year, 'index', None)
return Series(
[datetime.datetime(y, m, 1) for y, m in zip(year, month)],
index=index)
def convert_year_days_safe(year, days):
"""
Converts year (e.g. 1999) and days since the start of the year to a
datetime or datetime64 Series
"""
if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:
return (to_datetime(year, format='%Y') +
to_timedelta(days, unit='d'))
else:
index = getattr(year, 'index', None)
value = [datetime.datetime(y, 1, 1) + relativedelta(days=int(d))
for y, d in zip(year, days)]
return Series(value, index=index)
def convert_delta_safe(base, deltas, unit):
"""
Convert base dates and deltas to datetimes, using pandas vectorized
versions if the deltas satisfy restrictions required to be expressed
as dates in pandas.
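        A minimal sketch for unit='d' (366 days because 1960 is a leap year;
        the exact Series repr may differ by pandas version):
        >>> convert_delta_safe(stata_epoch, Series([366]), 'd')
        0   1961-01-01
        dtype: datetime64[ns]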
"""
index = getattr(deltas, 'index', None)
if unit == 'd':
if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA:
values = [base + relativedelta(days=int(d)) for d in deltas]
return Series(values, index=index)
elif unit == 'ms':
if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA:
values = [base + relativedelta(microseconds=(int(d) * 1000))
for d in deltas]
return Series(values, index=index)
else:
raise ValueError('format not understood')
base = to_datetime(base)
deltas = to_timedelta(deltas, unit=unit)
return base + deltas
# TODO: If/when pandas supports more than datetime64[ns], this should be
# improved to use correct range, e.g. datetime[Y] for yearly
bad_locs = np.isnan(dates)
has_bad_values = False
if bad_locs.any():
has_bad_values = True
data_col = Series(dates)
data_col[bad_locs] = 1.0 # Replace with NaT
dates = dates.astype(np.int64)
if fmt in ["%tc", "tc"]: # Delta ms relative to base
base = stata_epoch
ms = dates
conv_dates = convert_delta_safe(base, ms, 'ms')
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Encountered %tC format. Leaving in Stata Internal Format.")
conv_dates = Series(dates, dtype=np.object)
if has_bad_values:
conv_dates[bad_locs] = pd.NaT
return conv_dates
elif fmt in ["%td", "td", "%d", "d"]: # Delta days relative to base
base = stata_epoch
days = dates
conv_dates = convert_delta_safe(base, days, 'd')
elif fmt in ["%tw", "tw"]: # does not count leap days - 7 days is a week
year = stata_epoch.year + dates // 52
days = (dates % 52) * 7
conv_dates = convert_year_days_safe(year, days)
elif fmt in ["%tm", "tm"]: # Delta months relative to base
year = stata_epoch.year + dates // 12
month = (dates % 12) + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%tq", "tq"]: # Delta quarters relative to base
year = stata_epoch.year + dates // 4
month = (dates % 4) * 3 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%th", "th"]: # Delta half-years relative to base
year = stata_epoch.year + dates // 2
month = (dates % 2) * 6 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%ty", "ty"]: # Years -- not delta
year = dates
month = np.ones_like(dates)
conv_dates = convert_year_month_safe(year, month)
else:
raise ValueError("Date fmt %s not understood" % fmt)
if has_bad_values: # Restore NaT for bad values
conv_dates[bad_locs] = NaT
return conv_dates
def _datetime_to_stata_elapsed_vec(dates, fmt):
"""
Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
Series or array containing datetime.datetime or datetime64[ns] to
convert to the Stata Internal Format given by fmt
fmt : str
        The format to convert to. Can be tc, td, tw, tm, tq, th, or ty
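    Examples
    --------
    A minimal sketch of the inverse conversion for the %td (days since
    01jan1960) format; the exact Series repr may differ by pandas version:
    >>> import datetime
    >>> import pandas as pd
    >>> dates = pd.Series([datetime.datetime(1961, 1, 1)])
    >>> _datetime_to_stata_elapsed_vec(dates, "%td")
    0    366.0
    dtype: float64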
"""
index = dates.index
NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
US_PER_DAY = NS_PER_DAY / 1000
def parse_dates_safe(dates, delta=False, year=False, days=False):
d = {}
if is_datetime64_dtype(dates.values):
if delta:
delta = dates - stata_epoch
d['delta'] = delta.values.astype(
np.int64) // 1000 # microseconds
if days or year:
dates = DatetimeIndex(dates)
d['year'], d['month'] = dates.year, dates.month
if days:
days = (dates.astype(np.int64) -
to_datetime(d['year'], format='%Y').astype(np.int64))
d['days'] = days // NS_PER_DAY
elif infer_dtype(dates) == 'datetime':
if delta:
delta = dates.values - stata_epoch
f = lambda x: \
US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
v = np.vectorize(f)
d['delta'] = v(delta)
if year:
year_month = dates.apply(lambda x: 100 * x.year + x.month)
d['year'] = year_month.values // 100
d['month'] = (year_month.values - d['year'] * 100)
if days:
f = lambda x: (x - datetime.datetime(x.year, 1, 1)).days
v = np.vectorize(f)
d['days'] = v(dates)
else:
raise ValueError('Columns containing dates must contain either '
'datetime64, datetime.datetime or null values.')
return DataFrame(d, index=index)
bad_loc = isnull(dates)
index = dates.index
if bad_loc.any():
dates = Series(dates)
if is_datetime64_dtype(dates):
dates[bad_loc] = to_datetime(stata_epoch)
else:
dates[bad_loc] = stata_epoch
if fmt in ["%tc", "tc"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta / 1000
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Stata Internal Format tC not supported.")
conv_dates = dates
elif fmt in ["%td", "td"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta // US_PER_DAY
elif fmt in ["%tw", "tw"]:
d = parse_dates_safe(dates, year=True, days=True)
conv_dates = (52 * (d.year - stata_epoch.year) + d.days // 7)
elif fmt in ["%tm", "tm"]:
d = parse_dates_safe(dates, year=True)
conv_dates = (12 * (d.year - stata_epoch.year) + d.month - 1)
elif fmt in ["%tq", "tq"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
elif fmt in ["%th", "th"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 2 * (d.year - stata_epoch.year) + \
(d.month > 6).astype(np.int)
elif fmt in ["%ty", "ty"]:
d = parse_dates_safe(dates, year=True)
conv_dates = d.year
else:
raise ValueError("Format %s is not a known Stata date format" % fmt)
conv_dates = Series(conv_dates, dtype=np.float64)
missing_value = struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0]
conv_dates[bad_loc] = missing_value
return Series(conv_dates, index=index)
excessive_string_length_error = """
Fixed width strings in Stata .dta files are limited to 244 (or fewer)
characters. Column '%s' does not satisfy this restriction.
"""
class PossiblePrecisionLoss(Warning):
pass
precision_loss_doc = """
Column converted from %s to %s, and some data are outside of the lossless
conversion range. This may result in a loss of precision in the saved data.
"""
class ValueLabelTypeMismatch(Warning):
pass
value_label_mismatch_doc = """
Stata value labels (pandas categories) must be strings. Column {0} contains
non-string labels which will be converted to strings. Please check that the
Stata data file created has not lost information due to duplicate labels.
"""
class InvalidColumnName(Warning):
pass
invalid_name_doc = """
Not all pandas column names were valid Stata variable names.
The following replacements have been made:
{0}
If this is not what you expect, please make sure you have Stata-compliant
column names in your DataFrame (strings only, max 32 characters, only
alphanumerics and underscores, no Stata reserved words)
"""
def _cast_to_stata_types(data):
"""Checks the dtypes of the columns of a pandas DataFrame for
compatibility with the data types and ranges supported by Stata, and
converts if necessary.
Parameters
----------
data : DataFrame
The DataFrame to check and convert
Notes
-----
Numeric columns in Stata must be one of int8, int16, int32, float32 or
float64, with some additional value restrictions. int8 and int16 columns
are checked for violations of the value restrictions and upcast if needed.
    int64 data is not usable in Stata, and so it is downcast to int32 whenever
    the values are in the int32 range, and side-cast to float64 when larger
    than this range. If the int64 values are outside of the range of those
    perfectly representable as float64 values, a warning is raised.
    bool columns are cast to int8. uint columns are converted to ints of the
    same size if there is no loss in precision, otherwise they are upcast to a
    larger type. uint64 is currently not supported since it is converted to
    object in a DataFrame.
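    Examples
    --------
    A minimal sketch (column names and values are arbitrary; the exact dtype
    repr may differ by pandas version):
    >>> df = DataFrame({'a': np.array([0, 1], dtype=np.uint8),
    ...                 'b': np.array([2 ** 40, 0], dtype=np.int64)})
    >>> _cast_to_stata_types(df).dtypes
    a       int8
    b    float64
    dtype: object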
"""
ws = ''
# original, if small, if large
conversion_data = ((np.bool, np.int8, np.int8),
(np.uint8, np.int8, np.int16),
(np.uint16, np.int16, np.int32),
(np.uint32, np.int32, np.int64))
float32_max = struct.unpack('<f', b'\xff\xff\xff\x7e')[0]
float64_max = struct.unpack('<d', b'\xff\xff\xff\xff\xff\xff\xdf\x7f')[0]
for col in data:
dtype = data[col].dtype
# Cast from unsupported types to supported types
for c_data in conversion_data:
if dtype == c_data[0]:
if data[col].max() <= np.iinfo(c_data[1]).max:
dtype = c_data[1]
else:
dtype = c_data[2]
if c_data[2] == np.float64: # Warn if necessary
if data[col].max() >= 2 ** 53:
ws = precision_loss_doc % ('uint64', 'float64')
data[col] = data[col].astype(dtype)
# Check values and upcast if necessary
if dtype == np.int8:
if data[col].max() > 100 or data[col].min() < -127:
data[col] = data[col].astype(np.int16)
elif dtype == np.int16:
if data[col].max() > 32740 or data[col].min() < -32767:
data[col] = data[col].astype(np.int32)
elif dtype == np.int64:
if (data[col].max() <= 2147483620 and
data[col].min() >= -2147483647):
data[col] = data[col].astype(np.int32)
else:
data[col] = data[col].astype(np.float64)
if data[col].max() >= 2 ** 53 or data[col].min() <= -2 ** 53:
ws = precision_loss_doc % ('int64', 'float64')
elif dtype in (np.float32, np.float64):
value = data[col].max()
if np.isinf(value):
msg = 'Column {0} has a maximum value of infinity which is ' \
'outside the range supported by Stata.'
raise ValueError(msg.format(col))
if dtype == np.float32 and value > float32_max:
data[col] = data[col].astype(np.float64)
elif dtype == np.float64:
if value > float64_max:
msg = 'Column {0} has a maximum value ({1}) outside the ' \
                          'range supported by Stata ({2})'
raise ValueError(msg.format(col, value, float64_max))
if ws:
import warnings
warnings.warn(ws, PossiblePrecisionLoss)
return data
class StataValueLabel(object):
"""
Parse a categorical column and prepare formatted output
    Parameters
    ----------
    catarray : Series
        Categorical Series to encode as a Stata value label
    Attributes
    ----------
    labname : string
        Name of the value label, taken from the name of `catarray`
    value_labels : list of tuples
        Sorted (code, label) pairs for the categories of `catarray`
Methods
-------
generate_value_label
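    Examples
    --------
    A minimal sketch (column name and labels are arbitrary):
    >>> cat = Series(['low', 'high', 'low'], name='rating').astype('category')
    >>> StataValueLabel(cat).value_labels
    [(0, 'high'), (1, 'low')]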
"""
def __init__(self, catarray):
self.labname = catarray.name
categories = catarray.cat.categories
self.value_labels = list(zip(np.arange(len(categories)), categories))
self.value_labels.sort(key=lambda x: x[0])
self.text_len = np.int32(0)
self.off = []
self.val = []
self.txt = []
self.n = 0
# Compute lengths and setup lists of offsets and labels
for vl in self.value_labels:
category = vl[1]
if not isinstance(category, string_types):
category = str(category)
import warnings
warnings.warn(value_label_mismatch_doc.format(catarray.name),
ValueLabelTypeMismatch)
self.off.append(self.text_len)
self.text_len += len(category) + 1 # +1 for the padding
self.val.append(vl[0])
self.txt.append(category)
self.n += 1
if self.text_len > 32000:
raise ValueError('Stata value labels for a single variable must '
'have a combined length less than 32,000 '
'characters.')
# Ensure int32
self.off = np.array(self.off, dtype=np.int32)
self.val = np.array(self.val, dtype=np.int32)
# Total length
self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len
def _encode(self, s):
"""
        Python 3 compatibility shim
"""
if compat.PY3:
return s.encode(self._encoding)
else:
return s
def generate_value_label(self, byteorder, encoding):
"""
Parameters
----------
byteorder : str
Byte order of the output
encoding : str
File encoding
Returns
-------
value_label : bytes
Bytes containing the formatted value label
"""
self._encoding = encoding
bio = BytesIO()
null_string = '\x00'
null_byte = b'\x00'
# len
bio.write(struct.pack(byteorder + 'i', self.len))
# labname
labname = self._encode(_pad_bytes(self.labname[:32], 33))
bio.write(labname)
# padding - 3 bytes
for i in range(3):
bio.write(struct.pack('c', null_byte))
# value_label_table
# n - int32
bio.write(struct.pack(byteorder + 'i', self.n))
# textlen - int32
bio.write(struct.pack(byteorder + 'i', self.text_len))
# off - int32 array (n elements)
for offset in self.off:
bio.write(struct.pack(byteorder + 'i', offset))
# val - int32 array (n elements)
for value in self.val:
bio.write(struct.pack(byteorder + 'i', value))
# txt - Text labels, null terminated
for text in self.txt:
bio.write(self._encode(text + null_string))
bio.seek(0)
return bio.read()
class StataMissingValue(StringMixin):
"""
An observation's missing value.
Parameters
-----------
value : int8, int16, int32, float32 or float64
The Stata missing value code
Attributes
----------
string : string
String representation of the Stata missing value
value : int8, int16, int32, float32 or float64
The original encoded missing value
Notes
-----
More information: <http://www.stata.com/help.cgi?missing>
    Integer missing values take the codes '.', '.a', ..., '.z', corresponding
    to the ranges 101 ... 127 (for int8), 32741 ... 32767 (for int16) and
    2147483621 ... 2147483647 (for int32). Missing values for floating point
    data types are more complex, but the pattern is simple to discern from
    the following table.
np.float32 missing values (float in Stata)
0000007f .
0008007f .a
0010007f .b
...
00c0007f .x
00c8007f .y
00d0007f .z
np.float64 missing values (double in Stata)
000000000000e07f .
000000000001e07f .a
000000000002e07f .b
...
000000000018e07f .x
000000000019e07f .y
00000000001ae07f .z
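    Examples
    --------
    A minimal sketch using the integer missing-value codes listed above:
    >>> StataMissingValue(101).string
    '.'
    >>> StataMissingValue(102).string
    '.a'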
"""
# Construct a dictionary of missing values
MISSING_VALUES = {}
bases = (101, 32741, 2147483621)
for b in bases:
# Conversion to long to avoid hash issues on 32 bit platforms #8968
MISSING_VALUES[compat.long(b)] = '.'
for i in range(1, 27):
MISSING_VALUES[compat.long(i + b)] = '.' + chr(96 + i)
float32_base = b'\x00\x00\x00\x7f'
increment = struct.unpack('<i', b'\x00\x08\x00\x00')[0]
for i in range(27):
value = struct.unpack('<f', float32_base)[0]
MISSING_VALUES[value] = '.'
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack('<i', struct.pack('<f', value))[
0] + increment
float32_base = struct.pack('<i', int_value)
float64_base = b'\x00\x00\x00\x00\x00\x00\xe0\x7f'
increment = struct.unpack('q', b'\x00\x00\x00\x00\x00\x01\x00\x00')[0]
for i in range(27):
value = struct.unpack('<d', float64_base)[0]
MISSING_VALUES[value] = '.'
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack('q', struct.pack('<d', value))[0] + increment
float64_base = struct.pack('q', int_value)
BASE_MISSING_VALUES = {'int8': 101,
'int16': 32741,
'int32': 2147483621,
'float32': struct.unpack('<f', float32_base)[0],
'float64': struct.unpack('<d', float64_base)[0]}
def __init__(self, value):
self._value = value
# Conversion to long to avoid hash issues on 32 bit platforms #8968
value = compat.long(value) if value < 2147483648 else float(value)
self._str = self.MISSING_VALUES[value]
string = property(lambda self: self._str,
doc="The Stata representation of the missing value: "
"'.', '.a'..'.z'")
value = property(lambda self: self._value,
doc='The binary representation of the missing value.')
def __unicode__(self):
return self.string
def __repr__(self):
# not perfect :-/
return "%s(%s)" % (self.__class__, self)
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.string == other.string and self.value == other.value)
@classmethod
def get_base_missing_value(cls, dtype):
if dtype == np.int8:
value = cls.BASE_MISSING_VALUES['int8']
elif dtype == np.int16:
value = cls.BASE_MISSING_VALUES['int16']
elif dtype == np.int32:
value = cls.BASE_MISSING_VALUES['int32']
elif dtype == np.float32:
value = cls.BASE_MISSING_VALUES['float32']
elif dtype == np.float64:
value = cls.BASE_MISSING_VALUES['float64']
else:
raise ValueError('Unsupported dtype')
return value
class StataParser(object):
_default_encoding = 'iso-8859-1'
def __init__(self, encoding):
self._encoding = encoding
# type code.
# --------------------
# str1 1 = 0x01
# str2 2 = 0x02
# ...
# str244 244 = 0xf4
# byte 251 = 0xfb (sic)
# int 252 = 0xfc
# long 253 = 0xfd
# float 254 = 0xfe
# double 255 = 0xff
# --------------------
# NOTE: the byte type seems to be reserved for categorical variables
# with a label, but the underlying variable is -127 to 100
# we're going to drop the label and cast to int
self.DTYPE_MAP = \
dict(
lzip(range(1, 245), ['a' + str(i) for i in range(1, 245)]) +
[
(251, np.int8),
(252, np.int16),
(253, np.int32),
(254, np.float32),
(255, np.float64)
]
)
self.DTYPE_MAP_XML = \
dict(
[
(32768, np.uint8), # Keys to GSO
(65526, np.float64),
(65527, np.float32),
(65528, np.int32),
(65529, np.int16),
(65530, np.int8)
]
)
self.TYPE_MAP = lrange(251) + list('bhlfd')
self.TYPE_MAP_XML = \
dict(
[
# Not really a Q, unclear how to handle byteswap
(32768, 'Q'),
(65526, 'd'),
(65527, 'f'),
(65528, 'l'),
(65529, 'h'),
(65530, 'b')
]
)
# NOTE: technically, some of these are wrong. there are more numbers
# that can be represented. it's the 27 ABOVE and BELOW the max listed
# numeric data type in [U] 12.2.2 of the 11.2 manual
float32_min = b'\xff\xff\xff\xfe'
float32_max = b'\xff\xff\xff\x7e'
float64_min = b'\xff\xff\xff\xff\xff\xff\xef\xff'
float64_max = b'\xff\xff\xff\xff\xff\xff\xdf\x7f'
self.VALID_RANGE = {
'b': (-127, 100),
'h': (-32767, 32740),
'l': (-2147483647, 2147483620),
'f': (np.float32(struct.unpack('<f', float32_min)[0]),
np.float32(struct.unpack('<f', float32_max)[0])),
'd': (np.float64(struct.unpack('<d', float64_min)[0]),
np.float64(struct.unpack('<d', float64_max)[0]))
}
self.OLD_TYPE_MAPPING = {
98: 251, # byte
105: 252, # int
108: 253, # long
102: 254 # float
# don't know old code for double
}
# These missing values are the generic '.' in Stata, and are used
# to replace nans
self.MISSING_VALUES = {
'b': 101,
'h': 32741,
'l': 2147483621,
'f': np.float32(struct.unpack('<f', b'\x00\x00\x00\x7f')[0]),
'd': np.float64(
struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
}
self.NUMPY_TYPE_MAP = {
'b': 'i1',
'h': 'i2',
'l': 'i4',
'f': 'f4',
'd': 'f8',
'Q': 'u8'
}
# Reserved words cannot be used as variable names
self.RESERVED_WORDS = ('aggregate', 'array', 'boolean', 'break',
'byte', 'case', 'catch', 'class', 'colvector',
'complex', 'const', 'continue', 'default',
'delegate', 'delete', 'do', 'double', 'else',
'eltypedef', 'end', 'enum', 'explicit',
'export', 'external', 'float', 'for', 'friend',
'function', 'global', 'goto', 'if', 'inline',
'int', 'local', 'long', 'NULL', 'pragma',
'protected', 'quad', 'rowvector', 'short',
'typedef', 'typename', 'virtual')
class StataReader(StataParser, BaseIterator):
__doc__ = _stata_reader_doc
def __init__(self, path_or_buf, convert_dates=True,
convert_categoricals=True, index=None,
convert_missing=False, preserve_dtypes=True,
columns=None, order_categoricals=True,
encoding='iso-8859-1', chunksize=None):
super(StataReader, self).__init__(encoding)
self.col_sizes = ()
# Arguments to the reader (can be temporarily overridden in
# calls to read).
self._convert_dates = convert_dates
self._convert_categoricals = convert_categoricals
self._index = index
self._convert_missing = convert_missing
self._preserve_dtypes = preserve_dtypes
self._columns = columns
self._order_categoricals = order_categoricals
self._encoding = encoding
self._chunksize = chunksize
# State variables for the file
self._has_string_data = False
self._missing_values = False
self._can_read_value_labels = False
self._column_selector_set = False
self._value_labels_read = False
self._data_read = False
self._dtype = None
self._lines_read = 0
self._native_byteorder = _set_endianness(sys.byteorder)
if isinstance(path_or_buf, str):
path_or_buf, encoding, _ = get_filepath_or_buffer(
path_or_buf, encoding=self._default_encoding
)
if isinstance(path_or_buf, (str, compat.text_type, bytes)):
self.path_or_buf = open(path_or_buf, 'rb')
else:
# Copy to BytesIO, and ensure no encoding
contents = path_or_buf.read()
try:
contents = contents.encode(self._default_encoding)
except:
pass
self.path_or_buf = BytesIO(contents)
self._read_header()
def __enter__(self):
""" enter context manager """
return self
def __exit__(self, exc_type, exc_value, traceback):
""" exit context manager """
self.close()
def close(self):
""" close the handle if its open """
try:
self.path_or_buf.close()
except IOError:
pass
def _read_header(self):
first_char = self.path_or_buf.read(1)
if struct.unpack('c', first_char)[0] == b'<':
self._read_new_header(first_char)
else:
self._read_old_header(first_char)
self.has_string_data = len([x for x in self.typlist
if type(x) is int]) > 0
# calculate size of a data record
self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist)
# remove format details from %td
self.fmtlist = ["%td" if x.startswith("%td") else x
for x in self.fmtlist]
def _read_new_header(self, first_char):
# The first part of the header is common to 117 and 118.
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117, 118]:
raise ValueError(_version_error)
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == "MSF" and '>' or '<'
self.path_or_buf.read(15) # </byteorder><K>
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.path_or_buf.read(7) # </K><N>
self.nobs = self._get_nobs()
self.path_or_buf.read(11) # </N><label>
self.data_label = self._get_data_label()
self.path_or_buf.read(19) # </label><timestamp>
self.time_stamp = self._get_time_stamp()
self.path_or_buf.read(26) # </timestamp></header><map>
self.path_or_buf.read(8) # 0x0000000000000000
self.path_or_buf.read(8) # position of <map>
self._seek_vartypes = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 16
self._seek_varnames = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
self._seek_sortlist = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
self._seek_formats = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 9
self._seek_value_label_names = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 19
# Requires version-specific treatment
self._seek_variable_labels = self._get_seek_variable_labels()
self.path_or_buf.read(8) # <characteristics>
self.data_location = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 6
self.seek_strls = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 7
self.seek_value_labels = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 14
self.typlist, self.dtyplist = self._get_dtypes(self._seek_vartypes)
self.path_or_buf.seek(self._seek_varnames)
self.varlist = self._get_varlist()
self.path_or_buf.seek(self._seek_sortlist)
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
self.path_or_buf.seek(self._seek_formats)
self.fmtlist = self._get_fmtlist()
self.path_or_buf.seek(self._seek_value_label_names)
self.lbllist = self._get_lbllist()
self.path_or_buf.seek(self._seek_variable_labels)
self._variable_labels = self._get_variable_labels()
# Get data type information, works for versions 117-118.
def _get_dtypes(self, seek_vartypes):
self.path_or_buf.seek(seek_vartypes)
raw_typlist = [struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
for i in range(self.nvar)]
def f(typ):
if typ <= 2045:
return typ
try:
return self.TYPE_MAP_XML[typ]
except KeyError:
raise ValueError("cannot convert stata types [{0}]".
format(typ))
typlist = [f(x) for x in raw_typlist]
def f(typ):
if typ <= 2045:
return str(typ)
try:
return self.DTYPE_MAP_XML[typ]
except KeyError:
raise ValueError("cannot convert stata dtype [{0}]"
.format(typ))
dtyplist = [f(x) for x in raw_typlist]
return typlist, dtyplist
def _get_varlist(self):
if self.format_version == 117:
b = 33
elif self.format_version == 118:
b = 129
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
# Returns the format list
def _get_fmtlist(self):
if self.format_version == 118:
b = 57
elif self.format_version > 113:
b = 49
elif self.format_version > 104:
b = 12
else:
b = 7
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
# Returns the label list
def _get_lbllist(self):
if self.format_version >= 118:
b = 129
elif self.format_version > 108:
b = 33
else:
b = 9
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
def _get_variable_labels(self):
if self.format_version == 118:
vlblist = [self._decode(self.path_or_buf.read(321))
for i in range(self.nvar)]
elif self.format_version > 105:
vlblist = [self._null_terminate(self.path_or_buf.read(81))
for i in range(self.nvar)]
else:
vlblist = [self._null_terminate(self.path_or_buf.read(32))
for i in range(self.nvar)]
return vlblist
def _get_nobs(self):
if self.format_version == 118:
return struct.unpack(self.byteorder + 'Q',
self.path_or_buf.read(8))[0]
else:
return struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
def _get_data_label(self):
if self.format_version == 118:
strlen = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version == 117:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self._null_terminate(self.path_or_buf.read(strlen))
elif self.format_version > 105:
return self._null_terminate(self.path_or_buf.read(81))
else:
return self._null_terminate(self.path_or_buf.read(32))
def _get_time_stamp(self):
if self.format_version == 118:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self.path_or_buf.read(strlen).decode("utf-8")
elif self.format_version == 117:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self._null_terminate(self.path_or_buf.read(strlen))
elif self.format_version > 104:
return self._null_terminate(self.path_or_buf.read(18))
else:
raise ValueError()
def _get_seek_variable_labels(self):
if self.format_version == 117:
            self.path_or_buf.read(8)  # <variable_labels>, throw away
# Stata 117 data files do not follow the described format. This is
# a work around that uses the previous label, 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
return self._seek_value_label_names + (33 * self.nvar) + 20 + 17
elif self.format_version == 118:
return struct.unpack(self.byteorder + 'q',
self.path_or_buf.read(8))[0] + 17
else:
raise ValueError()
def _read_old_header(self, first_char):
self.format_version = struct.unpack('b', first_char)[0]
if self.format_version not in [104, 105, 108, 111, 113, 114, 115]:
raise ValueError(_version_error)
self.byteorder = struct.unpack('b', self.path_or_buf.read(1))[
0] == 0x1 and '>' or '<'
self.filetype = struct.unpack('b', self.path_or_buf.read(1))[0]
self.path_or_buf.read(1) # unused
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.nobs = self._get_nobs()
self.data_label = self._get_data_label()
self.time_stamp = self._get_time_stamp()
# descriptors
if self.format_version > 108:
typlist = [ord(self.path_or_buf.read(1))
for i in range(self.nvar)]
else:
buf = self.path_or_buf.read(self.nvar)
typlistb = np.frombuffer(buf, dtype=np.uint8)
typlist = []
for tp in typlistb:
if tp in self.OLD_TYPE_MAPPING:
typlist.append(self.OLD_TYPE_MAPPING[tp])
else:
typlist.append(tp - 127) # py2 string, py3 bytes
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata types [{0}]"
.format(','.join(str(x) for x in typlist)))
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata dtypes [{0}]"
.format(','.join(str(x) for x in typlist)))
if self.format_version > 108:
self.varlist = [self._null_terminate(self.path_or_buf.read(33))
for i in range(self.nvar)]
else:
self.varlist = [self._null_terminate(self.path_or_buf.read(9))
for i in range(self.nvar)]
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
self.fmtlist = self._get_fmtlist()
self.lbllist = self._get_lbllist()
self._variable_labels = self._get_variable_labels()
# ignore expansion fields (Format 105 and later)
# When reading, read five bytes; the last four bytes now tell you
# the size of the next read, which you discard. You then continue
# like this until you read 5 bytes of zeros.
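        # Illustrative sketch of one such field for format > 108 (bytes are
        # made up, not taken from any real file):
        #   b'\x01' + struct.pack('<i', 5) + b'hello'  -> one field, skipped
        #   b'\x00' + struct.pack('<i', 0)             -> the 5 zero bytes that
        #                                                  terminate the list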
if self.format_version > 104:
while True:
data_type = struct.unpack(self.byteorder + 'b',
self.path_or_buf.read(1))[0]
if self.format_version > 108:
data_len = struct.unpack(self.byteorder + 'i',
self.path_or_buf.read(4))[0]
else:
data_len = struct.unpack(self.byteorder + 'h',
self.path_or_buf.read(2))[0]
if data_type == 0:
break
self.path_or_buf.read(data_len)
# necessary data to continue parsing
self.data_location = self.path_or_buf.tell()
def _calcsize(self, fmt):
return (type(fmt) is int and fmt or
struct.calcsize(self.byteorder + fmt))
def _decode(self, s):
s = s.partition(b"\0")[0]
return s.decode('utf-8')
def _null_terminate(self, s):
if compat.PY3 or self._encoding is not None:
# have bytes not strings, so must decode
s = s.partition(b"\0")[0]
return s.decode(self._encoding or self._default_encoding)
else:
null_byte = "\0"
try:
return s.lstrip(null_byte)[:s.index(null_byte)]
except:
return s
def _read_value_labels(self):
if self.format_version <= 108:
# Value labels are not supported in version 108 and earlier.
return
if self._value_labels_read:
# Don't read twice
return
if self.format_version >= 117:
self.path_or_buf.seek(self.seek_value_labels)
else:
offset = self.nobs * self._dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
self._value_labels_read = True
self.value_label_dict = dict()
while True:
if self.format_version >= 117:
if self.path_or_buf.read(5) == b'</val': # <lbl>
break # end of value label table
slength = self.path_or_buf.read(4)
if not slength:
break # end of value label table (format < 117)
if self.format_version <= 117:
labname = self._null_terminate(self.path_or_buf.read(33))
else:
labname = self._decode(self.path_or_buf.read(129))
self.path_or_buf.read(3) # padding
n = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
txtlen = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
off = np.frombuffer(self.path_or_buf.read(4 * n),
dtype=self.byteorder + "i4",
count=n)
val = np.frombuffer(self.path_or_buf.read(4 * n),
dtype=self.byteorder + "i4",
count=n)
ii = np.argsort(off)
off = off[ii]
val = val[ii]
txt = self.path_or_buf.read(txtlen)
self.value_label_dict[labname] = dict()
for i in range(n):
end = off[i + 1] if i < n - 1 else txtlen
if self.format_version <= 117:
self.value_label_dict[labname][val[i]] = (
self._null_terminate(txt[off[i]:end]))
else:
self.value_label_dict[labname][val[i]] = (
self._decode(txt[off[i]:end]))
if self.format_version >= 117:
self.path_or_buf.read(6) # </lbl>
self._value_labels_read = True
def _read_strls(self):
self.path_or_buf.seek(self.seek_strls)
self.GSO = {0: ''}
while True:
if self.path_or_buf.read(3) != b'GSO':
break
if self.format_version == 117:
v_o = struct.unpack(self.byteorder + 'Q',
self.path_or_buf.read(8))[0]
else:
buf = self.path_or_buf.read(12)
# Only tested on little endian file on little endian machine.
if self.byteorder == '<':
buf = buf[0:2] + buf[4:10]
else:
buf = buf[0:2] + buf[6:]
v_o = struct.unpack('Q', buf)[0]
typ = struct.unpack('B', self.path_or_buf.read(1))[0]
length = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
va = self.path_or_buf.read(length)
if typ == 130:
encoding = 'utf-8'
if self.format_version == 117:
encoding = self._encoding or self._default_encoding
va = va[0:-1].decode(encoding)
self.GSO[v_o] = va
# legacy
@Appender('DEPRECATED: ' + _data_method_doc)
def data(self, **kwargs):
import warnings
warnings.warn("'data' is deprecated, use 'read' instead")
if self._data_read:
raise Exception("Data has already been read.")
self._data_read = True
return self.read(None, **kwargs)
def __next__(self):
return self.read(nrows=self._chunksize or 1)
def get_chunk(self, size=None):
"""
Reads lines from Stata file and returns as dataframe
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, reads whole file.
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
@Appender(_read_method_doc)
def read(self, nrows=None, convert_dates=None,
convert_categoricals=None, index=None,
convert_missing=None, preserve_dtypes=None,
columns=None, order_categoricals=None):
# Handle empty file or chunk. If reading incrementally raise
# StopIteration. If reading the whole thing return an empty
# data frame.
if (self.nobs == 0) and (nrows is None):
self._can_read_value_labels = True
self._data_read = True
self.close()
return DataFrame(columns=self.varlist)
# Handle options
if convert_dates is None:
convert_dates = self._convert_dates
if convert_categoricals is None:
convert_categoricals = self._convert_categoricals
if convert_missing is None:
convert_missing = self._convert_missing
if preserve_dtypes is None:
preserve_dtypes = self._preserve_dtypes
if columns is None:
columns = self._columns
if order_categoricals is None:
order_categoricals = self._order_categoricals
if nrows is None:
nrows = self.nobs
if (self.format_version >= 117) and (self._dtype is None):
self._can_read_value_labels = True
self._read_strls()
# Setup the dtype.
if self._dtype is None:
dtype = [] # Convert struct data types to numpy data type
for i, typ in enumerate(self.typlist):
if typ in self.NUMPY_TYPE_MAP:
dtype.append(('s' + str(i), self.byteorder +
self.NUMPY_TYPE_MAP[typ]))
else:
dtype.append(('s' + str(i), 'S' + str(typ)))
dtype = np.dtype(dtype)
self._dtype = dtype
# Read data
dtype = self._dtype
max_read_len = (self.nobs - self._lines_read) * dtype.itemsize
read_len = nrows * dtype.itemsize
read_len = min(read_len, max_read_len)
if read_len <= 0:
# Iterator has finished, should never be here unless
# we are reading the file incrementally
if convert_categoricals:
self._read_value_labels()
self.close()
raise StopIteration
offset = self._lines_read * dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
read_lines = min(nrows, self.nobs - self._lines_read)
data = np.frombuffer(self.path_or_buf.read(read_len), dtype=dtype,
count=read_lines)
self._lines_read += read_lines
if self._lines_read == self.nobs:
self._can_read_value_labels = True
self._data_read = True
# if necessary, swap the byte order to native here
if self.byteorder != self._native_byteorder:
data = data.byteswap().newbyteorder()
if convert_categoricals:
self._read_value_labels()
if len(data) == 0:
data = DataFrame(columns=self.varlist, index=index)
else:
data = DataFrame.from_records(data, index=index)
data.columns = self.varlist
# If index is not specified, use actual row number rather than
# restarting at 0 for each chunk.
if index is None:
ix = np.arange(self._lines_read - read_lines, self._lines_read)
data = data.set_index(ix)
if columns is not None:
try:
data = self._do_select_columns(data, columns)
except ValueError:
self.close()
raise
# Decode strings
for col, typ in zip(data, self.typlist):
if type(typ) is int:
data[col] = data[col].apply(
self._null_terminate, convert_dtype=True)
data = self._insert_strls(data)
cols_ = np.where(self.dtyplist)[0]
# Convert columns (if needed) to match input type
index = data.index
requires_type_conversion = False
data_formatted = []
for i in cols_:
if self.dtyplist[i] is not None:
col = data.columns[i]
dtype = data[col].dtype
if dtype != np.dtype(object) and dtype != self.dtyplist[i]:
requires_type_conversion = True
data_formatted.append(
(col, Series(data[col], index, self.dtyplist[i])))
else:
data_formatted.append((col, data[col]))
if requires_type_conversion:
data = DataFrame.from_items(data_formatted)
del data_formatted
self._do_convert_missing(data, convert_missing)
if convert_dates:
cols = np.where(lmap(lambda x: x in _date_formats,
self.fmtlist))[0]
for i in cols:
col = data.columns[i]
try:
data[col] = _stata_elapsed_date_to_datetime_vec(
data[col],
self.fmtlist[i])
except ValueError:
self.close()
raise
if convert_categoricals and self.format_version > 108:
data = self._do_convert_categoricals(data,
self.value_label_dict,
self.lbllist,
order_categoricals)
if not preserve_dtypes:
retyped_data = []
convert = False
for col in data:
dtype = data[col].dtype
if dtype in (np.float16, np.float32):
dtype = np.float64
convert = True
elif dtype in (np.int8, np.int16, np.int32):
dtype = np.int64
convert = True
retyped_data.append((col, data[col].astype(dtype)))
if convert:
data = DataFrame.from_items(retyped_data)
return data
def _do_convert_missing(self, data, convert_missing):
# Check for missing values, and replace if found
for i, colname in enumerate(data):
fmt = self.typlist[i]
if fmt not in self.VALID_RANGE:
continue
nmin, nmax = self.VALID_RANGE[fmt]
series = data[colname]
missing = np.logical_or(series < nmin, series > nmax)
if not missing.any():
continue
if convert_missing: # Replacement follows Stata notation
missing_loc = np.argwhere(missing)
umissing, umissing_loc = np.unique(series[missing],
return_inverse=True)
replacement = Series(series, dtype=np.object)
for j, um in enumerate(umissing):
missing_value = StataMissingValue(um)
loc = missing_loc[umissing_loc == j]
replacement.iloc[loc] = missing_value
else: # All replacements are identical
dtype = series.dtype
if dtype not in (np.float32, np.float64):
dtype = np.float64
replacement = Series(series, dtype=dtype)
replacement[missing] = np.nan
data[colname] = replacement
def _insert_strls(self, data):
if not hasattr(self, 'GSO') or len(self.GSO) == 0:
return data
for i, typ in enumerate(self.typlist):
if typ != 'Q':
continue
data.iloc[:, i] = [self.GSO[k] for k in data.iloc[:, i]]
return data
def _do_select_columns(self, data, columns):
if not self._column_selector_set:
column_set = set(columns)
if len(column_set) != len(columns):
raise ValueError('columns contains duplicate entries')
unmatched = column_set.difference(data.columns)
if unmatched:
raise ValueError('The following columns were not found in the '
'Stata data set: ' +
', '.join(list(unmatched)))
# Copy information for retained columns for later processing
dtyplist = []
typlist = []
fmtlist = []
lbllist = []
for col in columns:
i = data.columns.get_loc(col)
dtyplist.append(self.dtyplist[i])
typlist.append(self.typlist[i])
fmtlist.append(self.fmtlist[i])
lbllist.append(self.lbllist[i])
self.dtyplist = dtyplist
self.typlist = typlist
self.fmtlist = fmtlist
self.lbllist = lbllist
self._column_selector_set = True
return data[columns]
def _do_convert_categoricals(self, data, value_label_dict, lbllist,
order_categoricals):
"""
Converts categorical columns to Categorical type.
"""
value_labels = list(compat.iterkeys(value_label_dict))
cat_converted_data = []
for col, label in zip(data, lbllist):
if label in value_labels:
# Explicit call with ordered=True
cat_data = Categorical(data[col], ordered=order_categoricals)
categories = []
for category in cat_data.categories:
if category in value_label_dict[label]:
categories.append(value_label_dict[label][category])
else:
categories.append(category) # Partially labeled
try:
cat_data.categories = categories
except ValueError:
vc = Series(categories).value_counts()
repeats = list(vc.index[vc > 1])
repeats = '\n' + '-' * 80 + '\n'.join(repeats)
msg = 'Value labels for column {0} are not unique. The ' \
'repeated labels are:\n{1}'.format(col, repeats)
raise ValueError(msg)
# TODO: is the next line needed above in the data(...) method?
cat_data = Series(cat_data, index=data.index)
cat_converted_data.append((col, cat_data))
else:
cat_converted_data.append((col, data[col]))
data = DataFrame.from_items(cat_converted_data)
return data
def data_label(self):
"""Returns data label of Stata file"""
return self.data_label
def variable_labels(self):
"""Returns variable labels as a dict, associating each variable name
with corresponding label
"""
return dict(zip(self.varlist, self._variable_labels))
def value_labels(self):
"""Returns a dict, associating each variable name a dict, associating
each value its corresponding label
"""
if not self._value_labels_read:
self._read_value_labels()
return self.value_label_dict
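# --- Illustrative sketch (not part of the original module) ------------------
# Shows how the incremental read API above (``read`` / ``get_chunk``) is
# typically driven.  ``StataReader`` as the public class name, its
# ``chunksize`` constructor argument and the ``example.dta`` path are
# assumptions made for illustration only.
def _example_iterate_dta_in_chunks(path='example.dta', chunksize=1000):
    reader = StataReader(path, chunksize=chunksize)  # assumed constructor
    total_rows = 0
    while True:
        try:
            chunk = reader.get_chunk(chunksize)  # defined above
        except StopIteration:
            # read() raises StopIteration once all observations are consumed
            break
        total_rows += len(chunk)
    return total_rows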
def _open_file_binary_write(fname, encoding):
if hasattr(fname, 'write'):
# if 'b' not in fname.mode:
return fname
return open(fname, "wb")
def _set_endianness(endianness):
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError("Endianness %s not understood" % endianness)
def _pad_bytes(name, length):
"""
    Takes a char string and pads it with null bytes until it is `length` characters long
"""
return name + "\x00" * (length - len(name))
def _convert_datetime_to_stata_type(fmt):
"""
Converts from one of the stata date formats to a type in TYPE_MAP
"""
if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq",
"%tq", "th", "%th", "ty", "%ty"]:
return np.float64 # Stata expects doubles for SIFs
else:
raise NotImplementedError("Format %s not implemented" % fmt)
def _maybe_convert_to_int_keys(convert_dates, varlist):
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key): convert_dates[key]})
else:
if not isinstance(key, int):
raise ValueError("convert_dates key must be a "
"column or an integer")
new_dict.update({key: convert_dates[key]})
return new_dict
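# --- Illustrative sketch (not part of the original module) ------------------
# What _maybe_convert_to_int_keys does to a user-supplied convert_dates dict;
# the column names below are made up for illustration.
def _example_convert_dates_keys():
    convert_dates = {'start': 'tc', 'end': 'td'}
    varlist = ['id', 'start', 'end']
    # Keys become column positions and formats gain a leading '%'
    return _maybe_convert_to_int_keys(convert_dates, varlist)  # {1: '%tc', 2: '%td'}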
def _dtype_to_stata_type(dtype, column):
"""
Converts dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
                         Pandas    Stata
    251 - chr(251) - for int8      byte
    252 - chr(252) - for int16     int
    253 - chr(253) - for int32     long
    254 - chr(254) - for float32   float
    255 - chr(255) - for double    double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if dtype.type == np.string_:
return chr(dtype.itemsize)
elif dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(_ensure_object(column.values))
return chr(max(itemsize, 1))
elif dtype == np.float64:
return chr(255)
elif dtype == np.float32:
return chr(254)
elif dtype == np.int32:
return chr(253)
elif dtype == np.int16:
return chr(252)
elif dtype == np.int8:
return chr(251)
else: # pragma : no cover
raise NotImplementedError("Data type %s not supported." % dtype)
def _dtype_to_default_stata_fmt(dtype, column):
"""
Maps numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
object -> "%DDs" where DD is the length of the string. If not a string,
raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%12.0g"
int16 -> "%8.0g"
int8 -> "%8.0g"
"""
# TODO: Refactor to combine type with format
# TODO: expand this to handle a default datetime format?
if dtype.type == np.object_:
inferred_dtype = infer_dtype(column.dropna())
if not (inferred_dtype in ('string', 'unicode') or
len(column) == 0):
raise ValueError('Writing general object arrays is not supported')
itemsize = max_len_string_array(_ensure_object(column.values))
if itemsize > 244:
raise ValueError(excessive_string_length_error % column.name)
return "%" + str(max(itemsize, 1)) + "s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int32:
return "%12.0g"
elif dtype == np.int8 or dtype == np.int16:
return "%8.0g"
else: # pragma : no cover
raise NotImplementedError("Data type %s not supported." % dtype)
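# --- Illustrative sketch (not part of the original module) ------------------
# The two helpers above applied to a plain object column; the values are made
# up and the expected results are shown as comments.
def _example_dtype_mappings():
    col = Series(['spam', 'eggs'])                            # strings, max length 4
    stata_type = _dtype_to_stata_type(col.dtype, col)         # chr(4) -> str4
    stata_fmt = _dtype_to_default_stata_fmt(col.dtype, col)   # "%4s"
    return stata_type, stata_fmt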
class StataWriter(StataParser):
"""
A class for writing Stata binary dta files
Parameters
----------
fname : str or buffer
String path of file-like object
data : DataFrame
Input to save
convert_dates : dict
Dictionary mapping columns containing datetime types to stata internal
        format to use when writing the dates. Options are 'tc', 'td', 'tm',
'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
Datetime columns that do not have a conversion type specified will be
converted to 'tc'. Raises NotImplementedError if a datetime column has
timezone information
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current time
dataset_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as values.
Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
Returns
-------
writer : StataWriter instance
The StataWriter instance has a write_file method, which will
write the file to the given `fname`.
Raises
------
NotImplementedError
* If datetimes contain timezone information
ValueError
        * Columns listed in convert_dates are neither datetime64[ns]
          nor datetime.datetime
* Column dtype is not representable in Stata
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
Examples
--------
>>> import pandas as pd
>>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b'])
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
>>> from datetime import datetime
>>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date'])
>>> writer = StataWriter('./date_data_file.dta', data, {'date' : 'tw'})
>>> writer.write_file()
"""
def __init__(self, fname, data, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
super(StataWriter, self).__init__(encoding)
self._convert_dates = {} if convert_dates is None else convert_dates
self._write_index = write_index
self._time_stamp = time_stamp
self._data_label = data_label
self._variable_labels = variable_labels
# attach nobs, nvars, data, varlist, typlist
self._prepare_pandas(data)
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
self._fname = fname
self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}
def _write(self, to_write):
"""
Helper to call encode before writing to file for Python 3 compat.
"""
if compat.PY3:
self._file.write(to_write.encode(self._encoding or
self._default_encoding))
else:
self._file.write(to_write)
def _prepare_categoricals(self, data):
"""Check for categorical columns, retain categorical information for
Stata file and convert categorical data to int"""
is_cat = [is_categorical_dtype(data[col]) for col in data]
self._is_col_cat = is_cat
self._value_labels = []
if not any(is_cat):
return data
get_base_missing_value = StataMissingValue.get_base_missing_value
index = data.index
data_formatted = []
for col, col_is_cat in zip(data, is_cat):
if col_is_cat:
self._value_labels.append(StataValueLabel(data[col]))
dtype = data[col].cat.codes.dtype
if dtype == np.int64:
raise ValueError('It is not possible to export '
'int64-based categorical data to Stata.')
values = data[col].cat.codes.values.copy()
# Upcast if needed so that correct missing values can be set
if values.max() >= get_base_missing_value(dtype):
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
else:
dtype = np.float64
values = np.array(values, dtype=dtype)
# Replace missing values with Stata missing value for type
values[values == -1] = get_base_missing_value(dtype)
data_formatted.append((col, values, index))
else:
data_formatted.append((col, data[col]))
return DataFrame.from_items(data_formatted)
def _replace_nans(self, data):
# return data
"""Checks floating point data columns for nans, and replaces these with
the generic Stata for missing value (.)"""
for c in data:
dtype = data[c].dtype
if dtype in (np.float32, np.float64):
if dtype == np.float32:
replacement = self.MISSING_VALUES['f']
else:
replacement = self.MISSING_VALUES['d']
data[c] = data[c].fillna(replacement)
return data
def _check_column_names(self, data):
"""
Checks column names to ensure that they are valid Stata column names.
This includes checks for:
* Non-string names
* Stata keywords
* Variables that start with numbers
* Variables with names that are too long
When an illegal variable name is detected, it is converted, and if
dates are exported, the variable name is propagated to the date
conversion dictionary
"""
converted_names = []
columns = list(data.columns)
original_columns = columns[:]
duplicate_var_id = 0
for j, name in enumerate(columns):
orig_name = name
if not isinstance(name, string_types):
name = text_type(name)
for c in name:
if (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and \
(c < '0' or c > '9') and c != '_':
name = name.replace(c, '_')
# Variable name must not be a reserved word
if name in self.RESERVED_WORDS:
name = '_' + name
# Variable name may not start with a number
if name[0] >= '0' and name[0] <= '9':
name = '_' + name
name = name[:min(len(name), 32)]
if not name == orig_name:
# check for duplicates
while columns.count(name) > 0:
# prepend ascending number to avoid duplicates
name = '_' + str(duplicate_var_id) + name
name = name[:min(len(name), 32)]
duplicate_var_id += 1
                # need to possibly encode the orig name if it's unicode
try:
orig_name = orig_name.encode('utf-8')
except:
pass
converted_names.append(
'{0} -> {1}'.format(orig_name, name))
columns[j] = name
data.columns = columns
# Check date conversion, and fix key if needed
if self._convert_dates:
for c, o in zip(columns, original_columns):
if c != o:
self._convert_dates[c] = self._convert_dates[o]
del self._convert_dates[o]
if converted_names:
import warnings
ws = invalid_name_doc.format('\n '.join(converted_names))
warnings.warn(ws, InvalidColumnName)
return data
def _prepare_pandas(self, data):
# NOTE: we might need a different API / class for pandas objects so
# we can set different semantics - handle this with a PR to pandas.io
data = data.copy()
if self._write_index:
data = data.reset_index()
# Ensure column names are strings
data = self._check_column_names(data)
# Check columns for compatibility with stata, upcast if necessary
# Raise if outside the supported range
data = _cast_to_stata_types(data)
# Replace NaNs with Stata missing values
data = self._replace_nans(data)
# Convert categoricals to int data, and strip labels
data = self._prepare_categoricals(data)
self.nobs, self.nvar = data.shape
self.data = data
self.varlist = data.columns.tolist()
dtypes = data.dtypes
# Ensure all date columns are converted
for col in data:
if col in self._convert_dates:
continue
if is_datetime64_dtype(data[col]):
self._convert_dates[col] = 'tc'
self._convert_dates = _maybe_convert_to_int_keys(self._convert_dates,
self.varlist)
for key in self._convert_dates:
new_type = _convert_datetime_to_stata_type(
self._convert_dates[key]
)
dtypes[key] = np.dtype(new_type)
self.typlist = []
self.fmtlist = []
for col, dtype in dtypes.iteritems():
self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, data[col]))
self.typlist.append(_dtype_to_stata_type(dtype, data[col]))
# set the given format for the datetime cols
if self._convert_dates is not None:
for key in self._convert_dates:
self.fmtlist[key] = self._convert_dates[key]
def write_file(self):
self._file = _open_file_binary_write(
self._fname, self._encoding or self._default_encoding
)
try:
self._write_header(time_stamp=self._time_stamp,
data_label=self._data_label)
self._write_descriptors()
self._write_variable_labels()
# write 5 zeros for expansion fields
self._write(_pad_bytes("", 5))
self._prepare_data()
self._write_data()
self._write_value_labels()
finally:
self._file.close()
def _write_value_labels(self):
for vl in self._value_labels:
self._file.write(vl.generate_value_label(self._byteorder,
self._encoding))
def _write_header(self, data_label=None, time_stamp=None):
byteorder = self._byteorder
# ds_format - just use 114
self._file.write(struct.pack("b", 114))
# byteorder
self._write(byteorder == ">" and "\x01" or "\x02")
# filetype
self._write("\x01")
# unused
self._write("\x00")
# number of vars, 2 bytes
self._file.write(struct.pack(byteorder + "h", self.nvar)[:2])
# number of obs, 4 bytes
self._file.write(struct.pack(byteorder + "i", self.nobs)[:4])
# data label 81 bytes, char, null terminated
if data_label is None:
self._file.write(self._null_terminate(_pad_bytes("", 80)))
else:
self._file.write(
self._null_terminate(_pad_bytes(data_label[:80], 80))
)
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
self._file.write(
self._null_terminate(time_stamp.strftime("%d %b %Y %H:%M"))
)
def _write_descriptors(self, typlist=None, varlist=None, srtlist=None,
fmtlist=None, lbllist=None):
nvar = self.nvar
# typlist, length nvar, format byte array
for typ in self.typlist:
self._write(typ)
# varlist names are checked by _check_column_names
# varlist, requires null terminated
for name in self.varlist:
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
# srtlist, 2*(nvar+1), int array, encoded by byteorder
srtlist = _pad_bytes("", 2 * (nvar + 1))
self._write(srtlist)
# fmtlist, 49*nvar, char array
for fmt in self.fmtlist:
self._write(_pad_bytes(fmt, 49))
# lbllist, 33*nvar, char array
for i in range(nvar):
# Use variable name when categorical
if self._is_col_cat[i]:
name = self.varlist[i]
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
else: # Default is empty label
self._write(_pad_bytes("", 33))
def _write_variable_labels(self):
# Missing labels are 80 blank characters plus null termination
blank = _pad_bytes('', 81)
if self._variable_labels is None:
for i in range(self.nvar):
self._write(blank)
return
for col in self.data:
if col in self._variable_labels:
label = self._variable_labels[col]
if len(label) > 80:
raise ValueError('Variable labels must be 80 characters '
'or fewer')
is_latin1 = all(ord(c) < 256 for c in label)
if not is_latin1:
raise ValueError('Variable labels must contain only '
'characters that can be encoded in '
'Latin-1')
self._write(_pad_bytes(label, 81))
else:
self._write(blank)
def _prepare_data(self):
data = self.data
typlist = self.typlist
convert_dates = self._convert_dates
# 1. Convert dates
if self._convert_dates is not None:
for i, col in enumerate(data):
if i in convert_dates:
data[col] = _datetime_to_stata_elapsed_vec(data[col],
self.fmtlist[i])
# 2. Convert bad string data to '' and pad to correct length
dtype = []
data_cols = []
has_strings = False
for i, col in enumerate(data):
typ = ord(typlist[i])
if typ <= 244:
has_strings = True
data[col] = data[col].fillna('').apply(_pad_bytes, args=(typ,))
stype = 'S%d' % typ
dtype.append(('c' + str(i), stype))
string = data[col].str.encode(self._encoding)
data_cols.append(string.values.astype(stype))
else:
dtype.append(('c' + str(i), data[col].dtype))
data_cols.append(data[col].values)
dtype = np.dtype(dtype)
if has_strings:
self.data = np.fromiter(zip(*data_cols), dtype=dtype)
else:
self.data = data.to_records(index=False)
def _write_data(self):
data = self.data
data.tofile(self._file)
def _null_terminate(self, s, as_string=False):
null_byte = '\x00'
if compat.PY3 and not as_string:
s += null_byte
return s.encode(self._encoding)
else:
s += null_byte
return s
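# --- Illustrative sketch (not part of the original module) ------------------
# End-to-end use of StataWriter, mirroring its class docstring; the output
# path and frame contents are placeholders.
def _example_write_dta(path='/tmp/example.dta'):
    frame = DataFrame({'a': [1.0, 2.0], 'b': [1, 2]})
    writer = StataWriter(path, frame, write_index=False)
    writer.write_file()
    return path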
| gpl-3.0 | -911,198,936,538,238,300 | 35.574901 | 90 | 0.544443 | false |
Snailed/group-generator | gruppeapp/views.py | 1 | 10629 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.datastructures import MultiValueDictKeyError
from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404
from django.views import View
from random import shuffle, randint
from django.contrib.auth import authenticate, login
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.password_validation import validate_password, password_validators_help_texts
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from .models import Gruppe, GruppeElev, Klasse, Elev, Advertisement
from .forms import UserForm, LoginForm
import uuid
import operator
# Create your views here.
#Here, users can enter student names etc. and submit.
def makegroup(request, selectedclassid=0):
loginform = LoginForm(None)
error = False
errormessage = ""
classes = None
selectedclass = None
if request.user.is_authenticated:
classes = Klasse.objects.filter(user=request.user)
if selectedclassid != 0:
selectedclass = Klasse.objects.filter(id=selectedclassid).first()
context = {"error": error, "errormessage": errormessage, "loginform": loginform, "classes":classes, "selectedclass":selectedclass}
return render(request, "gruppeapp/welcome.html", context)
#Here, users can view the newly generated group!
class Creategroup(View):
def post(self, request):
numberofgroups = 1
students = []
studentCounter = request.POST["studentcounter"]
numberofgroups = int(request.POST["numberofgroupsinput"])
currentStudent=""
"""if int(request.POST["createfromclass"]) == 1:
for i in range(0, int(studentCounter)+1):
if int(request.POST["studentexists"+str(i)])==1:
if request.POST["student"+str(i)]:
students.append(request.POST["student"+str(i)])
else:"""
print(str(request.POST))
for i in range(0, int(studentCounter)+1):
print("trying to find student "+str(i))
try:
                if request.POST.get("student"+str(i), 0) != 0:
print("Added student "+str(i))
currentStudent = request.POST["student"+str(i)]
if currentStudent is not "":
students.append(currentStudent)
except MultiValueDictKeyError:
error = True
errormessage = "No students added"
print("Tried to find student"+str(i))
print(str(request.POST))
context = {"error": error, "errormessage": errormessage}
return render(request, "gruppeapp/welcome.html", context)
except ValueError:
error = True
errormessage = "You didn't choose how many groups should be made"
context = {"error": error, "errormessage": errormessage}
return render(request, "gruppeapp/welcome.html", context)
shuffle(students)
linkhash=uuid.uuid4().hex
gruppe = Gruppe(link=linkhash, antalgrupper=numberofgroups)
if request.user.is_authenticated():
gruppe.user = request.user
gruppe.save()
for number, iterator in enumerate(students):
student = GruppeElev(navn=iterator, position=number, gruppe=gruppe)
student.save()
return redirect("gruppeapp:viewgroup", linkhash=linkhash)
def get(self,request):
raise Http404("Page not found")
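# --- Illustrative sketch (not part of the original app) ---------------------
# Creategroup stores each shuffled student with a sequential ``position`` and
# the chosen number of groups (``antalgrupper``); the actual split happens
# when the group is viewed, which is not shown here.  The round-robin rule
# below is an assumption made for illustration only.
def _example_split_positions(number_of_students, number_of_groups):
    groups = [[] for _ in range(number_of_groups)]
    for position in range(number_of_students):
        groups[position % number_of_groups].append(position)
    return groups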
class Creategroupfromclass(View):
def get(self,request):
return redirect("gruppeapp:makegroup")
def post(self,request):
classid=request.POST["classid"]
return redirect("gruppeapp:makegroupwithclassid", selectedclassid=classid)
class About(View):
def get(self,request):
return render(request, "gruppeapp/about.html", {"loginform":LoginForm(None)})
def post(self, request):
raise Http404("Page not found")
def viewgroup(request, linkhash):
loginform = LoginForm(None)
gruppe = Gruppe.objects.get(link=linkhash)
students = []
for student in GruppeElev.objects.filter(gruppe=gruppe):
students.append(student)
smallqueryset = Advertisement.objects.filter(size="small").order_by('?')
bigqueryset = Advertisement.objects.filter(size="large").order_by('?')
print(str(bigqueryset))
smalloverhead = smallqueryset.first()
bigoverhead = bigqueryset.first()
try:
smallunderhead = smallqueryset[1]
bigunderhead = bigqueryset[1]
except IndexError:
smallunderhead = smalloverhead
bigunderhead = bigoverhead
context = {
"students": students,
"numberofgroups": gruppe.antalgrupper,
"numberofgroupsrange": range(0,gruppe.antalgrupper),
"loginform": loginform,
"smalloverhead": smalloverhead,
"bigoverhead": bigoverhead,
"smallunderhead": smallunderhead,
"bigunderhead": bigunderhead,
}
return render(request, "gruppeapp/viewgroup.html", context)
class SignUpView(View):
form_class=UserForm
template_name="gruppeapp/registration_form.html"
def post(self, request):
form = self.form_class(request.POST)
loginform = LoginForm(None)
if form.is_valid():
user = form.save(commit=False)
user.username = form.cleaned_data["username"]
user.email = form.cleaned_data["email"]
password = form.cleaned_data["password1"]
try:
validate_password(password)
except(ValidationError):
return render(request, self.template_name, {"form": form, "errorhelp": password_validators_help_texts(), "loginform": loginform,})
user.set_password(password)
user.save()
user = authenticate(username=form.cleaned_data["username"], password=password)
if user is not None:
if user.is_active:
login(request, user)
return redirect("gruppeapp:makegroup")
return render(request, self.template_name, {"form": form,"errorhelp": password_validators_help_texts(), "loginform": loginform,})
def get(self, request):
form = self.form_class(None)
loginform = LoginForm(None)
return render(request, self.template_name, {"form": form,"errorhelp": password_validators_help_texts(), "loginform": loginform,})
class LoginView(View):
def post(self, request):
form = LoginForm(request.POST)
if form.is_valid():
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
if request.POST.get('remember_me', None):
print("remember_me!")
request.session.set_expiry(60*60*24*30)
else:
print("No remember_me!")
request.session.set_expiry(360)
return redirect("gruppeapp:makegroup")
else:
return redirect("gruppeapp:makegroup")
else:
return redirect("gruppeapp:makegroup")
def get(self, request):
return redirect("gruppeapp:makegroup")
class MyClassesView(View):
template_name="gruppeapp/myclasses.html"
def post(self, request):
if request.user.is_authenticated:
classid = 0
#print("Post: "+str(sorted(request.POST, key=operator.itemgetter(0))))
for key in request.POST: #Gets class id and deletes every student of that class
if key.endswith("classid"):
classid = request.POST[key]
currentclass = Klasse.objects.filter(id=classid)[0]
currentclass.elev_set.all().delete()
for key in sorted(request.POST):
if key.endswith("name"): #gets the name of a student and creates it.
currentstudentname = request.POST[key]
currentclass = Klasse.objects.filter(id=classid)[0]
student = Elev(navn=currentstudentname, klasse=currentclass)
student.save()
elif key.endswith("newstudentname"):
currentstudentname = request.POST[key]
currentclass = Klasse.objects.filter(id=classid)[0]
student = Elev(navn=currentstudentname, klasse=currentclass)
student.save()
classes = Klasse.objects.filter(user=request.user)
classfromquery = classes.filter(pk=classid).first()
return render(request, self.template_name,{"classes": classes, "loginform": LoginForm(None), "currentclass":classfromquery})
def get(self, request, currentclass=0):
if request.user.is_authenticated:
classes = Klasse.objects.filter(user=request.user)
# print("Thing!"+str(classes.first().id))
print("Currentclass="+str(currentclass))
            if currentclass != 0:
classfromquery = classes.filter(pk=currentclass).first()
else:
classfromquery = classes.first()
print("Class from query:"+str(classfromquery))
context = {"classes": classes, "loginform": LoginForm(None), "currentclass": classfromquery}
return render(request, self.template_name, context)
else:
context = {"loginerror": True, "loginform":LoginForm(None)}
return render(request, self.template_name, context)
class CreateNewClass(View):
def post(self, request):
if request.user.is_authenticated:
classname=request.POST["classname"]
description = request.POST["classdescription"]
newclass = Klasse(navn=classname, description=description, user=request.user)
newclass.save()
return redirect("gruppeapp:myclasses")
else:
raise Http404("Page not found")
def get(self, request):
return redirect("gruppeapp:myclasses")
class DeleteClass(View):
def post(self, request):
classid=request.POST["classid"]
Klasse.objects.filter(id=classid).delete()
return redirect("gruppeapp:myclasses")
def get(self, request):
return redirect("gruppeapp:myclasses")
def privacypolicy(request):
return render(request, "gruppeapp/privacypolicy.htm") | mit | 8,469,901,769,704,796,000 | 39.265152 | 146 | 0.6248 | false |
mozilla/captain | vendor/lib/python/django/contrib/staticfiles/finders.py | 102 | 9658 | import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import default_storage, Storage, FileSystemStorage
from django.utils.datastructures import SortedDict
from django.utils.functional import empty, memoize, LazyObject
from django.utils.importlib import import_module
from django.utils._os import safe_join
from django.utils import six
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.storage import AppStaticStorage
_finders = SortedDict()
class BaseFinder(object):
"""
A base file finder to be used for custom staticfiles finder classes.
"""
def find(self, path, all=False):
"""
Given a relative file path this ought to find an
absolute file path.
If the ``all`` parameter is ``False`` (default) only
the first found file path will be returned; if set
to ``True`` a list of all found files paths is returned.
"""
raise NotImplementedError()
def list(self, ignore_patterns):
"""
Given an optional list of paths to ignore, this should return
a two item iterable consisting of the relative path and storage
instance.
"""
raise NotImplementedError()
class FileSystemFinder(BaseFinder):
"""
A static files finder that uses the ``STATICFILES_DIRS`` setting
to locate files.
"""
def __init__(self, apps=None, *args, **kwargs):
# List of locations with static files
self.locations = []
# Maps dir paths to an appropriate storage instance
self.storages = SortedDict()
if not isinstance(settings.STATICFILES_DIRS, (list, tuple)):
raise ImproperlyConfigured(
"Your STATICFILES_DIRS setting is not a tuple or list; "
"perhaps you forgot a trailing comma?")
for root in settings.STATICFILES_DIRS:
if isinstance(root, (list, tuple)):
prefix, root = root
else:
prefix = ''
if os.path.abspath(settings.STATIC_ROOT) == os.path.abspath(root):
raise ImproperlyConfigured(
"The STATICFILES_DIRS setting should "
"not contain the STATIC_ROOT setting")
if (prefix, root) not in self.locations:
self.locations.append((prefix, root))
for prefix, root in self.locations:
filesystem_storage = FileSystemStorage(location=root)
filesystem_storage.prefix = prefix
self.storages[root] = filesystem_storage
super(FileSystemFinder, self).__init__(*args, **kwargs)
def find(self, path, all=False):
"""
Looks for files in the extra locations
as defined in ``STATICFILES_DIRS``.
"""
matches = []
for prefix, root in self.locations:
matched_path = self.find_location(root, path, prefix)
if matched_path:
if not all:
return matched_path
matches.append(matched_path)
return matches
def find_location(self, root, path, prefix=None):
"""
Finds a requested static file in a location, returning the found
absolute path (or ``None`` if no match).
"""
if prefix:
prefix = '%s%s' % (prefix, os.sep)
if not path.startswith(prefix):
return None
path = path[len(prefix):]
path = safe_join(root, path)
if os.path.exists(path):
return path
def list(self, ignore_patterns):
"""
List all files in all locations.
"""
for prefix, root in self.locations:
storage = self.storages[root]
for path in utils.get_files(storage, ignore_patterns):
yield path, storage
class AppDirectoriesFinder(BaseFinder):
"""
A static files finder that looks in the directory of each app as
specified in the source_dir attribute of the given storage class.
"""
storage_class = AppStaticStorage
def __init__(self, apps=None, *args, **kwargs):
# The list of apps that are handled
self.apps = []
# Mapping of app module paths to storage instances
self.storages = SortedDict()
if apps is None:
apps = settings.INSTALLED_APPS
for app in apps:
app_storage = self.storage_class(app)
if os.path.isdir(app_storage.location):
self.storages[app] = app_storage
if app not in self.apps:
self.apps.append(app)
super(AppDirectoriesFinder, self).__init__(*args, **kwargs)
def list(self, ignore_patterns):
"""
List all files in all app storages.
"""
for storage in six.itervalues(self.storages):
if storage.exists(''): # check if storage location exists
for path in utils.get_files(storage, ignore_patterns):
yield path, storage
def find(self, path, all=False):
"""
Looks for files in the app directories.
"""
matches = []
for app in self.apps:
match = self.find_in_app(app, path)
if match:
if not all:
return match
matches.append(match)
return matches
def find_in_app(self, app, path):
"""
Find a requested static file in an app's static locations.
"""
storage = self.storages.get(app, None)
if storage:
if storage.prefix:
prefix = '%s%s' % (storage.prefix, os.sep)
if not path.startswith(prefix):
return None
path = path[len(prefix):]
# only try to find a file if the source dir actually exists
if storage.exists(path):
matched_path = storage.path(path)
if matched_path:
return matched_path
class BaseStorageFinder(BaseFinder):
"""
A base static files finder to be used to extended
with an own storage class.
"""
storage = None
def __init__(self, storage=None, *args, **kwargs):
if storage is not None:
self.storage = storage
if self.storage is None:
raise ImproperlyConfigured("The staticfiles storage finder %r "
"doesn't have a storage class "
"assigned." % self.__class__)
# Make sure we have an storage instance here.
if not isinstance(self.storage, (Storage, LazyObject)):
self.storage = self.storage()
super(BaseStorageFinder, self).__init__(*args, **kwargs)
def find(self, path, all=False):
"""
Looks for files in the default file storage, if it's local.
"""
try:
self.storage.path('')
except NotImplementedError:
pass
else:
if self.storage.exists(path):
match = self.storage.path(path)
if all:
match = [match]
return match
return []
def list(self, ignore_patterns):
"""
List all files of the storage.
"""
for path in utils.get_files(self.storage, ignore_patterns):
yield path, self.storage
class DefaultStorageFinder(BaseStorageFinder):
"""
A static files finder that uses the default storage backend.
"""
storage = default_storage
def __init__(self, *args, **kwargs):
super(DefaultStorageFinder, self).__init__(*args, **kwargs)
base_location = getattr(self.storage, 'base_location', empty)
if not base_location:
raise ImproperlyConfigured("The storage backend of the "
"staticfiles finder %r doesn't have "
"a valid location." % self.__class__)
def find(path, all=False):
"""
Find a static file with the given path using all enabled finders.
If ``all`` is ``False`` (default), return the first matching
absolute path (or ``None`` if no match). Otherwise return a list.
"""
matches = []
for finder in get_finders():
result = finder.find(path, all=all)
if not all and result:
return result
if not isinstance(result, (list, tuple)):
result = [result]
matches.extend(result)
if matches:
return matches
# No match.
return all and [] or None
def get_finders():
for finder_path in settings.STATICFILES_FINDERS:
yield get_finder(finder_path)
def _get_finder(import_path):
"""
Imports the staticfiles finder class described by import_path, where
import_path is the full Python path to the class.
"""
module, attr = import_path.rsplit('.', 1)
try:
mod = import_module(module)
except ImportError as e:
raise ImproperlyConfigured('Error importing module %s: "%s"' %
(module, e))
try:
Finder = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" '
'class.' % (module, attr))
if not issubclass(Finder, BaseFinder):
raise ImproperlyConfigured('Finder "%s" is not a subclass of "%s"' %
(Finder, BaseFinder))
return Finder()
get_finder = memoize(_get_finder, _finders, 1)
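# --- Illustrative sketch (not part of the original module) ------------------
# Typical use of the module-level helpers above; requires configured Django
# settings, and 'css/base.css' is a placeholder asset path.
def _example_locate_asset(relative_path='css/base.css'):
    first_match = find(relative_path)             # absolute path or None
    all_matches = find(relative_path, all=True)   # list of every match
    return first_match, all_matches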
| mpl-2.0 | 3,624,644,190,722,065,400 | 33.992754 | 81 | 0.575896 | false |
datadesk/panda | config/settings.py | 4 | 7153 | #!/usr/bin/env python
import datetime
import os
import django
from django.utils.translation import ugettext_lazy as _
# Which settings are we using?
# Useful for debugging.
SETTINGS = 'base'
# Base paths
DJANGO_ROOT = os.path.dirname(os.path.realpath(django.__file__))
SITE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
# Debugging
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
LOGIN_URL = '/admin/login/'
LOGOUT_URL = '/admin/logout/'
LOGIN_REDIRECT_URL = '/admin/'
SITE_ID = 1
# Default connection to socket
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': 'localhost',
'PORT': '5432',
'NAME': 'panda',
'USER': 'panda',
'PASSWORD': 'panda'
}
}
TIME_ZONE = 'Etc/UTC'
USE_TZ = True
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = False
LOCALE_PATHS = (os.path.join(SITE_ROOT, 'locale'),)
# Media
STATIC_ROOT = os.path.join(SITE_ROOT, 'media')
STATIC_URL = '/site_media/'
ADMIN_MEDIA_PREFIX = '/site_media/admin/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# Uploads
MEDIA_ROOT = '/tmp/panda'
EXPORT_ROOT = '/tmp/panda_exports'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '-lyd+@8@=9oni01+gjvb(txz3%hh_7a9m5*n0q^ce5+&c1fkm('
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.media',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.csrf',
'django.core.context_processors.i18n'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'panda.middleware.CsrfCookieUsedMiddleware'
)
ROOT_URLCONF = 'config.urls'
TEMPLATE_DIRS = (
os.path.join(SITE_ROOT, 'templates')
)
INSTALLED_APPS = (
'longerusername',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.humanize',
'django.contrib.sites',
'django.contrib.staticfiles',
'south',
'tastypie',
'djcelery',
'compressor',
'livesettings',
'jumpstart',
'panda',
'client'
)
SESSION_COOKIE_AGE = 2592000 # 30 days
AUTH_PROFILE_MODULE = 'panda.UserProfile'
# Django-compressor
COMPRESS_ENABLED = False
# Celery
import djcelery
djcelery.setup_loader()
BROKER_TRANSPORT = 'sqlalchemy'
BROKER_URL = 'postgresql://%(USER)s:%(PASSWORD)s@%(HOST)s/%(NAME)s' % DATABASES['default']
CELERY_RESULT_DBURI = 'postgresql://%(USER)s:%(PASSWORD)s@%(HOST)s/%(NAME)s' % DATABASES['default']
CELERYD_HIJACK_ROOT_LOGGER = False
CELERYD_CONCURRENCY = 1
CELERY_IGNORE_RESULT = True
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
CELERYBEAT_SCHEDULE_FILENAME = 'celerybeat-schedule'
from celery.schedules import crontab
CELERYBEAT_SCHEDULE = {
'purge_orphaned_uploads': {
'task': 'panda.tasks.cron.purge_orphaned_uploads',
'schedule': crontab(minute=0, hour=2),
'kwargs': { 'fake': False }
},
'run_subscriptions': {
'task': 'panda.tasks.cron.run_subscriptions',
'schedule': crontab(minute=30, hour=2)
},
'run_admin_alerts': {
'task': 'panda.tasks.cron.run_admin_alerts',
'schedule': crontab(minute=0, hour=4)
}
}
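# Example of registering one more periodic job in the schedule above
# (illustration only -- 'nightly_cleanup' and its task path are placeholders,
# not real tasks in this project):
#
# CELERYBEAT_SCHEDULE['nightly_cleanup'] = {
#     'task': 'panda.tasks.cron.nightly_cleanup',
#     'schedule': crontab(minute=0, hour=3),
# }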
# South
SOUTH_TESTS_MIGRATE = False
# Hack, see: http://stackoverflow.com/questions/3898239/souths-syncdb-migrate-creates-pages-of-output
import south.logger
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
},
'handlers': {
'console': {
'level':'DEBUG',
'class':'logging.StreamHandler',
'formatter': 'standard'
},
'default': {
'level':'INFO',
'class':'loghandlers.GroupWriteRotatingFileHandler',
'filename': '/var/log/panda/panda.log',
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter':'standard',
},
'request_handler': {
'level':'INFO',
'class':'loghandlers.GroupWriteRotatingFileHandler',
'filename': '/var/log/panda/requests.log',
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter':'standard',
},
'backend_handler': {
'level':'DEBUG',
'class':'django.utils.log.NullHandler',
},
},
'loggers': {
'': {
'handlers': ['default', 'console'],
'level': 'DEBUG',
'propagate': True
},
'django.request': {
'handlers': ['request_handler', 'console'],
'level': 'DEBUG',
'propagate': False
},
'django.db': {
'handlers': ['backend_handler'],
'level': 'DEBUG',
'propagate': False
},
'south': {
'handlers': ['console'],
'level': 'INFO',
            'propagate': False
},
'keyedcache': {
'handlers': ['console'],
'level': 'ERROR',
            'propagate': False
},
'requests.packages.urllib3.connectionpool': {
'handlers': ['console'],
'level': 'ERROR',
            'propagate': False
}
}
}
# Solr
SOLR_ENDPOINT = 'http://localhost:8983/solr'
SOLR_DATA_CORE = 'data'
SOLR_DATASETS_CORE = 'datasets'
SOLR_DIRECTORY = '/var/solr'
# Miscellaneous configuration
PANDA_VERSION = '1.1.2'
PANDA_DEFAULT_SEARCH_GROUPS = 10
PANDA_DEFAULT_SEARCH_ROWS_PER_GROUP = 5
PANDA_DEFAULT_SEARCH_ROWS = 50
PANDA_SNIFFER_MAX_SAMPLE_SIZE = 1024 * 100 # 100 KB
PANDA_SAMPLE_DATA_ROWS = 5
PANDA_SCHEMA_SAMPLE_ROWS = 100
PANDA_ACTIVATION_PERIOD = datetime.timedelta(days=30)
PANDA_AVAILABLE_SPACE_WARN = 1024 * 1024 * 1024 * 2 # 2GB
PANDA_AVAILABLE_SPACE_CRITICAL = 1024 * 1024 * 1024 * 1 # 1GB
PANDA_NOTIFICATIONS_TO_SHOW = 50
PANDA_UNCATEGORIZED_ID = 0
PANDA_UNCATEGORIZED_SLUG = 'uncategorized'
# running this through gettext causes file uploads not to work, so disabled until solved!
PANDA_UNCATEGORIZED_NAME = _('Uncategorized')
MOMENT_LANGUAGE_MAPPING = {
'en': None,
'es': 'es',
'de': 'de'
}
# Allow for local (per-user) override
try:
from local_settings import *
except ImportError:
pass
| mit | 1,787,296,661,865,948,400 | 25.201465 | 101 | 0.617783 | false |
hmpf/nav | python/nav/statemon/event.py | 2 | 1484 | #
# Copyright (C) 2018 Uninett AS
#
# This file is part of Network Administration Visualized (NAV)
#
# NAV is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# NAV is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NAV; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
class Event(object):
"""
Class representing a NAV Event
"""
UP = 'UP'
DOWN = 'DOWN'
boxState = 'boxState'
serviceState = 'serviceState'
def __init__(self, serviceid, netboxid, deviceid,
eventtype, source, status, info='', version=''):
self.serviceid = serviceid
self.netboxid = netboxid
self.deviceid = deviceid
self.info = info
self.eventtype = eventtype
self.status = status
self.version = version
self.source = source
def __repr__(self):
return "Service: %s, netbox: %s, eventtype: %s, status: %s" % \
(self.serviceid, self.netboxid, self.eventtype, self.status)
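# --- Illustrative sketch (not part of the original module) ------------------
# Constructing an Event by hand; the ids, source and info text are made up.
def _example_service_down_event():
    return Event(serviceid=1, netboxid=42, deviceid=7,
                 eventtype=Event.serviceState, source='serviceping',
                 status=Event.DOWN, info='http service timed out')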
| gpl-3.0 | -3,148,251,219,955,815,000 | 33.511628 | 75 | 0.668464 | false |
cbrentharris/bricklayer | bricklayer/tests/doctor/config_test.py | 1 | 2313 | from unittest import TestCase
from bricklayer.doctor.config import Configurator
import uuid
import os
import shutil
import tempfile
import ConfigParser
class ConfiguratorTest(TestCase):
def setUp(self):
self.random_dir = tempfile.gettempdir() + '/.' + uuid.uuid4().hex
os.makedirs(self.random_dir)
def tearDown(self):
if self.random_dir is not None:
shutil.rmtree(self.random_dir)
def test_it_creates_a_config_file_with_a_uuid_if_one_doesnt_exist(self):
os.chdir(self.random_dir)
Configurator.create_config_if_doesnt_exist()
self.assertTrue(os.path.exists(self.random_dir + '/.bricklayer' + '/settings.cfg'))
def test_it_adds_the_uuid_to_the_config_file(self):
os.chdir(self.random_dir)
Configurator.create_config_if_doesnt_exist()
config = ConfigParser.RawConfigParser()
config.read([self.random_dir + '/.bricklayer/settings.cfg'])
self.assertIsInstance(config.get('General', 'uuid'), str)
def test_it_returns_the_uuid_stored(self):
os.chdir(self.random_dir)
random_uuid = Configurator.get('uuid')
config = ConfigParser.RawConfigParser()
config.read([self.random_dir + '/.bricklayer/settings.cfg'])
self.assertEqual(config.get('General', 'uuid'), random_uuid)
def test_it_doesnt_overwrite_the_config_file(self):
os.chdir(self.random_dir)
Configurator.create_config_if_doesnt_exist()
config = ConfigParser.RawConfigParser()
config.read([self.random_dir + '/.bricklayer/settings.cfg'])
generated_uuid = config.get('General', 'uuid')
Configurator.create_config_if_doesnt_exist()
config2 = ConfigParser.RawConfigParser()
config2.read([self.random_dir + '/.bricklayer/settings.cfg'])
self.assertEqual(generated_uuid, config.get('General', 'uuid'))
def test_it_adds_to_the_config_file(self):
os.chdir(self.random_dir)
Configurator.create_config_if_doesnt_exist()
Configurator.set('name', 'chris')
self.assertIsNotNone(Configurator.get('name'))
def test_it_gets_from_the_config_file(self):
os.chdir(self.random_dir)
Configurator.create_config_if_doesnt_exist()
self.assertIsNotNone(Configurator.get('uuid'))
| mit | 7,152,472,166,302,749,000 | 38.20339 | 91 | 0.669261 | false |
rich-digi/wp-xml-transformer | cts-import.py | 1 | 4100 | # ------------------------------------------------------------------------------------------------
# Split Wordpress XML (using LXML)
# ------------------------------------------------------------------------------------------------
import sys, os, re, pprint, codecs, datetime, subprocess
# sys.path.append('/usr/local/lib/python2.7/site-packages/')
# from lxml import etree as ET
# from phpserialize import serialize, unserialize
class trml:
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
BOLD = '\033[1m'
NORMAL = '\033[0;0m'
# Wordpress XML namespaces
namespaces = {
'wp' : 'http://wordpress.org/export/1.2/',
'excerpt' : 'http://wordpress.org/export/1.2/excerpt/',
'content' : 'http://purl.org/rss/1.0/modules/content/',
'wfw' : 'http://wellformedweb.org/CommentAPI/',
'dc' : 'http://purl.org/dc/elements/1.1/',
}
"""
REGISTER NAMESPACE WHEN WRITING ONLY
for prefix, uri in namespaces.iteritems():
ET.register_namespace(prefix, uri)
"""
# ------------------------------------------------------------------------------------------------
# Utility functions
def make_dir(dir):
dir = os.getcwd() + dir
if not os.path.exists(dir): os.makedirs(dir)
def write_utf8_file(fp, ustr):
f = codecs.open(os.getcwd()+fp, 'w', 'utf-8');
f.write(ustr)
f.close()
def logprint(ustr=''):
# Unicode-safe logger
print ustr
lfp.write(ustr+'\n')
def shexec(cmd):
try:
res = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except:
res = 'ERROR: Shell command error, running ' + cmd
logprint(res)
return res
def parse_shellvars(file_name):
TIC = "'"
QUOTE = '"'
return_dict = dict()
with open(file_name) as reader:
for line in reader.readlines():
line = re.sub(r"export\s+", "", line.strip())
if "=" in line:
key, value = line.split("=", 1)
# Values that are wrapped in tics: remove the tics but otherwise leave as is
if value.startswith(TIC):
# Remove first tic and everything after the last tic
last_tic_position = value.rindex(TIC)
value = value[1:last_tic_position]
return_dict[key] = value
continue
# Values that are wrapped in quotes: remove the quotes and optional trailing comment
elif value.startswith(QUOTE): # Values that are wrapped quotes
value = re.sub(r'^"(.+?)".+', '\g<1>', value)
# Values that are followed by whitespace or comments: remove the whitespace and/or comments
else:
value = re.sub(r'(#|\s+).*', '', value)
for variable in re.findall(r"\$\{?\w+\}?", value):
# Find embedded shell variables
dict_key = variable.strip("${}")
# Replace them with their values
value = value.replace(variable, return_dict.get(dict_key, ""))
# Add this key to the dictionary
return_dict[key] = value
return return_dict
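# --- Illustrative sketch (not part of the original script) ------------------
# What parse_shellvars() returns for a small shell-style config; the temp file
# path and keys are made up for illustration.
def example_parse_shellvars():
    cfg = '/tmp/example-instance.cfg'
    with open(cfg, 'w') as writer:
        writer.write('BASE_DIR="/srv/content"  # root of the content tree\n')
        writer.write('LOG_DIR="${BASE_DIR}/logs"  # derived from BASE_DIR\n')
    # -> {'BASE_DIR': '/srv/content', 'LOG_DIR': '/srv/content/logs'}
    return parse_shellvars(cfg)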
# --------------------------------------------------------------------------------
# RUN
def run():
logprint()
logprint('------------------------------------------------------')
logprint('cts-import.py : running at ' + logtime)
logprint('------------------------------------------------------')
logprint()
logprint('Let\'s join & import...')
logprint()
logprint(pprint.pformat(config))
logprint()
if len(sys.argv) > 1: revision = sys.argv[1]
# Pull latest version from central Git repo
os.chdir(config['GIT_ContentLocal'])
shexec('pwd')
shexec('git pull')
# parse_html_xml_and_join()
logprint('Copying into import area @')
shexec(' '.join(['cp -pr', config['GIT_ContentLocal'], config['GIT_ImportTarget']]))
# res = trigger_import()
logprint()
logprint('STATUS: SUCCESS')
logprint('DONE')
logprint()
# --------------------------------------------------------------------------------
if __name__ == '__main__':
# Parse config file
config = parse_shellvars('bizclub-instance.cfg')
# Create logfile as global
today = datetime.datetime.today()
logtime = today.strftime('%Y-%m-%d-%H-%M-%S')
logfile = config['CTS_ImportLogDir'] + 'cts-import-' + logtime + '.log'
lfp = codecs.open(logfile, 'w', 'utf-8')
# Run
run();
# Close logfile
lfp.close()
| mit | -930,942,110,120,406,500 | 26.891156 | 98 | 0.560244 | false |
yd0str/infernal-twin | build/pillow/PIL/PaletteFile.py | 72 | 1113 | #
# Python Imaging Library
# $Id$
#
# stuff to read simple, teragon-style palette files
#
# History:
# 97-08-23 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from PIL._binary import o8
##
# File handler for Teragon-style palette files.
class PaletteFile(object):
rawmode = "RGB"
def __init__(self, fp):
self.palette = [(i, i, i) for i in range(256)]
while True:
s = fp.readline()
if not s:
break
if s[0:1] == b"#":
continue
if len(s) > 100:
raise SyntaxError("bad palette file")
v = [int(x) for x in s.split()]
try:
[i, r, g, b] = v
except ValueError:
[i, r] = v
g = b = r
if 0 <= i <= 255:
self.palette[i] = o8(r) + o8(g) + o8(b)
self.palette = b"".join(self.palette)
def getpalette(self):
return self.palette, self.rawmode
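# --- Illustrative sketch (not part of the original module) ------------------
# Feeding PaletteFile an in-memory palette; the three entries are made up.
def _example_parse_palette():
    import io
    data = io.BytesIO(b"# comment\n0 255 0 0\n1 0 255 0\n2 0 0 255\n")
    pal = PaletteFile(data)
    raw, rawmode = pal.getpalette()
    return rawmode, raw[:9]  # "RGB" and the first three packed triplets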
| gpl-3.0 | 6,438,082,773,590,776,000 | 19.236364 | 66 | 0.497754 | false |
daphne-yu/aubio | python/tests/test_source.py | 8 | 2264 | #! /usr/bin/env python
from numpy.testing import TestCase, assert_equal, assert_almost_equal
from aubio import fvec, source
from numpy import array
from utils import list_all_sounds
list_of_sounds = list_all_sounds('sounds')
path = None
class aubio_source_test_case(TestCase):
def setUp(self):
if not len(list_of_sounds): self.skipTest('add some sound files in \'python/tests/sounds\'')
def read_from_sink(self, f):
total_frames = 0
while True:
vec, read = f()
total_frames += read
if read < f.hop_size: break
print "read", "%.2fs" % (total_frames / float(f.samplerate) ),
print "(", total_frames, "frames", "in",
print total_frames / f.hop_size, "blocks", "at", "%dHz" % f.samplerate, ")",
print "from", f.uri
def test_samplerate_hopsize(self):
for p in list_of_sounds:
for samplerate, hop_size in zip([0, 44100, 8000, 32000], [ 512, 512, 64, 256]):
f = source(p, samplerate, hop_size)
assert f.samplerate != 0
self.read_from_sink(f)
def test_samplerate_none(self):
for p in list_of_sounds:
f = source(p)
assert f.samplerate != 0
self.read_from_sink(f)
def test_samplerate_0(self):
for p in list_of_sounds:
f = source(p, 0)
assert f.samplerate != 0
self.read_from_sink(f)
def test_wrong_samplerate(self):
for p in list_of_sounds:
try:
f = source(p, -1)
except Exception, e:
print e
else:
self.fail('does not fail with wrong samplerate')
def test_wrong_hop_size(self):
for p in list_of_sounds:
try:
f = source(p, 0, -1)
except Exception, e:
print e
else:
self.fail('does not fail with wrong hop_size %d' % f.hop_size)
def test_zero_hop_size(self):
for p in list_of_sounds:
f = source(p, 0, 0)
assert f.samplerate != 0
assert f.hop_size != 0
self.read_from_sink(f)
if __name__ == '__main__':
from unittest import main
main()
| gpl-3.0 | -8,988,579,364,581,915,000 | 30.013699 | 100 | 0.534011 | false |
yjmade/odoo | addons/decimal_precision/tests/test_qweb_float.py | 103 | 2000 | # -*- coding: utf-8 -*-
from openerp.tests import common
class TestFloatExport(common.TransactionCase):
def setUp(self):
super(TestFloatExport, self).setUp()
self.Model = self.registry('decimal.precision.test')
def get_converter(self, name):
converter = self.registry('ir.qweb.field.float')
column = self.Model._all_columns[name].column
return lambda value, options=None: converter.value_to_html(
self.cr, self.uid, value, column, options=options, context=None)
def test_basic_float(self):
converter = self.get_converter('float')
self.assertEqual(
converter(42.0),
"42.0")
self.assertEqual(
converter(42.12345),
"42.12345")
converter = self.get_converter('float_2')
self.assertEqual(
converter(42.0),
"42.00")
self.assertEqual(
converter(42.12345),
"42.12")
converter = self.get_converter('float_4')
self.assertEqual(
converter(42.0),
'42.0000')
self.assertEqual(
converter(42.12345),
'42.1234')
def test_precision_domain(self):
DP = self.registry('decimal.precision')
DP.create(self.cr, self.uid, {
'name': 'A',
'digits': 2,
})
DP.create(self.cr, self.uid, {
'name': 'B',
'digits': 6,
})
converter = self.get_converter('float')
self.assertEqual(
converter(42.0, {'decimal_precision': 'A'}),
'42.00')
self.assertEqual(
converter(42.0, {'decimal_precision': 'B'}),
'42.000000')
converter = self.get_converter('float_4')
self.assertEqual(
converter(42.12345, {'decimal_precision': 'A'}),
'42.12')
self.assertEqual(
converter(42.12345, {'decimal_precision': 'B'}),
'42.123450')
| agpl-3.0 | -1,733,428,438,505,503,200 | 29.30303 | 76 | 0.5305 | false |
richo/groundstation | groundstation/stream_client.py | 1 | 1052 | from sockets.stream_socket import StreamSocket
from transfer.request import Request
from transfer.notification import Notification
import settings
from groundstation.utils import path2id
import groundstation.logger
log = groundstation.logger.getLogger(__name__)
class StreamClient(StreamSocket):
def __init__(self, addr):
super(StreamClient, self).__init__()
# TODO Pretty sure this should be a struct sockaddr
self.peer = addr
self.socket.connect((addr, settings.PORT))
self.socket.setblocking(False)
def begin_handshake(self, station):
request = Request("LISTALLOBJECTS", station=station, stream=self)
station.register_request(request)
self.enqueue(request)
def notify_new_object(self, station, path):
# TODO FSWatcher should probably be responsible for catching these to
# keep signal:noise sane
obj = path2id(path)
notification = Notification("NEWOBJECT", station=station, stream=self, payload=obj)
self.enqueue(notification)
| mit | 4,035,132,895,205,636,000 | 34.066667 | 91 | 0.709125 | false |
argivaitv/argivaitv | plugin.video.salts/scrapers/movie25_scraper.py | 1 | 3790 | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import urllib
import urlparse
import re
from salts_lib import kodi
import base64
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import QUALITIES
QUALITY_MAP = {'DVD': QUALITIES.HIGH, 'CAM': QUALITIES.LOW}
BASE_URL = 'http://movie25.ag'
class Movie25_Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'movie25'
def resolve_link(self, link):
url = urlparse.urljoin(self.base_url, link)
html = self._http_get(url, cache_limit=0)
match = re.search('href=\'([^\']*)\'"\s+value="Click Here to Play"', html, re.DOTALL | re.I)
if match:
return match.group(1)
else:
match = re.search('<IFRAME SRC="(?:/?tz\.php\?url=external\.php\?url=)?([^"]+)', html, re.DOTALL | re.I)
if match:
try:
return base64.b64decode(match.group(1))
except TypeError:
return match.group(1)
else:
return link
def format_source_label(self, item):
return '[%s] %s' % (item['quality'], item['host'])
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url and source_url != FORCE_NO_MATCH:
url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(url, cache_limit=.5)
quality = None
match = re.search('Links\s+-\s+Quality\s*([^<]*)</h1>', html, re.DOTALL | re.I)
if match:
quality = QUALITY_MAP.get(match.group(1).strip().upper())
for match in re.finditer('id="link_name">\s*([^<]+).*?href="([^"]+)', html, re.DOTALL):
host, url = match.groups()
hoster = {'multi-part': False, 'host': host, 'class': self, 'url': url, 'quality': self._get_quality(video, host, quality), 'rating': None, 'views': None, 'direct': False}
hosters.append(hoster)
return hosters
def get_url(self, video):
return super(Movie25_Scraper, self)._default_get_url(video)
def search(self, video_type, title, year):
search_url = urlparse.urljoin(self.base_url, '/search.php?key=')
search_url += urllib.quote_plus('%s %s' % (title, year))
search_url += '&submit='
html = self._http_get(search_url, cache_limit=.25)
pattern = 'class="movie_about">.*?href="([^"]+).*?>\s+(.*?)\s*\(?(\d{4})?\)?\s+</a></h1>'
results = []
for match in re.finditer(pattern, html, re.DOTALL):
url, title, year = match.groups('')
result = {'url': self._pathify_url(url), 'title': title, 'year': year}
results.append(result)
return results
| gpl-2.0 | 6,805,842,469,713,769,000 | 38.072165 | 187 | 0.595515 | false |
formiano/enigma2-4.4 | lib/python/Components/Renderer/valioPosition.py | 13 | 1298 | # -*- coding: utf-8 -*-
#
# Maximum Temperature Renderer for Dreambox/Enigma-2
# Version: 1.0
# Coded by Vali (c)2010-2011
#
#######################################################################
from Components.VariableText import VariableText
from enigma import eLabel
from Renderer import Renderer
class valioPosition(Renderer, VariableText):
def __init__(self):
Renderer.__init__(self)
VariableText.__init__(self)
GUI_WIDGET = eLabel
def changed(self, what):
if not self.suspended:
orb_pos = " "
service = self.source.service
feinfo = (service and service.frontendInfo())
if (feinfo is not None):
frontendData = (feinfo and feinfo.getAll(True))
if (frontendData is not None):
if (frontendData.get("tuner_type") == "DVB-S"):
orbital_pos = int(frontendData["orbital_position"])
if orbital_pos > 1800:
orb_pos = str((float(3600 - orbital_pos))/10.0) + "°W"
elif orbital_pos > 0:
orb_pos = str((float(orbital_pos))/10.0) + "°E"
elif (frontendData.get("tuner_type") == "DVB-T"):
orb_pos = "DVB-T"
elif (frontendData.get("tuner_type") == "DVB-C"):
orb_pos = "DVB-C"
self.text = orb_pos
def onShow(self):
self.suspended = False
self.changed(None)
def onHide(self):
self.suspended = True
| gpl-2.0 | -8,136,977,001,451,987,000 | 28.5 | 71 | 0.607088 | false |
derekjchow/models | research/skip_thoughts/skip_thoughts/skip_thoughts_model_test.py | 19 | 6755 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_models.skip_thoughts.skip_thoughts_model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from skip_thoughts import configuration
from skip_thoughts import skip_thoughts_model
class SkipThoughtsModel(skip_thoughts_model.SkipThoughtsModel):
"""Subclass of SkipThoughtsModel without the disk I/O."""
def build_inputs(self):
if self.mode == "encode":
# Encode mode doesn't read from disk, so defer to parent.
return super(SkipThoughtsModel, self).build_inputs()
else:
# Replace disk I/O with random Tensors.
self.encode_ids = tf.random_uniform(
[self.config.batch_size, 15],
minval=0,
maxval=self.config.vocab_size,
dtype=tf.int64)
self.decode_pre_ids = tf.random_uniform(
[self.config.batch_size, 15],
minval=0,
maxval=self.config.vocab_size,
dtype=tf.int64)
self.decode_post_ids = tf.random_uniform(
[self.config.batch_size, 15],
minval=0,
maxval=self.config.vocab_size,
dtype=tf.int64)
self.encode_mask = tf.ones_like(self.encode_ids)
self.decode_pre_mask = tf.ones_like(self.decode_pre_ids)
self.decode_post_mask = tf.ones_like(self.decode_post_ids)
class SkipThoughtsModelTest(tf.test.TestCase):
def setUp(self):
super(SkipThoughtsModelTest, self).setUp()
self._model_config = configuration.model_config()
def _countModelParameters(self):
"""Counts the number of parameters in the model at top level scope."""
counter = {}
for v in tf.global_variables():
name = v.op.name.split("/")[0]
num_params = v.get_shape().num_elements()
if not num_params:
self.fail("Could not infer num_elements from Variable %s" % v.op.name)
counter[name] = counter.get(name, 0) + num_params
return counter
def _checkModelParameters(self):
"""Verifies the number of parameters in the model."""
param_counts = self._countModelParameters()
expected_param_counts = {
# vocab_size * embedding_size
"word_embedding": 12400000,
# GRU Cells
"encoder": 21772800,
"decoder_pre": 21772800,
"decoder_post": 21772800,
# (encoder_dim + 1) * vocab_size
"logits": 48020000,
"global_step": 1,
}
self.assertDictEqual(expected_param_counts, param_counts)
def _checkOutputs(self, expected_shapes, feed_dict=None):
"""Verifies that the model produces expected outputs.
Args:
expected_shapes: A dict mapping Tensor or Tensor name to expected output
shape.
feed_dict: Values of Tensors to feed into Session.run().
"""
fetches = expected_shapes.keys()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
outputs = sess.run(fetches, feed_dict)
for index, output in enumerate(outputs):
tensor = fetches[index]
expected = expected_shapes[tensor]
actual = output.shape
if expected != actual:
self.fail("Tensor %s has shape %s (expected %s)." % (tensor, actual,
expected))
def testBuildForTraining(self):
model = SkipThoughtsModel(self._model_config, mode="train")
model.build()
self._checkModelParameters()
expected_shapes = {
# [batch_size, length]
model.encode_ids: (128, 15),
model.decode_pre_ids: (128, 15),
model.decode_post_ids: (128, 15),
model.encode_mask: (128, 15),
model.decode_pre_mask: (128, 15),
model.decode_post_mask: (128, 15),
# [batch_size, length, word_embedding_dim]
model.encode_emb: (128, 15, 620),
model.decode_pre_emb: (128, 15, 620),
model.decode_post_emb: (128, 15, 620),
# [batch_size, encoder_dim]
model.thought_vectors: (128, 2400),
# [batch_size * length]
model.target_cross_entropy_losses[0]: (1920,),
model.target_cross_entropy_losses[1]: (1920,),
# [batch_size * length]
model.target_cross_entropy_loss_weights[0]: (1920,),
model.target_cross_entropy_loss_weights[1]: (1920,),
# Scalar
model.total_loss: (),
}
self._checkOutputs(expected_shapes)
def testBuildForEval(self):
model = SkipThoughtsModel(self._model_config, mode="eval")
model.build()
self._checkModelParameters()
expected_shapes = {
# [batch_size, length]
model.encode_ids: (128, 15),
model.decode_pre_ids: (128, 15),
model.decode_post_ids: (128, 15),
model.encode_mask: (128, 15),
model.decode_pre_mask: (128, 15),
model.decode_post_mask: (128, 15),
# [batch_size, length, word_embedding_dim]
model.encode_emb: (128, 15, 620),
model.decode_pre_emb: (128, 15, 620),
model.decode_post_emb: (128, 15, 620),
# [batch_size, encoder_dim]
model.thought_vectors: (128, 2400),
# [batch_size * length]
model.target_cross_entropy_losses[0]: (1920,),
model.target_cross_entropy_losses[1]: (1920,),
# [batch_size * length]
model.target_cross_entropy_loss_weights[0]: (1920,),
model.target_cross_entropy_loss_weights[1]: (1920,),
# Scalar
model.total_loss: (),
}
self._checkOutputs(expected_shapes)
def testBuildForEncode(self):
model = SkipThoughtsModel(self._model_config, mode="encode")
model.build()
# Test feeding a batch of word embeddings to get skip thought vectors.
encode_emb = np.random.rand(64, 15, 620)
encode_mask = np.ones((64, 15), dtype=np.int64)
feed_dict = {model.encode_emb: encode_emb, model.encode_mask: encode_mask}
expected_shapes = {
# [batch_size, encoder_dim]
model.thought_vectors: (64, 2400),
}
self._checkOutputs(expected_shapes, feed_dict)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | 9,216,509,880,755,745,000 | 34.366492 | 80 | 0.623834 | false |
edry/edx-platform | common/lib/xmodule/xmodule/annotator_token.py | 211 | 1542 | """
This file contains a function used to retrieve the token for the annotation backend
without having to create a view, but just returning a string instead.
It can be called from other files by using the following:
from xmodule.annotator_token import retrieve_token
"""
import datetime
from firebase_token_generator import create_token
def retrieve_token(userid, secret):
'''
Return a token for the backend of annotations.
It uses the course id to retrieve a variable that contains the secret
token found in inheritance.py. It also contains information of when
the token was issued. This will be stored with the user along with
the id for identification purposes in the backend.
'''
# the following five lines of code allows you to include the default timezone in the iso format
# for more information: http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
newhour, newmin = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60, 60)
newtime = "%s%+02d:%02d" % (dtnow.isoformat(), newhour, newmin)
# uses the issued time (UTC plus timezone), the consumer key and the user's email to maintain a
# federated system in the annotation backend server
custom_data = {"issuedAt": newtime, "consumerKey": secret, "userId": userid, "ttl": 86400}
newtoken = create_token(secret, custom_data)
return newtoken
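# A minimal usage sketch (the user id and secret below are hypothetical; in
# practice the secret comes from the course's annotation settings referenced
# in the docstring above):
#
#   token = retrieve_token("student-42", "course-annotation-secret")
#   # `token` is a signed Firebase token carrying issuedAt, consumerKey,
#   # userId and ttl, ready to hand to the annotation backend.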
| agpl-3.0 | 478,234,736,981,009,540 | 47.1875 | 141 | 0.732815 | false |
ericMayer/tekton-master | backend/venv/lib/python2.7/site-packages/unidecode/x067.py | 252 | 4635 | data = (
'Zui ', # 0x00
'Can ', # 0x01
'Xu ', # 0x02
'Hui ', # 0x03
'Yin ', # 0x04
'Qie ', # 0x05
'Fen ', # 0x06
'Pi ', # 0x07
'Yue ', # 0x08
'You ', # 0x09
'Ruan ', # 0x0a
'Peng ', # 0x0b
'Ban ', # 0x0c
'Fu ', # 0x0d
'Ling ', # 0x0e
'Fei ', # 0x0f
'Qu ', # 0x10
'[?] ', # 0x11
'Nu ', # 0x12
'Tiao ', # 0x13
'Shuo ', # 0x14
'Zhen ', # 0x15
'Lang ', # 0x16
'Lang ', # 0x17
'Juan ', # 0x18
'Ming ', # 0x19
'Huang ', # 0x1a
'Wang ', # 0x1b
'Tun ', # 0x1c
'Zhao ', # 0x1d
'Ji ', # 0x1e
'Qi ', # 0x1f
'Ying ', # 0x20
'Zong ', # 0x21
'Wang ', # 0x22
'Tong ', # 0x23
'Lang ', # 0x24
'[?] ', # 0x25
'Meng ', # 0x26
'Long ', # 0x27
'Mu ', # 0x28
'Deng ', # 0x29
'Wei ', # 0x2a
'Mo ', # 0x2b
'Ben ', # 0x2c
'Zha ', # 0x2d
'Zhu ', # 0x2e
'Zhu ', # 0x2f
'[?] ', # 0x30
'Zhu ', # 0x31
'Ren ', # 0x32
'Ba ', # 0x33
'Po ', # 0x34
'Duo ', # 0x35
'Duo ', # 0x36
'Dao ', # 0x37
'Li ', # 0x38
'Qiu ', # 0x39
'Ji ', # 0x3a
'Jiu ', # 0x3b
'Bi ', # 0x3c
'Xiu ', # 0x3d
'Ting ', # 0x3e
'Ci ', # 0x3f
'Sha ', # 0x40
'Eburi ', # 0x41
'Za ', # 0x42
'Quan ', # 0x43
'Qian ', # 0x44
'Yu ', # 0x45
'Gan ', # 0x46
'Wu ', # 0x47
'Cha ', # 0x48
'Shan ', # 0x49
'Xun ', # 0x4a
'Fan ', # 0x4b
'Wu ', # 0x4c
'Zi ', # 0x4d
'Li ', # 0x4e
'Xing ', # 0x4f
'Cai ', # 0x50
'Cun ', # 0x51
'Ren ', # 0x52
'Shao ', # 0x53
'Tuo ', # 0x54
'Di ', # 0x55
'Zhang ', # 0x56
'Mang ', # 0x57
'Chi ', # 0x58
'Yi ', # 0x59
'Gu ', # 0x5a
'Gong ', # 0x5b
'Du ', # 0x5c
'Yi ', # 0x5d
'Qi ', # 0x5e
'Shu ', # 0x5f
'Gang ', # 0x60
'Tiao ', # 0x61
'Moku ', # 0x62
'Soma ', # 0x63
'Tochi ', # 0x64
'Lai ', # 0x65
'Sugi ', # 0x66
'Mang ', # 0x67
'Yang ', # 0x68
'Ma ', # 0x69
'Miao ', # 0x6a
'Si ', # 0x6b
'Yuan ', # 0x6c
'Hang ', # 0x6d
'Fei ', # 0x6e
'Bei ', # 0x6f
'Jie ', # 0x70
'Dong ', # 0x71
'Gao ', # 0x72
'Yao ', # 0x73
'Xian ', # 0x74
'Chu ', # 0x75
'Qun ', # 0x76
'Pa ', # 0x77
'Shu ', # 0x78
'Hua ', # 0x79
'Xin ', # 0x7a
'Chou ', # 0x7b
'Zhu ', # 0x7c
'Chou ', # 0x7d
'Song ', # 0x7e
'Ban ', # 0x7f
'Song ', # 0x80
'Ji ', # 0x81
'Yue ', # 0x82
'Jin ', # 0x83
'Gou ', # 0x84
'Ji ', # 0x85
'Mao ', # 0x86
'Pi ', # 0x87
'Bi ', # 0x88
'Wang ', # 0x89
'Ang ', # 0x8a
'Fang ', # 0x8b
'Fen ', # 0x8c
'Yi ', # 0x8d
'Fu ', # 0x8e
'Nan ', # 0x8f
'Xi ', # 0x90
'Hu ', # 0x91
'Ya ', # 0x92
'Dou ', # 0x93
'Xun ', # 0x94
'Zhen ', # 0x95
'Yao ', # 0x96
'Lin ', # 0x97
'Rui ', # 0x98
'E ', # 0x99
'Mei ', # 0x9a
'Zhao ', # 0x9b
'Guo ', # 0x9c
'Zhi ', # 0x9d
'Cong ', # 0x9e
'Yun ', # 0x9f
'Waku ', # 0xa0
'Dou ', # 0xa1
'Shu ', # 0xa2
'Zao ', # 0xa3
'[?] ', # 0xa4
'Li ', # 0xa5
'Haze ', # 0xa6
'Jian ', # 0xa7
'Cheng ', # 0xa8
'Matsu ', # 0xa9
'Qiang ', # 0xaa
'Feng ', # 0xab
'Nan ', # 0xac
'Xiao ', # 0xad
'Xian ', # 0xae
'Ku ', # 0xaf
'Ping ', # 0xb0
'Yi ', # 0xb1
'Xi ', # 0xb2
'Zhi ', # 0xb3
'Guai ', # 0xb4
'Xiao ', # 0xb5
'Jia ', # 0xb6
'Jia ', # 0xb7
'Gou ', # 0xb8
'Fu ', # 0xb9
'Mo ', # 0xba
'Yi ', # 0xbb
'Ye ', # 0xbc
'Ye ', # 0xbd
'Shi ', # 0xbe
'Nie ', # 0xbf
'Bi ', # 0xc0
'Duo ', # 0xc1
'Yi ', # 0xc2
'Ling ', # 0xc3
'Bing ', # 0xc4
'Ni ', # 0xc5
'La ', # 0xc6
'He ', # 0xc7
'Pan ', # 0xc8
'Fan ', # 0xc9
'Zhong ', # 0xca
'Dai ', # 0xcb
'Ci ', # 0xcc
'Yang ', # 0xcd
'Fu ', # 0xce
'Bo ', # 0xcf
'Mou ', # 0xd0
'Gan ', # 0xd1
'Qi ', # 0xd2
'Ran ', # 0xd3
'Rou ', # 0xd4
'Mao ', # 0xd5
'Zhao ', # 0xd6
'Song ', # 0xd7
'Zhe ', # 0xd8
'Xia ', # 0xd9
'You ', # 0xda
'Shen ', # 0xdb
'Ju ', # 0xdc
'Tuo ', # 0xdd
'Zuo ', # 0xde
'Nan ', # 0xdf
'Ning ', # 0xe0
'Yong ', # 0xe1
'Di ', # 0xe2
'Zhi ', # 0xe3
'Zha ', # 0xe4
'Cha ', # 0xe5
'Dan ', # 0xe6
'Gu ', # 0xe7
'Pu ', # 0xe8
'Jiu ', # 0xe9
'Ao ', # 0xea
'Fu ', # 0xeb
'Jian ', # 0xec
'Bo ', # 0xed
'Duo ', # 0xee
'Ke ', # 0xef
'Nai ', # 0xf0
'Zhu ', # 0xf1
'Bi ', # 0xf2
'Liu ', # 0xf3
'Chai ', # 0xf4
'Zha ', # 0xf5
'Si ', # 0xf6
'Zhu ', # 0xf7
'Pei ', # 0xf8
'Shi ', # 0xf9
'Guai ', # 0xfa
'Cha ', # 0xfb
'Yao ', # 0xfc
'Jue ', # 0xfd
'Jiu ', # 0xfe
'Shi ', # 0xff
)
| mit | -5,882,486,668,536,943,000 | 16.965116 | 19 | 0.38835 | false |
mscherer/ansible-modules-core | cloud/openstack/os_port.py | 70 | 12457 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_port
short_description: Add/Update/Delete ports from an OpenStack cloud.
extends_documentation_fragment: openstack
author: "Davide Agnello (@dagnello)"
version_added: "2.0"
description:
- Add, Update or Remove ports from an OpenStack cloud. A I(state) of
'present' will ensure the port is created or updated if required.
options:
network:
description:
- Network ID or name this port belongs to.
required: true
name:
description:
- Name that has to be given to the port.
required: false
default: None
fixed_ips:
description:
- Desired IP and/or subnet for this port. Subnet is referenced by
subnet_id and IP is referenced by ip_address.
required: false
default: None
admin_state_up:
description:
- Sets admin state.
required: false
default: None
mac_address:
description:
- MAC address of this port.
required: false
default: None
security_groups:
description:
- Security group(s) ID(s) or name(s) associated with the port (comma
separated string or YAML list)
required: false
default: None
no_security_groups:
description:
- Do not associate a security group with this port.
required: false
default: False
allowed_address_pairs:
description:
- "Allowed address pairs list. Allowed address pairs are supported with
dictionary structure.
e.g. allowed_address_pairs:
- ip_address: 10.1.0.12
mac_address: ab:cd:ef:12:34:56
- ip_address: ..."
required: false
default: None
extra_dhcp_opts:
description:
- "Extra dhcp options to be assigned to this port. Extra options are
supported with dictionary structure.
e.g. extra_dhcp_opts:
- opt_name: opt name1
opt_value: value1
- opt_name: ..."
required: false
default: None
device_owner:
description:
- The ID of the entity that uses this port.
required: false
default: None
device_id:
description:
- Device ID of device using this port.
required: false
default: None
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
'''
EXAMPLES = '''
# Create a port
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: port1
network: foo
# Create a port with a static IP
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: port1
network: foo
fixed_ips:
- ip_address: 10.1.0.21
# Create a port with No security groups
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: port1
network: foo
no_security_groups: True
# Update the existing 'port1' port with multiple security groups (version 1)
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/d
username: admin
password: admin
project_name: admin
name: port1
security_groups: 1496e8c7-4918-482a-9172-f4f00fc4a3a5,057d4bdf-6d4d-472...
# Update the existing 'port1' port with multiple security groups (version 2)
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/d
username: admin
password: admin
project_name: admin
name: port1
security_groups:
- 1496e8c7-4918-482a-9172-f4f00fc4a3a5
- 057d4bdf-6d4d-472...
'''
RETURN = '''
id:
description: Unique UUID.
returned: success
type: string
name:
description: Name given to the port.
returned: success
type: string
network_id:
description: Network ID this port belongs in.
returned: success
type: string
security_groups:
description: Security group(s) associated with this port.
returned: success
type: list of strings
status:
description: Port's status.
returned: success
type: string
fixed_ips:
description: Fixed ip(s) associated with this port.
returned: success
type: list of dicts
tenant_id:
description: Tenant id associated with this port.
returned: success
type: string
allowed_address_pairs:
description: Allowed address pairs with this port.
returned: success
type: list of dicts
admin_state_up:
description: Admin state up flag for this port.
returned: success
type: bool
'''
def _needs_update(module, port, cloud):
"""Check for differences in the updatable values.
NOTE: We don't currently allow name updates.
"""
compare_simple = ['admin_state_up',
'mac_address',
'device_owner',
'device_id']
compare_dict = ['allowed_address_pairs',
'extra_dhcp_opts']
compare_list = ['security_groups']
for key in compare_simple:
if module.params[key] is not None and module.params[key] != port[key]:
return True
for key in compare_dict:
if module.params[key] is not None and cmp(module.params[key],
port[key]) != 0:
return True
for key in compare_list:
if module.params[key] is not None and (set(module.params[key]) !=
set(port[key])):
return True
# NOTE: if port was created or updated with 'no_security_groups=True',
# subsequent updates without 'no_security_groups' flag or
# 'no_security_groups=False' and no specified 'security_groups', will not
# result in an update to the port where the default security group is
# applied.
if module.params['no_security_groups'] and port['security_groups'] != []:
return True
if module.params['fixed_ips'] is not None:
for item in module.params['fixed_ips']:
if 'ip_address' in item:
# if ip_address in request does not match any in existing port,
# update is required.
if not any(match['ip_address'] == item['ip_address']
for match in port['fixed_ips']):
return True
if 'subnet_id' in item:
return True
for item in port['fixed_ips']:
# if ip_address in existing port does not match any in request,
# update is required.
if not any(match.get('ip_address') == item['ip_address']
for match in module.params['fixed_ips']):
return True
return False
def _system_state_change(module, port, cloud):
state = module.params['state']
if state == 'present':
if not port:
return True
return _needs_update(module, port, cloud)
if state == 'absent' and port:
return True
return False
def _compose_port_args(module, cloud):
port_kwargs = {}
optional_parameters = ['name',
'fixed_ips',
'admin_state_up',
'mac_address',
'security_groups',
'allowed_address_pairs',
'extra_dhcp_opts',
'device_owner',
'device_id']
for optional_param in optional_parameters:
if module.params[optional_param] is not None:
port_kwargs[optional_param] = module.params[optional_param]
if module.params['no_security_groups']:
port_kwargs['security_groups'] = []
return port_kwargs
def get_security_group_id(module, cloud, security_group_name_or_id):
security_group = cloud.get_security_group(security_group_name_or_id)
if not security_group:
module.fail_json(msg="Security group: %s, was not found"
% security_group_name_or_id)
return security_group['id']
def main():
argument_spec = openstack_full_argument_spec(
network=dict(required=False),
name=dict(required=False),
fixed_ips=dict(type='list', default=None),
admin_state_up=dict(type='bool', default=None),
mac_address=dict(default=None),
security_groups=dict(default=None, type='list'),
no_security_groups=dict(default=False, type='bool'),
allowed_address_pairs=dict(type='list', default=None),
extra_dhcp_opts=dict(type='list', default=None),
device_owner=dict(default=None),
device_id=dict(default=None),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
['no_security_groups', 'security_groups'],
]
)
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
name = module.params['name']
state = module.params['state']
try:
cloud = shade.openstack_cloud(**module.params)
if module.params['security_groups']:
# translate security_groups to UUID's if names where provided
module.params['security_groups'] = [
get_security_group_id(module, cloud, v)
for v in module.params['security_groups']
]
port = None
network_id = None
if name:
port = cloud.get_port(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, port, cloud))
changed = False
if state == 'present':
if not port:
network = module.params['network']
if not network:
module.fail_json(
msg="Parameter 'network' is required in Port Create"
)
port_kwargs = _compose_port_args(module, cloud)
network_object = cloud.get_network(network)
if network_object:
network_id = network_object['id']
else:
module.fail_json(
msg="Specified network was not found."
)
port = cloud.create_port(network_id, **port_kwargs)
changed = True
else:
if _needs_update(module, port, cloud):
port_kwargs = _compose_port_args(module, cloud)
port = cloud.update_port(port['id'], **port_kwargs)
changed = True
module.exit_json(changed=changed, id=port['id'], port=port)
if state == 'absent':
if port:
cloud.delete_port(port['id'])
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 | 8,992,682,568,714,327,000 | 30.778061 | 80 | 0.593401 | false |
adviti/melange | app/gdata/dublincore/data.py | 126 | 2106 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the data classes of the Dublin Core Metadata Initiative (DCMI) Extension"""
__author__ = '[email protected] (Jeff Scudder)'
import atom.core
DC_TEMPLATE = '{http://purl.org/dc/terms/}%s'
class Creator(atom.core.XmlElement):
"""Entity primarily responsible for making the resource."""
_qname = DC_TEMPLATE % 'creator'
class Date(atom.core.XmlElement):
"""Point or period of time associated with an event in the lifecycle of the resource."""
_qname = DC_TEMPLATE % 'date'
class Description(atom.core.XmlElement):
"""Account of the resource."""
_qname = DC_TEMPLATE % 'description'
class Format(atom.core.XmlElement):
"""File format, physical medium, or dimensions of the resource."""
_qname = DC_TEMPLATE % 'format'
class Identifier(atom.core.XmlElement):
"""An unambiguous reference to the resource within a given context."""
_qname = DC_TEMPLATE % 'identifier'
class Language(atom.core.XmlElement):
"""Language of the resource."""
_qname = DC_TEMPLATE % 'language'
class Publisher(atom.core.XmlElement):
"""Entity responsible for making the resource available."""
_qname = DC_TEMPLATE % 'publisher'
class Rights(atom.core.XmlElement):
"""Information about rights held in and over the resource."""
_qname = DC_TEMPLATE % 'rights'
class Subject(atom.core.XmlElement):
"""Topic of the resource."""
_qname = DC_TEMPLATE % 'subject'
class Title(atom.core.XmlElement):
"""Name given to the resource."""
_qname = DC_TEMPLATE % 'title'
| apache-2.0 | 3,880,798,802,522,566,000 | 26 | 90 | 0.714625 | false |
Zopieux/py-gfm | gfm/standalone_fenced_code.py | 1 | 2080 | # Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import markdown
from markdown.extensions.fenced_code import FencedCodeExtension, FencedBlockPreprocessor
class StandaloneFencedCodeExtension(FencedCodeExtension):
def __init__(self, **kwargs):
self.config = {
"linenums": [False, "Use lines numbers. True=yes, False=no, None=auto"],
"guess_lang": [False, "Automatic language detection - Default: True"],
"css_class": [
"highlight",
"Set class name for wrapper <div> - " "Default: codehilite",
],
"pygments_style": [
"default",
"Pygments HTML Formatter Style " "(Colorscheme) - Default: default",
],
"noclasses": [
False,
"Use inline styles instead of CSS classes - " "Default false",
],
"use_pygments": [
True,
"Use Pygments to Highlight code blocks. "
"Disable if using a JavaScript library. "
"Default: True",
],
}
# Markdown 3.3 introduced a breaking change.
if markdown.__version_info__ >= (3, 3):
super().setConfigs(kwargs)
else:
super().__init__(**kwargs)
def extendMarkdown(self, md):
""" Add FencedBlockPreprocessor to the Markdown instance. """
md.registerExtension(self)
# Markdown 3.3 introduced a breaking change.
if markdown.__version_info__ >= (3, 3):
processor = FencedBlockPreprocessor(md, self.config)
processor.codehilite_conf = self.getConfigs()
else:
processor = FencedBlockPreprocessor(md)
processor.checked_for_codehilite = True
processor.codehilite_conf = self.config
md.preprocessors.register(processor, "fenced_code_block", 25)
| bsd-3-clause | -6,578,108,611,704,230,000 | 39.784314 | 88 | 0.576923 | false |
centic9/subversion-ppa | tools/dev/graph-dav-servers.py | 5 | 5465 | #!/usr/bin/env python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
#
# graph-svn-dav.py by Brian W. Fitzpatrick <[email protected]>
#
# This was originally a quick hack to make a pretty picture of svn DAV servers.
#
# I've dropped it in Subversion's repository at the request of Karl Fogel.
#
# Be warned that this script has many dependencies that don't ship with Python.
import sys
import os
import fileinput
import datetime
import time
import datetime
from matplotlib import dates
import matplotlib
matplotlib.use('Agg')
from matplotlib import pylab
import Image
OUTPUT_FILE = '../../www/images/svn-dav-securityspace-survey.png'
OUTPUT_IMAGE_WIDTH = 800
STATS = [
('1/1/2003', 70),
('2/1/2003', 158),
('3/1/2003', 222),
('4/1/2003', 250),
('5/1/2003', 308),
('6/1/2003', 369),
('7/1/2003', 448),
('8/1/2003', 522),
('9/1/2003', 665),
('10/1/2003', 782),
('11/1/2003', 969),
('12/1/2003', 1009),
('1/1/2004', 1162),
('2/1/2004', 1307),
('3/1/2004', 1424),
('4/1/2004', 1792),
('5/1/2004', 2113),
('6/1/2004', 2502),
('7/1/2004', 2941),
('8/1/2004', 3863),
('9/1/2004', 4174),
('10/1/2004', 4187),
('11/1/2004', 4783),
('12/1/2004', 4995),
('1/1/2005', 5565),
('2/1/2005', 6505),
('3/1/2005', 7897),
('4/1/2005', 8751),
('5/1/2005', 9793),
('6/1/2005', 11534),
('7/1/2005', 12808),
('8/1/2005', 13545),
('9/1/2005', 15233),
('10/1/2005', 17588),
('11/1/2005', 18893),
('12/1/2005', 20278),
('1/1/2006', 21084),
('2/1/2006', 23861),
('3/1/2006', 26540),
('4/1/2006', 29396),
('5/1/2006', 33001),
('6/1/2006', 35082),
('7/1/2006', 38939),
('8/1/2006', 40672),
('9/1/2006', 46525),
('10/1/2006', 54247),
('11/1/2006', 63145),
('12/1/2006', 68988),
('1/1/2007', 77027),
('2/1/2007', 84813),
('3/1/2007', 95679),
('4/1/2007', 103852),
('5/1/2007', 117267),
('6/1/2007', 133665),
('7/1/2007', 137575),
('8/1/2007', 155426),
('9/1/2007', 159055),
('10/1/2007', 169939),
('11/1/2007', 180831),
('12/1/2007', 187093),
('1/1/2008', 199432),
('2/1/2008', 221547),
('3/1/2008', 240794),
('4/1/2008', 255520),
('5/1/2008', 269478),
('6/1/2008', 286614),
('7/1/2008', 294579),
('8/1/2008', 307923),
('9/1/2008', 254757),
('10/1/2008', 268081),
('11/1/2008', 299071),
('12/1/2008', 330884),
('1/1/2009', 369719),
('2/1/2009', 378434),
('3/1/2009', 390502),
('4/1/2009', 408658),
('5/1/2009', 407044),
('6/1/2009', 406520),
('7/1/2009', 334276),
]
def get_date(raw_date):
month, day, year = map(int, raw_date.split('/'))
return datetime.datetime(year, month, day)
def get_ordinal_date(date):
# This is the only way I can get matplotlib to do the dates right.
return int(dates.date2num(get_date(date)))
def load_stats():
dates = [get_ordinal_date(date) for date, value in STATS]
counts = [x[1] for x in STATS]
return dates, counts
def draw_graph(dates, counts):
###########################################################
# Drawing takes place here.
pylab.figure(1)
ax = pylab.subplot(111)
pylab.plot_date(dates, counts,
color='r', linestyle='-', marker='o', markersize=3)
ax.xaxis.set_major_formatter( pylab.DateFormatter('%Y') )
ax.xaxis.set_major_locator( pylab.YearLocator() )
ax.xaxis.set_minor_locator( pylab.MonthLocator() )
ax.set_xlim( (dates[0] - 92, dates[len(dates) - 1] + 92) )
ax.yaxis.set_major_formatter( pylab.FormatStrFormatter('%d') )
pylab.ylabel('Total # of Public DAV Servers')
lastdate = datetime.datetime.fromordinal(dates[len(dates) - 1]).strftime("%B %Y")
pylab.xlabel("Data as of " + lastdate)
pylab.title('Security Space Survey of\nPublic Subversion DAV Servers')
# End drawing
###########################################################
png = open(OUTPUT_FILE, 'w')
pylab.savefig(png)
png.close()
os.rename(OUTPUT_FILE, OUTPUT_FILE + ".tmp.png")
try:
im = Image.open(OUTPUT_FILE + ".tmp.png", 'r')
(width, height) = im.size
print("Original size: %d x %d pixels" % (width, height))
scale = float(OUTPUT_IMAGE_WIDTH) / float(width)
width = OUTPUT_IMAGE_WIDTH
height = int(float(height) * scale)
print("Final size: %d x %d pixels" % (width, height))
im = im.resize((width, height), Image.ANTIALIAS)
im.save(OUTPUT_FILE, im.format)
os.unlink(OUTPUT_FILE + ".tmp.png")
except Exception, e:
sys.stderr.write("Error attempting to resize the graphic: %s\n" % (str(e)))
os.rename(OUTPUT_FILE + ".tmp.png", OUTPUT_FILE)
raise
pylab.close()
if __name__ == '__main__':
dates, counts = load_stats()
draw_graph(dates, counts)
print("Don't forget to update ../../www/svn-dav-securityspace-survey.html!")
| apache-2.0 | -1,030,338,365,839,252,100 | 27.170103 | 83 | 0.610247 | false |
JulyKikuAkita/PythonPrac | cs15211/FindEventualSafeStates.py | 1 | 7463 | __source__ = 'https://leetcode.com/problems/find-eventual-safe-states/'
# Time: O(N + E)
# Space: O(N)
#
# Description: Leetcode # 802. Find Eventual Safe States
#
# In a directed graph, we start at some node and every turn,
# walk along a directed edge of the graph.
# If we reach a node that is terminal (that is, it has no outgoing directed edges), we stop.
#
# Now, say our starting node is eventually safe if and only if we must eventually walk to a terminal node.
# More specifically, there exists a natural number K so that for any choice of where to walk,
# we must have stopped at a terminal node in less than K steps.
#
# Which nodes are eventually safe? Return them as an array in sorted order.
#
# The directed graph has N nodes with labels 0, 1, ..., N-1, where N is the length of graph.
# The graph is given in the following form: graph[i] is a list of labels j
# such that (i, j) is a directed edge of the graph.
#
# Example:
# Input: graph = [[1,2],[2,3],[5],[0],[5],[],[]]
# Output: [2,4,5,6]
# Here is a diagram of the above graph.
#
# Illustration of graph
#
# Note:
#
# graph will have length at most 10000.
# The number of edges in the graph will not exceed 32000.
# Each graph[i] will be a sorted list of different integers,
# chosen within the range [0, graph.length - 1].
#
import collections
import unittest
# 512ms 18.54%
class Solution(object):
def eventualSafeNodes(self, graph):
"""
:type graph: List[List[int]]
:rtype: List[int]
"""
N = len(graph)
safe = [False] * N
graph = map(set, graph)
rgraph = [set() for _ in xrange(N)]
q = collections.deque()
for i, js in enumerate(graph):
if not js:
q.append(i)
for j in js:
rgraph[j].add(i)
while q:
j = q.popleft()
safe[j] = True
for i in rgraph[j]:
graph[i].remove(j)
if len(graph[i]) == 0:
q.append(i)
return [i for i, v in enumerate(safe) if v]
# 304ms 35.35%
class Solution2(object):
def eventualSafeNodes(self, graph):
"""
:type graph: List[List[int]]
:rtype: List[int]
"""
WHITE, GRAY, BLACK = 0, 1, 2
color = collections.defaultdict(int)
def dfs(node):
if color[node] != WHITE:
return color[node] == BLACK
color[node] = GRAY
for nei in graph[node]:
if color[nei] == BLACK:
continue
if color[nei] == GRAY or not dfs(nei):
return False
color[node] = BLACK
return True
return filter(dfs, range(len(graph)))
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/find-eventual-safe-states/solution/
Approach #1: Reverse Edges [Accepted]
Complexity Analysis
Time Complexity: O(N + E), where N is the number of nodes in the given graph, and E is the total number of edges.
Space Complexity: O(N) in additional space complexity.
# 114ms 22.56%
class Solution {
public List<Integer> eventualSafeNodes(int[][] graph) {
int N = graph.length;
boolean[] safe = new boolean[N];
List<Set<Integer>> tmp = new ArrayList();
List<Set<Integer>> rgraph = new ArrayList();
for (int i = 0; i < N; ++i) {
tmp.add(new HashSet());
rgraph.add(new HashSet());
}
Queue<Integer> queue = new LinkedList();
for (int i = 0; i < N; i++) {
if (graph[i].length == 0) queue.offer(i);
for (int j : graph[i]) {
tmp.get(i).add(j);
rgraph.get(j).add(i);
}
}
while (!queue.isEmpty()) {
int j = queue.poll();
safe[j] = true;
for (int i : rgraph.get(j)) {
tmp.get(i).remove(j);
if (tmp.get(i).isEmpty()) queue.offer(i);
}
}
List<Integer> ans = new ArrayList();
for (int i = 0; i < N; i++) {
if (safe[i]) ans.add(i);
}
return ans;
}
}
Approach #2: Depth-First Search [Accepted]
Complexity Analysis
Time Complexity: O(N + E), where N is the number of nodes in the given graph, and E is the total number of edges.
Space Complexity: O(N) in additional space complexity.
# 11ms 97.36%
class Solution {
public List<Integer> eventualSafeNodes(int[][] graph) {
int N = graph.length;
int[] color = new int[N];
List<Integer> ans = new ArrayList();
for (int i = 0; i < N; i++) {
if (dfs(i, color, graph)) ans.add(i);
}
return ans;
}
// colors: WHITE 0, GRAY 1, BLACK 2;
private boolean dfs(int node, int[] color, int[][] graph) {
if (color[node] > 0) return color[node] == 2;
color[node] = 1;
for (int nei: graph[node]) {
if (color[node] == 2) continue;
if (color[nei] == 1 || !dfs(nei, color, graph)) return false;
}
color[node] = 2;
return true;
}
}
# https://leetcode.com/problems/find-eventual-safe-states/discuss/120633/Java-Solution-(DFS-andand-Topological-Sort)
# topological sort
# 62ms 36.36%
class Solution {
public List<Integer> eventualSafeNodes(int[][] graph) {
int n = graph.length;
int[] degree = new int [n];
Set<Integer>[] map = new HashSet[n];
for (int i = 0; i < n; i++) map[i] = new HashSet();
for (int i = 0; i < n; i++) {
for (int node : graph[i]) {
map[node].add(i);
degree[i]++;
}
}
Queue<Integer> queue = new LinkedList();
Set<Integer> set = new HashSet();
for (int i = 0; i < n; i++) {
if (degree[i] == 0) {
set.add(i);
queue.add(i);
}
}
while (!queue.isEmpty()) {
int node = queue.poll();
set.add(node);
for (int nei : map[node]) {
degree[nei]--;
if (degree[nei] == 0) {
queue.add(nei);
}
}
}
List<Integer> ans = new ArrayList(set);
Collections.sort(ans);
return ans;
}
}
# https://leetcode.com/problems/find-eventual-safe-states/discuss/119871/Straightforward-Java-solution-easy-to-understand!
# 14ms 60.33%
class Solution {
// value of color represents three states:
static int NOT_V = 0; // 0:have not been visited
static int SAFE = 1; // 1:safe
static int LOOP = 2; // 2:unsafe
public List<Integer> eventualSafeNodes(int[][] graph) {
List<Integer> res = new ArrayList();
int[] color = new int[graph.length];
for (int i = 0; i < graph.length; i++) {
if (dfs(graph, color, i)) res.add(i);
}
return res;
}
private boolean dfs(int[][] graph, int[] color, int start) {
if (color[start] == LOOP) return false;
if (color[start] == SAFE) return true;
color[start] = LOOP;
for (int nei : graph[start]) {
if (!dfs(graph, color, nei)) return false;
}
color[start] = SAFE;
return true;
}
}
'''
| apache-2.0 | -8,531,149,714,826,902,000 | 29.586066 | 122 | 0.536781 | false |
openstack/congress | congress/utils.py | 1 | 9459 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import contextlib
import json
import os
import shutil
import tempfile
import yaml
from oslo_config import cfg
from oslo_log import log as logging
import six
LOG = logging.getLogger(__name__)
utils_opts = [
cfg.StrOpt('tempdir',
help='Explicitly specify the temporary working directory'),
]
CONF = cfg.CONF
CONF.register_opts(utils_opts)
# Note(thread-safety): blocking function
@contextlib.contextmanager
def tempdir(**kwargs):
argdict = kwargs.copy()
if 'dir' not in argdict:
argdict['dir'] = CONF.tempdir
tmpdir = tempfile.mkdtemp(**argdict)
try:
yield tmpdir
finally:
try:
shutil.rmtree(tmpdir)
except OSError as e:
LOG.error(('Could not remove tmpdir: %s'), e)
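# Typical usage of the context manager above (a sketch; the prefix is just an
# illustrative keyword argument passed through to tempfile.mkdtemp):
#
#   with tempdir(prefix='congress-') as tmp_path:
#       scratch = os.path.join(tmp_path, 'scratch.yaml')
#       ...
#   # the directory and its contents are removed on exit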
def value_to_congress(value):
if isinstance(value, six.string_types):
# TODO(ayip): This throws away high unicode data because congress does
# not have full support for unicode yet. We'll need to fix this to
# handle unicode coming from datasources.
try:
six.text_type(value).encode('ascii')
except UnicodeEncodeError:
LOG.warning('Ignoring non-ascii characters')
# Py3: decode back into str for compat (bytes != str)
return six.text_type(value).encode('ascii', 'ignore').decode('ascii')
# Check for bool before int, because True and False are also ints.
elif isinstance(value, bool):
return str(value)
elif (isinstance(value, six.integer_types) or
isinstance(value, float)):
return value
return str(value)
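# Illustrative behavior of value_to_congress (expected results sketched from
# the branches above, not an official reference):
#
#   value_to_congress(True)        -> "True"   # bools become strings
#   value_to_congress(42)          -> 42       # ints and floats pass through
#   value_to_congress(u'caf\xe9')  -> "caf"    # non-ascii characters dropped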
def tuple_to_congress(value_tuple):
return tuple(value_to_congress(v) for v in value_tuple)
# Note(thread-safety): blocking function
def create_datasource_policy(bus, datasource, engine):
# Get the schema for the datasource using
# Note(thread-safety): blocking call
schema = bus.rpc(datasource, 'get_datasource_schema',
{'source_id': datasource})
# Create policy and sets the schema once datasource is created.
args = {'name': datasource, 'schema': schema}
# Note(thread-safety): blocking call
bus.rpc(engine, 'initialize_datasource', args)
def get_root_path():
return os.path.dirname(os.path.dirname(__file__))
class Location (object):
"""A location in the program source code."""
__slots__ = ['line', 'col']
def __init__(self, line=None, col=None, obj=None):
try:
self.line = obj.location.line
self.col = obj.location.col
except AttributeError:
pass
self.col = col
self.line = line
def __str__(self):
s = ""
if self.line is not None:
s += " line: {}".format(self.line)
if self.col is not None:
s += " col: {}".format(self.col)
return s
def __repr__(self):
return "Location(line={}, col={})".format(
repr(self.line), repr(self.col))
def __hash__(self):
return hash(('Location', hash(self.line), hash(self.col)))
def pretty_json(data):
print(json.dumps(data, sort_keys=True,
indent=4, separators=(',', ': ')))
def pretty_rule(rule_str):
# remove line breaks
rule_str = ''.join(
[line.strip() for line in rule_str.strip().splitlines()])
head_and_body = rule_str.split(':-')
# drop empty body
head_and_body = [item.strip()
for item in head_and_body if len(item.strip()) > 0]
head = head_and_body[0]
if len(head_and_body) == 1:
return head
else:
body = head_and_body[1]
# split the literal by spliting on ')'
body_list = body.split(')')
body_list = body_list[:-1] # drop part behind the final ')'
new_body_list = []
for literal in body_list:
# remove commas between literals
if literal[0] == ',':
literal = literal[1:]
# add back the ')', also add an indent
new_body_list.append(' ' + literal.strip() + ')')
pretty_rule_str = head + " :-\n" + ",\n".join(new_body_list)
return pretty_rule_str
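# Example of the transformation performed by pretty_rule (the rule text is
# hypothetical):
#
#   pretty_rule("p(x) :- q(x), r(x)")
#   # returns:
#   # p(x) :-
#   #     q(x),
#   #     r(x)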
class YamlConfigs (object):
def __init__(self, dir_path, key_attrib, reusables_path=None):
self.dir_path = dir_path
self.key_attrib = key_attrib
self.reusables_path = reusables_path
# dictionary of loaded structures
# indexed by the value of each struct[key_attrib]
self.loaded_structures = {}
# dictionary of reusable yaml-style structures
# indexed by unique name
self.reusables = {}
yaml.SafeLoader.add_constructor(
'!ref', self._resolve_reuse_reference_constructor)
def _resolve_reuse_reference_constructor(self, loader, node):
import six
if not isinstance(node.value, six.string_types):
raise yaml.YAMLError(
'Cannot resolve reference {} because the value is not '
'a string.'.format(node))
if node.value in self.reusables:
return self.reusables[node.value]
else:
raise yaml.YAMLError(
'Cannot resolve reference {} because no reusable '
'data has been defined with the name "{}". Please double '
'check the reference name or the reusables file "{}".'.format(
node, node.value, self.reusables_path))
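    # Illustrative sketch of how the '!ref' tag resolves (the file contents
    # below are hypothetical): if the reusables file defines
    #     common_servers: ["10.0.0.1", "10.0.0.2"]
    # then a policy document containing
    #     servers: !ref common_servers
    # is loaded with `servers` bound to that same list.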
def load_from_files(self):
'''load YAML config files from directory
return total number of files on which error encountered.
Separately callable apart from __init__ to support reloading changed
files.
'''
if self.reusables_path is not None:
self.reusables = {}
try:
with open(self.reusables_path, "r") as stream:
try:
self.reusables = yaml.safe_load(stream)
except Exception:
LOG.warning(
'Unable to YAML-load reusables file at path %s. '
'Proceeding with empty reusables.',
self.reusables_path)
except IOError:
LOG.warning('Unable to find or open reusables file at path %s.'
' Proceeding with empty reusables.',
self.reusables_path)
if not isinstance(self.reusables, dict):
LOG.warning('The loaded reusables file does not conform to the'
' expected format (must be a hash at the top '
'level). Proceeding with empty reusables. '
'Provided structure: %s', self.reusables)
def _load_yaml_config_file(full_path):
try:
success_yaml_count = 0
error_yaml_count = 0
doc_num_in_file = 0
file_error = False
with open(full_path, "r") as stream:
policies = yaml.safe_load_all(stream)
for policy in policies:
doc_num_in_file += 1
# FIXME: validate YAML config
if policy[self.key_attrib] in self.loaded_structures:
error_yaml_count += 1
LOG.warning('Duplicate name')
else:
self.loaded_structures[
policy[self.key_attrib]] = policy
success_yaml_count += 1
except Exception:
LOG.exception(
'Failed to load YAML config file %s', full_path)
file_error = True
return success_yaml_count, file_error or (error_yaml_count > 0)
file_count = 0
file_error_count = 0
policy_count = 0
for (dirpath, dirnames, filenames) in os.walk(
self.dir_path):
for filename in filenames:
name, extension = os.path.splitext(filename)
if extension in ['.yaml', '.yml']:
count, has_error = _load_yaml_config_file(
os.path.join(dirpath, filename))
if count > 0:
file_count += 1
policy_count += count
if has_error:
file_error_count += 1
return file_error_count
| apache-2.0 | 7,097,890,734,267,145,000 | 34.69434 | 79 | 0.566445 | false |
lixiangning888/whole_project | modules/signatures/antiemu_wine_func.py | 3 | 1273 | # -*- coding: utf-8 -*-
# Copyright (C) 2015 Accuvant, Inc. ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class WineDetectFunc(Signature):
name = "antiemu_wine_func"
description = "通过功能名检测是否存在Wine模拟器"
severity = 3
categories = ["anti-emulation"]
authors = ["Accuvant"]
minimum = "1.0"
evented = True
filter_apinames = set(["LdrGetProcedureAddress"])
def on_call(self, call, process):
funcname = self.get_argument(call, "FunctionName")
if not call["status"] and funcname == "wine_get_unix_file_name":
return True
| lgpl-3.0 | 4,143,826,323,957,912,600 | 36.727273 | 72 | 0.71004 | false |
chhao91/QGIS | python/plugins/processing/gui/MessageBarProgress.py | 5 | 2541 | # -*- coding: utf-8 -*-
"""
***************************************************************************
MessageBarProgress.py
---------------------
Date : April 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'April 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import Qt, QCoreApplication
from PyQt4.QtGui import QProgressBar
from qgis.utils import iface
from qgis.gui import QgsMessageBar
class MessageBarProgress:
def __init__(self, algname=None):
self.progressMessageBar = \
iface.messageBar().createMessage(self.tr('Executing algorithm <i>{0}</i>'.format(algname if algname else '')))
self.progress = QProgressBar()
self.progress.setMaximum(100)
self.progress.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)
self.progressMessageBar.layout().addWidget(self.progress)
iface.messageBar().pushWidget(self.progressMessageBar,
iface.messageBar().INFO)
def error(self, msg):
iface.messageBar().clearWidgets()
iface.messageBar().pushMessage(self.tr('Error'),
msg, level=QgsMessageBar.CRITICAL, duration=3)
def setText(self, text):
pass
def setPercentage(self, i):
self.progress.setValue(i)
def setInfo(self, _):
pass
def setCommand(self, _):
pass
def setDebugInfo(self, _):
pass
def setConsoleInfo(self, _):
pass
def close(self):
iface.messageBar().clearWidgets()
def tr(self, string, context=''):
if context == '':
context = 'MessageBarProgress'
return QCoreApplication.translate(context, string)
| gpl-2.0 | 7,812,491,997,703,610,000 | 32.88 | 122 | 0.512003 | false |
vishesh/pycket | pycket/prims/parameter.py | 1 | 2433 |
from pycket import values
from pycket import values_parameter
from pycket.argument_parser import ArgParser, EndOfInput
from pycket.arity import Arity
from pycket.base import W_Object
from pycket.error import SchemeException
from pycket.prims.expose import expose, expose_val, default, procedure
from rpython.rlib import jit
@expose("make-parameter",
[values.W_Object, default(values.W_Object, values.w_false)])
def make_parameter(init, guard):
return values_parameter.W_Parameter(init, guard)
@expose("make-derived-parameter",
[values_parameter.W_BaseParameter, procedure, procedure])
def make_derived_parameter(param, guard, wrap):
return values_parameter.W_DerivedParameter(param, guard, wrap)
@expose("extend-parameterization", arity=Arity.geq(1))
@jit.unroll_safe
def scheme_extend_parameterization(args):
if len(args) == 0:
raise SchemeException("extend-parameterization: expected 1 or more arguments")
config = args[0]
argc = len(args)
if argc < 2 or not isinstance(config, values_parameter.W_Parameterization) or argc % 2 != 1:
return config
parser = ArgParser("extend-parameterization", args, start_at=1)
while parser.has_more():
param = parser.parameter()
key = parser.object()
config = config.extend([param], [key])
return config
def call_with_parameterization(f, args, paramz, env, cont):
cont.update_cm(values.parameterization_key, paramz)
return f.call(args, env, cont)
@expose("call-with-parameterization",
[values.W_Object, values_parameter.W_Parameterization], simple=False)
def call_w_paramz(f, paramz, env, cont):
return call_with_parameterization(f, [], paramz, env, cont)
def call_with_extended_paramz(f, args, keys, vals, env, cont):
from pycket.values import parameterization_key
# XXX seems untested?
paramz = cont.get_mark_first(parameterization_key)
assert isinstance(paramz, values_parameter.W_Parameterization) # XXX is this always right?
paramz_new = paramz.extend(keys, vals)
return call_with_parameterization(f, args, paramz_new, env, cont)
expose_val("parameterization-key", values.parameterization_key)
expose_val("print-mpair-curly-braces", values_parameter.W_Parameter(values.w_false))
expose_val("print-pair-curly-braces", values_parameter.W_Parameter(values.w_false))
| mit | -6,313,794,861,346,296,000 | 38.885246 | 96 | 0.708179 | false |
VirusTotal/msticpy | tests/test_ip_utils.py | 1 | 3873 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""IP Utils test class."""
import unittest
import json
import os
import pandas as pd
from msticpy.sectools.ip_utils import get_whois_info, get_whois_df, get_ip_type
_test_data_folders = [
d for d, _, _ in os.walk(os.getcwd()) if d.endswith("/tests/testdata")
]
if len(_test_data_folders) == 1:
_TEST_DATA = _test_data_folders[0]
else:
_TEST_DATA = "./tests/testdata"
class TestIpUtils(unittest.TestCase):
"""Unit test class."""
IPV4 = {
"Private": ("10.0.0.1", ["Private", "Reserved"]),
"Multicast": ("224.0.0.1", None),
"Unspecified": ("0.0.0.0", None),
"Reserved": ("198.51.100.1", ["Private", "Reserved"]),
"Loopback": ("127.0.0.1", None),
"Public": ("153.2.3.4", None),
"Link Local": ("169.254.0.1", None),
}
IPV6 = {
"Private": ("FC00::C001:1DFF:FEE0:0", None),
"Multicast": ("FF00::", None),
"Unspecified": ("::", None),
"Reserved": ("2001:db8::", ["Private", "Reserved"]),
"Loopback": ("::1", None),
"Public": ("2340:0023:AABA:0A01:0055:5054:9ABC:ABB0", None),
"Link Local": ("FE80::C001:1DFF:FEE0:0", None),
}
def setUp(self):
input_file = os.path.join(_TEST_DATA, "az_net_flows.csv")
self.input_df = pd.read_csv(input_file).sample(10)
def test_get_ip_type(self):
for ip_type, (addr, alts) in self.IPV4.items():
print(addr, ip_type)
if alts:
self.assertIn(get_ip_type(addr), alts)
else:
self.assertEqual(get_ip_type(addr), ip_type)
for ip_type, (addr, alts) in self.IPV6.items():
print(addr, ip_type)
if alts:
self.assertIn(get_ip_type(addr), alts)
else:
self.assertEqual(get_ip_type(addr), ip_type)
def test_get_whois(self):
ms_ip = "13.107.4.50"
ms_asn = "MICROSOFT-CORP-MSN-AS-BLOCK, US"
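        # Note: get_whois_info() is assumed to perform a live WHOIS/RDAP lookup
        # for this address, so the assertions below need outbound network access.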
asn, whois = get_whois_info(ms_ip)
self.assertEqual(asn, ms_asn)
asn, whois = get_whois_info(self.IPV4["Private"][0])
invalid_type = "No ASN Information for IP type: Private"
self.assertEqual(asn, invalid_type)
def test_get_whois_df(self):
results = get_whois_df(data=self.input_df, ip_column="AllExtIPs")
self.assertEqual(len(results), len(self.input_df))
self.assertIn("AsnDescription", results.columns)
results2 = get_whois_df(
data=self.input_df, ip_column="AllExtIPs", asn_col="asn", whois_col="whois"
)
self.assertEqual(len(results2), len(self.input_df))
self.assertIn("asn", results2.columns)
self.assertIn("whois", results2.columns)
self.assertEqual(len(results2[~results2["asn"].isna()]), len(self.input_df))
self.assertEqual(len(results2[~results2["whois"].isna()]), len(self.input_df))
def test_whois_pdext(self):
results = self.input_df.mp_whois.lookup(ip_column="AllExtIPs")
self.assertEqual(len(results), len(self.input_df))
self.assertIn("AsnDescription", results.columns)
results2 = self.input_df.mp_whois.lookup(
ip_column="AllExtIPs", asn_col="asn", whois_col="whois"
)
self.assertEqual(len(results2), len(self.input_df))
self.assertIn("asn", results2.columns)
self.assertIn("whois", results2.columns)
self.assertEqual(len(results2[~results2["asn"].isna()]), len(self.input_df))
self.assertEqual(len(results2[~results2["whois"].isna()]), len(self.input_df))
| mit | 7,113,057,487,669,496,000 | 36.970588 | 87 | 0.566228 | false |
KnowNo/reviewboard | reviewboard/reviews/views.py | 3 | 64687 | from __future__ import unicode_literals
import logging
import time
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db.models import Q
from django.http import (Http404,
HttpResponse,
HttpResponseNotFound,
HttpResponseNotModified,
HttpResponseRedirect)
from django.shortcuts import (get_object_or_404, get_list_or_404,
render_to_response)
from django.template.context import RequestContext
from django.template.loader import render_to_string
from django.utils import six, timezone
from django.utils.decorators import method_decorator
from django.utils.html import escape
from django.utils.http import http_date
from django.utils.safestring import mark_safe
from django.utils.timezone import utc
from django.utils.translation import ugettext_lazy as _
from djblets.siteconfig.models import SiteConfiguration
from djblets.util.dates import get_latest_timestamp
from djblets.util.decorators import augment_method_from
from djblets.util.http import (encode_etag, set_last_modified,
set_etag, etag_if_none_match)
from reviewboard.accounts.decorators import (check_login_required,
valid_prefs_required)
from reviewboard.accounts.models import ReviewRequestVisit, Profile
from reviewboard.attachments.models import (FileAttachment,
FileAttachmentHistory)
from reviewboard.changedescs.models import ChangeDescription
from reviewboard.diffviewer.diffutils import (convert_to_unicode,
get_file_chunks_in_range,
get_last_header_before_line,
get_last_line_number_in_diff,
get_original_file,
get_patched_file)
from reviewboard.diffviewer.models import DiffSet
from reviewboard.diffviewer.views import (DiffFragmentView, DiffViewerView,
exception_traceback_string)
from reviewboard.hostingsvcs.bugtracker import BugTracker
from reviewboard.reviews.ui.screenshot import LegacyScreenshotReviewUI
from reviewboard.reviews.context import (comment_counts,
diffsets_with_comments,
has_comments_in_diffsets_excluding,
interdiffs_with_comments,
make_review_request_context)
from reviewboard.reviews.fields import get_review_request_fieldsets
from reviewboard.reviews.markdown_utils import is_rich_text_default_for_user
from reviewboard.reviews.models import (BaseComment, Comment,
FileAttachmentComment,
ReviewRequest, Review,
Screenshot, ScreenshotComment)
from reviewboard.reviews.ui.base import FileAttachmentReviewUI
from reviewboard.scmtools.models import Repository
from reviewboard.site.decorators import check_local_site_access
from reviewboard.site.urlresolvers import local_site_reverse
from reviewboard.webapi.encoder import status_to_string
#
# Helper functions
#
def _render_permission_denied(
request,
template_name='reviews/review_request_permission_denied.html'):
"""Renders a Permission Denied error for this review request."""
response = render_to_response(template_name, RequestContext(request))
response.status_code = 403
return response
def _find_review_request_object(review_request_id, local_site):
"""Finds a review request given an ID and an optional LocalSite name.
If a local site is passed in on the URL, we want to look up the review
request using the local_id instead of the pk. This allows each LocalSite
configured to have its own review request ID namespace starting from 1.
"""
q = ReviewRequest.objects.all()
if local_site:
q = q.filter(local_site=local_site,
local_id=review_request_id)
else:
q = q.filter(pk=review_request_id)
try:
q = q.select_related('submitter', 'repository')
return q.get()
except ReviewRequest.DoesNotExist:
raise Http404
def _find_review_request(request, review_request_id, local_site):
"""Finds a review request matching an ID, checking user access permissions.
If the review request is accessible by the user, we return
(ReviewRequest, None). Otherwise, we return (None, response).
"""
review_request = _find_review_request_object(review_request_id, local_site)
if review_request.is_accessible_by(request.user):
return review_request, None
else:
return None, _render_permission_denied(request)
def _build_id_map(objects):
"""Builds an ID map out of a list of objects.
The resulting map makes it easy to quickly look up an object from an ID.
"""
id_map = {}
for obj in objects:
id_map[obj.pk] = obj
return id_map
def _query_for_diff(review_request, user, revision, draft):
"""
Queries for a diff based on several parameters.
    If no matching diff exists, this raises an Http404 exception.
"""
# Normalize the revision, since it might come in as a string.
if revision:
revision = int(revision)
# This will try to grab the diff associated with a draft if the review
# request has an associated draft and is either the revision being
# requested or no revision is being requested.
if (draft and draft.diffset_id and
(revision is None or draft.diffset.revision == revision)):
return draft.diffset
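    # No suitable draft diff was found, so fall back to the diffsets attached
    # to the review request's public diffset history.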
query = Q(history=review_request.diffset_history_id)
# Grab a revision if requested.
if revision is not None:
query = query & Q(revision=revision)
try:
return DiffSet.objects.filter(query).latest()
except DiffSet.DoesNotExist:
raise Http404
def build_diff_comment_fragments(
comments, context,
comment_template_name='reviews/diff_comment_fragment.html',
error_template_name='diffviewer/diff_fragment_error.html',
lines_of_context=None,
show_controls=False):
comment_entries = []
had_error = False
siteconfig = SiteConfiguration.objects.get_current()
if lines_of_context is None:
lines_of_context = [0, 0]
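    # lines_of_context is [before, after]: the number of extra lines to show
    # above the first commented line and below the last one.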
for comment in comments:
try:
max_line = get_last_line_number_in_diff(context, comment.filediff,
comment.interfilediff)
first_line = max(1, comment.first_line - lines_of_context[0])
last_line = min(comment.last_line + lines_of_context[1], max_line)
num_lines = last_line - first_line + 1
chunks = list(get_file_chunks_in_range(context,
comment.filediff,
comment.interfilediff,
first_line,
num_lines))
content = render_to_string(comment_template_name, {
'comment': comment,
'header': get_last_header_before_line(context,
comment.filediff,
comment.interfilediff,
first_line),
'chunks': chunks,
'domain': Site.objects.get_current().domain,
'domain_method': siteconfig.get('site_domain_method'),
'lines_of_context': lines_of_context,
'expandable_above': show_controls and first_line != 1,
'expandable_below': show_controls and last_line != max_line,
'collapsible': lines_of_context != [0, 0],
'lines_above': first_line - 1,
'lines_below': max_line - last_line,
'first_line': first_line,
})
except Exception as e:
content = exception_traceback_string(
None, e, error_template_name, {
'comment': comment,
'file': {
'depot_filename': comment.filediff.source_file,
'index': None,
'filediff': comment.filediff,
},
'domain': Site.objects.get_current().domain,
'domain_method': siteconfig.get("site_domain_method"),
})
# It's bad that we failed, and we'll return a 500, but we'll
# still return content for anything we have. This will prevent any
# caching.
had_error = True
chunks = []
comment_entries.append({
'comment': comment,
'html': content,
'chunks': chunks,
})
return had_error, comment_entries
#
# View functions
#
@check_login_required
@valid_prefs_required
def root(request, local_site_name=None):
"""Handles the root URL of Review Board or a Local Site.
If the user is authenticated, this will redirect to their Dashboard.
Otherwise, they'll be redirected to the All Review Requests page.
Either page may then redirect for login or show a Permission Denied,
depending on the settings.
"""
if request.user.is_authenticated():
url_name = 'dashboard'
else:
url_name = 'all-review-requests'
return HttpResponseRedirect(
local_site_reverse(url_name, local_site_name=local_site_name))
@login_required
@check_local_site_access
def new_review_request(request,
local_site=None,
template_name='reviews/new_review_request.html'):
"""Displays the New Review Request UI.
This handles the creation of a review request based on either an existing
changeset or the provided information.
"""
valid_repos = []
repos = Repository.objects.accessible(request.user, local_site=local_site)
if local_site:
local_site_name = local_site.name
else:
local_site_name = ''
for repo in repos.order_by('name'):
try:
scmtool = repo.get_scmtool()
valid_repos.append({
'id': repo.id,
'name': repo.name,
'scmtool_name': scmtool.name,
'supports_post_commit': repo.supports_post_commit,
'local_site_name': local_site_name,
'files_only': False,
'requires_change_number': scmtool.supports_pending_changesets,
'requires_basedir': not scmtool.get_diffs_use_absolute_paths(),
})
except Exception:
logging.exception('Error loading SCMTool for repository "%s" '
'(ID %d)',
repo.name, repo.id)
valid_repos.insert(0, {
'id': '',
'name': _('(None - File attachments only)'),
'scmtool_name': '',
'supports_post_commit': False,
'files_only': True,
'local_site_name': local_site_name,
})
return render_to_response(template_name, RequestContext(request, {
'repos': valid_repos,
}))
def _get_latest_file_attachments(file_attachments):
file_attachment_histories = FileAttachmentHistory.objects.filter(
file_attachments__in=file_attachments)
latest = dict([
(data['id'], data['latest_revision'])
for data in file_attachment_histories.values('id', 'latest_revision')
])
return [
f
for f in file_attachments
if (not f.is_from_diff and
f.attachment_revision == latest[f.attachment_history_id])
]
@check_login_required
@check_local_site_access
def review_detail(request,
review_request_id,
local_site=None,
template_name="reviews/review_detail.html"):
"""
Main view for review requests. This covers the review request information
and all the reviews on it.
"""
# If there's a local_site passed in the URL, we want to look up the review
# request based on the local_id instead of the pk. This allows each
# local_site configured to have its own review request ID namespace
# starting from 1.
review_request, response = _find_review_request(
request, review_request_id, local_site)
if not review_request:
return response
# The review request detail page needs a lot of data from the database,
# and going through standard model relations will result in far too many
# queries. So we'll be optimizing quite a bit by prefetching and
# re-associating data.
#
# We will start by getting the list of reviews. We'll filter this out into
# some other lists, build some ID maps, and later do further processing.
entries = []
public_reviews = []
body_top_replies = {}
body_bottom_replies = {}
replies = {}
reply_timestamps = {}
reviews_entry_map = {}
reviews_id_map = {}
review_timestamp = 0
visited = None
# Start by going through all reviews that point to this review request.
# This includes draft reviews. We'll be separating these into a list of
# public reviews and a mapping of replies.
#
# We'll also compute the latest review timestamp early, for the ETag
# generation below.
#
# The second pass will come after the ETag calculation.
all_reviews = list(review_request.reviews.select_related('user'))
for review in all_reviews:
review._body_top_replies = []
review._body_bottom_replies = []
if review.public:
# This is a review we'll display on the page. Keep track of it
# for later display and filtering.
public_reviews.append(review)
parent_id = review.base_reply_to_id
if parent_id is not None:
# This is a reply to a review. We'll store the reply data
# into a map, which associates a review ID with its list of
# replies, and also figures out the timestamps.
#
# Later, we'll use this to associate reviews and replies for
# rendering.
if parent_id not in replies:
replies[parent_id] = [review]
reply_timestamps[parent_id] = review.timestamp
else:
replies[parent_id].append(review)
reply_timestamps[parent_id] = max(
reply_timestamps[parent_id],
review.timestamp)
elif (request.user.is_authenticated() and
review.user_id == request.user.pk and
(review_timestamp == 0 or review.timestamp > review_timestamp)):
# This is the latest draft so far from the current user, so
# we'll use this timestamp in the ETag.
review_timestamp = review.timestamp
if review.public or (request.user.is_authenticated() and
review.user_id == request.user.pk):
reviews_id_map[review.pk] = review
# If this review is replying to another review's body_top or
# body_bottom fields, store that data.
for reply_id, reply_list in (
(review.body_top_reply_to_id, body_top_replies),
(review.body_bottom_reply_to_id, body_bottom_replies)):
if reply_id is not None:
if reply_id not in reply_list:
reply_list[reply_id] = [review]
else:
reply_list[reply_id].append(review)
pending_review = review_request.get_pending_review(request.user)
review_ids = list(reviews_id_map.keys())
last_visited = 0
starred = False
if request.user.is_authenticated():
try:
visited, visited_is_new = \
ReviewRequestVisit.objects.get_or_create(
user=request.user, review_request=review_request)
last_visited = visited.timestamp.replace(tzinfo=utc)
except ReviewRequestVisit.DoesNotExist:
# Somehow, this visit was seen as created but then not
# accessible. We need to log this and then continue on.
logging.error('Unable to get or create ReviewRequestVisit '
'for user "%s" on review request at %s',
request.user.username,
review_request.get_absolute_url())
# If the review request is public and pending review and if the user
# is logged in, mark that they've visited this review request.
if (review_request.public and
review_request.status == review_request.PENDING_REVIEW):
visited.timestamp = timezone.now()
visited.save()
try:
profile = request.user.get_profile()
starred_review_requests = \
profile.starred_review_requests.filter(pk=review_request.pk)
starred = (starred_review_requests.count() > 0)
except Profile.DoesNotExist:
pass
draft = review_request.get_draft(request.user)
review_request_details = draft or review_request
# Map diffset IDs to their object.
diffsets = review_request.get_diffsets()
diffsets_by_id = {}
for diffset in diffsets:
diffsets_by_id[diffset.pk] = diffset
# Find out if we can bail early. Generate an ETag for this.
last_activity_time, updated_object = \
review_request.get_last_activity(diffsets, public_reviews)
if draft:
draft_timestamp = draft.last_updated
else:
draft_timestamp = ""
if visited:
visibility = visited.visibility
else:
visibility = None
blocks = review_request.get_blocks()
etag = encode_etag(
'%s:%s:%s:%s:%s:%s:%s:%s:%s:%s'
% (request.user, last_activity_time, draft_timestamp,
review_timestamp, review_request.last_review_activity_timestamp,
is_rich_text_default_for_user(request.user),
[r.pk for r in blocks],
starred, visibility, settings.AJAX_SERIAL))
if etag_if_none_match(request, etag):
return HttpResponseNotModified()
# Get the list of public ChangeDescriptions.
#
# We want to get the latest ChangeDescription along with this. This is
# best done here and not in a separate SQL query.
changedescs = list(review_request.changedescs.filter(public=True))
if changedescs:
# We sort from newest to oldest, so the latest one is the first.
latest_timestamp = changedescs[0].timestamp
else:
latest_timestamp = None
# Now that we have the list of public reviews and all that metadata,
# being processing them and adding entries for display in the page.
#
# We do this here and not above because we don't want to build *too* much
# before the ETag check.
for review in public_reviews:
if not review.is_reply():
state = ''
# Mark as collapsed if the review is older than the latest
# change.
if latest_timestamp and review.timestamp < latest_timestamp:
state = 'collapsed'
latest_reply = reply_timestamps.get(review.pk, None)
# Mark as expanded if there is a reply newer than last_visited
if latest_reply and last_visited and last_visited < latest_reply:
state = ''
entry = {
'review': review,
'comments': {
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': []
},
'timestamp': review.timestamp,
'class': state,
'collapsed': state == 'collapsed',
'issue_open_count': 0,
'has_issues': False,
}
reviews_entry_map[review.pk] = entry
entries.append(entry)
# Link up all the review body replies.
for key, reply_list in (('_body_top_replies', body_top_replies),
('_body_bottom_replies', body_bottom_replies)):
for reply_id, replies in six.iteritems(reply_list):
setattr(reviews_id_map[reply_id], key, replies)
# Get all the file attachments and screenshots and build a couple maps,
# so we can easily associate those objects in comments.
#
    # Note that we're fetching inactive file attachments and screenshots. This
    # is because any file attachments/screenshots created after the initial
# creation of the review request that were later removed will still need
# to be rendered as an added file in a change box.
file_attachments = []
inactive_file_attachments = []
screenshots = []
inactive_screenshots = []
for attachment in review_request_details.get_file_attachments():
attachment._comments = []
file_attachments.append(attachment)
for attachment in review_request_details.get_inactive_file_attachments():
attachment._comments = []
inactive_file_attachments.append(attachment)
for screenshot in review_request_details.get_screenshots():
screenshot._comments = []
screenshots.append(screenshot)
for screenshot in review_request_details.get_inactive_screenshots():
screenshot._comments = []
inactive_screenshots.append(screenshot)
file_attachment_id_map = _build_id_map(file_attachments)
file_attachment_id_map.update(_build_id_map(inactive_file_attachments))
screenshot_id_map = _build_id_map(screenshots)
screenshot_id_map.update(_build_id_map(inactive_screenshots))
issues = {
'total': 0,
'open': 0,
'resolved': 0,
'dropped': 0
}
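    # These counters are incremented below for each comment that opened an
    # issue, and are passed to the template context as 'issues'.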
# Get all the comments and attach them to the reviews.
for model, key, ordering in (
(Comment, 'diff_comments',
('comment__filediff', 'comment__first_line', 'comment__timestamp')),
(ScreenshotComment, 'screenshot_comments', None),
(FileAttachmentComment, 'file_attachment_comments', None)):
# Due to how we initially made the schema, we have a ManyToManyField
        # in between comments and reviews, instead of comments having a
# ForeignKey to the review. This makes it difficult to easily go
# from a comment to a review ID.
#
# The solution to this is to not query the comment objects, but rather
# the through table. This will let us grab the review and comment in
# one go, using select_related.
related_field = model.review.related.field
comment_field_name = related_field.m2m_reverse_field_name()
through = related_field.rel.through
q = through.objects.filter(review__in=review_ids).select_related()
if ordering:
q = q.order_by(*ordering)
objs = list(q)
# Two passes. One to build a mapping, and one to actually process
# comments.
comment_map = {}
for obj in objs:
comment = getattr(obj, comment_field_name)
comment_map[comment.pk] = comment
comment._replies = []
for obj in objs:
comment = getattr(obj, comment_field_name)
# Short-circuit some object fetches for the comment by setting
# some internal state on them.
assert obj.review_id in reviews_id_map
parent_review = reviews_id_map[obj.review_id]
comment._review = parent_review
comment._review_request = review_request
# If the comment has an associated object that we've already
# queried, attach it to prevent a future lookup.
if isinstance(comment, ScreenshotComment):
if comment.screenshot_id in screenshot_id_map:
screenshot = screenshot_id_map[comment.screenshot_id]
comment.screenshot = screenshot
screenshot._comments.append(comment)
elif isinstance(comment, FileAttachmentComment):
if comment.file_attachment_id in file_attachment_id_map:
file_attachment = \
file_attachment_id_map[comment.file_attachment_id]
comment.file_attachment = file_attachment
file_attachment._comments.append(comment)
diff_against_id = comment.diff_against_file_attachment_id
if diff_against_id in file_attachment_id_map:
file_attachment = file_attachment_id_map[diff_against_id]
comment.diff_against_file_attachment = file_attachment
uncollapse = False
if parent_review.is_reply():
# This is a reply to a comment. Add it to the list of replies.
assert obj.review_id not in reviews_entry_map
assert parent_review.base_reply_to_id in reviews_entry_map
                # If the comment isn't itself a reply to another comment,
                # it's orphaned. Ignore it.
if comment.is_reply():
replied_comment = comment_map[comment.reply_to_id]
replied_comment._replies.append(comment)
if not parent_review.public:
uncollapse = True
elif parent_review.public:
# This is a comment on a public review we're going to show.
# Add it to the list.
assert obj.review_id in reviews_entry_map
entry = reviews_entry_map[obj.review_id]
entry['comments'][key].append(comment)
if comment.issue_opened:
status_key = \
comment.issue_status_to_string(comment.issue_status)
issues[status_key] += 1
issues['total'] += 1
entry['has_issues'] = True
if comment.issue_status == BaseComment.OPEN:
entry['issue_open_count'] += 1
if review_request.submitter == request.user:
uncollapse = True
# If the box was collapsed, uncollapse it.
if uncollapse and entry['collapsed']:
entry['class'] = ''
entry['collapsed'] = False
# Sort all the reviews and ChangeDescriptions into a single list, for
# display.
for changedesc in changedescs:
# Process the list of fields, in order by fieldset. These will be
# put into groups composed of inline vs. full-width field values,
# for render into the box.
fields_changed_groups = []
cur_field_changed_group = None
fieldsets = get_review_request_fieldsets(
include_main=True,
include_change_entries_only=True)
for fieldset in fieldsets:
for field_cls in fieldset.field_classes:
field_id = field_cls.field_id
if field_id not in changedesc.fields_changed:
continue
inline = field_cls.change_entry_renders_inline
if (not cur_field_changed_group or
cur_field_changed_group['inline'] != inline):
# Begin a new group of fields.
cur_field_changed_group = {
'inline': inline,
'fields': [],
}
fields_changed_groups.append(cur_field_changed_group)
if hasattr(field_cls, 'locals_vars'):
field = field_cls(review_request, request=request,
locals_vars=locals())
else:
field = field_cls(review_request, request=request)
cur_field_changed_group['fields'] += \
field.get_change_entry_sections_html(
changedesc.fields_changed[field_id])
# See if the review request has had a status change.
status_change = changedesc.fields_changed.get('status')
if status_change:
assert 'new' in status_change
new_status = status_to_string(status_change['new'][0])
else:
new_status = None
# Mark as collapsed if the change is older than a newer change
if latest_timestamp and changedesc.timestamp < latest_timestamp:
state = 'collapsed'
collapsed = True
else:
state = ''
collapsed = False
entries.append({
'new_status': new_status,
'fields_changed_groups': fields_changed_groups,
'changedesc': changedesc,
'timestamp': changedesc.timestamp,
'class': state,
'collapsed': collapsed,
})
entries.sort(key=lambda item: item['timestamp'])
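    # At this point, 'entries' holds both reviews and change descriptions,
    # interleaved oldest-to-newest for rendering.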
close_description, close_description_rich_text = \
review_request.get_close_description()
latest_file_attachments = _get_latest_file_attachments(file_attachments)
siteconfig = SiteConfiguration.objects.get_current()
context_data = make_review_request_context(request, review_request, {
'blocks': blocks,
'draft': draft,
'review_request_details': review_request_details,
'review_request_visit': visited,
'send_email': siteconfig.get('mail_send_review_mail'),
'entries': entries,
'last_activity_time': last_activity_time,
'review': pending_review,
'request': request,
'close_description': close_description,
'close_description_rich_text': close_description_rich_text,
'issues': issues,
'has_diffs': (draft and draft.diffset_id) or len(diffsets) > 0,
'file_attachments': latest_file_attachments,
'all_file_attachments': file_attachments,
'screenshots': screenshots,
})
response = render_to_response(template_name,
RequestContext(request, context_data))
set_etag(response, etag)
return response
class ReviewsDiffViewerView(DiffViewerView):
"""Renders the diff viewer for a review request.
This wraps the base DiffViewerView to display a diff for the given
review request and the given diff revision or range.
The view expects the following parameters to be provided:
* review_request_id
- The ID of the ReviewRequest containing the diff to render.
The following may also be provided:
* revision
- The DiffSet revision to render.
* interdiff_revision
- The second DiffSet revision in an interdiff revision range.
* local_site
- The LocalSite the ReviewRequest must be on, if any.
See DiffViewerView's documentation for the accepted query parameters.
"""
@method_decorator(check_login_required)
@method_decorator(check_local_site_access)
@augment_method_from(DiffViewerView)
def dispatch(self, *args, **kwargs):
pass
def get(self, request, review_request_id, revision=None,
interdiff_revision=None, local_site=None):
"""Handles GET requests for this view.
This will look up the review request and DiffSets, given the
provided information, and pass them to the parent class for rendering.
"""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
self.review_request = review_request
self.draft = review_request.get_draft(request.user)
self.diffset = _query_for_diff(review_request, request.user,
revision, self.draft)
self.interdiffset = None
if interdiff_revision and interdiff_revision != revision:
# An interdiff revision was specified. Try to find a matching
# diffset.
self.interdiffset = _query_for_diff(review_request, request.user,
interdiff_revision, self.draft)
return super(ReviewsDiffViewerView, self).get(
request, self.diffset, self.interdiffset)
def get_context_data(self, *args, **kwargs):
"""Calculates additional context data for rendering.
This provides some additional data used for rendering the diff
viewer. This data is more specific to the reviewing functionality,
as opposed to the data calculated by DiffViewerView.get_context_data,
which is more focused on the actual diff.
"""
# Try to find an existing pending review of this diff from the
# current user.
pending_review = \
self.review_request.get_pending_review(self.request.user)
has_draft_diff = self.draft and self.draft.diffset
is_draft_diff = has_draft_diff and self.draft.diffset == self.diffset
is_draft_interdiff = (has_draft_diff and self.interdiffset and
self.draft.diffset == self.interdiffset)
# Get the list of diffsets. We only want to calculate this once.
diffsets = self.review_request.get_diffsets()
num_diffs = len(diffsets)
if num_diffs > 0:
latest_diffset = diffsets[-1]
else:
latest_diffset = None
if self.draft and self.draft.diffset:
num_diffs += 1
last_activity_time, updated_object = \
self.review_request.get_last_activity(diffsets)
file_attachments = list(self.review_request.get_file_attachments())
screenshots = list(self.review_request.get_screenshots())
latest_file_attachments = \
_get_latest_file_attachments(file_attachments)
# Compute the lists of comments based on filediffs and interfilediffs.
# We do this using the 'through' table so that we can select_related
# the reviews and comments.
comments = {}
q = Comment.review.related.field.rel.through.objects.filter(
review__review_request=self.review_request)
q = q.select_related()
for obj in q:
comment = obj.comment
comment._review = obj.review
key = (comment.filediff_id, comment.interfilediff_id)
comments.setdefault(key, []).append(comment)
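        # 'comments' maps (filediff_id, interfilediff_id) pairs to the comments
        # made on that file pair; comment_counts() uses it below to annotate
        # each file entry.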
close_description, close_description_rich_text = \
self.review_request.get_close_description()
context = super(ReviewsDiffViewerView, self).get_context_data(
*args, **kwargs)
siteconfig = SiteConfiguration.objects.get_current()
context.update({
'close_description': close_description,
'close_description_rich_text': close_description_rich_text,
'diffsets': diffsets,
'latest_diffset': latest_diffset,
'review': pending_review,
'review_request_details': self.draft or self.review_request,
'draft': self.draft,
'last_activity_time': last_activity_time,
'file_attachments': latest_file_attachments,
'all_file_attachments': file_attachments,
'screenshots': screenshots,
'comments': comments,
'send_email': siteconfig.get('mail_send_review_mail'),
})
context.update(
make_review_request_context(self.request, self.review_request))
diffset_pair = context['diffset_pair']
context['diff_context'].update({
'num_diffs': num_diffs,
'comments_hint': {
'has_other_comments': has_comments_in_diffsets_excluding(
pending_review, diffset_pair),
'diffsets_with_comments': [
{
'revision': diffset_info['diffset'].revision,
'is_current': diffset_info['is_current'],
}
for diffset_info in diffsets_with_comments(
pending_review, diffset_pair)
],
'interdiffs_with_comments': [
{
'old_revision': pair['diffset'].revision,
'new_revision': pair['interdiff'].revision,
'is_current': pair['is_current'],
}
for pair in interdiffs_with_comments(
pending_review, diffset_pair)
],
},
})
context['diff_context']['revision'].update({
'latest_revision': (latest_diffset.revision
if latest_diffset else None),
'is_draft_diff': is_draft_diff,
'is_draft_interdiff': is_draft_interdiff,
})
files = []
for f in context['files']:
filediff = f['filediff']
interfilediff = f['interfilediff']
data = {
'newfile': f['newfile'],
'binary': f['binary'],
'deleted': f['deleted'],
'id': f['filediff'].pk,
'depot_filename': f['depot_filename'],
'dest_filename': f['dest_filename'],
'dest_revision': f['dest_revision'],
'revision': f['revision'],
'filediff': {
'id': filediff.id,
'revision': filediff.diffset.revision,
},
'index': f['index'],
'comment_counts': comment_counts(self.request.user, comments,
filediff, interfilediff),
}
if interfilediff:
data['interfilediff'] = {
'id': interfilediff.id,
'revision': interfilediff.diffset.revision,
}
if f['force_interdiff']:
data['force_interdiff'] = True
data['interdiff_revision'] = f['force_interdiff_revision']
files.append(data)
context['diff_context']['files'] = files
return context
@check_login_required
@check_local_site_access
def raw_diff(request, review_request_id, revision=None, local_site=None):
"""
Displays a raw diff of all the filediffs in a diffset for the
given review request.
"""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
draft = review_request.get_draft(request.user)
diffset = _query_for_diff(review_request, request.user, revision, draft)
tool = review_request.repository.get_scmtool()
data = tool.get_parser('').raw_diff(diffset)
resp = HttpResponse(data, content_type='text/x-patch')
if diffset.name == 'diff':
filename = "rb%d.patch" % review_request.display_id
else:
filename = six.text_type(diffset.name).encode('ascii', 'ignore')
# Content-Disposition headers containing commas break on Chrome 16 and
# newer. To avoid this, replace any commas in the filename with an
# underscore. Was bug 3704.
filename = filename.replace(',', '_')
resp['Content-Disposition'] = 'attachment; filename=%s' % filename
set_last_modified(resp, diffset.timestamp)
return resp
@check_login_required
@check_local_site_access
def comment_diff_fragments(
request,
review_request_id,
comment_ids,
template_name='reviews/load_diff_comment_fragments.js',
comment_template_name='reviews/diff_comment_fragment.html',
error_template_name='diffviewer/diff_fragment_error.html',
local_site=None):
"""
Returns the fragment representing the parts of a diff referenced by the
specified list of comment IDs. This is used to allow batch lazy-loading
of these diff fragments based on filediffs, since they may not be cached
and take time to generate.
"""
comments = get_list_or_404(Comment, pk__in=comment_ids.split(","))
latest_timestamp = get_latest_timestamp(comment.timestamp
for comment in comments)
etag = encode_etag(
'%s:%s:%s'
% (comment_ids, latest_timestamp, settings.TEMPLATE_SERIAL))
if etag_if_none_match(request, etag):
response = HttpResponseNotModified()
else:
# While we don't actually need the review request, we still want to do
# this lookup in order to get the permissions checking.
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
lines_of_context = request.GET.get('lines_of_context', '0,0')
container_prefix = request.GET.get('container_prefix')
try:
lines_of_context = [int(i) for i in lines_of_context.split(',')]
# Ensure that we have 2 values for lines_of_context. If only one is
# given, assume it is both the before and after context. If more than
# two are given, only consider the first two. If somehow we get no
# lines of context value, we will default to [0, 0].
if len(lines_of_context) == 1:
lines_of_context.append(lines_of_context[0])
elif len(lines_of_context) > 2:
lines_of_context = lines_of_context[0:2]
elif len(lines_of_context) == 0:
raise ValueError
except ValueError:
lines_of_context = [0, 0]
context = RequestContext(request, {
'comment_entries': [],
'container_prefix': container_prefix,
'queue_name': request.GET.get('queue'),
'show_controls': request.GET.get('show_controls', False),
})
had_error, context['comment_entries'] = (
build_diff_comment_fragments(
comments,
context,
comment_template_name,
error_template_name,
lines_of_context=lines_of_context,
show_controls='draft' not in container_prefix))
page_content = render_to_string(template_name, context)
response = HttpResponse(
page_content,
content_type='application/javascript')
if had_error:
return response
set_etag(response, etag)
response['Expires'] = http_date(time.time() + 60 * 60 * 24 * 365) # 1 year
return response
class ReviewsDiffFragmentView(DiffFragmentView):
"""Renders a fragment from a file in the diff viewer.
Displays just a fragment of a diff or interdiff owned by the given
review request. The fragment is identified by the chunk index in the
diff.
The view expects the following parameters to be provided:
* review_request_id
- The ID of the ReviewRequest containing the diff to render.
* revision
- The DiffSet revision to render.
* filediff_id
- The ID of the FileDiff within the DiffSet.
The following may also be provided:
* interdiff_revision
- The second DiffSet revision in an interdiff revision range.
* chunkindex
- The index (0-based) of the chunk to render. If left out, the
entire file will be rendered.
* local_site
- The LocalSite the ReviewRequest must be on, if any.
See DiffFragmentView's documentation for the accepted query parameters.
"""
@method_decorator(check_login_required)
@method_decorator(check_local_site_access)
@augment_method_from(DiffFragmentView)
def dispatch(self, *args, **kwargs):
pass
def process_diffset_info(self, review_request_id, revision,
interdiff_revision=None, local_site=None,
*args, **kwargs):
"""Process and return information on the desired diff.
The diff IDs and other data passed to the view can be processed and
converted into DiffSets. A dictionary with the DiffSet and FileDiff
information will be returned.
If the review request cannot be accessed by the user, an HttpResponse
will be returned instead.
"""
self.review_request, response = \
_find_review_request(self.request, review_request_id, local_site)
if not self.review_request:
return response
user = self.request.user
draft = self.review_request.get_draft(user)
if interdiff_revision is not None:
interdiffset = _query_for_diff(self.review_request, user,
interdiff_revision, draft)
else:
interdiffset = None
diffset = _query_for_diff(self.review_request, user, revision, draft)
return super(ReviewsDiffFragmentView, self).process_diffset_info(
diffset_or_id=diffset,
interdiffset_or_id=interdiffset,
**kwargs)
def create_renderer(self, diff_file, *args, **kwargs):
"""Creates the DiffRenderer for this fragment.
This will augment the renderer for binary files by looking up
file attachments, if review UIs are involved, disabling caching.
"""
renderer = super(ReviewsDiffFragmentView, self).create_renderer(
diff_file=diff_file, *args, **kwargs)
if diff_file['binary']:
# Determine the file attachments to display in the diff viewer,
# if any.
filediff = diff_file['filediff']
interfilediff = diff_file['interfilediff']
orig_attachment = None
modified_attachment = None
if diff_file['force_interdiff']:
orig_attachment = self._get_diff_file_attachment(filediff)
modified_attachment = \
self._get_diff_file_attachment(interfilediff)
else:
modified_attachment = self._get_diff_file_attachment(filediff)
if not diff_file['is_new_file']:
orig_attachment = \
self._get_diff_file_attachment(filediff, False)
diff_review_ui = None
diff_review_ui_html = None
orig_review_ui = None
orig_review_ui_html = None
modified_review_ui = None
modified_review_ui_html = None
if orig_attachment:
orig_review_ui = orig_attachment.review_ui
if modified_attachment:
modified_review_ui = modified_attachment.review_ui
# See if we're able to generate a diff review UI for these files.
if (orig_review_ui and modified_review_ui and
orig_review_ui.__class__ is modified_review_ui.__class__ and
modified_review_ui.supports_diffing):
# Both files are able to be diffed by this review UI.
# We'll display a special diff review UI instead of two
# side-by-side review UIs.
diff_review_ui = modified_review_ui
diff_review_ui.set_diff_against(orig_attachment)
diff_review_ui_html = \
self._render_review_ui(diff_review_ui, False)
else:
# We won't be showing a diff of these files. Instead, just
# grab the review UIs and render them.
orig_review_ui_html = \
self._render_review_ui(orig_review_ui)
modified_review_ui_html = \
self._render_review_ui(modified_review_ui)
if (diff_review_ui_html or orig_review_ui_html or
modified_review_ui_html):
# Don't cache the view, because the Review UI may care about
# state that we can't anticipate. At the least, it may have
# comments or other data that change between renders, and we
# don't want that to go stale.
renderer.allow_caching = False
renderer.extra_context.update({
'orig_diff_file_attachment': orig_attachment,
'modified_diff_file_attachment': modified_attachment,
'orig_attachment_review_ui_html': orig_review_ui_html,
'modified_attachment_review_ui_html': modified_review_ui_html,
'diff_attachment_review_ui_html': diff_review_ui_html,
})
renderer.extra_context.update(
self._get_download_links(renderer, diff_file))
return renderer
def get_context_data(self, **kwargs):
return {
'review_request': self.review_request,
}
def _get_download_links(self, renderer, diff_file):
if diff_file['binary']:
orig_attachment = \
renderer.extra_context['orig_diff_file_attachment']
modified_attachment = \
renderer.extra_context['modified_diff_file_attachment']
if orig_attachment:
download_orig_url = orig_attachment.get_absolute_url()
else:
download_orig_url = None
if modified_attachment:
download_modified_url = modified_attachment.get_absolute_url()
else:
download_modified_url = None
else:
filediff = diff_file['filediff']
interfilediff = diff_file['interfilediff']
diffset = filediff.diffset
if interfilediff:
orig_url_name = 'download-modified-file'
modified_revision = interfilediff.diffset.revision
modified_filediff_id = interfilediff.pk
else:
orig_url_name = 'download-orig-file'
modified_revision = diffset.revision
modified_filediff_id = filediff.pk
download_orig_url = local_site_reverse(
orig_url_name,
request=self.request,
kwargs={
'review_request_id': self.review_request.display_id,
'revision': diffset.revision,
'filediff_id': filediff.pk,
})
download_modified_url = local_site_reverse(
'download-modified-file',
request=self.request,
kwargs={
'review_request_id': self.review_request.display_id,
'revision': modified_revision,
'filediff_id': modified_filediff_id,
})
return {
'download_orig_url': download_orig_url,
'download_modified_url': download_modified_url,
}
def _render_review_ui(self, review_ui, inline_only=True):
"""Renders the review UI for a file attachment."""
if review_ui and (not inline_only or review_ui.allow_inline):
return mark_safe(review_ui.render_to_string(self.request))
return None
def _get_diff_file_attachment(self, filediff, use_modified=True):
"""Fetch the FileAttachment associated with a FileDiff.
        This will query for the FileAttachment based on the provided filediff
        and return it.
If 'use_modified' is True, the FileAttachment returned will be from the
modified version of the new file. Otherwise, it's the original file
that's being modified.
If no matching FileAttachment is found or if there is more than one
FileAttachment associated with one FileDiff, None is returned. An error
is logged in the latter case.
"""
if not filediff:
return None
try:
return FileAttachment.objects.get_for_filediff(filediff,
use_modified)
except ObjectDoesNotExist:
return None
except MultipleObjectsReturned:
# Only one FileAttachment should be associated with a FileDiff
            logging.error('More than one FileAttachment associated with '
'FileDiff %s',
filediff.pk,
exc_info=1)
return None
@check_login_required
@check_local_site_access
def preview_review_request_email(
request,
review_request_id,
format,
text_template_name='notifications/review_request_email.txt',
html_template_name='notifications/review_request_email.html',
changedesc_id=None,
local_site=None):
"""
Previews the e-mail message that would be sent for an initial
review request or an update.
This is mainly used for debugging.
"""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
extra_context = {}
if changedesc_id:
changedesc = get_object_or_404(ChangeDescription, pk=changedesc_id)
extra_context['change_text'] = changedesc.text
extra_context['changes'] = changedesc.fields_changed
siteconfig = SiteConfiguration.objects.get_current()
if format == 'text':
template_name = text_template_name
mimetype = 'text/plain'
elif format == 'html':
template_name = html_template_name
mimetype = 'text/html'
else:
raise Http404
return HttpResponse(render_to_string(
template_name,
RequestContext(request, dict({
'review_request': review_request,
'user': request.user,
'domain': Site.objects.get_current().domain,
'domain_method': siteconfig.get("site_domain_method"),
}, **extra_context)),
), content_type=mimetype)
@check_login_required
@check_local_site_access
def preview_review_email(request, review_request_id, review_id, format,
text_template_name='notifications/review_email.txt',
html_template_name='notifications/review_email.html',
extra_context={},
local_site=None):
"""
Previews the e-mail message that would be sent for a review of a
review request.
This is mainly used for debugging.
"""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
review = get_object_or_404(Review, pk=review_id,
review_request=review_request)
siteconfig = SiteConfiguration.objects.get_current()
review.ordered_comments = \
review.comments.order_by('filediff', 'first_line')
if format == 'text':
template_name = text_template_name
mimetype = 'text/plain'
elif format == 'html':
template_name = html_template_name
mimetype = 'text/html'
else:
raise Http404
context = {
'review_request': review_request,
'review': review,
'user': request.user,
'domain': Site.objects.get_current().domain,
'domain_method': siteconfig.get("site_domain_method"),
}
context.update(extra_context)
has_error, context['comment_entries'] = \
build_diff_comment_fragments(
review.ordered_comments, context,
"notifications/email_diff_comment_fragment.html")
return HttpResponse(
render_to_string(template_name, RequestContext(request, context)),
content_type=mimetype)
@check_login_required
@check_local_site_access
def preview_reply_email(request, review_request_id, review_id, reply_id,
format,
text_template_name='notifications/reply_email.txt',
html_template_name='notifications/reply_email.html',
local_site=None):
"""
Previews the e-mail message that would be sent for a reply to a
review of a review request.
This is mainly used for debugging.
"""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
review = get_object_or_404(Review, pk=review_id,
review_request=review_request)
reply = get_object_or_404(Review, pk=reply_id, base_reply_to=review)
siteconfig = SiteConfiguration.objects.get_current()
reply.ordered_comments = \
reply.comments.order_by('filediff', 'first_line')
if format == 'text':
template_name = text_template_name
mimetype = 'text/plain'
elif format == 'html':
template_name = html_template_name
mimetype = 'text/html'
else:
raise Http404
context = {
'review_request': review_request,
'review': review,
'reply': reply,
'user': request.user,
'domain': Site.objects.get_current().domain,
'domain_method': siteconfig.get("site_domain_method"),
}
has_error, context['comment_entries'] = \
build_diff_comment_fragments(
reply.ordered_comments, context,
"notifications/email_diff_comment_fragment.html")
return HttpResponse(
render_to_string(template_name, RequestContext(request, context)),
content_type=mimetype)
@check_login_required
@check_local_site_access
def review_file_attachment(request, review_request_id, file_attachment_id,
file_attachment_diff_id=None, local_site=None):
"""Displays a file attachment with a review UI."""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
file_attachment = get_object_or_404(FileAttachment, pk=file_attachment_id)
review_ui = file_attachment.review_ui
if not review_ui:
review_ui = FileAttachmentReviewUI(review_request, file_attachment)
if file_attachment_diff_id:
file_attachment_revision = get_object_or_404(
FileAttachment.objects.filter(
attachment_history=file_attachment.attachment_history),
pk=file_attachment_diff_id)
review_ui.set_diff_against(file_attachment_revision)
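        # With a second revision supplied, the review UI will render a diff
        # between the two attachment revisions rather than a single file.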
try:
is_enabled_for = review_ui.is_enabled_for(
user=request.user,
review_request=review_request,
file_attachment=file_attachment)
except Exception as e:
logging.error('Error when calling is_enabled_for for '
'FileAttachmentReviewUI %r: %s',
review_ui, e, exc_info=1)
is_enabled_for = False
if review_ui and is_enabled_for:
return review_ui.render_to_response(request)
else:
raise Http404
@check_login_required
@check_local_site_access
def view_screenshot(request, review_request_id, screenshot_id,
local_site=None):
"""
Displays a screenshot, along with any comments that were made on it.
"""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
screenshot = get_object_or_404(Screenshot, pk=screenshot_id)
review_ui = LegacyScreenshotReviewUI(review_request, screenshot)
return review_ui.render_to_response(request)
@check_login_required
@check_local_site_access
def user_infobox(request, username,
template_name='accounts/user_infobox.html',
local_site=None):
"""Displays a user info popup.
This is meant to be embedded in other pages, rather than being
a standalone page.
"""
user = get_object_or_404(User, username=username)
show_profile = user.is_profile_visible(request.user)
etag = encode_etag(':'.join([
user.first_name,
user.last_name,
user.email,
six.text_type(user.last_login),
six.text_type(settings.TEMPLATE_SERIAL),
six.text_type(show_profile)
]))
if etag_if_none_match(request, etag):
return HttpResponseNotModified()
response = render_to_response(template_name, RequestContext(request, {
'show_profile': show_profile,
'requested_user': user,
}))
set_etag(response, etag)
return response
def bug_url(request, review_request_id, bug_id, local_site=None):
"""Redirects user to bug tracker issue page."""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
return HttpResponseRedirect(review_request.repository.bug_tracker % bug_id)
def bug_infobox(request, review_request_id, bug_id,
template_name='reviews/bug_infobox.html',
local_site=None):
"""Displays a bug info popup.
This is meant to be embedded in other pages, rather than being
a standalone page.
"""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
repository = review_request.repository
bug_tracker = repository.bug_tracker_service
if not bug_tracker:
return HttpResponseNotFound(_('Unable to find bug tracker service'))
if not isinstance(bug_tracker, BugTracker):
return HttpResponseNotFound(
_('Bug tracker %s does not support metadata') % bug_tracker.name)
bug_info = bug_tracker.get_bug_info(repository, bug_id)
bug_description = bug_info['description']
bug_summary = bug_info['summary']
bug_status = bug_info['status']
if not bug_summary and not bug_description:
return HttpResponseNotFound(
_('No bug metadata found for bug %(bug_id)s on bug tracker '
'%(bug_tracker)s') % {
'bug_id': bug_id,
'bug_tracker': bug_tracker.name,
})
# Don't do anything for single newlines, but treat two newlines as a
# paragraph break.
escaped_description = escape(bug_description).replace('\n\n', '<br/><br/>')
return render_to_response(template_name, RequestContext(request, {
'bug_id': bug_id,
'bug_description': mark_safe(escaped_description),
'bug_status': bug_status,
'bug_summary': bug_summary
}))
def _download_diff_file(modified, request, review_request_id, revision,
filediff_id, local_site=None):
"""Downloads an original or modified file from a diff.
This will fetch the file from a FileDiff, optionally patching it,
and return the result as an HttpResponse.
"""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
draft = review_request.get_draft(request.user)
diffset = _query_for_diff(review_request, request.user, revision, draft)
filediff = get_object_or_404(diffset.files, pk=filediff_id)
encoding_list = diffset.repository.get_encoding_list()
data = get_original_file(filediff, request, encoding_list)
if modified:
data = get_patched_file(data, filediff, request)
data = convert_to_unicode(data, encoding_list)[1]
return HttpResponse(data, content_type='text/plain; charset=utf-8')
@check_login_required
@check_local_site_access
def download_orig_file(*args, **kwargs):
"""Downloads an original file from a diff."""
return _download_diff_file(False, *args, **kwargs)
@check_login_required
@check_local_site_access
def download_modified_file(*args, **kwargs):
"""Downloads a modified file from a diff."""
return _download_diff_file(True, *args, **kwargs)
| mit | -2,637,150,893,756,488,000 | 36.477984 | 81 | 0.596704 | false |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/pandas/io/tests/test_pickle.py | 7 | 10831 | # pylint: disable=E1101,E1103,W0232
""" manage legacy pickle tests """
import nose
import os
from distutils.version import LooseVersion
import pandas as pd
from pandas import Index
from pandas.compat import u, is_platform_little_endian
import pandas
import pandas.util.testing as tm
from pandas.tseries.offsets import Day, MonthEnd
class TestPickle():
"""
How to add pickle tests:
1. Install pandas version intended to output the pickle.
2. Execute "generate_legacy_storage_files.py" to create the pickle.
$ python generate_legacy_storage_files.py <output_dir> pickle
3. Move the created pickle to "data/legacy_pickle/<version>" directory.
    NOTE: TestPickle can't be a subclass of tm.TestCase because nose test
    generators don't work in unittest.TestCase subclasses. See:
    http://stackoverflow.com/questions/6689537/
    nose-test-generators-inside-class
"""
_multiprocess_can_split_ = True
def setUp(self):
from pandas.io.tests.generate_legacy_storage_files import (
create_pickle_data)
self.data = create_pickle_data()
self.path = u('__%s__.pickle' % tm.rands(10))
def compare_element(self, result, expected, typ, version=None):
if isinstance(expected, Index):
tm.assert_index_equal(expected, result)
return
if typ.startswith('sp_'):
comparator = getattr(tm, "assert_%s_equal" % typ)
comparator(result, expected, exact_indices=False)
elif typ == 'timestamp':
if expected is pd.NaT:
assert result is pd.NaT
else:
tm.assert_equal(result, expected)
tm.assert_equal(result.freq, expected.freq)
else:
comparator = getattr(tm, "assert_%s_equal" %
typ, tm.assert_almost_equal)
comparator(result, expected)
def compare(self, vf, version):
# py3 compat when reading py2 pickle
try:
data = pandas.read_pickle(vf)
except (ValueError) as e:
if 'unsupported pickle protocol:' in str(e):
# trying to read a py3 pickle in py2
return
else:
raise
for typ, dv in data.items():
for dt, result in dv.items():
try:
expected = self.data[typ][dt]
except (KeyError):
if version in ('0.10.1', '0.11.0') and dt == 'reg':
break
else:
raise
# use a specific comparator
# if available
comparator = "compare_{typ}_{dt}".format(typ=typ, dt=dt)
comparator = getattr(self, comparator, self.compare_element)
comparator(result, expected, typ, version)
return data
def compare_sp_series_ts(self, res, exp, typ, version):
# SparseTimeSeries integrated into SparseSeries in 0.12.0
# and deprecated in 0.17.0
if version and LooseVersion(version) <= "0.12.0":
tm.assert_sp_series_equal(res, exp, check_series_type=False)
else:
tm.assert_sp_series_equal(res, exp)
def compare_series_ts(self, result, expected, typ, version):
# GH 7748
tm.assert_series_equal(result, expected)
tm.assert_equal(result.index.freq, expected.index.freq)
tm.assert_equal(result.index.freq.normalize, False)
tm.assert_series_equal(result > 0, expected > 0)
# GH 9291
freq = result.index.freq
tm.assert_equal(freq + Day(1), Day(2))
res = freq + pandas.Timedelta(hours=1)
tm.assert_equal(isinstance(res, pandas.Timedelta), True)
tm.assert_equal(res, pandas.Timedelta(days=1, hours=1))
res = freq + pandas.Timedelta(nanoseconds=1)
tm.assert_equal(isinstance(res, pandas.Timedelta), True)
tm.assert_equal(res, pandas.Timedelta(days=1, nanoseconds=1))
def compare_series_dt_tz(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
def compare_series_cat(self, result, expected, typ, version):
# Categorical dtype is added in 0.15.0
# ordered is changed in 0.16.0
if LooseVersion(version) < '0.15.0':
tm.assert_series_equal(result, expected, check_dtype=False,
check_categorical=False)
elif LooseVersion(version) < '0.16.0':
tm.assert_series_equal(result, expected, check_categorical=False)
else:
tm.assert_series_equal(result, expected)
def compare_frame_dt_mixed_tzs(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
def compare_frame_cat_onecol(self, result, expected, typ, version):
# Categorical dtype is added in 0.15.0
# ordered is changed in 0.16.0
if LooseVersion(version) < '0.15.0':
tm.assert_frame_equal(result, expected, check_dtype=False,
check_categorical=False)
elif LooseVersion(version) < '0.16.0':
tm.assert_frame_equal(result, expected, check_categorical=False)
else:
tm.assert_frame_equal(result, expected)
def compare_frame_cat_and_float(self, result, expected, typ, version):
self.compare_frame_cat_onecol(result, expected, typ, version)
def compare_index_period(self, result, expected, typ, version):
tm.assert_index_equal(result, expected)
tm.assertIsInstance(result.freq, MonthEnd)
tm.assert_equal(result.freq, MonthEnd())
tm.assert_equal(result.freqstr, 'M')
tm.assert_index_equal(result.shift(2), expected.shift(2))
def compare_sp_frame_float(self, result, expected, typ, version):
if LooseVersion(version) <= '0.18.1':
tm.assert_sp_frame_equal(result, expected, exact_indices=False,
check_dtype=False)
else:
tm.assert_sp_frame_equal(result, expected)
def read_pickles(self, version):
if not is_platform_little_endian():
raise nose.SkipTest("known failure on non-little endian")
pth = tm.get_data_path('legacy_pickle/{0}'.format(str(version)))
n = 0
for f in os.listdir(pth):
vf = os.path.join(pth, f)
data = self.compare(vf, version)
if data is None:
continue
n += 1
assert n > 0, 'Pickle files are not tested'
def test_pickles(self):
pickle_path = tm.get_data_path('legacy_pickle')
n = 0
for v in os.listdir(pickle_path):
pth = os.path.join(pickle_path, v)
if os.path.isdir(pth):
yield self.read_pickles, v
n += 1
assert n > 0, 'Pickle files are not tested'
def test_round_trip_current(self):
try:
import cPickle as c_pickle
def c_pickler(obj, path):
with open(path, 'wb') as fh:
c_pickle.dump(obj, fh, protocol=-1)
def c_unpickler(path):
with open(path, 'rb') as fh:
fh.seek(0)
return c_pickle.load(fh)
except:
c_pickler = None
c_unpickler = None
import pickle as python_pickle
def python_pickler(obj, path):
with open(path, 'wb') as fh:
python_pickle.dump(obj, fh, protocol=-1)
def python_unpickler(path):
with open(path, 'rb') as fh:
fh.seek(0)
return python_pickle.load(fh)
for typ, dv in self.data.items():
for dt, expected in dv.items():
for writer in [pd.to_pickle, c_pickler, python_pickler]:
if writer is None:
continue
with tm.ensure_clean(self.path) as path:
# test writing with each pickler
writer(expected, path)
# test reading with each unpickler
result = pd.read_pickle(path)
self.compare_element(result, expected, typ)
if c_unpickler is not None:
result = c_unpickler(path)
self.compare_element(result, expected, typ)
result = python_unpickler(path)
self.compare_element(result, expected, typ)
def test_pickle_v0_14_1(self):
# we have the name warning
# 10482
with tm.assert_produces_warning(UserWarning):
cat = pd.Categorical(values=['a', 'b', 'c'],
categories=['a', 'b', 'c', 'd'],
name='foobar', ordered=False)
pickle_path = os.path.join(tm.get_data_path(),
'categorical_0_14_1.pickle')
# This code was executed once on v0.14.1 to generate the pickle:
#
# cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],
# name='foobar')
# with open(pickle_path, 'wb') as f: pickle.dump(cat, f)
#
tm.assert_categorical_equal(cat, pd.read_pickle(pickle_path))
def test_pickle_v0_15_2(self):
# ordered -> _ordered
# GH 9347
# we have the name warning
# 10482
with tm.assert_produces_warning(UserWarning):
cat = pd.Categorical(values=['a', 'b', 'c'],
categories=['a', 'b', 'c', 'd'],
name='foobar', ordered=False)
pickle_path = os.path.join(tm.get_data_path(),
'categorical_0_15_2.pickle')
# This code was executed once on v0.15.2 to generate the pickle:
#
# cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],
# name='foobar')
# with open(pickle_path, 'wb') as f: pickle.dump(cat, f)
#
tm.assert_categorical_equal(cat, pd.read_pickle(pickle_path))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
# '--with-coverage', '--cover-package=pandas.core'],
exit=False)
| mit | -1,783,715,224,599,658,500 | 36.219931 | 78 | 0.549626 | false |
sbalde/edxplatform | lms/djangoapps/oauth2_handler/tests.py | 57 | 9001 | # pylint: disable=missing-docstring
from django.core.cache import cache
from django.test.utils import override_settings
from lang_pref import LANGUAGE_KEY
from xmodule.modulestore.tests.factories import (check_mongo_calls, CourseFactory)
from student.models import anonymous_id_for_user
from student.models import UserProfile
from student.roles import (CourseInstructorRole, CourseStaffRole, GlobalStaff,
OrgInstructorRole, OrgStaffRole)
from student.tests.factories import UserFactory, UserProfileFactory
from openedx.core.djangoapps.user_api.preferences.api import set_user_preference
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
# Will also run default tests for IDTokens and UserInfo
from oauth2_provider.tests import IDTokenTestCase, UserInfoTestCase
class BaseTestMixin(ModuleStoreTestCase):
profile = None
def setUp(self):
super(BaseTestMixin, self).setUp()
self.course_key = CourseFactory.create(emit_signals=True).id
self.course_id = unicode(self.course_key)
self.user_factory = UserFactory
self.set_user(self.make_user())
def set_user(self, user):
super(BaseTestMixin, self).set_user(user)
self.profile = UserProfileFactory(user=self.user)
class IDTokenTest(BaseTestMixin, IDTokenTestCase):
def setUp(self):
super(IDTokenTest, self).setUp()
# CourseAccessHandler uses the application cache.
cache.clear()
def test_sub_claim(self):
scopes, claims = self.get_id_token_values('openid')
self.assertIn('openid', scopes)
sub = claims['sub']
expected_sub = anonymous_id_for_user(self.user, None)
self.assertEqual(sub, expected_sub)
def test_user_name_claim(self):
_scopes, claims = self.get_id_token_values('openid profile')
claim_name = claims['name']
user_profile = UserProfile.objects.get(user=self.user)
user_name = user_profile.name
self.assertEqual(claim_name, user_name)
@override_settings(LANGUAGE_CODE='en')
def test_user_without_locale_claim(self):
scopes, claims = self.get_id_token_values('openid profile')
self.assertIn('profile', scopes)
self.assertEqual(claims['locale'], 'en')
def test_user_with_locale_claim(self):
language = 'en'
set_user_preference(self.user, LANGUAGE_KEY, language)
scopes, claims = self.get_id_token_values('openid profile')
self.assertIn('profile', scopes)
locale = claims['locale']
self.assertEqual(language, locale)
def test_no_special_course_access(self):
with check_mongo_calls(0):
scopes, claims = self.get_id_token_values('openid course_instructor course_staff')
self.assertNotIn('course_staff', scopes)
self.assertNotIn('staff_courses', claims)
self.assertNotIn('course_instructor', scopes)
self.assertNotIn('instructor_courses', claims)
def test_course_staff_courses(self):
CourseStaffRole(self.course_key).add_users(self.user)
with check_mongo_calls(0):
scopes, claims = self.get_id_token_values('openid course_staff')
self.assertIn('course_staff', scopes)
self.assertNotIn('staff_courses', claims) # should not return courses in id_token
def test_course_instructor_courses(self):
with check_mongo_calls(0):
CourseInstructorRole(self.course_key).add_users(self.user)
scopes, claims = self.get_id_token_values('openid course_instructor')
self.assertIn('course_instructor', scopes)
self.assertNotIn('instructor_courses', claims) # should not return courses in id_token
def test_course_staff_courses_with_claims(self):
CourseStaffRole(self.course_key).add_users(self.user)
course_id = unicode(self.course_key)
nonexistent_course_id = 'some/other/course'
claims = {
'staff_courses': {
'values': [course_id, nonexistent_course_id],
'essential': True,
}
}
with check_mongo_calls(0):
scopes, claims = self.get_id_token_values(scope='openid course_staff', claims=claims)
self.assertIn('course_staff', scopes)
self.assertIn('staff_courses', claims)
self.assertEqual(len(claims['staff_courses']), 1)
self.assertIn(course_id, claims['staff_courses'])
self.assertNotIn(nonexistent_course_id, claims['staff_courses'])
def test_permissions_scope(self):
scopes, claims = self.get_id_token_values('openid profile permissions')
self.assertIn('permissions', scopes)
self.assertFalse(claims['administrator'])
self.user.is_staff = True
self.user.save()
_scopes, claims = self.get_id_token_values('openid profile permissions')
self.assertTrue(claims['administrator'])
class UserInfoTest(BaseTestMixin, UserInfoTestCase):
def setUp(self):
super(UserInfoTest, self).setUp()
# create another course in the DB that only global staff have access to
CourseFactory.create(emit_signals=True)
def token_for_scope(self, scope):
full_scope = 'openid %s' % scope
self.set_access_token_scope(full_scope)
token = self.access_token.token # pylint: disable=no-member
return full_scope, token
def get_with_scope(self, scope):
scope, token = self.token_for_scope(scope)
result, claims = self.get_userinfo(token, scope)
self.assertEqual(result.status_code, 200)
return claims
def get_with_claim_value(self, scope, claim, values):
_full_scope, token = self.token_for_scope(scope)
result, claims = self.get_userinfo(
token,
claims={claim: {'values': values}}
)
self.assertEqual(result.status_code, 200)
return claims
def _assert_role_using_scope(self, scope, claim, assert_one_course=True):
with check_mongo_calls(0):
claims = self.get_with_scope(scope)
self.assertEqual(len(claims), 2)
courses = claims[claim]
self.assertIn(self.course_id, courses)
if assert_one_course:
self.assertEqual(len(courses), 1)
def test_request_global_staff_courses_using_scope(self):
GlobalStaff().add_users(self.user)
self._assert_role_using_scope('course_staff', 'staff_courses', assert_one_course=False)
def test_request_org_staff_courses_using_scope(self):
OrgStaffRole(self.course_key.org).add_users(self.user)
self._assert_role_using_scope('course_staff', 'staff_courses')
def test_request_org_instructor_courses_using_scope(self):
OrgInstructorRole(self.course_key.org).add_users(self.user)
self._assert_role_using_scope('course_instructor', 'instructor_courses')
def test_request_staff_courses_using_scope(self):
CourseStaffRole(self.course_key).add_users(self.user)
self._assert_role_using_scope('course_staff', 'staff_courses')
def test_request_instructor_courses_using_scope(self):
CourseInstructorRole(self.course_key).add_users(self.user)
self._assert_role_using_scope('course_instructor', 'instructor_courses')
def _assert_role_using_claim(self, scope, claim):
values = [self.course_id, 'some_invalid_course']
with check_mongo_calls(0):
claims = self.get_with_claim_value(scope, claim, values)
self.assertEqual(len(claims), 2)
courses = claims[claim]
self.assertIn(self.course_id, courses)
self.assertEqual(len(courses), 1)
def test_request_global_staff_courses_with_claims(self):
GlobalStaff().add_users(self.user)
self._assert_role_using_claim('course_staff', 'staff_courses')
def test_request_org_staff_courses_with_claims(self):
OrgStaffRole(self.course_key.org).add_users(self.user)
self._assert_role_using_claim('course_staff', 'staff_courses')
def test_request_org_instructor_courses_with_claims(self):
OrgInstructorRole(self.course_key.org).add_users(self.user)
self._assert_role_using_claim('course_instructor', 'instructor_courses')
def test_request_staff_courses_with_claims(self):
CourseStaffRole(self.course_key).add_users(self.user)
self._assert_role_using_claim('course_staff', 'staff_courses')
def test_request_instructor_courses_with_claims(self):
CourseInstructorRole(self.course_key).add_users(self.user)
self._assert_role_using_claim('course_instructor', 'instructor_courses')
def test_permissions_scope(self):
claims = self.get_with_scope('permissions')
self.assertIn('administrator', claims)
self.assertFalse(claims['administrator'])
self.user.is_staff = True
self.user.save()
claims = self.get_with_scope('permissions')
self.assertTrue(claims['administrator'])
| agpl-3.0 | 5,138,256,475,205,075,000 | 37.465812 | 97 | 0.67037 | false |
eugene1g/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/multicommandtool_unittest.py | 121 | 7646 | # Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import unittest2 as unittest
from optparse import make_option
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.multicommandtool import MultiCommandTool, Command, TryAgain
class TrivialCommand(Command):
name = "trivial"
show_in_main_help = True
help_text = "help text"
def __init__(self, **kwargs):
Command.__init__(self, **kwargs)
def execute(self, options, args, tool):
pass
class UncommonCommand(TrivialCommand):
name = "uncommon"
show_in_main_help = False
class LikesToRetry(Command):
name = "likes-to-retry"
show_in_main_help = True
help_text = "help text"
def __init__(self, **kwargs):
Command.__init__(self, **kwargs)
self.execute_count = 0
def execute(self, options, args, tool):
self.execute_count += 1
if self.execute_count < 2:
raise TryAgain()
class CommandTest(unittest.TestCase):
def test_name_with_arguments(self):
TrivialCommand.argument_names = "ARG1 ARG2"
command_with_args = TrivialCommand()
self.assertEqual(command_with_args.name_with_arguments(), "trivial ARG1 ARG2")
TrivialCommand.argument_names = None
command_with_args = TrivialCommand(options=[make_option("--my_option")])
self.assertEqual(command_with_args.name_with_arguments(), "trivial [options]")
def test_parse_required_arguments(self):
self.assertEqual(Command._parse_required_arguments("ARG1 ARG2"), ["ARG1", "ARG2"])
self.assertEqual(Command._parse_required_arguments("[ARG1] [ARG2]"), [])
self.assertEqual(Command._parse_required_arguments("[ARG1] ARG2"), ["ARG2"])
# Note: We might make our arg parsing smarter in the future and allow this type of arguments string.
self.assertRaises(Exception, Command._parse_required_arguments, "[ARG1 ARG2]")
def test_required_arguments(self):
TrivialCommand.argument_names = "ARG1 ARG2 [ARG3]"
two_required_arguments = TrivialCommand()
expected_logs = "2 arguments required, 1 argument provided. Provided: 'foo' Required: ARG1 ARG2\nSee 'trivial-tool help trivial' for usage.\n"
exit_code = OutputCapture().assert_outputs(self, two_required_arguments.check_arguments_and_execute, [None, ["foo"], TrivialTool()], expected_logs=expected_logs)
self.assertEqual(exit_code, 1)
TrivialCommand.argument_names = None
class TrivialTool(MultiCommandTool):
def __init__(self, commands=None):
MultiCommandTool.__init__(self, name="trivial-tool", commands=commands)
def path(self):
return __file__
def should_execute_command(self, command):
return (True, None)
class MultiCommandToolTest(unittest.TestCase):
def _assert_split(self, args, expected_split):
self.assertEqual(MultiCommandTool._split_command_name_from_args(args), expected_split)
def test_split_args(self):
# MultiCommandToolTest._split_command_name_from_args returns: (command, args)
full_args = ["--global-option", "command", "--option", "arg"]
full_args_expected = ("command", ["--global-option", "--option", "arg"])
self._assert_split(full_args, full_args_expected)
full_args = []
full_args_expected = (None, [])
self._assert_split(full_args, full_args_expected)
full_args = ["command", "arg"]
full_args_expected = ("command", ["arg"])
self._assert_split(full_args, full_args_expected)
def test_command_by_name(self):
# This also tests Command auto-discovery.
tool = TrivialTool()
self.assertEqual(tool.command_by_name("trivial").name, "trivial")
self.assertEqual(tool.command_by_name("bar"), None)
def _assert_tool_main_outputs(self, tool, main_args, expected_stdout, expected_stderr = "", expected_exit_code=0):
exit_code = OutputCapture().assert_outputs(self, tool.main, [main_args], expected_stdout=expected_stdout, expected_stderr=expected_stderr)
self.assertEqual(exit_code, expected_exit_code)
def test_retry(self):
likes_to_retry = LikesToRetry()
tool = TrivialTool(commands=[likes_to_retry])
tool.main(["tool", "likes-to-retry"])
self.assertEqual(likes_to_retry.execute_count, 2)
def test_global_help(self):
tool = TrivialTool(commands=[TrivialCommand(), UncommonCommand()])
expected_common_commands_help = """Usage: trivial-tool [options] COMMAND [ARGS]
Options:
-h, --help show this help message and exit
Common trivial-tool commands:
trivial help text
See 'trivial-tool help --all-commands' to list all commands.
See 'trivial-tool help COMMAND' for more information on a specific command.
"""
self._assert_tool_main_outputs(tool, ["tool"], expected_common_commands_help)
self._assert_tool_main_outputs(tool, ["tool", "help"], expected_common_commands_help)
expected_all_commands_help = """Usage: trivial-tool [options] COMMAND [ARGS]
Options:
-h, --help show this help message and exit
All trivial-tool commands:
help Display information about this program or its subcommands
trivial help text
uncommon help text
See 'trivial-tool help --all-commands' to list all commands.
See 'trivial-tool help COMMAND' for more information on a specific command.
"""
self._assert_tool_main_outputs(tool, ["tool", "help", "--all-commands"], expected_all_commands_help)
# Test that arguments can be passed before commands as well
self._assert_tool_main_outputs(tool, ["tool", "--all-commands", "help"], expected_all_commands_help)
def test_command_help(self):
TrivialCommand.long_help = "LONG HELP"
command_with_options = TrivialCommand(options=[make_option("--my_option")])
tool = TrivialTool(commands=[command_with_options])
expected_subcommand_help = "trivial [options] help text\n\nLONG HELP\n\nOptions:\n --my_option=MY_OPTION\n\n"
self._assert_tool_main_outputs(tool, ["tool", "help", "trivial"], expected_subcommand_help)
| bsd-3-clause | 2,118,831,475,188,653,800 | 41.477778 | 169 | 0.694481 | false |
lgarren/spack | var/spack/repos/builtin/packages/pathfinder/package.py | 3 | 2190 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Pathfinder(MakefilePackage):
"""Proxy Application. Signature search."""
homepage = "https://mantevo.org/packages/"
url = "http://mantevo.org/downloads/releaseTarballs/miniapps/PathFinder/PathFinder_1.0.0.tgz"
tags = ['proxy-app']
version('1.0.0', '374269e8d42c305eda3e392444e22dde')
build_targets = ['--directory=PathFinder_ref', 'CC=cc']
def edit(self, spec, prefix):
makefile = FileFilter('PathFinder_ref/Makefile')
makefile.filter('-fopenmp', self.compiler.openmp_flag)
def install(self, spec, prefix):
# Manual installation
mkdirp(prefix.bin)
mkdirp(prefix.doc)
install('PathFinder_ref/PathFinder.x', prefix.bin)
install('PathFinder_ref/MicroTestData.adj_list', prefix.bin)
install('README', prefix.doc)
install_tree('generatedData/', prefix.doc.generatedData)
install_tree('scaleData/', prefix.doc.scaleData)
| lgpl-2.1 | 3,253,582,773,949,579,300 | 38.818182 | 102 | 0.668037 | false |
blueboxgroup/cinder | cinder/volume/drivers/san/hp/hp_lefthand_iscsi.py | 3 | 5725 | # (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume driver for HP LeftHand Storage array.
This driver requires 11.5 or greater firmware on the LeftHand array, using
the 1.0 or greater version of the hplefthandclient.
You will need to install the python hplefthandclient.
sudo pip install hplefthandclient
Set the following in the cinder.conf file to enable the
LeftHand Channel Driver along with the required flags:
volume_driver=cinder.volume.drivers.san.hp.hp_lefthand_iscsi.
HPLeftHandISCSIDriver
It also requires the setting of hplefthand_api_url, hplefthand_username,
hplefthand_password for credentials to talk to the REST service on the
LeftHand array.
"""
from cinder import exception
from cinder.i18n import _LE, _LI
from cinder.openstack.common import log as logging
from cinder.volume.driver import VolumeDriver
from cinder.volume.drivers.san.hp import hp_lefthand_cliq_proxy as cliq_proxy
from cinder.volume.drivers.san.hp import hp_lefthand_rest_proxy as rest_proxy
LOG = logging.getLogger(__name__)
MIN_CLIENT_VERSION = '1.0.3'
class HPLeftHandISCSIDriver(VolumeDriver):
"""Executes commands relating to HP/LeftHand SAN ISCSI volumes.
Version history:
1.0.0 - Initial driver
1.0.1 - Added support for retype
1.0.2 - Added support for volume migrate
1.0.3 - Fix for no handler for logger during tests
1.0.4 - Removing locks bug #1395953
"""
VERSION = "1.0.4"
def __init__(self, *args, **kwargs):
super(HPLeftHandISCSIDriver, self).__init__(*args, **kwargs)
self.proxy = None
self.args = args
self.kwargs = kwargs
def _create_proxy(self, *args, **kwargs):
try:
proxy = rest_proxy.HPLeftHandRESTProxy(*args, **kwargs)
except exception.NotFound:
proxy = cliq_proxy.HPLeftHandCLIQProxy(*args, **kwargs)
return proxy
def check_for_setup_error(self):
self.proxy.check_for_setup_error()
def do_setup(self, context):
self.proxy = self._create_proxy(*self.args, **self.kwargs)
LOG.info(_LI("HPLeftHand driver %(driver_ver)s, "
"proxy %(proxy_ver)s") % {
"driver_ver": self.VERSION,
"proxy_ver": self.proxy.get_version_string()})
if isinstance(self.proxy, cliq_proxy.HPLeftHandCLIQProxy):
self.proxy.do_setup(context)
else:
# Check minimum client version for REST proxy
client_version = rest_proxy.hplefthandclient.version
if (client_version < MIN_CLIENT_VERSION):
ex_msg = (_LE("Invalid hplefthandclient version found ("
"%(found)s). Version %(minimum)s or greater "
"required.")
% {'found': client_version,
'minimum': MIN_CLIENT_VERSION})
LOG.error(ex_msg)
raise exception.InvalidInput(reason=ex_msg)
def create_volume(self, volume):
"""Creates a volume."""
return self.proxy.create_volume(volume)
def extend_volume(self, volume, new_size):
"""Extend the size of an existing volume."""
self.proxy.extend_volume(volume, new_size)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
return self.proxy.create_volume_from_snapshot(volume, snapshot)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self.proxy.create_snapshot(snapshot)
def delete_volume(self, volume):
"""Deletes a volume."""
self.proxy.delete_volume(volume)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
self.proxy.delete_snapshot(snapshot)
def initialize_connection(self, volume, connector):
"""Assigns the volume to a server."""
return self.proxy.initialize_connection(volume, connector)
def terminate_connection(self, volume, connector, **kwargs):
"""Unassign the volume from the host."""
self.proxy.terminate_connection(volume, connector)
def get_volume_stats(self, refresh):
data = self.proxy.get_volume_stats(refresh)
data['driver_version'] = self.VERSION
return data
def create_cloned_volume(self, volume, src_vref):
return self.proxy.create_cloned_volume(volume, src_vref)
def create_export(self, context, volume):
return self.proxy.create_export(context, volume)
def ensure_export(self, context, volume):
return self.proxy.ensure_export(context, volume)
def remove_export(self, context, volume):
return self.proxy.remove_export(context, volume)
def retype(self, context, volume, new_type, diff, host):
"""Convert the volume to be of the new type."""
return self.proxy.retype(context, volume, new_type, diff, host)
def migrate_volume(self, ctxt, volume, host):
"""Migrate directly if source and dest are managed by same storage."""
return self.proxy.migrate_volume(ctxt, volume, host)
| apache-2.0 | -820,982,533,157,085,000 | 36.175325 | 78 | 0.659039 | false |
mahak/spark | examples/src/main/python/mllib/naive_bayes_example.py | 27 | 2246 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
NaiveBayes Example.
Usage:
`spark-submit --master local[4] examples/src/main/python/mllib/naive_bayes_example.py`
"""
import shutil
from pyspark import SparkContext
# $example on$
from pyspark.mllib.classification import NaiveBayes, NaiveBayesModel
from pyspark.mllib.util import MLUtils
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="PythonNaiveBayesExample")
# $example on$
# Load and parse the data file.
data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
# Split data approximately into training (60%) and test (40%)
training, test = data.randomSplit([0.6, 0.4])
# Train a naive Bayes model.
model = NaiveBayes.train(training, 1.0)
# Make prediction and test accuracy.
predictionAndLabel = test.map(lambda p: (model.predict(p.features), p.label))
accuracy = 1.0 * predictionAndLabel.filter(lambda pl: pl[0] == pl[1]).count() / test.count()
print('model accuracy {}'.format(accuracy))
# Save and load model
output_dir = 'target/tmp/myNaiveBayesModel'
shutil.rmtree(output_dir, ignore_errors=True)
model.save(sc, output_dir)
sameModel = NaiveBayesModel.load(sc, output_dir)
predictionAndLabel = test.map(lambda p: (sameModel.predict(p.features), p.label))
accuracy = 1.0 * predictionAndLabel.filter(lambda pl: pl[0] == pl[1]).count() / test.count()
print('sameModel accuracy {}'.format(accuracy))
# $example off$
| apache-2.0 | 8,011,080,475,587,510,000 | 34.650794 | 96 | 0.720837 | false |
xHeliotrope/injustice_dropper | env/lib/python3.4/site-packages/setuptools/command/rotate.py | 461 | 2038 | from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsOptionError
import os
from setuptools import Command
from setuptools.compat import basestring
class rotate(Command):
"""Delete older distributions"""
description = "delete older distributions, keeping N newest files"
user_options = [
('match=', 'm', "patterns to match (required)"),
('dist-dir=', 'd', "directory where the distributions are"),
('keep=', 'k', "number of matching distributions to keep"),
]
boolean_options = []
def initialize_options(self):
self.match = None
self.dist_dir = None
self.keep = None
def finalize_options(self):
if self.match is None:
raise DistutilsOptionError(
"Must specify one or more (comma-separated) match patterns "
"(e.g. '.zip' or '.egg')"
)
if self.keep is None:
raise DistutilsOptionError("Must specify number of files to keep")
try:
self.keep = int(self.keep)
except ValueError:
raise DistutilsOptionError("--keep must be an integer")
if isinstance(self.match, basestring):
self.match = [
convert_path(p.strip()) for p in self.match.split(',')
]
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
def run(self):
self.run_command("egg_info")
from glob import glob
for pattern in self.match:
pattern = self.distribution.get_name() + '*' + pattern
files = glob(os.path.join(self.dist_dir, pattern))
files = [(os.path.getmtime(f), f) for f in files]
files.sort()
files.reverse()
log.info("%d file(s) matching %s", len(files), pattern)
files = files[self.keep:]
for (t, f) in files:
log.info("Deleting %s", f)
if not self.dry_run:
os.unlink(f)
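# Usage sketch (illustrative, not part of setuptools): run from a project
# directory, the command below keeps the three newest matching distributions
# in dist/ and deletes the rest:
#
# python setup.py rotate --match=.egg,.zip --keep=3 --dist-dir=dist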
| mit | -7,676,551,182,617,431,000 | 32.409836 | 78 | 0.571639 | false |
GitHublong/hue | desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/__init__.py | 205 | 1112 | # Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
| apache-2.0 | 1,199,796,690,065,275,000 | 49.545455 | 74 | 0.768885 | false |
crosick/zhishu | ENV/lib/python2.7/site-packages/pip/wheel.py | 187 | 30186 | """
Support for installing and building the "wheel" binary package format.
"""
from __future__ import absolute_import
import compileall
import csv
import errno
import functools
import hashlib
import logging
import os
import os.path
import re
import shutil
import stat
import sys
import tempfile
import warnings
from base64 import urlsafe_b64encode
from email.parser import Parser
from pip._vendor.six import StringIO
import pip
from pip.download import path_to_url, unpack_url
from pip.exceptions import InvalidWheelFilename, UnsupportedWheel
from pip.locations import distutils_scheme, PIP_DELETE_MARKER_FILENAME
from pip import pep425tags
from pip.utils import (
call_subprocess, ensure_dir, make_path_relative, captured_stdout,
rmtree)
from pip.utils.logging import indent_log
from pip._vendor.distlib.scripts import ScriptMaker
from pip._vendor import pkg_resources
from pip._vendor.six.moves import configparser
wheel_ext = '.whl'
VERSION_COMPATIBLE = (1, 0)
logger = logging.getLogger(__name__)
class WheelCache(object):
"""A cache of wheels for future installs."""
def __init__(self, cache_dir, format_control):
"""Create a wheel cache.
:param cache_dir: The root of the cache.
:param format_control: A pip.index.FormatControl object to limit
binaries being read from the cache.
"""
self._cache_dir = os.path.expanduser(cache_dir) if cache_dir else None
self._format_control = format_control
def cached_wheel(self, link, package_name):
return cached_wheel(
self._cache_dir, link, self._format_control, package_name)
def _cache_for_link(cache_dir, link):
"""
Return a directory to store cached wheels in for link.
Because there are M wheels for any one sdist, we provide a directory
to cache them in, and then consult that directory when looking up
cache hits.
We only insert things into the cache if they have plausible version
numbers, so that we don't contaminate the cache with things that were not
unique. E.g. ./package might have dozens of installs done for it and build
a version of 0.0...and if we built and cached a wheel, we'd end up using
the same wheel even if the source has been edited.
:param cache_dir: The cache_dir being used by pip.
:param link: The link of the sdist for which this will cache wheels.
"""
# We want to generate an url to use as our cache key, we don't want to just
# re-use the URL because it might have other items in the fragment and we
# don't care about those.
key_parts = [link.url_without_fragment]
if link.hash_name is not None and link.hash is not None:
key_parts.append("=".join([link.hash_name, link.hash]))
key_url = "#".join(key_parts)
# Encode our key url with sha224, we'll use this because it has similar
# security properties to sha256, but with a shorter total output (and thus
# less secure). However the differences don't make a lot of difference for
# our use case here.
hashed = hashlib.sha224(key_url.encode()).hexdigest()
# We want to nest the directories some to prevent having a ton of top level
# directories where we might run out of sub directories on some FS.
parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]
# Inside of the base location for cached wheels, expand our parts and join
# them all together.
return os.path.join(cache_dir, "wheels", *parts)
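# Illustrative sketch (assumption, not original code): for a link such as
# https://example.com/foo-1.0.tar.gz the key URL is hashed with sha224 and
# split, so the directory returned above looks like
# <cache_dir>/wheels/ab/cd/ef/<remaining 50 hex chars>
# and cached_wheel() below only needs an os.listdir() of that leaf directory.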
def cached_wheel(cache_dir, link, format_control, package_name):
if not cache_dir:
return link
if not link:
return link
if link.is_wheel:
return link
if not link.is_artifact:
return link
if not package_name:
return link
canonical_name = pkg_resources.safe_name(package_name).lower()
formats = pip.index.fmt_ctl_formats(format_control, canonical_name)
if "binary" not in formats:
return link
root = _cache_for_link(cache_dir, link)
try:
wheel_names = os.listdir(root)
except OSError as e:
if e.errno in (errno.ENOENT, errno.ENOTDIR):
return link
raise
candidates = []
for wheel_name in wheel_names:
try:
wheel = Wheel(wheel_name)
except InvalidWheelFilename:
continue
if not wheel.supported():
# Built for a different python/arch/etc
continue
candidates.append((wheel.support_index_min(), wheel_name))
if not candidates:
return link
candidates.sort()
path = os.path.join(root, candidates[0][1])
return pip.index.Link(path_to_url(path), trusted=True)
def rehash(path, algo='sha256', blocksize=1 << 20):
"""Return (hash, length) for path using hashlib.new(algo)"""
h = hashlib.new(algo)
length = 0
with open(path, 'rb') as f:
block = f.read(blocksize)
while block:
length += len(block)
h.update(block)
block = f.read(blocksize)
digest = 'sha256=' + urlsafe_b64encode(
h.digest()
).decode('latin1').rstrip('=')
return (digest, length)
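# Note added for clarity: rehash() returns the digest in the form recorded in
# a wheel's RECORD file, so a row rewritten later in this module looks roughly
# like (path and length are illustrative):
# pip/__init__.py,sha256=<urlsafe-base64-digest-without-padding>,21532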
def open_for_csv(name, mode):
if sys.version_info[0] < 3:
nl = {}
bin = 'b'
else:
nl = {'newline': ''}
bin = ''
return open(name, mode + bin, **nl)
def fix_script(path):
"""Replace #!python with #!/path/to/python
Return True if file was changed."""
# XXX RECORD hashes will need to be updated
if os.path.isfile(path):
with open(path, 'rb') as script:
firstline = script.readline()
if not firstline.startswith(b'#!python'):
return False
exename = sys.executable.encode(sys.getfilesystemencoding())
firstline = b'#!' + exename + os.linesep.encode("ascii")
rest = script.read()
with open(path, 'wb') as script:
script.write(firstline)
script.write(rest)
return True
dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
\.dist-info$""", re.VERBOSE)
def root_is_purelib(name, wheeldir):
"""
Return True if the extracted wheel in wheeldir should go into purelib.
"""
name_folded = name.replace("-", "_")
for item in os.listdir(wheeldir):
match = dist_info_re.match(item)
if match and match.group('name') == name_folded:
with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel:
for line in wheel:
line = line.lower().rstrip()
if line == "root-is-purelib: true":
return True
return False
def get_entrypoints(filename):
if not os.path.exists(filename):
return {}, {}
# This is done because you can pass a string to entry_points wrappers which
# means that they may or may not be valid INI files. The attempt here is to
# strip leading and trailing whitespace in order to make them valid INI
# files.
with open(filename) as fp:
data = StringIO()
for line in fp:
data.write(line.strip())
data.write("\n")
data.seek(0)
cp = configparser.RawConfigParser()
cp.readfp(data)
console = {}
gui = {}
if cp.has_section('console_scripts'):
console = dict(cp.items('console_scripts'))
if cp.has_section('gui_scripts'):
gui = dict(cp.items('gui_scripts'))
return console, gui
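# Illustrative example (hypothetical file contents): an entry_points.txt of
#
# [console_scripts]
# pip = pip:main
#
# makes get_entrypoints() return ({'pip': 'pip:main'}, {}).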
def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None,
pycompile=True, scheme=None, isolated=False):
"""Install a wheel"""
if not scheme:
scheme = distutils_scheme(
name, user=user, home=home, root=root, isolated=isolated
)
if root_is_purelib(name, wheeldir):
lib_dir = scheme['purelib']
else:
lib_dir = scheme['platlib']
info_dir = []
data_dirs = []
source = wheeldir.rstrip(os.path.sep) + os.path.sep
# Record details of the files moved
# installed = files copied from the wheel to the destination
# changed = files changed while installing (scripts #! line typically)
# generated = files newly generated during the install (script wrappers)
installed = {}
changed = set()
generated = []
# Compile all of the pyc files that we're going to be installing
if pycompile:
with captured_stdout() as stdout:
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
compileall.compile_dir(source, force=True, quiet=True)
logger.debug(stdout.getvalue())
def normpath(src, p):
return make_path_relative(src, p).replace(os.path.sep, '/')
def record_installed(srcfile, destfile, modified=False):
"""Map archive RECORD paths to installation RECORD paths."""
oldpath = normpath(srcfile, wheeldir)
newpath = normpath(destfile, lib_dir)
installed[oldpath] = newpath
if modified:
changed.add(destfile)
def clobber(source, dest, is_base, fixer=None, filter=None):
ensure_dir(dest) # common for the 'include' path
for dir, subdirs, files in os.walk(source):
basedir = dir[len(source):].lstrip(os.path.sep)
destdir = os.path.join(dest, basedir)
if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
continue
for s in subdirs:
destsubdir = os.path.join(dest, basedir, s)
if is_base and basedir == '' and destsubdir.endswith('.data'):
data_dirs.append(s)
continue
elif (is_base and
s.endswith('.dist-info') and
# is self.req.project_name case preserving?
s.lower().startswith(
req.project_name.replace('-', '_').lower())):
assert not info_dir, 'Multiple .dist-info directories'
info_dir.append(destsubdir)
for f in files:
# Skip unwanted files
if filter and filter(f):
continue
srcfile = os.path.join(dir, f)
destfile = os.path.join(dest, basedir, f)
# directory creation is lazy and after the file filtering above
# to ensure we don't install empty dirs; empty dirs can't be
# uninstalled.
ensure_dir(destdir)
# We use copyfile (not move, copy, or copy2) to be extra sure
# that we are not moving directories over (copyfile fails for
# directories) as well as to ensure that we are not copying
# over any metadata because we want more control over what
# metadata we actually copy over.
shutil.copyfile(srcfile, destfile)
# Copy over the metadata for the file, currently this only
# includes the atime and mtime.
st = os.stat(srcfile)
if hasattr(os, "utime"):
os.utime(destfile, (st.st_atime, st.st_mtime))
# If our file is executable, then make our destination file
# executable.
if os.access(srcfile, os.X_OK):
st = os.stat(srcfile)
permissions = (
st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
)
os.chmod(destfile, permissions)
changed = False
if fixer:
changed = fixer(destfile)
record_installed(srcfile, destfile, changed)
clobber(source, lib_dir, True)
assert info_dir, "%s .dist-info directory not found" % req
# Get the defined entry points
ep_file = os.path.join(info_dir[0], 'entry_points.txt')
console, gui = get_entrypoints(ep_file)
def is_entrypoint_wrapper(name):
# EP, EP.exe and EP-script.py are scripts generated for
# entry point EP by setuptools
if name.lower().endswith('.exe'):
matchname = name[:-4]
elif name.lower().endswith('-script.py'):
matchname = name[:-10]
elif name.lower().endswith(".pya"):
matchname = name[:-4]
else:
matchname = name
# Ignore setuptools-generated scripts
return (matchname in console or matchname in gui)
for datadir in data_dirs:
fixer = None
filter = None
for subdir in os.listdir(os.path.join(wheeldir, datadir)):
fixer = None
if subdir == 'scripts':
fixer = fix_script
filter = is_entrypoint_wrapper
source = os.path.join(wheeldir, datadir, subdir)
dest = scheme[subdir]
clobber(source, dest, False, fixer=fixer, filter=filter)
maker = ScriptMaker(None, scheme['scripts'])
# Ensure old scripts are overwritten.
# See https://github.com/pypa/pip/issues/1800
maker.clobber = True
# Ensure we don't generate any variants for scripts because this is almost
# never what somebody wants.
# See https://bitbucket.org/pypa/distlib/issue/35/
maker.variants = set(('', ))
# This is required because otherwise distlib creates scripts that are not
# executable.
# See https://bitbucket.org/pypa/distlib/issue/32/
maker.set_mode = True
# Simplify the script and fix the fact that the default script swallows
# every single stack trace.
# See https://bitbucket.org/pypa/distlib/issue/34/
# See https://bitbucket.org/pypa/distlib/issue/33/
def _get_script_text(entry):
return maker.script_template % {
"module": entry.prefix,
"import_name": entry.suffix.split(".")[0],
"func": entry.suffix,
}
maker._get_script_text = _get_script_text
maker.script_template = """# -*- coding: utf-8 -*-
import re
import sys
from %(module)s import %(import_name)s
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(%(func)s())
"""
# Special case pip and setuptools to generate versioned wrappers
#
# The issue is that some projects (specifically, pip and setuptools) use
# code in setup.py to create "versioned" entry points - pip2.7 on Python
# 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
# the wheel metadata at build time, and so if the wheel is installed with
# a *different* version of Python the entry points will be wrong. The
# correct fix for this is to enhance the metadata to be able to describe
# such versioned entry points, but that won't happen till Metadata 2.0 is
# available.
# In the meantime, projects using versioned entry points will either have
# incorrect versioned entry points, or they will not be able to distribute
# "universal" wheels (i.e., they will need a wheel per Python version).
#
# Because setuptools and pip are bundled with _ensurepip and virtualenv,
# we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
# override the versioned entry points in the wheel and generate the
    # correct ones. This code is purely a short-term measure until Metadata 2.0
# is available.
#
# To add the level of hack in this section of code, in order to support
# ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment
# variable which will control which version scripts get installed.
#
# ENSUREPIP_OPTIONS=altinstall
# - Only pipX.Y and easy_install-X.Y will be generated and installed
# ENSUREPIP_OPTIONS=install
# - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
# that this option is technically if ENSUREPIP_OPTIONS is set and is
# not altinstall
# DEFAULT
# - The default behavior is to install pip, pipX, pipX.Y, easy_install
# and easy_install-X.Y.
pip_script = console.pop('pip', None)
if pip_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'pip = ' + pip_script
generated.extend(maker.make(spec))
if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
spec = 'pip%s = %s' % (sys.version[:1], pip_script)
generated.extend(maker.make(spec))
spec = 'pip%s = %s' % (sys.version[:3], pip_script)
generated.extend(maker.make(spec))
# Delete any other versioned pip entry points
pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
for k in pip_ep:
del console[k]
easy_install_script = console.pop('easy_install', None)
if easy_install_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'easy_install = ' + easy_install_script
generated.extend(maker.make(spec))
spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)
generated.extend(maker.make(spec))
# Delete any other versioned easy_install entry points
easy_install_ep = [
k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
]
for k in easy_install_ep:
del console[k]
# Generate the console and GUI entry points specified in the wheel
if len(console) > 0:
generated.extend(
maker.make_multiple(['%s = %s' % kv for kv in console.items()])
)
if len(gui) > 0:
generated.extend(
maker.make_multiple(
['%s = %s' % kv for kv in gui.items()],
{'gui': True}
)
)
record = os.path.join(info_dir[0], 'RECORD')
temp_record = os.path.join(info_dir[0], 'RECORD.pip')
with open_for_csv(record, 'r') as record_in:
with open_for_csv(temp_record, 'w+') as record_out:
reader = csv.reader(record_in)
writer = csv.writer(record_out)
for row in reader:
row[0] = installed.pop(row[0], row[0])
if row[0] in changed:
row[1], row[2] = rehash(row[0])
writer.writerow(row)
for f in generated:
h, l = rehash(f)
writer.writerow((f, h, l))
for f in installed:
writer.writerow((installed[f], '', ''))
shutil.move(temp_record, record)
def _unique(fn):
@functools.wraps(fn)
def unique(*args, **kw):
seen = set()
for item in fn(*args, **kw):
if item not in seen:
seen.add(item)
yield item
return unique
# TODO: this goes somewhere besides the wheel module
@_unique
def uninstallation_paths(dist):
"""
Yield all the uninstallation paths for dist based on RECORD-without-.pyc
Yield paths to all the files in RECORD. For each .py file in RECORD, add
the .pyc in the same directory.
UninstallPathSet.add() takes care of the __pycache__ .pyc.
"""
from pip.utils import FakeFile # circular import
r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))
for row in r:
path = os.path.join(dist.location, row[0])
yield path
if path.endswith('.py'):
dn, fn = os.path.split(path)
base = fn[:-3]
path = os.path.join(dn, base + '.pyc')
yield path
def wheel_version(source_dir):
"""
Return the Wheel-Version of an extracted wheel, if possible.
Otherwise, return False if we couldn't parse / extract it.
"""
try:
dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0]
wheel_data = dist.get_metadata('WHEEL')
wheel_data = Parser().parsestr(wheel_data)
version = wheel_data['Wheel-Version'].strip()
version = tuple(map(int, version.split('.')))
return version
except:
return False
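# Note added for clarity: a WHEEL metadata file containing the line
# "Wheel-Version: 1.0" makes wheel_version() return (1, 0), which
# check_compatibility() below accepts silently because it equals
# VERSION_COMPATIBLE.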
def check_compatibility(version, name):
"""
Raises errors or warns if called with an incompatible Wheel-Version.
Pip should refuse to install a Wheel-Version that's a major series
    ahead of what it's compatible with (e.g. 2.0 > 1.1); and warn when
    installing a version only a minor version ahead (e.g. 1.2 > 1.1).
version: a 2-tuple representing a Wheel-Version (Major, Minor)
name: name of wheel or package to raise exception about
:raises UnsupportedWheel: when an incompatible Wheel-Version is given
"""
if not version:
raise UnsupportedWheel(
"%s is in an unsupported or invalid wheel" % name
)
if version[0] > VERSION_COMPATIBLE[0]:
raise UnsupportedWheel(
"%s's Wheel-Version (%s) is not compatible with this version "
"of pip" % (name, '.'.join(map(str, version)))
)
elif version > VERSION_COMPATIBLE:
logger.warning(
'Installing from a newer Wheel-Version (%s)',
'.'.join(map(str, version)),
)
class Wheel(object):
"""A wheel file"""
# TODO: maybe move the install code into this class
wheel_file_re = re.compile(
r"""^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))
((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
\.whl|\.dist-info)$""",
re.VERBOSE
)
def __init__(self, filename):
"""
:raises InvalidWheelFilename: when the filename is invalid for a wheel
"""
wheel_info = self.wheel_file_re.match(filename)
if not wheel_info:
raise InvalidWheelFilename(
"%s is not a valid wheel filename." % filename
)
self.filename = filename
self.name = wheel_info.group('name').replace('_', '-')
# we'll assume "_" means "-" due to wheel naming scheme
# (https://github.com/pypa/pip/issues/1150)
self.version = wheel_info.group('ver').replace('_', '-')
self.pyversions = wheel_info.group('pyver').split('.')
self.abis = wheel_info.group('abi').split('.')
self.plats = wheel_info.group('plat').split('.')
# All the tag combinations from this file
self.file_tags = set(
(x, y, z) for x in self.pyversions
for y in self.abis for z in self.plats
)
def support_index_min(self, tags=None):
"""
Return the lowest index that one of the wheel's file_tag combinations
achieves in the supported_tags list e.g. if there are 8 supported tags,
and one of the file tags is first in the list, then return 0. Returns
        None if the wheel is not supported.
"""
if tags is None: # for mock
tags = pep425tags.supported_tags
indexes = [tags.index(c) for c in self.file_tags if c in tags]
return min(indexes) if indexes else None
def supported(self, tags=None):
"""Is this wheel supported on this system?"""
if tags is None: # for mock
tags = pep425tags.supported_tags
return bool(set(tags).intersection(self.file_tags))
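# Illustrative example (the filename is hypothetical): for a universal wheel,
#
# w = Wheel('pip-6.0-py2.py3-none-any.whl')
# w.name == 'pip', w.version == '6.0'
# w.pyversions == ['py2', 'py3'], w.abis == ['none'], w.plats == ['any']
#
# and w.supported() is True whenever pep425tags.supported_tags contains a
# matching (pyver, abi, plat) combination such as ('py2', 'none', 'any').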
class WheelBuilder(object):
"""Build wheels from a RequirementSet."""
def __init__(self, requirement_set, finder, build_options=None,
global_options=None):
self.requirement_set = requirement_set
self.finder = finder
self._cache_root = requirement_set._wheel_cache._cache_dir
self._wheel_dir = requirement_set.wheel_download_dir
self.build_options = build_options or []
self.global_options = global_options or []
def _build_one(self, req, output_dir):
"""Build one wheel.
:return: The filename of the built wheel, or None if the build failed.
"""
tempd = tempfile.mkdtemp('pip-wheel-')
try:
if self.__build_one(req, tempd):
try:
wheel_name = os.listdir(tempd)[0]
wheel_path = os.path.join(output_dir, wheel_name)
shutil.move(os.path.join(tempd, wheel_name), wheel_path)
logger.info('Stored in directory: %s', output_dir)
return wheel_path
except:
return None
return None
finally:
rmtree(tempd)
def __build_one(self, req, tempd):
base_args = [
sys.executable, '-c',
"import setuptools;__file__=%r;"
"exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), "
"__file__, 'exec'))" % req.setup_py
] + list(self.global_options)
logger.info('Running setup.py bdist_wheel for %s', req.name)
logger.debug('Destination directory: %s', tempd)
wheel_args = base_args + ['bdist_wheel', '-d', tempd] \
+ self.build_options
try:
call_subprocess(wheel_args, cwd=req.source_dir, show_stdout=False)
return True
except:
logger.error('Failed building wheel for %s', req.name)
return False
def build(self, autobuilding=False):
"""Build wheels.
        :param autobuilding: If True, replace the sdist we built from with
            the newly built wheel, in preparation for installation.
:return: True if all the wheels built correctly.
"""
assert self._wheel_dir or (autobuilding and self._cache_root)
# unpack sdists and constructs req set
self.requirement_set.prepare_files(self.finder)
reqset = self.requirement_set.requirements.values()
buildset = []
for req in reqset:
if req.constraint:
continue
if req.is_wheel:
if not autobuilding:
logger.info(
'Skipping %s, due to already being wheel.', req.name)
elif req.editable:
if not autobuilding:
logger.info(
'Skipping bdist_wheel for %s, due to being editable',
req.name)
elif autobuilding and req.link and not req.link.is_artifact:
pass
elif autobuilding and not req.source_dir:
pass
else:
if autobuilding:
link = req.link
base, ext = link.splitext()
if pip.index.egg_info_matches(base, None, link) is None:
# Doesn't look like a package - don't autobuild a wheel
# because we'll have no way to lookup the result sanely
continue
if "binary" not in pip.index.fmt_ctl_formats(
self.finder.format_control,
pkg_resources.safe_name(req.name).lower()):
logger.info(
"Skipping bdist_wheel for %s, due to binaries "
"being disabled for it.", req.name)
continue
buildset.append(req)
if not buildset:
return True
# Build the wheels.
logger.info(
'Building wheels for collected packages: %s',
', '.join([req.name for req in buildset]),
)
with indent_log():
build_success, build_failure = [], []
for req in buildset:
if autobuilding:
output_dir = _cache_for_link(self._cache_root, req.link)
try:
ensure_dir(output_dir)
except OSError as e:
logger.warn("Building wheel for %s failed: %s",
req.name, e)
build_failure.append(req)
continue
else:
output_dir = self._wheel_dir
wheel_file = self._build_one(req, output_dir)
if wheel_file:
build_success.append(req)
if autobuilding:
# XXX: This is mildly duplicative with prepare_files,
# but not close enough to pull out to a single common
# method.
# The code below assumes temporary source dirs -
# prevent it doing bad things.
if req.source_dir and not os.path.exists(os.path.join(
req.source_dir, PIP_DELETE_MARKER_FILENAME)):
raise AssertionError(
"bad source dir - missing marker")
# Delete the source we built the wheel from
req.remove_temporary_source()
# set the build directory again - name is known from
# the work prepare_files did.
req.source_dir = req.build_location(
self.requirement_set.build_dir)
# Update the link for this.
req.link = pip.index.Link(
path_to_url(wheel_file), trusted=True)
assert req.link.is_wheel
# extract the wheel into the dir
unpack_url(
req.link, req.source_dir, None, False,
session=self.requirement_set.session)
else:
build_failure.append(req)
# notify success/failure
if build_success:
logger.info(
'Successfully built %s',
' '.join([req.name for req in build_success]),
)
if build_failure:
logger.info(
'Failed to build %s',
' '.join([req.name for req in build_failure]),
)
# Return True if all builds were successful
return len(build_failure) == 0
| mit | 6,295,287,343,320,624,000 | 36.405204 | 79 | 0.574405 | false |
bowlofstew/Herd | herd/BitTornado/launchmanycore.py | 5 | 12499 | #!/usr/bin/env python
# Written by John Hoffman
# see LICENSE.txt for license information
from BitTornado import PSYCO
if PSYCO.psyco:
try:
import psyco
assert psyco.__version__ >= 0x010100f0
psyco.full()
except:
pass
from download_bt1 import BT1Download
from RawServer import RawServer, UPnP_ERROR
from RateLimiter import RateLimiter
from ServerPortHandler import MultiHandler
from parsedir import parsedir
from natpunch import UPnP_test
from random import seed
from socket import error as socketerror
from threading import Event
from sys import argv, exit
import sys, os
from clock import clock
from __init__ import createPeerID, mapbase64, version
from cStringIO import StringIO
from traceback import print_exc
try:
True
except:
True = 1
False = 0
def fmttime(n):
try:
n = int(n) # n may be None or too large
assert n < 5184000 # 60 days
except:
return 'downloading'
m, s = divmod(n, 60)
h, m = divmod(m, 60)
return '%d:%02d:%02d' % (h, m, s)
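# Example (added for clarity): fmttime(3661) returns '1:01:01'; None, garbage
# or anything of 60 days and over falls through to 'downloading'.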
class SingleDownload:
def __init__(self, controller, hash, response, config, myid):
self.controller = controller
self.hash = hash
self.response = response
self.config = config
self.doneflag = Event()
self.waiting = True
self.checking = False
self.working = False
self.seed = False
self.closed = False
self.status_msg = ''
self.status_err = ['']
self.status_errtime = 0
self.status_done = 0.0
self.rawserver = controller.handler.newRawServer(hash, self.doneflag)
d = BT1Download(self.display, self.finished, self.error,
controller.exchandler, self.doneflag, config, response,
hash, myid, self.rawserver, controller.listen_port)
self.d = d
def start(self):
if not self.d.saveAs(self.saveAs):
self._shutdown()
return
self._hashcheckfunc = self.d.initFiles()
if not self._hashcheckfunc:
self._shutdown()
return
self.controller.hashchecksched(self.hash)
def saveAs(self, name, length, saveas, isdir):
return self.controller.saveAs(self.hash, name, saveas, isdir)
def hashcheck_start(self, donefunc):
if self.is_dead():
self._shutdown()
return
self.waiting = False
self.checking = True
self._hashcheckfunc(donefunc)
def hashcheck_callback(self):
self.checking = False
if self.is_dead():
self._shutdown()
return
if not self.d.startEngine(ratelimiter = self.controller.ratelimiter):
self._shutdown()
return
self.d.startRerequester()
self.statsfunc = self.d.startStats()
self.rawserver.start_listening(self.d.getPortHandler())
self.working = True
def is_dead(self):
return self.doneflag.isSet()
def _shutdown(self):
self.shutdown(False)
def shutdown(self, quiet=True):
if self.closed:
return
self.doneflag.set()
self.rawserver.shutdown()
if self.checking or self.working:
self.d.shutdown()
self.waiting = False
self.checking = False
self.working = False
self.closed = True
self.controller.was_stopped(self.hash)
if not quiet:
self.controller.died(self.hash)
def display(self, activity = None, fractionDone = None):
# really only used by StorageWrapper now
if activity:
self.status_msg = activity
if fractionDone is not None:
self.status_done = float(fractionDone)
def finished(self):
self.seed = True
def error(self, msg):
if self.doneflag.isSet():
self._shutdown()
self.status_err.append(msg)
self.status_errtime = clock()
class LaunchMany:
def __init__(self, config, Output):
try:
self.config = config
self.Output = Output
self.torrent_dir = config['torrent_dir']
self.torrent_cache = {}
self.file_cache = {}
self.blocked_files = {}
self.scan_period = config['parse_dir_interval']
self.stats_period = config['display_interval']
self.torrent_list = []
self.downloads = {}
self.counter = 0
self.doneflag = Event()
self.hashcheck_queue = []
self.hashcheck_current = None
self.rawserver = RawServer(self.doneflag, config['timeout_check_interval'],
config['timeout'], ipv6_enable = config['ipv6_enabled'],
failfunc = self.failed, errorfunc = self.exchandler)
upnp_type = UPnP_test(config['upnp_nat_access'])
while True:
try:
self.listen_port = self.rawserver.find_and_bind(
config['minport'], config['maxport'], config['bind'],
ipv6_socket_style = config['ipv6_binds_v4'],
upnp = upnp_type, randomizer = config['random_port'])
break
except socketerror, e:
if upnp_type and e == UPnP_ERROR:
self.Output.message('WARNING: COULD NOT FORWARD VIA UPnP')
upnp_type = 0
continue
self.failed("Couldn't listen - " + str(e))
return
self.ratelimiter = RateLimiter(self.rawserver.add_task,
config['upload_unit_size'])
self.ratelimiter.set_upload_rate(config['max_upload_rate'])
self.handler = MultiHandler(self.rawserver, self.doneflag)
seed(createPeerID())
self.rawserver.add_task(self.scan, 0)
self.rawserver.add_task(self.stats, 0)
self.handler.listen_forever()
self.Output.message('shutting down')
self.hashcheck_queue = []
for hash in self.torrent_list:
self.Output.message('dropped "'+self.torrent_cache[hash]['path']+'"')
self.downloads[hash].shutdown()
self.rawserver.shutdown()
except:
data = StringIO()
print_exc(file = data)
Output.exception(data.getvalue())
def scan(self):
self.rawserver.add_task(self.scan, self.scan_period)
r = parsedir(self.torrent_dir, self.torrent_cache,
self.file_cache, self.blocked_files,
return_metainfo = True, errfunc = self.Output.message)
( self.torrent_cache, self.file_cache, self.blocked_files,
added, removed ) = r
for hash, data in removed.items():
self.Output.message('dropped "'+data['path']+'"')
self.remove(hash)
for hash, data in added.items():
self.Output.message('added "'+data['path']+'"')
self.add(hash, data)
def stats(self):
self.rawserver.add_task(self.stats, self.stats_period)
data = []
for hash in self.torrent_list:
cache = self.torrent_cache[hash]
if self.config['display_path']:
name = cache['path']
else:
name = cache['name']
size = cache['length']
d = self.downloads[hash]
progress = '0.0%'
peers = 0
seeds = 0
seedsmsg = "S"
dist = 0.0
uprate = 0.0
dnrate = 0.0
upamt = 0
dnamt = 0
t = 0
if d.is_dead():
status = 'stopped'
elif d.waiting:
status = 'waiting for hash check'
elif d.checking:
status = d.status_msg
progress = '%.1f%%' % (d.status_done*100)
else:
stats = d.statsfunc()
s = stats['stats']
if d.seed:
status = 'seeding'
progress = '100.0%'
seeds = s.numOldSeeds
seedsmsg = "s"
dist = s.numCopies
else:
if s.numSeeds + s.numPeers:
t = stats['time']
if t == 0: # unlikely
t = 0.01
status = fmttime(t)
else:
t = -1
status = 'connecting to peers'
progress = '%.1f%%' % (int(stats['frac']*1000)/10.0)
seeds = s.numSeeds
dist = s.numCopies2
dnrate = stats['down']
peers = s.numPeers
uprate = stats['up']
upamt = s.upTotal
dnamt = s.downTotal
if d.is_dead() or d.status_errtime+300 > clock():
msg = d.status_err[-1]
else:
msg = ''
data.append(( name, status, progress, peers, seeds, seedsmsg, dist,
uprate, dnrate, upamt, dnamt, size, t, msg ))
stop = self.Output.display(data)
if stop:
self.doneflag.set()
def remove(self, hash):
self.torrent_list.remove(hash)
self.downloads[hash].shutdown()
del self.downloads[hash]
def add(self, hash, data):
c = self.counter
self.counter += 1
x = ''
for i in xrange(3):
x = mapbase64[c & 0x3F]+x
c >>= 6
peer_id = createPeerID(x)
d = SingleDownload(self, hash, data['metainfo'], self.config, peer_id)
self.torrent_list.append(hash)
self.downloads[hash] = d
d.start()
def saveAs(self, hash, name, saveas, isdir):
x = self.torrent_cache[hash]
style = self.config['saveas_style']
if style == 1 or style == 3:
if saveas:
saveas = os.path.join(saveas,x['file'][:-1-len(x['type'])])
else:
saveas = x['path'][:-1-len(x['type'])]
if style == 3:
if not os.path.isdir(saveas):
try:
os.mkdir(saveas)
except:
raise OSError("couldn't create directory for "+x['path']
+" ("+saveas+")")
if not isdir:
saveas = os.path.join(saveas, name)
else:
if saveas:
saveas = os.path.join(saveas, name)
else:
saveas = os.path.join(os.path.split(x['path'])[0], name)
if isdir and not os.path.isdir(saveas):
try:
os.mkdir(saveas)
except:
raise OSError("couldn't create directory for "+x['path']
+" ("+saveas+")")
return saveas
def hashchecksched(self, hash = None):
if hash:
self.hashcheck_queue.append(hash)
if not self.hashcheck_current:
self._hashcheck_start()
def _hashcheck_start(self):
self.hashcheck_current = self.hashcheck_queue.pop(0)
self.downloads[self.hashcheck_current].hashcheck_start(self.hashcheck_callback)
def hashcheck_callback(self):
self.downloads[self.hashcheck_current].hashcheck_callback()
if self.hashcheck_queue:
self._hashcheck_start()
else:
self.hashcheck_current = None
def died(self, hash):
if self.torrent_cache.has_key(hash):
self.Output.message('DIED: "'+self.torrent_cache[hash]['path']+'"')
def was_stopped(self, hash):
try:
self.hashcheck_queue.remove(hash)
except:
pass
if self.hashcheck_current == hash:
self.hashcheck_current = None
if self.hashcheck_queue:
self._hashcheck_start()
def failed(self, s):
self.Output.message('FAILURE: '+s)
def exchandler(self, s):
self.Output.exception(s)
| mit | -6,484,234,279,455,621,000 | 31.805774 | 89 | 0.512121 | false |
benfinkelcbt/CPD200 | CPD200-Lab13-Python/pyasn1/type/tag.py | 162 | 4499 | # ASN.1 types tags
from operator import getitem
from pyasn1 import error
tagClassUniversal = 0x00
tagClassApplication = 0x40
tagClassContext = 0x80
tagClassPrivate = 0xC0
tagFormatSimple = 0x00
tagFormatConstructed = 0x20
tagCategoryImplicit = 0x01
tagCategoryExplicit = 0x02
tagCategoryUntagged = 0x04
class Tag:
def __init__(self, tagClass, tagFormat, tagId):
if tagId < 0:
raise error.PyAsn1Error(
'Negative tag ID (%s) not allowed' % (tagId,)
)
self.__tag = (tagClass, tagFormat, tagId)
self.uniq = (tagClass, tagId)
self.__hashedUniqTag = hash(self.uniq)
def __str__(self):
return '[%s:%s:%s]' % self.__tag
def __repr__(self):
return '%s(tagClass=%s, tagFormat=%s, tagId=%s)' % (
(self.__class__.__name__,) + self.__tag
)
# These is really a hotspot -- expose public "uniq" attribute to save on
# function calls
def __eq__(self, other): return self.uniq == other.uniq
def __ne__(self, other): return self.uniq != other.uniq
def __lt__(self, other): return self.uniq < other.uniq
def __le__(self, other): return self.uniq <= other.uniq
def __gt__(self, other): return self.uniq > other.uniq
def __ge__(self, other): return self.uniq >= other.uniq
def __hash__(self): return self.__hashedUniqTag
def __getitem__(self, idx): return self.__tag[idx]
    def __and__(self, otherTag):
        (tagClass, tagFormat, tagId) = otherTag
        # Combine component-wise with the other tag, as __or__ does below.
        return self.__class__(
            self.__tag[0] & tagClass,
            self.__tag[1] & tagFormat,
            self.__tag[2] & tagId
        )
def __or__(self, otherTag):
(tagClass, tagFormat, tagId) = otherTag
return self.__class__(
self.__tag[0]|tagClass,
self.__tag[1]|tagFormat,
self.__tag[2]|tagId
)
def asTuple(self): return self.__tag # __getitem__() is slow
class TagSet:
def __init__(self, baseTag=(), *superTags):
self.__baseTag = baseTag
self.__superTags = superTags
self.__hashedSuperTags = hash(superTags)
_uniq = ()
for t in superTags:
_uniq = _uniq + t.uniq
self.uniq = _uniq
self.__lenOfSuperTags = len(superTags)
def __str__(self):
return self.__superTags and '+'.join([str(x) for x in self.__superTags]) or '[untagged]'
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
'(), ' + ', '.join([repr(x) for x in self.__superTags])
)
def __add__(self, superTag):
return self.__class__(
self.__baseTag, *self.__superTags + (superTag,)
)
def __radd__(self, superTag):
return self.__class__(
self.__baseTag, *(superTag,) + self.__superTags
)
def tagExplicitly(self, superTag):
tagClass, tagFormat, tagId = superTag
if tagClass == tagClassUniversal:
raise error.PyAsn1Error(
'Can\'t tag with UNIVERSAL-class tag'
)
if tagFormat != tagFormatConstructed:
superTag = Tag(tagClass, tagFormatConstructed, tagId)
return self + superTag
def tagImplicitly(self, superTag):
tagClass, tagFormat, tagId = superTag
if self.__superTags:
superTag = Tag(tagClass, self.__superTags[-1][1], tagId)
return self[:-1] + superTag
def getBaseTag(self): return self.__baseTag
def __getitem__(self, idx):
if isinstance(idx, slice):
return self.__class__(
self.__baseTag, *getitem(self.__superTags, idx)
)
return self.__superTags[idx]
def __eq__(self, other): return self.uniq == other.uniq
def __ne__(self, other): return self.uniq != other.uniq
def __lt__(self, other): return self.uniq < other.uniq
def __le__(self, other): return self.uniq <= other.uniq
def __gt__(self, other): return self.uniq > other.uniq
def __ge__(self, other): return self.uniq >= other.uniq
def __hash__(self): return self.__hashedSuperTags
def __len__(self): return self.__lenOfSuperTags
def isSuperTagSetOf(self, tagSet):
if len(tagSet) < self.__lenOfSuperTags:
return
idx = self.__lenOfSuperTags - 1
while idx >= 0:
if self.__superTags[idx] != tagSet[idx]:
return
idx = idx - 1
return 1
def initTagSet(tag): return TagSet(tag, tag)
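# Illustrative example (added for clarity, not part of the original module):
#
#   t = initTagSet(Tag(tagClassUniversal, tagFormatSimple, 0x02))   # INTEGER
#   t1 = t.tagImplicitly(Tag(tagClassContext, tagFormatSimple, 1))  # [1] IMPLICIT
#   t2 = t.tagExplicitly(Tag(tagClassContext, tagFormatSimple, 1))  # [1] EXPLICIT
#
# Implicit tagging replaces the outermost tag, while explicit tagging wraps the
# existing tag set in an additional constructed tag.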
| gpl-3.0 | 967,501,181,214,894,500 | 34.148438 | 96 | 0.564792 | false |
diging/jars | cookies/operations.py | 1 | 16043 | from django.contrib.contenttypes.models import ContentType
from django.db.models import Q, QuerySet
from django.conf import settings
from cookies.models import *
from concepts.models import Concept
from cookies import authorization
import jsonpickle, datetime, copy, requests
from itertools import groupby, combinations
from collections import Counter
import networkx as nx
import os
from cookies.exceptions import *
logger = settings.LOGGER
def add_creation_metadata(resource, user):
"""
Convenience function for creating a provenance relation when a
:class:`.User` adds a :class:`.Resource`\.
Parameters
----------
resource : :class:`.Resource`
user : :class:`.User`
"""
__provenance__, _ = Field.objects.get_or_create(uri=settings.PROVENANCE)
_now = str(datetime.datetime.now())
_creation_message = u'Added by %s on %s' % (user.username, _now)
Relation.objects.create(**{
'source': resource,
'predicate': __provenance__,
'target': Value.objects.create(**{
'_value': jsonpickle.encode(_creation_message),
'container': resource.container,
}),
'container': resource.container,
})
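# Illustrative usage (added for clarity, not part of the original module).
# Assumes an existing Resource and the requesting User:
#
#   resource = Resource.objects.get(pk=some_pk)
#   add_creation_metadata(resource, request.user)
#
# This attaches a provenance Relation recording who added the resource and when.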
def _transfer_all_relations(from_instance, to_instance, content_type):
"""
Transfers relations from one model instance to another.
Parameters
----------
from_instance : object
An instance of any model, usually a :class:`.Resource` or
:class:`.ConceptEntity`\.
to_instance :
content_type : :class:`.ContentType`
:class:`.ContentType` for the model of the instance that will inherit
relations.
"""
from_instance.relations_from.update(source_type=content_type,
source_instance_id=to_instance.id)
from_instance.relations_to.update(target_type=content_type,
target_instance_id=to_instance.id)
def prune_relations(resource, user=None):
"""
Search for and aggressively remove duplicate relations for a
:class:`.Resource`\.
Use at your own peril.
Parameters
----------
resource : :class:`.Resource`
user : :class:`.User`
        If provided, data manipulation will be limited by the authorizations
attached to a specific user. Default is ``None`` (superuser auths).
"""
value_type = ContentType.objects.get_for_model(Value)
def _search_and_destroy(relations):
def _delete_dupes(objs): # objs is an iterator of values() dicts.
for obj in objs[1:]: # Leave the first object.
Relation.objects.get(pk=obj[-1]).delete()
# We're looking for relations with the same predicate, whose
# complementary object is of the same type and is either identical or
# (if a Value) has the same value/content.
for pred, pr_relations in groupby(relations, lambda o: o[0]):
for ctype, ct_relations in groupby(pr_relations, lambda o: o[1]):
# We need to use this iterator twice, so we consume it now, and
# keep it around as a list.
ct_r = list(ct_relations)
for iid, id_relations in groupby(ct_relations, lambda o: o[2]):
_delete_dupes(list(id_relations)) # Target is the same.
if ctype != value_type.id: # Only applies to Value instances.
continue
values = Value.objects.filter(pk__in=zip(*ct_r)[2]) \
.order_by('id').values('id', '_value')
key = lambda *o: o[0][1]['_value']
for _, vl_r in groupby(sorted(zip(ct_r, values), key=key), key):
_delete_dupes(zip(*list(vl_r))[0])
fields = ['predicate_id', 'target_type', 'target_instance_id', 'id']
relations_from = resource.relations_from.all()
if user and type(resource) is Resource:
relations_from = authorization.apply_filter(ResourceAuthorization.EDIT, user, relations_from)
_search_and_destroy(relations_from.order_by(*fields).values_list(*fields))
fields = ['predicate_id', 'source_type', 'source_instance_id', 'id']
relations_to = resource.relations_to.all()
if user and type(resource) is Resource:
relations_to = authorization.apply_filter(ResourceAuthorization.EDIT, user, relations_to)
_search_and_destroy(relations_to.order_by(*fields).values_list(*fields))
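# Illustrative usage (added for clarity, not part of the original module):
#
#   prune_relations(resource)                      # superuser authorizations
#   prune_relations(resource, user=request.user)   # limited by the user's auths
#
# Duplicate relations (same predicate and an identical, or equal-valued, target)
# are deleted, keeping the first of each group.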
def merge_conceptentities(entities, master_id=None, delete=True, user=None):
"""
Merge :class:`.ConceptEntity` instances in the QuerySet ``entities``.
As of 0.4, no :class:`.ConceptEntity` instances are deleted. Instead, they
are added to an :class:`.Identity` instance. ``master`` will become the
:prop:`.Identity.representative`\.
Parameters
----------
entities : QuerySet
master_id : int
(optional) The primary key of the :class:`.ConceptEntity` to use as the
"master" instance into which the remaining instances will be merged.
Returns
-------
master : :class:`.ConceptEntity`
Raises
------
RuntimeError
        If fewer than two :class:`.ConceptEntity` instances are present in
``entities``, or if more than one unique :class:`.Concept` is
implicated.
"""
conceptentity_type = ContentType.objects.get_for_model(ConceptEntity)
if isinstance(entities, QuerySet):
_len = lambda qs: qs.count()
_uri = lambda qs: qs.values_list('concept__uri', flat=True)
_get_master = lambda qs, pk: qs.get(pk=pk)
_get_rep = lambda qs: qs.filter(represents__isnull=False).first()
_first = lambda qs: qs.first()
elif isinstance(entities, list):
_len = lambda qs: len(qs)
_uri = lambda qs: [concept.uri for obj in qs for concept in obj.concept.all()]#[getattr(o.concept, 'uri', None) for o in qs]
_get_master = lambda qs, pk: [e for e in entities if e.id == pk].pop()
_get_rep = lambda qs: [e for e in entities if e.represents.count() > 0].pop()
_first = lambda qs: qs[0]
if _len(entities) < 2:
raise RuntimeError("Need more than one ConceptEntity instance to merge")
# _concepts = list(set([v for v in _uri(entities) if v]))
# if len(_concepts) > 1:
# raise RuntimeError("Cannot merge two ConceptEntity instances with"
# " conflicting external concepts")
# _uri = _concepts[0] if _concepts else None
master = None
if master_id: # If a master is specified, use it...
try:
            master = _get_master(entities, master_id)
except:
pass
if not master:
# Prefer entities that are already representative.
try:
master = _get_rep(entities)
except:
pass
if not master:
try: # ...otherwise, try to use the first instance.
master = _first(entities)
except AssertionError: # If a slice has already been taken.
master = entities[0]
concepts = filter(lambda pk: pk is not None, entities.values_list('concept__id', flat=True))
if concepts:
master.concept.add(*Concept.objects.filter(pk__in=concepts))
master.save()
identity = Identity.objects.create(
created_by = user,
representative = master,
)
identity.entities.add(*entities)
map(lambda e: e.identities.update(representative=master), entities)
return master
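# Illustrative usage (added for clarity, not part of the original module).
# ``entities`` may be a QuerySet or a list of ConceptEntity instances:
#
#   entities = ConceptEntity.objects.filter(name='Charles Darwin')
#   master = merge_conceptentities(entities, user=request.user)
#
# The entities are grouped under a new Identity whose representative is
# ``master``; nothing is deleted.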
def merge_resources(resources, master_id=None, delete=True, user=None):
"""
Merge selected resources to a single resource.
Parameters
-------------
resources : ``QuerySet``
The :class:`.Resource` instances that will be merged.
master_id : int
(optional) The primary key of the :class:`.Resource` to use as the
"master" instance into which the remaining instances will be merged.
Returns
-------
master : :class:`.Resource`
Raises
------
RuntimeError
        If fewer than two :class:`.Resource` instances are present in
``resources``, or if :class:`.Resource` instances are not the
same with respect to content.
"""
resource_type = ContentType.objects.get_for_model(Resource)
if resources.count() < 2:
raise RuntimeError("Need more than one Resource instance to merge")
with_content = resources.filter(content_resource=True)
if with_content.count() != 0 and with_content.count() != resources.count():
raise RuntimeError("Cannot merge content and non-content resources")
if user is None:
user, _ = User.objects.get_or_create(username='AnonymousUser')
if master_id:
master = resources.get(pk=master_id)
else:
master = resources.first()
to_merge = resources.filter(~Q(pk=master.id))
for resource in to_merge:
_transfer_all_relations(resource, master, resource_type)
resource.content.all().update(for_resource=master)
for rel in ['resource_set', 'conceptentity_set', 'relation_set', 'content_relations', 'value_set']:
getattr(resource.container, rel).update(container_id=master.container.id)
# for collection in resource.part_of.all():
# master.part_of.add(collection)
prune_relations(master, user)
master.save()
if delete:
to_merge.delete()
return master
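# Illustrative usage (added for clarity, not part of the original module):
#
#   duplicates = Resource.objects.filter(name='Some duplicated title')
#   master = merge_resources(duplicates, user=request.user)
#
# Relations, content and container contents are transferred to ``master``;
# the remaining resources are deleted unless ``delete=False`` is passed.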
def add_resources_to_collection(resources, collection):
"""
    Add the selected resources to a collection.
    At least one resource must be provided, and a single target collection
    must be selected.
    Returns the collection after making changes.
Parameters
-------------
resources : ``QuerySet``
The :class:`.Resource` instances that will be added to ``collection``.
collection : :class:`.Collection`
The :class:`.Collection` instance to which ``resources`` will be added.
Returns
---------
collection : :class:`.Collection`
Updated :class:`.Collection` instance.
Raises
------
RuntimeError
        If no :class:`.Resource` instances are in the queryset,
        or if ``collection`` is not a :class:`.Collection` instance.
"""
if resources.count() < 1 :
raise RuntimeError("Need at least one resource to add to collection.")
if not isinstance(collection, Collection):
raise RuntimeError("Invalid collection to add resources to.")
collection.resources.add(*map(lambda r: r.container, resources))
collection.save()
return collection
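# Illustrative usage (added for clarity, not part of the original module):
#
#   add_resources_to_collection(Resource.objects.filter(pk__in=ids), collection)
#
# Each resource's container is added to ``collection.resources``.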
def isolate_conceptentity(instance):
"""
Clone ``instance`` (and its relations) such that there is a separate
:class:`.ConceptEntity` instance for each related :class:`.Resource`\.
Prior to 0.3, merging involved actually combining records (and deleting all
but one). As of 0.4, merging does not result in deletion or combination,
    but rather the creation of a :class:`.Identity`\.
Parameters
----------
instance : :class:`.ConceptEntity`
"""
if instance.relations_to.count() <= 1:
return
entities = []
for relation in instance.relations_to.all():
clone = copy.copy(instance)
clone.pk = None
clone.save()
relation.target = clone
relation.save()
for alt_relation in instance.relations_from.all():
alt_relation_target = alt_relation.target
cloned_relation_target = copy.copy(alt_relation_target)
cloned_relation_target.pk = None
cloned_relation_target.save()
cloned_relation = copy.copy(alt_relation)
cloned_relation.pk = None
cloned_relation.save()
cloned_relation.source = clone
cloned_relation.target = cloned_relation_target
cloned_relation.save()
entities.append(clone)
merge_conceptentities(entities, user=instance.created_by)
def generate_collection_coauthor_graph(collection,
author_predicate_uri="http://purl.org/net/biblio#authors"):
"""
Create a graph describing co-occurrences of :class:`.ConceptEntity`
instances linked to individual :class:`.Resource` instances via an
authorship :class:`.Relation` instance.
Parameters
----------
collection : :class:`.Collection`
author_predicate_uri : str
Defaults to the Biblio #authors predicate. This is the predicate that
will be used to identify author :class:`.Relation` instances.
Returns
-------
:class:`networkx.Graph`
Nodes will be :class:`.ConceptEntity` PK ids (int), edges will indicate
co-authorship; each edge should have a ``weight`` attribute indicating
the number of :class:`.Resource` instances on which the pair of CEs are
co-located.
"""
# This is a check to see if the collection parameter is an instance of the
    # :class:`.Collection`. If it is not, a RuntimeError is raised.
if not isinstance(collection, Collection):
raise RuntimeError("Invalid collection to export co-author data from")
resource_type_id = ContentType.objects.get_for_model(Resource).id
# This will hold node attributes for all ConceptEntity instances across the
# entire collection.
node_labels = {}
node_uris = {}
# Since a particular pair of ConceptEntity instances may co-occur on more
# than one Resource in this Collection, we compile the number of
# co-occurrences prior to building the networkx Graph object.
edges = Counter()
# The co-occurrence graph will be comprised of ConceptEntity instances
# (identified by their PK ids. An edge between two nodes indicates that
# the two constituent CEs occur together on the same Resource (with an
# author Relation). A ``weight`` attribute on each edge will record the
# number of Resource instances on which each respective pair of CEs
# co-occur.
for resource_id in collection.resourcecontainer_set.values_list('primary__id', flat=True):
# We only need a few columns from the ConceptEntity table, from rows
# referenced by responding Relations.
author_relations = Relation.objects\
.filter(source_type_id=resource_type_id,
source_instance_id=resource_id,
predicate__uri=author_predicate_uri)\
.prefetch_related('target')
        # If there are fewer than two author relations, the resource cannot
        # contribute any co-authorship edges, so skip it.
if author_relations.count() <= 1:
continue
ids, labels, uris = zip(*list(set(((r.target.id, r.target.name, r.target.uri) for r in author_relations))))
# It doesn't matter if we overwrite node attribute values, since they
# won't vary.
node_labels.update(dict(zip(ids, labels)))
node_uris.update(dict(zip(ids, uris)))
# The keys here are ConceptEntity PK ids, which will be the primary
# identifiers used in the graph.
for edge in combinations(ids, 2):
edges[edge] += 1
# Instantiate the Graph from the edge data generated above.
graph = nx.Graph()
for (u, v), weight in edges.iteritems():
graph.add_edge(u, v, weight=weight)
# This is more efficient than setting the node attribute as we go along.
# If there is only one author, there is no need to set node attributes as
# there is no co-authorship for that Collection.
if len(node_labels.keys()) > 1:
nx.set_node_attributes(graph, 'label', node_labels)
nx.set_node_attributes(graph, 'uri', node_uris)
return graph
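# Illustrative usage (added for clarity, not part of the original module):
#
#   graph = generate_collection_coauthor_graph(collection)
#   nx.write_gexf(graph, 'coauthors.gexf')   # e.g. for inspection in Gephi
#
# Nodes are ConceptEntity primary keys with 'label' and 'uri' attributes; edge
# weights count the resources on which two authors co-occur.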
def ping_remote_resource(path):
"""
Check whether a remote resource is accessible.
"""
try:
response = requests.head(path)
except requests.exceptions.ConnectTimeout:
return False, {}
return response.status_code == requests.codes.ok, response.headers
| gpl-3.0 | -5,096,327,721,887,805,000 | 34.651111 | 132 | 0.634545 | false |
stefano-meschiari/SMESCHIA | .emacs.d/elpa/elpy-20140810.7/elpy/tests/test_pydocutils.py | 6 | 3370 | import os
import unittest
import shutil
import sys
import tempfile
import mock
import elpy.pydocutils
class TestGetPydocCompletions(unittest.TestCase):
def test_should_return_top_level_modules(self):
modules = elpy.pydocutils.get_pydoc_completions("")
self.assertIn('sys', modules)
self.assertIn('json', modules)
self.assertIn('elpy', modules)
def test_should_return_submodules(self):
modules = elpy.pydocutils.get_pydoc_completions("elpy")
self.assertIn("elpy.rpc", modules)
self.assertIn("elpy.server", modules)
modules = elpy.pydocutils.get_pydoc_completions("os")
self.assertIn("os.path", modules)
def test_should_find_objects_in_module(self):
self.assertIn("elpy.tests.test_pydocutils.TestGetPydocCompletions",
elpy.pydocutils.get_pydoc_completions
("elpy.tests.test_pydocutils"))
def test_should_find_attributes_of_objects(self):
attribs = elpy.pydocutils.get_pydoc_completions(
"elpy.tests.test_pydocutils.TestGetPydocCompletions")
self.assertIn("elpy.tests.test_pydocutils.TestGetPydocCompletions."
"test_should_find_attributes_of_objects",
attribs)
def test_should_return_none_for_inexisting_module(self):
self.assertEqual([],
elpy.pydocutils.get_pydoc_completions
("does_not_exist"))
def test_should_work_for_unicode_strings(self):
self.assertIsNotNone(elpy.pydocutils.get_pydoc_completions
(u"sys"))
def test_should_find_partial_completions(self):
self.assertIn("multiprocessing",
elpy.pydocutils.get_pydoc_completions
("multiprocess"))
self.assertIn("multiprocessing.util",
elpy.pydocutils.get_pydoc_completions
("multiprocessing.ut"))
def test_should_ignore_trailing_dot(self):
self.assertIn("elpy.pydocutils",
elpy.pydocutils.get_pydoc_completions
("elpy."))
class TestGetModules(unittest.TestCase):
def test_should_return_top_level_modules(self):
modules = elpy.pydocutils.get_modules()
self.assertIn('sys', modules)
self.assertIn('json', modules)
self.assertIn('elpy', modules)
def test_should_return_submodules(self):
modules = elpy.pydocutils.get_modules("elpy")
self.assertIn("rpc", modules)
self.assertIn("server", modules)
@mock.patch.object(elpy.pydocutils, 'safeimport')
def test_should_catch_import_errors(self, safeimport):
def raise_function(message):
raise elpy.pydocutils.ErrorDuringImport(message,
(None, None, None))
safeimport.side_effect = raise_function
self.assertEqual([], elpy.pydocutils.get_modules("foo.bar"))
def test_should_not_fail_for_permission_denied(self):
tmpdir = tempfile.mkdtemp(prefix="test-elpy-get-modules-")
sys.path.append(tmpdir)
os.chmod(tmpdir, 0o000)
try:
elpy.pydocutils.get_modules()
finally:
os.chmod(tmpdir, 0o755)
shutil.rmtree(tmpdir)
sys.path.remove(tmpdir)
| mit | -7,241,288,110,066,562,000 | 36.444444 | 75 | 0.616914 | false |
ToontownUprising/src | toontown/toonbase/ContentPacksManager.py | 3 | 4454 | from direct.directnotify.DirectNotifyGlobal import directNotify
import fnmatch
import os
from panda3d.core import Multifile, Filename, VirtualFileSystem
import yaml
APPLICABLE_FILE_PATTERNS = ('*.mf', 'ambience.yaml')
CONTENT_EXT_WHITELIST = ('.jpg', '.jpeg', '.rgb', '.png', '.ogg', '.ttf')
class ContentPackError(Exception):
pass
class ContentPacksManager:
notify = directNotify.newCategory('ContentPacksManager')
notify.setInfo(True)
def __init__(self, filepath='contentpacks/', sortFilename='sort.yaml'):
self.filepath = filepath
self.sortFilename = os.path.join(self.filepath, sortFilename)
if __debug__:
self.mountPoint = '../resources'
else:
self.mountPoint = '/'
self.vfs = VirtualFileSystem.getGlobalPtr()
self.sort = []
self.ambience = {}
def isApplicable(self, filename):
"""
Returns whether or not the specified file is applicable.
"""
# Does this file exist?
if not os.path.exists(os.path.join(self.filepath, filename)):
return False
# Does this file match one of the applicable file patterns?
basename = os.path.basename(filename)
for pattern in APPLICABLE_FILE_PATTERNS:
if fnmatch.fnmatch(basename, pattern):
return True
return False
def applyMultifile(self, filename):
"""
Apply the specified multifile.
"""
mf = Multifile()
mf.openReadWrite(Filename(os.path.join(self.filepath, filename)))
# Discard content with non-whitelisted extensions:
for subfileName in mf.getSubfileNames():
ext = os.path.splitext(subfileName)[1]
if ext not in CONTENT_EXT_WHITELIST:
mf.removeSubfile(subfileName)
self.vfs.mount(mf, self.mountPoint, 0)
def applyAmbience(self, filename):
"""
Apply the specified ambience configuration file.
"""
with open(os.path.join(self.filepath, filename), 'r') as f:
self.ambience.update(yaml.load(f) or {})
def apply(self, filename):
"""
Apply the specified content pack file.
"""
self.notify.info('Applying %s...' % filename)
basename = os.path.basename(filename)
if basename.endswith('.mf'):
self.applyMultifile(filename)
elif basename == 'ambience.yaml':
self.applyAmbience(filename)
def applyAll(self):
"""
Using the sort configuration, recursively apply all applicable content
pack files under the configured content packs directory.
"""
# First, read the sort configuration:
self.readSortConfig()
# Next, apply the sorted files:
for filename in self.sort[:]:
if self.isApplicable(filename):
self.apply(filename)
else:
self.notify.warning('Invalidating %s...' % filename)
self.sort.remove(filename)
# Apply the non-sorted files:
for root, _, filenames in os.walk(self.filepath):
root = root[len(self.filepath):]
for filename in filenames:
filename = os.path.join(root, filename).replace('\\', '/')
# Ensure this file isn't sorted:
if filename in self.sort:
continue
# Ensure this file is applicable:
if not self.isApplicable(filename):
continue
# Apply this file, and add it to the sort configuration:
self.apply(filename)
self.sort.append(filename)
# Finally, write the new sort configuration:
self.writeSortConfig()
def readSortConfig(self):
"""
Read the sort configuration.
"""
if not os.path.exists(self.sortFilename):
return
with open(self.sortFilename, 'r') as f:
self.sort = yaml.load(f) or []
def writeSortConfig(self):
"""
Write the sort configuration to disk.
"""
with open(self.sortFilename, 'w') as f:
for filename in self.sort:
f.write('- %s\n' % filename)
def getAmbience(self, group):
"""
Returns the ambience configurations for the specified group.
"""
return self.ambience.get(group, {})
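# Illustrative usage (added for clarity, not part of the original module; the
# manager instance and group name below are assumptions):
#
#   contentPacksMgr = ContentPacksManager(filepath='contentpacks/')
#   contentPacksMgr.applyAll()
#   ambience = contentPacksMgr.getAmbience('battle')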
| mit | -3,879,206,102,597,707,000 | 30.366197 | 78 | 0.583071 | false |
nesdis/djongo | tests/django_tests/tests/v22/tests/db_functions/text/test_trim.py | 71 | 1357 | from django.db.models import CharField
from django.db.models.functions import LTrim, RTrim, Trim
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import Author
class TrimTests(TestCase):
def test_trim(self):
Author.objects.create(name=' John ', alias='j')
Author.objects.create(name='Rhonda', alias='r')
authors = Author.objects.annotate(
ltrim=LTrim('name'),
rtrim=RTrim('name'),
trim=Trim('name'),
)
self.assertQuerysetEqual(
authors.order_by('alias'), [
('John ', ' John', 'John'),
('Rhonda', 'Rhonda', 'Rhonda'),
],
lambda a: (a.ltrim, a.rtrim, a.trim)
)
def test_trim_transform(self):
Author.objects.create(name=' John ')
Author.objects.create(name='Rhonda')
tests = (
(LTrim, 'John '),
(RTrim, ' John'),
(Trim, 'John'),
)
for transform, trimmed_name in tests:
with self.subTest(transform=transform):
with register_lookup(CharField, transform):
authors = Author.objects.filter(**{'name__%s' % transform.lookup_name: trimmed_name})
self.assertQuerysetEqual(authors, [' John '], lambda a: a.name)
| agpl-3.0 | -2,442,543,236,167,448,000 | 34.710526 | 105 | 0.556374 | false |
listamilton/supermilton.repository | script.module.youtube.dl/lib/youtube_dl/postprocessor/ffmpeg.py | 13 | 22506 | from __future__ import unicode_literals
import io
import os
import subprocess
import time
from .common import AudioConversionError, PostProcessor
from ..compat import (
compat_subprocess_get_DEVNULL,
)
from ..utils import (
encodeArgument,
encodeFilename,
get_exe_version,
is_outdated_version,
PostProcessingError,
prepend_extension,
shell_quote,
subtitles_filename,
dfxp2srt,
ISO639Utils,
)
EXT_TO_OUT_FORMATS = {
"aac": "adts",
"m4a": "ipod",
"mka": "matroska",
"mkv": "matroska",
"mpg": "mpeg",
"ogv": "ogg",
"ts": "mpegts",
"wma": "asf",
"wmv": "asf",
}
class FFmpegPostProcessorError(PostProcessingError):
pass
class FFmpegPostProcessor(PostProcessor):
def __init__(self, downloader=None):
PostProcessor.__init__(self, downloader)
self._determine_executables()
def check_version(self):
if not self.available:
raise FFmpegPostProcessorError('ffmpeg or avconv not found. Please install one.')
required_version = '10-0' if self.basename == 'avconv' else '1.0'
if is_outdated_version(
self._versions[self.basename], required_version):
warning = 'Your copy of %s is outdated, update %s to version %s or newer if you encounter any errors.' % (
self.basename, self.basename, required_version)
if self._downloader:
self._downloader.report_warning(warning)
@staticmethod
def get_versions(downloader=None):
return FFmpegPostProcessor(downloader)._versions
def _determine_executables(self):
programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
prefer_ffmpeg = False
self.basename = None
self.probe_basename = None
self._paths = None
self._versions = None
if self._downloader:
prefer_ffmpeg = self._downloader.params.get('prefer_ffmpeg', False)
location = self._downloader.params.get('ffmpeg_location')
if location is not None:
if not os.path.exists(location):
self._downloader.report_warning(
'ffmpeg-location %s does not exist! '
'Continuing without avconv/ffmpeg.' % (location))
self._versions = {}
return
elif not os.path.isdir(location):
basename = os.path.splitext(os.path.basename(location))[0]
if basename not in programs:
self._downloader.report_warning(
'Cannot identify executable %s, its basename should be one of %s. '
'Continuing without avconv/ffmpeg.' %
(location, ', '.join(programs)))
self._versions = {}
return None
location = os.path.dirname(os.path.abspath(location))
if basename in ('ffmpeg', 'ffprobe'):
prefer_ffmpeg = True
self._paths = dict(
(p, os.path.join(location, p)) for p in programs)
self._versions = dict(
(p, get_exe_version(self._paths[p], args=['-version']))
for p in programs)
if self._versions is None:
self._versions = dict(
(p, get_exe_version(p, args=['-version'])) for p in programs)
self._paths = dict((p, p) for p in programs)
if prefer_ffmpeg:
prefs = ('ffmpeg', 'avconv')
else:
prefs = ('avconv', 'ffmpeg')
for p in prefs:
if self._versions[p]:
self.basename = p
break
if prefer_ffmpeg:
prefs = ('ffprobe', 'avprobe')
else:
prefs = ('avprobe', 'ffprobe')
for p in prefs:
if self._versions[p]:
self.probe_basename = p
break
@property
def available(self):
return self.basename is not None
@property
def executable(self):
return self._paths[self.basename]
@property
def probe_available(self):
return self.probe_basename is not None
@property
def probe_executable(self):
return self._paths[self.probe_basename]
def run_ffmpeg_multiple_files(self, input_paths, out_path, opts):
self.check_version()
oldest_mtime = min(
os.stat(encodeFilename(path)).st_mtime for path in input_paths)
opts += self._configuration_args()
files_cmd = []
for path in input_paths:
files_cmd.extend([
encodeArgument('-i'),
encodeFilename(self._ffmpeg_filename_argument(path), True)
])
cmd = ([encodeFilename(self.executable, True), encodeArgument('-y')] +
files_cmd +
[encodeArgument(o) for o in opts] +
[encodeFilename(self._ffmpeg_filename_argument(out_path), True)])
if self._downloader.params.get('verbose', False):
self._downloader.to_screen('[debug] ffmpeg command line: %s' % shell_quote(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
stderr = stderr.decode('utf-8', 'replace')
msg = stderr.strip().split('\n')[-1]
raise FFmpegPostProcessorError(msg)
self.try_utime(out_path, oldest_mtime, oldest_mtime)
def run_ffmpeg(self, path, out_path, opts):
self.run_ffmpeg_multiple_files([path], out_path, opts)
def _ffmpeg_filename_argument(self, fn):
# Always use 'file:' because the filename may contain ':' (ffmpeg
# interprets that as a protocol) or can start with '-' (-- is broken in
# ffmpeg, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details)
# Also leave '-' intact in order not to break streaming to stdout.
return 'file:' + fn if fn != '-' else fn
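    # Illustrative behaviour of the helper above (comments added for clarity):
    #   _ffmpeg_filename_argument('video.mp4') -> 'file:video.mp4'
    #   _ffmpeg_filename_argument('-')         -> '-'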
class FFmpegExtractAudioPP(FFmpegPostProcessor):
def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False):
FFmpegPostProcessor.__init__(self, downloader)
if preferredcodec is None:
preferredcodec = 'best'
self._preferredcodec = preferredcodec
self._preferredquality = preferredquality
self._nopostoverwrites = nopostoverwrites
def get_audio_codec(self, path):
if not self.probe_available:
raise PostProcessingError('ffprobe or avprobe not found. Please install one.')
try:
cmd = [
encodeFilename(self.probe_executable, True),
encodeArgument('-show_streams'),
encodeFilename(self._ffmpeg_filename_argument(path), True)]
if self._downloader.params.get('verbose', False):
self._downloader.to_screen('[debug] %s command line: %s' % (self.basename, shell_quote(cmd)))
handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE, stdin=subprocess.PIPE)
output = handle.communicate()[0]
if handle.wait() != 0:
return None
except (IOError, OSError):
return None
audio_codec = None
for line in output.decode('ascii', 'ignore').split('\n'):
if line.startswith('codec_name='):
audio_codec = line.split('=')[1].strip()
elif line.strip() == 'codec_type=audio' and audio_codec is not None:
return audio_codec
return None
def run_ffmpeg(self, path, out_path, codec, more_opts):
if codec is None:
acodec_opts = []
else:
acodec_opts = ['-acodec', codec]
opts = ['-vn'] + acodec_opts + more_opts
try:
FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts)
except FFmpegPostProcessorError as err:
raise AudioConversionError(err.msg)
def run(self, information):
path = information['filepath']
filecodec = self.get_audio_codec(path)
if filecodec is None:
raise PostProcessingError('WARNING: unable to obtain file audio codec with ffprobe')
more_opts = []
if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
if filecodec == 'aac' and self._preferredcodec in ['m4a', 'best']:
# Lossless, but in another container
acodec = 'copy'
extension = 'm4a'
more_opts = ['-bsf:a', 'aac_adtstoasc']
elif filecodec in ['aac', 'mp3', 'vorbis', 'opus']:
# Lossless if possible
acodec = 'copy'
extension = filecodec
if filecodec == 'aac':
more_opts = ['-f', 'adts']
if filecodec == 'vorbis':
extension = 'ogg'
else:
# MP3 otherwise.
acodec = 'libmp3lame'
extension = 'mp3'
more_opts = []
if self._preferredquality is not None:
if int(self._preferredquality) < 10:
more_opts += ['-q:a', self._preferredquality]
else:
more_opts += ['-b:a', self._preferredquality + 'k']
else:
# We convert the audio (lossy)
acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'opus': 'opus', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec]
extension = self._preferredcodec
more_opts = []
if self._preferredquality is not None:
# The opus codec doesn't support the -aq option
if int(self._preferredquality) < 10 and extension != 'opus':
more_opts += ['-q:a', self._preferredquality]
else:
more_opts += ['-b:a', self._preferredquality + 'k']
if self._preferredcodec == 'aac':
more_opts += ['-f', 'adts']
if self._preferredcodec == 'm4a':
more_opts += ['-bsf:a', 'aac_adtstoasc']
if self._preferredcodec == 'vorbis':
extension = 'ogg'
if self._preferredcodec == 'wav':
extension = 'wav'
more_opts += ['-f', 'wav']
prefix, sep, ext = path.rpartition('.') # not os.path.splitext, since the latter does not work on unicode in all setups
new_path = prefix + sep + extension
# If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly.
if (new_path == path or
(self._nopostoverwrites and os.path.exists(encodeFilename(new_path)))):
self._downloader.to_screen('[ffmpeg] Post-process file %s exists, skipping' % new_path)
return [], information
try:
self._downloader.to_screen('[ffmpeg] Destination: ' + new_path)
self.run_ffmpeg(path, new_path, acodec, more_opts)
except AudioConversionError as e:
raise PostProcessingError(
'audio conversion failed: ' + e.msg)
except Exception:
raise PostProcessingError('error running ' + self.basename)
# Try to update the date time for extracted audio file.
if information.get('filetime') is not None:
self.try_utime(
new_path, time.time(), information['filetime'],
errnote='Cannot update utime of audio file')
information['filepath'] = new_path
information['ext'] = extension
return [path], information
class FFmpegVideoConvertorPP(FFmpegPostProcessor):
def __init__(self, downloader=None, preferedformat=None):
super(FFmpegVideoConvertorPP, self).__init__(downloader)
self._preferedformat = preferedformat
def run(self, information):
path = information['filepath']
if information['ext'] == self._preferedformat:
self._downloader.to_screen('[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat))
return [], information
options = []
if self._preferedformat == 'avi':
options.extend(['-c:v', 'libxvid', '-vtag', 'XVID'])
prefix, sep, ext = path.rpartition('.')
outpath = prefix + sep + self._preferedformat
self._downloader.to_screen('[' + 'ffmpeg' + '] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath)
self.run_ffmpeg(path, outpath, options)
information['filepath'] = outpath
information['format'] = self._preferedformat
information['ext'] = self._preferedformat
return [path], information
class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
def run(self, information):
if information['ext'] not in ('mp4', 'webm', 'mkv'):
self._downloader.to_screen('[ffmpeg] Subtitles can only be embedded in mp4, webm or mkv files')
return [], information
subtitles = information.get('requested_subtitles')
if not subtitles:
self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to embed')
return [], information
filename = information['filepath']
ext = information['ext']
sub_langs = []
sub_filenames = []
webm_vtt_warn = False
for lang, sub_info in subtitles.items():
sub_ext = sub_info['ext']
if ext != 'webm' or ext == 'webm' and sub_ext == 'vtt':
sub_langs.append(lang)
sub_filenames.append(subtitles_filename(filename, lang, sub_ext))
else:
if not webm_vtt_warn and ext == 'webm' and sub_ext != 'vtt':
webm_vtt_warn = True
self._downloader.to_screen('[ffmpeg] Only WebVTT subtitles can be embedded in webm files')
if not sub_langs:
return [], information
input_files = [filename] + sub_filenames
opts = [
'-map', '0',
'-c', 'copy',
# Don't copy the existing subtitles, we may be running the
# postprocessor a second time
'-map', '-0:s',
]
if information['ext'] == 'mp4':
opts += ['-c:s', 'mov_text']
for (i, lang) in enumerate(sub_langs):
opts.extend(['-map', '%d:0' % (i + 1)])
lang_code = ISO639Utils.short2long(lang)
if lang_code is not None:
opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])
temp_filename = prepend_extension(filename, 'temp')
self._downloader.to_screen('[ffmpeg] Embedding subtitles in \'%s\'' % filename)
self.run_ffmpeg_multiple_files(input_files, temp_filename, opts)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return sub_filenames, information
class FFmpegMetadataPP(FFmpegPostProcessor):
def run(self, info):
metadata = {}
def add(meta_list, info_list=None):
if not info_list:
info_list = meta_list
if not isinstance(meta_list, (list, tuple)):
meta_list = (meta_list,)
if not isinstance(info_list, (list, tuple)):
info_list = (info_list,)
for info_f in info_list:
if info.get(info_f) is not None:
for meta_f in meta_list:
metadata[meta_f] = info[info_f]
break
add('title', ('track', 'title'))
add('date', 'upload_date')
add(('description', 'comment'), 'description')
add('purl', 'webpage_url')
add('track', 'track_number')
add('artist', ('artist', 'creator', 'uploader', 'uploader_id'))
add('genre')
add('album')
add('album_artist')
add('disc', 'disc_number')
if not metadata:
self._downloader.to_screen('[ffmpeg] There isn\'t any metadata to add')
return [], info
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
if info['ext'] == 'm4a':
options = ['-vn', '-acodec', 'copy']
else:
options = ['-c', 'copy']
for (name, value) in metadata.items():
options.extend(['-metadata', '%s=%s' % (name, value)])
self._downloader.to_screen('[ffmpeg] Adding metadata to \'%s\'' % filename)
self.run_ffmpeg(filename, temp_filename, options)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return [], info
class FFmpegMergerPP(FFmpegPostProcessor):
def run(self, info):
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
args = ['-c', 'copy', '-map', '0:v:0', '-map', '1:a:0']
self._downloader.to_screen('[ffmpeg] Merging formats into "%s"' % filename)
self.run_ffmpeg_multiple_files(info['__files_to_merge'], temp_filename, args)
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return info['__files_to_merge'], info
def can_merge(self):
# TODO: figure out merge-capable ffmpeg version
if self.basename != 'avconv':
return True
required_version = '10-0'
if is_outdated_version(
self._versions[self.basename], required_version):
warning = ('Your copy of %s is outdated and unable to properly mux separate video and audio files, '
'youtube-dl will download single file media. '
'Update %s to version %s or newer to fix this.') % (
self.basename, self.basename, required_version)
if self._downloader:
self._downloader.report_warning(warning)
return False
return True
class FFmpegFixupStretchedPP(FFmpegPostProcessor):
def run(self, info):
stretched_ratio = info.get('stretched_ratio')
if stretched_ratio is None or stretched_ratio == 1:
return [], info
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
options = ['-c', 'copy', '-aspect', '%f' % stretched_ratio]
self._downloader.to_screen('[ffmpeg] Fixing aspect ratio in "%s"' % filename)
self.run_ffmpeg(filename, temp_filename, options)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return [], info
class FFmpegFixupM4aPP(FFmpegPostProcessor):
def run(self, info):
if info.get('container') != 'm4a_dash':
return [], info
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
options = ['-c', 'copy', '-f', 'mp4']
self._downloader.to_screen('[ffmpeg] Correcting container in "%s"' % filename)
self.run_ffmpeg(filename, temp_filename, options)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return [], info
class FFmpegFixupM3u8PP(FFmpegPostProcessor):
def run(self, info):
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
options = ['-c', 'copy', '-f', 'mp4', '-bsf:a', 'aac_adtstoasc']
        self._downloader.to_screen('[ffmpeg] Fixing malformed aac bitstream in "%s"' % filename)
self.run_ffmpeg(filename, temp_filename, options)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return [], info
class FFmpegSubtitlesConvertorPP(FFmpegPostProcessor):
def __init__(self, downloader=None, format=None):
super(FFmpegSubtitlesConvertorPP, self).__init__(downloader)
self.format = format
def run(self, info):
subs = info.get('requested_subtitles')
filename = info['filepath']
new_ext = self.format
new_format = new_ext
if new_format == 'vtt':
new_format = 'webvtt'
if subs is None:
self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to convert')
return [], info
self._downloader.to_screen('[ffmpeg] Converting subtitles')
sub_filenames = []
for lang, sub in subs.items():
ext = sub['ext']
if ext == new_ext:
self._downloader.to_screen(
'[ffmpeg] Subtitle file for %s is already in the requested'
'format' % new_ext)
continue
old_file = subtitles_filename(filename, lang, ext)
sub_filenames.append(old_file)
new_file = subtitles_filename(filename, lang, new_ext)
if ext == 'dfxp' or ext == 'ttml' or ext == 'tt':
self._downloader.report_warning(
'You have requested to convert dfxp (TTML) subtitles into another format, '
'which results in style information loss')
dfxp_file = old_file
srt_file = subtitles_filename(filename, lang, 'srt')
with io.open(dfxp_file, 'rt', encoding='utf-8') as f:
srt_data = dfxp2srt(f.read())
with io.open(srt_file, 'wt', encoding='utf-8') as f:
f.write(srt_data)
old_file = srt_file
subs[lang] = {
'ext': 'srt',
'data': srt_data
}
if new_ext == 'srt':
continue
else:
sub_filenames.append(srt_file)
self.run_ffmpeg(old_file, new_file, ['-f', new_format])
with io.open(new_file, 'rt', encoding='utf-8') as f:
subs[lang] = {
'ext': new_ext,
'data': f.read(),
}
return sub_filenames, info
| gpl-2.0 | -6,507,072,544,954,738,000 | 37.803448 | 157 | 0.557629 | false |
rosudrag/Freemium-winner | VirtualEnvironment/Lib/site-packages/pip-7.1.0-py3.4.egg/pip/_vendor/requests/packages/urllib3/connection.py | 483 | 9011 | import datetime
import sys
import socket
from socket import timeout as SocketTimeout
import warnings
from .packages import six
try: # Python 3
from http.client import HTTPConnection as _HTTPConnection, HTTPException
except ImportError:
from httplib import HTTPConnection as _HTTPConnection, HTTPException
class DummyConnection(object):
"Used to detect a failed ConnectionCls import."
pass
try: # Compiled with SSL?
HTTPSConnection = DummyConnection
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
ssl = None
class BaseSSLError(BaseException):
pass
try: # Python 3:
# Not a no-op, we're adding this to the namespace so it can be imported.
ConnectionError = ConnectionError
except NameError: # Python 2:
class ConnectionError(Exception):
pass
from .exceptions import (
ConnectTimeoutError,
SystemTimeWarning,
SecurityWarning,
)
from .packages.ssl_match_hostname import match_hostname
from .util.ssl_ import (
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
assert_fingerprint,
)
from .util import connection
port_by_scheme = {
'http': 80,
'https': 443,
}
RECENT_DATE = datetime.date(2014, 1, 1)
class HTTPConnection(_HTTPConnection, object):
"""
Based on httplib.HTTPConnection but provides an extra constructor
backwards-compatibility layer between older and newer Pythons.
Additional keyword parameters are used to configure attributes of the connection.
Accepted parameters include:
- ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
- ``source_address``: Set the source address for the current connection.
.. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
- ``socket_options``: Set specific options on the underlying socket. If not specified, then
defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
For example, if you wish to enable TCP Keep Alive in addition to the defaults,
you might pass::
HTTPConnection.default_socket_options + [
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
]
Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
"""
default_port = port_by_scheme['http']
#: Disable Nagle's algorithm by default.
#: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
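    # Illustrative sketch (added for clarity, not part of upstream urllib3):
    # enabling TCP keep-alive in addition to the defaults when constructing a
    # connection, as described in the class docstring above:
    #
    #   conn = HTTPConnection('example.com', 80, socket_options=
    #       HTTPConnection.default_socket_options +
    #       [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)])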
#: Whether this connection verifies the host's certificate.
is_verified = False
def __init__(self, *args, **kw):
if six.PY3: # Python 3
kw.pop('strict', None)
# Pre-set source_address in case we have an older Python like 2.6.
self.source_address = kw.get('source_address')
if sys.version_info < (2, 7): # Python 2.6
# _HTTPConnection on Python 2.6 will balk at this keyword arg, but
# not newer versions. We can still use it when creating a
# connection though, so we pop it *after* we have saved it as
# self.source_address.
kw.pop('source_address', None)
#: The socket options provided by the user. If no options are
#: provided, we use the default options.
self.socket_options = kw.pop('socket_options', self.default_socket_options)
# Superclass also sets self.source_address in Python 2.7+.
_HTTPConnection.__init__(self, *args, **kw)
def _new_conn(self):
""" Establish a socket connection and set nodelay settings on it.
:return: New socket connection.
"""
extra_kw = {}
if self.source_address:
extra_kw['source_address'] = self.source_address
if self.socket_options:
extra_kw['socket_options'] = self.socket_options
try:
conn = connection.create_connection(
(self.host, self.port), self.timeout, **extra_kw)
except SocketTimeout:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
return conn
def _prepare_conn(self, conn):
self.sock = conn
# the _tunnel_host attribute was added in python 2.6.3 (via
# http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
# not have them.
if getattr(self, '_tunnel_host', None):
# TODO: Fix tunnel so it doesn't depend on self.sock state.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
class HTTPSConnection(HTTPConnection):
default_port = port_by_scheme['https']
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **kw):
HTTPConnection.__init__(self, host, port, strict=strict,
timeout=timeout, **kw)
self.key_file = key_file
self.cert_file = cert_file
# Required property for Google AppEngine 1.9.0 which otherwise causes
# HTTPS requests to go out as HTTP. (See Issue #356)
self._protocol = 'https'
def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
self.sock = ssl.wrap_socket(conn, self.key_file, self.cert_file)
class VerifiedHTTPSConnection(HTTPSConnection):
"""
Based on httplib.HTTPSConnection but wraps the socket with
SSL certification.
"""
cert_reqs = None
ca_certs = None
ssl_version = None
assert_fingerprint = None
def set_cert(self, key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None,
assert_hostname=None, assert_fingerprint=None):
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def connect(self):
# Add certificate verification
conn = self._new_conn()
resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
resolved_ssl_version = resolve_ssl_version(self.ssl_version)
hostname = self.host
if getattr(self, '_tunnel_host', None):
# _tunnel_host was added in Python 2.6.3
# (See: http://hg.python.org/cpython/rev/0f57b30a152f)
self.sock = conn
# Calls self._set_hostport(), so self.host is
# self._tunnel_host below.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
# Override the host with the one we're requesting data from.
hostname = self._tunnel_host
is_time_off = datetime.date.today() < RECENT_DATE
if is_time_off:
warnings.warn((
'System time is way off (before {0}). This will probably '
'lead to SSL verification errors').format(RECENT_DATE),
SystemTimeWarning
)
# Wrap socket using verification with the root certs in
# trusted_root_certs
self.sock = ssl_wrap_socket(conn, self.key_file, self.cert_file,
cert_reqs=resolved_cert_reqs,
ca_certs=self.ca_certs,
server_hostname=hostname,
ssl_version=resolved_ssl_version)
if self.assert_fingerprint:
assert_fingerprint(self.sock.getpeercert(binary_form=True),
self.assert_fingerprint)
elif resolved_cert_reqs != ssl.CERT_NONE \
and self.assert_hostname is not False:
cert = self.sock.getpeercert()
if not cert.get('subjectAltName', ()):
warnings.warn((
'Certificate has no `subjectAltName`, falling back to check for a `commonName` for now. '
'This feature is being removed by major browsers and deprecated by RFC 2818. '
'(See https://github.com/shazow/urllib3/issues/497 for details.)'),
SecurityWarning
)
match_hostname(cert, self.assert_hostname or hostname)
self.is_verified = (resolved_cert_reqs == ssl.CERT_REQUIRED
or self.assert_fingerprint is not None)
if ssl:
# Make a copy for testing.
UnverifiedHTTPSConnection = HTTPSConnection
HTTPSConnection = VerifiedHTTPSConnection
else:
HTTPSConnection = DummyConnection
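# --- Illustrative usage sketch (added commentary, not part of the original
# module): how VerifiedHTTPSConnection is typically driven -- set_cert()
# configures verification and connect() wraps and verifies the socket. The
# host name and CA bundle path are placeholders.
def _example_verified_connection():
    conn = VerifiedHTTPSConnection('example.com', 443)
    conn.set_cert(cert_reqs='CERT_REQUIRED',
                  ca_certs='/path/to/ca_bundle.pem')
    conn.connect()  # wraps the socket with SSL and checks the certificate
    return conn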
| mit | 4,082,394,298,936,221,700 | 33.132576 | 109 | 0.616025 | false |
bgarnaat/codewars_katas | src/python/6kyu/string_average/test_string_average.py | 1 | 1185 | """
TEST CASES:
Test.describe("Basic tests")
Test.assert_equals(average_string("zero nine five two"), "four")
Test.assert_equals(average_string("four six two three"), "three")
Test.assert_equals(average_string("one two three four five"), "three")
Test.assert_equals(average_string("five four"), "four")
Test.assert_equals(average_string("zero zero zero zero zero"), "zero")
Test.assert_equals(average_string("one one eight one"), "two")
Test.assert_equals(average_string("one"), "one")
Test.assert_equals(average_string(""), "n/a")
Test.assert_equals(average_string("ten"), "n/a")
Test.assert_equals(average_string("pippi"), "n/a")
"""
import pytest
TEST_CASES = [
("zero nine five two", "four"),
("four six two three", "three"),
("one two three four five", "three"),
("five four", "four"),
("zero zero zero zero zero", "zero"),
("one one eight one", "two"),
("one", "one"),
("", "n/a"),
("ten", "n/a"),
("pippi", "n/a"),
]
@pytest.mark.parametrize('test_input, test_output', TEST_CASES)
def test_string_average(test_input, test_output):
from string_average import average_string
assert average_string(test_input) == test_output
| mit | 1,762,866,812,907,619,300 | 29.384615 | 70 | 0.654852 | false |
DigitalSlideArchive/HistomicsTK | histomicstk/features/compute_intensity_features.py | 1 | 5874 | """Compute intensity features in labeled image."""
import numpy as np
import pandas as pd
import scipy.stats
from skimage.measure import regionprops
def compute_intensity_features(
im_label, im_intensity, num_hist_bins=10,
rprops=None, feature_list=None):
"""Calculate intensity features from an intensity image.
Parameters
----------
im_label : array_like
A labeled mask image wherein intensity of a pixel is the ID of the
object it belongs to. Non-zero values are considered to be foreground
objects.
im_intensity : array_like
Intensity image.
num_hist_bins: int, optional
Number of bins used to computed the intensity histogram of an object.
Histogram is used to energy and entropy features. Default is 10.
rprops : output of skimage.measure.regionprops, optional
        rprops = skimage.measure.regionprops(im_label). If rprops is not
        passed, it will be computed internally, which will increase the
        computation time.
feature_list : list, default is None
list of intensity features to return.
If none, all intensity features are returned.
Returns
-------
fdata: pandas.DataFrame
A pandas dataframe containing the intensity features listed below for
each object/label.
Notes
-----
List of intensity features computed by this function:
Intensity.Min : float
Minimum intensity of object pixels.
Intensity.Max : float
Maximum intensity of object pixels.
    Intensity.Mean : float
        Mean intensity of object pixels.
    Intensity.Median : float
        Median intensity of object pixels.
    Intensity.MeanMedianDiff : float
        Difference between mean and median intensities of object pixels.
    Intensity.Std : float
        Standard deviation of the intensities of object pixels.
    Intensity.IQR : float
        Inter-quartile range of the intensities of object pixels.
    Intensity.MAD : float
        Median absolute deviation of the intensities of object pixels.
Intensity.Skewness : float
Skewness of the intensities of object pixels. Value is 0 when all
intensity values are equal.
Intensity.Kurtosis : float
Kurtosis of the intensities of object pixels. Value is -3 when all
values are equal.
Intensity.HistEnergy : float
Energy of the intensity histogram of object pixels
Intensity.HistEntropy : float
Entropy of the intensity histogram of object pixels.
References
----------
.. [#] Daniel Zwillinger and Stephen Kokoska. "CRC standard probability
and statistics tables and formulae," Crc Press, 1999.
"""
default_feature_list = [
'Intensity.Min',
'Intensity.Max',
'Intensity.Mean',
'Intensity.Median',
'Intensity.MeanMedianDiff',
'Intensity.Std',
'Intensity.IQR',
'Intensity.MAD',
'Intensity.Skewness',
'Intensity.Kurtosis',
'Intensity.HistEnergy',
'Intensity.HistEntropy',
]
# List of feature names
if feature_list is None:
feature_list = default_feature_list
else:
assert all(j in default_feature_list for j in feature_list), \
"Some feature names are not recognized."
# compute object properties if not provided
if rprops is None:
rprops = regionprops(im_label)
# create pandas data frame containing the features for each object
numFeatures = len(feature_list)
numLabels = len(rprops)
fdata = pd.DataFrame(np.zeros((numLabels, numFeatures)),
columns=feature_list)
# conditionally execute calculations if x in the features list
def _conditional_execution(feature, func, *args, **kwargs):
if feature in feature_list:
fdata.at[i, feature] = func(*args, **kwargs)
def _return_input(x):
return x
for i in range(numLabels):
# get intensities of object pixels
pixelIntensities = np.sort(
im_intensity[rprops[i].coords[:, 0], rprops[i].coords[:, 1]]
)
# simple descriptors
meanIntensity = np.mean(pixelIntensities)
medianIntensity = np.median(pixelIntensities)
_conditional_execution('Intensity.Min', np.min, pixelIntensities)
_conditional_execution('Intensity.Max', np.max, pixelIntensities)
_conditional_execution('Intensity.Mean', _return_input, meanIntensity)
_conditional_execution(
'Intensity.Median', _return_input, medianIntensity)
_conditional_execution(
'Intensity.MeanMedianDiff', _return_input,
meanIntensity - medianIntensity)
_conditional_execution('Intensity.Std', np.std, pixelIntensities)
_conditional_execution(
'Intensity.Skewness', scipy.stats.skew, pixelIntensities)
_conditional_execution(
'Intensity.Kurtosis', scipy.stats.kurtosis, pixelIntensities)
# inter-quartile range
_conditional_execution(
'Intensity.IQR', scipy.stats.iqr, pixelIntensities)
# median absolute deviation
_conditional_execution(
'Intensity.MAD', np.median,
np.abs(pixelIntensities - medianIntensity))
# histogram-based features
if any(j in feature_list for j in [
'Intensity.HistEntropy', 'Intensity.HistEnergy']):
# compute intensity histogram
hist, bins = np.histogram(pixelIntensities, bins=num_hist_bins)
prob = hist/np.sum(hist, dtype=np.float32)
# entropy and energy
_conditional_execution(
'Intensity.HistEntropy', scipy.stats.entropy, prob)
_conditional_execution('Intensity.HistEnergy', np.sum, prob**2)
return fdata
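# --- Illustrative usage sketch (added commentary, not part of the original
# module). The label mask and intensity image below are synthetic stand-ins
# for a real segmentation and grayscale image.
def _example_compute_intensity_features():
    im_label = np.zeros((64, 64), dtype=int)
    im_label[8:24, 8:24] = 1      # first object
    im_label[40:56, 40:56] = 2    # second object
    im_intensity = np.random.RandomState(0).rand(64, 64)
    # compute a subset of features; returns one row per labeled object
    return compute_intensity_features(
        im_label, im_intensity,
        feature_list=['Intensity.Mean', 'Intensity.Std',
                      'Intensity.HistEntropy'])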
| apache-2.0 | -7,113,539,644,315,206,000 | 32.375 | 78 | 0.651515 | false |
Tigerwhit4/taiga-back | taiga/projects/custom_attributes/serializers.py | 18 | 5454 | # Copyright (C) 2015 Andrey Antukh <[email protected]>
# Copyright (C) 2015 Jesús Espino <[email protected]>
# Copyright (C) 2015 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.apps import apps
from django.utils.translation import ugettext_lazy as _
from taiga.base.fields import JsonField
from taiga.base.api.serializers import ValidationError
from taiga.base.api.serializers import ModelSerializer
from . import models
######################################################
# Custom Attribute Serializer
#######################################################
class BaseCustomAttributeSerializer(ModelSerializer):
class Meta:
read_only_fields = ('id',)
exclude = ('created_date', 'modified_date')
def _validate_integrity_between_project_and_name(self, attrs, source):
"""
Check the name is not duplicated in the project. Check when:
- create a new one
- update the name
- update the project (move to another project)
"""
data_id = attrs.get("id", None)
data_name = attrs.get("name", None)
data_project = attrs.get("project", None)
if self.object:
data_id = data_id or self.object.id
data_name = data_name or self.object.name
data_project = data_project or self.object.project
model = self.Meta.model
qs = (model.objects.filter(project=data_project, name=data_name)
.exclude(id=data_id))
if qs.exists():
raise ValidationError(_("Already exists one with the same name."))
return attrs
def validate_name(self, attrs, source):
return self._validate_integrity_between_project_and_name(attrs, source)
def validate_project(self, attrs, source):
return self._validate_integrity_between_project_and_name(attrs, source)
class UserStoryCustomAttributeSerializer(BaseCustomAttributeSerializer):
class Meta(BaseCustomAttributeSerializer.Meta):
model = models.UserStoryCustomAttribute
class TaskCustomAttributeSerializer(BaseCustomAttributeSerializer):
class Meta(BaseCustomAttributeSerializer.Meta):
model = models.TaskCustomAttribute
class IssueCustomAttributeSerializer(BaseCustomAttributeSerializer):
class Meta(BaseCustomAttributeSerializer.Meta):
model = models.IssueCustomAttribute
######################################################
# Custom Attribute Serializer
#######################################################
class BaseCustomAttributesValuesSerializer(ModelSerializer):
attributes_values = JsonField(source="attributes_values", label="attributes values")
_custom_attribute_model = None
_container_field = None
class Meta:
exclude = ("id",)
def validate_attributes_values(self, attrs, source):
# values must be a dict
data_values = attrs.get("attributes_values", None)
if self.object:
data_values = (data_values or self.object.attributes_values)
if type(data_values) is not dict:
raise ValidationError(_("Invalid content. It must be {\"key\": \"value\",...}"))
# Values keys must be in the container object project
data_container = attrs.get(self._container_field, None)
if data_container:
project_id = data_container.project_id
elif self.object:
project_id = getattr(self.object, self._container_field).project_id
else:
project_id = None
values_ids = list(data_values.keys())
qs = self._custom_attribute_model.objects.filter(project=project_id,
id__in=values_ids)
if qs.count() != len(values_ids):
raise ValidationError(_("It contain invalid custom fields."))
return attrs
class UserStoryCustomAttributesValuesSerializer(BaseCustomAttributesValuesSerializer):
_custom_attribute_model = models.UserStoryCustomAttribute
_container_model = "userstories.UserStory"
_container_field = "user_story"
class Meta(BaseCustomAttributesValuesSerializer.Meta):
model = models.UserStoryCustomAttributesValues
class TaskCustomAttributesValuesSerializer(BaseCustomAttributesValuesSerializer, ModelSerializer):
_custom_attribute_model = models.TaskCustomAttribute
_container_field = "task"
class Meta(BaseCustomAttributesValuesSerializer.Meta):
model = models.TaskCustomAttributesValues
class IssueCustomAttributesValuesSerializer(BaseCustomAttributesValuesSerializer, ModelSerializer):
_custom_attribute_model = models.IssueCustomAttribute
_container_field = "issue"
class Meta(BaseCustomAttributesValuesSerializer.Meta):
model = models.IssueCustomAttributesValues
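# --- Illustrative note (added commentary, not part of the original module).
# The "attributes_values" field validated above is a plain JSON object mapping
# custom-attribute ids (which must belong to the container's project) to
# values, e.g. for a user story (ids below are made up):
#
#   {"user_story": 1234, "attributes_values": {"17": "high", "21": "2015-06-01"}}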
| agpl-3.0 | -8,455,804,908,561,824,000 | 36.6 | 99 | 0.673698 | false |
cirruscluster/cirruscluster | cirruscluster/ext/ansible/inventory/host.py | 1 | 2025 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import cirruscluster.ext.ansible.constants as C
class Host(object):
''' a single ansible host '''
__slots__ = [ 'name', 'vars', 'groups' ]
def __init__(self, name=None, port=None):
self.name = name
self.vars = {}
self.groups = []
if port and port != C.DEFAULT_REMOTE_PORT:
self.set_variable('ansible_ssh_port', int(port))
if self.name is None:
raise Exception("host name is required")
def add_group(self, group):
self.groups.append(group)
def set_variable(self, key, value):
self.vars[key]=value
def get_groups(self):
groups = {}
for g in self.groups:
groups[g.name] = g
ancestors = g.get_ancestors()
for a in ancestors:
groups[a.name] = a
return groups.values()
def get_variables(self):
results = {}
groups = self.get_groups()
for group in sorted(groups, key=lambda g: g.depth):
results.update(group.get_variables())
results.update(self.vars)
results['inventory_hostname'] = self.name
results['inventory_hostname_short'] = self.name.split('.')[0]
results['group_names'] = sorted([ g.name for g in groups if g.name != 'all'])
return results
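# --- Illustrative usage sketch (added commentary, not part of the original
# module). It only exercises the Host API defined above; the host name and
# variable values are made up.
def _example_host_usage():
    host = Host(name='web01.example.com', port=2222)
    host.set_variable('ansible_ssh_user', 'deploy')
    # includes 'ansible_ssh_port', 'inventory_hostname',
    # 'inventory_hostname_short' and 'group_names'
    return host.get_variables()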
| mit | 5,611,364,958,188,585,000 | 29.681818 | 85 | 0.628642 | false |
balle/chaosmap | lib/cymruwhois.py | 1 | 7940 | #!/usr/bin/env python
import socket
import errno
try :
import memcache
HAVE_MEMCACHE = True
except ImportError:
HAVE_MEMCACHE = False
def iterwindow(l, slice=50):
"""Generate sublists from an iterator
>>> list(iterwindow(iter(range(10)),11))
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]]
>>> list(iterwindow(iter(range(10)),9))
[[0, 1, 2, 3, 4, 5, 6, 7, 8], [9]]
>>> list(iterwindow(iter(range(10)),5))
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
>>> list(iterwindow(iter(range(10)),3))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(iterwindow(iter(range(10)),1))
[[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]]
"""
assert(slice > 0)
a=[]
for x in l:
if len(a) >= slice :
yield a
a=[]
a.append(x)
if a:
yield a
class record:
def __init__(self, asn, ip, prefix, cc, owner):
def fix(x):
x = x.strip()
if x == "NA":
return None
return str(x.decode('ascii','ignore'))
self.asn = fix(asn)
self.ip = fix(ip)
self.prefix = fix(prefix)
self.cc = fix(cc)
self.owner = fix(owner)
def __str__(self):
return "%-10s %-16s %-16s %s '%s'" % (self.asn, self.ip, self.prefix, self.cc, self.owner)
def __repr__(self):
return "<%s instance: %s|%s|%s|%s|%s>" % (self.__class__, self.asn, self.ip, self.prefix, self.cc, self.owner)
class Client:
"""Python interface to whois.cymru.com
**Usage**
>>> import socket
>>> ip = socket.gethostbyname("www.google.com")
>>> from cymruwhois import Client
>>> c=Client()
>>> r=c.lookup(ip)
>>> print r.asn
15169
>>> print r.owner
GOOGLE - Google Inc.
>>>
>>> ip_ms = socket.gethostbyname("www.microsoft.com")
>>> for r in c.lookupmany([ip, ip_ms]):
... print r.owner
GOOGLE - Google Inc.
MICROSOFT-CORP---MSN-AS-BLOCK - Microsoft Corp
"""
KEY_FMT = "cymruwhois:ip:%s"
def __init__(self, host="whois.cymru.com", port=43, memcache_host='localhost:11211'):
self.host=host
self.port=port
self._connected=False
self.c = None
if HAVE_MEMCACHE and memcache_host:
self.c = memcache.Client([memcache_host])
def _connect(self):
self.socket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.socket.settimeout(5.0)
self.socket.connect((self.host,self.port))
self.socket.settimeout(1.0)
self.file = self.socket.makefile()
def _sendline(self, line):
self.file.write(line + "\r\n")
self.file.flush()
def _readline(self):
return self.file.readline()
def _disconnect(self):
self.file.close()
self.socket.close()
def read_and_discard(self):
self.socket.setblocking(0)
try :
try :
self.file.read(1024)
except socket.error, e:
if e.args[0]!=errno.EAGAIN:
raise
finally:
self.socket.setblocking(1)
def _begin(self):
"""Explicitly connect and send BEGIN to start the lookup process"""
self._connect()
self._sendline("BEGIN")
self._readline() #discard the message "Bulk mode; one IP per line. [2005-08-02 18:54:55 GMT]"
self._sendline("PREFIX")
self._sendline("COUNTRYCODE")
self._sendline("NOTRUNC")
self._connected=True
def disconnect(self):
"""Explicitly send END to stop the lookup process and disconnect"""
if not self._connected: return
self._sendline("END")
self._disconnect()
self._connected=False
def get_cached(self, ips):
if not self.c:
return {}
keys = [self.KEY_FMT % ip for ip in ips]
vals = self.c.get_multi(keys)
#convert cymruwhois:ip:1.2.3.4 into just 1.2.3.4
return dict((k.split(":")[-1], v) for k,v in vals.items())
def cache(self, r):
if not self.c:
return
self.c.set(self.KEY_FMT % r.ip, r, 60*60*6)
def lookup(self, ip):
"""Look up a single address. This function should not be called in
loop, instead call lookupmany"""
return list(self.lookupmany([ip]))[0]
def lookupmany(self, ips):
"""Look up many ip addresses"""
ips = [str(ip).strip() for ip in ips]
for batch in iterwindow(ips, 100):
cached = self.get_cached(batch)
not_cached = [ip for ip in batch if not cached.get(ip)]
#print "cached:%d not_cached:%d" % (len(cached), len(not_cached))
if not_cached:
for rec in self._lookupmany_raw(not_cached):
cached[rec.ip] = rec
for ip in batch:
if ip in cached:
yield cached[ip]
def lookupmany_dict(self, ips):
"""Look up many ip addresses, returning a dictionary of ip -> record"""
ips = set(ips)
return dict((r.ip, r) for r in self.lookupmany(ips))
def _lookupmany_raw(self, ips):
"""Do a look up for some ips"""
if not self._connected:
self._begin()
ips = set(ips)
for ip in ips:
self._sendline(ip)
need = len(ips)
last = None
while need:
result=self._readline()
if 'Error: no ASN or IP match on line' in result:
need -=1
continue
parts=result.split("|")
r=record(*parts)
#check for multiple records being returned for a single IP
#in this case, just skip any extra records
if last and r.ip == last.ip:
continue
self.cache(r)
yield r
last = r
need -=1
#skip any trailing records that might have been caused by multiple records for the last ip
self.read_and_discard()
#backwards compatibility
lookerupper = Client
def lookup_stdin():
from optparse import OptionParser
import fileinput
parser = OptionParser(usage = "usage: %prog [options] [files]")
parser.add_option("-d", "--delim", dest="delim", action="store", default=None,
help="delimiter to use instead of justified")
parser.add_option("-f", "--fields", dest="fields", action="append",
help="comma separated fields to include (asn,ip,prefix,cc,owner)")
if HAVE_MEMCACHE:
parser.add_option("-c", "--cache", dest="cache", action="store", default="localhost:11211",
help="memcache server (default localhost)")
parser.add_option("-n", "--no-cache", dest="cache", action="store_false",
help="don't use memcached")
else:
memcache_host = None
(options, args) = parser.parse_args()
#fix the fields: convert ['a,b','c'] into ['a','b','c'] if needed
fields = []
if options.fields:
for f in options.fields:
fields.extend(f.split(","))
else:
fields = 'asn ip prefix cc owner'.split()
#generate the format string
fieldwidths = {
'asn': 8,
'ip': 15,
'prefix': 18,
'cc': 2,
'owner': 0,
}
if options.delim:
format = options.delim.join("%%(%s)s" % f for f in fields)
else:
format = ' '.join("%%(%s)-%ds" % (f, fieldwidths[f]) for f in fields)
#setup the memcache option
if HAVE_MEMCACHE:
memcache_host = options.cache
if memcache_host and ':' not in memcache_host:
memcache_host += ":11211"
c=Client(memcache_host=memcache_host)
ips = []
for line in fileinput.input(args):
ip=line.strip()
ips.append(ip)
for r in c.lookupmany(ips):
print format % r.__dict__
if __name__ == "__main__":
lookup_stdin()
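# --- Illustrative note (added commentary, not part of the original module).
# lookup_stdin() reads via fileinput, so IPs can come from files named on the
# command line or from standard input; the file names below are examples:
#
#   python cymruwhois.py ips.txt
#   cat ips.txt | python cymruwhois.py --fields asn,owner --delim "|"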
| gpl-3.0 | -4,367,014,186,561,682,000 | 28.962264 | 118 | 0.53728 | false |
appneta/boto | boto/rds/dbsecuritygroup.py | 185 | 6651 | # Copyright (c) 2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an DBSecurityGroup
"""
from boto.ec2.securitygroup import SecurityGroup
class DBSecurityGroup(object):
"""
Represents an RDS database security group
Properties reference available from the AWS documentation at
http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DeleteDBSecurityGroup.html
:ivar Status: The current status of the security group. Possible values are
[ active, ? ]. Reference documentation lacks specifics of possibilities
:ivar connection: :py:class:`boto.rds.RDSConnection` associated with the current object
:ivar description: The description of the security group
:ivar ec2_groups: List of :py:class:`EC2 Security Group
<boto.ec2.securitygroup.SecurityGroup>` objects that this security
group PERMITS
:ivar ip_ranges: List of :py:class:`boto.rds.dbsecuritygroup.IPRange`
objects (containing CIDR addresses) that this security group PERMITS
:ivar name: Name of the security group
:ivar owner_id: ID of the owner of the security group. Can be 'None'
"""
def __init__(self, connection=None, owner_id=None,
name=None, description=None):
self.connection = connection
self.owner_id = owner_id
self.name = name
self.description = description
self.ec2_groups = []
self.ip_ranges = []
def __repr__(self):
return 'DBSecurityGroup:%s' % self.name
def startElement(self, name, attrs, connection):
if name == 'IPRange':
cidr = IPRange(self)
self.ip_ranges.append(cidr)
return cidr
elif name == 'EC2SecurityGroup':
ec2_grp = EC2SecurityGroup(self)
self.ec2_groups.append(ec2_grp)
return ec2_grp
else:
return None
def endElement(self, name, value, connection):
if name == 'OwnerId':
self.owner_id = value
elif name == 'DBSecurityGroupName':
self.name = value
elif name == 'DBSecurityGroupDescription':
self.description = value
elif name == 'IPRanges':
pass
else:
setattr(self, name, value)
def delete(self):
return self.connection.delete_dbsecurity_group(self.name)
def authorize(self, cidr_ip=None, ec2_group=None):
"""
Add a new rule to this DBSecurity group.
You need to pass in either a CIDR block to authorize or
and EC2 SecurityGroup.
:type cidr_ip: string
:param cidr_ip: A valid CIDR IP range to authorize
:type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup`
:param ec2_group: An EC2 security group to authorize
:rtype: bool
:return: True if successful.
"""
if isinstance(ec2_group, SecurityGroup):
group_name = ec2_group.name
group_owner_id = ec2_group.owner_id
else:
group_name = None
group_owner_id = None
return self.connection.authorize_dbsecurity_group(self.name,
cidr_ip,
group_name,
group_owner_id)
def revoke(self, cidr_ip=None, ec2_group=None):
"""
Revoke access to a CIDR range or EC2 SecurityGroup.
You need to pass in either a CIDR block or
an EC2 SecurityGroup from which to revoke access.
:type cidr_ip: string
:param cidr_ip: A valid CIDR IP range to revoke
:type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup`
:param ec2_group: An EC2 security group to revoke
:rtype: bool
:return: True if successful.
"""
if isinstance(ec2_group, SecurityGroup):
group_name = ec2_group.name
group_owner_id = ec2_group.owner_id
return self.connection.revoke_dbsecurity_group(
self.name,
ec2_security_group_name=group_name,
ec2_security_group_owner_id=group_owner_id)
# Revoking by CIDR IP range
return self.connection.revoke_dbsecurity_group(
self.name, cidr_ip=cidr_ip)
class IPRange(object):
"""
Describes a CIDR address range for use in a DBSecurityGroup
:ivar cidr_ip: IP Address range
"""
def __init__(self, parent=None):
self.parent = parent
self.cidr_ip = None
self.status = None
def __repr__(self):
return 'IPRange:%s' % self.cidr_ip
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'CIDRIP':
self.cidr_ip = value
elif name == 'Status':
self.status = value
else:
setattr(self, name, value)
class EC2SecurityGroup(object):
"""
Describes an EC2 security group for use in a DBSecurityGroup
"""
def __init__(self, parent=None):
self.parent = parent
self.name = None
self.owner_id = None
def __repr__(self):
return 'EC2SecurityGroup:%s' % self.name
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'EC2SecurityGroupName':
self.name = value
elif name == 'EC2SecurityGroupOwnerId':
self.owner_id = value
else:
setattr(self, name, value)
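# --- Illustrative usage sketch (added commentary, not part of the original
# module). It assumes an already-created boto.rds RDSConnection; the group
# name and CIDR block are placeholders.
def _example_dbsecurity_group_usage(rds_connection):
    group = rds_connection.create_dbsecurity_group(
        'web-db-group', 'Access for the web tier')
    group.authorize(cidr_ip='10.0.0.0/24')   # permit a CIDR range
    group.revoke(cidr_ip='10.0.0.0/24')      # and revoke it again
    return group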
| mit | 3,858,321,539,029,553,000 | 34.758065 | 98 | 0.623515 | false |
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/jq-0.1-py2.5.egg/jq/queue/consumerend.py | 1 | 1699 | from twisted.internet.protocol import Protocol, ClientFactory
from twisted.internet import reactor, error
from twisted.python import log
from jq.common import VariablePacketProtocol
import pickle
import functools
class ConsumerClientProtocol(VariablePacketProtocol):
def connectionMade(self):
data = pickle.dumps((self.factory.job.type, self.factory.job.data))
self.sendPacket(data)
def packetRecieved(self, packetData):
error = pickle.loads(packetData)
self.factory.jobDone(error)
self.transport.loseConnection()
class ConsumerClientFactory(ClientFactory):
protocol = ConsumerClientProtocol
def __init__(self, job, callback):
self.job = job
self.callback = callback
def jobDone(self, error):
self.callback(error)
def clientConnectionLost(self, connector, reason):
log.msg('Lost connection. Reason: %s' % reason)
def clientConnectionFailed(self, connector, reason):
log.msg('Connection failed. Reason: %s' % reason)
class JobConsumer(object):
    def performJob(self, job, onFinishClbk):
        """Performs the given job and calls the onFinishClbk callback"""
raise NotImplementedError, "Dummy Implementation"
class TwistedJobConsumer(JobConsumer):
def __init__(self, host, port):
self.host = host
self.port = port
def performJob(self, job, onFinishClbk):
callback = functools.partial(onFinishClbk, self, job)
clientFactory = ConsumerClientFactory(job, callback)
reactor.connectTCP(self.host, self.port, clientFactory)
def __repr__(self):
return "<TwistedJobConsumer(host=%s, port=%s)>" % (self.host, self.port)
| bsd-3-clause | -3,788,942,848,196,670,500 | 32.98 | 80 | 0.696292 | false |
ykim362/mxnet | example/image-classification/fine-tune.py | 38 | 3215 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import argparse
import logging
logging.basicConfig(level=logging.DEBUG)
from common import find_mxnet
from common import data, fit, modelzoo
import mxnet as mx
def get_fine_tune_model(symbol, arg_params, num_classes, layer_name):
"""
symbol: the pre-trained network symbol
arg_params: the argument parameters of the pre-trained model
num_classes: the number of classes for the fine-tune datasets
layer_name: the layer name before the last fully-connected layer
"""
all_layers = symbol.get_internals()
net = all_layers[layer_name+'_output']
net = mx.symbol.FullyConnected(data=net, num_hidden=num_classes, name='fc')
net = mx.symbol.SoftmaxOutput(data=net, name='softmax')
new_args = dict({k:arg_params[k] for k in arg_params if 'fc' not in k})
return (net, new_args)
if __name__ == "__main__":
# parse args
parser = argparse.ArgumentParser(description="fine-tune a dataset",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
train = fit.add_fit_args(parser)
data.add_data_args(parser)
aug = data.add_data_aug_args(parser)
parser.add_argument('--pretrained-model', type=str,
help='the pre-trained model')
parser.add_argument('--layer-before-fullc', type=str, default='flatten0',
help='the name of the layer before the last fullc layer')
# use less augmentations for fine-tune
data.set_data_aug_level(parser, 1)
# use a small learning rate and less regularizations
parser.set_defaults(image_shape='3,224,224', num_epochs=30,
lr=.01, lr_step_epochs='20', wd=0, mom=0)
args = parser.parse_args()
# load pretrained model
dir_path = os.path.dirname(os.path.realpath(__file__))
(prefix, epoch) = modelzoo.download_model(
args.pretrained_model, os.path.join(dir_path, 'model'))
if prefix is None:
(prefix, epoch) = (args.pretrained_model, args.load_epoch)
sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
# remove the last fullc layer
(new_sym, new_args) = get_fine_tune_model(
sym, arg_params, args.num_classes, args.layer_before_fullc)
# train
fit.fit(args = args,
network = new_sym,
data_loader = data.get_rec_iter,
arg_params = new_args,
aux_params = aux_params)
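# --- Illustrative note (added commentary, not part of the original script).
# Only --pretrained-model and --layer-before-fullc are defined in this file;
# the other flags come from the shared fit/data argument parsers in common/
# and the values below are placeholders:
#
#   python fine-tune.py --pretrained-model imagenet1k-resnet-50 \
#       --layer-before-fullc flatten0 --num-classes 256 --gpus 0 \
#       --data-train caltech-train.rec --data-val caltech-val.rec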
| apache-2.0 | 3,726,488,884,919,110,000 | 40.753247 | 92 | 0.676827 | false |
molebot/brython | www/src/Lib/encodings/iso8859_10.py | 37 | 13896 | """ Python Character Mapping Codec iso8859_10 generated from 'MAPPINGS/ISO8859/8859-10.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-10',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u0112' # 0xA2 -> LATIN CAPITAL LETTER E WITH MACRON
'\u0122' # 0xA3 -> LATIN CAPITAL LETTER G WITH CEDILLA
'\u012a' # 0xA4 -> LATIN CAPITAL LETTER I WITH MACRON
'\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE
'\u0136' # 0xA6 -> LATIN CAPITAL LETTER K WITH CEDILLA
'\xa7' # 0xA7 -> SECTION SIGN
'\u013b' # 0xA8 -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u0110' # 0xA9 -> LATIN CAPITAL LETTER D WITH STROKE
'\u0160' # 0xAA -> LATIN CAPITAL LETTER S WITH CARON
'\u0166' # 0xAB -> LATIN CAPITAL LETTER T WITH STROKE
'\u017d' # 0xAC -> LATIN CAPITAL LETTER Z WITH CARON
'\xad' # 0xAD -> SOFT HYPHEN
'\u016a' # 0xAE -> LATIN CAPITAL LETTER U WITH MACRON
'\u014a' # 0xAF -> LATIN CAPITAL LETTER ENG
'\xb0' # 0xB0 -> DEGREE SIGN
'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
'\u0113' # 0xB2 -> LATIN SMALL LETTER E WITH MACRON
'\u0123' # 0xB3 -> LATIN SMALL LETTER G WITH CEDILLA
'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
'\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE
'\u0137' # 0xB6 -> LATIN SMALL LETTER K WITH CEDILLA
'\xb7' # 0xB7 -> MIDDLE DOT
'\u013c' # 0xB8 -> LATIN SMALL LETTER L WITH CEDILLA
'\u0111' # 0xB9 -> LATIN SMALL LETTER D WITH STROKE
'\u0161' # 0xBA -> LATIN SMALL LETTER S WITH CARON
'\u0167' # 0xBB -> LATIN SMALL LETTER T WITH STROKE
'\u017e' # 0xBC -> LATIN SMALL LETTER Z WITH CARON
'\u2015' # 0xBD -> HORIZONTAL BAR
'\u016b' # 0xBE -> LATIN SMALL LETTER U WITH MACRON
'\u014b' # 0xBF -> LATIN SMALL LETTER ENG
'\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK
'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
'\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA
'\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\u0168' # 0xD7 -> LATIN CAPITAL LETTER U WITH TILDE
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
'\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK
'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
'\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA
'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\u0169' # 0xF7 -> LATIN SMALL LETTER U WITH TILDE
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
'\u0138' # 0xFF -> LATIN SMALL LETTER KRA
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| bsd-3-clause | -8,939,722,050,343,046,000 | 43.263844 | 109 | 0.519142 | false |
debugger87/spark | examples/src/main/python/mllib/elementwise_product_example.py | 106 | 1756 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark import SparkContext
# $example on$
from pyspark.mllib.feature import ElementwiseProduct
from pyspark.mllib.linalg import Vectors
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="ElementwiseProductExample") # SparkContext
# $example on$
data = sc.textFile("data/mllib/kmeans_data.txt")
parsedData = data.map(lambda x: [float(t) for t in x.split(" ")])
# Create weight vector.
transformingVector = Vectors.dense([0.0, 1.0, 2.0])
transformer = ElementwiseProduct(transformingVector)
# Batch transform
transformedData = transformer.transform(parsedData)
# Single-row transform
transformedData2 = transformer.transform(parsedData.first())
# $example off$
print("transformedData:")
for each in transformedData.collect():
print(each)
print("transformedData2:")
for each in transformedData2:
print(each)
sc.stop()
| apache-2.0 | 1,547,029,056,104,505,000 | 33.431373 | 74 | 0.729499 | false |
origingod/hug | tests/module_fake.py | 10 | 1042 | """Fake HUG API module usable for testing importation of modules"""
import hug
@hug.directive(apply_globally=False)
def my_directive(default=None, **kwargs):
'''for testing'''
return default
@hug.default_input_format('application/made-up')
def made_up_formatter(data):
'''for testing'''
return data
@hug.default_output_format()
def output_formatter(data):
'''for testing'''
return hug.output_format.json(data)
@hug.get()
def made_up_api(hug_my_directive=True):
'''for testing'''
return hug_my_directive
@hug.directive(apply_globally=True)
def my_directive_global(default=None, **kwargs):
'''for testing'''
return default
@hug.default_input_format('application/made-up', apply_globally=True)
def made_up_formatter_global(data):
'''for testing'''
return data
@hug.default_output_format(apply_globally=True)
def output_formatter_global(data):
'''for testing'''
return hug.output_format.json(data)
@hug.request_middleware()
def handle_request(request, response):
return
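# --- Illustrative note (added commentary, not part of the original module).
# The fake API above is normally exercised through hug's test helpers, e.g.:
#
#   import hug
#   import module_fake
#   response = hug.test.get(module_fake, 'made_up_api')
#   assert response.data is True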
| mit | 7,865,437,084,695,204,000 | 20.265306 | 69 | 0.699616 | false |
xmaruto/mcord | xos/tosca/resources/hpchealthcheck.py | 3 | 1219 | import importlib
import os
import pdb
import sys
import tempfile
sys.path.append("/opt/tosca")
from translator.toscalib.tosca_template import ToscaTemplate
import pdb
from services.hpc.models import HpcHealthCheck, HpcService
from xosresource import XOSResource
class XOSHpcHealthCheck(XOSResource):
provides = "tosca.nodes.HpcHealthCheck"
xos_model = HpcHealthCheck
name_field = None
copyin_props = ("kind", "resource_name", "result_contains")
def get_xos_args(self, throw_exception=True):
args = super(XOSHpcHealthCheck, self).get_xos_args()
service_name = self.get_requirement("tosca.relationships.MemberOfService", throw_exception=throw_exception)
if service_name:
args["hpcService"] = self.get_xos_object(HpcService, throw_exception=throw_exception, name=service_name)
return args
def get_existing_objs(self):
args = self.get_xos_args(throw_exception=True)
return list( HpcHealthCheck.objects.filter(hpcService=args["hpcService"], kind=args["kind"], resource_name=args["resource_name"]) )
def postprocess(self, obj):
pass
def can_delete(self, obj):
        return super(XOSHpcHealthCheck, self).can_delete(obj)
| apache-2.0 | -8,430,083,443,676,888,000 | 30.25641 | 139 | 0.716161 | false |
datalogics-robb/scons | src/engine/SCons/cpp.py | 2 | 18411 | #
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
__doc__ = """
SCons C Pre-Processor module
"""
import SCons.compat
import os
import re
import string
#
# First "subsystem" of regular expressions that we set up:
#
# Stuff to turn the C preprocessor directives in a file's contents into
# a list of tuples that we can process easily.
#
# A table of regular expressions that fetch the arguments from the rest of
# a C preprocessor line. Different directives have different arguments
# that we want to fetch, using the regular expressions to which the lists
# of preprocessor directives map.
cpp_lines_dict = {
# Fetch the rest of a #if/#elif/#ifdef/#ifndef/#import/#include/
# #include_next line as one argument.
('if', 'elif', 'ifdef', 'ifndef', 'import', 'include', 'include_next',)
: '\s+(.+)',
# We don't care what comes after a #else or #endif line.
('else', 'endif',) : '',
# Fetch three arguments from a #define line:
# 1) The #defined keyword.
# 2) The optional parentheses and arguments (if it's a function-like
# macro, '' if it's not).
# 3) The expansion value.
('define',) : '\s+([_A-Za-z][_A-Za-z0-9_]+)(\([^)]*\))?\s*(.*)',
# Fetch the #undefed keyword from a #undef line.
('undef',) : '\s+([_A-Za-z][A-Za-z0-9_]+)',
}
# Create a table that maps each individual C preprocessor directive to
# the corresponding compiled regular expression that fetches the arguments
# we care about.
Table = {}
for op_list, expr in cpp_lines_dict.items():
e = re.compile(expr)
for op in op_list:
Table[op] = e
del e
del op
del op_list
# Create a list of the expressions we'll use to match all of the
# preprocessor directives. These are the same as the directives
# themselves *except* that we must use a negative lookahead assertion
# when matching "if" so it doesn't match the "if" in "ifdef."
override = {
'if' : 'if(?!def)',
}
l = map(lambda x, o=override: o.get(x, x), Table.keys())
# Turn the list of expressions into one big honkin' regular expression
# that will match all the preprocessor lines at once. This will return
# a list of tuples, one for each preprocessor line. The preprocessor
# directive will be the first element in each tuple, and the rest of
# the line will be the second element.
e = '^\s*#\s*(' + string.join(l, '|') + ')(.*)$'
# And last but not least, compile the expression.
CPP_Expression = re.compile(e, re.M)
#
# Second "subsystem" of regular expressions that we set up:
#
# Stuff to translate a C preprocessor expression (as found on a #if or
# #elif line) into an equivalent Python expression that we can eval().
#
# A dictionary that maps the C representation of Boolean operators
# to their Python equivalents.
CPP_to_Python_Ops_Dict = {
'!' : ' not ',
'!=' : ' != ',
'&&' : ' and ',
'||' : ' or ',
'?' : ' and ',
':' : ' or ',
'\r' : '',
}
CPP_to_Python_Ops_Sub = lambda m, d=CPP_to_Python_Ops_Dict: d[m.group(0)]
# We have to sort the keys by length so that longer expressions
# come *before* shorter expressions--in particular, "!=" must
# come before "!" in the alternation. Without this, the Python
# re module, as late as version 2.2.2, empirically matches the
# "!" in "!=" first, instead of finding the longest match.
# What's up with that?
l = CPP_to_Python_Ops_Dict.keys()
l.sort(lambda a, b: cmp(len(b), len(a)))
# Turn the list of keys into one regular expression that will allow us
# to substitute all of the operators at once.
expr = string.join(map(re.escape, l), '|')
# ...and compile the expression.
CPP_to_Python_Ops_Expression = re.compile(expr)
# A separate list of expressions to be evaluated and substituted
# sequentially, not all at once.
CPP_to_Python_Eval_List = [
['defined\s+(\w+)', '__dict__.has_key("\\1")'],
['defined\s*\((\w+)\)', '__dict__.has_key("\\1")'],
['/\*.*\*/', ''],
['/\*.*', ''],
['//.*', ''],
['(0x[0-9A-Fa-f]*)[UL]+', '\\1L'],
]
# Replace the string representations of the regular expressions in the
# list with compiled versions.
for l in CPP_to_Python_Eval_List:
l[0] = re.compile(l[0])
# Wrap up all of the above into a handy function.
def CPP_to_Python(s):
"""
Converts a C pre-processor expression into an equivalent
Python expression that can be evaluated.
"""
s = CPP_to_Python_Ops_Expression.sub(CPP_to_Python_Ops_Sub, s)
for expr, repl in CPP_to_Python_Eval_List:
s = expr.sub(repl, s)
return s
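# Illustrative example (added commentary, not part of the original module):
# CPP_to_Python('defined(FOO) && BAR != 0') yields, up to whitespace,
# '__dict__.has_key("FOO") and BAR != 0', which can then be eval()ed in the
# namespace built up from #define lines.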
del expr
del l
del override
class FunctionEvaluator:
"""
Handles delayed evaluation of a #define function call.
"""
def __init__(self, name, args, expansion):
"""
Squirrels away the arguments and expansion value of a #define
macro function for later evaluation when we must actually expand
a value that uses it.
"""
self.name = name
self.args = function_arg_separator.split(args)
self.expansion = string.split(expansion, '##')
def __call__(self, *values):
"""
Evaluates the expansion of a #define macro function called
with the specified values.
"""
if len(self.args) != len(values):
raise ValueError, "Incorrect number of arguments to `%s'" % self.name
# Create a dictionary that maps the macro arguments to the
# corresponding values in this "call." We'll use this when we
# eval() the expansion so that arguments will get expanded to
# the right values.
locals = {}
for k, v in zip(self.args, values):
locals[k] = v
parts = []
for s in self.expansion:
if not s in self.args:
s = repr(s)
parts.append(s)
statement = string.join(parts, ' + ')
return eval(statement, globals(), locals)
# Find line continuations.
line_continuations = re.compile('\\\\\r?\n')
# Search for a "function call" macro on an expansion. Returns the
# two-tuple of the "function" name itself, and a string containing the
# arguments within the call parentheses.
function_name = re.compile('(\S+)\(([^)]*)\)')
# Split a string containing comma-separated function call arguments into
# the separate arguments.
function_arg_separator = re.compile(',\s*')
class PreProcessor:
"""
The main workhorse class for handling C pre-processing.
"""
def __init__(self, current='.', cpppath=[], dict={}, all=0):
global Table
self.searchpath = {
'"' : [current] + cpppath,
'<' : cpppath + [current],
}
# Initialize our C preprocessor namespace for tracking the
# values of #defined keywords. We use this namespace to look
# for keywords on #ifdef/#ifndef lines, and to eval() the
# expressions on #if/#elif lines (after massaging them from C to
# Python).
self.cpp_namespace = dict.copy()
self.cpp_namespace['__dict__'] = self.cpp_namespace
if all:
self.do_include = self.all_include
# For efficiency, a dispatch table maps each C preprocessor
# directive (#if, #define, etc.) to the method that should be
        # called when we see it.  We accommodate state changes (#if,
# #ifdef, #ifndef) by pushing the current dispatch table on a
# stack and changing what method gets called for each relevant
# directive we might see next at this level (#else, #elif).
# #endif will simply pop the stack.
d = {}
for op in Table.keys():
d[op] = getattr(self, 'do_' + op)
self.default_table = d
# Controlling methods.
def tupleize(self, contents):
"""
Turns the contents of a file into a list of easily-processed
tuples describing the CPP lines in the file.
The first element of each tuple is the line's preprocessor
directive (#if, #include, #define, etc., minus the initial '#').
The remaining elements are specific to the type of directive, as
pulled apart by the regular expression.
"""
global CPP_Expression, Table
contents = line_continuations.sub('', contents)
cpp_tuples = CPP_Expression.findall(contents)
return map(lambda m, t=Table:
(m[0],) + t[m[0]].match(m[1]).groups(),
cpp_tuples)
def __call__(self, contents):
"""
        Pre-processes a file's contents.
        This is the main entry point.
"""
self.stack = []
self.dispatch_table = self.default_table.copy()
self.tuples = self.tupleize(contents)
self.result = []
while self.tuples:
t = self.tuples.pop(0)
# Uncomment to see the list of tuples being processed (e.g.,
# to validate the CPP lines are being translated correctly).
#print t
self.dispatch_table[t[0]](t)
return self.result
# Dispatch table stack manipulation methods.
def save(self):
"""
Pushes the current dispatch table on the stack and re-initializes
the current dispatch table to the default.
"""
self.stack.append(self.dispatch_table)
self.dispatch_table = self.default_table.copy()
def restore(self):
"""
Pops the previous dispatch table off the stack and makes it the
current one.
"""
try: self.dispatch_table = self.stack.pop()
except IndexError: pass
# Utility methods.
def do_nothing(self, t):
"""
Null method for when we explicitly want the action for a
specific preprocessor directive to do nothing.
"""
pass
def eval_expression(self, t):
"""
Evaluates a C preprocessor expression.
This is done by converting it to a Python equivalent and
eval()ing it in the C preprocessor namespace we use to
track #define values.
"""
t = CPP_to_Python(string.join(t[1:]))
try: return eval(t, self.cpp_namespace)
except (NameError, TypeError): return 0
def find_include_file(self, t):
"""
Finds the #include file for a given preprocessor tuple.
"""
fname = t[2]
for d in self.searchpath[t[1]]:
f = os.path.join(d, fname)
if os.path.isfile(f):
return f
return None
# Start and stop processing include lines.
def start_handling_includes(self, t=None):
"""
Causes the PreProcessor object to start processing #import,
#include and #include_next lines.
This method will be called when a #if, #ifdef, #ifndef or #elif
evaluates True, or when we reach the #else in a #if, #ifdef,
#ifndef or #elif block where a condition already evaluated
False.
"""
d = self.dispatch_table
d['import'] = self.do_import
d['include'] = self.do_include
d['include_next'] = self.do_include
def stop_handling_includes(self, t=None):
"""
Causes the PreProcessor object to stop processing #import,
#include and #include_next lines.
This method will be called when a #if, #ifdef, #ifndef or #elif
evaluates False, or when we reach the #else in a #if, #ifdef,
#ifndef or #elif block where a condition already evaluated True.
"""
d = self.dispatch_table
d['import'] = self.do_nothing
d['include'] = self.do_nothing
d['include_next'] = self.do_nothing
# Default methods for handling all of the preprocessor directives.
# (Note that what actually gets called for a given directive at any
# point in time is really controlled by the dispatch_table.)
def _do_if_else_condition(self, condition):
"""
Common logic for evaluating the conditions on #if, #ifdef and
#ifndef lines.
"""
self.save()
d = self.dispatch_table
if condition:
self.start_handling_includes()
d['elif'] = self.stop_handling_includes
d['else'] = self.stop_handling_includes
else:
self.stop_handling_includes()
d['elif'] = self.do_elif
d['else'] = self.start_handling_includes
def do_ifdef(self, t):
"""
Default handling of a #ifdef line.
"""
self._do_if_else_condition(self.cpp_namespace.has_key(t[1]))
def do_ifndef(self, t):
"""
Default handling of a #ifndef line.
"""
self._do_if_else_condition(not self.cpp_namespace.has_key(t[1]))
def do_if(self, t):
"""
Default handling of a #if line.
"""
self._do_if_else_condition(self.eval_expression(t))
def do_elif(self, t):
"""
Default handling of a #elif line.
"""
d = self.dispatch_table
if self.eval_expression(t):
self.start_handling_includes()
d['elif'] = self.stop_handling_includes
d['else'] = self.stop_handling_includes
def do_else(self, t):
"""
Default handling of a #else line.
"""
pass
def do_endif(self, t):
"""
Default handling of a #endif line.
"""
self.restore()
def do_define(self, t):
"""
Default handling of a #define line.
"""
_, name, args, expansion = t
try:
expansion = int(expansion)
except (TypeError, ValueError):
pass
if args:
evaluator = FunctionEvaluator(name, args[1:-1], expansion)
self.cpp_namespace[name] = evaluator
else:
self.cpp_namespace[name] = expansion
def do_undef(self, t):
"""
Default handling of a #undef line.
"""
try: del self.cpp_namespace[t[1]]
except KeyError: pass
def do_import(self, t):
"""
Default handling of a #import line.
"""
# XXX finish this -- maybe borrow/share logic from do_include()...?
pass
def do_include(self, t):
"""
Default handling of a #include line.
"""
t = self.resolve_include(t)
include_file = self.find_include_file(t)
if include_file:
#print "include_file =", include_file
self.result.append(include_file)
contents = open(include_file).read()
new_tuples = self.tupleize(contents)
self.tuples[:] = new_tuples + self.tuples
# Date: Tue, 22 Nov 2005 20:26:09 -0500
# From: Stefan Seefeld <[email protected]>
#
# By the way, #include_next is not the same as #include. The difference
# being that #include_next starts its search in the path following the
# path that let to the including file. In other words, if your system
# include paths are ['/foo', '/bar'], and you are looking at a header
# '/foo/baz.h', it might issue an '#include_next <baz.h>' which would
# correctly resolve to '/bar/baz.h' (if that exists), but *not* see
# '/foo/baz.h' again. See http://www.delorie.com/gnu/docs/gcc/cpp_11.html
# for more reasoning.
#
# I have no idea in what context 'import' might be used.
# XXX is #include_next really the same as #include ?
do_include_next = do_include
# Utility methods for handling resolution of include files.
def resolve_include(self, t):
"""Resolve a tuple-ized #include line.
This handles recursive expansion of values without "" or <>
surrounding the name until an initial " or < is found, to handle
#include FILE
where FILE is a #define somewhere else.
"""
s = t[1]
while not s[0] in '<"':
#print "s =", s
try:
s = self.cpp_namespace[s]
except KeyError:
m = function_name.search(s)
s = self.cpp_namespace[m.group(1)]
if callable(s):
args = function_arg_separator.split(m.group(2))
s = apply(s, args)
if not s:
return None
return (t[0], s[0], s[1:-1])
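    # Illustrative walk-through: after '#define CONFIG_H "config.h"', a later
    # '#include CONFIG_H' arrives here without quotes around the name; the loop
    # keeps replacing the name with its value from self.cpp_namespace until it
    # starts with '"' or '<', then the surrounding quotes/brackets are stripped
    # and ('include', '"', 'config.h') is returned.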
def all_include(self, t):
"""
"""
self.result.append(self.resolve_include(t))
class DumbPreProcessor(PreProcessor):
"""A preprocessor that ignores all #if/#elif/#else/#endif directives
and just reports back *all* of the #include files (like the classic
SCons scanner did).
This is functionally equivalent to using a regular expression to
find all of the #include lines, only slower. It exists mainly as
an example of how the main PreProcessor class can be sub-classed
to tailor its behavior.
"""
def __init__(self, *args, **kw):
apply(PreProcessor.__init__, (self,)+args, kw)
d = self.default_table
for func in ['if', 'elif', 'else', 'endif', 'ifdef', 'ifndef']:
            d[func] = self.do_nothing
del __revision__
| mit | -274,357,927,382,766,880 | 32.596715 | 81 | 0.599316 | false |
petercable/mi-instrument | mi/core/test/test_persistent_store.py | 9 | 11196 | #!/usr/bin/env python
"""
@package mi.core.test.test_persistent_store
@file <git-workspace>/ooi/edex/com.raytheon.uf.ooi.plugin.instrumentagent/utility/edex_static/base/ooi/instruments/mi-instrument/mi/core/test/test_persistent_store.py
@author Johnathon Rusk
@brief Unit tests for PersistentStoreDict module
"""
# Note: Execute via, "nosetests -a UNIT -v mi/core/test/test_persistent_store.py"
__author__ = 'Johnathon Rusk'
__license__ = 'Apache 2.0'
from nose.plugins.attrib import attr
from mi.core.unit_test import MiUnitTest
import sys
from mi.core.persistent_store import PersistentStoreDict
@attr('UNIT', group='mi')
class TestPersistentStoreDict(MiUnitTest):
def setUp(self):
self.UNICODE_KEY = "UNICODE_KEY" # Test 'str' type key
self.UNICODE_VALUES = [u"this is a unicode string", u"this is another unicode string"]
self.INT_KEY = u"INT_KEY"
self.INT_VALUES = [1234, 5678]
self.LONG_KEY = "LONG_KEY" # Test 'str' type key
self.LONG_VALUES = [sys.maxint + 1, sys.maxint + 2]
self.FLOAT_KEY = u"FLOAT_KEY"
self.FLOAT_VALUES = [56.78, 12.34]
self.BOOL_KEY = "BOOL_KEY" # Test 'str' type key
self.BOOL_VALUES = [True, False]
self.DICT_KEY = u"DICT_KEY"
self.DICT_VALUES = [{u"KEY_1":1, u"KEY_2":2, u"KEY_3":3}, {u"KEY_4":4, u"KEY_5":5, u"KEY_6":6}]
self.LIST_KEY = "LIST_KEY" # Test 'str' type key
self.LIST_VALUES = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 0]]
self.persistentStoreDict = PersistentStoreDict("unit_test", "GI01SUMO-00001")
def tearDown(self):
self.persistentStoreDict.clear() # NOTE: This technically assumes the delete functionality works.
def helper_get(self, key, expectedValue, expectedValueType):
self.assertIn(type(key), [str, unicode])
value = self.persistentStoreDict[key]
self.assertIs(type(value), expectedValueType)
self.assertEqual(value, expectedValue)
def helper_set(self, key, value, valueType, shouldAddKey):
self.assertIn(type(key), [str, unicode])
self.assertIs(type(value), valueType)
self.assertIs(type(shouldAddKey), bool)
initialKeyCount = len(self.persistentStoreDict.keys())
self.persistentStoreDict[key] = value
self.assertEqual(len(self.persistentStoreDict.keys()), (initialKeyCount + 1) if shouldAddKey else initialKeyCount)
def helper_del(self, key):
self.assertIn(type(key), [str, unicode])
initialKeyCount = len(self.persistentStoreDict.keys())
del self.persistentStoreDict[key]
self.assertEqual(len(self.persistentStoreDict.keys()), initialKeyCount - 1)
def test_createRecords_success_unicode(self):
self.helper_set(self.UNICODE_KEY, self.UNICODE_VALUES[0], unicode, True)
def test_createRecords_success_int(self):
self.helper_set(self.INT_KEY, self.INT_VALUES[0], int, True)
def test_createRecords_success_long(self):
self.helper_set(self.LONG_KEY, self.LONG_VALUES[0], long, True)
def test_createRecords_success_float(self):
self.helper_set(self.FLOAT_KEY, self.FLOAT_VALUES[0], float, True)
def test_createRecords_success_bool(self):
self.helper_set(self.BOOL_KEY, self.BOOL_VALUES[0], bool, True)
def test_createRecords_success_dict(self):
self.helper_set(self.DICT_KEY, self.DICT_VALUES[0], dict, True)
def test_createRecords_success_list(self):
self.helper_set(self.LIST_KEY, self.LIST_VALUES[0], list, True)
def test_createRecords_fail_badKeyType(self):
key = 0
value = u"this will fail"
self.assertNotIn(type(key), [str, unicode])
self.assertIn(type(value), [unicode, int, long, float, bool, dict, list])
with self.assertRaises(TypeError) as contextManager:
self.persistentStoreDict[key] = value
self.assertEqual(contextManager.exception.args[0], "Key must be of type 'str' or 'unicode'.")
def test_createRecords_fail_badItemType(self):
key = u"this will fail"
value = 2+3j
self.assertIn(type(key), [str, unicode])
self.assertNotIn(type(value), [unicode, int, long, float, bool, dict, list])
with self.assertRaises(TypeError) as contextManager:
self.persistentStoreDict[key] = value
self.assertEqual(contextManager.exception.args[0], "Value must be of type: 'unicode', 'int', 'long', 'float', 'bool', 'dict', or 'list'")
def test_createRecords_fail_badItemType_nested(self):
key = u"this will fail"
value = {u"KEY_1":[1, 2, 3], u"KEY_2":[1+2j, 3+4j, 5+6j]}
self.assertIn(type(key), [str, unicode])
self.assertIn(type(value), [unicode, int, long, float, bool, dict, list])
self.assertNotIn(type(value[u'KEY_2'][0]), [unicode, int, long, float, bool, dict, list])
with self.assertRaises(TypeError) as contextManager:
self.persistentStoreDict[key] = value
self.assertEqual(contextManager.exception.args[0], "Value must be of type: 'unicode', 'int', 'long', 'float', 'bool', 'dict', or 'list'")
def test_getRecords_success_unicode(self):
self.helper_set(self.UNICODE_KEY, self.UNICODE_VALUES[0], unicode, True)
self.helper_get(self.UNICODE_KEY, self.UNICODE_VALUES[0], unicode)
def test_getRecords_success_int(self):
self.helper_set(self.INT_KEY, self.INT_VALUES[0], int, True)
self.helper_get(self.INT_KEY, self.INT_VALUES[0], int)
def test_getRecords_success_long(self):
self.helper_set(self.LONG_KEY, self.LONG_VALUES[0], long, True)
self.helper_get(self.LONG_KEY, self.LONG_VALUES[0], long)
def test_getRecords_success_float(self):
self.helper_set(self.FLOAT_KEY, self.FLOAT_VALUES[0], float, True)
self.helper_get(self.FLOAT_KEY, self.FLOAT_VALUES[0], float)
def test_getRecords_success_bool(self):
self.helper_set(self.BOOL_KEY, self.BOOL_VALUES[0], bool, True)
self.helper_get(self.BOOL_KEY, self.BOOL_VALUES[0], bool)
def test_getRecords_success_dict(self):
self.helper_set(self.DICT_KEY, self.DICT_VALUES[0], dict, True)
self.helper_get(self.DICT_KEY, self.DICT_VALUES[0], dict)
def test_getRecords_success_list(self):
self.helper_set(self.LIST_KEY, self.LIST_VALUES[0], list, True)
self.helper_get(self.LIST_KEY, self.LIST_VALUES[0], list)
def test_getRecords_fail_badKeyType(self):
key = 0
self.assertNotIn(type(key), [str, unicode])
with self.assertRaises(TypeError) as contextManager:
value = self.persistentStoreDict[key]
self.assertEqual(contextManager.exception.args[0], "Key must be of type 'str' or 'unicode'.")
def test_getRecords_fail_keyNotFound(self):
key = u"this will fail"
self.assertIn(type(key), [str, unicode])
with self.assertRaises(KeyError) as contextManager:
value = self.persistentStoreDict[key]
self.assertEqual(contextManager.exception.args[0], "No item found with key: '{0}'".format(key))
def test_updateRecords_success_unicode(self):
self.helper_set(self.UNICODE_KEY, self.UNICODE_VALUES[0], unicode, True)
self.helper_get(self.UNICODE_KEY, self.UNICODE_VALUES[0], unicode)
self.helper_set(self.UNICODE_KEY, self.UNICODE_VALUES[1], unicode, False)
self.helper_get(self.UNICODE_KEY, self.UNICODE_VALUES[1], unicode)
def test_updateRecords_success_int(self):
self.helper_set(self.INT_KEY, self.INT_VALUES[0], int, True)
self.helper_get(self.INT_KEY, self.INT_VALUES[0], int)
self.helper_set(self.INT_KEY, self.INT_VALUES[1], int, False)
self.helper_get(self.INT_KEY, self.INT_VALUES[1], int)
def test_updateRecords_success_long(self):
self.helper_set(self.LONG_KEY, self.LONG_VALUES[0], long, True)
self.helper_get(self.LONG_KEY, self.LONG_VALUES[0], long)
self.helper_set(self.LONG_KEY, self.LONG_VALUES[1], long, False)
self.helper_get(self.LONG_KEY, self.LONG_VALUES[1], long)
def test_updateRecords_success_float(self):
self.helper_set(self.FLOAT_KEY, self.FLOAT_VALUES[0], float, True)
self.helper_get(self.FLOAT_KEY, self.FLOAT_VALUES[0], float)
self.helper_set(self.FLOAT_KEY, self.FLOAT_VALUES[1], float, False)
self.helper_get(self.FLOAT_KEY, self.FLOAT_VALUES[1], float)
def test_updateRecords_success_bool(self):
self.helper_set(self.BOOL_KEY, self.BOOL_VALUES[0], bool, True)
self.helper_get(self.BOOL_KEY, self.BOOL_VALUES[0], bool)
self.helper_set(self.BOOL_KEY, self.BOOL_VALUES[1], bool, False)
self.helper_get(self.BOOL_KEY, self.BOOL_VALUES[1], bool)
def test_updateRecords_success_dict(self):
self.helper_set(self.DICT_KEY, self.DICT_VALUES[0], dict, True)
self.helper_get(self.DICT_KEY, self.DICT_VALUES[0], dict)
self.helper_set(self.DICT_KEY, self.DICT_VALUES[1], dict, False)
self.helper_get(self.DICT_KEY, self.DICT_VALUES[1], dict)
def test_updateRecords_success_list(self):
self.helper_set(self.LIST_KEY, self.LIST_VALUES[0], list, True)
self.helper_get(self.LIST_KEY, self.LIST_VALUES[0], list)
self.helper_set(self.LIST_KEY, self.LIST_VALUES[1], list, False)
self.helper_get(self.LIST_KEY, self.LIST_VALUES[1], list)
def test_removeRecords_success_unicode(self):
self.helper_set(self.UNICODE_KEY, self.UNICODE_VALUES[0], unicode, True)
self.helper_del(self.UNICODE_KEY)
def test_removeRecords_success_int(self):
self.helper_set(self.INT_KEY, self.INT_VALUES[0], int, True)
self.helper_del(self.INT_KEY)
def test_removeRecords_success_long(self):
self.helper_set(self.LONG_KEY, self.LONG_VALUES[0], long, True)
self.helper_del(self.LONG_KEY)
def test_removeRecords_success_float(self):
self.helper_set(self.FLOAT_KEY, self.FLOAT_VALUES[0], float, True)
self.helper_del(self.FLOAT_KEY)
def test_removeRecords_success_bool(self):
self.helper_set(self.BOOL_KEY, self.BOOL_VALUES[0], bool, True)
self.helper_del(self.BOOL_KEY)
def test_removeRecords_success_dict(self):
self.helper_set(self.DICT_KEY, self.DICT_VALUES[0], dict, True)
self.helper_del(self.DICT_KEY)
def test_removeRecords_success_list(self):
self.helper_set(self.LIST_KEY, self.LIST_VALUES[0], list, True)
self.helper_del(self.LIST_KEY)
def test_removeRecords_fail_badKeyType(self):
key = 0
self.assertNotIn(type(key), [str, unicode])
with self.assertRaises(TypeError) as contextManager:
del self.persistentStoreDict[key]
self.assertEqual(contextManager.exception.args[0], "Key must be of type 'str' or 'unicode'.")
def test_removeRecords_fail_keyNotFound(self):
key = u"this will fail"
self.assertIn(type(key), [str, unicode])
with self.assertRaises(KeyError) as contextManager:
del self.persistentStoreDict[key]
self.assertEqual(contextManager.exception.args[0], "No item found with key: '{0}'".format(key))
| bsd-2-clause | -8,588,406,243,775,132,000 | 46.240506 | 166 | 0.665952 | false |
edxnercel/edx-platform | common/lib/chem/chem/chemtools.py | 250 | 10721 | """This module originally includes functions for grading Vsepr problems.
Also, maybe this module is the place for other chemistry-related grading functions. TODO: discuss it.
"""
import json
import unittest
import itertools
def vsepr_parse_user_answer(user_input):
"""
    user_input is JSON generated by vsepr.js from a dictionary.
    The original user_input dictionary must contain only two keys: "geometry" and "atoms".
Format: u'{"geometry": "AX3E0","atoms":{"c0": "B","p0": "F","p1": "B","p2": "F"}}'
    The order of elements inside the "atoms" subdict does not matter.
    Returns the dict parsed from the JSON.
    The "atoms" subdict stores the positions of atoms in the molecule.
General types of positions:
c0 - central atom
p0..pN - peripheral atoms
a0..aN - axial atoms
e0..eN - equatorial atoms
    Each position is a dictionary key, i.e. user_input["atoms"]["c0"] is the central atom and user_input["atoms"]["a0"] is one of the axial atoms.
    Special positions used only for AX6 (octahedral) geometry:
        e10, e11 - atom pair opposite the central atom,
        e20, e21 - atom pair opposite the central atom,
        the e1 and e2 pairs lying crosswise in the equatorial plane.
    user_input["atoms"] may contain only 3 sets of keys:
(c0,p0..pN),
(c0, a0..aN, e0..eN),
(c0, a0, a1, e10,e11,e20,e21) - if geometry is AX6.
"""
return json.loads(user_input)
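# Illustrative round-trip (not part of the original module):
#
#   parsed = vsepr_parse_user_answer(
#       u'{"geometry": "AX3E0", "atoms": {"c0": "B", "p0": "F", "p1": "F", "p2": "F"}}')
#   parsed['geometry']        # -> u'AX3E0'
#   parsed['atoms']['c0']     # -> u'B'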
def vsepr_build_correct_answer(geometry, atoms):
"""
    geometry is a string.
    atoms is a dict of atoms with their proper positions.
Example:
correct_answer = vsepr_build_correct_answer(geometry="AX4E0", atoms={"c0": "N", "p0": "H", "p1": "(ep)", "p2": "H", "p3": "H"})
returns a dictionary composed from input values:
{'geometry': geometry, 'atoms': atoms}
"""
return {'geometry': geometry, 'atoms': atoms}
def vsepr_grade(user_input, correct_answer, convert_to_peripheral=False):
"""
    This function compares user_input against correct_answer.
    The comparison succeeds only if all of these steps succeed:
    1) the geometries are equal
    2) the central atoms (dictionary key 'c0') are equal
    3) the corresponding subsets of atom positions are equal: equatorial (e0..eN), axial (a0..aN) or peripheral (p0..pN)
If convert_to_peripheral is True, then axial and equatorial positions are converted to peripheral.
This means that user_input from:
"atoms":{"c0": "Br","a0": "test","a1": "(ep)","e10": "H","e11": "(ep)","e20": "H","e21": "(ep)"}}' after parsing to json
is converted to:
{"c0": "Br", "p0": "(ep)", "p1": "test", "p2": "H", "p3": "H", "p4": "(ep)", "p6": "(ep)"}
i.e. aX and eX -> pX
    So if converted, the p subsets are compared;
    if not, the a and e subsets are compared.
    If all subsets are equal, the grade succeeds.
There is also one special case for AX6 geometry.
    In this case user_input["atoms"] contains the special 3-symbol keys e10, e11, e20, and e21.
Correct answer for this geometry can be of 3 types:
1) c0 and peripheral
2) c0 and axial and equatorial
3) c0 and axial and equatorial-subset-1 (e1X) and equatorial-subset-2 (e2X)
    If the correct answer is of type 1 or 2, then user_input is converted from type 3 to type 2 (or to type 1 if convert_to_peripheral is True).
    If correct_answer is of type 3, a special-case comparison is done. There are 3 sets of atom positions in both user_input and correct_answer: axial, eq-1 and eq-2.
    The answer is correct if these sets are equal for one of the permutations. For example, if:
user_axial = correct_eq-1
user_eq-1 = correct-axial
user_eq-2 = correct-eq-2
"""
if user_input['geometry'] != correct_answer['geometry']:
return False
if user_input['atoms']['c0'] != correct_answer['atoms']['c0']:
return False
if convert_to_peripheral:
# convert user_input from (a,e,e1,e2) to (p)
# correct_answer must be set in (p) using this flag
c0 = user_input['atoms'].pop('c0')
user_input['atoms'] = {'p' + str(i): v for i, v in enumerate(user_input['atoms'].values())}
user_input['atoms']['c0'] = c0
# special case for AX6
if 'e10' in correct_answer['atoms']: # need check e1x, e2x symmetry for AX6..
a_user = {}
a_correct = {}
for ea_position in ['a', 'e1', 'e2']: # collecting positions:
a_user[ea_position] = [v for k, v in user_input['atoms'].items() if k.startswith(ea_position)]
a_correct[ea_position] = [v for k, v in correct_answer['atoms'].items() if k.startswith(ea_position)]
correct = [sorted(a_correct['a'])] + [sorted(a_correct['e1'])] + [sorted(a_correct['e2'])]
for permutation in itertools.permutations(['a', 'e1', 'e2']):
if correct == [sorted(a_user[permutation[0]])] + [sorted(a_user[permutation[1]])] + [sorted(a_user[permutation[2]])]:
return True
return False
else: # no need to check e1x,e2x symmetry - convert them to ex
if 'e10' in user_input['atoms']: # e1x exists, it is AX6.. case
e_index = 0
for k, v in user_input['atoms'].items():
if len(k) == 3: # e1x
del user_input['atoms'][k]
user_input['atoms']['e' + str(e_index)] = v
e_index += 1
# common case
for ea_position in ['p', 'a', 'e']:
# collecting atoms:
a_user = [v for k, v in user_input['atoms'].items() if k.startswith(ea_position)]
a_correct = [v for k, v in correct_answer['atoms'].items() if k.startswith(ea_position)]
# print a_user, a_correct
if len(a_user) != len(a_correct):
return False
if sorted(a_user) != sorted(a_correct):
return False
return True
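# Minimal usage sketch (mirrors the unit tests below; values are hypothetical):
#
#   correct = vsepr_build_correct_answer(
#       geometry="AX4E0",
#       atoms={"c0": "N", "p0": "H", "p1": "(ep)", "p2": "H", "p3": "H"})
#   user = vsepr_parse_user_answer(
#       u'{"geometry": "AX4E0", "atoms": {"c0": "N", "p0": "(ep)", "p1": "H", "p2": "H", "p3": "H"}}')
#   vsepr_grade(user, correct)   # -> True: peripheral positions compare as unordered sets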
class Test_Grade(unittest.TestCase):
''' test grade function '''
def test_incorrect_geometry(self):
correct_answer = vsepr_build_correct_answer(geometry="AX4E0", atoms={"c0": "N", "p0": "H", "p1": "(ep)", "p2": "H", "p3": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX3E0","atoms":{"c0": "B","p0": "F","p1": "B","p2": "F"}}')
self.assertFalse(vsepr_grade(user_answer, correct_answer))
def test_correct_answer_p(self):
correct_answer = vsepr_build_correct_answer(geometry="AX4E0", atoms={"c0": "N", "p0": "H", "p1": "(ep)", "p2": "H", "p3": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX4E0","atoms":{"c0": "N","p0": "H","p1": "(ep)","p2": "H", "p3": "H"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer))
def test_correct_answer_ae(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "test", "a1": "(ep)", "e0": "H", "e1": "H", "e2": "(ep)", "e3": "(ep)"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "test","a1": "(ep)","e10": "H","e11": "H","e20": "(ep)","e21": "(ep)"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer))
def test_correct_answer_ae_convert_to_p_but_input_not_in_p(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "test", "e0": "H", "e1": "H", "e2": "(ep)", "e3": "(ep)"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "test","a1": "(ep)","e10": "H","e11": "(ep)","e20": "H","e21": "(ep)"}}')
self.assertFalse(vsepr_grade(user_answer, correct_answer, convert_to_peripheral=True))
def test_correct_answer_ae_convert_to_p(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "p0": "(ep)", "p1": "test", "p2": "H", "p3": "H", "p4": "(ep)", "p6": "(ep)"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "test","a1": "(ep)","e10": "H","e11": "(ep)","e20": "H","e21": "(ep)"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer, convert_to_peripheral=True))
def test_correct_answer_e1e2_in_a(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "(ep)", "e10": "H", "e11": "H", "e20": "H", "e21": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "(ep)","a1": "(ep)","e10": "H","e11": "H","e20": "H","e21": "H"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer))
def test_correct_answer_e1e2_in_e1(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "(ep)", "e10": "H", "e11": "H", "e20": "H", "e21": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "H","a1": "H","e10": "(ep)","e11": "(ep)","e20": "H","e21": "H"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer))
def test_correct_answer_e1e2_in_e2(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "(ep)", "e10": "H", "e11": "H", "e20": "H", "e21": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "H","a1": "H","e10": "H","e11": "H","e20": "(ep)","e21": "(ep)"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer))
def test_incorrect_answer_e1e2(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "(ep)", "e10": "H", "e11": "H", "e20": "H", "e21": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "H","a1": "H","e10": "(ep)","e11": "H","e20": "H","e21": "(ep)"}}')
self.assertFalse(vsepr_grade(user_answer, correct_answer))
def test_incorrect_c0(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "test", "e0": "H", "e1": "H", "e2": "H", "e3": "(ep)"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "H","a0": "test","a1": "(ep)","e0": "H","e1": "H","e2": "(ep)","e3": "H"}}')
self.assertFalse(vsepr_grade(user_answer, correct_answer))
def suite():
testcases = [Test_Grade]
suites = []
for testcase in testcases:
suites.append(unittest.TestLoader().loadTestsFromTestCase(testcase))
return unittest.TestSuite(suites)
if __name__ == "__main__":
unittest.TextTestRunner(verbosity=2).run(suite())
| agpl-3.0 | 7,097,200,553,950,299,000 | 51.043689 | 169 | 0.579797 | false |
vivekananda/fbeats | django/contrib/gis/db/backends/oracle/introspection.py | 388 | 1777 | import cx_Oracle
from django.db.backends.oracle.introspection import DatabaseIntrospection
class OracleIntrospection(DatabaseIntrospection):
# Associating any OBJECTVAR instances with GeometryField. Of course,
# this won't work right on Oracle objects that aren't MDSYS.SDO_GEOMETRY,
# but it is the only object type supported within Django anyways.
data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
data_types_reverse[cx_Oracle.OBJECT] = 'GeometryField'
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# Querying USER_SDO_GEOM_METADATA to get the SRID and dimension information.
try:
cursor.execute('SELECT "DIMINFO", "SRID" FROM "USER_SDO_GEOM_METADATA" WHERE "TABLE_NAME"=%s AND "COLUMN_NAME"=%s',
(table_name.upper(), geo_col.upper()))
row = cursor.fetchone()
except Exception, msg:
raise Exception('Could not find entry in USER_SDO_GEOM_METADATA corresponding to "%s"."%s"\n'
'Error message: %s.' % (table_name, geo_col, msg))
# TODO: Research way to find a more specific geometry field type for
# the column's contents.
field_type = 'GeometryField'
# Getting the field parameters.
field_params = {}
dim, srid = row
if srid != 4326:
field_params['srid'] = srid
# Length of object array ( SDO_DIM_ARRAY ) is number of dimensions.
dim = len(dim)
if dim != 2:
field_params['dim'] = dim
finally:
cursor.close()
return field_type, field_params
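    # Illustrative return shapes (assumed example values): a 3D layer registered
    # with SRID 8307 would come back as ('GeometryField', {'srid': 8307, 'dim': 3}),
    # while a 2D column already in SRID 4326 yields ('GeometryField', {}).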
| bsd-3-clause | -3,018,824,038,000,439,300 | 44.564103 | 131 | 0.596511 | false |
scarriere/CSGames-AI2015 | AIClient_Python/test/test_mathUtils.py | 1 | 1127 | from unittest import TestCase
from mathUtils.Vector2 import Vector2
from mathUtils.MathUtils import MathUtils
from mathUtils.Direction import Direction
class TestMathUtils(TestCase):
def test_getDirectionVector(self):
a = Vector2(1, 1)
b = Vector2(7, 7)
self.assertEqual(Vector2(6, 6), MathUtils.getDirectionVector(a, b))
def test_getDirectionFromPositions(self):
a = Vector2(0, 7)
b = Vector2(7, 7)
self.assertEqual(Direction.RIGHT, MathUtils.getDirectionFromPositions(a, b))
def test_getDirection(self):
self.assertEqual(Direction.RIGHT, MathUtils.getDirection(Vector2(2, 0)))
self.assertEqual(Direction.LEFT, MathUtils.getDirection(Vector2(-2, 0)))
self.assertEqual(Direction.UP, MathUtils.getDirection(Vector2(0, 2)))
self.assertEqual(Direction.DOWN, MathUtils.getDirection(Vector2(0, -2)))
self.assertRaises(Exception, MathUtils.getDirection, (Vector2(2, 2)))
self.assertRaises(Exception, MathUtils.getDirection, (Vector2(-2, -2)))
self.assertRaises(Exception, MathUtils.getDirection, (Vector2(0, 0)))
| mit | 2,933,137,239,843,517,000 | 44.08 | 84 | 0.710736 | false |
Zhongqilong/kbengine | kbe/src/lib/python/Lib/test/test_minidom.py | 60 | 64328 | # test for xml.dom.minidom
import pickle
from test.support import run_unittest, findfile
import unittest
import xml.dom.minidom
from xml.dom.minidom import parse, Node, Document, parseString
from xml.dom.minidom import getDOMImplementation
tstfile = findfile("test.xml", subdir="xmltestdata")
# The tests of DocumentType importing use these helpers to construct
# the documents to work with, since not all DOM builders actually
# create the DocumentType nodes.
def create_doc_without_doctype(doctype=None):
return getDOMImplementation().createDocument(None, "doc", doctype)
def create_nonempty_doctype():
doctype = getDOMImplementation().createDocumentType("doc", None, None)
doctype.entities._seq = []
doctype.notations._seq = []
notation = xml.dom.minidom.Notation("my-notation", None,
"http://xml.python.org/notations/my")
doctype.notations._seq.append(notation)
entity = xml.dom.minidom.Entity("my-entity", None,
"http://xml.python.org/entities/my",
"my-notation")
entity.version = "1.0"
entity.encoding = "utf-8"
entity.actualEncoding = "us-ascii"
doctype.entities._seq.append(entity)
return doctype
def create_doc_with_doctype():
doctype = create_nonempty_doctype()
doc = create_doc_without_doctype(doctype)
doctype.entities.item(0).ownerDocument = doc
doctype.notations.item(0).ownerDocument = doc
return doc
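# Note (descriptive only): create_doc_with_doctype() returns a Document whose
# doctype owns exactly one entity ("my-entity") and one notation ("my-notation"),
# which is what the DocumentType cloning/importing tests below rely on.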
class MinidomTest(unittest.TestCase):
def confirm(self, test, testname = "Test"):
self.assertTrue(test, testname)
def checkWholeText(self, node, s):
t = node.wholeText
self.confirm(t == s, "looking for %r, found %r" % (s, t))
def testParseFromFile(self):
with open(tstfile) as file:
dom = parse(file)
dom.unlink()
self.confirm(isinstance(dom, Document))
def testGetElementsByTagName(self):
dom = parse(tstfile)
self.confirm(dom.getElementsByTagName("LI") == \
dom.documentElement.getElementsByTagName("LI"))
dom.unlink()
def testInsertBefore(self):
dom = parseString("<doc><foo/></doc>")
root = dom.documentElement
elem = root.childNodes[0]
nelem = dom.createElement("element")
root.insertBefore(nelem, elem)
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2
and root.childNodes[0] is nelem
and root.childNodes.item(0) is nelem
and root.childNodes[1] is elem
and root.childNodes.item(1) is elem
and root.firstChild is nelem
and root.lastChild is elem
and root.toxml() == "<doc><element/><foo/></doc>"
, "testInsertBefore -- node properly placed in tree")
nelem = dom.createElement("element")
root.insertBefore(nelem, None)
self.confirm(len(root.childNodes) == 3
and root.childNodes.length == 3
and root.childNodes[1] is elem
and root.childNodes.item(1) is elem
and root.childNodes[2] is nelem
and root.childNodes.item(2) is nelem
and root.lastChild is nelem
and nelem.previousSibling is elem
and root.toxml() == "<doc><element/><foo/><element/></doc>"
, "testInsertBefore -- node properly placed in tree")
nelem2 = dom.createElement("bar")
root.insertBefore(nelem2, nelem)
self.confirm(len(root.childNodes) == 4
and root.childNodes.length == 4
and root.childNodes[2] is nelem2
and root.childNodes.item(2) is nelem2
and root.childNodes[3] is nelem
and root.childNodes.item(3) is nelem
and nelem2.nextSibling is nelem
and nelem.previousSibling is nelem2
and root.toxml() ==
"<doc><element/><foo/><bar/><element/></doc>"
, "testInsertBefore -- node properly placed in tree")
dom.unlink()
def _create_fragment_test_nodes(self):
dom = parseString("<doc/>")
orig = dom.createTextNode("original")
c1 = dom.createTextNode("foo")
c2 = dom.createTextNode("bar")
c3 = dom.createTextNode("bat")
dom.documentElement.appendChild(orig)
frag = dom.createDocumentFragment()
frag.appendChild(c1)
frag.appendChild(c2)
frag.appendChild(c3)
return dom, orig, c1, c2, c3, frag
def testInsertBeforeFragment(self):
dom, orig, c1, c2, c3, frag = self._create_fragment_test_nodes()
dom.documentElement.insertBefore(frag, None)
self.confirm(tuple(dom.documentElement.childNodes) ==
(orig, c1, c2, c3),
"insertBefore(<fragment>, None)")
frag.unlink()
dom.unlink()
dom, orig, c1, c2, c3, frag = self._create_fragment_test_nodes()
dom.documentElement.insertBefore(frag, orig)
self.confirm(tuple(dom.documentElement.childNodes) ==
(c1, c2, c3, orig),
"insertBefore(<fragment>, orig)")
frag.unlink()
dom.unlink()
def testAppendChild(self):
dom = parse(tstfile)
dom.documentElement.appendChild(dom.createComment("Hello"))
self.confirm(dom.documentElement.childNodes[-1].nodeName == "#comment")
self.confirm(dom.documentElement.childNodes[-1].data == "Hello")
dom.unlink()
def testAppendChildFragment(self):
dom, orig, c1, c2, c3, frag = self._create_fragment_test_nodes()
dom.documentElement.appendChild(frag)
self.confirm(tuple(dom.documentElement.childNodes) ==
(orig, c1, c2, c3),
"appendChild(<fragment>)")
frag.unlink()
dom.unlink()
def testReplaceChildFragment(self):
dom, orig, c1, c2, c3, frag = self._create_fragment_test_nodes()
dom.documentElement.replaceChild(frag, orig)
orig.unlink()
self.confirm(tuple(dom.documentElement.childNodes) == (c1, c2, c3),
"replaceChild(<fragment>)")
frag.unlink()
dom.unlink()
def testLegalChildren(self):
dom = Document()
elem = dom.createElement('element')
text = dom.createTextNode('text')
self.assertRaises(xml.dom.HierarchyRequestErr, dom.appendChild, text)
dom.appendChild(elem)
self.assertRaises(xml.dom.HierarchyRequestErr, dom.insertBefore, text,
elem)
self.assertRaises(xml.dom.HierarchyRequestErr, dom.replaceChild, text,
elem)
nodemap = elem.attributes
self.assertRaises(xml.dom.HierarchyRequestErr, nodemap.setNamedItem,
text)
self.assertRaises(xml.dom.HierarchyRequestErr, nodemap.setNamedItemNS,
text)
elem.appendChild(text)
dom.unlink()
def testNamedNodeMapSetItem(self):
dom = Document()
elem = dom.createElement('element')
attrs = elem.attributes
attrs["foo"] = "bar"
a = attrs.item(0)
self.confirm(a.ownerDocument is dom,
"NamedNodeMap.__setitem__() sets ownerDocument")
self.confirm(a.ownerElement is elem,
"NamedNodeMap.__setitem__() sets ownerElement")
self.confirm(a.value == "bar",
"NamedNodeMap.__setitem__() sets value")
self.confirm(a.nodeValue == "bar",
"NamedNodeMap.__setitem__() sets nodeValue")
elem.unlink()
dom.unlink()
def testNonZero(self):
dom = parse(tstfile)
self.confirm(dom)# should not be zero
dom.appendChild(dom.createComment("foo"))
self.confirm(not dom.childNodes[-1].childNodes)
dom.unlink()
def testUnlink(self):
dom = parse(tstfile)
self.assertTrue(dom.childNodes)
dom.unlink()
self.assertFalse(dom.childNodes)
def testContext(self):
with parse(tstfile) as dom:
self.assertTrue(dom.childNodes)
self.assertFalse(dom.childNodes)
def testElement(self):
dom = Document()
dom.appendChild(dom.createElement("abc"))
self.confirm(dom.documentElement)
dom.unlink()
def testAAA(self):
dom = parseString("<abc/>")
el = dom.documentElement
el.setAttribute("spam", "jam2")
self.confirm(el.toxml() == '<abc spam="jam2"/>', "testAAA")
a = el.getAttributeNode("spam")
self.confirm(a.ownerDocument is dom,
"setAttribute() sets ownerDocument")
self.confirm(a.ownerElement is dom.documentElement,
"setAttribute() sets ownerElement")
dom.unlink()
def testAAB(self):
dom = parseString("<abc/>")
el = dom.documentElement
el.setAttribute("spam", "jam")
el.setAttribute("spam", "jam2")
self.confirm(el.toxml() == '<abc spam="jam2"/>', "testAAB")
dom.unlink()
def testAddAttr(self):
dom = Document()
child = dom.appendChild(dom.createElement("abc"))
child.setAttribute("def", "ghi")
self.confirm(child.getAttribute("def") == "ghi")
self.confirm(child.attributes["def"].value == "ghi")
child.setAttribute("jkl", "mno")
self.confirm(child.getAttribute("jkl") == "mno")
self.confirm(child.attributes["jkl"].value == "mno")
self.confirm(len(child.attributes) == 2)
child.setAttribute("def", "newval")
self.confirm(child.getAttribute("def") == "newval")
self.confirm(child.attributes["def"].value == "newval")
self.confirm(len(child.attributes) == 2)
dom.unlink()
def testDeleteAttr(self):
dom = Document()
child = dom.appendChild(dom.createElement("abc"))
self.confirm(len(child.attributes) == 0)
child.setAttribute("def", "ghi")
self.confirm(len(child.attributes) == 1)
del child.attributes["def"]
self.confirm(len(child.attributes) == 0)
dom.unlink()
def testRemoveAttr(self):
dom = Document()
child = dom.appendChild(dom.createElement("abc"))
child.setAttribute("def", "ghi")
self.confirm(len(child.attributes) == 1)
self.assertRaises(xml.dom.NotFoundErr, child.removeAttribute, "foo")
child.removeAttribute("def")
self.confirm(len(child.attributes) == 0)
dom.unlink()
def testRemoveAttrNS(self):
dom = Document()
child = dom.appendChild(
dom.createElementNS("http://www.python.org", "python:abc"))
child.setAttributeNS("http://www.w3.org", "xmlns:python",
"http://www.python.org")
child.setAttributeNS("http://www.python.org", "python:abcattr", "foo")
self.assertRaises(xml.dom.NotFoundErr, child.removeAttributeNS,
"foo", "http://www.python.org")
self.confirm(len(child.attributes) == 2)
child.removeAttributeNS("http://www.python.org", "abcattr")
self.confirm(len(child.attributes) == 1)
dom.unlink()
def testRemoveAttributeNode(self):
dom = Document()
child = dom.appendChild(dom.createElement("foo"))
child.setAttribute("spam", "jam")
self.confirm(len(child.attributes) == 1)
node = child.getAttributeNode("spam")
self.assertRaises(xml.dom.NotFoundErr, child.removeAttributeNode,
None)
child.removeAttributeNode(node)
self.confirm(len(child.attributes) == 0
and child.getAttributeNode("spam") is None)
dom2 = Document()
child2 = dom2.appendChild(dom2.createElement("foo"))
node2 = child2.getAttributeNode("spam")
self.assertRaises(xml.dom.NotFoundErr, child2.removeAttributeNode,
node2)
dom.unlink()
def testHasAttribute(self):
dom = Document()
child = dom.appendChild(dom.createElement("foo"))
child.setAttribute("spam", "jam")
self.confirm(child.hasAttribute("spam"))
def testChangeAttr(self):
dom = parseString("<abc/>")
el = dom.documentElement
el.setAttribute("spam", "jam")
self.confirm(len(el.attributes) == 1)
el.setAttribute("spam", "bam")
# Set this attribute to be an ID and make sure that doesn't change
# when changing the value:
el.setIdAttribute("spam")
self.confirm(len(el.attributes) == 1
and el.attributes["spam"].value == "bam"
and el.attributes["spam"].nodeValue == "bam"
and el.getAttribute("spam") == "bam"
and el.getAttributeNode("spam").isId)
el.attributes["spam"] = "ham"
self.confirm(len(el.attributes) == 1
and el.attributes["spam"].value == "ham"
and el.attributes["spam"].nodeValue == "ham"
and el.getAttribute("spam") == "ham"
and el.attributes["spam"].isId)
el.setAttribute("spam2", "bam")
self.confirm(len(el.attributes) == 2
and el.attributes["spam"].value == "ham"
and el.attributes["spam"].nodeValue == "ham"
and el.getAttribute("spam") == "ham"
and el.attributes["spam2"].value == "bam"
and el.attributes["spam2"].nodeValue == "bam"
and el.getAttribute("spam2") == "bam")
el.attributes["spam2"] = "bam2"
self.confirm(len(el.attributes) == 2
and el.attributes["spam"].value == "ham"
and el.attributes["spam"].nodeValue == "ham"
and el.getAttribute("spam") == "ham"
and el.attributes["spam2"].value == "bam2"
and el.attributes["spam2"].nodeValue == "bam2"
and el.getAttribute("spam2") == "bam2")
dom.unlink()
def testGetAttrList(self):
pass
def testGetAttrValues(self):
pass
def testGetAttrLength(self):
pass
def testGetAttribute(self):
dom = Document()
child = dom.appendChild(
dom.createElementNS("http://www.python.org", "python:abc"))
self.assertEqual(child.getAttribute('missing'), '')
def testGetAttributeNS(self):
dom = Document()
child = dom.appendChild(
dom.createElementNS("http://www.python.org", "python:abc"))
child.setAttributeNS("http://www.w3.org", "xmlns:python",
"http://www.python.org")
self.assertEqual(child.getAttributeNS("http://www.w3.org", "python"),
'http://www.python.org')
self.assertEqual(child.getAttributeNS("http://www.w3.org", "other"),
'')
child2 = child.appendChild(dom.createElement('abc'))
self.assertEqual(child2.getAttributeNS("http://www.python.org", "missing"),
'')
def testGetAttributeNode(self): pass
def testGetElementsByTagNameNS(self):
d="""<foo xmlns:minidom='http://pyxml.sf.net/minidom'>
<minidom:myelem/>
</foo>"""
dom = parseString(d)
elems = dom.getElementsByTagNameNS("http://pyxml.sf.net/minidom",
"myelem")
self.confirm(len(elems) == 1
and elems[0].namespaceURI == "http://pyxml.sf.net/minidom"
and elems[0].localName == "myelem"
and elems[0].prefix == "minidom"
and elems[0].tagName == "minidom:myelem"
and elems[0].nodeName == "minidom:myelem")
dom.unlink()
def get_empty_nodelist_from_elements_by_tagName_ns_helper(self, doc, nsuri,
lname):
nodelist = doc.getElementsByTagNameNS(nsuri, lname)
self.confirm(len(nodelist) == 0)
def testGetEmptyNodeListFromElementsByTagNameNS(self):
doc = parseString('<doc/>')
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, 'http://xml.python.org/namespaces/a', 'localname')
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, '*', 'splat')
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, 'http://xml.python.org/namespaces/a', '*')
doc = parseString('<doc xmlns="http://xml.python.org/splat"><e/></doc>')
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, "http://xml.python.org/splat", "not-there")
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, "*", "not-there")
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, "http://somewhere.else.net/not-there", "e")
def testElementReprAndStr(self):
dom = Document()
el = dom.appendChild(dom.createElement("abc"))
string1 = repr(el)
string2 = str(el)
self.confirm(string1 == string2)
dom.unlink()
def testElementReprAndStrUnicode(self):
dom = Document()
el = dom.appendChild(dom.createElement("abc"))
string1 = repr(el)
string2 = str(el)
self.confirm(string1 == string2)
dom.unlink()
def testElementReprAndStrUnicodeNS(self):
dom = Document()
el = dom.appendChild(
dom.createElementNS("http://www.slashdot.org", "slash:abc"))
string1 = repr(el)
string2 = str(el)
self.confirm(string1 == string2)
self.confirm("slash:abc" in string1)
dom.unlink()
def testAttributeRepr(self):
dom = Document()
el = dom.appendChild(dom.createElement("abc"))
node = el.setAttribute("abc", "def")
self.confirm(str(node) == repr(node))
dom.unlink()
def testTextNodeRepr(self): pass
def testWriteXML(self):
str = '<?xml version="1.0" ?><a b="c"/>'
dom = parseString(str)
domstr = dom.toxml()
dom.unlink()
self.confirm(str == domstr)
def testAltNewline(self):
str = '<?xml version="1.0" ?>\n<a b="c"/>\n'
dom = parseString(str)
domstr = dom.toprettyxml(newl="\r\n")
dom.unlink()
self.confirm(domstr == str.replace("\n", "\r\n"))
def test_toprettyxml_with_text_nodes(self):
# see issue #4147, text nodes are not indented
decl = '<?xml version="1.0" ?>\n'
self.assertEqual(parseString('<B>A</B>').toprettyxml(),
decl + '<B>A</B>\n')
self.assertEqual(parseString('<C>A<B>A</B></C>').toprettyxml(),
decl + '<C>\n\tA\n\t<B>A</B>\n</C>\n')
self.assertEqual(parseString('<C><B>A</B>A</C>').toprettyxml(),
decl + '<C>\n\t<B>A</B>\n\tA\n</C>\n')
self.assertEqual(parseString('<C><B>A</B><B>A</B></C>').toprettyxml(),
decl + '<C>\n\t<B>A</B>\n\t<B>A</B>\n</C>\n')
self.assertEqual(parseString('<C><B>A</B>A<B>A</B></C>').toprettyxml(),
decl + '<C>\n\t<B>A</B>\n\tA\n\t<B>A</B>\n</C>\n')
def test_toprettyxml_with_adjacent_text_nodes(self):
# see issue #4147, adjacent text nodes are indented normally
dom = Document()
elem = dom.createElement('elem')
elem.appendChild(dom.createTextNode('TEXT'))
elem.appendChild(dom.createTextNode('TEXT'))
dom.appendChild(elem)
decl = '<?xml version="1.0" ?>\n'
self.assertEqual(dom.toprettyxml(),
decl + '<elem>\n\tTEXT\n\tTEXT\n</elem>\n')
def test_toprettyxml_preserves_content_of_text_node(self):
# see issue #4147
for str in ('<B>A</B>', '<A><B>C</B></A>'):
dom = parseString(str)
dom2 = parseString(dom.toprettyxml())
self.assertEqual(
dom.getElementsByTagName('B')[0].childNodes[0].toxml(),
dom2.getElementsByTagName('B')[0].childNodes[0].toxml())
def testProcessingInstruction(self):
dom = parseString('<e><?mypi \t\n data \t\n ?></e>')
pi = dom.documentElement.firstChild
self.confirm(pi.target == "mypi"
and pi.data == "data \t\n "
and pi.nodeName == "mypi"
and pi.nodeType == Node.PROCESSING_INSTRUCTION_NODE
and pi.attributes is None
and not pi.hasChildNodes()
and len(pi.childNodes) == 0
and pi.firstChild is None
and pi.lastChild is None
and pi.localName is None
and pi.namespaceURI == xml.dom.EMPTY_NAMESPACE)
def testProcessingInstructionRepr(self): pass
def testTextRepr(self): pass
def testWriteText(self): pass
def testDocumentElement(self): pass
def testTooManyDocumentElements(self):
doc = parseString("<doc/>")
elem = doc.createElement("extra")
# Should raise an exception when adding an extra document element.
self.assertRaises(xml.dom.HierarchyRequestErr, doc.appendChild, elem)
elem.unlink()
doc.unlink()
def testCreateElementNS(self): pass
def testCreateAttributeNS(self): pass
def testParse(self): pass
def testParseString(self): pass
def testComment(self): pass
def testAttrListItem(self): pass
def testAttrListItems(self): pass
def testAttrListItemNS(self): pass
def testAttrListKeys(self): pass
def testAttrListKeysNS(self): pass
def testRemoveNamedItem(self):
doc = parseString("<doc a=''/>")
e = doc.documentElement
attrs = e.attributes
a1 = e.getAttributeNode("a")
a2 = attrs.removeNamedItem("a")
self.confirm(a1.isSameNode(a2))
self.assertRaises(xml.dom.NotFoundErr, attrs.removeNamedItem, "a")
def testRemoveNamedItemNS(self):
doc = parseString("<doc xmlns:a='http://xml.python.org/' a:b=''/>")
e = doc.documentElement
attrs = e.attributes
a1 = e.getAttributeNodeNS("http://xml.python.org/", "b")
a2 = attrs.removeNamedItemNS("http://xml.python.org/", "b")
self.confirm(a1.isSameNode(a2))
self.assertRaises(xml.dom.NotFoundErr, attrs.removeNamedItemNS,
"http://xml.python.org/", "b")
def testAttrListValues(self): pass
def testAttrListLength(self): pass
def testAttrList__getitem__(self): pass
def testAttrList__setitem__(self): pass
def testSetAttrValueandNodeValue(self): pass
def testParseElement(self): pass
def testParseAttributes(self): pass
def testParseElementNamespaces(self): pass
def testParseAttributeNamespaces(self): pass
def testParseProcessingInstructions(self): pass
def testChildNodes(self): pass
def testFirstChild(self): pass
def testHasChildNodes(self):
dom = parseString("<doc><foo/></doc>")
doc = dom.documentElement
self.assertTrue(doc.hasChildNodes())
dom2 = parseString("<doc/>")
doc2 = dom2.documentElement
self.assertFalse(doc2.hasChildNodes())
def _testCloneElementCopiesAttributes(self, e1, e2, test):
attrs1 = e1.attributes
attrs2 = e2.attributes
keys1 = list(attrs1.keys())
keys2 = list(attrs2.keys())
keys1.sort()
keys2.sort()
self.confirm(keys1 == keys2, "clone of element has same attribute keys")
for i in range(len(keys1)):
a1 = attrs1.item(i)
a2 = attrs2.item(i)
self.confirm(a1 is not a2
and a1.value == a2.value
and a1.nodeValue == a2.nodeValue
and a1.namespaceURI == a2.namespaceURI
and a1.localName == a2.localName
, "clone of attribute node has proper attribute values")
self.confirm(a2.ownerElement is e2,
"clone of attribute node correctly owned")
def _setupCloneElement(self, deep):
dom = parseString("<doc attr='value'><foo/></doc>")
root = dom.documentElement
clone = root.cloneNode(deep)
self._testCloneElementCopiesAttributes(
root, clone, "testCloneElement" + (deep and "Deep" or "Shallow"))
# mutilate the original so shared data is detected
root.tagName = root.nodeName = "MODIFIED"
root.setAttribute("attr", "NEW VALUE")
root.setAttribute("added", "VALUE")
return dom, clone
def testCloneElementShallow(self):
dom, clone = self._setupCloneElement(0)
self.confirm(len(clone.childNodes) == 0
and clone.childNodes.length == 0
and clone.parentNode is None
and clone.toxml() == '<doc attr="value"/>'
, "testCloneElementShallow")
dom.unlink()
def testCloneElementDeep(self):
dom, clone = self._setupCloneElement(1)
self.confirm(len(clone.childNodes) == 1
and clone.childNodes.length == 1
and clone.parentNode is None
and clone.toxml() == '<doc attr="value"><foo/></doc>'
, "testCloneElementDeep")
dom.unlink()
def testCloneDocumentShallow(self):
doc = parseString("<?xml version='1.0'?>\n"
"<!-- comment -->"
"<!DOCTYPE doc [\n"
"<!NOTATION notation SYSTEM 'http://xml.python.org/'>\n"
"]>\n"
"<doc attr='value'/>")
doc2 = doc.cloneNode(0)
self.confirm(doc2 is None,
"testCloneDocumentShallow:"
" shallow cloning of documents makes no sense!")
def testCloneDocumentDeep(self):
doc = parseString("<?xml version='1.0'?>\n"
"<!-- comment -->"
"<!DOCTYPE doc [\n"
"<!NOTATION notation SYSTEM 'http://xml.python.org/'>\n"
"]>\n"
"<doc attr='value'/>")
doc2 = doc.cloneNode(1)
self.confirm(not (doc.isSameNode(doc2) or doc2.isSameNode(doc)),
"testCloneDocumentDeep: document objects not distinct")
self.confirm(len(doc.childNodes) == len(doc2.childNodes),
"testCloneDocumentDeep: wrong number of Document children")
self.confirm(doc2.documentElement.nodeType == Node.ELEMENT_NODE,
"testCloneDocumentDeep: documentElement not an ELEMENT_NODE")
self.confirm(doc2.documentElement.ownerDocument.isSameNode(doc2),
"testCloneDocumentDeep: documentElement owner is not new document")
self.confirm(not doc.documentElement.isSameNode(doc2.documentElement),
"testCloneDocumentDeep: documentElement should not be shared")
if doc.doctype is not None:
# check the doctype iff the original DOM maintained it
self.confirm(doc2.doctype.nodeType == Node.DOCUMENT_TYPE_NODE,
"testCloneDocumentDeep: doctype not a DOCUMENT_TYPE_NODE")
self.confirm(doc2.doctype.ownerDocument.isSameNode(doc2))
self.confirm(not doc.doctype.isSameNode(doc2.doctype))
def testCloneDocumentTypeDeepOk(self):
doctype = create_nonempty_doctype()
clone = doctype.cloneNode(1)
self.confirm(clone is not None
and clone.nodeName == doctype.nodeName
and clone.name == doctype.name
and clone.publicId == doctype.publicId
and clone.systemId == doctype.systemId
and len(clone.entities) == len(doctype.entities)
and clone.entities.item(len(clone.entities)) is None
and len(clone.notations) == len(doctype.notations)
and clone.notations.item(len(clone.notations)) is None
and len(clone.childNodes) == 0)
for i in range(len(doctype.entities)):
se = doctype.entities.item(i)
ce = clone.entities.item(i)
self.confirm((not se.isSameNode(ce))
and (not ce.isSameNode(se))
and ce.nodeName == se.nodeName
and ce.notationName == se.notationName
and ce.publicId == se.publicId
and ce.systemId == se.systemId
and ce.encoding == se.encoding
and ce.actualEncoding == se.actualEncoding
and ce.version == se.version)
for i in range(len(doctype.notations)):
sn = doctype.notations.item(i)
cn = clone.notations.item(i)
self.confirm((not sn.isSameNode(cn))
and (not cn.isSameNode(sn))
and cn.nodeName == sn.nodeName
and cn.publicId == sn.publicId
and cn.systemId == sn.systemId)
def testCloneDocumentTypeDeepNotOk(self):
doc = create_doc_with_doctype()
clone = doc.doctype.cloneNode(1)
self.confirm(clone is None, "testCloneDocumentTypeDeepNotOk")
def testCloneDocumentTypeShallowOk(self):
doctype = create_nonempty_doctype()
clone = doctype.cloneNode(0)
self.confirm(clone is not None
and clone.nodeName == doctype.nodeName
and clone.name == doctype.name
and clone.publicId == doctype.publicId
and clone.systemId == doctype.systemId
and len(clone.entities) == 0
and clone.entities.item(0) is None
and len(clone.notations) == 0
and clone.notations.item(0) is None
and len(clone.childNodes) == 0)
def testCloneDocumentTypeShallowNotOk(self):
doc = create_doc_with_doctype()
clone = doc.doctype.cloneNode(0)
self.confirm(clone is None, "testCloneDocumentTypeShallowNotOk")
def check_import_document(self, deep, testName):
doc1 = parseString("<doc/>")
doc2 = parseString("<doc/>")
self.assertRaises(xml.dom.NotSupportedErr, doc1.importNode, doc2, deep)
def testImportDocumentShallow(self):
self.check_import_document(0, "testImportDocumentShallow")
def testImportDocumentDeep(self):
self.check_import_document(1, "testImportDocumentDeep")
def testImportDocumentTypeShallow(self):
src = create_doc_with_doctype()
target = create_doc_without_doctype()
self.assertRaises(xml.dom.NotSupportedErr, target.importNode,
src.doctype, 0)
def testImportDocumentTypeDeep(self):
src = create_doc_with_doctype()
target = create_doc_without_doctype()
self.assertRaises(xml.dom.NotSupportedErr, target.importNode,
src.doctype, 1)
# Testing attribute clones uses a helper, and should always be deep,
# even if the argument to cloneNode is false.
def check_clone_attribute(self, deep, testName):
doc = parseString("<doc attr='value'/>")
attr = doc.documentElement.getAttributeNode("attr")
self.assertNotEqual(attr, None)
clone = attr.cloneNode(deep)
self.confirm(not clone.isSameNode(attr))
self.confirm(not attr.isSameNode(clone))
self.confirm(clone.ownerElement is None,
testName + ": ownerElement should be None")
self.confirm(clone.ownerDocument.isSameNode(attr.ownerDocument),
testName + ": ownerDocument does not match")
self.confirm(clone.specified,
testName + ": cloned attribute must have specified == True")
def testCloneAttributeShallow(self):
self.check_clone_attribute(0, "testCloneAttributeShallow")
def testCloneAttributeDeep(self):
self.check_clone_attribute(1, "testCloneAttributeDeep")
def check_clone_pi(self, deep, testName):
doc = parseString("<?target data?><doc/>")
pi = doc.firstChild
self.assertEqual(pi.nodeType, Node.PROCESSING_INSTRUCTION_NODE)
clone = pi.cloneNode(deep)
self.confirm(clone.target == pi.target
and clone.data == pi.data)
def testClonePIShallow(self):
self.check_clone_pi(0, "testClonePIShallow")
def testClonePIDeep(self):
self.check_clone_pi(1, "testClonePIDeep")
def testNormalize(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode("first"))
root.appendChild(doc.createTextNode("second"))
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2,
"testNormalize -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 1
and root.childNodes.length == 1
and root.firstChild is root.lastChild
and root.firstChild.data == "firstsecond"
, "testNormalize -- result")
doc.unlink()
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode(""))
doc.normalize()
self.confirm(len(root.childNodes) == 0
and root.childNodes.length == 0,
"testNormalize -- single empty node removed")
doc.unlink()
def testNormalizeCombineAndNextSibling(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode("first"))
root.appendChild(doc.createTextNode("second"))
root.appendChild(doc.createElement("i"))
self.confirm(len(root.childNodes) == 3
and root.childNodes.length == 3,
"testNormalizeCombineAndNextSibling -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2
and root.firstChild.data == "firstsecond"
and root.firstChild is not root.lastChild
and root.firstChild.nextSibling is root.lastChild
and root.firstChild.previousSibling is None
and root.lastChild.previousSibling is root.firstChild
and root.lastChild.nextSibling is None
, "testNormalizeCombinedAndNextSibling -- result")
doc.unlink()
def testNormalizeDeleteWithPrevSibling(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode("first"))
root.appendChild(doc.createTextNode(""))
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2,
"testNormalizeDeleteWithPrevSibling -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 1
and root.childNodes.length == 1
and root.firstChild.data == "first"
and root.firstChild is root.lastChild
and root.firstChild.nextSibling is None
and root.firstChild.previousSibling is None
, "testNormalizeDeleteWithPrevSibling -- result")
doc.unlink()
def testNormalizeDeleteWithNextSibling(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode(""))
root.appendChild(doc.createTextNode("second"))
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2,
"testNormalizeDeleteWithNextSibling -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 1
and root.childNodes.length == 1
and root.firstChild.data == "second"
and root.firstChild is root.lastChild
and root.firstChild.nextSibling is None
and root.firstChild.previousSibling is None
, "testNormalizeDeleteWithNextSibling -- result")
doc.unlink()
def testNormalizeDeleteWithTwoNonTextSiblings(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createElement("i"))
root.appendChild(doc.createTextNode(""))
root.appendChild(doc.createElement("i"))
self.confirm(len(root.childNodes) == 3
and root.childNodes.length == 3,
"testNormalizeDeleteWithTwoSiblings -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2
and root.firstChild is not root.lastChild
and root.firstChild.nextSibling is root.lastChild
and root.firstChild.previousSibling is None
and root.lastChild.previousSibling is root.firstChild
and root.lastChild.nextSibling is None
, "testNormalizeDeleteWithTwoSiblings -- result")
doc.unlink()
def testNormalizeDeleteAndCombine(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode(""))
root.appendChild(doc.createTextNode("second"))
root.appendChild(doc.createTextNode(""))
root.appendChild(doc.createTextNode("fourth"))
root.appendChild(doc.createTextNode(""))
self.confirm(len(root.childNodes) == 5
and root.childNodes.length == 5,
"testNormalizeDeleteAndCombine -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 1
and root.childNodes.length == 1
and root.firstChild is root.lastChild
and root.firstChild.data == "secondfourth"
and root.firstChild.previousSibling is None
and root.firstChild.nextSibling is None
, "testNormalizeDeleteAndCombine -- result")
doc.unlink()
def testNormalizeRecursion(self):
doc = parseString("<doc>"
"<o>"
"<i/>"
"t"
#
#x
"</o>"
"<o>"
"<o>"
"t2"
#x2
"</o>"
"t3"
#x3
"</o>"
#
"</doc>")
root = doc.documentElement
root.childNodes[0].appendChild(doc.createTextNode(""))
root.childNodes[0].appendChild(doc.createTextNode("x"))
root.childNodes[1].childNodes[0].appendChild(doc.createTextNode("x2"))
root.childNodes[1].appendChild(doc.createTextNode("x3"))
root.appendChild(doc.createTextNode(""))
self.confirm(len(root.childNodes) == 3
and root.childNodes.length == 3
and len(root.childNodes[0].childNodes) == 4
and root.childNodes[0].childNodes.length == 4
and len(root.childNodes[1].childNodes) == 3
and root.childNodes[1].childNodes.length == 3
and len(root.childNodes[1].childNodes[0].childNodes) == 2
and root.childNodes[1].childNodes[0].childNodes.length == 2
, "testNormalize2 -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2
and len(root.childNodes[0].childNodes) == 2
and root.childNodes[0].childNodes.length == 2
and len(root.childNodes[1].childNodes) == 2
and root.childNodes[1].childNodes.length == 2
and len(root.childNodes[1].childNodes[0].childNodes) == 1
and root.childNodes[1].childNodes[0].childNodes.length == 1
, "testNormalize2 -- childNodes lengths")
self.confirm(root.childNodes[0].childNodes[1].data == "tx"
and root.childNodes[1].childNodes[0].childNodes[0].data == "t2x2"
and root.childNodes[1].childNodes[1].data == "t3x3"
, "testNormalize2 -- joined text fields")
self.confirm(root.childNodes[0].childNodes[1].nextSibling is None
and root.childNodes[0].childNodes[1].previousSibling
is root.childNodes[0].childNodes[0]
and root.childNodes[0].childNodes[0].previousSibling is None
and root.childNodes[0].childNodes[0].nextSibling
is root.childNodes[0].childNodes[1]
and root.childNodes[1].childNodes[1].nextSibling is None
and root.childNodes[1].childNodes[1].previousSibling
is root.childNodes[1].childNodes[0]
and root.childNodes[1].childNodes[0].previousSibling is None
and root.childNodes[1].childNodes[0].nextSibling
is root.childNodes[1].childNodes[1]
, "testNormalize2 -- sibling pointers")
doc.unlink()
def testBug0777884(self):
doc = parseString("<o>text</o>")
text = doc.documentElement.childNodes[0]
self.assertEqual(text.nodeType, Node.TEXT_NODE)
# Should run quietly, doing nothing.
text.normalize()
doc.unlink()
def testBug1433694(self):
doc = parseString("<o><i/>t</o>")
node = doc.documentElement
node.childNodes[1].nodeValue = ""
node.normalize()
self.confirm(node.childNodes[-1].nextSibling is None,
"Final child's .nextSibling should be None")
def testSiblings(self):
doc = parseString("<doc><?pi?>text?<elm/></doc>")
root = doc.documentElement
(pi, text, elm) = root.childNodes
self.confirm(pi.nextSibling is text and
pi.previousSibling is None and
text.nextSibling is elm and
text.previousSibling is pi and
elm.nextSibling is None and
elm.previousSibling is text, "testSiblings")
doc.unlink()
def testParents(self):
doc = parseString(
"<doc><elm1><elm2/><elm2><elm3/></elm2></elm1></doc>")
root = doc.documentElement
elm1 = root.childNodes[0]
(elm2a, elm2b) = elm1.childNodes
elm3 = elm2b.childNodes[0]
self.confirm(root.parentNode is doc and
elm1.parentNode is root and
elm2a.parentNode is elm1 and
elm2b.parentNode is elm1 and
elm3.parentNode is elm2b, "testParents")
doc.unlink()
def testNodeListItem(self):
doc = parseString("<doc><e/><e/></doc>")
children = doc.childNodes
docelem = children[0]
self.confirm(children[0] is children.item(0)
and children.item(1) is None
and docelem.childNodes.item(0) is docelem.childNodes[0]
and docelem.childNodes.item(1) is docelem.childNodes[1]
and docelem.childNodes.item(0).childNodes.item(0) is None,
"test NodeList.item()")
doc.unlink()
def testEncodings(self):
        doc = parseString('<foo>&#x20ac;</foo>')
self.assertEqual(doc.toxml(),
'<?xml version="1.0" ?><foo>\u20ac</foo>')
self.assertEqual(doc.toxml('utf-8'),
b'<?xml version="1.0" encoding="utf-8"?><foo>\xe2\x82\xac</foo>')
self.assertEqual(doc.toxml('iso-8859-15'),
b'<?xml version="1.0" encoding="iso-8859-15"?><foo>\xa4</foo>')
self.assertEqual(doc.toxml('us-ascii'),
            b'<?xml version="1.0" encoding="us-ascii"?><foo>&#8364;</foo>')
self.assertEqual(doc.toxml('utf-16'),
'<?xml version="1.0" encoding="utf-16"?>'
'<foo>\u20ac</foo>'.encode('utf-16'))
# Verify that character decoding errors raise exceptions instead
# of crashing
self.assertRaises(UnicodeDecodeError, parseString,
b'<fran\xe7ais>Comment \xe7a va ? Tr\xe8s bien ?</fran\xe7ais>')
doc.unlink()
class UserDataHandler:
called = 0
def handle(self, operation, key, data, src, dst):
dst.setUserData(key, data + 1, self)
src.setUserData(key, None, None)
self.called = 1
def testUserData(self):
dom = Document()
n = dom.createElement('e')
self.confirm(n.getUserData("foo") is None)
n.setUserData("foo", None, None)
self.confirm(n.getUserData("foo") is None)
n.setUserData("foo", 12, 12)
n.setUserData("bar", 13, 13)
self.confirm(n.getUserData("foo") == 12)
self.confirm(n.getUserData("bar") == 13)
n.setUserData("foo", None, None)
self.confirm(n.getUserData("foo") is None)
self.confirm(n.getUserData("bar") == 13)
handler = self.UserDataHandler()
n.setUserData("bar", 12, handler)
c = n.cloneNode(1)
self.confirm(handler.called
and n.getUserData("bar") is None
and c.getUserData("bar") == 13)
n.unlink()
c.unlink()
dom.unlink()
def checkRenameNodeSharedConstraints(self, doc, node):
# Make sure illegal NS usage is detected:
self.assertRaises(xml.dom.NamespaceErr, doc.renameNode, node,
"http://xml.python.org/ns", "xmlns:foo")
doc2 = parseString("<doc/>")
self.assertRaises(xml.dom.WrongDocumentErr, doc2.renameNode, node,
xml.dom.EMPTY_NAMESPACE, "foo")
def testRenameAttribute(self):
doc = parseString("<doc a='v'/>")
elem = doc.documentElement
attrmap = elem.attributes
attr = elem.attributes['a']
# Simple renaming
attr = doc.renameNode(attr, xml.dom.EMPTY_NAMESPACE, "b")
self.confirm(attr.name == "b"
and attr.nodeName == "b"
and attr.localName is None
and attr.namespaceURI == xml.dom.EMPTY_NAMESPACE
and attr.prefix is None
and attr.value == "v"
and elem.getAttributeNode("a") is None
and elem.getAttributeNode("b").isSameNode(attr)
and attrmap["b"].isSameNode(attr)
and attr.ownerDocument.isSameNode(doc)
and attr.ownerElement.isSameNode(elem))
# Rename to have a namespace, no prefix
attr = doc.renameNode(attr, "http://xml.python.org/ns", "c")
self.confirm(attr.name == "c"
and attr.nodeName == "c"
and attr.localName == "c"
and attr.namespaceURI == "http://xml.python.org/ns"
and attr.prefix is None
and attr.value == "v"
and elem.getAttributeNode("a") is None
and elem.getAttributeNode("b") is None
and elem.getAttributeNode("c").isSameNode(attr)
and elem.getAttributeNodeNS(
"http://xml.python.org/ns", "c").isSameNode(attr)
and attrmap["c"].isSameNode(attr)
and attrmap[("http://xml.python.org/ns", "c")].isSameNode(attr))
# Rename to have a namespace, with prefix
attr = doc.renameNode(attr, "http://xml.python.org/ns2", "p:d")
self.confirm(attr.name == "p:d"
and attr.nodeName == "p:d"
and attr.localName == "d"
and attr.namespaceURI == "http://xml.python.org/ns2"
and attr.prefix == "p"
and attr.value == "v"
and elem.getAttributeNode("a") is None
and elem.getAttributeNode("b") is None
and elem.getAttributeNode("c") is None
and elem.getAttributeNodeNS(
"http://xml.python.org/ns", "c") is None
and elem.getAttributeNode("p:d").isSameNode(attr)
and elem.getAttributeNodeNS(
"http://xml.python.org/ns2", "d").isSameNode(attr)
and attrmap["p:d"].isSameNode(attr)
and attrmap[("http://xml.python.org/ns2", "d")].isSameNode(attr))
# Rename back to a simple non-NS node
attr = doc.renameNode(attr, xml.dom.EMPTY_NAMESPACE, "e")
self.confirm(attr.name == "e"
and attr.nodeName == "e"
and attr.localName is None
and attr.namespaceURI == xml.dom.EMPTY_NAMESPACE
and attr.prefix is None
and attr.value == "v"
and elem.getAttributeNode("a") is None
and elem.getAttributeNode("b") is None
and elem.getAttributeNode("c") is None
and elem.getAttributeNode("p:d") is None
and elem.getAttributeNodeNS(
"http://xml.python.org/ns", "c") is None
and elem.getAttributeNode("e").isSameNode(attr)
and attrmap["e"].isSameNode(attr))
self.assertRaises(xml.dom.NamespaceErr, doc.renameNode, attr,
"http://xml.python.org/ns", "xmlns")
self.checkRenameNodeSharedConstraints(doc, attr)
doc.unlink()
def testRenameElement(self):
doc = parseString("<doc/>")
elem = doc.documentElement
# Simple renaming
elem = doc.renameNode(elem, xml.dom.EMPTY_NAMESPACE, "a")
self.confirm(elem.tagName == "a"
and elem.nodeName == "a"
and elem.localName is None
and elem.namespaceURI == xml.dom.EMPTY_NAMESPACE
and elem.prefix is None
and elem.ownerDocument.isSameNode(doc))
# Rename to have a namespace, no prefix
elem = doc.renameNode(elem, "http://xml.python.org/ns", "b")
self.confirm(elem.tagName == "b"
and elem.nodeName == "b"
and elem.localName == "b"
and elem.namespaceURI == "http://xml.python.org/ns"
and elem.prefix is None
and elem.ownerDocument.isSameNode(doc))
# Rename to have a namespace, with prefix
elem = doc.renameNode(elem, "http://xml.python.org/ns2", "p:c")
self.confirm(elem.tagName == "p:c"
and elem.nodeName == "p:c"
and elem.localName == "c"
and elem.namespaceURI == "http://xml.python.org/ns2"
and elem.prefix == "p"
and elem.ownerDocument.isSameNode(doc))
# Rename back to a simple non-NS node
elem = doc.renameNode(elem, xml.dom.EMPTY_NAMESPACE, "d")
self.confirm(elem.tagName == "d"
and elem.nodeName == "d"
and elem.localName is None
and elem.namespaceURI == xml.dom.EMPTY_NAMESPACE
and elem.prefix is None
and elem.ownerDocument.isSameNode(doc))
self.checkRenameNodeSharedConstraints(doc, elem)
doc.unlink()
def testRenameOther(self):
# We have to create a comment node explicitly since not all DOM
# builders used with minidom add comments to the DOM.
doc = xml.dom.minidom.getDOMImplementation().createDocument(
xml.dom.EMPTY_NAMESPACE, "e", None)
node = doc.createComment("comment")
self.assertRaises(xml.dom.NotSupportedErr, doc.renameNode, node,
xml.dom.EMPTY_NAMESPACE, "foo")
doc.unlink()
def testWholeText(self):
doc = parseString("<doc>a</doc>")
elem = doc.documentElement
text = elem.childNodes[0]
self.assertEqual(text.nodeType, Node.TEXT_NODE)
self.checkWholeText(text, "a")
elem.appendChild(doc.createTextNode("b"))
self.checkWholeText(text, "ab")
elem.insertBefore(doc.createCDATASection("c"), text)
self.checkWholeText(text, "cab")
# make sure we don't cross other nodes
splitter = doc.createComment("comment")
elem.appendChild(splitter)
text2 = doc.createTextNode("d")
elem.appendChild(text2)
self.checkWholeText(text, "cab")
self.checkWholeText(text2, "d")
x = doc.createElement("x")
elem.replaceChild(x, splitter)
splitter = x
self.checkWholeText(text, "cab")
self.checkWholeText(text2, "d")
x = doc.createProcessingInstruction("y", "z")
elem.replaceChild(x, splitter)
splitter = x
self.checkWholeText(text, "cab")
self.checkWholeText(text2, "d")
elem.removeChild(splitter)
self.checkWholeText(text, "cabd")
self.checkWholeText(text2, "cabd")
def testPatch1094164(self):
doc = parseString("<doc><e/></doc>")
elem = doc.documentElement
e = elem.firstChild
self.confirm(e.parentNode is elem, "Before replaceChild()")
# Check that replacing a child with itself leaves the tree unchanged
elem.replaceChild(e, e)
self.confirm(e.parentNode is elem, "After replaceChild()")
def testReplaceWholeText(self):
def setup():
doc = parseString("<doc>a<e/>d</doc>")
elem = doc.documentElement
text1 = elem.firstChild
text2 = elem.lastChild
splitter = text1.nextSibling
elem.insertBefore(doc.createTextNode("b"), splitter)
elem.insertBefore(doc.createCDATASection("c"), text1)
return doc, elem, text1, splitter, text2
doc, elem, text1, splitter, text2 = setup()
text = text1.replaceWholeText("new content")
self.checkWholeText(text, "new content")
self.checkWholeText(text2, "d")
self.confirm(len(elem.childNodes) == 3)
doc, elem, text1, splitter, text2 = setup()
text = text2.replaceWholeText("new content")
self.checkWholeText(text, "new content")
self.checkWholeText(text1, "cab")
self.confirm(len(elem.childNodes) == 5)
doc, elem, text1, splitter, text2 = setup()
text = text1.replaceWholeText("")
self.checkWholeText(text2, "d")
self.confirm(text is None
and len(elem.childNodes) == 2)
def testSchemaType(self):
doc = parseString(
"<!DOCTYPE doc [\n"
" <!ENTITY e1 SYSTEM 'http://xml.python.org/e1'>\n"
" <!ENTITY e2 SYSTEM 'http://xml.python.org/e2'>\n"
" <!ATTLIST doc id ID #IMPLIED \n"
" ref IDREF #IMPLIED \n"
" refs IDREFS #IMPLIED \n"
" enum (a|b) #IMPLIED \n"
" ent ENTITY #IMPLIED \n"
" ents ENTITIES #IMPLIED \n"
" nm NMTOKEN #IMPLIED \n"
" nms NMTOKENS #IMPLIED \n"
" text CDATA #IMPLIED \n"
" >\n"
"]><doc id='name' notid='name' text='splat!' enum='b'"
" ref='name' refs='name name' ent='e1' ents='e1 e2'"
" nm='123' nms='123 abc' />")
elem = doc.documentElement
# We don't want to rely on any specific loader at this point, so
# just make sure we can get to all the names, and that the
# DTD-based namespace is right. The names can vary by loader
# since each supports a different level of DTD information.
t = elem.schemaType
self.confirm(t.name is None
and t.namespace == xml.dom.EMPTY_NAMESPACE)
names = "id notid text enum ref refs ent ents nm nms".split()
for name in names:
a = elem.getAttributeNode(name)
t = a.schemaType
self.confirm(hasattr(t, "name")
and t.namespace == xml.dom.EMPTY_NAMESPACE)
def testSetIdAttribute(self):
doc = parseString("<doc a1='v' a2='w'/>")
e = doc.documentElement
a1 = e.getAttributeNode("a1")
a2 = e.getAttributeNode("a2")
self.confirm(doc.getElementById("v") is None
and not a1.isId
and not a2.isId)
e.setIdAttribute("a1")
self.confirm(e.isSameNode(doc.getElementById("v"))
and a1.isId
and not a2.isId)
e.setIdAttribute("a2")
self.confirm(e.isSameNode(doc.getElementById("v"))
and e.isSameNode(doc.getElementById("w"))
and a1.isId
and a2.isId)
# replace the a1 node; the new node should *not* be an ID
a3 = doc.createAttribute("a1")
a3.value = "v"
e.setAttributeNode(a3)
self.confirm(doc.getElementById("v") is None
and e.isSameNode(doc.getElementById("w"))
and not a1.isId
and a2.isId
and not a3.isId)
# renaming an attribute should not affect its ID-ness:
doc.renameNode(a2, xml.dom.EMPTY_NAMESPACE, "an")
self.confirm(e.isSameNode(doc.getElementById("w"))
and a2.isId)
def testSetIdAttributeNS(self):
NS1 = "http://xml.python.org/ns1"
NS2 = "http://xml.python.org/ns2"
doc = parseString("<doc"
" xmlns:ns1='" + NS1 + "'"
" xmlns:ns2='" + NS2 + "'"
" ns1:a1='v' ns2:a2='w'/>")
e = doc.documentElement
a1 = e.getAttributeNodeNS(NS1, "a1")
a2 = e.getAttributeNodeNS(NS2, "a2")
self.confirm(doc.getElementById("v") is None
and not a1.isId
and not a2.isId)
e.setIdAttributeNS(NS1, "a1")
self.confirm(e.isSameNode(doc.getElementById("v"))
and a1.isId
and not a2.isId)
e.setIdAttributeNS(NS2, "a2")
self.confirm(e.isSameNode(doc.getElementById("v"))
and e.isSameNode(doc.getElementById("w"))
and a1.isId
and a2.isId)
# replace the a1 node; the new node should *not* be an ID
a3 = doc.createAttributeNS(NS1, "a1")
a3.value = "v"
e.setAttributeNode(a3)
self.confirm(e.isSameNode(doc.getElementById("w")))
self.confirm(not a1.isId)
self.confirm(a2.isId)
self.confirm(not a3.isId)
self.confirm(doc.getElementById("v") is None)
# renaming an attribute should not affect its ID-ness:
doc.renameNode(a2, xml.dom.EMPTY_NAMESPACE, "an")
self.confirm(e.isSameNode(doc.getElementById("w"))
and a2.isId)
def testSetIdAttributeNode(self):
NS1 = "http://xml.python.org/ns1"
NS2 = "http://xml.python.org/ns2"
doc = parseString("<doc"
" xmlns:ns1='" + NS1 + "'"
" xmlns:ns2='" + NS2 + "'"
" ns1:a1='v' ns2:a2='w'/>")
e = doc.documentElement
a1 = e.getAttributeNodeNS(NS1, "a1")
a2 = e.getAttributeNodeNS(NS2, "a2")
self.confirm(doc.getElementById("v") is None
and not a1.isId
and not a2.isId)
e.setIdAttributeNode(a1)
self.confirm(e.isSameNode(doc.getElementById("v"))
and a1.isId
and not a2.isId)
e.setIdAttributeNode(a2)
self.confirm(e.isSameNode(doc.getElementById("v"))
and e.isSameNode(doc.getElementById("w"))
and a1.isId
and a2.isId)
# replace the a1 node; the new node should *not* be an ID
a3 = doc.createAttributeNS(NS1, "a1")
a3.value = "v"
e.setAttributeNode(a3)
self.confirm(e.isSameNode(doc.getElementById("w")))
self.confirm(not a1.isId)
self.confirm(a2.isId)
self.confirm(not a3.isId)
self.confirm(doc.getElementById("v") is None)
# renaming an attribute should not affect its ID-ness:
doc.renameNode(a2, xml.dom.EMPTY_NAMESPACE, "an")
self.confirm(e.isSameNode(doc.getElementById("w"))
and a2.isId)
def testPickledDocument(self):
doc = parseString("<?xml version='1.0' encoding='us-ascii'?>\n"
"<!DOCTYPE doc PUBLIC 'http://xml.python.org/public'"
" 'http://xml.python.org/system' [\n"
" <!ELEMENT e EMPTY>\n"
" <!ENTITY ent SYSTEM 'http://xml.python.org/entity'>\n"
"]><doc attr='value'> text\n"
"<?pi sample?> <!-- comment --> <e/> </doc>")
s = pickle.dumps(doc)
doc2 = pickle.loads(s)
stack = [(doc, doc2)]
while stack:
n1, n2 = stack.pop()
self.confirm(n1.nodeType == n2.nodeType
and len(n1.childNodes) == len(n2.childNodes)
and n1.nodeName == n2.nodeName
and not n1.isSameNode(n2)
and not n2.isSameNode(n1))
if n1.nodeType == Node.DOCUMENT_TYPE_NODE:
len(n1.entities)
len(n2.entities)
len(n1.notations)
len(n2.notations)
self.confirm(len(n1.entities) == len(n2.entities)
and len(n1.notations) == len(n2.notations))
for i in range(len(n1.notations)):
# XXX this loop body doesn't seem to be executed?
no1 = n1.notations.item(i)
                    no2 = n2.notations.item(i)
self.confirm(no1.name == no2.name
and no1.publicId == no2.publicId
and no1.systemId == no2.systemId)
stack.append((no1, no2))
for i in range(len(n1.entities)):
e1 = n1.entities.item(i)
e2 = n2.entities.item(i)
self.confirm(e1.notationName == e2.notationName
and e1.publicId == e2.publicId
and e1.systemId == e2.systemId)
stack.append((e1, e2))
if n1.nodeType != Node.DOCUMENT_NODE:
self.confirm(n1.ownerDocument.isSameNode(doc)
and n2.ownerDocument.isSameNode(doc2))
for i in range(len(n1.childNodes)):
stack.append((n1.childNodes[i], n2.childNodes[i]))
def testSerializeCommentNodeWithDoubleHyphen(self):
doc = create_doc_without_doctype()
doc.appendChild(doc.createComment("foo--bar"))
self.assertRaises(ValueError, doc.toxml)
def testEmptyXMLNSValue(self):
doc = parseString("<element xmlns=''>\n"
"<foo/>\n</element>")
doc2 = parseString(doc.toxml())
self.confirm(doc2.namespaceURI == xml.dom.EMPTY_NAMESPACE)
def testExceptionOnSpacesInXMLNSValue(self):
with self.assertRaisesRegex(ValueError, 'Unsupported syntax'):
parseString('<element xmlns:abc="http:abc.com/de f g/hi/j k"><abc:foo /></element>')
def testDocRemoveChild(self):
doc = parse(tstfile)
title_tag = doc.documentElement.getElementsByTagName("TITLE")[0]
self.assertRaises( xml.dom.NotFoundErr, doc.removeChild, title_tag)
num_children_before = len(doc.childNodes)
doc.removeChild(doc.childNodes[0])
num_children_after = len(doc.childNodes)
self.assertTrue(num_children_after == num_children_before - 1)
def testProcessingInstructionNameError(self):
# wrong variable in .nodeValue property will
# lead to "NameError: name 'data' is not defined"
doc = parse(tstfile)
pi = doc.createProcessingInstruction("y", "z")
pi.nodeValue = "crash"
def test_main():
run_unittest(MinidomTest)
if __name__ == "__main__":
test_main()
| lgpl-3.0 | 8,152,278,292,653,398,000 | 40.636246 | 96 | 0.568415 | false |
datagrok/python-misc | datagrok/math/stats.py | 1 | 1192 | """Utilities for statistics"""
def sorted(xs):
"""Return a sorted copy of the list xs"""
_xs = list(xs)
_xs.sort()
return _xs
def stemleaf(ns):
"""Given a list of integers ns, print a stem-and-leaf display."""
return _stemleaf(sorted(ns))
def dsd(ns):
"""Given a list of integers ns, print a double-stem display."""
return _dsd(sorted(ns))
def fsd(ns):
"""Given a list of integers ns, print a five-stem display."""
return _fsd(sorted(ns))
def _stemleaf(ns):
"""Given a sorted list of integers ns, print a stem-and-leaf display."""
for q in range(10*(min(ns)/10), 10*(max(ns)/10+1), 10):
print "%d|%s" % (q/10, ''.join([str(x % 10) for x in ns if x<q+10 and x>=q]))
def _dsd(ns):
"""Given a sorted list of integers ns, print a double-stem display."""
for q in range(10*(min(ns)/10), 10*(max(ns)/10+1), 5):
print "%d|%s" % (q/10, ''.join([str(x % 10) for x in ns if x<q+5 and x>=q]))
def _fsd(ns):
"""Given a sorted list of integers ns, print a five-stem display."""
for q in range(10*(min(ns)/10), 10*(max(ns)/10+1), 2):
print "%d|%s" % (q/10, ''.join([str(x % 10) for x in ns if x<q+2 and x>=q]))
| agpl-3.0 | -1,492,373,900,817,290,500 | 34.058824 | 85 | 0.580537 | false |
funson/rt-xen | tools/python/xen/remus/qdisc.py | 22 | 4860 | import socket, struct
import netlink
qdisc_kinds = {}
TC_H_ROOT = 0xFFFFFFFF
class QdiscException(Exception): pass
class request(object):
"qdisc request message"
def __init__(self, cmd, flags=0, dev=None, handle=0):
self.n = netlink.nlmsg()
self.t = netlink.tcmsg()
self.n.nlmsg_flags = netlink.NLM_F_REQUEST|flags
self.n.nlmsg_type = cmd
self.t.tcm_family = socket.AF_UNSPEC
if not handle:
handle = TC_H_ROOT
self.t.tcm_parent = handle
if dev:
self.t.tcm_ifindex = dev
def pack(self):
t = self.t.pack()
self.n.body = t
return self.n.pack()
class addrequest(request):
def __init__(self, dev, handle, qdisc):
flags = netlink.NLM_F_EXCL|netlink.NLM_F_CREATE
super(addrequest, self).__init__(netlink.RTM_NEWQDISC, flags=flags,
dev=dev, handle=handle)
self.n.addattr(netlink.TCA_KIND, qdisc.kind + '\0')
opts = qdisc.pack()
if opts:
self.n.addattr(netlink.TCA_OPTIONS, opts)
class delrequest(request):
def __init__(self, dev, handle):
super(delrequest, self).__init__(netlink.RTM_DELQDISC, dev=dev,
handle=handle)
class changerequest(request):
def __init__(self, dev, handle, qdisc):
super(changerequest, self).__init__(netlink.RTM_NEWQDISC,
dev=dev, handle=handle)
self.n.addattr(netlink.TCA_KIND, qdisc.kind + '\0')
opts = qdisc.pack()
if opts:
self.n.addattr(netlink.TCA_OPTIONS, opts)
class Qdisc(object):
def __new__(cls, qdict=None, *args, **opts):
if qdict:
kind = qdict.get('kind')
cls = qdisc_kinds.get(kind, cls)
obj = super(Qdisc, cls).__new__(cls)
return obj
def __init__(self, qdict):
self._qdict = qdict
self.kind = qdict['kind']
self.handle = qdict['handle'] >> 16
def parse(self, opts):
if opts:
raise QdiscException('cannot parse qdisc parameters')
def optstr(self):
        if self._qdict['options']:
return '[cannot parse qdisc parameters]'
else:
return ''
def pack(self):
return ''
TC_PRIO_MAX = 15
class PrioQdisc(Qdisc):
fmt = 'i%sB' % (TC_PRIO_MAX + 1)
def __init__(self, qdict):
super(PrioQdisc, self).__init__(qdict)
if qdict.get('options'):
self.unpack(qdict['options'])
else:
self.bands = 3
self.priomap = [1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
def pack(self):
#return struct.pack(self.fmt, self.bands, *self.priomap)
return ''
def unpack(self, opts):
args = struct.unpack(self.fmt, opts)
self.bands = args[0]
self.priomap = args[1:]
def optstr(self):
mapstr = ' '.join([str(p) for p in self.priomap])
return 'bands %d priomap %s' % (self.bands, mapstr)
qdisc_kinds['prio'] = PrioQdisc
qdisc_kinds['pfifo_fast'] = PrioQdisc
class CfifoQdisc(Qdisc):
fmt = 'II'
def __init__(self, qdict):
super(CfifoQdisc, self).__init__(qdict)
if qdict.get('options'):
self.unpack(qdict['options'])
else:
self.epoch = 0
self.vmid = 0
def pack(self):
return struct.pack(self.fmt, self.epoch, self.vmid)
def unpack(self, opts):
self.epoch, self.vmid = struct.unpack(self.fmt, opts)
def parse(self, opts):
args = list(opts)
try:
while args:
arg = args.pop(0)
if arg == 'epoch':
self.epoch = int(args.pop(0))
continue
if arg.lower() == 'vmid':
self.vmid = int(args.pop(0))
continue
except Exception, inst:
raise QdiscException(str(inst))
def optstr(self):
return 'epoch %d vmID %d' % (self.epoch, self.vmid)
qdisc_kinds['cfifo'] = CfifoQdisc
TC_PLUG_CHECKPOINT = 0
TC_PLUG_RELEASE = 1
class PlugQdisc(Qdisc):
fmt = 'I'
def __init__(self, qdict=None):
if not qdict:
qdict = {'kind': 'plug',
'handle': TC_H_ROOT}
super(PlugQdisc, self).__init__(qdict)
self.action = 0
def pack(self):
return struct.pack(self.fmt, self.action)
def parse(self, args):
if not args:
raise QdiscException('no action given')
arg = args[0]
if arg == 'checkpoint':
self.action = TC_PLUG_CHECKPOINT
elif arg == 'release':
self.action = TC_PLUG_RELEASE
else:
raise QdiscException('unknown action')
qdisc_kinds['plug'] = PlugQdisc
| gpl-2.0 | -3,870,929,609,614,113,300 | 26.303371 | 75 | 0.533333 | false |
skillness/OpenNI | Externals/PSCommon/Windows/CreateRedist/CopyToRepository.py | 7 | 3337 | import os
import sys
import re
import time
import traceback
packageFullPath = "..\..\..\..\..\PrimeSenseVersions.nsh"
def find_package_number(findStr, text):
for line in text:
temp = re.search(findStr, line)
if temp != None:
packageNumber = temp.group(1)
return packageNumber
def copy_files_to_repository(SourcePath,RepositoryPath, BuildDate, PackageVersion, Bits, ProjectName,
Major_version, Minor_version, Maintenance_version, Build_version):
fullVersion = Major_version + "." + Minor_version + "." + Maintenance_version + "." + Build_version
destPath = os.path.join(RepositoryPath, BuildDate + "__" + PackageVersion, "Win" + Bits,
ProjectName + "-" + fullVersion)
os.system("rmdir /S /q " + destPath)
os.system("mkdir " + destPath)
os.system("xcopy /E /I " + SourcePath + " " + destPath)
def copy_zip_to_repository(SourcePath,RepositoryPath, BuildDate, PackageVersion, Bits, ProjectName,
Major_version, Minor_version, Maintenance_version, Build_version):
fullVersion = Major_version + "." + Minor_version + "." + Maintenance_version + "." + Build_version
destPath = os.path.join(RepositoryPath, BuildDate + "__" + PackageVersion, "Win" + Bits,
ProjectName + "-" + fullVersion)
os.system("rmdir /S /q " + destPath)
os.system("mkdir " + destPath)
os.system("xcopy /I " + SourcePath + " " + destPath)
def open_package_file(path):
files = open(path).readlines()
packageNumber = find_package_number("!define PACKAGE_VER\s+\"(\S+)\"", files)
return packageNumber
if __name__ == "__main__":
try:
if len(sys.argv) != 10:
print (("Usage: copyToRepository.py <FinalPath> <RepositoryPath> <BuildDate> <bits> " \
+ "<ProjectName> <Major_version> <Minor_version> <Maintenance_version> <Build_version>"))
sys.exit(1)
finalPath = sys.argv[1]
repositoryPath = sys.argv[2]
buildDate = sys.argv[3]
bits = sys.argv[4]
projectName = sys.argv[5]
major_version = sys.argv[6]
minor_version = sys.argv[7]
maintenance_version = sys.argv[8]
build_version = sys.argv[9]
packageNumber = ''
if not(os.path.exists(packageFullPath)):
# Redist of OpenNI openSource
packageFullPath = "..\..\..\..\..\..\PrimeSenseVersions.nsh"
packageNumber = open_package_file(packageFullPath)
if packageNumber == '':
sys.exit(1)
copy_zip_to_repository("..\..\..\..\*.zip",repositoryPath, buildDate, packageNumber, bits,
projectName, major_version, minor_version, maintenance_version, build_version)
else:
# Redist of OpenNI
packageNumber = open_package_file(packageFullPath)
if packageNumber == '':
sys.exit(1)
copy_files_to_repository(finalPath,repositoryPath, buildDate, packageNumber, bits,
projectName, major_version, minor_version, maintenance_version, build_version)
sys.exit(0)
except SystemExit as e:
sys.exit(e)
except:
print ((traceback.print_exc()))
sys.exit(1)
| apache-2.0 | -4,085,302,422,481,745,400 | 41.782051 | 111 | 0.591849 | false |
letolab/airy | airy/utils/translation/trans_null.py | 1 | 2647 | # These are versions of the functions in django.utils.translation.trans_real
# that don't actually do anything. This is purely for performance, so that
# settings.USE_I18N = False can use this module rather than trans_real.py.
import warnings
from airy.core.conf import settings
from airy.utils.encoding import force_unicode
from airy.utils.safestring import mark_safe, SafeData
def ngettext(singular, plural, number):
if number == 1: return singular
return plural
ngettext_lazy = ngettext
def ungettext(singular, plural, number):
return force_unicode(ngettext(singular, plural, number))
def pgettext(context, message):
return ugettext(message)
def npgettext(context, singular, plural, number):
return ungettext(singular, plural, number)
activate = lambda x: None
deactivate = deactivate_all = lambda: None
get_language = lambda: settings.LANGUAGE_CODE
get_language_bidi = lambda: settings.LANGUAGE_CODE in settings.LANGUAGES_BIDI
check_for_language = lambda x: True
# date formats shouldn't be looked up using gettext anymore. This
# is kept for backward compatibility
TECHNICAL_ID_MAP = {
"DATE_WITH_TIME_FULL": settings.DATETIME_FORMAT,
"DATE_FORMAT": settings.DATE_FORMAT,
"DATETIME_FORMAT": settings.DATETIME_FORMAT,
"TIME_FORMAT": settings.TIME_FORMAT,
"YEAR_MONTH_FORMAT": settings.YEAR_MONTH_FORMAT,
"MONTH_DAY_FORMAT": settings.MONTH_DAY_FORMAT,
}
def gettext(message):
result = TECHNICAL_ID_MAP.get(message, message)
if isinstance(message, SafeData):
return mark_safe(result)
return result
def ugettext(message):
return force_unicode(gettext(message))
gettext_noop = gettext_lazy = _ = gettext
def to_locale(language):
p = language.find('-')
if p >= 0:
return language[:p].lower()+'_'+language[p+1:].upper()
else:
return language.lower()
def get_language_from_request(request):
return settings.LANGUAGE_CODE
# get_date_formats and get_partial_date_formats aren't used anymore by Django
# but are kept for backward compatibility.
def get_date_formats():
warnings.warn(
'`django.utils.translation.get_date_formats` is deprecated. '
'Please update your code to use the new i18n aware formatting.',
DeprecationWarning
)
return settings.DATE_FORMAT, settings.DATETIME_FORMAT, settings.TIME_FORMAT
def get_partial_date_formats():
warnings.warn(
'`django.utils.translation.get_partial_date_formats` is deprecated. '
'Please update your code to use the new i18n aware formatting.',
DeprecationWarning
)
return settings.YEAR_MONTH_FORMAT, settings.MONTH_DAY_FORMAT
| bsd-2-clause | -2,453,132,263,570,895,400 | 32.935897 | 79 | 0.727238 | false |
shakamunyi/tensorflow | tensorflow/contrib/keras/python/keras/layers/convolutional_recurrent.py | 2 | 24940 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convolutional-recurrent layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.keras.python.keras import activations
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras import constraints
from tensorflow.contrib.keras.python.keras import initializers
from tensorflow.contrib.keras.python.keras import regularizers
from tensorflow.contrib.keras.python.keras.engine import InputSpec
from tensorflow.contrib.keras.python.keras.layers.recurrent import Recurrent
from tensorflow.contrib.keras.python.keras.utils import conv_utils
from tensorflow.python.framework import tensor_shape
class ConvRecurrent2D(Recurrent):
"""Abstract base class for convolutional recurrent layers.
Do not use in a model -- it's not a functional layer!
Arguments:
filters: Integer, the dimensionality of the output space
      (i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
dimensions of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the strides of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, time, ..., channels)`
while `channels_first` corresponds to
inputs with shape `(batch, time, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
go_backwards: Boolean (default False).
      If True, process the input sequence backwards.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
Input shape:
5D tensor with shape `(num_samples, timesteps, channels, rows, cols)`.
Output shape:
- if `return_sequences`: 5D tensor with shape
`(num_samples, timesteps, channels, rows, cols)`.
- else, 4D tensor with shape `(num_samples, channels, rows, cols)`.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an `Embedding` layer with the `mask_zero` parameter
set to `True`.
**Note:** for the time being, masking is only supported with Theano.
# Note on using statefulness in RNNs
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch.
This assumes a one-to-one mapping between
samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
           a `batch_input_shape=(...)` to the first layer in your model.
This is the expected shape of your inputs *including the batch
size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
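      For instance, a minimal sketch (the concrete layer, batch size and
      shapes below are illustrative assumptions; imports as in the
      `ConvLSTM2D` example further down this module):
          model = Sequential()
          model.add(ConvLSTM2D(filters=16, kernel_size=(3, 3), stateful=True,
                               batch_input_shape=(8, 10, 40, 40, 1)))
          # ... feed batches of 8 samples in order, then:
          model.reset_states()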
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
return_sequences=False,
go_backwards=False,
stateful=False,
**kwargs):
super(ConvRecurrent2D, self).__init__(**kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2,
'dilation_rate')
self.return_sequences = return_sequences
self.go_backwards = go_backwards
self.stateful = stateful
self.input_spec = [InputSpec(ndim=5)]
self.state_spec = None
def _compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
rows = input_shape[3]
cols = input_shape[4]
elif self.data_format == 'channels_last':
rows = input_shape[2]
cols = input_shape[3]
rows = conv_utils.conv_output_length(
rows,
self.kernel_size[0],
padding=self.padding,
stride=self.strides[0],
dilation=self.dilation_rate[0])
cols = conv_utils.conv_output_length(
cols,
self.kernel_size[1],
padding=self.padding,
stride=self.strides[1],
dilation=self.dilation_rate[1])
if self.return_sequences:
if self.data_format == 'channels_first':
output_shape = [input_shape[0], input_shape[1],
self.filters, rows, cols]
elif self.data_format == 'channels_last':
output_shape = [input_shape[0], input_shape[1],
rows, cols, self.filters]
else:
if self.data_format == 'channels_first':
output_shape = [input_shape[0], self.filters, rows, cols]
elif self.data_format == 'channels_last':
output_shape = [input_shape[0], rows, cols, self.filters]
if self.return_state:
if self.data_format == 'channels_first':
output_shapes = [output_shape] + [(input_shape[0],
self.filters,
rows,
cols) for _ in range(2)]
elif self.data_format == 'channels_last':
output_shapes = [output_shape] + [(input_shape[0],
rows,
cols,
self.filters) for _ in range(2)]
return [tensor_shape.TensorShape(shape) for shape in output_shapes]
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'return_sequences': self.return_sequences,
'go_backwards': self.go_backwards,
'stateful': self.stateful
}
base_config = super(ConvRecurrent2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ConvLSTM2D(ConvRecurrent2D):
"""Convolutional LSTM.
It is similar to an LSTM layer, but the input transformations
and recurrent transformations are both convolutional.
Arguments:
filters: Integer, the dimensionality of the output space
      (i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
dimensions of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the strides of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, time, ..., channels)`
while `channels_first` corresponds to
inputs with shape `(batch, time, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
      used for the linear transformation of the inputs.
    recurrent_initializer: Initializer for the `recurrent_kernel`
      weights matrix,
      used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Use in combination with `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
go_backwards: Boolean (default False).
      If True, process the input sequence backwards.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
Input shape:
- if data_format='channels_first'
5D tensor with shape:
`(samples,time, channels, rows, cols)`
- if data_format='channels_last'
5D tensor with shape:
`(samples,time, rows, cols, channels)`
Output shape:
- if `return_sequences`
- if data_format='channels_first'
5D tensor with shape:
`(samples, time, filters, output_row, output_col)`
- if data_format='channels_last'
5D tensor with shape:
`(samples, time, output_row, output_col, filters)`
- else
- if data_format ='channels_first'
4D tensor with shape:
`(samples, filters, output_row, output_col)`
- if data_format='channels_last'
4D tensor with shape:
`(samples, output_row, output_col, filters)`
where o_row and o_col depend on the shape of the filter and
the padding
Raises:
ValueError: in case of invalid constructor arguments.
References:
- [Convolutional LSTM Network: A Machine Learning Approach for
Precipitation Nowcasting](http://arxiv.org/abs/1506.04214v1)
The current implementation does not include the feedback loop on the
cells output
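  Example:
      # A minimal usage sketch.  The import paths follow this contrib build;
      # the shapes, filter count, optimizer and loss are illustrative
      # assumptions, not requirements.
      import numpy as np
      from tensorflow.contrib.keras.python.keras.models import Sequential
      from tensorflow.contrib.keras.python.keras.layers.convolutional_recurrent import ConvLSTM2D
      model = Sequential()
      model.add(ConvLSTM2D(filters=8, kernel_size=(3, 3), padding='same',
                           return_sequences=False,
                           input_shape=(None, 16, 16, 1)))
      model.compile(optimizer='adam', loss='mse')
      frames = np.random.random((4, 5, 16, 16, 1))  # (samples, time, rows, cols, channels)
      output = model.predict(frames)                # shape: (4, 16, 16, 8)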
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
return_sequences=False,
go_backwards=False,
stateful=False,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(ConvLSTM2D, self).__init__(
filters,
kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
return_sequences=return_sequences,
go_backwards=go_backwards,
stateful=stateful,
**kwargs)
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_spec = [InputSpec(ndim=4), InputSpec(ndim=4)]
def build(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
input_shape = tuple(tensor_shape.TensorShape(input_shape).as_list())
batch_size = input_shape[0] if self.stateful else None
self.input_spec[0] = InputSpec(shape=(batch_size, None) + input_shape[2:])
if self.stateful:
self.reset_states()
else:
# initial states: 2 all-zero tensor of shape (filters)
self.states = [None, None]
if self.data_format == 'channels_first':
channel_axis = 2
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
state_shape = [None] * 4
state_shape[channel_axis] = input_dim
state_shape = tuple(state_shape)
self.state_spec = [
InputSpec(shape=state_shape),
InputSpec(shape=state_shape)
]
kernel_shape = self.kernel_size + (input_dim, self.filters * 4)
self.kernel_shape = kernel_shape
recurrent_kernel_shape = self.kernel_size + (self.filters, self.filters * 4)
self.kernel = self.add_weight(
shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=recurrent_kernel_shape,
initializer=self.recurrent_initializer,
name='recurrent_kernel',
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.filters * 4,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
if self.unit_forget_bias:
bias_value = np.zeros((self.filters * 4,))
bias_value[self.filters:self.filters * 2] = 1.
K.set_value(self.bias, bias_value)
else:
self.bias = None
self.kernel_i = self.kernel[:, :, :, :self.filters]
self.recurrent_kernel_i = self.recurrent_kernel[:, :, :, :self.filters]
self.kernel_f = self.kernel[:, :, :, self.filters:self.filters * 2]
self.recurrent_kernel_f = self.recurrent_kernel[:, :, :, self.filters:
self.filters * 2]
self.kernel_c = self.kernel[:, :, :, self.filters * 2:self.filters * 3]
self.recurrent_kernel_c = self.recurrent_kernel[:, :, :, self.filters * 2:
self.filters * 3]
self.kernel_o = self.kernel[:, :, :, self.filters * 3:]
self.recurrent_kernel_o = self.recurrent_kernel[:, :, :, self.filters * 3:]
if self.use_bias:
self.bias_i = self.bias[:self.filters]
self.bias_f = self.bias[self.filters:self.filters * 2]
self.bias_c = self.bias[self.filters * 2:self.filters * 3]
self.bias_o = self.bias[self.filters * 3:]
else:
self.bias_i = None
self.bias_f = None
self.bias_c = None
self.bias_o = None
self.built = True
def get_initial_state(self, inputs):
# (samples, timesteps, rows, cols, filters)
initial_state = K.zeros_like(inputs)
# (samples, rows, cols, filters)
initial_state = K.sum(initial_state, axis=1)
shape = list(self.kernel_shape)
shape[-1] = self.filters
initial_state = self.input_conv(
initial_state, K.zeros(tuple(shape)), padding=self.padding)
initial_states = [initial_state for _ in range(2)]
return initial_states
def reset_states(self):
if not self.stateful:
raise RuntimeError('Layer must be stateful.')
input_shape = self.input_spec[0].shape
if not input_shape[0]:
raise ValueError('If a RNN is stateful, a complete '
'input_shape must be provided '
'(including batch size). '
'Got input shape: ' + str(input_shape))
if self.return_state:
output_shape = tuple(self._compute_output_shape(input_shape)[0].as_list())
else:
output_shape = tuple(self._compute_output_shape(input_shape).as_list())
if self.return_sequences:
output_shape = (input_shape[0],) + output_shape[2:]
else:
output_shape = (input_shape[0],) + output_shape[1:]
if hasattr(self, 'states'):
K.set_value(self.states[0],
np.zeros(output_shape))
K.set_value(self.states[1],
np.zeros(output_shape))
else:
self.states = [
K.zeros(output_shape),
K.zeros(output_shape)
]
def get_constants(self, inputs, training=None):
constants = []
if self.implementation == 0 and 0 < self.dropout < 1:
ones = K.zeros_like(inputs)
ones = K.sum(ones, axis=1)
ones += 1
def dropped_inputs():
return K.dropout(ones, self.dropout)
dp_mask = [
K.in_train_phase(dropped_inputs, ones, training=training)
for _ in range(4)
]
constants.append(dp_mask)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
if 0 < self.recurrent_dropout < 1:
shape = list(self.kernel_shape)
shape[-1] = self.filters
ones = K.zeros_like(inputs)
ones = K.sum(ones, axis=1)
ones = self.input_conv(ones, K.zeros(shape), padding=self.padding)
ones += 1.
def dropped_inputs(): # pylint: disable=function-redefined
return K.dropout(ones, self.recurrent_dropout)
rec_dp_mask = [
K.in_train_phase(dropped_inputs, ones, training=training)
for _ in range(4)
]
constants.append(rec_dp_mask)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
return constants
def input_conv(self, x, w, b=None, padding='valid'):
conv_out = K.conv2d(
x,
w,
strides=self.strides,
padding=padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if b is not None:
conv_out = K.bias_add(conv_out, b, data_format=self.data_format)
return conv_out
def reccurent_conv(self, x, w):
conv_out = K.conv2d(
x, w, strides=(1, 1), padding='same', data_format=self.data_format)
return conv_out
def step(self, inputs, states):
assert len(states) == 4
h_tm1 = states[0]
c_tm1 = states[1]
dp_mask = states[2]
rec_dp_mask = states[3]
x_i = self.input_conv(
inputs * dp_mask[0], self.kernel_i, self.bias_i, padding=self.padding)
x_f = self.input_conv(
inputs * dp_mask[1], self.kernel_f, self.bias_f, padding=self.padding)
x_c = self.input_conv(
inputs * dp_mask[2], self.kernel_c, self.bias_c, padding=self.padding)
x_o = self.input_conv(
inputs * dp_mask[3], self.kernel_o, self.bias_o, padding=self.padding)
h_i = self.reccurent_conv(h_tm1 * rec_dp_mask[0], self.recurrent_kernel_i)
h_f = self.reccurent_conv(h_tm1 * rec_dp_mask[1], self.recurrent_kernel_f)
h_c = self.reccurent_conv(h_tm1 * rec_dp_mask[2], self.recurrent_kernel_c)
h_o = self.reccurent_conv(h_tm1 * rec_dp_mask[3], self.recurrent_kernel_o)
i = self.recurrent_activation(x_i + h_i)
f = self.recurrent_activation(x_f + h_f)
c = f * c_tm1 + i * self.activation(x_c + h_c)
o = self.recurrent_activation(x_o + h_o)
h = o * self.activation(c)
return h, [h, c]
def get_config(self):
config = {
'activation':
activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'unit_forget_bias':
self.unit_forget_bias,
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout
}
base_config = super(ConvLSTM2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| apache-2.0 | 2,763,449,772,239,218,000 | 40.087315 | 80 | 0.623897 | false |
wrongtest/nnlight | src/computation_on_java_impl/layers/pooling.py | 1 | 1033 | from layer.basic.pooling import MaxPoolingWithTimeLayer as MaxPoolingWithTimeLayerBase
class MaxPoolingWithTimeLayer(MaxPoolingWithTimeLayerBase):
def get_computation_on_java_code(self, code, binder):
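        """Emit Java code that max-pools each feature over the time axis.
        A sketch of the generated Java (variable names and the `float`
        element type are illustrative; the real ones come from the binder
        and the layer's data type at generation time):
            int samples = input.length;
            int length = input[0].length;
            int features = input[0][0].length;
            for (int i = 0; i < samples; i++)
                for (int j = 0; j < features; j++) {
                    float maximum = input[i][0][j];
                    for (int k = 1; k < length; k++)
                        if (maximum < input[i][k][j])
                            maximum = input[i][k][j];
                    output[i][j] = maximum;
                }
        """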
datatype = binder.get_base_type(self.input)
input_var = binder.get_name(self.input)
output_var = binder.get_name(self.output)
code.field("int", "samples", val=input_var + ".length")
code.field("int", "length", val=input_var + "[0].length")
code.field("int", "features", val=input_var + "[0][0].length")
code.begin_for("int i=0; i<samples; i++")
code.begin_for("int j=0; j<features; j++")
code.field(datatype, "maximum", val=input_var + "[i][0][j]")
code.begin_for("int k=1; k<length; k++")
code.begin_if("maximum < %s[i][k][j]" % input_var)
code.assignment("maximum", "%s[i][k][j]" % input_var)
code.end()
code.end()
code.assignment(output_var + "[i][j]", "maximum")
code.end()
code.end()
| gpl-2.0 | 729,158,902,350,442,100 | 43.913043 | 86 | 0.601162 | false |
bitpay/bitcoin | qa/rpc-tests/getchaintips.py | 66 | 2133 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the getchaintips API. We introduce a network split, work
# on chains of different lengths, and join the network together again.
# This gives us two tips; we verify that both are reported.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class GetChainTipsTest (BitcoinTestFramework):
def run_test (self):
BitcoinTestFramework.run_test (self)
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 1)
assert_equal (tips[0]['branchlen'], 0)
assert_equal (tips[0]['height'], 200)
assert_equal (tips[0]['status'], 'active')
# Split the network and build two chains of different lengths.
self.split_network ()
self.nodes[0].generate(10)
self.nodes[2].generate(20)
self.sync_all ()
tips = self.nodes[1].getchaintips ()
assert_equal (len (tips), 1)
shortTip = tips[0]
assert_equal (shortTip['branchlen'], 0)
assert_equal (shortTip['height'], 210)
assert_equal (tips[0]['status'], 'active')
tips = self.nodes[3].getchaintips ()
assert_equal (len (tips), 1)
longTip = tips[0]
assert_equal (longTip['branchlen'], 0)
assert_equal (longTip['height'], 220)
assert_equal (tips[0]['status'], 'active')
# Join the network halves and check that we now have two tips
# (at least at the nodes that previously had the short chain).
self.join_network ()
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 2)
assert_equal (tips[0], longTip)
assert_equal (tips[1]['branchlen'], 10)
assert_equal (tips[1]['status'], 'valid-fork')
tips[1]['branchlen'] = 0
tips[1]['status'] = 'active'
assert_equal (tips[1], shortTip)
if __name__ == '__main__':
GetChainTipsTest ().main ()
| mit | 7,234,932,446,776,980,000 | 35.152542 | 70 | 0.627286 | false |
denys-duchier/Scolar | config/softs/jaxml-3.01/jaxml.py | 2 | 49250 | # Module for XML, HTML and CGI output
# jaxml
# (C) Jerome Alet <[email protected]> 2000-2002
# You're welcome to redistribute this software under the
# terms of the GNU General Public Licence version 2.0
# or, at your option, any higher version.
#
# You can read the complete GNU GPL in the file COPYING
# which should come along with this software, or visit
# the Free Software Foundation's WEB site http://www.fsf.org
#
# $Id: jaxml.py,v 1.43 2003/06/26 06:59:32 jerome Exp $
#
# $Log: jaxml.py,v $
# Revision 1.43 2003/06/26 06:59:32 jerome
# Small fix.
#
# Revision 1.42 2003/02/13 14:36:09 jerome
# Version 3.0
#
# Revision 1.41 2003/02/13 10:33:58 jerome
# Version number changed to 3.0beta
# Named _push() and _pop() possibility (untested)
# Complete namespaces support thanks to Jean Jordaan
#
# Revision 1.40 2002/04/25 09:08:34 jerome
# New copyright strings
#
# Revision 1.39 2002/03/02 09:19:36 jerome
# typo in _do_nothing() in CGI scripts
#
# Revision 1.38 2001/04/23 12:17:08 jerome
# Nothing is output when there's no content to output.
#
# Revision 1.37 2001/02/23 15:02:49 jerome
# Correction of a minor bug which prevented headers from being kept correct
# when adding or multiplying documents
#
# Revision 1.36 2001/02/22 08:27:07 jerome
# The copy module is not needed anymore.
#
# Revision 1.35 2001/02/21 16:26:15 jerome
# Version number changed to 2.21
# The _updatemapping() method now returns the new mapping's content.
#
# Revision 1.34 2001/02/21 11:54:56 jerome
# Typo
#
# Revision 1.33 2001/02/21 11:40:47 jerome
# - version number changed to 2.20
# - basic arithmetic operations can now be made on XML_document
# instances, these constructs are now accepted:
#
# firstdoc + seconddoc
# seconddoc + firstdoc
#
# Where firstdoc is an instance of XML_document
# or one of its subclasses, and seconddoc is
# either an instance of XML_document or one of
# its subclasses or a string of text.
#
# yourdoc * intvalue
# intvalue * yourdoc
#
# Will repeat your document just like the * operator
# works with strings of text.
#
# - an infinite loop problem occurred when doing a dir(yourdoc),
#   it is now corrected, but as a consequence every method
# name beginning with "__" can't be used as a tag name.
# This shouldn't cause any problem, because tag names
# beginning with "__" are probably a very bad idea, if allowed
# at all.
# - an _updatemapping method was added to allow you to initialise
# or update the internal mapping used for the new templating
# facility.
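#
#   Example (an illustrative sketch of the constructs above; the tag and
#   attribute names are invented):
#
#       import jaxml
#       doc = jaxml.XML_document()
#       doc.greeting("hello", lang="en")
#       tripled = doc * 3                   # three copies of the document
#       page = tripled + "<!-- footer -->"  # append a string of text
#       page["hello"] = "bonjour"           # replaced on rendition
#       print str(page)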
#
# Revision 1.32 2001/02/19 13:42:10 jerome
# Suppressed a remaining debugging test
#
# Revision 1.31 2001/02/19 13:38:38 jerome
# Version changed to 2.10
# Added a new templating method, using documents as pseudo mappings:
# mydoc["some text"] = "another text"
# will replace all occurences of "some text" with "another text" on
# rendition (only), i.e. when either str() or repr() are called.
# Truth value can now be tested: empty documents return false.
#
# Revision 1.30 2001/02/14 10:49:20 jerome
# Typo
#
# Revision 1.29 2001/02/14 10:48:44 jerome
# Version number changed to 2.10
# Docstrings added to the _TAGGED_document.Tag class
# __repr__ is defined once for all
#
# Revision 1.28 2001/02/06 09:50:30 jerome
# Added documentation for the _template() method
# Added some doc for the HTML_document() and CGI_document() classes
#
# Revision 1.27 2001/02/05 16:03:59 jerome
# The CGI_document() constructor now accepts version and encoding arguments
#
# Revision 1.26 2001/02/05 14:49:55 jerome
# Exit code when using the old Html_document class was set to -1 (unsuccessful) instead of 0 (successful)
#
# Revision 1.25 2001/02/05 14:43:10 jerome
# Version number changed to 2.00beta1
#
# Revision 1.24 2001/02/05 14:31:07 jerome
# Version number changed to 2.00
# jaxml now includes what was in the old jahtml module, and features two new
# classes: HTML_document() and CGI_document().
# jaxml's API hasn't changed.
# jahtml's old API was changed to better match jaxml's one.
#
# ========================================================================
# = You don't need the old jahtml module anymore, but before removing it =
# = you must modify your programs to take care of the new API. =
# ========================================================================
#
# Revision 1.23 2001/01/26 12:43:16 jerome
# Rollback on "speed optimisations"
#
# Revision 1.22 2001/01/26 11:01:44 jerome
# The reduce line is commented out because it is much slower than string.join + map
#
# Revision 1.21 2001/01/26 10:44:07 jerome
# Another speed optimisation
#
# Revision 1.20 2001/01/26 10:08:29 jerome
# Large scale speed optimisations
#
# Revision 1.19 2001/01/25 15:09:34 jerome
# Another optimisation
#
# Revision 1.18 2001/01/25 15:01:57 jerome
# Small speed optimisation in the _pop() method
#
# Revision 1.17 2001/01/25 13:28:48 jerome
# Version number changed to 1.26
# The notation for naming spaces was introduced:
#
# doc.space.tag(...)
#
# will produce:
#
# <space:tag>
# ...
# </space:tag>
#
# Revision 1.16 2001/01/25 12:22:03 jerome
# A new useful notation was introduced, you can now
# do something like:
#
# doc.onetag("...", attr="yes").othertag().adnauseam("easy tag nesting")
#
# Revision 1.15 2001/01/25 11:25:50 jerome
# Version number changed to 1.24
# Tags which enclose nothing are now handled correctly
# Calls to yourtag("Some text", dummy="DUMMY") will
# now produce:
#
# <yourtag dummy="DUMMY">Some text</yourtag>
#
# instead of :
#
# <yourtag dummy="DUMMY">
# Some text
# </yourtag>
#
# Some changes to the test program to reflect the new behaviour
#
# Revision 1.14 2001/01/23 10:30:24 jerome
# The _output() method now accepts None as its file argument
# Minor changes to the documentation
# Copyright year changed to 2000-2001
#
# Revision 1.13 2000/10/04 11:50:30 jerome
# The license is correctly set to "GNU GPL" in setup.py
# Version number change to 1.23
#
# Revision 1.12 2000/09/29 13:49:36 jerome
# The documentation referenced a non existing file.
#
# Revision 1.11 2000/09/29 13:25:37 jerome
# Small bug correction with empty text, use None instead
#
# Revision 1.10 2000/09/29 11:14:18 jerome
# The traceback module is not needed anymore
#
# Revision 1.9 2000/09/29 11:02:26 jerome
# With the help of Kragen Sitaker idea posted on comp.lang.python,
# the speed increase factor is now almost 2.5 compared to the 1.1 version.
# Test made on the test.py program launched 5000 times.
#
# Revision 1.8 2000/09/29 08:55:04 jerome
# Near 13% speed optimisation on the test program launched 5000 times.
#
# Revision 1.7 2000/09/29 08:43:30 jerome
# Optimisations
#
# Revision 1.6 2000/09/29 07:42:52 jerome
# Version number changed to 1.2
#
# Revision 1.5 2000/09/28 10:06:09 jerome
# The frenglish word "imbricated" was replaced by the correct english one "nested",
# thanks to Kragen Sitaker.
# Version number changed to 1.1 because seems stable and want more testers: the
# Freshmeat Version Number Effect ;-)
#
# Revision 1.4 2000/09/15 08:30:41 jerome
# Version string and Documentation string added.
#
# Revision 1.3 2000/09/15 08:27:10 jerome
# Clarification on the licensing issue.
# General documentation changes.
# No code changes but version set to 0.3
#
# Revision 1.2 2000/09/14 07:15:29 jerome
# All tag attributes values are now quoted correctly.
# Using attributes with no value at all is not allowed anymore.
# Now xmllib doesn't complain anymore on sampleXML.py output.
#
#
import sys
import os
import string
import cStringIO
import time
__version__ = "3.01"
__doc__ = """
This python module defines a class named XML_document which will
allow you to generate XML documents (yeah !) more easily than
using print or similar functions.
Here's a list of available methods:
===================================
__init__(version, encoding)
The instance constructor, automatically called
when you create a new instance of XML_document.
you can optionally pass a version and encoding
string, the defaults are "1.0" and "iso-8859-1".
_indentstring(istr)
istr is the new indentation string used
to nicely present your XML documents. By
default istr is equal to 4 space characters.
_output(filearg)
use it to save the XML document to a file.
The optional filearg argument may be:
None, "", or "-" which stands for sys.stdout.
a file name.
any file object.
_text(sometext)
use it to insert plain text at the current position
in the document.
_push()
saves the current position in the XML document.
use it if you're going to create a bunch of nested
XML tags and want to escape from them later to continue
your document at the same indentation level.
you can pass an optional 'name' argument, to mark
a position by its name.
_pop()
restores the latest saved position.
use it to escape from nested tags and continue
your XML document at the same indentation level as
the last time you called _push().
you can pass an optional 'name' argument, to continue
at the same indentation level as when you called _push()
with the same 'name' argument.
_template(file, **vars)
loads a template file and insert it as plain text at the current
position in the document, replacing ##varname## variables
in the template file with their corresponding value passed
in vars[varname]
_updatemapping(newmap)
updates the internal mapping used for replacing some strings with
others when rendering. This can be used as an easy way to
do templating without the need of an external file.
Pass None or no argument to reset the mapping to an empty one.
This method returns the new mapping's content.
Some more methods are available but not meant to be used directly, they
are: __nonzero__, __getitem__, __setitem__, __delitem__, __coerce__, __add__,
__radd__, __mul__, __rmul__, and __copy__. They are used automatically when doing
special things, read the source for details.
ANY and ALL other method you may call will be treated as an XML
tag, unless it already exists as a method in XML_document or a subclass of it,
or its name begins with "__". I suggest you only add methods whose names
begin with '_' to keep things simple and clear: "__" is reserved for future
use.
The file test/test.py is an example program which generates
some documents, just play with it (use and modify) and you'll
learn quickly how to use jaxml. Its source code is documented and
attempts at describing and trying all jaxml's possibilities, so reading
it is probably the best way to become powerful with jaxml in less than
10 minutes.
Really, PLEASE READ the file test/test.py to learn all possibilities.
=========================================================================
Since version 2.00, jaxml integrates the full functionality of the
old jahtml module via the HTML_document and CGI_document classes, however
the API for these two classes has changed to be cleaner and doesn't use any
predefined set of tags.
The HTML_document() and CGI_document() classes both inherit from XML_document()
and all its methods (see above), but also feature some useful helper methods.
Please read the jaxml module sources and the test/test.py program to learn how
to use them.
=========================================================================
The only difficult things are:
------------------------------
* you have to use the _push() and _pop() methods if you need
to get out of a bunch of nested tags.
* if you call a method (tag) with a string as the first
unnamed parameter, you won't need _push() or _pop()
because your tag will be automatically closed immediately.
* if you call a method (tag) with a python mapping as the
first or second unnamed parameter, this mapping is used
to correctly handle XML namespaces or attributes
which are python reserved words (e.g. class), please
look at test/test.py to see an example.
"""
class _TAGGED_document :
"""This class defines a tagged document"""
class Tag :
"""This class defines a tag
This is largely inspired from a post in comp.lang.python
by Kragen Sitaker at the end of September 2000. Many
thanks to him !!!
"""
def __init__(self, parent, tagname) :
"""Save a link to the parent and the name of the tag for future reference
parent
The parent object, probably a _TAGGED_document instance.
tagname
The name of this tag
"""
self.__parent = parent
self.__tagname = tagname
def __call__(self, _text_ = None, *nsattributes, **attributes) :
"""Inserts the tag and its attributes in the document
_text_
optionally a string to be enclosed in the tag. The
name _text_ was chosen so as not to conflict with a probable user's attribute
called 'text'
"""
#
# NameSpace idea from Jean Jordaan
if type(_text_) == type({}) :
nsattributes = (_text_, )
_text_ = None
nsargs = ""
lg = len(nsattributes)
if (lg > 1) :
raise ValueError, "jaxml: Invalid attributes %s" % str(nsattributes[0])
elif lg :
nsattr = nsattributes[0]
try :
for ns in nsattr.keys() :
tags = nsattr[ns]
try :
for tag in tags.keys() :
nsargs = nsargs + ' %s%s%s="%s"' % (ns, (ns and ':'), tag, str(tags[tag]))
except AttributeError :
nsargs = nsargs + ' %s="%s"' % (ns, str(tags))
except AttributeError :
raise ValueError, "jaxml: Invalid attributes %s" % str(nsattr)
# first, we compute the attributes string
# we voluntarily do the test because of the speed optimisation
# it gives when there's no attribute
if attributes :
# interestingly the "reduce" line is much slower than the "string.join + map" one
# arg = reduce(lambda s,x,a=attributes: '%s %s="%s"' % (s, x, str(a[x])), attributes.keys(), "")
arg = string.join(map(lambda x,a=attributes: ' %s="%s"' % (x, str(a[x])), attributes.keys()), "")
else :
arg = ""
# if a "first" argument was passed, enclose it in the tag
# and just get out of this tag
if _text_ is not None :
self.__parent._text("<%s%s>%s</%s>" % (self.__tagname, arg + nsargs, str(_text_), self.__tagname))
else :
# future tags will be inserted inside this one
self.__parent._tag__(self.__tagname, arg + nsargs)
return self.__parent
def __getattr__(self, name) :
"""Handles naming spaces (Space:Tag)
name
The name of the (sub)tag part
The current tag's name becomes the naming space's name.
name becomes the new tag's name.
"""
return self.__parent.Tag(self.__parent, "%s:%s" % (self.__tagname, name))
def __init__(self) :
"""Initialize local datas"""
# the document itself
self.__page = []
self.__pushed = []
self.__pusheddict = {}
self.__position = 0
# Initialise a mapping to implement another templating
# facility for postprocessing
self._updatemapping()
# sets the default indentation string
self._indentstring()
def __copy__(self) :
"""Creates a copy of the current document"""
# create an instance of the same class
new = self.__class__()
# copy the "private" members
new.__page = self.__page[:]
new.__pushed = self.__pushed[:]
new.__pusheddict = self.__pusheddict.copy()
new.__position = self.__position
new.__indentstring = self.__indentstring
new.__mapping = self.__mapping.copy()
# copy the "public" ones which are not callable (shouldn't occur anyway)
for (key, value) in self.__dict__.items() :
if (key[:2] == "__") and (key[-2:] == "__") and not callable(getattr(self, key)) :
setattr(new, key, value)
return new
def __mul__(self, number) :
"""Allows a document to be repeated
number
The number of times to repeat the document
allows constructs like: mydoc * 3
"""
if type(number) != type(1) :
raise TypeError, "jaxml.py: __mul__ operation not permitted on these operands."
if number < 0 :
raise ValueError, "jaxml.py: can't repeat a document a negative number of times."
if number == 0 :
# returns an empty document
return self.__class__()
else :
# a multiplication is just a big addition...
new = self.__copy__()
for i in range(number - 1) :
new = new + self
return new
def __rmul__(self, number) :
"""Allows a document to be repeated
number
The number of times to repeat the document
allows constructs like: 3 * mydoc
"""
return self * number
def __add__(self, other) :
"""Allows two documents to be concatenated
other
The document or string of text to concatenate to self
This is not a real concatenation: the second
document (other) is in fact inserted at the current
position in the first one (self).
Also allows constructs like: mydoc + "some text"
"""
if (not isinstance(other, _TAGGED_document)) and (type(other) != type("")) :
raise TypeError, "jaxml.py: __add__ operation not permitted on these operands."
# first we make a copy of the original
new = self.__copy__()
# we must also "concatenate" our two template mappings
new.__mapping.update(other.__mapping)
# then we insert other as a single string of text
# skipping the last new line character.
# we use the parent class __str__ method to skip
# all the leading garbage like XML or HTTP headers.
# we should insert it as tags + text instead of plain text...
new._text(_TAGGED_document.__str__(other)[:-1])
return new
def __radd__(self, other) :
"""Allows two documents to be concatenated
other
The document or string of text to which self will be concatenated
This is not a real concatenation: the first
document (self) is in fact inserted at the current
position in the second one (other).
Also allows constructs like: "some text" + mydoc
"""
return other + self
def __coerce__(self, other) :
"""Try to convert two documents to a common type"""
if isinstance(other, _TAGGED_document) :
# no problem, compatible types
return (self, other)
elif type(other) == type("") :
# a string of text must be converted
# to self's type
new = self.__class__()
new._text(other)
return (self, new)
elif type(other) == type(1) :
# probably a __mul__ operation
return (self, other)
else :
# conversion is impossible
return None
def __getattr__(self, name) :
"""Here's the magic: we create tags on demand
name
The name of the tag we want to create
"""
# don't accept __xxxxx names
# we reserve them for internal or/and future use
if (name[:2] != "__") :
return self.Tag(self, name)
def __nonzero__(self) :
"""For truth value testing, returns 1 when the document is not empty"""
if self.__page :
return 1
else :
return 0
def __getitem__(self, key) :
"""returns key's value in the internal mapping"""
return self.__mapping[key]
def __setitem__(self, key, value) :
"""sets key's value in the internal mapping"""
self.__mapping[key] = value
def __delitem__(self, key) :
"""deletes this key from the internal mapping"""
del self.__mapping[key]
def __str__(self) :
"""returns the document as a string of text"""
outstr = cStringIO.StringIO()
indentation = ""
lgindent = len(self.__indentstring)
lastopened = None
for (text, arg, offset) in self.__page :
if offset == -1 : # closing tag
indentation = indentation[: -lgindent]
if text != lastopened : # normal case
outstr.write("%s</%s>\n" % (indentation, text))
else : # nothing enclosed
outstr.seek(-2, 1)
outstr.write(" />\n")
lastopened = None
elif offset == 1 : # opening tag
outstr.write("%s<%s%s>\n" % (indentation, text, arg))
indentation = indentation + self.__indentstring
lastopened = text
else : # plain text
outstr.write("%s%s\n" % (indentation, text))
lastopened = None
outstr.flush()
retval = outstr.getvalue()
outstr.close()
# and now we use the internal mapping
# to postprocess the document.
# This may prove to be useful for replacing chars with their
# equivalent SGML entities for example, or for templating
# without a template file.
for (key, value) in self.__mapping.items() :
retval = string.replace(retval, key, value)
return retval
def __repr__(self) :
"""Returns a printable representation of the document, same as str() for now"""
# we define it with a 'def' instead of doing __repr__ = __str__ like the previous versions did
# because we may redefine __str__ in subclasses and don't want to
# have to redefine __repr__ too.
#
# This way it is done once for all:
return str(self)
def __adjust_stack(self, offset) :
"""Adjust the stack of pushed positions.
offset
offset by which adjust the stack
"""
if self.__pushed :
pos, oldoffset = self.__pushed.pop()
self.__pushed.append((pos, oldoffset + offset))
def _tag__(self, tag, arg) :
self.__page.insert(self.__position, (tag, arg, 1))
self.__position = self.__position + 1
self.__page.insert(self.__position, (tag, None, -1))
self.__adjust_stack(2)
#
# Callable interface starts here
def _push(self, name=None) :
"""Push the current tag's position.
useful before a block of nested tags
name : can be used to name the pushed position and pop it later directly
"""
if name :
self.__pusheddict[name] = len(self.__pushed)
self.__pushed.append((self.__position, 0))
def _pop(self, name=None) :
"""Restore the latest pushed position.
useful to get out of a block of nested tags
name : can be used to restore a named position, not necessarily the latest.
"""
if self.__pushed :
maxindex = len(self.__pushed) - 1
if name :
try :
index = self.__pusheddict[name]
del self.__pusheddict[name]
except KeyError :
raise KeyError, "jaxml named position %s doesn't exist" % name
else :
index = maxindex
while maxindex >= index :
pos, offset = self.__pushed.pop()
self.__position = pos + offset
self.__adjust_stack(offset) # we report the offset on previously saved tags
maxindex = maxindex - 1
def _text(self, text):
"""Insert plain text in the document
text
text to be inserted
"""
self.__page.insert(self.__position, (str(text), None, 0))
self.__position = self.__position + 1
self.__adjust_stack(1)
def _indentstring(self, newindentstring = " "):
"""Sets the indentation string for the output (default is 4 space characters)"""
self.__indentstring = newindentstring
def _updatemapping(self, newmap = None) :
"""Updates the internal mapping for the new templating facility,
and returns the new mapping's content
newmap
a Python mapping object to initialise or extend the
mapping. If None then the mapping is reset to an empty dictionary
which is the default value.
"""
if newmap == None :
# clears the template mapping
self.__mapping = {}
return self.__mapping
elif type(newmap) == type({}) :
# update or extend the current mapping
self.__mapping.update(newmap)
return self.__mapping
else :
raise TypeError, "jaxml.py: _updatemapping's parameter must be a Python mapping object."
def _output(self, file = "-") :
"""Ouput the page, with indentation.
file
the optional file object or filename to output to
("-" or None or "" means sys.stdout)
"""
isopen = 0
if (type(file) == type("")) or (file is None) :
if file and (file != "-") :
outf = open(file, "w")
isopen = 1
else :
outf = sys.stdout
else :
outf = file # we assume it's a file object
outf.write("%s" % str(self))
outf.flush()
if isopen :
outf.close()
class XML_document(_TAGGED_document) :
"""This class defines an XML document"""
def __init__(self, version = "1.0", encoding = "iso-8859-1") :
"""Initialize local datas.
arguments:
version: xml version string
encoding: xml encoding language
"""
_TAGGED_document.__init__(self)
self.__version__ = version
self.__encoding__ = encoding
def __str__(self) :
"""returns the XML document as a string of text"""
tagdocstr = _TAGGED_document.__str__(self)
if tagdocstr :
return ("""<?xml version="%s" encoding="%s"?>\n""" % (self.__version__, self.__encoding__)) + tagdocstr
else :
return ""
def __subst_lines(self, lines, **vars):
"""Substitues var names with their values.
parts of this function come from the Whiz package
THANKS TO Neale Pickett ! Here follows the original license terms for Whiz:
## Author: Neale Pickett <[email protected]>
## Time-stamp: <99/02/11 10:45:42 neale>
## This software and ancillary information (herein called "SOFTWARE")
## called html.py made avaiable under the terms described here. The
## SOFTWARE has been approved for release with associated LA-CC Number
## 89-47.
## Unless otherwise indicated, this SOFTWARE has been authored by an
## employee or employees of the University of California, operator of
## the Los Alamos National Laboratory under contract No. W-7405-ENG-36
## with the U.S. Department of Energy. The U.S. Government has rights
## to use, reproduce, and distribute this SOFTWARE. The public may
## copy, distribute, prepare derivative works and publicly display this
## SOFTWARE without charge, provided that this Notice and any statement
## of authorship are reproduced on all copies. Neither the Government
## nor the University makes any warranty, express or implied, or assumes
## any liability or responsibility for the use of this SOFTWARE.
## If SOFTWARE is modified to produce derivative works, such modified
## SOFTWARE should be clearly marked, so as not to confuse it with the
## version available from LANL.
"""
import regex
container = regex.compile('\(<!-- \)?##\([-_A-Za-z0-9]+\)##\( -->\)?')
for line in lines:
while container.search(line) != -1:
try:
replacement = str(vars[container.group(2)])
except KeyError:
replacement = str('<!-- Unmatched variable: ' + container.group(2) + ' -->')
pre = line[:container.regs[0][0]]
post = line[container.regs[0][1]:]
if string.strip(pre) == '':
# pre is just whitespace, so pad our replacement's lines with that space
lines = string.split(replacement, '\n')
new = [lines[0]]
for l in lines[1:]:
new.append(pre + l)
replacement = string.join(new, '\n')
line = "%s%s%s" % (pre, replacement, post)
self._text(line)
def _template(self, file = "-", **vars) :
"""Include an external file in the current doc
and replaces ##vars## with their values.
Parts of this function come from the Whiz package
THANKS TO Neale Pickett ! Here follows the original license terms for Whiz:
## Author: Neale Pickett <[email protected]>
## Time-stamp: <99/02/11 10:45:42 neale>
## This software and ancillary information (herein called "SOFTWARE")
## called html.py made avaiable under the terms described here. The
## SOFTWARE has been approved for release with associated LA-CC Number
## 89-47.
## Unless otherwise indicated, this SOFTWARE has been authored by an
## employee or employees of the University of California, operator of
## the Los Alamos National Laboratory under contract No. W-7405-ENG-36
## with the U.S. Department of Energy. The U.S. Government has rights
## to use, reproduce, and distribute this SOFTWARE. The public may
## copy, distribute, prepare derivative works and publicly display this
## SOFTWARE without charge, provided that this Notice and any statement
## of authorship are reproduced on all copies. Neither the Government
## nor the University makes any warranty, express or implied, or assumes
## any liability or responsibility for the use of this SOFTWARE.
## If SOFTWARE is modified to produce derivative works, such modified
## SOFTWARE should be clearly marked, so as not to confuse it with the
## version available from LANL.
"""
if (file is None) or (type(file) == type("")) :
if file and (file != "-") :
inf = open(file, "r")
else :
inf = sys.stdin
else :
inf = file
lines = map(lambda l: l[:-1], inf.readlines())
if inf != sys.stdin :
inf.close()
apply(self.__subst_lines, (lines,), vars)
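# Sketch of the file-based templating provided by _template() above
# ('page.tpl' is a hypothetical file containing e.g. "<p>Hello ##who##</p>"):
# doc._template("page.tpl", who="world")
# inserts the file's lines at the current position with ##who## replaced
# by "world"; unmatched ##variables## become an HTML comment in the output.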
class HTML_document(XML_document) :
"""This class defines a useful method to output a default header,
as well as some methods defined to ease the use of this module and
to keep porting from the old jahtml module easy.
"""
def _default_header(self, title = "JAXML Default HTML Document", **modifiers) :
"""Begins a normal document.
title
the title of the document
modifiers
usual meta name= content= tags (keywords, description, etc...)
WARNING: doesn't work with other meta tags
"""
self.html()
self._push()
self.head()
self.title(title)
for mod in modifiers.keys() :
if modifiers[mod] != None :
self._push()
self.meta(name = string.upper(mod), content = modifiers[mod])
self._pop()
self._pop()
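# For illustration (a sketch, not verbatim output), something like
# doc = HTML_document()
# doc._default_header("My page", keywords="a,b")
# renders roughly as, preceded by the XML declaration from XML_document:
# <html>
# <head>
# <title>My page</title>
# <meta name="KEYWORDS" content="a,b" />
# </head>
# </html>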
#
# Here we define some methods for easy porting from the old jahtml module
#
def __fake_input(self, _text_ = None, **args) :
self._push()
retcode = apply(self.input, (None, ), args)
self._pop()
return retcode
def _submit(self, **args) :
"""Submit button input type, beware of the leading underscore"""
args["type"] = "submit"
return apply(self.__fake_input, (None, ), args)
def _reset(self, **args) :
"""Reset button input type, beware of the leading underscore"""
args["type"] = "reset"
return apply(self.__fake_input, (None, ), args)
def _radio(self, **args) :
"""Radio button input type, beware of the leading underscore"""
args["type"] = "radio"
return apply(self.__fake_input, (None, ), args)
def _checkbox(self, **args) :
"""Checkbox input type, beware of the leading underscore"""
args["type"] = "checkbox"
return apply(self.__fake_input, (None, ), args)
def _password(self, **args) :
"""Password input type, beware of the leading underscore"""
args["type"] = "password"
return apply(self.__fake_input, (None, ), args)
def _hidden(self, **args) :
"""Hidden input type, beware of the leading underscore"""
args["type"] = "hidden"
return apply(self.__fake_input, (None, ), args)
def _textinput(self, **args) :
"""Text input type, beware of the leading underscore and the trailing 'input'"""
args["type"] = "text"
return apply(self.__fake_input, (None, ), args)
def _button(self, **args) :
"""Button input type, beware of the leading underscore"""
args["type"] = "button"
return apply(self.__fake_input, (None, ), args)
def _file(self, **args) :
"""File input type, beware of the leading underscore"""
args["type"] = "file"
return apply(self.__fake_input, (None, ), args)
def _image(self, **args) :
"""Image input type, beware of the leading underscore"""
args["type"] = "image"
return apply(self.__fake_input, (None, ), args)
def _meta(self, **args) :
"""The META tag, beware of the leading underscore"""
self._push()
retcode = apply(self.meta, (None, ), args)
self._pop()
return retcode
def _br(self, **args) :
"""The BR tag, beware of the leading underscore"""
self._push()
retcode = apply(self.br, (None, ), args)
self._pop()
return retcode
def _hr(self, **args) :
"""The HR tag, beware of the leading underscore"""
self._push()
retcode = apply(self.hr, (None, ), args)
self._pop()
return retcode
class CGI_document(HTML_document) :
"""
This class defines a CGI document.
it inherits from the HTML_document class, but more methods are present
"""
__possibleargs = {"version": "1.0", "encoding": "iso-8859-1", "content_type": "text/html", "content_disposition": "", "expires": "", "pragma": "", "redirect": "", "status": "", "statmes": "", "debug": None}
def __init__(self, **args) :
"""
Initialise local data.
"""
HTML_document.__init__(self)
for key in self.__possibleargs.keys() :
if args.has_key(key) :
value = args[key]
else :
value = self.__possibleargs[key]
setattr(self, "__" + key + "__", value)
def __str__(self) :
"""Returns the CGI output as a string."""
if self.__redirect__ :
return "Location: %s\n\n" % self.__redirect__
else :
val = "Content-type: %s\n" % self.__content_type__
if self.__status__ :
val = val + "Status: %s %s\n" % (self.__status__, self.__statmes__)
if self.__pragma__ :
val = val + "Pragma: %s\n" % self.__pragma__
if self.__expires__ :
val = val + "Expires: %s\n" % self.__expires__
if self.__content_disposition__ :
val = val + "Content-Disposition: %s\n" % self.__content_disposition__
return val + "\n" + HTML_document.__str__(self)
def _set_debug(self, file) :
"""Sets the flag to send the output to a file too."""
self.__debug__ = file
def _set_pragma(self, pragma) :
"""Defines the pragma value.
pragma
The pragma's value
"""
self.__pragma__ = pragma
def _set_expires(self, expires) :
"""Defines the expiration date of the CGI output.
expires
The expiration date
"""
self.__expires__ = expires
def _set_redirect(self, url) :
"""Defines the redirection url.
url
The redirection url to send
"""
self.__redirect__ = url
def _set_content_type(self, content_type = "text/html") :
"""Defines the content type of the CGI output.
content_type
The new content type, default is text/html
"""
self.__content_type__ = content_type
def _set_content_disposition(self, content_disposition = "") :
"""Defines the content disposition of the CGI output.
content_disposition
The new disposition, default is ""
"""
self.__content_disposition__ = content_disposition
def _set_status(self, status, message="") :
"""Defines the status to return.
status
The status value
message
The message following the status value
"""
self.__status__ = status
self.__statmes__ = message
def _do_nothing(self, message = "No response") :
"""Set status to 204 (do nothing)."""
self._set_status("204", message)
def _envvar(self, varname) :
"""Returns the variable value or None."""
if os.environ.has_key(varname) :
return os.environ[varname]
def _server_software(self) :
"""Returns the SERVER_SOFTWARE environment variable value."""
return self._envvar('SERVER_SOFTWARE')
def _server_name(self) :
"""Returns the SERVER_NAME environment variable value."""
return self._envvar('SERVER_NAME')
def _gateway_interface(self) :
"""Returns the GATEWAY_INTERFACE environment variable value."""
return self._envvar('GATEWAY_INTERFACE')
def _server_protocol(self) :
"""Returns the SERVER_PROTOCOL environment variable value."""
return self._envvar('SERVER_PROTOCOL')
def _server_port(self) :
"""Returns the SERVER_PORT environment variable value."""
return self._envvar('SERVER_PORT')
def _request_method(self) :
"""Returns the REQUEST_METHOD environment variable value."""
return self._envvar('REQUEST_METHOD')
def _path_info(self) :
"""Returns the PATH_INFO environment variable value."""
return self._envvar('PATH_INFO')
def _path_translated(self) :
"""Returns the PATH_TRANSLATED environment variable value."""
return self._envvar('PATH_TRANSLATED')
def _document_root(self) :
"""Returns the DOCUMENT_ROOT environment variable value."""
return self._envvar('DOCUMENT_ROOT')
def _script_name(self) :
"""Returns the SCRIPT_NAME environment variable value."""
return self._envvar('SCRIPT_NAME')
def _query_string(self) :
"""Returns the QUERY_STRING environment variable value."""
return self._envvar('QUERY_STRING')
def _remote_host(self) :
"""Returns the REMOTE_HOST environment variable value."""
return self._envvar('REMOTE_HOST')
def _remote_addr(self) :
"""Returns the REMOTE_ADDR environment variable value."""
return self._envvar('REMOTE_ADDR')
def _auth_type(self) :
"""Returns the AUTH_TYPE environment variable value."""
return self._envvar('AUTH_TYPE')
def _remote_user(self) :
"""Returns the REMOTE_USER environment variable value."""
return self._envvar('REMOTE_USER')
def _remote_ident(self) :
"""Returns the REMOTE_IDENT environment variable value."""
return self._envvar('REMOTE_IDENT')
def _content_type(self) :
"""Returns the CONTENT_TYPE environment variable value."""
return self._envvar('CONTENT_TYPE')
def _content_length(self) :
"""Returns the CONTENT_LENGTH environment variable value."""
return self._envvar('CONTENT_LENGTH')
def _http_accept(self) :
"""Returns the HTTP_ACCEPT environment variable value."""
return self._envvar('HTTP_ACCEPT')
def _http_user_agent(self) :
"""Returns the HTTP_USER_AGENT environment variable value."""
return self._envvar('HTTP_USER_AGENT')
def _http_referer(self) :
"""Returns the HTTP_REFERER environment variable value."""
return self._envvar('HTTP_REFERER')
def _log_message(self, msg = "Error in a CGI Script made with jaxml", level = "error") :
"""Logs a message to the HTTP server's error log file (usually on stderr)."""
sys.stderr.write("[%s] [%s] %s\n" % (time.asctime(time.localtime(time.time())), level, msg))
def _log_message_and_exit(self, msg = "Fatal Error in a CGI Script made with jaxml", level = "error") :
"""Logs a message to the HTTP server's error log file (usually on stderr) and exits unsuccessfully."""
self._log_message(msg, level)
sys.exit(-1)
def _output(self, file = "-") :
"""Prints the CGI script output to stdout or file.
If self.__debug__ is defined it is used as a file
to which send the output to too.
"""
HTML_document._output(self, file)
if self.__debug__ :
HTML_document._output(self, self.__debug__)
class Html_document :
"""This class warns the programmer when used, and exits the program.
This is done to say that the jahtml module is now obsolete"""
def __init__(self) :
"""Warns and Exit"""
sys.stderr.write("EXITING: The jaxml.Html_document() class shouldn't be used anymore.\nUse jaxml.HTML_document() instead, and modify your programs according to the new API.\n")
sys.exit(-1)
| gpl-2.0 | -6,357,981,298,586,470,000 | 41.383821 | 210 | 0.519695 | false |
arun6582/django | tests/forms_tests/field_tests/test_nullbooleanfield.py | 49 | 3555 | from django.forms import Form, HiddenInput, NullBooleanField, RadioSelect
from django.test import SimpleTestCase
from . import FormFieldAssertionsMixin
class NullBooleanFieldTest(FormFieldAssertionsMixin, SimpleTestCase):
def test_nullbooleanfield_clean(self):
f = NullBooleanField()
self.assertIsNone(f.clean(''))
self.assertTrue(f.clean(True))
self.assertFalse(f.clean(False))
self.assertIsNone(f.clean(None))
self.assertFalse(f.clean('0'))
self.assertTrue(f.clean('1'))
self.assertIsNone(f.clean('2'))
self.assertIsNone(f.clean('3'))
self.assertIsNone(f.clean('hello'))
self.assertTrue(f.clean('true'))
self.assertFalse(f.clean('false'))
def test_nullbooleanfield_2(self):
# The internal value is preserved if using HiddenInput (#7753).
class HiddenNullBooleanForm(Form):
hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True)
hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False)
f = HiddenNullBooleanForm()
self.assertHTMLEqual(
'<input type="hidden" name="hidden_nullbool1" value="True" id="id_hidden_nullbool1" />'
'<input type="hidden" name="hidden_nullbool2" value="False" id="id_hidden_nullbool2" />',
str(f)
)
def test_nullbooleanfield_3(self):
class HiddenNullBooleanForm(Form):
hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True)
hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False)
f = HiddenNullBooleanForm({'hidden_nullbool1': 'True', 'hidden_nullbool2': 'False'})
self.assertIsNone(f.full_clean())
self.assertTrue(f.cleaned_data['hidden_nullbool1'])
self.assertFalse(f.cleaned_data['hidden_nullbool2'])
def test_nullbooleanfield_4(self):
# Make sure we're compatible with MySQL, which uses 0 and 1 for its
# boolean values (#9609).
NULLBOOL_CHOICES = (('1', 'Yes'), ('0', 'No'), ('', 'Unknown'))
class MySQLNullBooleanForm(Form):
nullbool0 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
nullbool1 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
nullbool2 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
f = MySQLNullBooleanForm({'nullbool0': '1', 'nullbool1': '0', 'nullbool2': ''})
self.assertIsNone(f.full_clean())
self.assertTrue(f.cleaned_data['nullbool0'])
self.assertFalse(f.cleaned_data['nullbool1'])
self.assertIsNone(f.cleaned_data['nullbool2'])
def test_nullbooleanfield_changed(self):
f = NullBooleanField()
self.assertTrue(f.has_changed(False, None))
self.assertTrue(f.has_changed(None, False))
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed(False, False))
self.assertTrue(f.has_changed(True, False))
self.assertTrue(f.has_changed(True, None))
self.assertTrue(f.has_changed(True, False))
# HiddenInput widget sends string values for boolean but doesn't clean them in value_from_datadict
self.assertFalse(f.has_changed(False, 'False'))
self.assertFalse(f.has_changed(True, 'True'))
self.assertFalse(f.has_changed(None, ''))
self.assertTrue(f.has_changed(False, 'True'))
self.assertTrue(f.has_changed(True, 'False'))
self.assertTrue(f.has_changed(None, 'False'))
| bsd-3-clause | 1,982,692,439,101,740,800 | 47.040541 | 106 | 0.661885 | false |
tridao/cvxpy | cvxpy/atoms/affine/sum_entries.py | 5 | 2288 | """
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.atoms.affine.affine_atom import AffAtom
from cvxpy.atoms.axis_atom import AxisAtom
import cvxpy.utilities as u
import cvxpy.lin_ops.lin_utils as lu
import numpy as np
class sum_entries(AxisAtom, AffAtom):
""" Summing the entries of an expression.
Attributes
----------
expr : CVXPY Expression
The expression to sum the entries of.
"""
def __init__(self, expr, axis=None):
super(sum_entries, self).__init__(expr, axis=axis)
@AffAtom.numpy_numeric
def numeric(self, values):
"""Sums the entries of value.
"""
return np.sum(values[0], axis=self.axis)
@staticmethod
def graph_implementation(arg_objs, size, data=None):
"""Sum the linear expression's entries.
Parameters
----------
arg_objs : list
LinExpr for each argument.
size : tuple
The size of the resulting expression.
data :
Additional data required by the atom.
Returns
-------
tuple
(LinOp for objective, list of constraints)
"""
axis = data[0]
if axis is None:
obj = lu.sum_entries(arg_objs[0])
elif axis == 1:
const_size = (arg_objs[0].size[1], 1)
ones = lu.create_const(np.ones(const_size), const_size)
obj = lu.rmul_expr(arg_objs[0], ones, size)
else: # axis == 0
const_size = (1, arg_objs[0].size[0])
ones = lu.create_const(np.ones(const_size), const_size)
obj = lu.mul_expr(ones, arg_objs[0], size)
return (obj, [])
| gpl-3.0 | -5,322,284,604,814,319,000 | 29.918919 | 68 | 0.625874 | false |
cdsgroup/qcdb | databases/A24.py | 2 | 34755 | #
# @BEGIN LICENSE
#
# QCDB: quantum chemistry common driver and databases
#
# Copyright (c) 2011-2017 The QCDB Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of QCDB.
#
# QCDB is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# QCDB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with QCDB; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database (Hobza) of interaction energies for bimolecular complexes.
| Geometries from JCTC 9 2151 (2013).
| Reference interaction energies from Rezac and Hobza, and others (see below).
- **cp** ``'off'`` <erase this comment and after unless on is a valid option> || ``'on'``
- **rlxd** ``'off'`` <erase this comment and after unless on is valid option> || ``'on'``
- **benchmark**
- ``'A240'`` original pub, Riley et al. JCTC 9 2151 (2013).
- ``'A24A'`` weighted average CP/unCP, Burns et al. JCTC 10 49 (2014).
- |dl| ``'A24B'`` |dr| highest extrapolated CP CCSD(T) values (aq5z or a56z).
- **subset**
- ``'small'`` <members_description>
- ``'large'`` <members_description>
- ``'<subset>'`` <members_description>
"""
import re
import qcdb
# <<< A24 Database Module >>>
dbse = 'A24'
# <<< Database Members >>>
HRXN = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
HRXN_SM = [2]
HRXN_LG = []
HB = [1,2,3,4,5]
MX = [6,7,8,9,10,11,12,13,16]
DD = [14,15,17,18,19,20,21,22,23,24]
#weak = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
#weak_hb = [1,2,3,4,5]
#weak_mx = [6,7,8,9,10,11,12,13,16]
#weak_dd = [14,15,17,18,19,20,21,22,23,24]
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
ACTV_CP = {} # order of active reagents per counterpoise-corrected reaction
ACTV_SA = {} # order of active reagents for non-supermolecular calculations
for rxn in HRXN:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-%s-monoA-unCP' % (dbse, rxn) : -1,
'%s-%s-monoB-unCP' % (dbse, rxn) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-unCP' % (dbse, rxn),
'%s-%s-monoB-unCP' % (dbse, rxn) ]
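# For illustration, the entries built above for reaction 1 are:
# ACTV['A24-1'] = ['A24-1-dimer', 'A24-1-monoA-unCP', 'A24-1-monoB-unCP']
# ACTV_CP['A24-1'] = ['A24-1-dimer', 'A24-1-monoA-CP', 'A24-1-monoB-CP']
# i.e. the interaction energy is E(dimer) minus the two monomer energies,
# with (CP) or without (unCP) the counterpoise correction.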
# <<< Reference Values [kcal/mol] >>>
BIND = {}
# A240: Original publication JCTC 9 2151 (2013)
BIND_A240 = {}
BIND_A240['%s-%s' % (dbse, 1)] = -6.493
BIND_A240['%s-%s' % (dbse, 2)] = -5.006
BIND_A240['%s-%s' % (dbse, 3)] = -4.745
BIND_A240['%s-%s' % (dbse, 4)] = -4.581
BIND_A240['%s-%s' % (dbse, 5)] = -3.137
BIND_A240['%s-%s' % (dbse, 6)] = -1.654
BIND_A240['%s-%s' % (dbse, 7)] = -0.765
BIND_A240['%s-%s' % (dbse, 8)] = -0.663
BIND_A240['%s-%s' % (dbse, 9)] = -4.554
BIND_A240['%s-%s' % (dbse, 10)] = -2.557
BIND_A240['%s-%s' % (dbse, 11)] = -1.621
BIND_A240['%s-%s' % (dbse, 12)] = -1.524
BIND_A240['%s-%s' % (dbse, 13)] = -1.374
BIND_A240['%s-%s' % (dbse, 14)] = -1.090
BIND_A240['%s-%s' % (dbse, 15)] = -0.502
BIND_A240['%s-%s' % (dbse, 16)] = -1.485
BIND_A240['%s-%s' % (dbse, 17)] = -0.827
BIND_A240['%s-%s' % (dbse, 18)] = -0.607
BIND_A240['%s-%s' % (dbse, 19)] = -0.533
BIND_A240['%s-%s' % (dbse, 20)] = -0.405
BIND_A240['%s-%s' % (dbse, 21)] = -0.364
BIND_A240['%s-%s' % (dbse, 22)] = 0.821
BIND_A240['%s-%s' % (dbse, 23)] = 0.934
BIND_A240['%s-%s' % (dbse, 24)] = 1.115
# A24A: Weighted averaged reference used in JCTC 10 49 (2014)
BIND_A24A = {}
BIND_A24A['%s-%s' % (dbse, 1)] = -6.502
BIND_A24A['%s-%s' % (dbse, 2)] = -5.007
BIND_A24A['%s-%s' % (dbse, 3)] = -4.758
BIND_A24A['%s-%s' % (dbse, 4)] = -4.569
BIND_A24A['%s-%s' % (dbse, 5)] = -3.131
BIND_A24A['%s-%s' % (dbse, 6)] = -1.633
BIND_A24A['%s-%s' % (dbse, 7)] = -0.761
BIND_A24A['%s-%s' % (dbse, 8)] = -0.669
BIND_A24A['%s-%s' % (dbse, 9)] = -4.520
BIND_A24A['%s-%s' % (dbse, 10)] = -2.560
BIND_A24A['%s-%s' % (dbse, 11)] = -1.618
BIND_A24A['%s-%s' % (dbse, 12)] = -1.520
BIND_A24A['%s-%s' % (dbse, 13)] = -1.376
BIND_A24A['%s-%s' % (dbse, 14)] = -1.088
BIND_A24A['%s-%s' % (dbse, 15)] = -0.505
BIND_A24A['%s-%s' % (dbse, 16)] = -1.484
BIND_A24A['%s-%s' % (dbse, 17)] = -0.831
BIND_A24A['%s-%s' % (dbse, 18)] = -0.610
BIND_A24A['%s-%s' % (dbse, 19)] = -0.534
BIND_A24A['%s-%s' % (dbse, 20)] = -0.397
BIND_A24A['%s-%s' % (dbse, 21)] = -0.347
BIND_A24A['%s-%s' % (dbse, 22)] = 0.835
BIND_A24A['%s-%s' % (dbse, 23)] = 0.945
BIND_A24A['%s-%s' % (dbse, 24)] = 1.131
# A24B: Highest extrapolated CP CCSD(T) values (q5 or 56)
BIND_A24B = {}
BIND_A24B['%s-%s' % (dbse, 1)] = -6.506 # 56
BIND_A24B['%s-%s' % (dbse, 2)] = -5.015 # 56
BIND_A24B['%s-%s' % (dbse, 3)] = -4.751 # 56
BIND_A24B['%s-%s' % (dbse, 4)] = -4.592 # 56
BIND_A24B['%s-%s' % (dbse, 5)] = -3.142 # 56
BIND_A24B['%s-%s' % (dbse, 6)] = -1.661 # 56
BIND_A24B['%s-%s' % (dbse, 7)] = -0.767
BIND_A24B['%s-%s' % (dbse, 8)] = -0.665 # 56
BIND_A24B['%s-%s' % (dbse, 9)] = -4.565
BIND_A24B['%s-%s' % (dbse, 10)] = -2.564
BIND_A24B['%s-%s' % (dbse, 11)] = -1.626
BIND_A24B['%s-%s' % (dbse, 12)] = -1.527
BIND_A24B['%s-%s' % (dbse, 13)] = -1.377
BIND_A24B['%s-%s' % (dbse, 14)] = -1.094
BIND_A24B['%s-%s' % (dbse, 15)] = -0.504
BIND_A24B['%s-%s' % (dbse, 16)] = -1.493
BIND_A24B['%s-%s' % (dbse, 17)] = -0.830
BIND_A24B['%s-%s' % (dbse, 18)] = -0.609
BIND_A24B['%s-%s' % (dbse, 19)] = -0.534
BIND_A24B['%s-%s' % (dbse, 20)] = -0.406 # 56
BIND_A24B['%s-%s' % (dbse, 21)] = -0.354 # 56
BIND_A24B['%s-%s' % (dbse, 22)] = 0.818
BIND_A24B['%s-%s' % (dbse, 23)] = 0.930
BIND_A24B['%s-%s' % (dbse, 24)] = 1.115
# A24C: Includes (Q), core, rel corrections PCCP 17 19268 (2015)
BIND_A24C = {}
BIND_A24C['%s-%s' % (dbse, 1)] = -6.546
BIND_A24C['%s-%s' % (dbse, 2)] = -5.036
BIND_A24C['%s-%s' % (dbse, 3)] = -4.769
BIND_A24C['%s-%s' % (dbse, 4)] = -4.585
BIND_A24C['%s-%s' % (dbse, 5)] = -3.169
BIND_A24C['%s-%s' % (dbse, 6)] = -1.662
BIND_A24C['%s-%s' % (dbse, 7)] = -0.779
BIND_A24C['%s-%s' % (dbse, 8)] = -0.681
BIND_A24C['%s-%s' % (dbse, 9)] = -4.515
BIND_A24C['%s-%s' % (dbse, 10)] = -2.586
BIND_A24C['%s-%s' % (dbse, 11)] = -1.634
BIND_A24C['%s-%s' % (dbse, 12)] = -1.538
BIND_A24C['%s-%s' % (dbse, 13)] = -1.396
BIND_A24C['%s-%s' % (dbse, 14)] = -1.110
BIND_A24C['%s-%s' % (dbse, 15)] = -0.518
BIND_A24C['%s-%s' % (dbse, 16)] = -1.522
BIND_A24C['%s-%s' % (dbse, 17)] = -0.845
BIND_A24C['%s-%s' % (dbse, 18)] = -0.618
BIND_A24C['%s-%s' % (dbse, 19)] = -0.542
BIND_A24C['%s-%s' % (dbse, 20)] = -0.405
BIND_A24C['%s-%s' % (dbse, 21)] = -0.356
BIND_A24C['%s-%s' % (dbse, 22)] = 0.801
BIND_A24C['%s-%s' % (dbse, 23)] = 0.909
BIND_A24C['%s-%s' % (dbse, 24)] = 1.097
# Set default
BIND = BIND_A24B
# Reference information
BINDINFO_A240 = {}
BINDINFO_A24A = {}
BINDINFO_A24B = {}
BINDINFO_A24C = {}
for rxn in HRXN:
# A24-0: HF/aug-cc-pV5Z + D:CCSD(T)/aug-cc-pV[TQ5]Z + D:(Q)/6-31G**(0.25,0.15) + DKH4/aug-cc-pCVQZ-DK + CCSD(T)/aug-cc-pCV[TQ]Z(ae - fc)
BINDINFO_A240['%s-%s' % (dbse, rxn)] = {'citation': 'a240', 'method': 'CCSDTQ'}
if rxn in [1, 2, 3, 4, 5, 6, 8, 20, 21]:
BINDINFO_A24A['%s-%s' % (dbse, rxn)] = {'citation': 'dilabio', 'method': 'CCSDT', 'mode': 'ave', 'basis': 'a56z'}
BINDINFO_A24B['%s-%s' % (dbse, rxn)] = {'citation': 'dilabio', 'method': 'CCSDT', 'mode': 'CP', 'basis': 'a56z'}
else:
BINDINFO_A24A['%s-%s' % (dbse, rxn)] = {'citation': 'dilabio', 'method': 'CCSDT', 'mode': 'ave', 'basis': 'aq5z'}
BINDINFO_A24B['%s-%s' % (dbse, rxn)] = {'citation': 'dilabio', 'method': 'CCSDT', 'mode': 'CP', 'basis': 'aq5z'}
# A24C: A24B + D:(Q)/aTZ (A24-2, 4, 5, 19; could be aDTZ?) /aDZ (1, 6-8, 10-18, 20-24) /None (3, 9) + relativisic(A24-0) + core(A24-0)
BINDINFO_A24C['%s-%s' % (dbse, rxn)] = {'citation': 'a24c', 'method': 'CCSDTQ'}
# <<< Comment Lines >>>
TAGL = {}
TAGL['%s-%s' % (dbse, 1)] = """ water_ammonia_Cs """
TAGL['%s-%s-dimer' % (dbse, 1)] = """Dimer from water_ammonia_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 1)] = """Monomer A water_ammonia_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 1)] = """Monomer B water_ammonia_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 1)] = """Monomer A water_ammonia_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 1)] = """Monomer B water_ammonia_Cs """
TAGL['%s-%s' % (dbse, 2)] = """ water_water_Cs """
TAGL['%s-%s-dimer' % (dbse, 2)] = """Dimer from water_water_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 2)] = """Monomer A from water_water_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 2)] = """Monomer B from water_water_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 2)] = """Monomer A from water_water_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 2)] = """Monomer B from water_water_Cs """
TAGL['%s-%s' % (dbse, 3)] = """ HCN_HCN_Cxv """
TAGL['%s-%s-dimer' % (dbse, 3)] = """Dimer from HCN_HCN_Cxv """
TAGL['%s-%s-monoA-CP' % (dbse, 3)] = """Monomer A from HCN_HCN_Cxv """
TAGL['%s-%s-monoB-CP' % (dbse, 3)] = """Monomer B from HCN_HCN_Cxv """
TAGL['%s-%s-monoA-unCP' % (dbse, 3)] = """Monomer A from HCN_HCN_Cxv """
TAGL['%s-%s-monoB-unCP' % (dbse, 3)] = """Monomer B from HCN_HCN_Cxv """
TAGL['%s-%s' % (dbse, 4)] = """ HF_HF_Cs """
TAGL['%s-%s-dimer' % (dbse, 4)] = """Dimer from HF_HF_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 4)] = """Monomer A from HF_HF_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 4)] = """Monomer B from HF_HF_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 4)] = """Monomer A from HF_HF_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 4)] = """Monomer B from HF_HF_Cs """
TAGL['%s-%s' % (dbse, 5)] = """ ammonia_ammonia_C2h """
TAGL['%s-%s-dimer' % (dbse, 5)] = """Dimer from ammonia_ammonia_C2h """
TAGL['%s-%s-monoA-CP' % (dbse, 5)] = """Monomer A from ammonia_ammonia_C2h """
TAGL['%s-%s-monoB-CP' % (dbse, 5)] = """Monomer B from ammonia_ammonia_C2h """
TAGL['%s-%s-monoA-unCP' % (dbse, 5)] = """Monomer A from ammonia_ammonia_C2h """
TAGL['%s-%s-monoB-unCP' % (dbse, 5)] = """Monomer B from ammonia_ammonia_C2h """
TAGL['%s-%s' % (dbse, 6)] = """ methane_HF_C3v """
TAGL['%s-%s-dimer' % (dbse, 6)] = """Dimer from methane_HF_C3v """
TAGL['%s-%s-monoA-CP' % (dbse, 6)] = """Monomer A from methane_HF_C3v """
TAGL['%s-%s-monoB-CP' % (dbse, 6)] = """Monomer B from methane_HF_C3v """
TAGL['%s-%s-monoA-unCP' % (dbse, 6)] = """Monomer A from methane_HF_C3v """
TAGL['%s-%s-monoB-unCP' % (dbse, 6)] = """Monomer B from methane_HF_C3v """
TAGL['%s-%s' % (dbse, 7)] = """ ammonia_methane_C3v """
TAGL['%s-%s-dimer' % (dbse, 7)] = """Dimer from ammonia_methane_C3v """
TAGL['%s-%s-monoA-CP' % (dbse, 7)] = """Monomer A from ammonia_methane_C3v """
TAGL['%s-%s-monoB-CP' % (dbse, 7)] = """Monomer B from ammonia_methane_C3v """
TAGL['%s-%s-monoA-unCP' % (dbse, 7)] = """Monomer A from ammonia_methane_C3v """
TAGL['%s-%s-monoB-unCP' % (dbse, 7)] = """Monomer B from ammonia_methane_C3v """
TAGL['%s-%s' % (dbse, 8)] = """ methane_water_Cs """
TAGL['%s-%s-dimer' % (dbse, 8)] = """Dimer from methane_water_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 8)] = """Monomer A from methane_water_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 8)] = """Monomer B from methane_water_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 8)] = """Monomer A from methane_water_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 8)] = """Monomer B from methane_water_Cs """
TAGL['%s-%s' % (dbse, 9)] = """ formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-dimer' % (dbse, 9)] = """Dimer from formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 9)] = """Monomer A from formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 9)] = """Monomer B from formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 9)] = """Monomer A from formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 9)] = """Monomer B from formaldehyde_formaldehyde_Cs """
TAGL['%s-%s' % (dbse, 10)] = """ ethene_wat_Cs """
TAGL['%s-%s-dimer' % (dbse, 10)] = """Dimer from ethene_wat_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 10)] = """Monomer A from ethene_wat_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 10)] = """Monomer B from ethene_wat_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 10)] = """Monomer A from ethene_wat_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 10)] = """Monomer B from ethene_wat_Cs """
TAGL['%s-%s' % (dbse, 11)] = """ ethene_formaldehyde_Cs """
TAGL['%s-%s-dimer' % (dbse, 11)] = """Dimer from ethene_formaldehyde_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 11)] = """Monomer A from ethene_formaldehyde_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 11)] = """Monomer B from ethene_formaldehyde_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 11)] = """Monomer A from ethene_formaldehyde_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 11)] = """Monomer B from ethene_formaldehyde_Cs """
TAGL['%s-%s' % (dbse, 12)] = """ ethyne_ethyne_C2v """
TAGL['%s-%s-dimer' % (dbse, 12)] = """Dimer from ethyne_ethyne_C2v """
TAGL['%s-%s-monoA-CP' % (dbse, 12)] = """Monomer A from ethyne_ethyne_C2v """
TAGL['%s-%s-monoB-CP' % (dbse, 12)] = """Monomer B from ethyne_ethyne_C2v """
TAGL['%s-%s-monoA-unCP' % (dbse, 12)] = """Monomer A from ethyne_ethyne_C2v """
TAGL['%s-%s-monoB-unCP' % (dbse, 12)] = """Monomer B from ethyne_ethyne_C2v """
TAGL['%s-%s' % (dbse, 13)] = """ ethene_ammonia_Cs """
TAGL['%s-%s-dimer' % (dbse, 13)] = """Dimer from ethene_ammonia_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 13)] = """Monomer A from ethene_ammonia_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 13)] = """Monomer B from ethene_ammonia_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 13)] = """Monomer A from ethene_ammonia_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 13)] = """Monomer B from ethene_ammonia_Cs """
TAGL['%s-%s' % (dbse, 14)] = """ ethene_ethene_C2v """
TAGL['%s-%s-dimer' % (dbse, 14)] = """Dimer from ethene_ethene_C2v """
TAGL['%s-%s-monoA-CP' % (dbse, 14)] = """Monomer A from ethene_ethene_C2v """
TAGL['%s-%s-monoB-CP' % (dbse, 14)] = """Monomer B from ethene_ethene_C2v """
TAGL['%s-%s-monoA-unCP' % (dbse, 14)] = """Monomer A from ethene_ethene_C2v """
TAGL['%s-%s-monoB-unCP' % (dbse, 14)] = """Monomer B from ethene_ethene_C2v """
TAGL['%s-%s' % (dbse, 15)] = """ methane_ethene_Cs """
TAGL['%s-%s-dimer' % (dbse, 15)] = """Dimer from methane_ethene_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 15)] = """Monomer A from methane_ethene_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 15)] = """Monomer B from methane_ethene_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 15)] = """Monomer A from methane_ethene_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 15)] = """Monomer B from methane_ethene_Cs """
TAGL['%s-%s' % (dbse, 16)] = """ borane_methane_Cs """
TAGL['%s-%s-dimer' % (dbse, 16)] = """Dimer from borane_methane_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 16)] = """Monomer A from borane_methane_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 16)] = """Monomer B from borane_methane_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 16)] = """Monomer A from borane_methane_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 16)] = """Monomer B from borane_methane_Cs """
TAGL['%s-%s' % (dbse, 17)] = """ methane_ethane_Cs """
TAGL['%s-%s-dimer' % (dbse, 17)] = """Dimer from methane_ethane_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 17)] = """Monomer A from methane_ethane_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 17)] = """Monomer B from methane_ethane_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 17)] = """Monomer A from methane_ethane_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 17)] = """Monomer B from methane_ethane_Cs """
TAGL['%s-%s' % (dbse, 18)] = """ methane_ethane_C3 """
TAGL['%s-%s-dimer' % (dbse, 18)] = """Dimer from methane_ethane_C3 """
TAGL['%s-%s-monoA-CP' % (dbse, 18)] = """Monomer A from methane_ethane_C3 """
TAGL['%s-%s-monoB-CP' % (dbse, 18)] = """Monomer B from methane_ethane_C3 """
TAGL['%s-%s-monoA-unCP' % (dbse, 18)] = """Monomer A from methane_ethane_C3 """
TAGL['%s-%s-monoB-unCP' % (dbse, 18)] = """Monomer B from methane_ethane_C3 """
TAGL['%s-%s' % (dbse, 19)] = """ methane_methane_D3d """
TAGL['%s-%s-dimer' % (dbse, 19)] = """Dimer from methane_methane_D3d """
TAGL['%s-%s-monoA-CP' % (dbse, 19)] = """Monomer A from methane_methane_D3d """
TAGL['%s-%s-monoB-CP' % (dbse, 19)] = """Monomer B from methane_methane_D3d """
TAGL['%s-%s-monoA-unCP' % (dbse, 19)] = """Monomer A from methane_methane_D3d """
TAGL['%s-%s-monoB-unCP' % (dbse, 19)] = """Monomer B from methane_methane_D3d """
TAGL['%s-%s' % (dbse, 20)] = """ methane_Ar_C3v """
TAGL['%s-%s-dimer' % (dbse, 20)] = """Dimer from methane_Ar_C3v """
TAGL['%s-%s-monoA-CP' % (dbse, 20)] = """Monomer A from methane_Ar_C3v """
TAGL['%s-%s-monoB-CP' % (dbse, 20)] = """Monomer B from methane_Ar_C3v """
TAGL['%s-%s-monoA-unCP' % (dbse, 20)] = """Monomer A from methane_Ar_C3v """
TAGL['%s-%s-monoB-unCP' % (dbse, 20)] = """Monomer B from methane_Ar_C3v """
TAGL['%s-%s' % (dbse, 21)] = """ ethene_Ar_C2v """
TAGL['%s-%s-dimer' % (dbse, 21)] = """Dimer from ethene_Ar_C2v """
TAGL['%s-%s-monoA-CP' % (dbse, 21)] = """Monomer A from ethene_Ar_C2v """
TAGL['%s-%s-monoB-CP' % (dbse, 21)] = """Monomer B from ethene_Ar_C2v """
TAGL['%s-%s-monoA-unCP' % (dbse, 21)] = """Monomer A from ethene_Ar_C2v """
TAGL['%s-%s-monoB-unCP' % (dbse, 21)] = """Monomer B from ethene_Ar_C2v """
TAGL['%s-%s' % (dbse, 22)] = """ ethene_ethyne_C2v """
TAGL['%s-%s-dimer' % (dbse, 22)] = """Dimer from ethene_ethyne_C2v """
TAGL['%s-%s-monoA-CP' % (dbse, 22)] = """Monomer A from ethene_ethyne_C2v """
TAGL['%s-%s-monoB-CP' % (dbse, 22)] = """Monomer B from ethene_ethyne_C2v """
TAGL['%s-%s-monoA-unCP' % (dbse, 22)] = """Monomer A from ethene_ethyne_C2v """
TAGL['%s-%s-monoB-unCP' % (dbse, 22)] = """Monomer B from ethene_ethyne_C2v """
TAGL['%s-%s' % (dbse, 23)] = """ ethene_ethene_D2h """
TAGL['%s-%s-dimer' % (dbse, 23)] = """Dimer from ethene_ethene_D2h """
TAGL['%s-%s-monoA-CP' % (dbse, 23)] = """Monomer A from ethene_ethene_D2h """
TAGL['%s-%s-monoB-CP' % (dbse, 23)] = """Monomer B from ethene_ethene_D2h """
TAGL['%s-%s-monoA-unCP' % (dbse, 23)] = """Monomer A from ethene_ethene_D2h """
TAGL['%s-%s-monoB-unCP' % (dbse, 23)] = """Monomer B from ethene_ethene_D2h """
TAGL['%s-%s' % (dbse, 24)] = """ ethyne_ethyne_D2h """
TAGL['%s-%s-dimer' % (dbse, 24)] = """Dimer from ethyne_ethyne_D2h """
TAGL['%s-%s-monoA-CP' % (dbse, 24)] = """Monomer A from ethyne_ethyne_D2h """
TAGL['%s-%s-monoB-CP' % (dbse, 24)] = """Monomer B from ethyne_ethyne_D2h """
TAGL['%s-%s-monoA-unCP' % (dbse, 24)] = """Monomer A from ethyne_ethyne_D2h """
TAGL['%s-%s-monoB-unCP' % (dbse, 24)] = """Monomer B from ethyne_ethyne_D2h """
TAGL['dbse'] = 'interaction energies for small bimolecular complexes'
TAGL['default'] = 'entire database'
TAGL['small'] = 'few computationally quick systems'
TAGL['large'] = 'most computationally expensive systems'
TAGL['HB'] = 'hydrogen-bonded systems'
TAGL['MX'] = 'mixed-influence systems'
TAGL['DD'] = 'dispersion-dominated systems'
# <<< Geometry Specification Strings >>>
GEOS = {}
GEOS['%s-%s-dimer' % (dbse, '1')] = qcdb.Molecule("""
0 1
O 0.00000000 -0.05786571 -1.47979303
H 0.00000000 0.82293384 -1.85541474
H 0.00000000 0.07949567 -0.51934253
--
0 1
N 0.00000000 0.01436394 1.46454628
H 0.00000000 -0.98104857 1.65344779
H -0.81348351 0.39876776 1.92934049
H 0.81348351 0.39876776 1.92934049
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '2')] = qcdb.Molecule("""
0 1
O -0.06699914 0.00000000 1.49435474
H 0.81573427 0.00000000 1.86586639
H 0.06885510 0.00000000 0.53914277
--
0 1
O 0.06254775 0.00000000 -1.42263208
H -0.40696540 -0.76017841 -1.77174450
H -0.40696540 0.76017841 -1.77174450
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '3')] = qcdb.Molecule("""
0 1
H 0.00000000 0.00000000 3.85521306
C 0.00000000 0.00000000 2.78649976
N 0.00000000 0.00000000 1.63150791
--
0 1
H 0.00000000 0.00000000 -0.59377492
C 0.00000000 0.00000000 -1.66809824
N 0.00000000 0.00000000 -2.82525056
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '4')] = qcdb.Molecule("""
0 1
H 0.00000000 0.80267982 1.69529329
F 0.00000000 -0.04596666 1.34034818
--
0 1
H 0.00000000 -0.12040787 -0.49082840
F 0.00000000 0.00976945 -1.40424978
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '5')] = qcdb.Molecule("""
0 1
N -0.04998129 -1.58709323 0.00000000
H 0.12296265 -2.16846018 0.81105976
H 0.12296265 -2.16846018 -0.81105976
H 0.65988580 -0.86235298 0.00000000
--
0 1
N 0.04998129 1.58709323 0.00000000
H -0.12296265 2.16846018 0.81105976
H -0.65988580 0.86235298 0.00000000
H -0.12296265 2.16846018 -0.81105976
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '6')] = qcdb.Molecule("""
0 1
C 0.00000000 -0.00000000 1.77071609
H 0.51593378 -0.89362352 1.42025061
H -0.00000000 0.00000000 2.85805859
H 0.51593378 0.89362352 1.42025061
H -1.03186756 0.00000000 1.42025061
--
0 1
H -0.00000000 0.00000000 -0.54877328
F -0.00000000 0.00000000 -1.46803256
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '7')] = qcdb.Molecule("""
0 1
N -0.00000000 0.00000000 1.84833659
H 0.93730979 -0.00000000 2.23206741
H -0.46865489 -0.81173409 2.23206741
H -0.46865489 0.81173409 2.23206741
--
0 1
H 0.00000000 -0.00000000 -0.94497174
C 0.00000000 -0.00000000 -2.03363752
H 0.51251439 0.88770096 -2.40095125
H 0.51251439 -0.88770096 -2.40095125
H -1.02502878 0.00000000 -2.40095125
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '8')] = qcdb.Molecule("""
0 1
C 0.00069016 0.00000000 -1.99985520
H -0.50741740 0.88759452 -2.37290605
H 1.03052749 0.00000000 -2.35282982
H -0.01314396 0.00000000 -0.91190852
H -0.50741740 -0.88759452 -2.37290605
--
0 1
O -0.00472553 0.00000000 1.71597466
H 0.03211863 0.75755459 2.30172044
H 0.03211863 -0.75755459 2.30172044
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '9')] = qcdb.Molecule("""
0 1
C 0.00000000 0.60123980 -1.35383976
O 0.00000000 -0.59301814 -1.55209021
H 0.93542250 1.17427624 -1.26515132
H -0.93542250 1.17427624 -1.26515132
--
0 1
C 0.00000000 -0.60200476 1.55228866
O 0.00000000 0.59238638 1.35511328
H 0.00000000 -1.00937982 2.57524635
H 0.00000000 -1.32002906 0.71694997
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '10')] = qcdb.Molecule("""
0 1
C 0.01058825 -0.66806246 1.29820809
C 0.01058825 0.66806246 1.29820809
H 0.86863216 1.23267933 0.95426815
H -0.84608285 1.23258495 1.64525385
H -0.84608285 -1.23258495 1.64525385
H 0.86863216 -1.23267933 0.95426815
--
0 1
H -0.79685627 0.00000000 -2.50911038
O 0.04347445 0.00000000 -2.04834054
H -0.19067546 0.00000000 -1.11576944
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '11')] = qcdb.Molecule("""
0 1
C 0.00000000 -0.59797089 1.47742864
C 0.00000000 0.42131196 2.33957848
H 0.92113351 -1.02957102 1.10653516
H -0.92113351 -1.02957102 1.10653516
H -0.92393815 0.85124826 2.70694633
H 0.92393815 0.85124826 2.70694633
--
0 1
O 0.00000000 -0.51877334 -1.82845679
C 0.00000000 0.68616220 -1.73709412
H 0.00000000 1.33077474 -2.63186355
H 0.00000000 1.18902807 -0.75645498
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '12')] = qcdb.Molecule("""
0 1
C 0.00000000 0.60356400 -2.18173438
H 0.00000000 1.66847581 -2.18429610
C 0.00000000 -0.60356400 -2.18173438
H 0.00000000 -1.66847581 -2.18429610
--
0 1
C -0.00000000 0.00000000 1.57829513
H -0.00000000 0.00000000 0.51136193
C -0.00000000 0.00000000 2.78576543
H -0.00000000 0.00000000 3.85017859
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '13')] = qcdb.Molecule("""
0 1
C 0.00000000 -0.59662248 1.58722206
C 0.00000000 0.68258238 1.20494642
H 0.92312147 1.22423658 1.04062463
H -0.92312147 1.22423658 1.04062463
H -0.92388993 -1.13738548 1.75121281
H 0.92388993 -1.13738548 1.75121281
--
0 1
N 0.00000000 -0.00401379 -2.31096701
H -0.81122549 -0.45983060 -2.71043881
H 0.00000000 -0.22249432 -1.32128161
H 0.81122549 -0.45983060 -2.71043881
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '14')] = qcdb.Molecule("""
0 1
H 0.92444510 -1.23172221 -1.90619313
H -0.92444510 -1.23172221 -1.90619313
H -0.92444510 1.23172221 -1.90619313
H 0.92444510 1.23172221 -1.90619313
C 0.00000000 0.66728778 -1.90556520
C 0.00000000 -0.66728778 -1.90556520
--
0 1
H -0.00000000 1.23344948 2.82931792
H 0.00000000 1.22547148 0.97776199
H -0.00000000 -1.22547148 0.97776199
H -0.00000000 -1.23344948 2.82931792
C -0.00000000 -0.66711698 1.90601042
C -0.00000000 0.66711698 1.90601042
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '15')] = qcdb.Molecule("""
0 1
C 0.00000000 0.64634385 -1.60849815
C 0.00000000 -0.67914355 -1.45381675
H -0.92399961 -1.24016223 -1.38784883
H 0.92399961 -1.24016223 -1.38784883
H 0.92403607 1.20737602 -1.67357285
H -0.92403607 1.20737602 -1.67357285
--
0 1
H 0.00000000 0.08295411 1.59016711
C 0.00000000 0.02871509 2.67711785
H 0.88825459 0.52261990 3.06664029
H -0.88825459 0.52261990 3.06664029
H 0.00000000 -1.01394800 2.98955227
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '16')] = qcdb.Molecule("""
0 1
C 0.00346000 0.00000000 1.38045208
H 0.84849635 0.00000000 0.68958651
H 0.39513333 0.00000000 2.39584935
H -0.60268447 -0.88994299 1.22482674
H -0.60268447 0.88994299 1.22482674
--
0 1
B -0.00555317 0.00000000 -1.59887976
H 0.58455128 -1.03051800 -1.67949525
H 0.58455128 1.03051800 -1.67949525
H -1.18903148 0.00000000 -1.47677217
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '17')] = qcdb.Molecule("""
0 1
C 0.00000000 -0.06374421 2.42054090
H 0.00000000 1.02169396 2.34238038
H 0.88828307 -0.46131911 1.93307194
H -0.88828307 -0.46131911 1.93307194
H 0.00000000 -0.35363606 3.46945195
--
0 1
C 0.00000000 0.78133572 -1.13543912
H 0.00000000 1.37465349 -2.05114442
H -0.88043002 1.06310554 -0.55580918
C 0.00000000 -0.71332890 -1.44723686
H 0.88043002 1.06310554 -0.55580918
H 0.00000000 -1.30641812 -0.53140693
H -0.88100343 -0.99533072 -2.02587154
H 0.88100343 -0.99533072 -2.02587154
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '18')] = qcdb.Molecule("""
0 1
C -0.00000000 0.00000000 -2.85810471
H 0.39304720 -0.94712229 -2.49369739
H 0.62370837 0.81395000 -2.49369739
H -1.01675556 0.13317229 -2.49369739
H 0.00000000 -0.00000000 -3.94634214
--
0 1
C 0.00000000 -0.00000000 0.76143405
C -0.00000000 -0.00000000 2.28821715
H -0.61711193 -0.80824397 0.36571527
H -0.39140385 0.93855659 0.36571527
H 1.00851577 -0.13031262 0.36571527
H -1.00891703 0.13031295 2.68258296
H 0.39160418 -0.93890425 2.68258296
H 0.61731284 0.80859130 2.68258296
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '19')] = qcdb.Molecule("""
0 1
C -0.00000000 0.00000000 1.81901457
H 0.51274115 0.88809373 1.45476743
H 0.51274115 -0.88809373 1.45476743
H -1.02548230 0.00000000 1.45476743
H 0.00000000 -0.00000000 2.90722072
--
0 1
C 0.00000000 -0.00000000 -1.81901457
H -0.00000000 0.00000000 -2.90722072
H -0.51274115 0.88809373 -1.45476743
H -0.51274115 -0.88809373 -1.45476743
H 1.02548230 -0.00000000 -1.45476743
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '20')] = qcdb.Molecule("""
0 1
C -0.00000000 0.00000000 -2.62458428
H 0.51286762 0.88831278 -2.26110195
H 0.51286762 -0.88831278 -2.26110195
H -0.00000000 0.00000000 -3.71273928
H -1.02573525 0.00000000 -2.26110195
--
0 1
AR -0.00000000 0.00000000 1.05395172
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '21')] = qcdb.Molecule("""
0 1
C 0.00000000 0.66718073 -2.29024825
C 0.00000000 -0.66718073 -2.29024825
H -0.92400768 1.23202333 -2.28975239
H 0.92400768 1.23202333 -2.28975239
H -0.92400768 -1.23202333 -2.28975239
H 0.92400768 -1.23202333 -2.28975239
--
0 1
AR -0.00000000 0.00000000 1.60829261
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '22')] = qcdb.Molecule("""
0 1
H -0.92396100 1.23195600 -1.68478123
H 0.92396100 1.23195600 -1.68478123
H 0.92396100 -1.23195600 -1.68478123
H -0.92396100 -1.23195600 -1.68478123
C 0.00000000 0.66717600 -1.68478123
C 0.00000000 -0.66717600 -1.68478123
--
0 1
H -0.00000000 -1.66786500 1.81521877
H -0.00000000 1.66786500 1.81521877
C -0.00000000 -0.60339700 1.81521877
C -0.00000000 0.60339700 1.81521877
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '23')] = qcdb.Molecule("""
0 1
H -0.92396100 1.23195600 -1.75000000
H 0.92396100 1.23195600 -1.75000000
H 0.92396100 -1.23195600 -1.75000000
H -0.92396100 -1.23195600 -1.75000000
C 0.00000000 0.66717600 -1.75000000
C -0.00000000 -0.66717600 -1.75000000
--
0 1
H -0.92396100 1.23195600 1.75000000
H 0.92396100 1.23195600 1.75000000
H 0.92396100 -1.23195600 1.75000000
H -0.92396100 -1.23195600 1.75000000
C 0.00000000 0.66717600 1.75000000
C -0.00000000 -0.66717600 1.75000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '24')] = qcdb.Molecule("""
0 1
H -0.00000000 -1.66786500 -1.75000000
H 0.00000000 1.66786500 -1.75000000
C -0.00000000 -0.60339700 -1.75000000
C 0.00000000 0.60339700 -1.75000000
--
0 1
H -0.00000000 -1.66786500 1.75000000
H 0.00000000 1.66786500 1.75000000
C -0.00000000 -0.60339700 1.75000000
C 0.00000000 0.60339700 1.75000000
units angstrom
""")
# <<< Derived Geometry Strings >>>
for rxn in HRXN:
GEOS['%s-%s-monoA-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1)
GEOS['%s-%s-monoB-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2)
GEOS['%s-%s-monoA-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1, 2)
GEOS['%s-%s-monoB-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2, 1)
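# Added annotation (inference, not from the original source): the loop above
# appears to derive the monomer geometries from each dimer. extract_fragments(1)
# and extract_fragments(2) presumably return just one fragment for the
# uncorrected (unCP) monomers, while extract_fragments(1, 2) and
# extract_fragments(2, 1) presumably keep the partner fragment as ghost atoms
# for the counterpoise-corrected (CP) monomers, matching the monoA/monoB
# CP/unCP TAGL naming used throughout this database.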
#########################################################################
| lgpl-3.0 | 5,668,789,873,789,682,000 | 44.550459 | 140 | 0.524529 | false |
codeforamerica/heroku-buildpack-pygeo | vendor/pip-1.3.1/pip/baseparser.py | 63 | 12283 | """Base option parser setup"""
import sys
import optparse
import pkg_resources
import os
import textwrap
from distutils.util import strtobool
from pip.backwardcompat import ConfigParser, string_types, ssl
from pip.locations import default_config_file, default_log_file
from pip.util import get_terminal_size, get_prog
class PrettyHelpFormatter(optparse.IndentedHelpFormatter):
"""A prettier/less verbose help formatter for optparse."""
def __init__(self, *args, **kwargs):
# help position must be aligned with __init__.parseopts.description
kwargs['max_help_position'] = 30
kwargs['indent_increment'] = 1
kwargs['width'] = get_terminal_size()[0] - 2
optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)
def format_option_strings(self, option):
return self._format_option_strings(option, ' <%s>', ', ')
def _format_option_strings(self, option, mvarfmt=' <%s>', optsep=', '):
"""
Return a comma-separated list of option strings and metavars.
:param option: tuple of (short opt, long opt), e.g: ('-f', '--format')
:param mvarfmt: metavar format string - evaluated as mvarfmt % metavar
:param optsep: separator
"""
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if len(opts) > 1:
opts.insert(1, optsep)
if option.takes_value():
metavar = option.metavar or option.dest.lower()
opts.append(mvarfmt % metavar.lower())
return ''.join(opts)
def format_heading(self, heading):
if heading == 'Options':
return ''
return heading + ':\n'
def format_usage(self, usage):
"""
Ensure there is only one newline between usage and the first heading
if there is no description.
"""
msg = '\nUsage: %s\n' % self.indent_lines(textwrap.dedent(usage), " ")
return msg
def format_description(self, description):
# leave full control over description to us
if description:
if hasattr(self.parser, 'main'):
label = 'Commands'
else:
label = 'Description'
            #some doc strings have initial newlines, some don't
description = description.lstrip('\n')
#some doc strings have final newlines and spaces, some don't
description = description.rstrip()
#dedent, then reindent
description = self.indent_lines(textwrap.dedent(description), " ")
description = '%s:\n%s\n' % (label, description)
return description
else:
return ''
def format_epilog(self, epilog):
# leave full control over epilog to us
if epilog:
return epilog
else:
return ''
def indent_lines(self, text, indent):
new_lines = [indent + line for line in text.split('\n')]
return "\n".join(new_lines)
class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter):
"""Custom help formatter for use in ConfigOptionParser that updates
the defaults before expanding them, allowing them to show up correctly
in the help listing"""
def expand_default(self, option):
if self.parser is not None:
self.parser.update_defaults(self.parser.defaults)
return optparse.IndentedHelpFormatter.expand_default(self, option)
class CustomOptionParser(optparse.OptionParser):
def insert_option_group(self, idx, *args, **kwargs):
"""Insert an OptionGroup at a given position."""
group = self.add_option_group(*args, **kwargs)
self.option_groups.pop()
self.option_groups.insert(idx, group)
return group
@property
def option_list_all(self):
"""Get a list of all options, including those in option groups."""
res = self.option_list[:]
for i in self.option_groups:
res.extend(i.option_list)
return res
class ConfigOptionParser(CustomOptionParser):
"""Custom option parser which updates its defaults by by checking the
configuration files and environmental variables"""
def __init__(self, *args, **kwargs):
self.config = ConfigParser.RawConfigParser()
self.name = kwargs.pop('name')
self.files = self.get_config_files()
self.config.read(self.files)
assert self.name
optparse.OptionParser.__init__(self, *args, **kwargs)
def get_config_files(self):
config_file = os.environ.get('PIP_CONFIG_FILE', False)
if config_file and os.path.exists(config_file):
return [config_file]
return [default_config_file]
def update_defaults(self, defaults):
"""Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists)."""
# Then go and look for the other sources of configuration:
config = {}
# 1. config files
for section in ('global', self.name):
config.update(self.normalize_keys(self.get_config_section(section)))
# 2. environmental variables
config.update(self.normalize_keys(self.get_environ_vars()))
# Then set the options with those values
for key, val in config.items():
option = self.get_option(key)
if option is not None:
# ignore empty values
if not val:
continue
# handle multiline configs
if option.action == 'append':
val = val.split()
else:
option.nargs = 1
if option.action in ('store_true', 'store_false', 'count'):
val = strtobool(val)
try:
val = option.convert_value(key, val)
except optparse.OptionValueError:
e = sys.exc_info()[1]
print("An error occurred during configuration: %s" % e)
sys.exit(3)
defaults[option.dest] = val
return defaults
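    # Added example (assumed from the code above, not from pip documentation):
    # with "[global] timeout = 60" in the config file, a "[install] timeout = 30"
    # section for an 'install' command parser, and PIP_TIMEOUT=10 exported in the
    # environment, the effective default becomes 10, because the command section
    # overrides the global section and environment variables are applied last.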
def normalize_keys(self, items):
"""Return a config dictionary with normalized keys regardless of
whether the keys were specified in environment variables or in config
files"""
normalized = {}
for key, val in items:
key = key.replace('_', '-')
if not key.startswith('--'):
key = '--%s' % key # only prefer long opts
normalized[key] = val
return normalized
def get_config_section(self, name):
"""Get a section of a configuration"""
if self.config.has_section(name):
return self.config.items(name)
return []
def get_environ_vars(self, prefix='PIP_'):
"""Returns a generator with all environmental vars with prefix PIP_"""
for key, val in os.environ.items():
if key.startswith(prefix):
yield (key.replace(prefix, '').lower(), val)
def get_default_values(self):
"""Overridding to make updating the defaults after instantiation of
the option parser possible, update_defaults() does the dirty work."""
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return optparse.Values(self.defaults)
defaults = self.update_defaults(self.defaults.copy()) # ours
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, string_types):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return optparse.Values(defaults)
def error(self, msg):
self.print_usage(sys.stderr)
self.exit(2, "%s\n" % msg)
try:
pip_dist = pkg_resources.get_distribution('pip')
version = '%s from %s (python %s)' % (
pip_dist, pip_dist.location, sys.version[:3])
except pkg_resources.DistributionNotFound:
# when running pip.py without installing
version = None
def create_main_parser():
parser_kw = {
'usage': '\n%prog <command> [options]',
'add_help_option': False,
'formatter': UpdatingDefaultsHelpFormatter(),
'name': 'global',
'prog': get_prog(),
}
parser = ConfigOptionParser(**parser_kw)
genopt = optparse.OptionGroup(parser, 'General Options')
parser.disable_interspersed_args()
# having a default version action just causes trouble
parser.version = version
for opt in standard_options:
genopt.add_option(opt)
parser.add_option_group(genopt)
return parser
standard_options = [
optparse.make_option(
'-h', '--help',
dest='help',
action='help',
help='Show help.'),
optparse.make_option(
# Run only if inside a virtualenv, bail if not.
'--require-virtualenv', '--require-venv',
dest='require_venv',
action='store_true',
default=False,
help=optparse.SUPPRESS_HELP),
optparse.make_option(
'-v', '--verbose',
dest='verbose',
action='count',
default=0,
help='Give more output. Option is additive, and can be used up to 3 times.'),
optparse.make_option(
'-V', '--version',
dest='version',
action='store_true',
help='Show version and exit.'),
optparse.make_option(
'-q', '--quiet',
dest='quiet',
action='count',
default=0,
help='Give less output.'),
optparse.make_option(
'--log',
dest='log',
metavar='file',
help='Log file where a complete (maximum verbosity) record will be kept.'),
optparse.make_option(
        # Writes the log levels explicitly to the log
'--log-explicit-levels',
dest='log_explicit_levels',
action='store_true',
default=False,
help=optparse.SUPPRESS_HELP),
optparse.make_option(
# The default log file
'--local-log', '--log-file',
dest='log_file',
metavar='file',
default=default_log_file,
help=optparse.SUPPRESS_HELP),
optparse.make_option(
# Don't ask for input
'--no-input',
dest='no_input',
action='store_true',
default=False,
help=optparse.SUPPRESS_HELP),
optparse.make_option(
'--proxy',
dest='proxy',
type='str',
default='',
help="Specify a proxy in the form [user:passwd@]proxy.server:port."),
optparse.make_option(
'--timeout', '--default-timeout',
metavar='sec',
dest='timeout',
type='float',
default=15,
help='Set the socket timeout (default %default seconds).'),
optparse.make_option(
# The default version control system for editables, e.g. 'svn'
'--default-vcs',
dest='default_vcs',
type='str',
default='',
help=optparse.SUPPRESS_HELP),
optparse.make_option(
# A regex to be used to skip requirements
'--skip-requirements-regex',
dest='skip_requirements_regex',
type='str',
default='',
help=optparse.SUPPRESS_HELP),
optparse.make_option(
# Option when path already exist
'--exists-action',
dest='exists_action',
type='choice',
choices=['s', 'i', 'w', 'b'],
default=[],
action='append',
metavar='action',
help="Default action when a path already exists: "
"(s)witch, (i)gnore, (w)ipe, (b)ackup."),
optparse.make_option(
'--cert',
dest='cert',
type='str',
default='',
metavar='path',
help = "Path to alternate CA bundle."),
]
if not ssl:
standard_options.append(optparse.make_option(
'--insecure',
dest='insecure',
action='store_true',
default=False,
help = "Allow lack of certificate checking when ssl is not installed."))
| mit | 4,611,692,514,892,938,000 | 31.667553 | 85 | 0.583001 | false |
akhilari7/pa-dude | lib/python2.7/site-packages/pyrfc3339/generator.py | 3 | 2170 | import pytz
from pyrfc3339.utils import timezone, timedelta_seconds
def generate(dt, utc=True, accept_naive=False, microseconds=False):
'''
Generate an :RFC:`3339`-formatted timestamp from a
:class:`datetime.datetime`.
>>> from datetime import datetime
>>> generate(datetime(2009,1,1,12,59,59,0,pytz.utc))
'2009-01-01T12:59:59Z'
The timestamp will use UTC unless `utc=False` is specified, in which case
it will use the timezone from the :class:`datetime.datetime`'s
:attr:`tzinfo` parameter.
>>> eastern = pytz.timezone('US/Eastern')
>>> dt = eastern.localize(datetime(2009,1,1,12,59,59))
>>> generate(dt)
'2009-01-01T17:59:59Z'
>>> generate(dt, utc=False)
'2009-01-01T12:59:59-05:00'
Unless `accept_naive=True` is specified, the `datetime` must not be naive.
>>> generate(datetime(2009,1,1,12,59,59,0))
Traceback (most recent call last):
...
ValueError: naive datetime and accept_naive is False
>>> generate(datetime(2009,1,1,12,59,59,0), accept_naive=True)
'2009-01-01T12:59:59Z'
If `accept_naive=True` is specified, the `datetime` is assumed to be UTC.
Attempting to generate a local timestamp from a naive datetime will result
in an error.
>>> generate(datetime(2009,1,1,12,59,59,0), accept_naive=True, utc=False)
Traceback (most recent call last):
...
ValueError: cannot generate a local timestamp from a naive datetime
'''
if dt.tzinfo is None:
if accept_naive is True:
if utc is True:
dt = dt.replace(tzinfo=pytz.utc)
else:
raise ValueError("cannot generate a local timestamp from " +
"a naive datetime")
else:
raise ValueError("naive datetime and accept_naive is False")
if utc is True:
dt = dt.astimezone(pytz.utc)
timestamp = dt.strftime('%Y-%m-%dT%H:%M:%S')
if microseconds is True:
timestamp += dt.strftime('.%f')
if dt.tzinfo is pytz.utc:
timestamp += 'Z'
else:
timestamp += timezone(timedelta_seconds(dt.tzinfo.utcoffset(dt)))
return timestamp
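if __name__ == '__main__':
    # Usage sketch added for illustration; it is not part of the original
    # module and only exercises the keyword arguments documented above.
    from datetime import datetime
    moment = datetime(2009, 1, 1, 12, 59, 59, 123456, pytz.utc)
    print(generate(moment))                     # '2009-01-01T12:59:59Z'
    print(generate(moment, microseconds=True))  # '2009-01-01T12:59:59.123456Z'
    eastern = pytz.timezone('US/Eastern')
    print(generate(moment.astimezone(eastern), utc=False))  # '2009-01-01T07:59:59-05:00'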
| mit | -9,141,408,771,895,293,000 | 31.38806 | 78 | 0.631797 | false |
sbalde/edx-platform | cms/djangoapps/contentstore/features/signup.py | 111 | 2821 | # pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from nose.tools import assert_true, assert_false # pylint: disable=no-name-in-module
@step('I fill in the registration form$')
def i_fill_in_the_registration_form(step):
def fill_in_reg_form():
register_form = world.css_find('form#register_form')
register_form.find_by_name('email').fill('[email protected]')
register_form.find_by_name('password').fill('test')
register_form.find_by_name('username').fill('robot-studio')
register_form.find_by_name('name').fill('Robot Studio')
register_form.find_by_name('terms_of_service').click()
world.retry_on_exception(fill_in_reg_form)
@step('I press the Create My Account button on the registration form$')
def i_press_the_button_on_the_registration_form(step):
submit_css = 'form#register_form button#submit'
world.css_click(submit_css)
@step('I should see an email verification prompt')
def i_should_see_an_email_verification_prompt(step):
world.css_has_text('h1.page-header', u'Studio Home')
world.css_has_text('div.msg h3.title', u'We need to verify your email address')
@step(u'I fill in and submit the signin form$')
def i_fill_in_the_signin_form(step):
def fill_login_form():
login_form = world.browser.find_by_css('form#login_form')
login_form.find_by_name('email').fill('[email protected]')
login_form.find_by_name('password').fill('test')
login_form.find_by_name('submit').click()
world.retry_on_exception(fill_login_form)
@step(u'I should( not)? see a login error message$')
def i_should_see_a_login_error(step, should_not_see):
if should_not_see:
# the login error may be absent or invisible. Check absence first,
# because css_visible will throw an exception if the element is not present
if world.is_css_present('div#login_error'):
assert_false(world.css_visible('div#login_error'))
else:
assert_true(world.css_visible('div#login_error'))
@step(u'I fill in and submit the signin form incorrectly$')
def i_goof_in_the_signin_form(step):
def fill_login_form():
login_form = world.browser.find_by_css('form#login_form')
login_form.find_by_name('email').fill('[email protected]')
login_form.find_by_name('password').fill('oops')
login_form.find_by_name('submit').click()
world.retry_on_exception(fill_login_form)
@step(u'I edit the password field$')
def i_edit_the_password_field(step):
password_css = 'form#login_form input#password'
world.css_fill(password_css, 'test')
@step(u'I submit the signin form$')
def i_submit_the_signin_form(step):
submit_css = 'form#login_form button#submit'
world.css_click(submit_css)
| agpl-3.0 | 4,010,837,311,263,612,000 | 38.180556 | 85 | 0.685927 | false |
dmsimard/ansible | test/lib/ansible_test/_internal/test.py | 7 | 15062 | """Classes for storing and processing test results."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import re
from . import types as t
from .util import (
display,
get_ansible_version,
)
from .util_common import (
write_text_test_results,
write_json_test_results,
ResultType,
)
from .config import (
TestConfig,
)
def calculate_best_confidence(choices, metadata):
"""
:type choices: tuple[tuple[str, int]]
:type metadata: Metadata
:rtype: int
"""
best_confidence = 0
for path, line in choices:
confidence = calculate_confidence(path, line, metadata)
best_confidence = max(confidence, best_confidence)
return best_confidence
def calculate_confidence(path, line, metadata):
"""
:type path: str
:type line: int
:type metadata: Metadata
:rtype: int
"""
ranges = metadata.changes.get(path)
# no changes were made to the file
if not ranges:
return 0
# changes were made to the same file and line
    if any(r[0] <= line <= r[1] for r in ranges):
return 100
# changes were made to the same file and the line number is unknown
if line == 0:
return 75
# changes were made to the same file and the line number is different
return 50
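# Worked example added for clarity (values follow directly from the branches
# above): with metadata.changes == {'lib/foo.py': ((10, 20),)}, a message at
# lib/foo.py line 15 scores 100, one at lib/foo.py with an unknown line (0)
# scores 75, one at lib/foo.py line 99 scores 50, and one in an unchanged
# file scores 0.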
class TestResult:
"""Base class for test results."""
def __init__(self, command, test, python_version=None):
"""
:type command: str
:type test: str
:type python_version: str
"""
self.command = command
self.test = test
self.python_version = python_version
self.name = self.test or self.command
if self.python_version:
self.name += '-python-%s' % self.python_version
try:
import junit_xml
except ImportError:
junit_xml = None
self.junit = junit_xml
def write(self, args):
"""
:type args: TestConfig
"""
self.write_console()
self.write_bot(args)
if args.lint:
self.write_lint()
if args.junit:
if self.junit:
self.write_junit(args)
else:
display.warning('Skipping junit xml output because the `junit-xml` python package was not found.', unique=True)
def write_console(self):
"""Write results to console."""
def write_lint(self):
"""Write lint results to stdout."""
def write_bot(self, args):
"""
:type args: TestConfig
"""
def write_junit(self, args):
"""
:type args: TestConfig
"""
def create_result_name(self, extension):
"""
:type extension: str
:rtype: str
"""
name = 'ansible-test-%s' % self.command
if self.test:
name += '-%s' % self.test
if self.python_version:
name += '-python-%s' % self.python_version
name += extension
return name
def save_junit(self, args, test_case, properties=None):
"""
:type args: TestConfig
:type test_case: junit_xml.TestCase
:type properties: dict[str, str] | None
:rtype: str | None
"""
test_suites = [
self.junit.TestSuite(
name='ansible-test',
test_cases=[test_case],
timestamp=datetime.datetime.utcnow().replace(microsecond=0).isoformat(),
properties=properties,
),
]
# the junit_xml API is changing in version 2.0.0
# TestSuite.to_xml_string is being replaced with to_xml_report_string
# see: https://github.com/kyrus/python-junit-xml/blob/63db26da353790500642fd02cae1543eb41aab8b/junit_xml/__init__.py#L249-L261
try:
to_xml_string = self.junit.to_xml_report_string
except AttributeError:
# noinspection PyDeprecation
to_xml_string = self.junit.TestSuite.to_xml_string
report = to_xml_string(test_suites=test_suites, prettyprint=True, encoding='utf-8')
if args.explain:
return
write_text_test_results(ResultType.JUNIT, self.create_result_name('.xml'), report)
class TestTimeout(TestResult):
"""Test timeout."""
def __init__(self, timeout_duration):
"""
:type timeout_duration: int
"""
super(TestTimeout, self).__init__(command='timeout', test='')
self.timeout_duration = timeout_duration
def write(self, args):
"""
:type args: TestConfig
"""
message = 'Tests were aborted after exceeding the %d minute time limit.' % self.timeout_duration
# Include a leading newline to improve readability on Shippable "Tests" tab.
# Without this, the first line becomes indented.
output = '''
One or more of the following situations may be responsible:
- Code changes have resulted in tests that hang or run for an excessive amount of time.
- Tests have been added which exceed the time limit when combined with existing tests.
- Test infrastructure and/or external dependencies are operating slower than normal.'''
if args.coverage:
output += '\n- Additional overhead from collecting code coverage has resulted in tests exceeding the time limit.'
output += '\n\nConsult the console log for additional details on where the timeout occurred.'
timestamp = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
# hack to avoid requiring junit-xml, which may not be pre-installed outside our test containers
xml = '''
<?xml version="1.0" encoding="utf-8"?>
<testsuites disabled="0" errors="1" failures="0" tests="1" time="0.0">
\t<testsuite disabled="0" errors="1" failures="0" file="None" log="None" name="ansible-test" skipped="0" tests="1" time="0" timestamp="%s" url="None">
\t\t<testcase classname="timeout" name="timeout">
\t\t\t<error message="%s" type="error">%s</error>
\t\t</testcase>
\t</testsuite>
</testsuites>
''' % (timestamp, message, output)
write_text_test_results(ResultType.JUNIT, self.create_result_name('.xml'), xml.lstrip())
class TestSuccess(TestResult):
"""Test success."""
def write_junit(self, args):
"""
:type args: TestConfig
"""
test_case = self.junit.TestCase(classname=self.command, name=self.name)
self.save_junit(args, test_case)
class TestSkipped(TestResult):
"""Test skipped."""
def write_console(self):
"""Write results to console."""
display.info('No tests applicable.', verbosity=1)
def write_junit(self, args):
"""
:type args: TestConfig
"""
test_case = self.junit.TestCase(classname=self.command, name=self.name)
test_case.add_skipped_info('No tests applicable.')
self.save_junit(args, test_case)
class TestFailure(TestResult):
"""Test failure."""
def __init__(self, command, test, python_version=None, messages=None, summary=None):
"""
:type command: str
:type test: str
:type python_version: str | None
:type messages: list[TestMessage] | None
:type summary: unicode | None
"""
super(TestFailure, self).__init__(command, test, python_version)
if messages:
messages = sorted(messages)
else:
messages = []
self.messages = messages
self.summary = summary
def write(self, args):
"""
:type args: TestConfig
"""
if args.metadata.changes:
self.populate_confidence(args.metadata)
super(TestFailure, self).write(args)
def write_console(self):
"""Write results to console."""
if self.summary:
display.error(self.summary)
else:
if self.python_version:
specifier = ' on python %s' % self.python_version
else:
specifier = ''
display.error('Found %d %s issue(s)%s which need to be resolved:' % (len(self.messages), self.test or self.command, specifier))
for message in self.messages:
display.error(message.format(show_confidence=True))
doc_url = self.find_docs()
if doc_url:
display.info('See documentation for help: %s' % doc_url)
def write_lint(self):
"""Write lint results to stdout."""
if self.summary:
command = self.format_command()
message = 'The test `%s` failed. See stderr output for details.' % command
path = ''
message = TestMessage(message, path)
print(message)
else:
for message in self.messages:
print(message)
def write_junit(self, args):
"""
:type args: TestConfig
"""
title = self.format_title()
output = self.format_block()
test_case = self.junit.TestCase(classname=self.command, name=self.name)
# Include a leading newline to improve readability on Shippable "Tests" tab.
# Without this, the first line becomes indented.
test_case.add_failure_info(message=title, output='\n%s' % output)
self.save_junit(args, test_case)
def write_bot(self, args):
"""
:type args: TestConfig
"""
docs = self.find_docs()
message = self.format_title(help_link=docs)
output = self.format_block()
if self.messages:
verified = all((m.confidence or 0) >= 50 for m in self.messages)
else:
verified = False
bot_data = dict(
verified=verified,
docs=docs,
results=[
dict(
message=message,
output=output,
),
],
)
if args.explain:
return
write_json_test_results(ResultType.BOT, self.create_result_name('.json'), bot_data)
def populate_confidence(self, metadata):
"""
:type metadata: Metadata
"""
for message in self.messages:
if message.confidence is None:
message.confidence = calculate_confidence(message.path, message.line, metadata)
def format_command(self):
"""
:rtype: str
"""
command = 'ansible-test %s' % self.command
if self.test:
command += ' --test %s' % self.test
if self.python_version:
command += ' --python %s' % self.python_version
return command
def find_docs(self):
"""
:rtype: str
"""
if self.command != 'sanity':
return None # only sanity tests have docs links
        # Use the major.minor version for the URL only if this is a release that
# matches the pattern 2.4.0, otherwise, use 'devel'
ansible_version = get_ansible_version()
url_version = 'devel'
if re.search(r'^[0-9.]+$', ansible_version):
url_version = '.'.join(ansible_version.split('.')[:2])
testing_docs_url = 'https://docs.ansible.com/ansible/%s/dev_guide/testing' % url_version
url = '%s/%s/' % (testing_docs_url, self.command)
if self.test:
url += '%s.html' % self.test
return url
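    # Added example (derived from the logic above): for the 'sanity' command and
    # test 'pep8' on a release version such as 2.9.0 this returns
    # https://docs.ansible.com/ansible/2.9/dev_guide/testing/sanity/pep8.html;
    # any non-release version string falls back to the 'devel' docs tree.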
def format_title(self, help_link=None):
"""
:type help_link: str | None
:rtype: str
"""
command = self.format_command()
if self.summary:
reason = 'the error'
else:
reason = '1 error' if len(self.messages) == 1 else '%d errors' % len(self.messages)
if help_link:
help_link_markup = ' [[explain](%s)]' % help_link
else:
help_link_markup = ''
title = 'The test `%s`%s failed with %s:' % (command, help_link_markup, reason)
return title
def format_block(self):
"""
:rtype: str
"""
if self.summary:
block = self.summary
else:
block = '\n'.join(m.format() for m in self.messages)
message = block.strip()
# Hack to remove ANSI color reset code from SubprocessError messages.
message = message.replace(display.clear, '')
return message
class TestMessage:
"""Single test message for one file."""
def __init__(self, message, path, line=0, column=0, level='error', code=None, confidence=None):
"""
:type message: str
:type path: str
:type line: int
:type column: int
:type level: str
:type code: str | None
:type confidence: int | None
"""
self.__path = path
self.__line = line
self.__column = column
self.__level = level
self.__code = code
self.__message = message
self.confidence = confidence
@property
def path(self): # type: () -> str
"""Return the path."""
return self.__path
@property
def line(self): # type: () -> int
"""Return the line number, or 0 if none is available."""
return self.__line
@property
def column(self): # type: () -> int
"""Return the column number, or 0 if none is available."""
return self.__column
@property
def level(self): # type: () -> str
"""Return the level."""
return self.__level
@property
def code(self): # type: () -> t.Optional[str]
"""Return the code, if any."""
return self.__code
@property
def message(self): # type: () -> str
"""Return the message."""
return self.__message
@property
def tuple(self): # type: () -> t.Tuple[str, int, int, str, t.Optional[str], str]
"""Return a tuple with all the immutable values of this test message."""
return self.__path, self.__line, self.__column, self.__level, self.__code, self.__message
def __lt__(self, other):
return self.tuple < other.tuple
def __le__(self, other):
return self.tuple <= other.tuple
def __eq__(self, other):
return self.tuple == other.tuple
def __ne__(self, other):
return self.tuple != other.tuple
def __gt__(self, other):
return self.tuple > other.tuple
def __ge__(self, other):
return self.tuple >= other.tuple
def __hash__(self):
return hash(self.tuple)
def __str__(self):
return self.format()
def format(self, show_confidence=False):
"""
:type show_confidence: bool
:rtype: str
"""
if self.__code:
msg = '%s: %s' % (self.__code, self.__message)
else:
msg = self.__message
if show_confidence and self.confidence is not None:
msg += ' (%d%%)' % self.confidence
return '%s:%s:%s: %s' % (self.__path, self.__line, self.__column, msg)
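    # Added example: a message with path 'plugins/module.py', line 10, column 1,
    # code 'E101' and confidence 75 formats as
    # 'plugins/module.py:10:1: E101: some message (75%)' when show_confidence
    # is True, and without the trailing '(75%)' otherwise.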
| gpl-3.0 | -3,059,636,963,325,375,000 | 27.689524 | 150 | 0.566857 | false |
canwe/NewsBlur | apps/rss_feeds/migrations/0044_favicon_color.py | 18 | 7246 | # encoding: utf-8
import sys
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from apps.rss_feeds.models import Feed
try:
from apps.rss_feeds.models import FeedIcon
except ImportError:
pass
class Migration(DataMigration):
def forwards(self, orm):
feeds = Feed.objects.all().order_by('-average_stories_per_month')
feed_count = feeds.count()
i = 0
for feed in feeds:
i += 1
if i % 1000 == 0:
print "%s/%s" % (i, feed_count,)
sys.stdout.flush()
if not feed.favicon_color:
feed_icon = MFeedIcon.objects(feed_id=feed.pk)
if feed_icon:
try:
feed.favicon_color = feed_icon[0].color
feed.favicon_not_found = feed_icon[0].not_found
feed.save()
except Exception, e:
print '\n\n!!! %s\n\n' % e
continue
def backwards(self, orm):
"Write your backwards methods here."
models = {
'rss_feeds.duplicatefeed': {
'Meta': {'object_name': 'DuplicateFeed'},
'duplicate_address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'duplicate_feed_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'duplicate_addresses'", 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'rss_feeds.feed': {
'Meta': {'ordering': "['feed_title']", 'object_name': 'Feed', 'db_table': "'feeds'"},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'active_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'db_index': 'True'}),
'average_stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'exception_code': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'favicon_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'favicon_not_found': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'feed_address': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'feed_title': ('django.db.models.fields.CharField', [], {'default': "'[Untitled]'", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fetched_once': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_feed_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'has_page_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'premium_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'queued_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'stories_last_month': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'rss_feeds.feeddata': {
'Meta': {'object_name': 'FeedData'},
'feed': ('utils.fields.AutoOneToOneField', [], {'related_name': "'data'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'feed_classifier_counts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feed_tagline': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'popular_authors': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'popular_tags': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'story_count_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedicon': {
'Meta': {'object_name': 'FeedIcon'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feed': ('utils.fields.AutoOneToOneField', [], {'related_name': "'icon'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['rss_feeds.Feed']"}),
'icon_url': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'not_found': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'rss_feeds.feedloadtime': {
'Meta': {'object_name': 'FeedLoadtime'},
'date_accessed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loadtime': ('django.db.models.fields.FloatField', [], {})
},
'rss_feeds.feedupdatehistory': {
'Meta': {'object_name': 'FeedUpdateHistory'},
'average_per_feed': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '1'}),
'fetch_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number_of_feeds': ('django.db.models.fields.IntegerField', [], {}),
'seconds_taken': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['rss_feeds']
| mit | -6,049,237,689,037,459,000 | 64.279279 | 159 | 0.54016 | false |
MathieuDuponchelle/my_patched_photologue | photologue/tests/test_gallery.py | 3 | 2220 | from .. import models
from .helpers import PhotologueBaseTest
from .factories import GalleryFactory, PhotoFactory
class GalleryTest(PhotologueBaseTest):
def setUp(self):
"""Create a test gallery with 2 photos."""
super(GalleryTest, self).setUp()
self.test_gallery = GalleryFactory()
self.pl2 = PhotoFactory()
self.test_gallery.photos.add(self.pl)
self.test_gallery.photos.add(self.pl2)
def tearDown(self):
super(GalleryTest, self).tearDown()
self.pl2.delete()
def test_public(self):
"""Method 'public' should only return photos flagged as public."""
self.assertEqual(self.test_gallery.public().count(), 2)
self.pl.is_public = False
self.pl.save()
self.assertEqual(self.test_gallery.public().count(), 1)
def test_photo_count(self):
"""Method 'photo_count' should return the count of the photos in this
gallery."""
self.assertEqual(self.test_gallery.photo_count(), 2)
self.pl.is_public = False
self.pl.save()
self.assertEqual(self.test_gallery.photo_count(), 1)
# Method takes an optional 'public' kwarg.
self.assertEqual(self.test_gallery.photo_count(public=False), 2)
def test_sample(self):
"""Method 'sample' should return a random queryset of photos from the
gallery."""
# By default we return all photos from the gallery (but ordered at random).
_current_sample_size = models.SAMPLE_SIZE
models.SAMPLE_SIZE = 5
self.assertEqual(len(self.test_gallery.sample()), 2)
# We can state how many photos we want.
self.assertEqual(len(self.test_gallery.sample(count=1)), 1)
# If only one photo is public then the sample cannot have more than one
# photo.
self.pl.is_public = False
self.pl.save()
self.assertEqual(len(self.test_gallery.sample(count=2)), 1)
self.pl.is_public = True
self.pl.save()
# We can limit the number of photos by changing settings.
models.SAMPLE_SIZE = 1
self.assertEqual(len(self.test_gallery.sample()), 1)
models.SAMPLE_SIZE = _current_sample_size
| bsd-3-clause | -6,755,612,140,835,178,000 | 34.238095 | 83 | 0.637838 | false |
ansrivas/pylogging | pylogging/formatters.py | 1 | 1932 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""Bunch of log formatters to be used."""
import logging
try:
import ujson as json
except Exception:  # ujson is unavailable; fall back to the stdlib json module
import json
class TextFormatter(logging.Formatter):
"""Format the meta data in the log message to fix string length."""
datefmt = '%Y-%m-%d %H:%M:%S'
def __init__(self, context=None):
self.context = context
super(TextFormatter, self).__init__()
def format(self, record):
"""Default formatter."""
error_location = "%s.%s" % (record.name, record.funcName)
line_number = "%s" % (record.lineno)
location_line = error_location[:32] + ":" + line_number
s = "%.19s [%-8s] [%-36s] %s" % (self.formatTime(record, TextFormatter.datefmt),
record.levelname, location_line, record.getMessage())
if self.context:
s = "%.19s [%s] [%-8s] [%-36s] %s" % (self.formatTime(record, TextFormatter.datefmt), self.context,
record.levelname, location_line, record.getMessage())
return s
class JsonFormatter(logging.Formatter):
"""Format the meta data in the json log message and fix string length."""
datefmt = '%Y-%m-%d %H:%M:%S'
def format(self, record):
"""Default json formatter."""
error_location = "%s.%s" % (record.name, record.funcName)
line_number = "%s" % (record.lineno)
location_line = error_location[:32] + ":" + line_number
output = {'log_time': self.formatTime(record, TextFormatter.datefmt),
'log_location': location_line,
'log_level': record.levelname,
'message': record.getMessage()}
return json.dumps(output)
class Formatters(object):
"""Define a common class for Formatters."""
TextFormatter = TextFormatter()
JsonFormatter = JsonFormatter()
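if __name__ == '__main__':
    # Minimal usage sketch added for illustration; it is not part of the
    # original module. It attaches both formatters to stream handlers and
    # emits a single record through each.
    logger = logging.getLogger('pylogging.demo')
    logger.setLevel(logging.INFO)
    text_handler = logging.StreamHandler()
    text_handler.setFormatter(Formatters.TextFormatter)
    json_handler = logging.StreamHandler()
    json_handler.setFormatter(Formatters.JsonFormatter)
    logger.addHandler(text_handler)
    logger.addHandler(json_handler)
    logger.info('formatter demo message')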
| mit | -2,276,261,824,209,945,600 | 32.310345 | 111 | 0.577122 | false |
xchenum/quantum | quantum/plugins/cisco/client/cli.py | 6 | 6937 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Initial structure and framework of this CLI has been borrowed from Quantum,
# written by the following authors
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Salvatore Orlando, Citrix
#
# Cisco adaptation for extensions
# @author: Sumit Naiksatam, Cisco Systems, Inc.
# @author: Ying Liu, Cisco Systems, Inc.
import logging
import logging.handlers
from optparse import OptionParser
import os
import sys
import quantumclient.cli as qcli
from quantumclient import Client
LOG = logging.getLogger('quantum')
FORMAT = 'json'
#ACTION_PREFIX_EXT = '/v1.0'
#ACTION_PREFIX_CSCO = ACTION_PREFIX_EXT + \
# '/extensions/csco/tenants/{tenant_id}'
VERSION = '1.0'
URI_PREFIX_EXT = ''
URI_PREFIX_CSCO = '/extensions/csco/tenants/{tenant_id}'
TENANT_ID = 'nova'
CSCO_EXT_NAME = 'Cisco Nova Tenant'
DEFAULT_QUANTUM_VERSION = '1.1'
def help():
"""Help for CLI"""
print "\nCisco Extension Commands:"
for key in COMMANDS.keys():
print " %s %s" % (
key, " ".join(["<%s>" % y for y in COMMANDS[key]["args"]]))
def build_args(cmd, cmdargs, arglist):
"""Building the list of args for a particular CLI"""
args = []
orig_arglist = arglist[:]
try:
for cmdarg in cmdargs:
args.append(arglist[0])
del arglist[0]
except:
LOG.error("Not enough arguments for \"%s\" (expected: %d, got: %d)" % (
cmd, len(cmdargs), len(orig_arglist)))
print "Usage:\n %s %s" % (
cmd, " ".join(["<%s>" % y for y in COMMANDS[cmd]["args"]]))
sys.exit()
if len(arglist) > 0:
LOG.error("Too many arguments for \"%s\" (expected: %d, got: %d)" % (
cmd, len(cmdargs), len(orig_arglist)))
print "Usage:\n %s %s" % (
cmd, " ".join(["<%s>" % y for y in COMMANDS[cmd]["args"]]))
sys.exit()
return args
def list_extensions(*args):
"""Invoking the action to get the supported extensions"""
request_url = "/extensions"
client = Client(HOST, PORT, USE_SSL, format='json',
version=VERSION, uri_prefix=URI_PREFIX_EXT, tenant="dummy")
data = client.do_request('GET', request_url)
print("Obtained supported extensions from Quantum: %s" % data)
def schedule_host(tenant_id, instance_id, user_id=None):
"""Gets the host name from the Quantum service"""
project_id = tenant_id
instance_data_dict = {
'novatenant': {
'instance_id': instance_id,
'instance_desc': {
'user_id': user_id,
'project_id': project_id,
},
},
}
request_url = "/novatenants/" + project_id + "/schedule_host"
client = Client(HOST, PORT, USE_SSL, format='json', tenant=TENANT_ID,
version=VERSION, uri_prefix=URI_PREFIX_CSCO)
data = client.do_request('PUT', request_url, body=instance_data_dict)
hostname = data["host_list"]["host_1"]
if not hostname:
print("Scheduler was unable to locate a host"
" for this request. Is the appropriate"
" service running?")
print("Quantum service returned host: %s" % hostname)
def create_multiport(tenant_id, net_id_list, *args):
"""Creates ports on a single host"""
net_list = net_id_list.split(",")
ports_info = {'multiport':
{'status': 'ACTIVE',
'net_id_list': net_list,
'ports_desc': {'key': 'value'}}}
request_url = "/multiport"
client = Client(HOST, PORT, USE_SSL, format='json', tenant=tenant_id,
version=VERSION, uri_prefix=URI_PREFIX_CSCO)
data = client.do_request('POST', request_url, body=ports_info)
print("Created ports: %s" % data)
COMMANDS = {
"create_multiport": {
"func": create_multiport,
"args": ["tenant-id",
"net-id-list (comma separated list of netword IDs)"],
},
"list_extensions": {
"func": list_extensions,
"args": [],
},
"schedule_host": {
"func": schedule_host,
"args": ["tenant-id", "instance-id"],
},
}
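# Added usage examples (derived from COMMANDS and the option parsing in main()
# below; host, port, tenant and IDs are placeholders):
#   python cli.py -H 127.0.0.1 -p 9696 list_extensions
#   python cli.py -H 127.0.0.1 -p 9696 schedule_host nova <instance-id>
#   python cli.py -H 127.0.0.1 -p 9696 create_multiport nova net-id-1,net-id-2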
def main():
import cli
usagestr = "Usage: %prog [OPTIONS] <command> [args]"
PARSER = OptionParser(usage=usagestr)
PARSER.add_option("-H", "--host", dest="host",
type="string", default="127.0.0.1",
help="ip address of api host")
PARSER.add_option("-p", "--port", dest="port",
type="int", default=9696, help="api poort")
PARSER.add_option("-s", "--ssl", dest="ssl",
action="store_true", default=False, help="use ssl")
PARSER.add_option("-v", "--verbose", dest="verbose",
action="store_true", default=False,
help="turn on verbose logging")
PARSER.add_option("-f", "--logfile", dest="logfile",
type="string", default="syslog", help="log file path")
PARSER.add_option(
'--version', default=DEFAULT_QUANTUM_VERSION,
help='Accepts 1.1 and 1.0, defaults to env[QUANTUM_VERSION].')
options, args = PARSER.parse_args()
if options.verbose:
LOG.setLevel(logging.DEBUG)
else:
LOG.setLevel(logging.WARN)
if options.logfile == "syslog":
LOG.addHandler(logging.handlers.SysLogHandler(address='/dev/log'))
else:
LOG.addHandler(logging.handlers.WatchedFileHandler(options.logfile))
os.chmod(options.logfile, 0644)
version = options.version
if len(args) < 1:
PARSER.print_help()
qcli.help(version)
help()
sys.exit(1)
CMD = args[0]
if CMD in qcli.commands['1.1'].keys():
qcli.main()
sys.exit(1)
if CMD not in COMMANDS.keys():
LOG.error("Unknown command: %s" % CMD)
qcli.help(version)
help()
sys.exit(1)
args = build_args(CMD, COMMANDS[CMD]["args"], args[1:])
LOG.info("Executing command \"%s\" with args: %s" % (CMD, args))
HOST = options.host
PORT = options.port
USE_SSL = options.ssl
COMMANDS[CMD]["func"](*args)
LOG.info("Command execution completed")
sys.exit(0)
if __name__ == "__main__":
main()
| apache-2.0 | 3,999,638,209,563,791,400 | 31.115741 | 79 | 0.590889 | false |
anomitra/articleScraper | PyQt-gpl-5.4.1/examples/qml/referenceexamples/methods.py | 2 | 4326 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
import sys
from PyQt5.QtCore import (pyqtProperty, pyqtSlot, QCoreApplication, QObject,
QUrl)
from PyQt5.QtQml import (qmlRegisterType, QQmlComponent, QQmlEngine,
QQmlListProperty)
QML = b'''
import QtQuick 2.0
import People 1.0
BirthdayParty {
host: Person {
name: "Bob Jones"
shoeSize: 12
}
guests: [
Person { name: "Leo Hodges" },
Person { name: "Jack Smith" },
Person { name: "Anne Brown" }
]
Component.onCompleted: invite("William Green")
}
'''
class Person(QObject):
def __init__(self, parent=None):
super(Person, self).__init__(parent)
self._name = ''
self._shoeSize = 0
@pyqtProperty(str)
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@pyqtProperty(int)
def shoeSize(self):
return self._shoeSize
@shoeSize.setter
def shoeSize(self, shoeSize):
self._shoeSize = shoeSize
class BirthdayParty(QObject):
def __init__(self, parent=None):
super(BirthdayParty, self).__init__(parent)
self._host = None
self._guests = []
@pyqtProperty(Person)
def host(self):
return self._host
@host.setter
def host(self, host):
self._host = host
@pyqtProperty(QQmlListProperty)
def guests(self):
return QQmlListProperty(Person, self, self._guests)
def guestCount(self):
return len(self._guests)
def guest(self, idx):
return self._guests[idx]
@pyqtSlot(str)
def invite(self, name):
person = Person(self)
person.name = name
self._guests.append(person)
app = QCoreApplication(sys.argv)
qmlRegisterType(BirthdayParty, "People", 1, 0, "BirthdayParty")
qmlRegisterType(Person, "People", 1, 0, "Person")
engine = QQmlEngine()
component = QQmlComponent(engine)
component.setData(QML, QUrl())
party = component.create()
if party is not None and party.host is not None:
print("\"%s\" is having a birthday!" % party.host.name)
print("They are inviting:")
for ii in range(party.guestCount()):
print(" \"%s\"" % party.guest(ii).name)
else:
for e in component.errors():
print("Error:", e.toString());
| gpl-2.0 | -296,076,130,369,872,450 | 27.84 | 77 | 0.648636 | false |
liucode/tempest-master | tempest/api/compute/volumes/test_volume_snapshots.py | 5 | 3040 | # Copyright 2015 Fujitsu(fnst) Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
from tempest import test
CONF = config.CONF
class VolumesSnapshotsTestJSON(base.BaseV2ComputeTest):
@classmethod
def skip_checks(cls):
super(VolumesSnapshotsTestJSON, cls).skip_checks()
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@classmethod
def setup_clients(cls):
super(VolumesSnapshotsTestJSON, cls).setup_clients()
cls.volumes_client = cls.volumes_extensions_client
cls.snapshots_client = cls.snapshots_extensions_client
@test.idempotent_id('cd4ec87d-7825-450d-8040-6e2068f2da8f')
def test_volume_snapshot_create_get_list_delete(self):
v_name = data_utils.rand_name('Volume')
volume = self.volumes_client.create_volume(
size=CONF.volume.volume_size,
display_name=v_name)['volume']
self.addCleanup(self.delete_volume, volume['id'])
waiters.wait_for_volume_status(self.volumes_client, volume['id'],
'available')
s_name = data_utils.rand_name('Snapshot')
# Create snapshot
snapshot = self.snapshots_client.create_snapshot(
volume['id'],
display_name=s_name)['snapshot']
def delete_snapshot(snapshot_id):
waiters.wait_for_snapshot_status(self.snapshots_client,
snapshot_id,
'available')
# Delete snapshot
self.snapshots_client.delete_snapshot(snapshot_id)
self.snapshots_client.wait_for_resource_deletion(snapshot_id)
self.addCleanup(delete_snapshot, snapshot['id'])
self.assertEqual(volume['id'], snapshot['volumeId'])
# Get snapshot
fetched_snapshot = self.snapshots_client.show_snapshot(
snapshot['id'])['snapshot']
self.assertEqual(s_name, fetched_snapshot['displayName'])
self.assertEqual(volume['id'], fetched_snapshot['volumeId'])
# Fetch all snapshots
snapshots = self.snapshots_client.list_snapshots()['snapshots']
self.assertIn(snapshot['id'], map(lambda x: x['id'], snapshots))
| apache-2.0 | -4,811,655,127,097,651,000 | 40.643836 | 79 | 0.651316 | false |
PHSCRC/phsled | nfc/clf/rcs380.py | 4 | 38231 | # -*- coding: latin-1 -*-
# -----------------------------------------------------------------------------
# Copyright 2012-2015 Stephen Tiedemann <[email protected]>
#
# Licensed under the EUPL, Version 1.1 or - as soon they
# will be approved by the European Commission - subsequent
# versions of the EUPL (the "Licence");
# You may not use this work except in compliance with the
# Licence.
# You may obtain a copy of the Licence at:
#
# http://www.osor.eu/eupl
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the Licence is
# distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied.
# See the Licence for the specific language governing
# permissions and limitations under the Licence.
# -----------------------------------------------------------------------------
"""Driver module for contactless devices based on the Sony NFC Port-100
chipset. The only product known to use this chipset is the PaSoRi
RC-S380. The RC-S380 connects to the host as a native USB device.
The RC-S380 has been the first NFC Forum certified device. It supports
reading and writing of all NFC Forum tags as well as peer-to-peer
mode. In addition, the NFC Port-100 also supports card emulation Type
A and Type F Technology. A notable restriction is that peer-to-peer
active communication mode (not required for NFC Forum certification)
is not supported.
========== ======= ============
function support remarks
========== ======= ============
sense_tta yes
sense_ttb yes
sense_ttf yes
sense_dep no
listen_tta yes Type F responses can not be disabled
listen_ttb no
listen_ttf yes
listen_dep yes Only passive communication mode
========== ======= ============
"""
import logging
log = logging.getLogger(__name__)
import os
import time
import errno
import struct
import operator
from binascii import hexlify
import nfc.clf
from . import device
class Frame():
def __init__(self, data):
self._data = None
self._frame = None
if data[0:3] == bytearray("\x00\x00\xff"):
frame = bytearray(data)
if frame == bytearray("\x00\x00\xff\x00\xff\x00"):
self._type = "ack"
elif frame == bytearray("\x00\x00\xFF\xFF\xFF"):
self._type = "err"
elif frame[3:5] == bytearray("\xff\xff"):
self._type = "data"
if self.type == "data":
length = struct.unpack("<H", str(frame[5:7]))[0]
self._data = frame[8:8+length]
else:
frame = bytearray([0, 0, 255, 255, 255])
frame += bytearray(struct.pack("<H", len(data)))
frame += bytearray(struct.pack("B", (256 - sum(frame[5:7])) % 256))
frame += bytearray(data)
frame += bytearray([(256 - sum(frame[8:])) % 256, 0])
self._frame = frame
def __str__(self):
return str(self._frame)
@property
def type(self):
return self._type
@property
def data(self):
return self._data
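# Worked example of the framing implemented above (derived from
# Frame.__init__): wrapping the two command bytes D6 00 produces
#
#     00 00 FF FF FF 02 00 FE D6 00 2A 00
#
# i.e. preamble and start code, little-endian length (0x0002), length
# checksum (0x100 - 0x02 = 0xFE), the payload, data checksum
# (0x100 - 0xD6 = 0x2A) and a trailing 0x00 postamble.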
class CommunicationError:
err2str = {0x00000000: "NO_ERROR",
0x00000001: "PROTOCOL_ERROR",
0x00000002: "PARITY_ERROR",
0x00000004: "CRC_ERROR",
0x00000008: "COLLISION_ERROR",
0x00000010: "OVERFLOW_ERROR",
0x00000040: "TEMPERATURE_ERROR",
0x00000080: "RECEIVE_TIMEOUT_ERROR",
0x00000100: "CRYPTO1_ERROR",
0x00000200: "RFCA_ERROR",
0x00000400: "RF_OFF_ERROR",
0x00000800: "TRANSMIT_TIMEOUT_ERROR",
0x80000000: "RECEIVE_LENGTH_ERROR"
}
str2err = dict([(v, k) for k, v in err2str.iteritems()])
def __init__(self, status_bytes):
self.errno = struct.unpack('<L', str(status_bytes))[0]
def __eq__(self, strerr):
return self.errno & CommunicationError.str2err[strerr]
def __ne__(self, strerr):
return not self.__eq__(strerr)
def __str__(self):
return self.__class__.__name__ + ' ' + CommunicationError.err2str.get(
self.errno, "{0:08x}".format(self.errno))
class StatusError:
err2str = ("SUCCESS", "PARAMETER_ERROR", "PB_ERROR", "RFCA_ERROR",
"TEMPERATURE_ERROR", "PWD_ERROR", "RECEIVE_ERROR",
"COMMANDTYPE_ERROR")
def __init__(self, status):
self.errno = status
def __str__(self):
try:
return StatusError.err2str[self.errno]
except IndexError:
return "UNKNOWN STATUS ERROR {0:02x}".format(self.errno)
class Chipset(object):
ACK = bytearray.fromhex('0000FF00FF00')
CMD = {
# RF Communication
0x00: "InSetRF",
0x02: "InSetProtocol",
0x04: "InCommRF",
0x06: "SwitchRF",
0x10: "MaintainFlash",
0x12: "ResetDevice",
0x20: "GetFirmwareVersion",
0x22: "GetPDDataVersion",
0x24: "GetProperty",
0x26: "InGetProtocol",
0x28: "GetCommandType",
0x2A: "SetCommandType",
0x30: "InSetRCT",
0x32: "InGetRCT",
0x34: "GetPDData",
0x36: "ReadRegister",
0x40: "TgSetRF",
0x42: "TgSetProtocol",
0x44: "TgSetAuto",
0x46: "TgSetRFOff",
0x48: "TgCommRF",
0x50: "TgGetProtocol",
0x60: "TgSetRCT",
0x62: "TgGetRCT",
0xF0: "Diagnose",
}
def __init__(self, transport, logger):
self.transport = transport
self.log = logger
# write ack to perform a soft reset
# raises IOError(EACCES) if we're second
self.transport.write(Chipset.ACK)
# do some basic initialization and deactivate rf
self.set_command_type(1)
self.get_firmware_version()
self.get_pd_data_version()
self.switch_rf("off")
def close(self):
self.switch_rf('off')
self.transport.write(Chipset.ACK)
self.transport.close()
self.transport = None
def send_command(self, cmd_code, cmd_data, timeout):
cmd_data = bytearray(cmd_data)
log.log(logging.DEBUG-1, self.CMD[cmd_code]+" "+hexlify(cmd_data))
if self.transport is not None:
cmd = bytearray([0xD6, cmd_code]) + cmd_data
self.transport.write(str(Frame(cmd)))
if Frame(self.transport.read(timeout=100)).type == "ack":
rsp = Frame(self.transport.read(timeout)).data
if rsp and rsp[0] == 0xD7 and rsp[1] == cmd_code + 1:
return rsp[2:]
else:
log.debug("transport closed in send_command")
def in_set_rf(self, brty_send, brty_recv=None):
settings = {
"212F": (1, 1, 15, 1), "424F": (1, 2, 15, 2),
"106A": (2, 3, 15, 3), "212A": (4, 4, 15, 4),
"424A": (5, 5, 15, 5), "106B": (3, 7, 15, 7),
"212B": (3, 8, 15, 8), "424B": (3, 9, 15, 9),
}
if brty_recv is None: brty_recv = brty_send
data = settings[brty_send][0:2] + settings[brty_recv][2:4]
data = self.send_command(0x00, data, 100)
if data and data[0] != 0:
raise StatusError(data[0])
in_set_protocol_defaults = bytearray.fromhex(
"0018 0101 0201 0300 0400 0500 0600 0708 0800 0900"
"0A00 0B00 0C00 0E04 0F00 1000 1100 1200 1306")
def in_set_protocol(self, data=None, **kwargs):
data = bytearray() if data is None else bytearray(data)
KEYS = ("initial_guard_time", "add_crc", "check_crc", "multi_card",
"add_parity", "check_parity", "bitwise_anticoll",
"last_byte_bit_count", "mifare_crypto", "add_sof",
"check_sof", "add_eof", "check_eof", "rfu", "deaf_time",
"continuous_receive_mode", "min_len_for_crm",
"type_1_tag_rrdd", "rfca", "guard_time")
for key, value in kwargs.iteritems():
data.extend(bytearray([KEYS.index(key), int(value)]))
data = self.send_command(0x02, data, 100)
if data and data[0] != 0:
raise StatusError(data[0])
def in_comm_rf(self, data, timeout):
to = struct.pack("<H", timeout*10) if timeout <= 6553 else '\xFF\xFF'
data = self.send_command(0x04, to + str(data), timeout+500)
if data and tuple(data[0:4]) != (0, 0, 0, 0):
raise CommunicationError(data[0:4])
return data[5:] if data else None
def switch_rf(self, switch):
switch = ("off", "on").index(switch)
data = self.send_command(0x06, [switch], 100)
if data and data[0] != 0:
raise StatusError(data[0])
def tg_set_rf(self, comm_type):
tg_comm_type = {"106A": (8, 11), "212F": (8, 12), "424F": (8, 13),
"212A": (8, 14), "424A": (8, 15)}
comm_type = tg_comm_type[comm_type]
data = self.send_command(0x40, comm_type, 100)
if data and data[0] != 0:
raise StatusError(data[0])
tg_set_protocol_defaults = bytearray.fromhex("0001 0101 0207")
def tg_set_protocol(self, data=None, **kwargs):
data = bytearray() if data is None else bytearray(data)
KEYS = ("send_timeout_time_unit", "rf_off_error",
"continuous_receive_mode")
for key, value in kwargs.iteritems():
data.extend(bytearray([KEYS.index(key), int(value)]))
data = self.send_command(0x42, bytearray(data), 100)
if data and data[0] != 0:
raise StatusError(data[0])
def tg_set_auto(self, data):
data = self.send_command(0x44, data, 100)
if data and data[0] != 0:
raise StatusError(data[0])
def tg_comm_rf(self, guard_time=0, send_timeout=0xFFFF,
mdaa=False, nfca_params='', nfcf_params='',
mf_halted=False, arae=False, recv_timeout=0,
transmit_data=None):
# Send a response packet and receive the next request. If
# *transmit_data* is None skip sending. If *recv_timeout* is
# zero skip receiving. Data is sent only between *guard_time*
# and *send_timeout*, measured from the end of the last
# received data. If *mdaa* is True, reply to Type A and Type F
# activation commands with *nfca_params* (sens_res, nfcid1-3,
# sel_res) and *nfcf_params* (idm, pmm, system_code).
data = struct.pack("<HH?6s18s??H", guard_time, send_timeout,
mdaa, str(nfca_params), str(nfcf_params),
mf_halted, arae, recv_timeout)
if transmit_data:
data = data + str(transmit_data)
data = self.send_command(0x48, data, timeout=None)
if data and tuple(data[3:7]) != (0, 0, 0, 0):
raise CommunicationError(data[3:7])
return data
def reset_device(self, startup_delay=0):
self.send_command(0x12, struct.pack("<H", startup_delay), 100)
self.transport.write(Chipset.ACK)
time.sleep(float(startup_delay + 500)/1000)
def get_firmware_version(self, option=None):
assert option in (None, 0x60, 0x61, 0x80)
data = self.send_command(0x20, [option] if option else [], 100)
log.debug("firmware version {1:x}.{0:02x}".format(*data))
return data
def get_pd_data_version(self):
data = self.send_command(0x22, [], 100)
log.debug("package data format {1:x}.{0:02x}".format(*data))
def get_command_type(self):
data = self.send_command(0x28, [], 100)
return struct.unpack(">Q", str(data[0:8]))
def set_command_type(self, command_type):
data = self.send_command(0x2A, [command_type], 100)
if data and data[0] != 0:
raise StatusError(data[0])
class Device(device.Device):
# Device driver for the Sony NFC Port-100 chipset.
def __init__(self, chipset, logger):
self.chipset = chipset
self.log = logger
minor, major = self.chipset.get_firmware_version()
self._chipset_name = "NFC Port-100 v{0:x}.{1:02x}".format(major, minor)
def close(self):
self.chipset.close()
self.chipset = None
def mute(self):
self.chipset.switch_rf("off")
def sense_tta(self, target):
"""Sense for a Type A Target is supported for 106, 212 and 424
kbps. However, there may not be any target that understands the
activation commands in other than 106 kbps.
"""
log.debug("polling for NFC-A technology")
if target.brty not in ("106A", "212A", "424A"):
message = "unsupported bitrate {0}".format(target.brty)
raise nfc.clf.UnsupportedTargetError(message)
self.chipset.in_set_rf(target.brty)
self.chipset.in_set_protocol(self.chipset.in_set_protocol_defaults)
self.chipset.in_set_protocol(initial_guard_time=6, add_crc=0,
check_crc=0, check_parity=1,
last_byte_bit_count=7)
sens_req = (target.sens_req if target.sens_req else
bytearray.fromhex("26"))
try:
sens_res = self.chipset.in_comm_rf(sens_req, 30)
if len(sens_res) != 2: return None
except CommunicationError as error:
if error != "RECEIVE_TIMEOUT_ERROR": log.debug(error)
return None
log.debug("rcvd SENS_RES " + hexlify(sens_res))
if sens_res[0] & 0x1F == 0:
log.debug("type 1 tag target found")
self.chipset.in_set_protocol(last_byte_bit_count=8, add_crc=2,
check_crc=2, type_1_tag_rrdd=2)
target = nfc.clf.RemoteTarget(target.brty, sens_res=sens_res)
if sens_res[1] & 0x0F == 0b1100:
rid_cmd = bytearray.fromhex("78 0000 00000000")
log.debug("send RID_CMD " + hexlify(rid_cmd))
try:
target.rid_res = self.chipset.in_comm_rf(rid_cmd, 30)
except CommunicationError as error:
log.debug(error)
return None
return target
# other than type 1 tag
try:
self.chipset.in_set_protocol(last_byte_bit_count=8, add_parity=1)
if target.sel_req:
uid = target.sel_req
if len(uid) > 4: uid = "\x88" + uid
if len(uid) > 8: uid = uid[0:4] + "\x88" + uid[4:]
self.chipset.in_set_protocol(add_crc=1, check_crc=1)
for i, sel_cmd in zip(range(0,len(uid),4),"\x93\x95\x97"):
sel_req = sel_cmd + "\x70" + uid[i:i+4]
sel_req.append(reduce(operator.xor, sel_req[2:6])) # BCC
log.debug("send SEL_REQ " + hexlify(sel_req))
sel_res = self.chipset.in_comm_rf(sel_req, 30)
log.debug("rcvd SEL_RES " + hexlify(sel_res))
uid = target.sel_req
else:
uid = bytearray()
for sel_cmd in "\x93\x95\x97":
self.chipset.in_set_protocol(add_crc=0, check_crc=0)
sdd_req = sel_cmd + "\x20"
log.debug("send SDD_REQ " + hexlify(sdd_req))
sdd_res = self.chipset.in_comm_rf(sdd_req, 30)
log.debug("rcvd SDD_RES " + hexlify(sdd_res))
self.chipset.in_set_protocol(add_crc=1, check_crc=1)
sel_req = sel_cmd + "\x70" + sdd_res
log.debug("send SEL_REQ " + hexlify(sel_req))
sel_res = self.chipset.in_comm_rf(sel_req, 30)
log.debug("rcvd SEL_RES " + hexlify(sel_res))
if sel_res[0] & 0b00000100: uid = uid + sdd_res[1:4]
else: uid = uid + sdd_res[0:4]; break
if sel_res[0] & 0b00000100 == 0:
return nfc.clf.RemoteTarget(target.brty, sens_res=sens_res,
sel_res=sel_res, sdd_res=uid)
except CommunicationError as error:
log.debug(error)
def sense_ttb(self, target):
"""Sense for a Type B Target is supported for 106, 212 and 424
kbps. However, there may not be any target that understands the
activation command in other than 106 kbps.
"""
log.debug("polling for NFC-B technology")
if target.brty not in ("106B", "212B", "424B"):
message = "unsupported bitrate {0}".format(target.brty)
raise nfc.clf.UnsupportedTargetError(message)
self.chipset.in_set_rf(target.brty)
self.chipset.in_set_protocol(self.chipset.in_set_protocol_defaults)
self.chipset.in_set_protocol(initial_guard_time=20, add_sof=1,
check_sof=1, add_eof=1, check_eof=1)
sensb_req = (target.sensb_req if target.sensb_req else
bytearray.fromhex("050010"))
log.debug("send SENSB_REQ " + hexlify(sensb_req))
try:
sensb_res = self.chipset.in_comm_rf(sensb_req, 30)
except CommunicationError as error:
if error != "RECEIVE_TIMEOUT_ERROR": log.debug(error)
return None
if len(sensb_res) >= 12 and sensb_res[0] == 0x50:
log.debug("rcvd SENSB_RES " + hexlify(sensb_res))
return nfc.clf.RemoteTarget(target.brty, sensb_res=sensb_res)
def sense_ttf(self, target):
"""Sense for a Type F Target is supported for 212 and 424 kbps.
"""
log.debug("polling for NFC-F technology")
if target.brty not in ("212F", "424F"):
message = "unsupported bitrate {0}".format(target.brty)
raise nfc.clf.UnsupportedTargetError(message)
self.chipset.in_set_rf(target.brty)
self.chipset.in_set_protocol(self.chipset.in_set_protocol_defaults)
self.chipset.in_set_protocol(initial_guard_time=24)
sensf_req = (target.sensf_req if target.sensf_req else
bytearray.fromhex("00FFFF0100"))
log.debug("send SENSF_REQ " + hexlify(sensf_req))
try:
frame = chr(len(sensf_req)+1) + sensf_req
frame = self.chipset.in_comm_rf(frame, 10)
except CommunicationError as error:
if error != "RECEIVE_TIMEOUT_ERROR": log.debug(error)
return None
if len(frame) >= 18 and frame[0] == len(frame) and frame[1] == 1:
log.debug("rcvd SENSF_RES " + hexlify(frame[1:]))
return nfc.clf.RemoteTarget(target.brty, sensf_res=frame[1:])
def sense_dep(self, target):
"""Sense for an active DEP Target is not supported. The device only
supports passive activation via sense_tta/sense_ttf.
"""
message = "{device} does not support sense for active DEP Target"
raise nfc.clf.UnsupportedTargetError(message.format(device=self))
def listen_tta(self, target, timeout):
"""Listen as Type A Target in 106 kbps.
Restrictions:
* It is not possible to send short frames that are required
for ACK and NAK responses. This means that a Type 2 Tag
emulation can only implement a single sector memory model.
* It can not be avoided that the chipset responds to SENSF_REQ
commands. The driver configures the SENSF_RES response to
all zero and ignores all Type F communication but eventually
it depends on the remote device whether Type A Target
activation will still be attempted.
"""
if not target.brty == '106A':
info = "unsupported target bitrate: %r" % target.brty
raise nfc.clf.UnsupportedTargetError(info)
if target.rid_res:
info = "listening for type 1 tag activation is not supported"
raise nfc.clf.UnsupportedTargetError(info)
try:
assert target.sens_res is not None, "sens_res is required"
assert target.sdd_res is not None, "sdd_res is required"
assert target.sel_res is not None, "sel_res is required"
assert len(target.sens_res) == 2, "sens_res must be 2 byte"
assert len(target.sdd_res) == 4, "sdd_res must be 4 byte"
assert len(target.sel_res) == 1, "sel_res must be 1 byte"
assert target.sdd_res[0] == 0x08, "sdd_res[0] must be 08h"
except AssertionError as error:
raise ValueError(str(error))
nfca_params = target.sens_res + target.sdd_res[1:4] + target.sel_res
log.debug("nfca_params %s", hexlify(nfca_params))
self.chipset.tg_set_rf("106A")
self.chipset.tg_set_protocol(self.chipset.tg_set_protocol_defaults)
self.chipset.tg_set_protocol(rf_off_error=False)
time_to_return = time.time() + timeout
tg_comm_rf_args = {'mdaa': True, 'nfca_params': nfca_params}
tg_comm_rf_args['recv_timeout'] = min(int(1000 * timeout), 0xFFFF)
def listen_tta_tt2():
recv_timeout = tg_comm_rf_args['recv_timeout']
while recv_timeout > 0:
log.debug("wait %d ms for Type 2 Tag activation", recv_timeout)
try:
data = self.chipset.tg_comm_rf(**tg_comm_rf_args)
except CommunicationError as error:
log.debug(error)
else:
brty = ('106A', '212F', '424F')[data[0]-11]
log.debug("%s rcvd %s", brty, hexlify(buffer(data, 7)))
if brty == "106A" and data[2] & 0x03 == 3:
self.chipset.tg_set_protocol(rf_off_error=True)
return nfc.clf.LocalTarget(
"106A", sens_res=nfca_params[0:2],
sdd_res='\x08'+nfca_params[2:5],
sel_res=nfca_params[5:6], tt2_cmd=data[7:])
else:
log.debug("not a 106A Type 2 Tag command")
finally:
recv_timeout = int(1000 * (time_to_return - time.time()))
tg_comm_rf_args['recv_timeout'] = recv_timeout
def listen_tta_tt4():
rats_cmd = rats_res = None
recv_timeout = tg_comm_rf_args['recv_timeout']
while recv_timeout > 0:
log.debug("wait %d ms for 106A TT4 command", recv_timeout)
try:
data = self.chipset.tg_comm_rf(**tg_comm_rf_args)
tg_comm_rf_args['transmit_data'] = None
except CommunicationError as error:
tg_comm_rf_args['transmit_data'] = None
                    rats_cmd = rats_res = None
log.debug(error)
else:
brty = ('106A', '212F', '424F')[data[0]-11]
log.debug("%s rcvd %s", brty, hexlify(buffer(data, 7)))
if brty=="106A" and data[2]==3 and data[7]==0xE0:
(rats_cmd, rats_res) = (data[7:], target.rats_res)
log.debug("rcvd RATS_CMD %s", hexlify(rats_cmd))
if rats_res is None:
rats_res = bytearray.fromhex("05 78 80 70 02")
log.debug("send RATS_RES %s", hexlify(rats_res))
tg_comm_rf_args['transmit_data'] = rats_res
elif brty=="106A" and data[7]!=0xF0 and rats_cmd:
(did, cmd) = (rats_cmd[1] & 0x0F, data[7:])
ta_tb_tc = rats_res[2:]
ta = ta_tb_tc.pop(0) if rats_res[1]&0x10 else None
tb = ta_tb_tc.pop(0) if rats_res[1]&0x20 else None
tc = ta_tb_tc.pop(0) if rats_res[1]&0x40 else None
did_supported = tc is None or bool(tc & 0x02)
cmd_with_did = bool(cmd[0] & 0x08)
if ((cmd_with_did and did_supported and cmd[1]==did)
or (did==0 and not cmd_with_did)):
if cmd[0] in (0xC2, 0xCA):
log.debug("rcvd S(DESELECT) %s", hexlify(cmd))
tg_comm_rf_args['transmit_data'] = cmd
log.debug("send S(DESELECT) %s", hexlify(cmd))
rats_cmd = rats_res = None
else:
log.debug("rcvd TT4_CMD %s", hexlify(cmd))
self.chipset.tg_set_protocol(rf_off_error=True)
return nfc.clf.LocalTarget(
"106A", sens_res=nfca_params[0:2],
sdd_res='\x08'+nfca_params[2:5],
sel_res=nfca_params[5:6], tt4_cmd=cmd,
rats_cmd=rats_cmd, rats_res=rats_res)
else: log.debug("skip TT4_CMD %s (DID)", hexlify(cmd))
else: log.debug("not a 106A TT4 command")
finally:
recv_timeout = int(1000 * (time_to_return - time.time()))
tg_comm_rf_args['recv_timeout'] = recv_timeout
if target.sel_res[0] & 0x60 == 0x00:
return listen_tta_tt2()
if target.sel_res[0] & 0x20 == 0x20:
return listen_tta_tt4()
reason = "sel_res does not indicate any tag target support"
raise nfc.clf.UnsupportedTargetError(reason)
def listen_ttb(self, target, timeout):
"""Listen as Type B Target is not supported."""
message = "{device} does not support listen as Type A Target"
raise nfc.clf.UnsupportedTargetError(message.format(device=self))
def listen_ttf(self, target, timeout):
"""Listen as Type F Target is supported for either 212 or 424 kbps."""
assert target.sensf_res is not None
assert len(target.sensf_res) == 19
if target.brty not in ('212F', '424F'):
info = "unsupported target bitrate: %r" % target.brty
raise nfc.clf.UnsupportedTargetError(info)
self.chipset.tg_set_rf(target.brty)
self.chipset.tg_set_protocol(self.chipset.tg_set_protocol_defaults)
self.chipset.tg_set_protocol(rf_off_error=False)
recv_timeout = min(int(1000 * timeout), 0xFFFF)
time_to_return = time.time() + timeout
transmit_data = sensf_req = sensf_res = None
while recv_timeout > 0:
if transmit_data:
log.debug("%s send %s", target.brty, hexlify(transmit_data))
log.debug("%s wait recv %d ms", target.brty, recv_timeout)
try:
data = self.chipset.tg_comm_rf(recv_timeout=recv_timeout,
transmit_data=transmit_data)
except CommunicationError as error:
log.debug(error); continue
finally:
recv_timeout = int((time_to_return - time.time()) * 1E3)
transmit_data = None
assert target.brty == ('106A', '212F', '424F')[data[0]-11]
log.debug("%s rcvd %s", target.brty, hexlify(buffer(data, 7)))
if len(data) > 7 and len(data)-7 == data[7]:
if sensf_req and data[9:17] == target.sensf_res[1:9]:
self.chipset.tg_set_protocol(rf_off_error=True)
target = nfc.clf.LocalTarget(target.brty)
target.sensf_req = sensf_req
target.sensf_res = sensf_res
target.tt3_cmd = data[8:]
return target
if len(data) == 13 and data[7] == 6 and data[8] == 0:
(sensf_req, sensf_res) = (data[8:], target.sensf_res[:])
if ((sensf_req[1]==255 or sensf_req[1]==sensf_res[17]) and
(sensf_req[2]==255 or sensf_req[2]==sensf_res[18])):
transmit_data = sensf_res[0:17]
if sensf_req[3] == 1:
transmit_data += sensf_res[17:19]
if sensf_req[3] == 2:
transmit_data += "\x00" + chr(1<<(target.brty=="424F"))
transmit_data = chr(len(transmit_data)+1) + transmit_data
def listen_dep(self, target, timeout):
log.debug("listen_dep for {0:.3f} sec".format(timeout))
assert target.sensf_res is not None
assert target.sens_res is not None
assert target.sdd_res is not None
assert target.sel_res is not None
assert target.atr_res is not None
nfca_params = target.sens_res + target.sdd_res[1:4] + target.sel_res
nfcf_params = target.sensf_res[1:19]
log.debug("nfca_params %s", hexlify(nfca_params))
log.debug("nfcf_params %s", hexlify(nfcf_params))
assert len(nfca_params) == 6
assert len(nfcf_params) == 18
self.chipset.tg_set_rf("106A")
self.chipset.tg_set_protocol(self.chipset.tg_set_protocol_defaults)
self.chipset.tg_set_protocol(rf_off_error=False)
tg_comm_rf_args = {'mdaa': True}
tg_comm_rf_args['nfca_params'] = nfca_params
tg_comm_rf_args['nfcf_params'] = nfcf_params
recv_timeout = min(int(1000 * timeout), 0xFFFF)
time_to_return = time.time() + timeout
while recv_timeout > 0:
tg_comm_rf_args['recv_timeout'] = recv_timeout
log.debug("wait %d ms for activation", recv_timeout)
try:
data = self.chipset.tg_comm_rf(**tg_comm_rf_args)
except CommunicationError as error:
if error != "RECEIVE_TIMEOUT_ERROR": log.warning(error)
else:
brty = ('106A', '212F', '424F')[data[0]-11]
log.debug("%s %s", brty, hexlify(data))
if data[2] & 0x03 == 3: data = data[7:]; break
else: log.debug("not a passive mode activation")
recv_timeout = int(1000 * (time_to_return - time.time()))
else:
return None
# further tg_comm_rf commands return RF_OFF_ERROR when field is gone
self.chipset.tg_set_protocol(rf_off_error=True)
if brty == "106A" and len(data)>1 and data[0] != 0xF0:
# We received a Type A card activation, probably because
# sel_res has indicated Type 2 or Type 4A Tag support.
target = nfc.clf.LocalTarget("106A", tag_cmd=data[:])
target.sens_res = nfca_params[0:2]
target.sdd_res = '\x08' + nfca_params[2:5]
target.sel_res = nfca_params[5:6]
return target
try:
if brty == "106A": assert data.pop(0) == 0xF0
assert len(data) == data.pop(0)
assert data.startswith("\xD4\x00")
except (IndexError, AssertionError):
return None
activation_params = nfca_params if brty=='106A' else nfcf_params
def send_res_recv_req(brty, data, timeout):
if data: data = ("", "\xF0")[brty=="106A"] + chr(len(data)) + data
args = {'transmit_data': data, 'recv_timeout': timeout}
data = self.chipset.tg_comm_rf(**args)[7:]
if timeout > 0:
try:
if brty == "106A":
assert data.pop(0) == 0xF0, "invalid start byte"
assert len(data) == data.pop(0), "incorrect length byte"
assert data[0] == 0xD4, "invalid command byte 1"
assert data[1] in (0,4,6,8,10), "invalid command byte 2"
except IndexError:
raise AssertionError("insufficient receive data")
return data
while data and data[1] == 0:
try:
(atr_req, atr_res) = (data[:], target.atr_res)
log.debug("%s rcvd ATR_REQ %s", brty, hexlify(atr_req))
assert len(atr_req) >= 16, "ATR_REQ has less than 16 byte"
assert len(atr_req) <= 64, "ATR_REQ has more than 64 byte"
log.debug("%s send ATR_RES %s", brty, hexlify(atr_res))
data = send_res_recv_req(brty, atr_res, 1000)
except (CommunicationError, AssertionError) as error:
log.warning(str(error))
return None
psl_req = dep_req = None
while data and data[1] in (4,6,8,10):
did = atr_req[12] if atr_req[12]>0 else None
cmd = ("PSL", "DEP", "DSL", "RLS")[(data[1]-4)//2] + "_REQ"
log.debug("%s rcvd %s %s", brty, cmd, hexlify(data))
try:
if cmd=="DEP_REQ" and did==(data[3] if data[2]>>2&1 else None):
dep_req = data[:]
break
if cmd=="DSL_REQ" and did==(data[2] if len(data)>2 else None):
data = "\xD5\x09" + data[2:3]
log.debug("%s send DSL_RES %s", brty, hexlify(data))
send_res_recv_req(brty, data, 0)
return None
if cmd=="RLS_REQ" and did==(data[2] if len(data)>2 else None):
data = "\xD5\x0B" + data[2:3]
log.debug("%s send RLS_RES %s", brty, hexlify(data))
send_res_recv_req(brty, data, 0)
return None
if cmd=="PSL_REQ" and did==(data[2] if data[2]>0 else None):
(dsi, dri) = ((data[3] >> 3) & 7, data[3] & 7)
if dsi != dri:
log.warning("DSI != DRI is not supported")
return None
(psl_req, psl_res) = (data[:], "\xD5\x05"+data[2:3])
log.debug("%s send PSL_RES %s", brty, hexlify(psl_res))
send_res_recv_req(brty, psl_res, 0)
brty = ('106A', '212F', '424F')[dsi]
self.chipset.tg_set_rf(brty)
log.debug("%s wait recv 1000 ms", brty)
data = send_res_recv_req(brty, None, 1000)
except (CommunicationError, AssertionError) as error:
log.warning(str(error))
return None
else: # while data and data[1] in (4,6,8,10)
return None
target = nfc.clf.LocalTarget(brty, atr_req=atr_req, dep_req=dep_req)
if psl_req: target.psl_req = psl_req
if activation_params == nfca_params:
target.sens_res = nfca_params[0:2]
target.sdd_res = '\x08' + nfca_params[2:5]
target.sel_res = nfca_params[5:6]
else:
target.sensf_res = "\x01" + nfcf_params
return target
def get_max_send_data_size(self, target):
return 290
def get_max_recv_data_size(self, target):
return 290
def send_cmd_recv_rsp(self, target, data, timeout):
timeout_msec = min(int(timeout * 1000), 0xFFFF) if timeout else 0
self.chipset.in_set_rf(target.brty_send, target.brty_recv)
self.chipset.in_set_protocol(self.chipset.in_set_protocol_defaults)
in_set_protocol_settings = {
'add_parity': 1 if target.brty_send.endswith('A') else 0,
'check_parity': 1 if target.brty_recv.endswith('A') else 0
}
try:
if (target.brty == '106A' and target.sel_res and
target.sel_res[0] & 0x60 == 0x00):
# Driver must check TT2 CRC to get ACK/NAK
in_set_protocol_settings['check_crc'] = 0
self.chipset.in_set_protocol(**in_set_protocol_settings)
return self._tt2_send_cmd_recv_rsp(data, timeout_msec)
else:
self.chipset.in_set_protocol(**in_set_protocol_settings)
return self.chipset.in_comm_rf(data, timeout_msec)
except CommunicationError as error:
log.debug(error)
if error == "RECEIVE_TIMEOUT_ERROR":
raise nfc.clf.TimeoutError
raise nfc.clf.TransmissionError
def _tt2_send_cmd_recv_rsp(self, data, timeout_msec):
# The Type2Tag implementation needs to receive the Mifare
# ACK/NAK responses but the chipset reports them as crc error
# (indistinguishable from a real crc error). We thus had to
# switch off the crc check and do it here.
data = self.chipset.in_comm_rf(data, timeout_msec)
if len(data) > 2 and self.check_crc_a(data) is False:
raise nfc.clf.TransmissionError("crc_a check error")
return data[:-2] if len(data) > 2 else data
def send_rsp_recv_cmd(self, target, data, timeout):
assert timeout is None or timeout >= 0
timeout_msec = min(int(timeout * 1000), 0xFFFF) if timeout else 0
kwargs = {
'guard_time': 500,
'transmit_data': data,
'recv_timeout': 0xFFFF if timeout is None else int(timeout*1E3),
}
try:
data = self.chipset.tg_comm_rf(**kwargs)
return data[7:] if data else None
except CommunicationError as error:
log.debug(error)
if error == "RF_OFF_ERROR":
raise nfc.clf.BrokenLinkError(str(error))
if error == "RECEIVE_TIMEOUT_ERROR":
raise nfc.clf.TimeoutError(str(error))
raise nfc.clf.TransmissionError(str(error))
def init(transport):
chipset = Chipset(transport, logger=log)
device = Device(chipset, logger=log)
device._vendor_name = transport.manufacturer_name
device._device_name = transport.product_name
return device
| mit | 6,889,416,888,422,765,000 | 42.444318 | 79 | 0.535011 | false |
hynekcer/django | django/db/migrations/operations/base.py | 356 | 4370 | from __future__ import unicode_literals
from django.db import router
class Operation(object):
"""
Base class for migration operations.
It's responsible for both mutating the in-memory model state
(see db/migrations/state.py) to represent what it performs, as well
as actually performing it against a live database.
Note that some operations won't modify memory state at all (e.g. data
copying operations), and some will need their modifications to be
optionally specified by the user (e.g. custom Python code snippets)
Due to the way this class deals with deconstruction, it should be
considered immutable.
"""
# If this migration can be run in reverse.
# Some operations are impossible to reverse, like deleting data.
reversible = True
# Can this migration be represented as SQL? (things like RunPython cannot)
reduces_to_sql = True
# Should this operation be forced as atomic even on backends with no
# DDL transaction support (i.e., does it have no DDL, like RunPython)
atomic = False
serialization_expand_args = []
def __new__(cls, *args, **kwargs):
# We capture the arguments to make returning them trivial
self = object.__new__(cls)
self._constructor_args = (args, kwargs)
return self
def deconstruct(self):
"""
Returns a 3-tuple of class import path (or just name if it lives
under django.db.migrations), positional arguments, and keyword
arguments.
"""
return (
self.__class__.__name__,
self._constructor_args[0],
self._constructor_args[1],
)
def state_forwards(self, app_label, state):
"""
Takes the state from the previous migration, and mutates it
so that it matches what this migration would perform.
"""
raise NotImplementedError('subclasses of Operation must provide a state_forwards() method')
def database_forwards(self, app_label, schema_editor, from_state, to_state):
"""
Performs the mutation on the database schema in the normal
(forwards) direction.
"""
raise NotImplementedError('subclasses of Operation must provide a database_forwards() method')
def database_backwards(self, app_label, schema_editor, from_state, to_state):
"""
Performs the mutation on the database schema in the reverse
direction - e.g. if this were CreateModel, it would in fact
drop the model's table.
"""
raise NotImplementedError('subclasses of Operation must provide a database_backwards() method')
def describe(self):
"""
Outputs a brief summary of what the action does.
"""
return "%s: %s" % (self.__class__.__name__, self._constructor_args)
def references_model(self, name, app_label=None):
"""
Returns True if there is a chance this operation references the given
model name (as a string), with an optional app label for accuracy.
Used for optimization. If in doubt, return True;
returning a false positive will merely make the optimizer a little
less efficient, while returning a false negative may result in an
unusable optimized migration.
"""
return True
def references_field(self, model_name, name, app_label=None):
"""
Returns True if there is a chance this operation references the given
field name, with an optional app label for accuracy.
Used for optimization. If in doubt, return True.
"""
return self.references_model(model_name, app_label)
def allow_migrate_model(self, connection_alias, model):
"""
        Returns whether we're allowed to migrate the model.
This is a thin wrapper around router.allow_migrate_model() that
preemptively rejects any proxy, swapped out, or unmanaged model.
"""
if not model._meta.can_migrate(connection_alias):
return False
return router.allow_migrate_model(connection_alias, model)
def __repr__(self):
return "<%s %s%s>" % (
self.__class__.__name__,
", ".join(map(repr, self._constructor_args[0])),
",".join(" %s=%r" % x for x in self._constructor_args[1].items()),
)
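# A minimal sketch of a concrete subclass (illustrative, not part of Django;
# the PostgreSQL COMMENT syntax is an assumption): an operation only has to
# fill in the state/database hooks defined above.
#
# class AddTableComment(Operation):
#     reversible = True
#
#     def __init__(self, table, comment):
#         self.table, self.comment = table, comment
#
#     def state_forwards(self, app_label, state):
#         pass  # a table comment does not change the in-memory model state
#
#     def database_forwards(self, app_label, schema_editor, from_state, to_state):
#         schema_editor.execute("COMMENT ON TABLE %s IS %%s" % self.table, [self.comment])
#
#     def database_backwards(self, app_label, schema_editor, from_state, to_state):
#         schema_editor.execute("COMMENT ON TABLE %s IS NULL" % self.table)
#
#     def describe(self):
#         return "Add comment to table %s" % self.table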
| bsd-3-clause | 5,531,483,985,423,518,000 | 35.722689 | 103 | 0.641648 | false |
Gitlab11/odoo | addons/calendar/controllers/main.py | 329 | 3390 | import simplejson
import openerp
import openerp.http as http
from openerp.http import request
import openerp.addons.web.controllers.main as webmain
import json
class meeting_invitation(http.Controller):
@http.route('/calendar/meeting/accept', type='http', auth="calendar")
def accept(self, db, token, action, id, **kwargs):
registry = openerp.modules.registry.RegistryManager.get(db)
attendee_pool = registry.get('calendar.attendee')
with registry.cursor() as cr:
attendee_id = attendee_pool.search(cr, openerp.SUPERUSER_ID, [('access_token', '=', token), ('state', '!=', 'accepted')])
if attendee_id:
attendee_pool.do_accept(cr, openerp.SUPERUSER_ID, attendee_id)
return self.view(db, token, action, id, view='form')
@http.route('/calendar/meeting/decline', type='http', auth="calendar")
def declined(self, db, token, action, id):
registry = openerp.modules.registry.RegistryManager.get(db)
attendee_pool = registry.get('calendar.attendee')
with registry.cursor() as cr:
attendee_id = attendee_pool.search(cr, openerp.SUPERUSER_ID, [('access_token', '=', token), ('state', '!=', 'declined')])
if attendee_id:
attendee_pool.do_decline(cr, openerp.SUPERUSER_ID, attendee_id)
return self.view(db, token, action, id, view='form')
@http.route('/calendar/meeting/view', type='http', auth="calendar")
def view(self, db, token, action, id, view='calendar'):
registry = openerp.modules.registry.RegistryManager.get(db)
meeting_pool = registry.get('calendar.event')
attendee_pool = registry.get('calendar.attendee')
partner_pool = registry.get('res.partner')
with registry.cursor() as cr:
attendee = attendee_pool.search_read(cr, openerp.SUPERUSER_ID, [('access_token', '=', token)], [])
if attendee and attendee[0] and attendee[0].get('partner_id'):
partner_id = int(attendee[0].get('partner_id')[0])
tz = partner_pool.read(cr, openerp.SUPERUSER_ID, partner_id, ['tz'])['tz']
else:
tz = False
attendee_data = meeting_pool.get_attendee(cr, openerp.SUPERUSER_ID, id, dict(tz=tz))
if attendee:
attendee_data['current_attendee'] = attendee[0]
values = dict(init="s.calendar.event('%s', '%s', '%s', '%s' , '%s');" % (db, action, id, 'form', json.dumps(attendee_data)))
return request.render('web.webclient_bootstrap', values)
    # Called over RPC every 5 minutes to check whether there is a notification to send for an event
@http.route('/calendar/notify', type='json', auth="none")
def notify(self):
registry = request.registry
uid = request.session.uid
context = request.session.context
with registry.cursor() as cr:
res = registry.get("calendar.alarm_manager").get_next_notif(cr, uid, context=context)
return res
@http.route('/calendar/notify_ack', type='json', auth="none")
def notify_ack(self, type=''):
registry = request.registry
uid = request.session.uid
context = request.session.context
with registry.cursor() as cr:
res = registry.get("res.partner")._set_calendar_last_notif_ack(cr, uid, context=context)
return res
| agpl-3.0 | -883,575,146,776,035,200 | 46.746479 | 133 | 0.627139 | false |
toddpalino/kafka-tools | kafka/tools/protocol/requests/update_metadata_v3.py | 1 | 2339 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from kafka.tools.protocol.requests.update_metadata_v0 import UpdateMetadataV0Request
from kafka.tools.protocol.responses.update_metadata_v3 import UpdateMetadataV3Response
class UpdateMetadataV3Request(UpdateMetadataV0Request):
api_version = 3
response = UpdateMetadataV3Response
help_string = ''
schema = [
{'name': 'controller_id', 'type': 'int32'},
{'name': 'controller_epoch', 'type': 'int32'},
{'name': 'partition_states',
'type': 'array',
'item_type': [
{'name': 'topic', 'type': 'string'},
{'name': 'partition', 'type': 'int32'},
{'name': 'controller_epoch', 'type': 'int32'},
{'name': 'leader', 'type': 'int32'},
{'name': 'leader_epoch', 'type': 'int32'},
{'name': 'isr', 'type': 'array', 'item_type': 'int32'},
{'name': 'zk_version', 'type': 'int32'},
{'name': 'replicas', 'type': 'array', 'item_type': 'int32'},
]},
{'name': 'live_leaders',
'type': 'array',
'item_type': [
{'name': 'id', 'type': 'int32'},
{'name': 'end_points',
'type': 'array',
'item_type': [
{'name': 'port', 'type': 'int32'},
{'name': 'host', 'type': 'string'},
{'name': 'listener_name', 'type': 'string'},
{'name': 'security_protocol_type', 'type': 'int16'},
]},
{'name': 'rack', 'type': 'string'},
]},
]
| apache-2.0 | 7,092,937,227,782,397,000 | 40.035088 | 86 | 0.574605 | false |
gauribhoite/personfinder | env/site-packages/pygments/lexers/data.py | 72 | 18277 | # -*- coding: utf-8 -*-
"""
pygments.lexers.data
~~~~~~~~~~~~~~~~~~~~
    Lexers for data file formats.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, LexerContext, \
include, bygroups, inherit
from pygments.token import Text, Comment, Keyword, Name, String, Number, \
Punctuation, Literal
__all__ = ['YamlLexer', 'JsonLexer', 'JsonLdLexer']
class YamlLexerContext(LexerContext):
"""Indentation context for the YAML lexer."""
def __init__(self, *args, **kwds):
super(YamlLexerContext, self).__init__(*args, **kwds)
self.indent_stack = []
self.indent = -1
self.next_indent = 0
self.block_scalar_indent = None
class YamlLexer(ExtendedRegexLexer):
"""
Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization
language.
.. versionadded:: 0.11
"""
name = 'YAML'
aliases = ['yaml']
filenames = ['*.yaml', '*.yml']
mimetypes = ['text/x-yaml']
def something(token_class):
"""Do not produce empty tokens."""
def callback(lexer, match, context):
text = match.group()
if not text:
return
yield match.start(), token_class, text
context.pos = match.end()
return callback
def reset_indent(token_class):
"""Reset the indentation levels."""
def callback(lexer, match, context):
text = match.group()
context.indent_stack = []
context.indent = -1
context.next_indent = 0
context.block_scalar_indent = None
yield match.start(), token_class, text
context.pos = match.end()
return callback
def save_indent(token_class, start=False):
"""Save a possible indentation level."""
def callback(lexer, match, context):
text = match.group()
extra = ''
if start:
context.next_indent = len(text)
if context.next_indent < context.indent:
while context.next_indent < context.indent:
context.indent = context.indent_stack.pop()
if context.next_indent > context.indent:
extra = text[context.indent:]
text = text[:context.indent]
else:
context.next_indent += len(text)
if text:
yield match.start(), token_class, text
if extra:
yield match.start()+len(text), token_class.Error, extra
context.pos = match.end()
return callback
def set_indent(token_class, implicit=False):
"""Set the previously saved indentation level."""
def callback(lexer, match, context):
text = match.group()
if context.indent < context.next_indent:
context.indent_stack.append(context.indent)
context.indent = context.next_indent
if not implicit:
context.next_indent += len(text)
yield match.start(), token_class, text
context.pos = match.end()
return callback
def set_block_scalar_indent(token_class):
"""Set an explicit indentation level for a block scalar."""
def callback(lexer, match, context):
text = match.group()
context.block_scalar_indent = None
if not text:
return
increment = match.group(1)
if increment:
current_indent = max(context.indent, 0)
increment = int(increment)
context.block_scalar_indent = current_indent + increment
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_block_scalar_empty_line(indent_token_class, content_token_class):
"""Process an empty line in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if (context.block_scalar_indent is None or
len(text) <= context.block_scalar_indent):
if text:
yield match.start(), indent_token_class, text
else:
indentation = text[:context.block_scalar_indent]
content = text[context.block_scalar_indent:]
yield match.start(), indent_token_class, indentation
yield (match.start()+context.block_scalar_indent,
content_token_class, content)
context.pos = match.end()
return callback
def parse_block_scalar_indent(token_class):
"""Process indentation spaces in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if context.block_scalar_indent is None:
if len(text) <= max(context.indent, 0):
context.stack.pop()
context.stack.pop()
return
context.block_scalar_indent = len(text)
else:
if len(text) < context.block_scalar_indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_plain_scalar_indent(token_class):
"""Process indentation spaces in a plain scalar."""
def callback(lexer, match, context):
text = match.group()
if len(text) <= context.indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
tokens = {
# the root rules
'root': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# the '%YAML' directive
(r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'),
# the %TAG directive
(r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'),
# document start and document end indicators
(r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace),
'block-line'),
# indentation spaces
(r'[ ]*(?!\s|$)', save_indent(Text, start=True),
('block-line', 'indentation')),
],
# trailing whitespaces after directives or a block scalar indicator
'ignored-line': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# line break
(r'\n', Text, '#pop:2'),
],
# the %YAML directive
'yaml-directive': [
# the version number
(r'([ ]+)([0-9]+\.[0-9]+)',
bygroups(Text, Number), 'ignored-line'),
],
        # the %TAG directive
'tag-directive': [
# a tag handle and the corresponding prefix
(r'([ ]+)(!|![\w-]*!)'
r'([ ]+)(!|!?[\w;/?:@&=+$,.!~*\'()\[\]%-]+)',
bygroups(Text, Keyword.Type, Text, Keyword.Type),
'ignored-line'),
],
# block scalar indicators and indentation spaces
'indentation': [
# trailing whitespaces are ignored
(r'[ ]*$', something(Text), '#pop:2'),
            # whitespaces preceding block collection indicators
(r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)),
# block collection indicators
(r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
            # the beginning of a block line
(r'[ ]*', save_indent(Text), '#pop'),
],
# an indented line in the block context
'block-line': [
# the line end
(r'[ ]*(?=#|$)', something(Text), '#pop'),
# whitespaces separating tokens
(r'[ ]+', Text),
# tags, anchors and aliases,
include('descriptors'),
# block collections and scalars
include('block-nodes'),
# flow collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`-]|[?:-]\S)',
something(Name.Variable),
'plain-scalar-in-block-context'),
],
# tags, anchors, aliases
'descriptors': [
# a full-form tag
(r'!<[\w;/?:@&=+$,.!~*\'()\[\]%-]+>', Keyword.Type),
# a tag in the form '!', '!suffix' or '!handle!suffix'
(r'!(?:[\w-]+)?'
r'(?:![\w;/?:@&=+$,.!~*\'()\[\]%-]+)?', Keyword.Type),
# an anchor
(r'&[\w-]+', Name.Label),
# an alias
(r'\*[\w-]+', Name.Variable),
],
# block collections and scalars
'block-nodes': [
# implicit key
(r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
# literal and folded scalars
(r'[|>]', Punctuation.Indicator,
('block-scalar-content', 'block-scalar-header')),
],
# flow collections and quoted scalars
'flow-nodes': [
# a flow sequence
(r'\[', Punctuation.Indicator, 'flow-sequence'),
# a flow mapping
(r'\{', Punctuation.Indicator, 'flow-mapping'),
# a single-quoted scalar
(r'\'', String, 'single-quoted-scalar'),
# a double-quoted scalar
(r'\"', String, 'double-quoted-scalar'),
],
# the content of a flow collection
'flow-collection': [
# whitespaces
(r'[ ]+', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# simple indicators
(r'[?:,]', Punctuation.Indicator),
# tags, anchors and aliases
include('descriptors'),
# nested collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`])',
something(Name.Variable),
'plain-scalar-in-flow-context'),
],
# a flow sequence indicated by '[' and ']'
'flow-sequence': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\]', Punctuation.Indicator, '#pop'),
],
# a flow mapping indicated by '{' and '}'
'flow-mapping': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\}', Punctuation.Indicator, '#pop'),
],
# block scalar lines
'block-scalar-content': [
# line break
(r'\n', Text),
# empty line
(r'^[ ]+$',
parse_block_scalar_empty_line(Text, Name.Constant)),
# indentation spaces (we may leave the state here)
(r'^[ ]*', parse_block_scalar_indent(Text)),
# line content
(r'[\S\t ]+', Name.Constant),
],
# the content of a literal or folded scalar
'block-scalar-header': [
# indentation indicator followed by chomping flag
(r'([1-9])?[+-]?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
# chomping flag followed by indentation indicator
(r'[+-]?([1-9])?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
],
# ignored and regular whitespaces in quoted scalars
'quoted-scalar-whitespaces': [
# leading and trailing whitespaces are ignored
(r'^[ ]+', Text),
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
],
# single-quoted scalars
'single-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of the quote character
(r'\'\'', String.Escape),
# regular non-whitespace characters
(r'[^\s\']+', String),
# the closing quote
(r'\'', String, '#pop'),
],
# double-quoted scalars
'double-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of special characters
(r'\\[0abt\tn\nvfre "\\N_LP]', String),
# escape codes
(r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
String.Escape),
# regular non-whitespace characters
(r'[^\s"\\]+', String),
# the closing quote
(r'"', String, '#pop'),
],
# the beginning of a new line while scanning a plain scalar
'plain-scalar-in-block-context-new-line': [
# empty lines
(r'^[ ]+$', Text),
# line breaks
(r'\n+', Text),
# document start and document end indicators
(r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'),
# indentation spaces (we may leave the block line state here)
(r'^[ ]*', parse_plain_scalar_indent(Text), '#pop'),
],
# a plain scalar in the block context
'plain-scalar-in-block-context': [
# the scalar ends with the ':' indicator
(r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'),
# the scalar ends with whitespaces followed by a comment
(r'[ ]+(?=#)', Text, '#pop'),
# trailing whitespaces are ignored
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text, 'plain-scalar-in-block-context-new-line'),
# other whitespaces are a part of the value
(r'[ ]+', Literal.Scalar.Plain),
# regular non-whitespace characters
(r'(?::(?!\s)|[^\s:])+', Literal.Scalar.Plain),
],
        # a plain scalar in the flow context
'plain-scalar-in-flow-context': [
# the scalar ends with an indicator character
(r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'),
# the scalar ends with a comment
(r'[ ]+(?=#)', Text, '#pop'),
# leading and trailing whitespaces are ignored
(r'^[ ]+', Text),
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
# regular non-whitespace characters
(r'[^\s,:?\[\]{}]+', Name.Variable),
],
}
def get_tokens_unprocessed(self, text=None, context=None):
if context is None:
context = YamlLexerContext(text, 0)
return super(YamlLexer, self).get_tokens_unprocessed(text, context)
class JsonLexer(RegexLexer):
"""
For JSON data structures.
.. versionadded:: 1.5
"""
name = 'JSON'
aliases = ['json']
filenames = ['*.json']
mimetypes = ['application/json']
flags = re.DOTALL
# integer part of a number
int_part = r'-?(0|[1-9]\d*)'
# fractional part of a number
frac_part = r'\.\d+'
# exponential part of a number
exp_part = r'[eE](\+|-)?\d+'
tokens = {
'whitespace': [
(r'\s+', Text),
],
# represents a simple terminal value
'simplevalue': [
(r'(true|false|null)\b', Keyword.Constant),
(('%(int_part)s(%(frac_part)s%(exp_part)s|'
'%(exp_part)s|%(frac_part)s)') % vars(),
Number.Float),
(int_part, Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
],
# the right hand side of an object, after the attribute name
'objectattribute': [
include('value'),
(r':', Punctuation),
# comma terminates the attribute but expects more
(r',', Punctuation, '#pop'),
# a closing bracket terminates the entire object, so pop twice
(r'\}', Punctuation, ('#pop', '#pop')),
],
# a json object - { attr, attr, ... }
'objectvalue': [
include('whitespace'),
(r'"(\\\\|\\"|[^"])*"', Name.Tag, 'objectattribute'),
(r'\}', Punctuation, '#pop'),
],
        # a json array - [ value, value, ... ]
'arrayvalue': [
include('whitespace'),
include('value'),
(r',', Punctuation),
(r'\]', Punctuation, '#pop'),
],
# a json value - either a simple value or a complex value (object or array)
'value': [
include('whitespace'),
include('simplevalue'),
(r'\{', Punctuation, 'objectvalue'),
(r'\[', Punctuation, 'arrayvalue'),
],
        # the root of a json document should be a value
'root': [
include('value'),
],
}
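# Usage sketch (illustrative only): like any other Pygments lexer, the classes
# in this module are normally driven through the top-level highlight() API:
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     print(highlight('{"a": [1, 2.5e3, true]}', JsonLexer(), TerminalFormatter()))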
class JsonLdLexer(JsonLexer):
"""
For `JSON-LD <http://json-ld.org/>`_ linked data.
.. versionadded:: 2.0
"""
name = 'JSON-LD'
aliases = ['jsonld', 'json-ld']
filenames = ['*.jsonld']
mimetypes = ['application/ld+json']
tokens = {
'objectvalue': [
(r'"@(context|id|value|language|type|container|list|set|'
r'reverse|index|base|vocab|graph)"', Name.Decorator,
'objectattribute'),
inherit,
],
}
| apache-2.0 | 2,063,301,534,494,567,400 | 33.484906 | 83 | 0.48799 | false |