repo_name stringlengths 5–100 | ref stringlengths 12–67 | path stringlengths 4–244 | copies stringlengths 1–8 | content stringlengths 0–1.05M (nullable) |
---|---|---|---|---|
titilambert/teeawards | refs/heads/master | old/achievements/medals/medals.py | 1 | from pymongo import DESCENDING
from libs.lib import tee_db, get_stats, get_player_stats
from bottle import mako_view
from datetime import datetime, timedelta
from libs.achievement import achievement_desc_list, achievement_player_list, achievement_livestat_list
from libs.lib import kill_table
@mako_view("desc_medals")
def desc_medals():
return {'medal_list': medals_list}
@mako_view("player_medals")
def player_medals(player, gametype):
player_stats = get_stats(player, gametype)
medal_result = {}
medal_result['gold star'] = player_stats.get('first_place', 0)
medal_result['silver star'] = player_stats.get('second_place', 0)
medal_result['bronze star'] = player_stats.get('third_place', 0)
medal_result['chocolate star'] = player_stats.get('last_place', 0)
medal_result['purple heart'] = player_stats.get('purple', 0)
medal_result['eternal heart'] = player_stats.get('no death', 0)
return {'results': medal_result,
'medals_list': medals_list}
def livestat_medals(new_data):
return None
#
medals_list = [
('gold star', 'First place IAR'),
('silver star', 'Second place IAR'),
('bronze star', 'Third place IAR'),
('chocolate star', 'Last place IAR'),
('purple heart', 'Ratio < 1:4 IAR'),
('eternal heart', 'No death IAR'),
]
achievement_desc_list['desc_medals'] = desc_medals
achievement_player_list['player_medals'] = player_medals
achievement_livestat_list['livestat_medals'] = livestat_medals
|
Bumpybox/pyblish-bumpybox | refs/heads/master | pyblish_bumpybox/plugins/nuke/collect_reads.py | 1 | from pyblish import api
from pyblish_bumpybox import inventory
class CollectReads(api.ContextPlugin):
"""Collect all read nodes."""
order = inventory.get_order(__file__, "CollectReads")
label = "Reads"
hosts = ["nuke", "nukeassist"]
def process(self, context):
import os
import clique
import nuke
        # creating instances per read node
for node in nuke.allNodes():
if node.Class() != "Read":
continue
if not node.metadata():
continue
# Determine output type
output_type = "img"
movie_formats = ["ari", "avi", "gif", "mov", "r3d"]
if node.metadata()["input/filereader"] in movie_formats:
output_type = "mov"
scene_formats = ["psd"]
if node.metadata()["input/filereader"] in scene_formats:
output_type = "scene"
# Create instance
instance = context.create_instance(node.name())
instance.data["families"] = [output_type, "local", "output"]
instance.data["family"] = "read"
instance.add(node)
path = nuke.filename(node)
# Adding/Checking publish attribute
if "publish" not in node.knobs():
knob = nuke.Boolean_Knob("publish", "Publish")
knob.setValue(False)
node.addKnob(knob)
# Compare against selection
selection = instance.context.data.get("selection", [])
publish = bool(node["publish"].getValue())
if selection:
if list(set(instance) & set(selection)):
publish = True
else:
publish = False
instance.data["publish"] = publish
# Collecting file paths
label = "{0} - {1}".format(node.name(), os.path.basename(path))
if output_type == "img":
                # This could be improved: counting every "#" in the path
                # assumes the only "#" characters are the frame padding.
if "#" in path:
padding = path.count("#")
path = path.replace(
"#" * padding, "%{0:0>2}d".format(padding)
)
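                # e.g. a hypothetical path "shot/render.####.exr" becomes
                # "shot/render.%04d.exr" before being parsed by clique.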
try:
collection = clique.parse(path + " []")
except ValueError as e:
collections, remainder = clique.assemble(
[path],
minimum_items=1,
patterns=[clique.PATTERNS['frames']]
)
if collections:
collection = collections[0]
else:
context.remove(instance)
self.log.warning(
"Collection error on \"{0}\": "
"{1}".format(node.name(), e)
)
continue
for f in os.listdir(os.path.dirname(path)):
file_path = os.path.join(os.path.dirname(path), f)
file_path = file_path.replace("\\", "/")
if collection.match(file_path):
collection.add(file_path)
# Limit to frame range
                # int() guards against float knob values breaking range()
                first = int(node["first"].value())
                last = int(node["last"].value())
indexes = list(collection.indexes)
collection.indexes.clear()
collection.indexes.update(
set(indexes) & set([x for x in range(first, last + 1)])
)
instance.data["collection"] = collection
label = "{0} - {1}".format(
node.name(), os.path.basename(collection.format())
)
else:
instance.data["output_path"] = path
instance.data["label"] = label
def instanceToggled(instance, value):
# Removing and adding the knob to support NukeAssist, where
# you can't modify the knob value directly.
instance[0].removeKnob(instance[0]["publish"])
knob = nuke.Boolean_Knob(
"publish", "Publish"
)
knob.setValue(value)
instance[0].addKnob(knob)
instance.data["instanceToggled"] = instanceToggled
|
dustymugs/pgpointcloud_utils | refs/heads/master | pgpointcloud_utils/pcpoint.py | 1 | import copy
import struct
import binascii
import pyproj
from decimal import Decimal
from numeric_string_parser import NumericStringParser
from .pcexception import *
from .pcformat import PcDimension, PcFormat
class PcPoint(object):
_dimensions = []
# header format
#
# byte (endian)
# uint32 (pcid)
_HEADER_FORMAT = ['B', 'I']
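    # e.g. (sketch) for a little-endian point with pcid=1, the header bytes
    # unpack as struct.unpack('< B I', data[:5]) -> (1, 1): an endian flag
    # byte (1 = NDR) followed by the uint32 pcid.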
def __init__(
self,
pcformat, values=None
):
self._pcformat = None
self._raw_values = []
if pcformat is not None:
self.pcformat = pcformat
if values is not None:
self.values = values
@property
def pcformat(self):
return self._pcformat
@pcformat.setter
def pcformat(self, new_value):
if not isinstance(new_value, PcFormat):
raise PcInvalidArgException(
message='Value not an instance of PcFormat'
)
self._pcformat = new_value
        # the number of possible values is driven by the number of dimensions
num_dimensions = len(self._pcformat.dimensions)
num_values = len(self._raw_values)
if num_values < 1:
self._raw_values = [0.] * num_dimensions
elif num_values > num_dimensions:
self._raw_values = self._raw_values[:num_dimensions]
elif num_values < num_dimensions:
self._raw_values += ([0.] * (num_dimensions - num_values))
@staticmethod
def _compute_processed_value(value, dimension):
if Decimal(dimension.scale) != Decimal(PcDimension.DEFAULT_SCALE):
return value * dimension.scale
else:
return value
@staticmethod
def _compute_raw_value(value, dimension):
if Decimal(dimension.scale) != Decimal(PcDimension.DEFAULT_SCALE):
return value / dimension.scale
else:
return value
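    # e.g. with dimension.scale == 0.01, a stored raw value of 123 is exposed
    # as the processed value 1.23, and setting 1.23 stores 123 again.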
@property
def values(self):
'''
return processed values. raw values are never returned
'''
return map(
PcPoint._compute_processed_value,
self._raw_values,
self.pcformat.dimensions
)
@values.setter
def values(self, new_values):
'''
set raw values by converting provided values
'''
if not isinstance(new_values, list):
raise PcInvalidArgException(
message='Value not a list'
)
dimensions = self.pcformat.dimensions
num_dimensions = len(dimensions)
if len(new_values) != num_dimensions:
raise PcInvalidArgException(
message='Value has different number of elements than PcFormat dimensions'
)
self._raw_values = map(
PcPoint._compute_raw_value,
new_values,
dimensions
)
@classmethod
def is_ndr(cls, data):
'''
get endian-ness
True = NDR, False = XDR
'''
return bool(ord(data[0]))
@classmethod
def header_format(cls, is_ndr):
if is_ndr:
frmt = ['<']
else:
frmt = ['>']
frmt += cls._HEADER_FORMAT
return ' '.join(frmt)
@classmethod
def combined_format(cls, is_ndr, pcformat):
header_format = cls.header_format(is_ndr)
data_format = pcformat.struct_format
return ' '.join([header_format, data_format])
@classmethod
def extract_pcid_from_binary(cls, data):
s = struct.Struct(cls.header_format(
is_ndr=cls.is_ndr(data)
))
header = s.unpack(data[:s.size])
return header[1]
@classmethod
def extract_pcid_from_hex(cls, hexstr):
return cls.extract_pcid_from_binary(binascii.unhexlify(hexstr))
@classmethod
def from_binary(cls, pcformat, data):
'''
        deserialize PcPoint from binary representation. returns PcPoint
'''
s = struct.Struct(cls.combined_format(
is_ndr=cls.is_ndr(data),
pcformat=pcformat
))
values = [v for v in s.unpack(data)]
pt = PcPoint(pcformat=pcformat)
pt._raw_values = values[len(PcPoint._HEADER_FORMAT):]
return pt
@classmethod
def from_hex(cls, pcformat, hexstr):
'''
        deserialize PcPoint from hex representation. returns PcPoint
'''
return cls.from_binary(pcformat, binascii.unhexlify(hexstr))
def as_binary(self):
'''
serialize PcPoint. returns binary representation
'''
if self.pcformat is None:
raise PcRunTimeException(
message='Cannot dump PcPoint without a PcFormat'
)
s = struct.Struct(PcPoint.combined_format(
is_ndr=True,
pcformat=self.pcformat
))
values = [1, self.pcformat.pcid] + self._raw_values
return s.pack(*values)
def as_hex(self):
'''
serialize PcPoint. returns hex representation
'''
return binascii.hexlify(self.as_binary())
def get_value(self, name_or_pos):
'''
return the value of provided dimension name or position (1-based)
'''
if self.pcformat is None:
raise PcRunTimeException(
message='Cannot get dimension value from PcPoint without PcFormat'
)
# get raw value
if isinstance(name_or_pos, int):
# position is 1-based
raw_value = self._raw_values[name_or_pos - 1]
else:
raw_value = self._raw_values[self.pcformat.get_dimension_index(name_or_pos)]
dim = self.pcformat.get_dimension(name_or_pos)
if Decimal(dim.scale) != Decimal(PcDimension.DEFAULT_SCALE):
value = raw_value * dim.scale
else:
value = raw_value
return value
def set_value(self, name_or_pos, value):
'''
set the value of provided dimension name or position (1-based)
'''
if self.pcformat is None:
raise PcRunTimeException(
message='Cannot set dimension value from PcPoint without PcFormat'
)
# scale if dimension has scale
dim = self.pcformat.get_dimension(name_or_pos)
if Decimal(dim.scale) != Decimal(PcDimension.DEFAULT_SCALE):
raw_value = value / dim.scale
else:
raw_value = value
if isinstance(name_or_pos, int):
# position is 1-based
self._raw_values[name_or_pos - 1] = raw_value
else:
self._raw_values[self.pcformat.get_dimension_index(name_or_pos)] = raw_value
def copy(self):
'''
returns a copy of this PcPoint
'''
pt = PcPoint(pcformat=self.pcformat)
pt._raw_values = copy.deepcopy(self._raw_values)
return pt
def transform(self, pcformat, mapping):
'''
transform PcPoint to provided pcformat using the given mapping
transforms by:
1. converting values
2. reprojecting coordinates (X,Y) if pcformat has different SRID
returns new PcPoint
'''
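        # Example mapping (a sketch; the dimension names are hypothetical):
        #   {
        #       'X': None,                 # copy the same-named dimension
        #       'Y': 2,                    # copy source position 2 (1-based)
        #       'Z': 'Elevation',          # copy the named source dimension
        #       'Intensity': {'expression': '$Intensity / 2'},  # computed
        #   }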
        # if From pcformat == To pcformat, return a copy of this PcPoint
if self.pcformat == pcformat:
return self.copy()
# get info of From pcformat
from_dimensions = self.pcformat.dimensions
num_from_dimensions = len(from_dimensions)
# get info of To pcformat
to_dimensions = pcformat.dimensions
num_to_dimensions = len(to_dimensions)
# load mapping
if not isinstance(mapping, dict):
raise PcInvalidArgException(
message='mapping not a dict'
)
# new pcpoint
to_pcpoint = PcPoint(pcformat=pcformat)
#
# run conversion
#
# placeholders for if expressions are needed
expr_assignment = {}
nsp = None
to_values = [0.] * num_to_dimensions
map_keys = mapping.keys()
for to_idx in xrange(num_to_dimensions):
to_dimension = to_dimensions[to_idx]
to_position = to_idx + 1
by_position = False
# position match
if to_position in map_keys:
map_from = mapping[to_position]
by_position = True
# name match
elif to_dimension.name in map_keys:
map_from = mapping[to_dimension.name]
# no match, exception
else:
raise PcInvalidArgException(
message='Destination PcFormat dimension "{dimension}" at position {position} not found in mapping'.format(
dimension=to_dimension.name,
position=to_position
)
)
# inspect map_from
# None, use the "to"
if map_from is None:
if by_position:
to_values[to_idx] = self.get_value(to_position)
else:
to_values[to_idx] = self.get_value(to_dimension.name)
# integer, use as index
elif isinstance(map_from, int):
to_values[to_idx] = self.get_value(map_from)
# string, use as dimension name
elif isinstance(map_from, str):
to_values[to_idx] = self.get_value(map_from)
# dictionary, more advanced behavior
elif isinstance(map_from, dict):
                if 'value' in map_from:
to_values[to_idx] = map_from.get('value')
                elif 'expression' in map_from:
expr = map_from.get('expression')
# assignment object does not exist
if len(expr_assignment) < 1:
for from_idx in xrange(num_from_dimensions):
from_position = from_idx + 1
from_value = str(self.get_value(from_position))
expr_assignment['$' + str(from_idx + 1)] = from_value
expr_assignment['$' + from_dimensions[from_idx].name] = from_value
# instance of NumericStringParser
nsp = NumericStringParser()
# substitute values for placeholders
for k, v in expr_assignment.iteritems():
expr = expr.replace(k, v)
# evaluate expression
to_values[to_idx] = nsp.eval(expr)
else:
if by_position:
key = to_position
else:
key = to_dimension.name
raise PcInvalidArgException(
message="Unrecognized dictionary for mapping key: {key} ".format(
key=key
)
)
else:
if by_position:
key = to_position
else:
key = to_dimension.name
raise PcInvalidArgException(
message="Unrecognized value for mapping key: {key} ".format(
key=key
)
)
# set values
to_pcpoint.values = to_values
# reproject if different srid
if self.pcformat.srid != pcformat.srid:
if (
self.pcformat.proj4text is None or
len(self.pcformat.proj4text) < 1 or
pcformat.proj4text is None or
len(pcformat.proj4text) < 1
):
raise PcRunTimeException(
message='Cannot reproject coordinates. Missing proj4text'
)
try:
from_proj = pyproj.Proj(self.pcformat.proj4text)
to_proj = pyproj.Proj(pcformat.proj4text)
            except Exception:
raise PcRunTimeException(
message='Cannot reproject coordinates. Invalid proj4text'
)
to_x, to_y = pyproj.transform(
from_proj,
to_proj,
to_pcpoint.get_value('X'),
to_pcpoint.get_value('Y')
)
to_pcpoint.set_value('X', to_x)
to_pcpoint.set_value('Y', to_y)
return to_pcpoint
|
cmdelatorre/roses | refs/heads/master | skills/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
liorvh/golismero | refs/heads/master | thirdparty_libs/django/core/serializers/pyyaml.py | 110 | """
YAML serializer.
Requires PyYaml (http://pyyaml.org/), but that's checked for in __init__.
"""
import decimal
import yaml
from io import StringIO
from django.db import models
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
from django.utils import six
class DjangoSafeDumper(yaml.SafeDumper):
def represent_decimal(self, data):
return self.represent_scalar('tag:yaml.org,2002:str', str(data))
DjangoSafeDumper.add_representer(decimal.Decimal, DjangoSafeDumper.represent_decimal)
class Serializer(PythonSerializer):
"""
Convert a queryset to YAML.
"""
internal_use_only = False
def handle_field(self, obj, field):
# A nasty special case: base YAML doesn't support serialization of time
# types (as opposed to dates or datetimes, which it does support). Since
# we want to use the "safe" serializer for better interoperability, we
# need to do something with those pesky times. Converting 'em to strings
# isn't perfect, but it's better than a "!!python/time" type which would
# halt deserialization under any other language.
if isinstance(field, models.TimeField) and getattr(obj, field.name) is not None:
self._current[field.name] = str(getattr(obj, field.name))
else:
super(Serializer, self).handle_field(obj, field)
def end_serialization(self):
yaml.dump(self.objects, self.stream, Dumper=DjangoSafeDumper, **self.options)
def getvalue(self):
# Grand-parent super
return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of YAML data.
"""
if isinstance(stream_or_string, bytes):
stream_or_string = stream_or_string.decode('utf-8')
if isinstance(stream_or_string, six.string_types):
stream = StringIO(stream_or_string)
else:
stream = stream_or_string
try:
for obj in PythonDeserializer(yaml.safe_load(stream), **options):
yield obj
except GeneratorExit:
raise
except Exception as e:
# Map to deserializer error
raise DeserializationError(e)
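# Usage sketch: these classes are reached through Django's serializer
# registry rather than imported directly, e.g. (Author is a hypothetical
# model):
#   from django.core import serializers
#   text = serializers.serialize('yaml', Author.objects.all())
#   objects = list(serializers.deserialize('yaml', text))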
|
moritzschaefer/luigi | refs/heads/master | test/remote_scheduler_test.py | 67 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import tempfile
import unittest
import luigi.server
import server_test
tempdir = tempfile.mkdtemp()
class DummyTask(luigi.Task):
id = luigi.Parameter()
def run(self):
f = self.output().open('w')
f.close()
def output(self):
return luigi.LocalTarget(os.path.join(tempdir, str(self.id)))
class RemoteSchedulerTest(server_test.ServerTestBase):
def _test_run(self, workers):
tasks = [DummyTask(id) for id in range(20)]
luigi.build(tasks, workers=workers, scheduler_port=self.get_http_port())
for t in tasks:
self.assertEqual(t.complete(), True)
self.assertTrue(os.path.exists(t.output().path))
def test_single_worker(self):
self._test_run(workers=1)
def test_multiple_workers(self):
self._test_run(workers=10)
if __name__ == '__main__':
unittest.main()
|
zeroSteiner/king-phisher | refs/heads/master | king_phisher/client/windows/__init__.py | 5 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/windows/__init__.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from .main import *
from .rpc_terminal import *
|
adamkh/micropython | refs/heads/master | docs/conf.py | 6 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# MicroPython documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 21 11:42:03 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Work out the port to generate the docs for
from collections import OrderedDict
micropy_port = os.getenv('MICROPY_PORT') or 'pyboard'
tags.add('port_' + micropy_port)
ports = OrderedDict((
("unix", "unix"),
("pyboard", "the pyboard"),
("wipy", "the WiPy"),
("esp8266", "esp8266"),
))
# The members of the html_context dict are available inside topindex.html
url_prefix = os.getenv('MICROPY_URL_PREFIX') or '/'
html_context = {
'port':micropy_port,
'port_name':ports[micropy_port],
'all_ports':[(n, url_prefix + p) for p, n in ports.items()],
}
# Specify a custom master document based on the port name
master_doc = micropy_port + '_' + 'index'
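# e.g. MICROPY_PORT=wipy yields master_doc = 'wipy_index'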
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
#master_doc = 'index'
# General information about the project.
project = 'MicroPython'
copyright = '2014, Damien P. George'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.6'
# The full version, including alpha/beta/rc tags.
release = '1.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
except:
html_theme = 'default'
html_theme_path = ['.']
else:
html_theme_path = ['.']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '../../logo/trans-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%d %b %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {"index": "topindex.html"}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MicroPythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MicroPython.tex', 'MicroPython Documentation',
'Damien P. George and contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'micropython', 'MicroPython Documentation',
['Damien P. George and contributors'], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MicroPython', 'MicroPython Documentation',
'Damien P. George and contributors', 'MicroPython', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# Append the other ports' specific folders/files to the exclude pattern
exclude_patterns.extend([port + '*' for port in ports if port != micropy_port])
# Exclude pyb module if the port is the WiPy
if micropy_port == 'wipy':
exclude_patterns.append('library/pyb*')
else: # exclude machine
exclude_patterns.append('library/machine*')
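# e.g. with micropy_port == 'pyboard', the two steps above exclude 'unix*',
# 'wipy*' and 'esp8266*' plus 'library/machine*'.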
|
leeon/annotated-django | refs/heads/note | tests/model_formsets/models.py | 69 | from __future__ import unicode_literals
import datetime
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=100)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class BetterAuthor(Author):
write_speed = models.IntegerField()
@python_2_unicode_compatible
class Book(models.Model):
author = models.ForeignKey(Author)
title = models.CharField(max_length=100)
class Meta:
unique_together = (
('author', 'title'),
)
ordering = ['id']
def __str__(self):
return self.title
@python_2_unicode_compatible
class BookWithCustomPK(models.Model):
my_pk = models.DecimalField(max_digits=5, decimal_places=0, primary_key=True)
author = models.ForeignKey(Author)
title = models.CharField(max_length=100)
def __str__(self):
return '%s: %s' % (self.my_pk, self.title)
class Editor(models.Model):
name = models.CharField(max_length=100)
@python_2_unicode_compatible
class BookWithOptionalAltEditor(models.Model):
author = models.ForeignKey(Author)
# Optional secondary author
alt_editor = models.ForeignKey(Editor, blank=True, null=True)
title = models.CharField(max_length=100)
class Meta:
unique_together = (
('author', 'title', 'alt_editor'),
)
def __str__(self):
return self.title
@python_2_unicode_compatible
class AlternateBook(Book):
notes = models.CharField(max_length=100)
def __str__(self):
return '%s - %s' % (self.title, self.notes)
@python_2_unicode_compatible
class AuthorMeeting(models.Model):
name = models.CharField(max_length=100)
authors = models.ManyToManyField(Author)
created = models.DateField(editable=False)
def __str__(self):
return self.name
class CustomPrimaryKey(models.Model):
my_pk = models.CharField(max_length=10, primary_key=True)
some_field = models.CharField(max_length=100)
# models for inheritance tests.
@python_2_unicode_compatible
class Place(models.Model):
name = models.CharField(max_length=50)
city = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Owner(models.Model):
auto_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
place = models.ForeignKey(Place)
def __str__(self):
return "%s at %s" % (self.name, self.place)
class Location(models.Model):
place = models.ForeignKey(Place, unique=True)
# this is purely for testing the data doesn't matter here :)
lat = models.CharField(max_length=100)
lon = models.CharField(max_length=100)
@python_2_unicode_compatible
class OwnerProfile(models.Model):
owner = models.OneToOneField(Owner, primary_key=True)
age = models.PositiveIntegerField()
def __str__(self):
return "%s is %d" % (self.owner.name, self.age)
@python_2_unicode_compatible
class Restaurant(Place):
serves_pizza = models.BooleanField(default=False)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Product(models.Model):
slug = models.SlugField(unique=True)
def __str__(self):
return self.slug
@python_2_unicode_compatible
class Price(models.Model):
price = models.DecimalField(max_digits=10, decimal_places=2)
quantity = models.PositiveIntegerField()
def __str__(self):
return "%s for %s" % (self.quantity, self.price)
class Meta:
unique_together = (('price', 'quantity'),)
class MexicanRestaurant(Restaurant):
serves_tacos = models.BooleanField(default=False)
class ClassyMexicanRestaurant(MexicanRestaurant):
restaurant = models.OneToOneField(MexicanRestaurant, parent_link=True, primary_key=True)
tacos_are_yummy = models.BooleanField(default=False)
# models for testing unique_together validation when a fk is involved and
# using inlineformset_factory.
@python_2_unicode_compatible
class Repository(models.Model):
name = models.CharField(max_length=25)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Revision(models.Model):
repository = models.ForeignKey(Repository)
revision = models.CharField(max_length=40)
class Meta:
unique_together = (("repository", "revision"),)
def __str__(self):
return "%s (%s)" % (self.revision, six.text_type(self.repository))
# models for testing callable defaults (see bug #7975). If you define a model
# with a callable default value, you cannot rely on the initial value in a
# form.
class Person(models.Model):
name = models.CharField(max_length=128)
class Membership(models.Model):
person = models.ForeignKey(Person)
date_joined = models.DateTimeField(default=datetime.datetime.now)
karma = models.IntegerField()
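# e.g. passing the callable datetime.datetime.now (not its result) means the
# default is re-evaluated for every new Membership, so a form's initial
# value cannot be relied upon to match it.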
# models for testing a null=True fk to a parent
class Team(models.Model):
name = models.CharField(max_length=100)
@python_2_unicode_compatible
class Player(models.Model):
team = models.ForeignKey(Team, null=True)
name = models.CharField(max_length=100)
def __str__(self):
return self.name
# Models for testing custom ModelForm save methods in formsets and inline formsets
@python_2_unicode_compatible
class Poet(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Poem(models.Model):
poet = models.ForeignKey(Poet)
name = models.CharField(max_length=100)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Post(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateField()
def __str__(self):
        return self.title
|
2014c2g4/c2g4 | refs/heads/master | w2/static/Brython2.0.0-20140209-164925/Lib/pydoc.py | 40 | #!/usr/bin/env python3
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on the given port on the
local machine. Port number 0 can be used to get an arbitrary unused port.
Run "pydoc -b" to start an HTTP server on an arbitrary unused port and
open a Web browser to interactively browse documentation. The -p option
can be used with the -b option to explicitly specify the server port.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://docs.python.org/X.Y/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__all__ = ['help']
__author__ = "Ka-Ping Yee <[email protected]>"
__date__ = "26 February 2001"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import builtins
import imp
import importlib.machinery
import inspect
import io
import os
import pkgutil
import platform
import re
import sys
import time
import tokenize
import warnings
from collections import deque
from reprlib import Repr
from traceback import extract_tb, format_exception_only
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
return result and re.sub('^ *\n', '', result.rstrip()) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = doc.strip().split('\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not lines[1].rstrip():
return lines[0], '\n'.join(lines[2:])
return '', '\n'.join(lines)
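# e.g. splitdoc('Synopsis line.\n\nDetails here.') returns
# ('Synopsis line.', 'Details here.')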
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = pairs[1].join(text.split(pairs[0]))
pairs = pairs[2:]
return text
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
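# e.g. cram('abcdefghij', 7) == 'ab...ij'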
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
return _re_stripid.sub(r'\1', text)
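# e.g. stripid('<Foo object at 0x7f3a2b1c2d40>') == '<Foo object>'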
def _is_some_method(obj):
return (inspect.isfunction(obj) or
inspect.ismethod(obj) or
inspect.isbuiltin(obj) or
inspect.ismethoddescriptor(obj))
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None, obj=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant or internal.
if name in {'__author__', '__builtins__', '__cached__', '__credits__',
'__date__', '__doc__', '__file__', '__initializing__',
'__loader__', '__module__', '__name__', '__package__',
'__path__', '__qualname__', '__slots__', '__version__'}:
return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
# Namedtuples have public fields and methods with a single leading underscore
if name.startswith('_') and hasattr(obj, '_fields'):
return True
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
def classify_class_attrs(object):
"""Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
results = []
for (name, kind, cls, value) in inspect.classify_class_attrs(object):
if inspect.isdatadescriptor(value):
kind = 'data descriptor'
results.append((name, kind, cls, value))
return results
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ('.py', '.pyc', '.pyo'):
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def source_synopsis(file):
line = file.readline()
while line[:1] == '#' or not line.strip():
line = file.readline()
if not line: break
line = line.strip()
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not line.strip():
line = file.readline()
if not line: break
result = line.split('"""')[0].strip()
else: result = None
return result
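# e.g. for a file beginning with '"""One-line summary.\n\n..."""' this
# returns 'One-line summary.'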
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (None, None))
if lastupdate is None or lastupdate < mtime:
try:
file = tokenize.open(filename)
except IOError:
# module can't be opened, so skip it
return None
binary_suffixes = importlib.machinery.BYTECODE_SUFFIXES[:]
binary_suffixes += importlib.machinery.EXTENSION_SUFFIXES[:]
if any(filename.endswith(x) for x in binary_suffixes):
# binary modules have to be imported
file.close()
if any(filename.endswith(x) for x in
importlib.machinery.BYTECODE_SUFFIXES):
loader = importlib.machinery.SourcelessFileLoader('__temp__',
filename)
else:
loader = importlib.machinery.ExtensionFileLoader('__temp__',
filename)
try:
module = loader.load_module('__temp__')
except:
return None
result = (module.__doc__ or '').splitlines()[0]
del sys.modules['__temp__']
else:
# text modules can be directly examined
result = source_synopsis(file)
file.close()
cache[filename] = (mtime, result)
return result
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, exc_info):
self.filename = filename
self.exc, self.value, self.tb = exc_info
def __str__(self):
exc = self.exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
with open(path, 'rb') as file:
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.seek(0)
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
except:
raise ErrorDuringImport(path, sys.exc_info())
return module
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Remove the module from sys.modules and re-import to try
# and avoid problems with partially loaded modules.
# Also remove any submodules because they won't appear
# in the newly loaded module's namespace if they're already
# in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
elif exc is ImportError and value.name == path:
# No such module in the path.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in path.split('.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
# ---------------------------------------------------- formatter base class
class Doc:
PYTHONDOCS = os.environ.get("PYTHONDOCS",
"http://docs.python.org/%d.%d/library"
% sys.version_info[:2])
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
if inspect.isgetsetdescriptor(object): return self.docdata(*args)
if inspect.ismemberdescriptor(object): return self.docdata(*args)
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError(message)
docmodule = docclass = docroutine = docother = docproperty = docdata = fail
def getdocloc(self, object):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS", self.PYTHONDOCS)
basedir = os.path.join(sys.base_exec_prefix, "lib",
"python%d.%d" % sys.version_info[:2])
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'_thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages')))) and
object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
if docloc.startswith("http://"):
docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
else:
docloc = os.path.join(docloc, object.__name__ + ".html")
else:
docloc = None
return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
return replace(text, '&', '&', '<', '<', '>', '>')
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + '_'.join(type(x).__name__.split())
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom> <br>
<font color="%s" face="helvetica, arial"> <br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
''' % (bgcol, fgcol, title, fgcol, extras or ' ')
def section(self, title, fgcol, bgcol, contents, width=6,
prelude='', marginalia=None, gap=' '):
"""Format a section with a heading."""
if marginalia is None:
marginalia = '<tt>' + ' ' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom> <br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(text.expandtabs())
return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
' ', ' ', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)//cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100//cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, modpkginfo):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = modpkginfo
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def filelink(self, url, path):
"""Make a link to source file."""
return '<a href="file:%s">%s</a>' % (url, path)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
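        # e.g. markup('see RFC 2616') turns the reference into a link to
        # http://www.rfc-editor.org/rfc/rfc2616.txt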
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
url = escape(all).replace('"', '"')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return ''.join(results)
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + ', '.join(parents) + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = name.split('.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
('.'.join(parts[:i+1]), parts[i]))
linkedname = '.'.join(links + parts[-1:])
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
filelink = self.filelink(url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = version[11:-1].strip()
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(str(object.__date__)))
if info:
head = head + ' (%s)' % ', '.join(info)
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Reference</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda t: self.modulelink(t[1]))
result = result + self.bigsection(
'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = [value for (key, value) in classes]
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', ' '.join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', ' '.join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', '<br>\n'.join(contents))
if hasattr(object, '__author__'):
contents = self.markup(str(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(str(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value, name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if callable(value) or inspect.isdatadescriptor(value):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = [(name, kind, cls, value)
for name, kind, cls, value in classify_class_attrs(object)
if visiblename(name, obj=object)]
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
pass
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is builtins.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
attrs.sort(key=lambda t: t[0])
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % ', '.join(parents)
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
doc = doc and '<tt>%s<br> </tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.__self__.__class__
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.__self__ is not None:
note = ' method of %s instance' % self.classlink(
object.__self__.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
object = object.__func__
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
if inspect.isfunction(object):
            args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann = \
                inspect.getfullargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann,
                formatvalue=self.formatvalue,
                formatannotation=inspect.formatannotationrelativeto(object))
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
                # XXX lambdas won't usually have func_annotations['return']
                # since the syntax doesn't support it, but it is possible.
# So removing parentheses isn't truly safe.
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir]):
if any((0xD800 <= ord(ch) <= 0xDFFF) for ch in name):
# ignore a module if its name contains a surrogate character
continue
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + '_'.join(type(x).__name__.split())
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return ''.join(ch + '\b' + ch for ch in text)
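    # e.g. bold('hi') returns 'h\bhi\bi' -- the character/backspace overstrike
    # convention understood by pagers such as less, and undone by plain() below.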
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = [prefix + line for line in text.split('\n')]
if lines: lines[-1] = lines[-1].rstrip()
return '\n'.join(lines)
def section(self, title, contents):
"""Format a section with a given heading."""
clean_contents = self.indent(contents).rstrip()
return self.bold(title) + '\n' + clean_contents + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = (classname(c, modname) for c in bases)
result = result + '(%s)' % ', '.join(parents)
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
all = getattr(object, '__all__', None)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE REFERENCE', docloc + """
The following documentation is automatically generated from the Python
source files. It may be incomplete, incorrect or include features that
are considered implementation detail and may vary between Python
implementations. When in doubt, consult the module reference at the
location listed above.
""")
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
modpkgs = []
modpkgs_names = set()
if hasattr(object, '__path__'):
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs_names.add(modname)
if ispkg:
modpkgs.append(modname + ' (package)')
else:
modpkgs.append(modname)
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', '\n'.join(modpkgs))
# Detect submodules as sometimes created by C extensions
submodules = []
for key, value in inspect.getmembers(object, inspect.ismodule):
if value.__name__.startswith(name + '.') and key not in modpkgs_names:
submodules.append(key)
if submodules:
submodules.sort()
result = result + self.section(
'SUBMODULES', '\n'.join(submodules))
if classes:
classlist = [value for key, value in classes]
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', '\n'.join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', '\n'.join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', '\n'.join(contents))
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = version[11:-1].strip()
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', str(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', str(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', str(object.__credits__))
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
return result
def docclass(self, object, name=None, mod=None, *ignored):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % ', '.join(parents)
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value,
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if callable(value) or inspect.isdatadescriptor(value):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = [(name, kind, cls, value)
for name, kind, cls, value in classify_class_attrs(object)
if visiblename(name, obj=object)]
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is builtins.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(contents.rstrip(), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.__self__.__class__
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.__self__ is not None:
note = ' method of %s instance' % classname(
object.__self__.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
object = object.__func__
if name == realname:
title = self.bold(realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
skipdocs = 1
title = self.bold(name) + ' = ' + realname
if inspect.isfunction(object):
args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann = \
inspect.getfullargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann,
formatvalue=self.formatvalue,
formatannotation=inspect.formatannotationrelativeto(object))
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
                # XXX lambdas won't usually have func_annotations['return']
                # since the syntax doesn't support it, but it is possible.
# So removing parentheses isn't truly safe.
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and self.indent(doc).rstrip() + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docdescriptor(name, object, mod)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
class _PlainTextDoc(TextDoc):
"""Subclass of TextDoc which overrides string styling"""
def bold(self, text):
return text
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
pager = getpager()
pager(text)
def getpager():
"""Decide what method to use for paging through text."""
if not hasattr(sys.stdout, "isatty"):
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
if 'PAGER' in os.environ:
if sys.platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
elif os.environ.get('TERM') in ('dumb', 'emacs'):
return lambda text: pipepager(plain(text), os.environ['PAGER'])
else:
return lambda text: pipepager(text, os.environ['PAGER'])
if os.environ.get('TERM') in ('dumb', 'emacs'):
return plainpager
if sys.platform == 'win32' or sys.platform.startswith('os2'):
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
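# For example, plain(TextDoc().bold('spam')) == 'spam': the regex deletes each
# "character + backspace" pair that bold() emitted.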
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
pipe = os.popen(cmd, 'w')
try:
pipe.write(text)
pipe.close()
except IOError:
pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
file = open(filename, 'w')
file.write(text)
file.close()
try:
os.system(cmd + ' "' + filename + '"')
finally:
os.unlink(filename)
def ttypager(text):
"""Page through text on a text terminal."""
lines = plain(text).split('\n')
try:
import tty
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
try:
        r = inc = int(os.environ.get('LINES', 25)) - 1
sys.stdout.write('\n'.join(lines[:inc]) + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ('q', 'Q'):
sys.stdout.write('\r \r')
break
elif c in ('\r', '\n'):
sys.stdout.write('\r \r' + lines[r] + '\n')
r = r + 1
continue
if c in ('b', 'B', '\x1b'):
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + '\n'.join(lines[r:r+inc]) + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(plain(text))
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isgetsetdescriptor(thing):
return 'getset descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.ismemberdescriptor(thing):
return 'member descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
return type(thing).__name__
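# For example: describe(sys) == 'built-in module sys', describe(pydoc) ==
# 'module pydoc', and describe(len) == 'built-in function len'.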
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in path.split('.') if part]
module, n = None, 0
while n < len(parts):
nextmodule = safeimport('.'.join(parts[:n+1]), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
else:
object = builtins
for part in parts[n:]:
try:
object = getattr(object, part)
except AttributeError:
return None
return object
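# For example, locate('json.decoder.JSONDecoder') imports json.decoder and
# returns the JSONDecoder class; a dotted path that resolves nowhere, such as
# locate('json.no_such_name'), returns None.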
# --------------------------------------- interactive interpreter interface
text = TextDoc()
plaintext = _PlainTextDoc()
html = HTMLDoc()
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if not object:
raise ImportError('no Python documentation found for %r' % thing)
return object, thing
else:
name = getattr(thing, '__name__', None)
return thing, name if isinstance(name, str) else None
def render_doc(thing, title='Python Library Documentation: %s', forceload=0,
renderer=None):
"""Render text documentation, given an object or a path to an object."""
if renderer is None:
renderer = text
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
inspect.isgetsetdescriptor(object) or
inspect.ismemberdescriptor(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
return title % desc + '\n\n' + renderer.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0,
output=None):
"""Display text documentation, given an object or a path to an object."""
try:
if output is None:
pager(render_doc(thing, title, forceload))
else:
output.write(render_doc(thing, title, forceload, plaintext))
except (ImportError, ErrorDuringImport) as value:
print(value)
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
file = open(name + '.html', 'w', encoding='utf-8')
file.write(page)
file.close()
print('wrote', name + '.html')
except (ImportError, ErrorDuringImport) as value:
print(value)
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
writedoc(modname)
return
class Helper:
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/sphinxext/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'False': '',
'None': '',
'True': '',
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'nonlocal NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'nonlocal': ('nonlocal', 'global NAMESPACES'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_symbols_inverse = {
'STRINGS' : ("'", "'''", "r'", "b'", '"""', '"', 'r"', 'b"'),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY' : ('-', '~'),
'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX' : ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
for topic, symbols_ in _symbols_inverse.items():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING range LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS MAPPINGMETHODS '
'NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global nonlocal ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'CONVERSIONS': ('conversions', ''),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS NUMBERS TUPLELITERALS '
'LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
def __init__(self, input=None, output=None):
self._input = input
self._output = output
input = property(lambda self: self._input or sys.stdin)
output = property(lambda self: self._output or sys.stdout)
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<pydoc.Helper instance>'
_GoInteractive = object()
def __call__(self, request=_GoInteractive):
if request is not self._GoInteractive:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = replace(request, '"', '', "'", '').strip()
if request.lower() in ('q', 'quit'): break
self.help(request)
def getline(self, prompt):
"""Read one line, using input() when appropriate."""
if self.input is sys.stdin:
return input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
request = request.strip()
if request == 'help': self.intro()
elif request == 'keywords': self.listkeywords()
elif request == 'symbols': self.listsymbols()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(request.split()[1])
elif request in self.symbols: self.showsymbol(request)
elif request in ['True', 'False', 'None']:
# special case these keywords since they are objects too
doc(eval(request), 'Help on %s:')
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:', output=self._output)
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:', output=self._output)
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python %s! This is the interactive help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/%s/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % tuple([sys.version[:3]]*2))
def list(self, items, columns=4, width=80):
items = list(sorted(items))
colw = width // columns
rows = (len(items) + columns - 1) // columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw - 1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listsymbols(self):
self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
self.list(self.symbols.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
try:
import pydoc_data.topics
except ImportError:
self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target, more_xrefs)
label, xrefs = target
try:
doc = pydoc_data.topics.topics[label]
except KeyError:
self.output.write('no documentation found for %s\n' % repr(topic))
return
pager(doc.strip() + '\n')
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
import formatter
buffer = io.StringIO()
formatter.DumbWriter(buffer).send_flowing_data(
'Related help topics: ' + ', '.join(xrefs.split()) + '\n')
self.output.write('\n%s\n' % buffer.getvalue())
def _gettopic(self, topic, more_xrefs=''):
"""Return unbuffered tuple of (topic, xrefs).
If an error occurs here, the exception is caught and displayed by
the url handler.
This function duplicates the showtopic method but returns its
result directly so it can be formatted for display in an html page.
"""
try:
import pydoc_data.topics
except ImportError:
return('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''' , '')
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
raise ValueError('could not find topic')
if isinstance(target, str):
return self._gettopic(target, more_xrefs)
label, xrefs = target
doc = pydoc_data.topics.topics[label]
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
return doc, xrefs
def showsymbol(self, symbol):
target = self.symbols[symbol]
topic, _, xrefs = target.partition(' ')
self.showtopic(topic, xrefs)
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if modname.find('.') < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
ModuleScanner().run(callback, onerror=onerror)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper()
class Scanner:
"""A generic tree iterator."""
def __init__(self, roots, children, descendp):
self.roots = roots[:]
self.state = []
self.children = children
self.descendp = descendp
def next(self):
if not self.state:
if not self.roots:
return None
root = self.roots.pop(0)
self.state = [(root, self.children(root))]
node, children = self.state[-1]
if not children:
self.state.pop()
return self.next()
child = children.pop(0)
if self.descendp(child):
self.state.append((child, self.children(child)))
return child
class ModuleScanner:
"""An interruptible scanner that searches module synopses."""
def run(self, callback, key=None, completer=None, onerror=None):
if key: key = key.lower()
self.quit = False
seen = {}
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
name = __import__(modname).__doc__ or ''
desc = name.split('\n')[0]
name = modname + ' - ' + desc
if name.lower().find(key) >= 0:
callback(None, modname, desc)
for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
if self.quit:
break
if key is None:
callback(None, modname, '')
else:
try:
loader = importer.find_module(modname)
except SyntaxError:
# raised by tests for bad coding cookies or BOM
continue
if hasattr(loader, 'get_source'):
try:
source = loader.get_source(modname)
except Exception:
if onerror:
onerror(modname)
continue
desc = source_synopsis(io.StringIO(source)) or ''
if hasattr(loader, 'get_filename'):
path = loader.get_filename(modname)
else:
path = None
else:
try:
module = loader.load_module(modname)
except ImportError:
if onerror:
onerror(modname)
continue
desc = (module.__doc__ or '').splitlines()[0]
path = getattr(module,'__file__',None)
name = modname + ' - ' + desc
if name.lower().find(key) >= 0:
callback(path, modname, desc)
if completer:
completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print(modname, desc and '- ' + desc)
def onerror(modname):
pass
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key, onerror=onerror)
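# For example, apropos('http') prints one line per module whose one-line
# synopsis matches, e.g. "http.client - HTTP/1.1 client library".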
# --------------------------------------- enhanced Web browser interface
def _start_server(urlhandler, port):
"""Start an HTTP server thread on a specific port.
Start an HTML/text server thread, so HTML or text documents can be
browsed dynamically and interactively with a Web browser. Example use:
>>> import time
>>> import pydoc
Define a URL handler. To determine what the client is asking
for, check the URL and content_type.
Then get or generate some text or HTML code and return it.
>>> def my_url_handler(url, content_type):
... text = 'the URL sent was: (%s, %s)' % (url, content_type)
... return text
Start server thread on port 0.
If you use port 0, the server will pick a random port number.
You can then use serverthread.port to get the port number.
>>> port = 0
>>> serverthread = pydoc._start_server(my_url_handler, port)
Check that the server is really started. If it is, open browser
and get first page. Use serverthread.url as the starting page.
>>> if serverthread.serving:
... import webbrowser
The next two lines are commented out so a browser doesn't open if
doctest is run on this module.
#... webbrowser.open(serverthread.url)
#True
Let the server do its thing. We just need to monitor its status.
Use time.sleep so the loop doesn't hog the CPU.
>>> starttime = time.time()
>>> timeout = 1 #seconds
This is a short timeout for testing purposes.
>>> while serverthread.serving:
... time.sleep(.01)
... if serverthread.serving and time.time() - starttime > timeout:
... serverthread.stop()
... break
Print any errors that may have occurred.
>>> print(serverthread.error)
None
"""
import http.server
import email.message
import select
import threading
class DocHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
"""Process a request from an HTML browser.
The URL received is in self.path.
Get an HTML page from self.urlhandler and send it.
"""
if self.path.endswith('.css'):
content_type = 'text/css'
else:
content_type = 'text/html'
self.send_response(200)
self.send_header('Content-Type', '%s; charset=UTF-8' % content_type)
self.end_headers()
self.wfile.write(self.urlhandler(
self.path, content_type).encode('utf-8'))
def log_message(self, *args):
# Don't log messages.
pass
class DocServer(http.server.HTTPServer):
def __init__(self, port, callback):
self.host = (sys.platform == 'mac') and '127.0.0.1' or 'localhost'
self.address = ('', port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
self.quit = False
def serve_until_quit(self):
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd:
self.handle_request()
self.server_close()
def server_activate(self):
self.base.server_activate(self)
if self.callback:
self.callback(self)
class ServerThread(threading.Thread):
def __init__(self, urlhandler, port):
self.urlhandler = urlhandler
self.port = int(port)
threading.Thread.__init__(self)
self.serving = False
self.error = None
def run(self):
"""Start the server."""
try:
DocServer.base = http.server.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = email.message.Message
DocHandler.urlhandler = staticmethod(self.urlhandler)
docsvr = DocServer(self.port, self.ready)
self.docserver = docsvr
docsvr.serve_until_quit()
except Exception as e:
self.error = e
def ready(self, server):
self.serving = True
self.host = server.host
self.port = server.server_port
self.url = 'http://%s:%d/' % (self.host, self.port)
def stop(self):
"""Stop the server and this thread nicely"""
self.docserver.quit = True
self.serving = False
self.url = None
thread = ServerThread(urlhandler, port)
thread.start()
# Wait until thread.serving is True to make sure we are
# really up before returning.
while not thread.error and not thread.serving:
time.sleep(.01)
return thread
def _url_handler(url, content_type="text/html"):
"""The pydoc url handler for use with the pydoc server.
If the content_type is 'text/css', the _pydoc.css style
    sheet is read and returned if it exists.
If the content_type is 'text/html', then the result of
get_html_page(url) is returned.
"""
class _HTMLDoc(HTMLDoc):
def page(self, title, contents):
"""Format an HTML page."""
css_path = "pydoc_data/_pydoc.css"
css_link = (
'<link rel="stylesheet" type="text/css" href="%s">' %
css_path)
return '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Pydoc: %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
%s</head><body bgcolor="#f0f0f8">%s<div style="clear:both;padding-top:.5em;">%s</div>
</body></html>''' % (title, css_link, html_navbar(), contents)
def filelink(self, url, path):
return '<a href="getfile?key=%s">%s</a>' % (url, path)
html = _HTMLDoc()
def html_navbar():
version = html.escape("%s [%s, %s]" % (platform.python_version(),
platform.python_build()[0],
platform.python_compiler()))
return """
<div style='float:left'>
Python %s<br>%s
</div>
<div style='float:right'>
<div style='text-align:center'>
<a href="index.html">Module Index</a>
: <a href="topics.html">Topics</a>
: <a href="keywords.html">Keywords</a>
</div>
<div>
<form action="get" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Get">
</form>
<form action="search" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Search">
</form>
</div>
</div>
""" % (version, html.escape(platform.platform(terse=True)))
def html_index():
"""Module Index page."""
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
heading = html.heading(
'<big><big><strong>Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
names = [name for name in sys.builtin_module_names
if name != '__main__']
contents = html.multicolumn(names, bltinlink)
contents = [heading, '<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
contents.append(html.index(dir, seen))
contents.append(
'<p align=right><font color="#909090" face="helvetica,'
'arial"><strong>pydoc</strong> by Ka-Ping Yee'
'<[email protected]></font>')
return 'Index of Modules', ''.join(contents)
def html_search(key):
"""Search results page."""
# scan for modules
search_result = []
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
search_result.append((modname, desc and '- ' + desc))
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key)
# format page
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
results = []
heading = html.heading(
'<big><big><strong>Search Results</strong></big></big>',
'#ffffff', '#7799ee')
for name, desc in search_result:
results.append(bltinlink(name) + desc)
contents = heading + html.bigsection(
'key = %s' % key, '#ffffff', '#ee77aa', '<br>'.join(results))
return 'Search Results', contents
def html_getfile(path):
"""Get and display a source file listing safely."""
path = path.replace('%20', ' ')
with tokenize.open(path) as fp:
lines = html.escape(fp.read())
body = '<pre>%s</pre>' % lines
heading = html.heading(
'<big><big><strong>File Listing</strong></big></big>',
'#ffffff', '#7799ee')
contents = heading + html.bigsection(
'File: %s' % path, '#ffffff', '#ee77aa', body)
return 'getfile %s' % path, contents
def html_topics():
"""Index of topic texts available."""
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
heading = html.heading(
'<big><big><strong>INDEX</strong></big></big>',
'#ffffff', '#7799ee')
names = sorted(Helper.topics.keys())
contents = html.multicolumn(names, bltinlink)
contents = heading + html.bigsection(
'Topics', '#ffffff', '#ee77aa', contents)
return 'Topics', contents
def html_keywords():
"""Index of keywords."""
heading = html.heading(
'<big><big><strong>INDEX</strong></big></big>',
'#ffffff', '#7799ee')
names = sorted(Helper.keywords.keys())
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
contents = html.multicolumn(names, bltinlink)
contents = heading + html.bigsection(
'Keywords', '#ffffff', '#ee77aa', contents)
return 'Keywords', contents
def html_topicpage(topic):
"""Topic or keyword help page."""
buf = io.StringIO()
htmlhelp = Helper(buf, buf)
contents, xrefs = htmlhelp._gettopic(topic)
if topic in htmlhelp.keywords:
title = 'KEYWORD'
else:
title = 'TOPIC'
heading = html.heading(
'<big><big><strong>%s</strong></big></big>' % title,
'#ffffff', '#7799ee')
contents = '<pre>%s</pre>' % html.markup(contents)
contents = html.bigsection(topic , '#ffffff','#ee77aa', contents)
if xrefs:
xrefs = sorted(xrefs.split())
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
xrefs = html.multicolumn(xrefs, bltinlink)
xrefs = html.section('Related help topics: ',
'#ffffff', '#ee77aa', xrefs)
return ('%s %s' % (title, topic),
''.join((heading, contents, xrefs)))
def html_getobj(url):
obj = locate(url, forceload=1)
if obj is None and url != 'None':
raise ValueError('could not find object')
title = describe(obj)
content = html.document(obj, url)
return title, content
def html_error(url, exc):
heading = html.heading(
'<big><big><strong>Error</strong></big></big>',
'#ffffff', '#7799ee')
contents = '<br>'.join(html.escape(line) for line in
format_exception_only(type(exc), exc))
contents = heading + html.bigsection(url, '#ffffff', '#bb0000',
contents)
return "Error - %s" % url, contents
def get_html_page(url):
"""Generate an HTML page for url."""
complete_url = url
if url.endswith('.html'):
url = url[:-5]
try:
if url in ("", "index"):
title, content = html_index()
elif url == "topics":
title, content = html_topics()
elif url == "keywords":
title, content = html_keywords()
elif '=' in url:
op, _, url = url.partition('=')
if op == "search?key":
title, content = html_search(url)
elif op == "getfile?key":
title, content = html_getfile(url)
elif op == "topic?key":
# try topics first, then objects.
try:
title, content = html_topicpage(url)
except ValueError:
title, content = html_getobj(url)
elif op == "get?key":
# try objects first, then topics.
if url in ("", "index"):
title, content = html_index()
else:
try:
title, content = html_getobj(url)
except ValueError:
title, content = html_topicpage(url)
else:
raise ValueError('bad pydoc url')
else:
title, content = html_getobj(url)
except Exception as exc:
# Catch any errors and display them in an error page.
title, content = html_error(complete_url, exc)
return html.page(title, content)
if url.startswith('/'):
url = url[1:]
if content_type == 'text/css':
path_here = os.path.dirname(os.path.realpath(__file__))
css_path = os.path.join(path_here, url)
with open(css_path) as fp:
return ''.join(fp.readlines())
elif content_type == 'text/html':
return get_html_page(url)
# Errors outside the url handler are caught by the server.
raise TypeError('unknown content type %r for url %s' % (content_type, url))
def browse(port=0, *, open_browser=True):
"""Start the enhanced pydoc Web server and open a Web browser.
Use port '0' to start the server on an arbitrary port.
Set open_browser to False to suppress opening a browser.
"""
import webbrowser
serverthread = _start_server(_url_handler, port)
if serverthread.error:
print(serverthread.error)
return
if serverthread.serving:
server_help_msg = 'Server commands: [b]rowser, [q]uit'
if open_browser:
webbrowser.open(serverthread.url)
try:
print('Server ready at', serverthread.url)
print(server_help_msg)
while serverthread.serving:
cmd = input('server> ')
cmd = cmd.lower()
if cmd == 'q':
break
elif cmd == 'b':
webbrowser.open(serverthread.url)
else:
print(server_help_msg)
except (KeyboardInterrupt, EOFError):
print()
finally:
if serverthread.serving:
serverthread.stop()
print('Server stopped')
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and x.find(os.sep) >= 0
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
class BadUsage(Exception): pass
# Scripts don't get the current directory in their path by default
# unless they are run with the '-m' switch
if '' not in sys.path:
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'bk:p:w')
writing = False
start_server = False
open_browser = False
port = None
for opt, val in opts:
if opt == '-b':
start_server = True
open_browser = True
if opt == '-k':
apropos(val)
return
if opt == '-p':
start_server = True
port = val
if opt == '-w':
writing = True
if start_server:
if port is None:
port = 0
browse(port, open_browser=open_browser)
return
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print('file %r does not exist' % arg)
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport as value:
print(value)
except (getopt.error, BadUsage):
cmd = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print("""pydoc - the Python documentation tool
{cmd} <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '{sep}', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
{cmd} -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
{cmd} -p <port>
Start an HTTP server on the given port on the local machine. Port
number 0 can be used to get an arbitrary unused port.
{cmd} -b
Start an HTTP server on an arbitrary unused port and open a Web browser
to interactively browse documentation. The -p option can be used with
the -b option to explicitly specify the server port.
{cmd} -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '{sep}', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""".format(cmd=cmd, sep=os.sep))
if __name__ == '__main__':
cli()
|
shawncaojob/LC | refs/heads/master | QUESTIONS/146_lru_cache.py | 1 | # 146. LRU Cache Add to List QuestionEditorial Solution My Submissions
# Total Accepted: 101493 Total Submissions: 636611 Difficulty: Hard Contributors: Admin
# Design and implement a data structure for Least Recently Used (LRU) cache. It should support the following operations: get and put.
#
# get(key) - Get the value (will always be positive) of the key if the key exists in the cache, otherwise return -1.
# put(key, value) - Set or insert the value if the key is not already present. When the cache reaches its capacity, it should invalidate the least recently used item before inserting a new item.
#
# Follow up:
# Could you do both operations in O(1) time complexity?
#
# Example:
#
# LRUCache cache = new LRUCache( 2 /* capacity */ );
#
# cache.put(1, 1);
# cache.put(2, 2);
# cache.get(1); // returns 1
# cache.put(3, 3); // evicts key 2
# cache.get(2); // returns -1 (not found)
# cache.put(4, 4); // evicts key 1
# cache.get(1); // returns -1 (not found)
# cache.get(3); // returns 3
# cache.get(4); // returns 4
#
#
# 2017.02.24 Rewrite
# 1. Use a doubly linked list (dd) for O(1) operations. Initialize ddHead (least recent), ddTail (most recent), mp{}, cap.
# 2. get(): if the key is in the map, move its node to the tail and return its value; otherwise return -1.
# 3. put():
#    3.1 If the key is in the map, update the value and move the node to the tail.
#    3.2 If not in the map: if at max capacity, remove head.post first, then add the new node at the tail.
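# NOTE: the test code under __main__ below uses a DLinkedNode helper that is
# never defined in this file. The class below is a minimal sketch of such a
# doubly linked list node, reconstructed from how the tests call it; the
# add/remove/popTail/moveToHead semantics are assumptions, not the original.
class DLinkedNode(object):
    def __init__(self, key, value):
        self.key = key
        self.value = value
        self.pre = None
        self.post = None
    def add(self, node):
        # Insert node right after self.
        node.pre = self
        node.post = self.post
        if self.post:
            self.post.pre = node
        self.post = node
    def remove(self):
        # Unlink self from its neighbours.
        if self.pre:
            self.pre.post = self.post
        if self.post:
            self.post.pre = self.pre
        self.pre = self.post = None
    def popTail(self):
        # Remove and return the last node of the list that self belongs to.
        tail = self
        while tail.post:
            tail = tail.post
        tail.remove()
        return tail
    def moveToHead(self, node):
        # Detach node and re-insert it right after self (self acting as head).
        node.remove()
        self.add(node)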
class LRUCache(object):
    def __init__(self, capacity):
        """
        :type capacity: int
        """
        self.cap = capacity
        self.mp = {}
        self.head = DLinkedNode(None, None)  # sentinel on the least recently used side
        self.tail = DLinkedNode(None, None)  # sentinel on the most recently used side
        self.head.post = self.tail
        self.tail.pre = self.head
    def get(self, key):
        """
        :type key: int
        :rtype: int
        """
        if key not in self.mp:
            return -1
        node = self.mp[key]
        node.remove()
        self.tail.pre.add(node)  # mark as most recently used
        return node.value
    def put(self, key, value):
        """
        :type key: int
        :type value: int
        :rtype: void
        """
        if key in self.mp:
            node = self.mp[key]
            node.value = value
            node.remove()
        else:
            if len(self.mp) == self.cap:
                lru = self.head.post  # evict least recently used
                lru.remove()
                del self.mp[lru.key]
            node = DLinkedNode(key, value)
            self.mp[key] = node
        self.tail.pre.add(node)
if __name__ == "__main__":
node1 = DLinkedNode("A", "A")
node2 = DLinkedNode("B", "B")
node1.post = node2
node2.pre = node1
node3 = DLinkedNode("C", "C")
node4 = DLinkedNode("D", "D")
node1.add(node3)
node1.add(node4)
# node3.remove()
# node4.remove()
# print(node2.value)
# node2.popTail()
# node3.popTail()
node1.moveToHead(node3)
cur = node1
while cur:
print(cur.key, cur.value, cur, cur.pre, cur.post)
cur = cur.post
|
rosmo/boto | refs/heads/develop | boto/ec2/elb/securitygroup.py | 152 | # Copyright (c) 2010 Reza Lotun http://reza.lotun.name
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class SecurityGroup(object):
def __init__(self, connection=None):
self.name = None
self.owner_alias = None
def __repr__(self):
return 'SecurityGroup(%s, %s)' % (self.name, self.owner_alias)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'GroupName':
self.name = value
elif name == 'OwnerAlias':
self.owner_alias = value
|
tanmaykm/edx-platform | refs/heads/master | lms/djangoapps/bulk_email/models.py | 9 | """
Models for bulk email
"""
import logging
import markupsafe
from django.contrib.auth.models import User
from django.db import models
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from openedx.core.djangoapps.course_groups.cohorts import get_cohort_by_name
from openedx.core.lib.html_to_text import html_to_text
from openedx.core.lib.mail_utils import wrap_message
from config_models.models import ConfigurationModel
from student.roles import CourseStaffRole, CourseInstructorRole
from openedx.core.djangoapps.xmodule_django.models import CourseKeyField
from util.keyword_substitution import substitute_keywords_with_data
from util.query import use_read_replica_if_available
log = logging.getLogger(__name__)
class Email(models.Model):
"""
Abstract base class for common information for an email.
"""
sender = models.ForeignKey(User, default=1, blank=True, null=True)
slug = models.CharField(max_length=128, db_index=True)
subject = models.CharField(max_length=128, blank=True)
html_message = models.TextField(null=True, blank=True)
text_message = models.TextField(null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta(object):
app_label = "bulk_email"
abstract = True
# Bulk email targets - the send to options that users can select from when they send email.
SEND_TO_MYSELF = 'myself'
SEND_TO_STAFF = 'staff'
SEND_TO_LEARNERS = 'learners'
SEND_TO_COHORT = 'cohort'
EMAIL_TARGET_CHOICES = zip(
[SEND_TO_MYSELF, SEND_TO_STAFF, SEND_TO_LEARNERS, SEND_TO_COHORT],
['Myself', 'Staff and instructors', 'All students', 'Specific cohort']
)
EMAIL_TARGETS = {target[0] for target in EMAIL_TARGET_CHOICES}
class Target(models.Model):
"""
A way to refer to a particular group (within a course) as a "Send to:" target.
    Django hackery in this class - polymorphism does not work well in Django,
    because each model class in an inheritance hierarchy is backed by its own
    database table. Due to this, we can't just override methods of Target in
    CohortTarget and get the child method, as one would expect. The workaround
    is to check whether a given target is a CohortTarget (self.target_type ==
    SEND_TO_COHORT), then explicitly call the method on self.cohorttarget,
    which is created by Django as part of this inheritance setup. These calls
    require a pylint no-member disable in several locations in this class.
"""
target_type = models.CharField(max_length=64, choices=EMAIL_TARGET_CHOICES)
class Meta(object):
app_label = "bulk_email"
def __unicode__(self):
return "CourseEmail Target: {}".format(self.short_display())
def short_display(self):
"""
Returns a short display name
"""
if self.target_type == SEND_TO_COHORT:
return self.cohorttarget.short_display() # pylint: disable=no-member
else:
return self.target_type
def long_display(self):
"""
Returns a long display name
"""
if self.target_type == SEND_TO_COHORT:
return self.cohorttarget.long_display() # pylint: disable=no-member
else:
return self.get_target_type_display()
def get_users(self, course_id, user_id=None):
"""
Gets the users for a given target.
Result is returned in the form of a queryset, and may contain duplicates.
"""
staff_qset = CourseStaffRole(course_id).users_with_role()
instructor_qset = CourseInstructorRole(course_id).users_with_role()
staff_instructor_qset = (staff_qset | instructor_qset)
enrollment_qset = User.objects.filter(
is_active=True,
courseenrollment__course_id=course_id,
courseenrollment__is_active=True
)
if self.target_type == SEND_TO_MYSELF:
if user_id is None:
raise ValueError("Must define self user to send email to self.")
user = User.objects.filter(id=user_id)
return use_read_replica_if_available(user)
elif self.target_type == SEND_TO_STAFF:
return use_read_replica_if_available(staff_instructor_qset)
elif self.target_type == SEND_TO_LEARNERS:
return use_read_replica_if_available(enrollment_qset.exclude(id__in=staff_instructor_qset))
elif self.target_type == SEND_TO_COHORT:
return self.cohorttarget.cohort.users.filter(id__in=enrollment_qset) # pylint: disable=no-member
else:
raise ValueError("Unrecognized target type {}".format(self.target_type))
class CohortTarget(Target):
"""
Subclass of Target, specifically referring to a cohort.
"""
cohort = models.ForeignKey('course_groups.CourseUserGroup')
class Meta:
app_label = "bulk_email"
def __init__(self, *args, **kwargs):
kwargs['target_type'] = SEND_TO_COHORT
super(CohortTarget, self).__init__(*args, **kwargs)
def short_display(self):
return "{}-{}".format(self.target_type, self.cohort.name)
def long_display(self):
return "Cohort: {}".format(self.cohort.name)
@classmethod
def ensure_valid_cohort(cls, cohort_name, course_id):
"""
Ensures cohort_name is a valid cohort for course_id.
Returns the cohort if valid, raises an error otherwise.
"""
if cohort_name is None:
raise ValueError("Cannot create a CohortTarget without specifying a cohort_name.")
try:
cohort = get_cohort_by_name(name=cohort_name, course_key=course_id)
except CourseUserGroup.DoesNotExist:
raise ValueError(
"Cohort {cohort} does not exist in course {course_id}".format(
cohort=cohort_name,
course_id=course_id
)
)
return cohort
class CourseEmail(Email):
"""
Stores information for an email to a course.
"""
class Meta(object):
app_label = "bulk_email"
course_id = CourseKeyField(max_length=255, db_index=True)
# to_option is deprecated and unused, but dropping db columns is hard so it's still here for legacy reasons
to_option = models.CharField(max_length=64, choices=[("deprecated", "deprecated")])
targets = models.ManyToManyField(Target)
template_name = models.CharField(null=True, max_length=255)
from_addr = models.CharField(null=True, max_length=255)
def __unicode__(self):
return self.subject
@classmethod
def create(
cls, course_id, sender, targets, subject, html_message,
text_message=None, template_name=None, from_addr=None, cohort_name=None):
"""
Create an instance of CourseEmail.
"""
# automatically generate the stripped version of the text from the HTML markup:
if text_message is None:
text_message = html_to_text(html_message)
new_targets = []
for target in targets:
# split target, to handle cohort:cohort_name
target_split = target.split(':', 1)
# Ensure our desired target exists
if target_split[0] not in EMAIL_TARGETS:
fmt = 'Course email being sent to unrecognized target: "{target}" for "{course}", subject "{subject}"'
msg = fmt.format(target=target, course=course_id, subject=subject)
raise ValueError(msg)
elif target_split[0] == SEND_TO_COHORT:
# target_split[1] will contain the cohort name
cohort = CohortTarget.ensure_valid_cohort(target_split[1], course_id)
new_target, _ = CohortTarget.objects.get_or_create(target_type=target_split[0], cohort=cohort)
else:
new_target, _ = Target.objects.get_or_create(target_type=target_split[0])
new_targets.append(new_target)
# create the task, then save it immediately:
course_email = cls(
course_id=course_id,
sender=sender,
subject=subject,
html_message=html_message,
text_message=text_message,
template_name=template_name,
from_addr=from_addr,
)
course_email.save() # Must exist in db before setting M2M relationship values
course_email.targets.add(*new_targets)
course_email.save()
return course_email
def get_template(self):
"""
Returns the corresponding CourseEmailTemplate for this CourseEmail.
"""
return CourseEmailTemplate.get_template(name=self.template_name)
class Optout(models.Model):
"""
Stores users that have opted out of receiving emails from a course.
"""
# Allowing null=True to support data migration from email->user.
# We need to first create the 'user' column with some sort of default in order to run the data migration,
# and given the unique index, 'null' is the best default value.
user = models.ForeignKey(User, db_index=True, null=True)
course_id = CourseKeyField(max_length=255, db_index=True)
class Meta(object):
app_label = "bulk_email"
unique_together = ('user', 'course_id')
# Defines the tag that must appear in a template, to indicate
# the location where the email message body is to be inserted.
COURSE_EMAIL_MESSAGE_BODY_TAG = '{{message_body}}'
class CourseEmailTemplate(models.Model):
"""
Stores templates for all emails to a course to use.
This is expected to be a singleton, to be shared across all courses.
Initialization takes place in a migration that in turn loads a fixture.
The admin console interface disables add and delete operations.
Validation is handled in the CourseEmailTemplateForm class.
"""
class Meta(object):
app_label = "bulk_email"
html_template = models.TextField(null=True, blank=True)
plain_template = models.TextField(null=True, blank=True)
name = models.CharField(null=True, max_length=255, unique=True, blank=True)
@staticmethod
def get_template(name=None):
"""
Fetch the current template
If one isn't stored, an exception is thrown.
"""
try:
return CourseEmailTemplate.objects.get(name=name)
except CourseEmailTemplate.DoesNotExist:
log.exception("Attempting to fetch a non-existent course email template")
raise
@staticmethod
def _render(format_string, message_body, context):
"""
Create a text message using a template, message body and context.
Convert message body (`message_body`) into an email message
using the provided template. The template is a format string,
which is rendered using format() with the provided `context` dict.
Any keywords encoded in the form %%KEYWORD%% found in the message
body are substituted with user data before the body is inserted into
the template.
Output is returned as a unicode string. It is not encoded as utf-8.
Such encoding is left to the email code, which will use the value
of settings.DEFAULT_CHARSET to encode the message.
"""
# Substitute all %%-encoded keywords in the message body
if 'user_id' in context and 'course_id' in context:
message_body = substitute_keywords_with_data(message_body, context)
result = format_string.format(**context)
# Note that the body tag in the template will now have been
# "formatted", so we need to do the same to the tag being
# searched for.
message_body_tag = COURSE_EMAIL_MESSAGE_BODY_TAG.format()
result = result.replace(message_body_tag, message_body, 1)
# finally, return the result, after wrapping long lines and without converting to an encoded byte array.
return wrap_message(result)
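    # For instance, with a hypothetical template string (not one shipped with
    # the app), _render behaves roughly like:
    #   _render("Hello {name}!\n{{message_body}}", "Welcome.", {"name": "Ada"})
    #   -> "Hello Ada!\nWelcome."
    # ({{message_body}} survives format() as {message_body} and is then
    # replaced by the message body).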
def render_plaintext(self, plaintext, context):
"""
Create plain text message.
Convert plain text body (`plaintext`) into plaintext email message using the
stored plain template and the provided `context` dict.
"""
return CourseEmailTemplate._render(self.plain_template, plaintext, context)
def render_htmltext(self, htmltext, context):
"""
Create HTML text message.
Convert HTML text body (`htmltext`) into HTML email message using the
stored HTML template and the provided `context` dict.
"""
# HTML-escape string values in the context (used for keyword substitution).
for key, value in context.iteritems():
if isinstance(value, basestring):
context[key] = markupsafe.escape(value)
return CourseEmailTemplate._render(self.html_template, htmltext, context)
class CourseAuthorization(models.Model):
"""
Enable the course email feature on a course-by-course basis.
"""
class Meta(object):
app_label = "bulk_email"
# The course that these features are attached to.
course_id = CourseKeyField(max_length=255, db_index=True, unique=True)
# Whether or not to enable instructor email
email_enabled = models.BooleanField(default=False)
@classmethod
def instructor_email_enabled(cls, course_id):
"""
Returns whether or not email is enabled for the given course id.
"""
try:
record = cls.objects.get(course_id=course_id)
return record.email_enabled
except cls.DoesNotExist:
return False
def __unicode__(self):
not_en = "Not "
if self.email_enabled:
not_en = ""
# pylint: disable=no-member
return u"Course '{}': Instructor Email {}Enabled".format(self.course_id.to_deprecated_string(), not_en)
class BulkEmailFlag(ConfigurationModel):
"""
Enables site-wide configuration for the bulk_email feature.
Staff can only send bulk email for a course if all the following conditions are true:
1. BulkEmailFlag is enabled.
2. Course-specific authorization not required, or course authorized to use bulk email.
"""
# boolean field 'enabled' inherited from parent ConfigurationModel
require_course_email_auth = models.BooleanField(default=True)
@classmethod
def feature_enabled(cls, course_id=None):
"""
Looks at the currently active configuration model to determine whether the bulk email feature is available.
If the flag is not enabled, the feature is not available.
If the flag is enabled, course-specific authorization is required, and the course_id is either not provided
        or not authorized, the feature is not available.
If the flag is enabled, course-specific authorization is required, and the provided course_id is authorized,
the feature is available.
If the flag is enabled and course-specific authorization is not required, the feature is available.
"""
if not BulkEmailFlag.is_enabled():
return False
elif BulkEmailFlag.current().require_course_email_auth:
if course_id is None:
return False
else:
return CourseAuthorization.instructor_email_enabled(course_id)
else: # implies enabled == True and require_course_email == False, so email is globally enabled
return True
class Meta(object):
app_label = "bulk_email"
def __unicode__(self):
current_model = BulkEmailFlag.current()
return u"BulkEmailFlag: enabled {}, require_course_email_auth: {}".format(
current_model.is_enabled(),
current_model.require_course_email_auth
)
|
BigBrother1984/android_external_chromium_org | refs/heads/kitkat | tools/telemetry/telemetry/core/platform/profiler/iprofiler_profiler.py | 23 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import signal
import sys
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.core.platform import profiler
# pexpect is not available on all platforms so use the third_party version.
sys.path.append(os.path.join(
util.GetChromiumSrcDir(), 'third_party', 'pexpect'))
try:
import pexpect # pylint: disable=F0401
except ImportError:
pass
class _SingleProcessIprofilerProfiler(object):
"""An internal class for using iprofiler for a given process."""
def __init__(self, pid, output_path):
self._output_path = output_path
output_dir = os.path.dirname(self._output_path)
output_file = os.path.basename(self._output_path)
self._proc = pexpect.spawn(
'iprofiler', ['-timeprofiler', '-T', '300', '-a', str(pid),
'-d', output_dir, '-o', output_file],
timeout=300)
while True:
if self._proc.getecho():
output = self._proc.readline().strip()
if not output:
continue
if 'iprofiler: Profiling process' in output:
break
print output
self._proc.interact(escape_character='\x0d')
if 'Failed to authorize rights' in output:
raise exceptions.ProfilingException(
'Failed to authorize rights for iprofiler\n')
if 'iprofiler error' in output:
raise exceptions.ProfilingException(
'Failed to start iprofiler for process %s\n' %
self._output_path.split('.')[1])
self._proc.write('\x0d')
print
def Echo():
return self._proc.getecho()
util.WaitFor(Echo, timeout=5)
def CollectProfile(self):
self._proc.kill(signal.SIGINT)
try:
self._proc.wait()
except pexpect.ExceptionPexpect:
pass
finally:
self._proc = None
print 'To view the profile, run:'
print ' open -a Instruments %s.dtps' % self._output_path
return self._output_path
class IprofilerProfiler(profiler.Profiler):
def __init__(self, browser_backend, platform_backend, output_path):
super(IprofilerProfiler, self).__init__(
browser_backend, platform_backend, output_path)
process_output_file_map = self._GetProcessOutputFileMap()
self._process_profilers = []
for pid, output_file in process_output_file_map.iteritems():
if '.utility' in output_file:
        # The utility process may not have been started by Telemetry,
        # so we won't have permission to profile it.
continue
self._process_profilers.append(
_SingleProcessIprofilerProfiler(pid, output_file))
@classmethod
def name(cls):
return 'iprofiler'
@classmethod
def is_supported(cls, options):
if sys.platform != 'darwin':
return False
if not options:
return True
return (not options.browser_type.startswith('android') and
not options.browser_type.startswith('cros'))
def CollectProfile(self):
output_files = []
for single_process in self._process_profilers:
output_files.append(single_process.CollectProfile())
return output_files
|
amrdraz/kodr | refs/heads/master | app/brython/www/src/Lib/binascii.py | 8 | """A pure Python implementation of binascii.
Rather slow and buggy in corner cases.
PyPy provides an RPython version too.
"""
# borrowed from https://bitbucket.org/pypy/pypy/src/f2bf94943a41/lib_pypy/binascii.py
class Error(Exception):
pass
class Done(Exception):
pass
class Incomplete(Exception):
pass
def a2b_uu(s):
if not s:
return ''
length = (ord(s[0]) - 0x20) % 64
def quadruplets_gen(s):
while s:
try:
yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
except IndexError:
s += ' '
yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
return
s = s[4:]
try:
result = [''.join(
[chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)),
chr(((B - 0x20) & 0xf) << 4 | (((C - 0x20) >> 2) & 0xf)),
chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3f))
]) for A, B, C, D in quadruplets_gen(s[1:].rstrip())]
except ValueError:
raise Error('Illegal char')
result = ''.join(result)
trailingdata = result[length:]
if trailingdata.strip('\x00'):
raise Error('Trailing garbage')
result = result[:length]
if len(result) < length:
result += ((length - len(result)) * '\x00')
return bytes(result, __BRYTHON__.charset)
def b2a_uu(s):
length = len(s)
if length > 45:
raise Error('At most 45 bytes at once')
def triples_gen(s):
while s:
try:
yield ord(s[0]), ord(s[1]), ord(s[2])
except IndexError:
s += '\0\0'
yield ord(s[0]), ord(s[1]), ord(s[2])
return
s = s[3:]
result = [''.join(
[chr(0x20 + (( A >> 2 ) & 0x3F)),
chr(0x20 + (((A << 4) | ((B >> 4) & 0xF)) & 0x3F)),
chr(0x20 + (((B << 2) | ((C >> 6) & 0x3)) & 0x3F)),
chr(0x20 + (( C ) & 0x3F))])
for A, B, C in triples_gen(s)]
return chr(ord(' ') + (length & 0o77)) + ''.join(result) + '\n'
table_a2b_base64 = {
'A': 0,
'B': 1,
'C': 2,
'D': 3,
'E': 4,
'F': 5,
'G': 6,
'H': 7,
'I': 8,
'J': 9,
'K': 10,
'L': 11,
'M': 12,
'N': 13,
'O': 14,
'P': 15,
'Q': 16,
'R': 17,
'S': 18,
'T': 19,
'U': 20,
'V': 21,
'W': 22,
'X': 23,
'Y': 24,
'Z': 25,
'a': 26,
'b': 27,
'c': 28,
'd': 29,
'e': 30,
'f': 31,
'g': 32,
'h': 33,
'i': 34,
'j': 35,
'k': 36,
'l': 37,
'm': 38,
'n': 39,
'o': 40,
'p': 41,
'q': 42,
'r': 43,
's': 44,
't': 45,
'u': 46,
'v': 47,
'w': 48,
'x': 49,
'y': 50,
'z': 51,
'0': 52,
'1': 53,
'2': 54,
'3': 55,
'4': 56,
'5': 57,
'6': 58,
'7': 59,
'8': 60,
'9': 61,
'+': 62,
'/': 63,
'=': 0,
}
def a2b_base64(s):
if not isinstance(s, (str, bytes)):
raise TypeError("expected string, got %r" % (s,))
s = s.rstrip()
# clean out all invalid characters, this also strips the final '=' padding
# check for correct padding
def next_valid_char(s, pos):
for i in range(pos + 1, len(s)):
c = s[i]
if c < '\x7f':
try:
table_a2b_base64[c]
return c
except KeyError:
pass
return None
quad_pos = 0
leftbits = 0
leftchar = 0
res = []
for i, c in enumerate(s):
if isinstance(c, int):
c = chr(c)
if c > '\x7f' or c == '\n' or c == '\r' or c == ' ':
continue
if c == '=':
if quad_pos < 2 or (quad_pos == 2 and next_valid_char(s, i) != '='):
continue
else:
leftbits = 0
break
try:
next_c = table_a2b_base64[c]
except KeyError:
continue
quad_pos = (quad_pos + 1) & 0x03
leftchar = (leftchar << 6) | next_c
leftbits += 6
if leftbits >= 8:
leftbits -= 8
res.append((leftchar >> leftbits & 0xff))
leftchar &= ((1 << leftbits) - 1)
if leftbits != 0:
raise Error('Incorrect padding')
return bytes(''.join([chr(i) for i in res]),__BRYTHON__.charset)
table_b2a_base64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"\
"0123456789+/"
def b2a_base64(s):
length = len(s)
final_length = length % 3
def triples_gen(s):
while s:
try:
yield s[0], s[1], s[2]
except IndexError:
s += b'\0\0'
yield s[0], s[1], s[2]
return
s = s[3:]
a = triples_gen(s[ :length - final_length])
result = [''.join(
[table_b2a_base64[( A >> 2 ) & 0x3F],
table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F],
table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F],
table_b2a_base64[( C ) & 0x3F]])
for A, B, C in a]
final = s[length - final_length:]
if final_length == 0:
snippet = ''
elif final_length == 1:
a = final[0]
snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \
table_b2a_base64[(a << 4 ) & 0x3F] + '=='
else:
a = final[0]
b = final[1]
snippet = table_b2a_base64[(a >> 2) & 0x3F] + \
table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \
table_b2a_base64[(b << 2) & 0x3F] + '='
return bytes(''.join(result) + snippet + '\n', __BRYTHON__.charset)
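# A quick round-trip sketch of the base64 pair above (values traced through
# the table logic above, shown for illustration):
#   b2a_base64(b'hi')   -> b'aGk=\n'
#   a2b_base64('aGk=')  -> b'hi'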
def a2b_qp(s, header=False):
inp = 0
odata = []
while inp < len(s):
if s[inp] == '=':
inp += 1
if inp >= len(s):
break
# Soft line breaks
if (s[inp] == '\n') or (s[inp] == '\r'):
if s[inp] != '\n':
while inp < len(s) and s[inp] != '\n':
inp += 1
if inp < len(s):
inp += 1
elif s[inp] == '=':
# broken case from broken python qp
odata.append('=')
inp += 1
elif s[inp] in hex_numbers and s[inp + 1] in hex_numbers:
ch = chr(int(s[inp:inp+2], 16))
inp += 2
odata.append(ch)
else:
odata.append('=')
elif header and s[inp] == '_':
odata.append(' ')
inp += 1
else:
odata.append(s[inp])
inp += 1
return bytes(''.join(odata), __BRYTHON__.charset)
def b2a_qp(data, quotetabs=False, istext=True, header=False):
"""quotetabs=True means that tab and space characters are always
quoted.
istext=False means that \r and \n are treated as regular characters
header=True encodes space characters with '_' and requires
real '_' characters to be quoted.
"""
MAXLINESIZE = 76
# See if this string is using CRLF line ends
lf = data.find('\n')
crlf = lf > 0 and data[lf-1] == '\r'
inp = 0
linelen = 0
odata = []
while inp < len(data):
c = data[inp]
if (c > '~' or
c == '=' or
(header and c == '_') or
(c == '.' and linelen == 0 and (inp+1 == len(data) or
data[inp+1] == '\n' or
data[inp+1] == '\r')) or
(not istext and (c == '\r' or c == '\n')) or
((c == '\t' or c == ' ') and (inp + 1 == len(data))) or
(c <= ' ' and c != '\r' and c != '\n' and
(quotetabs or (not quotetabs and (c != '\t' and c != ' '))))):
linelen += 3
if linelen >= MAXLINESIZE:
odata.append('=')
if crlf: odata.append('\r')
odata.append('\n')
linelen = 3
odata.append('=' + two_hex_digits(ord(c)))
inp += 1
else:
if (istext and
(c == '\n' or (inp+1 < len(data) and c == '\r' and
data[inp+1] == '\n'))):
linelen = 0
# Protect against whitespace on end of line
if (len(odata) > 0 and
(odata[-1] == ' ' or odata[-1] == '\t')):
ch = ord(odata[-1])
odata[-1] = '='
odata.append(two_hex_digits(ch))
if crlf: odata.append('\r')
odata.append('\n')
if c == '\r':
inp += 2
else:
inp += 1
else:
if (inp + 1 < len(data) and
data[inp+1] != '\n' and
(linelen + 1) >= MAXLINESIZE):
odata.append('=')
if crlf: odata.append('\r')
odata.append('\n')
linelen = 0
linelen += 1
if header and c == ' ':
c = '_'
odata.append(c)
inp += 1
return ''.join(odata)
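# An illustrative round trip for the quoted-printable pair above ('=' itself
# must be escaped as '=3D'):
#   b2a_qp('a=b')    -> 'a=3Db'
#   a2b_qp('a=3Db')  -> b'a=b'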
hex_numbers = '0123456789ABCDEF'
def hex(n):
if n == 0:
return '0'
if n < 0:
n = -n
sign = '-'
else:
sign = ''
arr = []
def hex_gen(n):
""" Yield a nibble at a time. """
while n:
yield n % 0x10
            n = n // 0x10  # integer division; '/' would yield a float on Python 3
for nibble in hex_gen(n):
arr = [hex_numbers[nibble]] + arr
return sign + ''.join(arr)
def two_hex_digits(n):
    return hex_numbers[n // 0x10] + hex_numbers[n % 0x10]
def strhex_to_int(s):
i = 0
for c in s:
i = i * 0x10 + hex_numbers.index(c)
return i
hqx_encoding = '!"#$%&\'()*+,-012345689@ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr'
DONE = 0x7f
SKIP = 0x7e
FAIL = 0x7d
table_a2b_hqx = [
#^@ ^A ^B ^C ^D ^E ^F ^G
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
#\b \t \n ^K ^L \r ^N ^O
FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL,
#^P ^Q ^R ^S ^T ^U ^V ^W
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
#^X ^Y ^Z ^[ ^\ ^] ^^ ^_
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
# ! " # $ % & '
FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
#( ) * + , - . /
0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL,
#0 1 2 3 4 5 6 7
0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL,
#8 9 : ; < = > ?
0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL,
#@ A B C D E F G
0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
#H I J K L M N O
0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL,
#P Q R S T U V W
0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL,
#X Y Z [ \ ] ^ _
0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL,
#` a b c d e f g
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL,
#h i j k l m n o
0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL,
#p q r s t u v w
0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL,
#x y z { | } ~ ^?
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
]
def a2b_hqx(s):
result = []
def quadruples_gen(s):
t = []
for c in s:
res = table_a2b_hqx[ord(c)]
if res == SKIP:
continue
elif res == FAIL:
raise Error('Illegal character')
elif res == DONE:
yield t
raise Done
else:
t.append(res)
if len(t) == 4:
yield t
t = []
yield t
done = 0
try:
for snippet in quadruples_gen(s):
length = len(snippet)
if length == 4:
result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2)))
result.append(chr(((snippet[2] & 0x03) << 6) | (snippet[3])))
elif length == 3:
result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2)))
elif length == 2:
result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
except Done:
done = 1
except Error:
raise
return (''.join(result), done)
# should this return a bytes object?
#return (bytes(''.join(result), __BRYTHON__.charset), done)
def b2a_hqx(s):
result =[]
def triples_gen(s):
while s:
try:
yield ord(s[0]), ord(s[1]), ord(s[2])
except IndexError:
yield tuple([ord(c) for c in s])
s = s[3:]
for snippet in triples_gen(s):
length = len(snippet)
if length == 3:
result.append(
hqx_encoding[(snippet[0] & 0xfc) >> 2])
result.append(hqx_encoding[
((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)])
result.append(hqx_encoding[
(snippet[1] & 0x0f) << 2 | ((snippet[2] & 0xc0) >> 6)])
result.append(hqx_encoding[snippet[2] & 0x3f])
elif length == 2:
result.append(
hqx_encoding[(snippet[0] & 0xfc) >> 2])
result.append(hqx_encoding[
((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)])
result.append(hqx_encoding[
(snippet[1] & 0x0f) << 2])
elif length == 1:
result.append(
hqx_encoding[(snippet[0] & 0xfc) >> 2])
result.append(hqx_encoding[
((snippet[0] & 0x03) << 4)])
return ''.join(result)
crctab_hqx = [
0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
]
def crc_hqx(s, crc):
for c in s:
crc = ((crc << 8) & 0xff00) ^ crctab_hqx[((crc >> 8) & 0xff) ^ ord(c)]
return crc
def rlecode_hqx(s):
"""
Run length encoding for binhex4.
The CPython implementation does not do run length encoding
of \x90 characters. This implementation does.
"""
if not s:
return ''
result = []
prev = s[0]
count = 1
# Add a dummy character to get the loop to go one extra round.
# The dummy must be different from the last character of s.
# In the same step we remove the first character, which has
# already been stored in prev.
if s[-1] == '!':
s = s[1:] + '?'
else:
s = s[1:] + '!'
for c in s:
if c == prev and count < 255:
count += 1
else:
if count == 1:
if prev != '\x90':
result.append(prev)
else:
result.extend(['\x90', '\x00'])
elif count < 4:
if prev != '\x90':
result.extend([prev] * count)
else:
result.extend(['\x90', '\x00'] * count)
else:
if prev != '\x90':
result.extend([prev, '\x90', chr(count)])
else:
result.extend(['\x90', '\x00', '\x90', chr(count)])
count = 1
prev = c
return ''.join(result)
def rledecode_hqx(s):
s = s.split('\x90')
result = [s[0]]
prev = s[0]
for snippet in s[1:]:
count = ord(snippet[0])
if count > 0:
result.append(prev[-1] * (count-1))
prev = snippet
else:
result.append('\x90')
prev = '\x90'
result.append(snippet[1:])
return ''.join(result)
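# A small sketch of the binhex4 run-length scheme implemented above: runs of
# four or more bytes collapse to (byte, '\x90' marker, count):
#   rlecode_hqx('aaaaaa')      -> 'a\x90\x06'
#   rledecode_hqx('a\x90\x06') -> 'aaaaaa'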
crc_32_tab = [
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
0x2d02ef8d
]
def crc32(s, crc=0):
result = 0
crc = ~int(crc) & 0xffffffff
#crc = ~long(crc) & 0xffffffffL
for c in s:
crc = crc_32_tab[(crc ^ int(ord(c))) & 0xff] ^ (crc >> 8)
#crc = crc_32_tab[(crc ^ long(ord(c))) & 0xffL] ^ (crc >> 8)
#/* Note: (crc >> 8) MUST zero fill on left
result = crc ^ 0xffffffff
if result > 2**31:
result = ((result + 2**31) % 2**32) - 2**31
return result
def b2a_hex(s):
result = []
for char in s:
c = (ord(char) >> 4) & 0xf
if c > 9:
c = c + ord('a') - 10
else:
c = c + ord('0')
result.append(chr(c))
c = ord(char) & 0xf
if c > 9:
c = c + ord('a') - 10
else:
c = c + ord('0')
result.append(chr(c))
return ''.join(result)
hexlify = b2a_hex
table_hex = [
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1, -1,-1,-1,-1,
-1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1
]
def a2b_hex(t):
result = []
def pairs_gen(s):
while s:
try:
yield table_hex[ord(s[0])], table_hex[ord(s[1])]
except IndexError:
if len(s):
raise TypeError('Odd-length string')
return
s = s[2:]
for a, b in pairs_gen(t):
if a < 0 or b < 0:
raise TypeError('Non-hexadecimal digit found')
result.append(chr((a << 4) + b))
return bytes(''.join(result), __BRYTHON__.charset)
unhexlify = a2b_hex
|
ChenglongChen/topical_word_embeddings | refs/heads/master | TWE-2/gensim/models/tfidfmodel.py | 59 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
import logging
import math
from gensim import interfaces, matutils, utils
from six import iteritems
logger = logging.getLogger('gensim.models.tfidfmodel')
def df2idf(docfreq, totaldocs, log_base=2.0, add=0.0):
"""
    Compute the default inverse document frequency for a term with document frequency `docfreq`::
      idf = add + log(totaldocs / docfreq)    (logarithm taken in base `log_base`)
"""
return add + math.log(1.0 * totaldocs / docfreq, log_base)
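# For example, with the defaults (log base 2, add 0.0), a term appearing in 10
# of 1000 documents gets idf = log2(1000 / 10) ~= 6.64:
#   df2idf(10, 1000)  # -> 6.643856...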
def precompute_idfs(wglobal, dfs, total_docs):
"""Precompute the inverse document frequency mapping for all terms."""
    # not strictly necessary and could be computed on the fly in TfidfModel.__getitem__.
# this method is here just to speed things up a little.
return dict((termid, wglobal(df, total_docs))
for termid, df in iteritems(dfs))
class TfidfModel(interfaces.TransformationABC):
"""
    Objects of this class realize the transformation of a word-document co-occurrence
    matrix (integers) into a locally/globally weighted TF-IDF matrix (positive floats).
The main methods are:
1. constructor, which calculates inverse document counts for all terms in the training corpus.
2. the [] method, which transforms a simple count representation into the TfIdf
space.
>>> tfidf = TfidfModel(corpus)
>>> print(tfidf[some_doc])
>>> tfidf.save('/tmp/foo.tfidf_model')
Model persistency is achieved via its load/save methods.
"""
def __init__(self, corpus=None, id2word=None, dictionary=None,
wlocal=utils.identity, wglobal=df2idf, normalize=True):
"""
Compute tf-idf by multiplying a local component (term frequency) with a
global component (inverse document frequency), and normalizing
the resulting documents to unit length. Formula for unnormalized weight
of term `i` in document `j` in a corpus of D documents::
weight_{i,j} = frequency_{i,j} * log_2(D / document_freq_{i})
or, more generally::
weight_{i,j} = wlocal(frequency_{i,j}) * wglobal(document_freq_{i}, D)
so you can plug in your own custom `wlocal` and `wglobal` functions.
Default for `wlocal` is identity (other options: math.sqrt, math.log1p, ...)
and default for `wglobal` is `log_2(total_docs / doc_freq)`, giving the
formula above.
`normalize` dictates how the final transformed vectors will be normalized.
`normalize=True` means set to unit length (default); `False` means don't
normalize. You can also set `normalize` to your own function that accepts
and returns a sparse vector.
If `dictionary` is specified, it must be a `corpora.Dictionary` object
and it will be used to directly construct the inverse document frequency
mapping (then `corpus`, if specified, is ignored).
"""
self.normalize = normalize
self.id2word = id2word
self.wlocal, self.wglobal = wlocal, wglobal
self.num_docs, self.num_nnz, self.idfs = None, None, None
if dictionary is not None:
# user supplied a Dictionary object, which already contains all the
# statistics we need to construct the IDF mapping. we can skip the
# step that goes through the corpus (= an optimization).
if corpus is not None:
logger.warning("constructor received both corpus and explicit "
"inverse document frequencies; ignoring the corpus")
self.num_docs, self.num_nnz = dictionary.num_docs, dictionary.num_nnz
self.dfs = dictionary.dfs.copy()
self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs)
elif corpus is not None:
self.initialize(corpus)
else:
# NOTE: everything is left uninitialized; presumably the model will
# be initialized in some other way
pass
def __str__(self):
return "TfidfModel(num_docs=%s, num_nnz=%s)" % (self.num_docs, self.num_nnz)
def initialize(self, corpus):
"""
Compute inverse document weights, which will be used to modify term
frequencies for documents.
"""
logger.info("collecting document frequencies")
dfs = {}
numnnz, docno = 0, -1
for docno, bow in enumerate(corpus):
if docno % 10000 == 0:
logger.info("PROGRESS: processing document #%i" % docno)
numnnz += len(bow)
for termid, _ in bow:
dfs[termid] = dfs.get(termid, 0) + 1
# keep some stats about the training corpus
self.num_docs = docno + 1
self.num_nnz = numnnz
self.dfs = dfs
# and finally compute the idf weights
n_features = max(dfs) if dfs else 0
logger.info("calculating IDF weights for %i documents and %i features (%i matrix non-zeros)" %
(self.num_docs, n_features, self.num_nnz))
self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs)
def __getitem__(self, bow, eps=1e-12):
"""
Return tf-idf representation of the input vector and/or corpus.
"""
# if the input vector is in fact a corpus, return a transformed corpus as a result
is_corpus, bow = utils.is_corpus(bow)
if is_corpus:
return self._apply(bow)
# unknown (new) terms will be given zero weight (NOT infinity/huge weight,
# as strict application of the IDF formula would dictate)
vector = [(termid, self.wlocal(tf) * self.idfs.get(termid))
for termid, tf in bow if self.idfs.get(termid, 0.0) != 0.0]
# and finally, normalize the vector either to unit length, or use a
# user-defined normalization function
if self.normalize is True:
vector = matutils.unitvec(vector)
elif self.normalize:
vector = self.normalize(vector)
# make sure there are no explicit zeroes in the vector (must be sparse)
vector = [(termid, weight) for termid, weight in vector if abs(weight) > eps]
return vector
#endclass TfidfModel
|
Lujeni/ansible | refs/heads/devel | lib/ansible/modules/storage/netapp/na_elementsw_ldap.py | 44 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_elementsw_ldap
short_description: NetApp Element Software Manage ldap admin users
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.7'
author: NetApp Ansible Team (@carchi8py) <[email protected]>
description:
- Enable, disable ldap, and add ldap users
options:
state:
description:
        - Whether LDAP authentication should be enabled (present) or disabled (absent).
required: true
choices: ['present', 'absent']
authType:
description:
- Identifies which user authentication method to use.
choices: ['DirectBind', 'SearchAndBind']
groupSearchBaseDn:
description:
- The base DN of the tree to start the group search (will do a subtree search from here)
groupSearchType:
description:
- Controls the default group search filter used
choices: ['NoGroup', 'ActiveDirectory', 'MemberDN']
serverURIs:
description:
- A comma-separated list of LDAP server URIs
userSearchBaseDN:
description:
- The base DN of the tree to start the search (will do a subtree search from here)
searchBindDN:
description:
        - A fully qualified DN to log in with to perform an LDAP search for the user (needs read access to the LDAP directory).
searchBindPassword:
description:
- The password for the searchBindDN account used for searching
userSearchFilter:
description:
        - The LDAP filter to use.
userDNTemplate:
description:
        - A string that is used to form a fully qualified user DN.
groupSearchCustomFilter:
description:
        - For use with the CustomFilter search type.
'''
EXAMPLES = """
- name: disable ldap authentication
na_elementsw_ldap:
state: absent
username: "{{ admin username }}"
password: "{{ admin password }}"
hostname: "{{ hostname }}"
- name: Enable ldap authentication
na_elementsw_ldap:
state: present
username: "{{ admin username }}"
password: "{{ admin password }}"
hostname: "{{ hostname }}"
authType: DirectBind
serverURIs: ldap://svmdurlabesx01spd_ldapclnt
groupSearchType: MemberDN
userDNTemplate: uid=%USERNAME%,cn=users,cn=accounts,dc=corp,dc="{{ company name }}",dc=com
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
try:
import solidfire.common
except Exception:
HAS_SF_SDK = False
class NetappElementLdap(object):
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(
state=dict(type='str', required=True, choices=['absent', 'present']),
authType=dict(type='str', choices=['DirectBind', 'SearchAndBind']),
groupSearchBaseDn=dict(type='str'),
groupSearchType=dict(type='str', choices=['NoGroup', 'ActiveDirectory', 'MemberDN']),
serverURIs=dict(type='str'),
userSearchBaseDN=dict(type='str'),
searchBindDN=dict(type='str'),
searchBindPassword=dict(type='str', no_log=True),
userSearchFilter=dict(type='str'),
userDNTemplate=dict(type='str'),
groupSearchCustomFilter=dict(type='str'),
)
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True,
)
param = self.module.params
# set up state variables
self.state = param['state']
self.authType = param['authType']
self.groupSearchBaseDn = param['groupSearchBaseDn']
self.groupSearchType = param['groupSearchType']
self.serverURIs = param['serverURIs']
if self.serverURIs is not None:
self.serverURIs = self.serverURIs.split(',')
self.userSearchBaseDN = param['userSearchBaseDN']
self.searchBindDN = param['searchBindDN']
self.searchBindPassword = param['searchBindPassword']
self.userSearchFilter = param['userSearchFilter']
self.userDNTemplate = param['userDNTemplate']
self.groupSearchCustomFilter = param['groupSearchCustomFilter']
if HAS_SF_SDK is False:
self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
else:
self.sfe = netapp_utils.create_sf_connection(module=self.module)
def get_ldap_configuration(self):
"""
Return ldap configuration if found
:return: Details about the ldap configuration. None if not found.
:rtype: solidfire.models.GetLdapConfigurationResult
"""
ldap_config = self.sfe.get_ldap_configuration()
return ldap_config
def enable_ldap(self):
"""
Enable LDAP
:return: nothing
"""
try:
self.sfe.enable_ldap_authentication(self.serverURIs, auth_type=self.authType,
group_search_base_dn=self.groupSearchBaseDn,
group_search_type=self.groupSearchType,
group_search_custom_filter=self.groupSearchCustomFilter,
search_bind_dn=self.searchBindDN,
search_bind_password=self.searchBindPassword,
user_search_base_dn=self.userSearchBaseDN,
user_search_filter=self.userSearchFilter,
user_dntemplate=self.userDNTemplate)
except solidfire.common.ApiServerError as error:
            self.module.fail_json(msg='Error enabling LDAP: %s' % to_native(error),
exception=traceback.format_exc())
def check_config(self, ldap_config):
"""
Check to see if the ldap config has been modified.
:param ldap_config: The LDAP configuration
:return: False if the config is the same as the playbook, True if it is not
"""
if self.authType != ldap_config.ldap_configuration.auth_type:
return True
if self.serverURIs != ldap_config.ldap_configuration.server_uris:
return True
if self.groupSearchBaseDn != ldap_config.ldap_configuration.group_search_base_dn:
return True
if self.groupSearchType != ldap_config.ldap_configuration.group_search_type:
return True
if self.groupSearchCustomFilter != ldap_config.ldap_configuration.group_search_custom_filter:
return True
if self.searchBindDN != ldap_config.ldap_configuration.search_bind_dn:
return True
if self.searchBindPassword != ldap_config.ldap_configuration.search_bind_password:
return True
if self.userSearchBaseDN != ldap_config.ldap_configuration.user_search_base_dn:
return True
if self.userSearchFilter != ldap_config.ldap_configuration.user_search_filter:
return True
if self.userDNTemplate != ldap_config.ldap_configuration.user_dntemplate:
return True
return False
def apply(self):
changed = False
ldap_config = self.get_ldap_configuration()
if self.state == 'absent':
if ldap_config and ldap_config.ldap_configuration.enabled:
changed = True
if self.state == 'present' and self.check_config(ldap_config):
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
self.enable_ldap()
elif self.state == 'absent':
self.sfe.disable_ldap_authentication()
self.module.exit_json(changed=changed)
def main():
v = NetappElementLdap()
v.apply()
if __name__ == '__main__':
main()
|
jnerin/ansible | refs/heads/devel | lib/ansible/template/safe_eval.py | 39 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import sys
from ansible import constants as C
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves import builtins
from ansible.plugins.loader import filter_loader, test_loader
def safe_eval(expr, locals=None, include_exceptions=False):
'''
This is intended for allowing things like:
with_items: a_list_variable
Where Jinja2 would return a string but we do not want to allow it to
call functions (outside of Jinja2, where the env is constrained).
Based on:
http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
'''
locals = {} if locals is None else locals
# define certain JSON types
# eg. JSON booleans are unknown to python eval()
JSON_TYPES = {
'false': False,
'null': None,
'true': True,
}
# this is the whitelist of AST nodes we are going to
# allow in the evaluation. Any node type other than
# those listed here will raise an exception in our custom
# visitor class defined below.
SAFE_NODES = set(
(
ast.Add,
ast.BinOp,
# ast.Call,
ast.Compare,
ast.Dict,
ast.Div,
ast.Expression,
ast.List,
ast.Load,
ast.Mult,
ast.Num,
ast.Name,
ast.Str,
ast.Sub,
ast.USub,
ast.Tuple,
ast.UnaryOp,
)
)
# AST node types were expanded after 2.6
if sys.version_info[:2] >= (2, 7):
SAFE_NODES.update(
set(
(ast.Set,)
)
)
# And in Python 3.4 too
if sys.version_info[:2] >= (3, 4):
SAFE_NODES.update(
set(
(ast.NameConstant,)
)
)
filter_list = []
for filter in filter_loader.all():
filter_list.extend(filter.filters().keys())
test_list = []
for test in test_loader.all():
test_list.extend(test.tests().keys())
CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False):
if type(node) not in SAFE_NODES:
raise Exception("invalid expression (%s)" % expr)
elif isinstance(node, ast.Call):
inside_call = True
elif isinstance(node, ast.Name) and inside_call:
# Disallow calls to builtin functions that we have not vetted
# as safe. Other functions are excluded by setting locals in
# the call to eval() later on
if hasattr(builtins, node.id) and node.id not in CALL_WHITELIST:
raise Exception("invalid function: %s" % node.id)
# iterate over all child nodes
for child_node in ast.iter_child_nodes(node):
self.generic_visit(child_node, inside_call)
if not isinstance(expr, string_types):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (expr, None)
return expr
cnv = CleansingNodeVisitor()
try:
parsed_tree = ast.parse(expr, mode='eval')
cnv.visit(parsed_tree)
compiled = compile(parsed_tree, expr, 'eval')
# Note: passing our own globals and locals here constrains what
# callables (and other identifiers) are recognized. this is in
# addition to the filtering of builtins done in CleansingNodeVisitor
result = eval(compiled, JSON_TYPES, dict(locals))
if include_exceptions:
return (result, None)
else:
return result
except SyntaxError as e:
# special handling for syntax errors, we just return
# the expression string back as-is to support late evaluation
if include_exceptions:
return (expr, None)
return expr
except Exception as e:
if include_exceptions:
return (expr, e)
return expr
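# Example behaviour (a sketch, assuming the pre-3.8 AST node names this module
# whitelists; literals evaluate, while calls are rejected and the raw string
# is returned unchanged):
#   safe_eval("[1, 2, 3]")         -> [1, 2, 3]
#   safe_eval("{'a': true}")       -> {'a': True}   (JSON literals are mapped)
#   safe_eval("__import__('os')")  -> "__import__('os')"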
|
suqinhuang/virt-test | refs/heads/master | libvirt/update_config.py | 15 | #!/usr/bin/python
"""
Populate/update config files for virt-test
@copyright: Red Hat 2013
"""
import os, sys
import common
from autotest.client.shared import logging_manager
from virttest import data_dir, bootstrap, utils_misc
test_dir = os.path.dirname(sys.modules[__name__].__file__)
test_dir = os.path.abspath(test_dir)
t_type = os.path.basename(test_dir)
shared_dir = os.path.join(data_dir.get_root_dir(), "shared")
if __name__ == "__main__":
import optparse
option_parser = optparse.OptionParser()
option_parser.add_option("-v", "--verbose",
action="store_true", dest="verbose",
help="Exhibit debug messages")
options, args = option_parser.parse_args()
if options.verbose:
logging_manager.configure_logging(utils_misc.VirtLoggingConfig(),
verbose=options.verbose)
bootstrap.create_config_files(test_dir, shared_dir, interactive=False,
force_update=True)
bootstrap.create_subtests_cfg(t_type)
bootstrap.create_guest_os_cfg(t_type)
|
ZuluPro/libcloud | refs/heads/trunk | libcloud/__init__.py | 2 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
libcloud provides a unified interface to cloud computing resources.
:var __version__: Current version of libcloud
"""
import os
import codecs
from libcloud.base import DriverType # NOQA
from libcloud.base import DriverTypeFactoryMap # NOQA
from libcloud.base import get_driver # NOQA
try:
import paramiko
have_paramiko = True
except ImportError:
have_paramiko = False
__all__ = [
'__version__',
'enable_debug'
]
__version__ = '2.2.2dev'
def enable_debug(fo):
"""
Enable library wide debugging to a file-like object.
:param fo: Where to append debugging information
:type fo: File like object, only write operations are used.
"""
from libcloud.common.base import Connection
from libcloud.utils.loggingconnection import LoggingConnection
LoggingConnection.log = fo
Connection.conn_class = LoggingConnection
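# Usage sketch (hypothetical log path, shown for illustration only):
#   enable_debug(codecs.open('/tmp/libcloud.log', 'a', encoding='utf8'))
# makes every request/response on libcloud connections get appended there.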
def _init_once():
"""
    Utility function that is run once on library import.
    This checks for the LIBCLOUD_DEBUG environment variable which, if set,
    names the file where debug information about the provider transports
    is logged.
"""
path = os.getenv('LIBCLOUD_DEBUG')
if path:
mode = 'a'
        # Late import to avoid setup.py related side effects.
        from libcloud.utils.py3 import PY3
        # Special case for /dev/stderr and /dev/stdout on Python 3:
        # opening those files in append mode would throw an "illegal seek"
        # exception there.
if path in ['/dev/stderr', '/dev/stdout'] and PY3:
mode = 'w'
fo = codecs.open(path, mode, encoding='utf8')
enable_debug(fo)
if have_paramiko:
paramiko.common.logging.basicConfig(level=paramiko.common.DEBUG)
_init_once()
|
AymericBebert/MusicLearning | refs/heads/master | src/file_actions.py | 1 | #!/usr/bin/env python3
# -*-coding:utf-8-*-
"""
This module is used to read and write sound files
Structure of a sound object : sound[canal][sample] (type int list list)
"""
import os
import wave
from subprocess import call
import glob
import numpy as np
from pydub import AudioSegment
from log import writeLog
from timer import timer_start, timer_stop
## Conversion functions between wav files and sounds (lists) ##
def display_params(soundParams):
"""Display some sound metadata"""
nbch, ss, sf, nbs = soundParams[:4]
writeLog("info", "Number of channels: {}".format(nbch), {"inFile": False})
writeLog("info", "Sample size in bytes: {}".format(ss), {"inFile": False})
writeLog("info", "Sampling frequency: {}".format(sf), {"inFile": False})
writeLog("info", "Number of samples: {}".format(nbs), {"inFile": False})
writeLog("info", "Duration : {:.1f}s".format(nbs/sf), {"inFile": False})
def extract_sound(soundFileLoc):
"""Extract the wav file soundFileLoc into a numpy array, shape (nbch, nbs)"""
timer_start("Extracting {}".format(soundFileLoc))
err_return = np.array([[]]), (0, 0, 0, 0, "NONE", "not compressed")
if os.path.splitext(soundFileLoc)[1].lower() in (".mp3", ".wav", ".au"):
try:
audiofile = AudioSegment.from_file(soundFileLoc)
except Exception:
writeLog("error", "File not found or other I/O error. (DECODING FAILED)")
return err_return
if audiofile.sample_width == 2:
            data = np.frombuffer(audiofile.raw_data, np.int16)
elif audiofile.sample_width == 4:
            data = np.frombuffer(audiofile.raw_data, np.int32)
else:
writeLog("error", "extract_sound(): sample_width is not 2 or 4")
return err_return
sf = audiofile.frame_rate
x = []
for chn in range(audiofile.channels):
x.append(data[chn::audiofile.channels])
x = np.array(x)
else:
writeLog("error", "readAudioFile(): Unknown file type!")
return err_return
timer_stop("Extracting {}".format(soundFileLoc))
# number of channels, sample size, sampling frequency, number of samples
return x, (x.shape[0], audiofile.sample_width, sf, x.shape[1], "NONE", "not compressed")
def write_sound(sound, soundParams, soundFileLoc):
"""Write sound in wav file soundFileLoc, croping values (saturation)"""
timer_start("Writing {}".format(soundFileLoc))
nbch, ss, _, nbs = soundParams[:4]
if nbch == 1:
rawSound = sound[0]
elif nbch == 2:
rawSound = sound.T.reshape((1, 2*sound.shape[1]))[0, :]
else:
writeLog("error", "Not a mono nor stereo file")
soundFile = wave.open(soundFileLoc, "w")
soundFile.setparams(soundParams)
# writing binary of nbch*nbs int, taken from rawSound
soundFile.writeframes(wave.struct.pack("<"+str(nbch*nbs)+("-bhiq"[ss]), *rawSound))
soundFile.close()
timer_stop("Writing {}".format(soundFileLoc))
print("Fichier {} created!".format(soundFileLoc))
### Function working on sounds ###
def empty_sound(nbch, nbs, dtype=np.int16):
"""Generates an empty sound"""
return np.zeros((nbch, nbs), dtype=dtype)
def convert_to_mono(sound):
"""Converts a stereo sound to mono"""
nbch = len(sound)
if nbch == 1:
return sound
elif nbch == 2:
return np.array([(sound[0]/2 + sound[1]/2)]).astype(sound.dtype)
else:
writeLog("error", "Not a mono nor stereo file")
def convert_to_stereo(sound):
"""Converts a mono sound to stereo"""
nbch = len(sound)
if nbch == 2:
return sound
elif nbch == 1:
return np.array([sound[0], sound[0]])
else:
writeLog("error", "Not a mono nor stereo file")
### wav <-> mp3 conversion functions and direct mp3 handling ###
def to_mp3(ini, out, bitrate="320k"):
"""Converts the file to an mp3 file"""
options = ["-threads", "auto", "-y", "-loglevel", "quiet"]
call(["avconv", "-i", ini, "-c:a", "libmp3lame", "-ab", bitrate] + options + [out])
writeLog("debug", "File {} created!".format(out))
def to_wav(ini, out):
"""Converts the file to an wav file"""
# options = ["-threads", "auto", "-y", "-loglevel", "quiet"]
# call(["avconv", "-i", ini] + options + [out])
call(["mpg123", "-w", out, ini])
writeLog("debug", "File {} created!".format(out))
def folder_mp3_to_wav():
"""Convert all not already converted mp3 to wav"""
    for f in os.listdir("../data/samplesMP3"):
samplesMP3 = glob.glob("../data/samplesMP3/{}/*.mp3".format(f))
for s in samplesMP3:
trackName = os.path.splitext(os.path.split(s)[1])[0]
swav = "../data/samples/{}/{}.wav".format(f, trackName)
if not os.path.isdir(os.path.split(swav)[0]):
os.mkdir(os.path.split(swav)[0])
if not os.path.isfile(swav):
to_wav(s, swav)
writeLog("debug", "MP3 to WAV conversions finished.")
def mp3_mp3(fonction, soundFileLoc1, soundFileLoc2, moreArgs=None, bitrate="320k"):
"""Wraps a wav -> wav function to make it mp3 -> mp3"""
if moreArgs is None:
moreArgs = []
to_wav(soundFileLoc1, "./temp1.wav")
fonction("./temp1.wav", "./temp2.wav", *moreArgs)
to_mp3("./temp2.wav", soundFileLoc2, bitrate)
os.remove("./temp1.wav")
os.remove("./temp2.wav")
|
JohnDenker/brython | refs/heads/master | www/src/Lib/test/test_poll.py | 23 | # Test case for the os.poll() function
import os
import random
import select
import _testcapi
try:
import threading
except ImportError:
threading = None
import time
import unittest
from test.support import TESTFN, run_unittest, reap_threads
try:
select.poll
except AttributeError:
raise unittest.SkipTest("select.poll not defined -- skipping test_poll")
def find_ready_matching(ready, flag):
match = []
for fd, mode in ready:
if mode & flag:
match.append(fd)
return match
class PollTests(unittest.TestCase):
def test_poll1(self):
# Basic functional test of poll object
        # Create a bunch of pipes and test that poll works with them.
p = select.poll()
NUM_PIPES = 12
MSG = b" This is a test."
MSG_LEN = len(MSG)
readers = []
writers = []
r2w = {}
w2r = {}
for i in range(NUM_PIPES):
rd, wr = os.pipe()
p.register(rd)
p.modify(rd, select.POLLIN)
p.register(wr, select.POLLOUT)
readers.append(rd)
writers.append(wr)
r2w[rd] = wr
w2r[wr] = rd
bufs = []
while writers:
ready = p.poll()
ready_writers = find_ready_matching(ready, select.POLLOUT)
if not ready_writers:
raise RuntimeError("no pipes ready for writing")
wr = random.choice(ready_writers)
os.write(wr, MSG)
ready = p.poll()
ready_readers = find_ready_matching(ready, select.POLLIN)
if not ready_readers:
raise RuntimeError("no pipes ready for reading")
rd = random.choice(ready_readers)
buf = os.read(rd, MSG_LEN)
self.assertEqual(len(buf), MSG_LEN)
bufs.append(buf)
            os.close(r2w[rd])
            os.close(rd)
            p.unregister(r2w[rd])
            p.unregister(rd)
writers.remove(r2w[rd])
self.assertEqual(bufs, [MSG] * NUM_PIPES)
def poll_unit_tests(self):
# returns NVAL for invalid file descriptor
FD = 42
try:
os.close(FD)
except OSError:
pass
p = select.poll()
p.register(FD)
r = p.poll()
self.assertEqual(r[0], (FD, select.POLLNVAL))
f = open(TESTFN, 'w')
fd = f.fileno()
p = select.poll()
p.register(f)
r = p.poll()
self.assertEqual(r[0][0], fd)
f.close()
r = p.poll()
self.assertEqual(r[0], (fd, select.POLLNVAL))
os.unlink(TESTFN)
# type error for invalid arguments
p = select.poll()
self.assertRaises(TypeError, p.register, p)
self.assertRaises(TypeError, p.unregister, p)
# can't unregister non-existent object
p = select.poll()
self.assertRaises(KeyError, p.unregister, 3)
# Test error cases
pollster = select.poll()
class Nope:
pass
class Almost:
def fileno(self):
return 'fileno'
self.assertRaises(TypeError, pollster.register, Nope(), 0)
self.assertRaises(TypeError, pollster.register, Almost(), 0)
# Another test case for poll(). This is copied from the test case for
# select(), modified to use poll() instead.
def test_poll2(self):
cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
p = os.popen(cmd, 'r')
pollster = select.poll()
pollster.register( p, select.POLLIN )
for tout in (0, 1000, 2000, 4000, 8000, 16000) + (-1,)*10:
fdlist = pollster.poll(tout)
if (fdlist == []):
continue
fd, flags = fdlist[0]
if flags & select.POLLHUP:
line = p.readline()
if line != "":
self.fail('error: pipe seems to be closed, but still returns data')
continue
elif flags & select.POLLIN:
line = p.readline()
if not line:
break
continue
else:
self.fail('Unexpected return value from select.poll: %s' % fdlist)
p.close()
def test_poll3(self):
# test int overflow
pollster = select.poll()
pollster.register(1)
self.assertRaises(OverflowError, pollster.poll, 1 << 64)
x = 2 + 3
if x != 5:
self.fail('Overflow must have occurred')
pollster = select.poll()
# Issue 15989
self.assertRaises(OverflowError, pollster.register, 0,
_testcapi.SHRT_MAX + 1)
self.assertRaises(OverflowError, pollster.register, 0,
_testcapi.USHRT_MAX + 1)
self.assertRaises(OverflowError, pollster.poll, _testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, pollster.poll, _testcapi.UINT_MAX + 1)
@unittest.skipUnless(threading, 'Threading required for this test.')
@reap_threads
def test_threaded_poll(self):
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
rfds = []
for i in range(10):
fd = os.dup(r)
self.addCleanup(os.close, fd)
rfds.append(fd)
pollster = select.poll()
for fd in rfds:
pollster.register(fd, select.POLLIN)
t = threading.Thread(target=pollster.poll)
t.start()
try:
time.sleep(0.5)
# trigger ufds array reallocation
for fd in rfds:
pollster.unregister(fd)
pollster.register(w, select.POLLOUT)
self.assertRaises(RuntimeError, pollster.poll)
finally:
# and make the call to poll() from the thread return
os.write(w, b'spam')
t.join()
def test_main():
run_unittest(PollTests)
if __name__ == '__main__':
test_main()
|
foobarbazblarg/stayclean | refs/heads/master | stayclean-2016-february/participant.py | 60 | import datetime
class Participant:
def __init__(self):
self.name = ""
self.isStillIn = True
self.hasCheckedIn = False
self.relapseDate = None
@property
def hasRelapsed(self):
return self.relapseDate is not None
def setFromLine(self, lineString):
        # format of participants.txt line (relapse date appended if relapsed):
        # name hasCheckedIn isStillIn [relapseDate]
        # e.g.:
        # foobarbazblarg True True
        # foobarbazblarg True False 2016.02.14
words = lineString.split()
self.name = words[0]
self.hasCheckedIn = words[1] == 'True'
self.isStillIn = words[2] == 'True'
if len(words) >= 4:
self.relapseDate = datetime.datetime.strptime(words[3], "%Y.%m.%d").date()
def relapseNowIfNotAlready(self):
if self.isStillIn:
self.isStillIn = False
self.relapseDate = datetime.date.today()
def relapseDayOfWeekIndex(self):
if self.relapseDate:
return self.relapseDate.weekday()
else:
return None
def relapseDayOfWeekName(self):
        if self.relapseDayOfWeekIndex() is not None:
return {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}[self.relapseDayOfWeekIndex()]
else:
return None
def asLine(self):
answer = self.name + " " + str(self.hasCheckedIn) + " " + str(self.isStillIn)
if self.relapseDate:
answer += " "
answer += self.relapseDate.strftime("%Y.%m.%d")
return answer
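# Round-trip sketch (sample line invented for illustration; 2016.02.14 was a
# Sunday, so the weekday helpers should agree):
if __name__ == '__main__':
    p = Participant()
    p.setFromLine("foobarbazblarg True False 2016.02.14")
    assert p.hasRelapsed and p.relapseDayOfWeekName() == 'Sunday'
    assert p.asLine() == "foobarbazblarg True False 2016.02.14"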
|
benoitsteiner/tensorflow-opencl | refs/heads/master | tensorflow/contrib/learn/python/learn/utils/gc.py | 45 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""System for specifying garbage collection (GC) of path based data.
This framework allows for GC of data specified by path names, for example files
on disk. gc.Path objects each represent a single item stored at a path and may
be a base directory,
/tmp/exports/0/...
/tmp/exports/1/...
...
or a fully qualified file,
/tmp/train-1.ckpt
/tmp/train-2.ckpt
...
A gc filter function takes and returns a list of gc.Path items. Filter
functions are responsible for selecting Path items for preservation or deletion.
Note that functions should always return a sorted list.
For example,
base_dir = "/tmp"
# Create the directories.
for e in xrange(10):
os.mkdir("%s/%d" % (base_dir, e), 0o755)
# Create a simple parser that pulls the export_version from the directory.
path_regex = "^" + re.escape(base_dir) + "/(\\d+)$"
def parser(path):
match = re.match(path_regex, path.path)
if not match:
return None
return path._replace(export_version=int(match.group(1)))
path_list = gc.get_paths("/tmp", parser) # contains all ten Paths
every_fifth = gc.mod_export_version(5)
print(every_fifth(path_list)) # shows ["/tmp/0", "/tmp/5"]
largest_three = gc.largest_export_versions(3)
  print(largest_three(path_list)) # shows ["/tmp/7", "/tmp/8", "/tmp/9"]
both = gc.union(every_fifth, largest_three)
  print(both(path_list)) # shows ["/tmp/0", "/tmp/5",
# "/tmp/7", "/tmp/8", "/tmp/9"]
# Delete everything not in 'both'.
to_delete = gc.negation(both)
  for p in to_delete(path_list):
gfile.DeleteRecursively(p.path) # deletes: "/tmp/1", "/tmp/2",
# "/tmp/3", "/tmp/4", "/tmp/6",
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import heapq
import math
import os
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
Path = collections.namedtuple('Path', 'path export_version')
def largest_export_versions(n):
"""Creates a filter that keeps the largest n export versions.
Args:
n: number of versions to keep.
Returns:
A filter function that keeps the n largest paths.
"""
def keep(paths):
heap = []
for idx, path in enumerate(paths):
if path.export_version is not None:
heapq.heappush(heap, (path.export_version, idx))
keepers = [paths[i] for _, i in heapq.nlargest(n, heap)]
return sorted(keepers)
return keep
def one_of_every_n_export_versions(n):
"""Creates a filter that keeps one of every n export versions.
Args:
n: interval size.
Returns:
A filter function that keeps exactly one path from each interval
[0, n], (n, 2n], (2n, 3n], etc... If more than one path exists in an
interval the largest is kept.
"""
def keep(paths):
"""A filter function that keeps exactly one out of every n paths."""
keeper_map = {} # map from interval to largest path seen in that interval
for p in paths:
if p.export_version is None:
# Skip missing export_versions.
continue
# Find the interval (with a special case to map export_version = 0 to
      # interval 0).
interval = math.floor(
(p.export_version - 1) / n) if p.export_version else 0
existing = keeper_map.get(interval, None)
if (not existing) or (existing.export_version < p.export_version):
keeper_map[interval] = p
return sorted(keeper_map.values())
return keep
def mod_export_version(n):
"""Creates a filter that keeps every export that is a multiple of n.
Args:
n: step size.
Returns:
A filter function that keeps paths where export_version % n == 0.
"""
def keep(paths):
keepers = []
for p in paths:
if p.export_version % n == 0:
keepers.append(p)
return sorted(keepers)
return keep
def union(lf, rf):
"""Creates a filter that keeps the union of two filters.
Args:
lf: first filter
rf: second filter
Returns:
    A filter function that keeps paths kept by either input filter.
"""
def keep(paths):
l = set(lf(paths))
r = set(rf(paths))
return sorted(list(l|r))
return keep
def negation(f):
"""Negate a filter.
Args:
f: filter function to invert
Returns:
A filter function that returns the negation of f.
"""
def keep(paths):
l = set(paths)
r = set(f(paths))
return sorted(list(l-r))
return keep
def get_paths(base_dir, parser):
"""Gets a list of Paths in a given directory.
Args:
base_dir: directory.
parser: a function which gets the raw Path and can augment it with
information such as the export_version, or ignore the path by returning
None. An example parser may extract the export version from a path
such as "/tmp/exports/100" an another may extract from a full file
name such as "/tmp/checkpoint-99.out".
Returns:
A list of Paths contained in the base directory with the parsing function
applied.
By default the following fields are populated,
- Path.path
The parsing function is responsible for populating,
- Path.export_version
"""
raw_paths = gfile.ListDirectory(base_dir)
paths = []
for r in raw_paths:
p = parser(Path(os.path.join(compat.as_str_any(base_dir),
compat.as_str_any(r)),
None))
if p:
paths.append(p)
return sorted(paths)
|
ayepezv/GAD_ERP | refs/heads/master | addons/auth_oauth/models/auth_oauth.py | 39 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class AuthOAuthProvider(models.Model):
"""Class defining the configuration values of an OAuth2 provider"""
_name = 'auth.oauth.provider'
_description = 'OAuth2 provider'
_order = 'name'
name = fields.Char(string='Provider name', required=True) # Name of the OAuth2 entity, Google, etc
client_id = fields.Char(string='Client ID') # Our identifier
auth_endpoint = fields.Char(string='Authentication URL', required=True) # OAuth provider URL to authenticate users
    scope = fields.Char() # OAuth user data we want to access
validation_endpoint = fields.Char(string='Validation URL', required=True) # OAuth provider URL to validate tokens
data_endpoint = fields.Char(string='Data URL')
enabled = fields.Boolean(string='Allowed')
css_class = fields.Char(string='CSS class', default='zocial')
body = fields.Char(required=True)
sequence = fields.Integer()
|
Simon-Tang/onedrive-d-old | refs/heads/future | onedrive_d/od_main.py | 7 | #!/usr/bin/python3
"""
Main entry of onedrive-d.
"""
import sys
import click
import daemonocle
from . import od_glob
# this runs before any daemonocle code
config = od_glob.get_config_instance()
is_debug_mode = False
for arg in sys.argv:
if arg == '--debug':
od_glob.get_logger(config.params['MIN_LOG_LEVEL']).debug('running in debug mode.')
is_debug_mode = True
break
if not is_debug_mode:
    od_glob.get_logger(config.params['MIN_LOG_LEVEL'], config.params['LOG_FILE_PATH']).debug('running in daemon mode.')
@click.command(cls=daemonocle.cli.DaemonCLI, daemon_params={'pidfile': config.APP_CONF_PATH + '/onedrive.pid'})
def main():
mon = None
if not config.params['USE_GUI']:
from . import od_mon_cli
mon = od_mon_cli.Monitor()
else:
from . import od_mon_gtk
mon = od_mon_gtk.Monitor()
# start monitor engine
try:
mon.start()
except KeyboardInterrupt:
# for debugging, dump task db
mon.stop()
if __name__ == "__main__":
main()
|
pkruskal/scikit-learn | refs/heads/master | sklearn/neighbors/__init__.py | 306 | """
The :mod:`sklearn.neighbors` module implements the k-nearest neighbors
algorithm.
"""
from .ball_tree import BallTree
from .kd_tree import KDTree
from .dist_metrics import DistanceMetric
from .graph import kneighbors_graph, radius_neighbors_graph
from .unsupervised import NearestNeighbors
from .classification import KNeighborsClassifier, RadiusNeighborsClassifier
from .regression import KNeighborsRegressor, RadiusNeighborsRegressor
from .nearest_centroid import NearestCentroid
from .kde import KernelDensity
from .approximate import LSHForest
__all__ = ['BallTree',
'DistanceMetric',
'KDTree',
'KNeighborsClassifier',
'KNeighborsRegressor',
'NearestCentroid',
'NearestNeighbors',
'RadiusNeighborsClassifier',
'RadiusNeighborsRegressor',
'kneighbors_graph',
'radius_neighbors_graph',
'KernelDensity',
'LSHForest']
|
JianyuWang/nova | refs/heads/master | nova/tests/unit/api/openstack/compute/test_plugin_framework.py | 17 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from nova import test
from nova.tests.unit.api.openstack import fakes
CONF = cfg.CONF
class PluginTest(test.NoDBTestCase):
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace")
def test_plugin_framework_index(self, mock_namespace):
mock_namespace.return_value = 'nova.api.v21.test_extensions'
app = fakes.wsgi_app_v21(init_only='test-basic')
req = fakes.HTTPRequest.blank('/v2/fake/test')
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('val', resp_json['param'])
|
leiferikb/bitpop | refs/heads/master | src/tools/bisect-builds.py | 4 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Snapshot Build Bisect Tool
This script bisects a snapshot archive using binary search. It starts at
a bad revision (it will try to guess HEAD) and asks for a last known-good
revision. It will then binary search across this revision range by downloading,
unzipping, and opening Chromium for you. After testing the specific revision,
it will ask you whether it is good or bad before continuing the search.
"""
# The root URL for storage.
CHROMIUM_BASE_URL = 'http://commondatastorage.googleapis.com/chromium-browser-snapshots'
WEBKIT_BASE_URL = 'http://commondatastorage.googleapis.com/chromium-webkit-snapshots'
# The root URL for official builds.
OFFICIAL_BASE_URL = 'http://master.chrome.corp.google.com/official_builds'
# Changelogs URL.
CHANGELOG_URL = 'http://build.chromium.org/f/chromium/' \
'perf/dashboard/ui/changelog.html?' \
'url=/trunk/src&range=%d%%3A%d'
# Official Changelogs URL.
OFFICIAL_CHANGELOG_URL = 'http://omahaproxy.appspot.com/'\
'changelog?old_version=%s&new_version=%s'
# DEPS file URL.
DEPS_FILE = 'http://src.chromium.org/viewvc/chrome/trunk/src/DEPS?revision=%d'
# Blink Changelogs URL.
BLINK_CHANGELOG_URL = 'http://build.chromium.org/f/chromium/' \
'perf/dashboard/ui/changelog_blink.html?' \
'url=/trunk&range=%d%%3A%d'
DONE_MESSAGE_GOOD_MIN = 'You are probably looking for a change made after %s ' \
'(known good), but no later than %s (first known bad).'
DONE_MESSAGE_GOOD_MAX = 'You are probably looking for a change made after %s ' \
'(known bad), but no later than %s (first known good).'
###############################################################################
import json
import optparse
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import threading
import urllib
from distutils.version import LooseVersion
from xml.etree import ElementTree
import zipfile
class PathContext(object):
"""A PathContext is used to carry the information used to construct URLs and
paths when dealing with the storage server and archives."""
def __init__(self, base_url, platform, good_revision, bad_revision,
is_official, is_aura, flash_path = None, pdf_path = None):
super(PathContext, self).__init__()
# Store off the input parameters.
self.base_url = base_url
self.platform = platform # What's passed in to the '-a/--archive' option.
self.good_revision = good_revision
self.bad_revision = bad_revision
self.is_official = is_official
self.is_aura = is_aura
self.flash_path = flash_path
self.pdf_path = pdf_path
# The name of the ZIP file in a revision directory on the server.
self.archive_name = None
# Set some internal members:
# _listing_platform_dir = Directory that holds revisions. Ends with a '/'.
# _archive_extract_dir = Uncompressed directory in the archive_name file.
# _binary_name = The name of the executable to run.
if self.platform in ('linux', 'linux64', 'linux-arm'):
self._binary_name = 'chrome'
elif self.platform == 'mac':
self.archive_name = 'chrome-mac.zip'
self._archive_extract_dir = 'chrome-mac'
elif self.platform == 'win':
self.archive_name = 'chrome-win32.zip'
self._archive_extract_dir = 'chrome-win32'
self._binary_name = 'chrome.exe'
else:
raise Exception('Invalid platform: %s' % self.platform)
if is_official:
if self.platform == 'linux':
self._listing_platform_dir = 'precise32bit/'
self.archive_name = 'chrome-precise32bit.zip'
self._archive_extract_dir = 'chrome-precise32bit'
elif self.platform == 'linux64':
self._listing_platform_dir = 'precise64bit/'
self.archive_name = 'chrome-precise64bit.zip'
self._archive_extract_dir = 'chrome-precise64bit'
elif self.platform == 'mac':
self._listing_platform_dir = 'mac/'
self._binary_name = 'Google Chrome.app/Contents/MacOS/Google Chrome'
elif self.platform == 'win':
if self.is_aura:
self._listing_platform_dir = 'win-aura/'
else:
self._listing_platform_dir = 'win/'
else:
if self.platform in ('linux', 'linux64', 'linux-arm'):
self.archive_name = 'chrome-linux.zip'
self._archive_extract_dir = 'chrome-linux'
if self.platform == 'linux':
self._listing_platform_dir = 'Linux/'
elif self.platform == 'linux64':
self._listing_platform_dir = 'Linux_x64/'
elif self.platform == 'linux-arm':
self._listing_platform_dir = 'Linux_ARM_Cross-Compile/'
elif self.platform == 'mac':
self._listing_platform_dir = 'Mac/'
self._binary_name = 'Chromium.app/Contents/MacOS/Chromium'
elif self.platform == 'win':
self._listing_platform_dir = 'Win/'
def GetListingURL(self, marker=None):
"""Returns the URL for a directory listing, with an optional marker."""
marker_param = ''
if marker:
marker_param = '&marker=' + str(marker)
return self.base_url + '/?delimiter=/&prefix=' + \
self._listing_platform_dir + marker_param
def GetDownloadURL(self, revision):
"""Gets the download URL for a build archive of a specific revision."""
if self.is_official:
return "%s/%s/%s%s" % (
OFFICIAL_BASE_URL, revision, self._listing_platform_dir,
self.archive_name)
else:
return "%s/%s%s/%s" % (self.base_url, self._listing_platform_dir,
revision, self.archive_name)
def GetLastChangeURL(self):
"""Returns a URL to the LAST_CHANGE file."""
return self.base_url + '/' + self._listing_platform_dir + 'LAST_CHANGE'
def GetLaunchPath(self):
"""Returns a relative path (presumably from the archive extraction location)
that is used to run the executable."""
return os.path.join(self._archive_extract_dir, self._binary_name)
def IsAuraBuild(self, build):
"""Check the given build is Aura."""
return build.split('.')[3] == '1'
def IsASANBuild(self, build):
"""Check the given build is ASAN build."""
return build.split('.')[3] == '2'
def ParseDirectoryIndex(self):
"""Parses the Google Storage directory listing into a list of revision
numbers."""
def _FetchAndParse(url):
"""Fetches a URL and returns a 2-Tuple of ([revisions], next-marker). If
next-marker is not None, then the listing is a partial listing and another
fetch should be performed with next-marker being the marker= GET
parameter."""
handle = urllib.urlopen(url)
document = ElementTree.parse(handle)
# All nodes in the tree are namespaced. Get the root's tag name to extract
# the namespace. Etree does namespaces as |{namespace}tag|.
root_tag = document.getroot().tag
end_ns_pos = root_tag.find('}')
if end_ns_pos == -1:
raise Exception("Could not locate end namespace for directory index")
namespace = root_tag[:end_ns_pos + 1]
# Find the prefix (_listing_platform_dir) and whether or not the list is
# truncated.
prefix_len = len(document.find(namespace + 'Prefix').text)
next_marker = None
is_truncated = document.find(namespace + 'IsTruncated')
if is_truncated is not None and is_truncated.text.lower() == 'true':
next_marker = document.find(namespace + 'NextMarker').text
# Get a list of all the revisions.
all_prefixes = document.findall(namespace + 'CommonPrefixes/' +
namespace + 'Prefix')
# The <Prefix> nodes have content of the form of
# |_listing_platform_dir/revision/|. Strip off the platform dir and the
# trailing slash to just have a number.
revisions = []
for prefix in all_prefixes:
revnum = prefix.text[prefix_len:-1]
try:
revnum = int(revnum)
revisions.append(revnum)
except ValueError:
pass
return (revisions, next_marker)
# Fetch the first list of revisions.
(revisions, next_marker) = _FetchAndParse(self.GetListingURL())
# If the result list was truncated, refetch with the next marker. Do this
# until an entire directory listing is done.
while next_marker:
next_url = self.GetListingURL(next_marker)
(new_revisions, next_marker) = _FetchAndParse(next_url)
revisions.extend(new_revisions)
return revisions
def GetRevList(self):
"""Gets the list of revision numbers between self.good_revision and
self.bad_revision."""
# Download the revlist and filter for just the range between good and bad.
minrev = min(self.good_revision, self.bad_revision)
maxrev = max(self.good_revision, self.bad_revision)
revlist_all = map(int, self.ParseDirectoryIndex())
revlist = [x for x in revlist_all if x >= int(minrev) and x <= int(maxrev)]
revlist.sort()
# Set good and bad revisions to be legit revisions.
if revlist:
if self.good_revision < self.bad_revision:
self.good_revision = revlist[0]
self.bad_revision = revlist[-1]
else:
self.bad_revision = revlist[0]
self.good_revision = revlist[-1]
# Fix chromium rev so that the deps blink revision matches REVISIONS file.
if self.base_url == WEBKIT_BASE_URL:
revlist_all.sort()
self.good_revision = FixChromiumRevForBlink(revlist,
revlist_all,
self,
self.good_revision)
self.bad_revision = FixChromiumRevForBlink(revlist,
revlist_all,
self,
self.bad_revision)
return revlist
def GetOfficialBuildsList(self):
"""Gets the list of official build numbers between self.good_revision and
self.bad_revision."""
# Download the revlist and filter for just the range between good and bad.
minrev = min(self.good_revision, self.bad_revision)
maxrev = max(self.good_revision, self.bad_revision)
handle = urllib.urlopen(OFFICIAL_BASE_URL)
dirindex = handle.read()
handle.close()
build_numbers = re.findall(r'<a href="([0-9][0-9].*)/">', dirindex)
final_list = []
i = 0
parsed_build_numbers = [LooseVersion(x) for x in build_numbers]
for build_number in sorted(parsed_build_numbers):
path = OFFICIAL_BASE_URL + '/' + str(build_number) + '/' + \
self._listing_platform_dir + self.archive_name
i = i + 1
try:
connection = urllib.urlopen(path)
connection.close()
if build_number > maxrev:
break
if build_number >= minrev:
# If we are bisecting Aura, we want to include only builds which
# ends with ".1".
if self.is_aura:
if self.IsAuraBuild(str(build_number)):
final_list.append(str(build_number))
# If we are bisecting only official builds (without --aura),
# we can not include builds which ends with '.1' or '.2' since
# they have different folder hierarchy inside.
elif (not self.IsAuraBuild(str(build_number)) and
not self.IsASANBuild(str(build_number))):
final_list.append(str(build_number))
except urllib.HTTPError, e:
pass
return final_list
def UnzipFilenameToDir(filename, directory):
"""Unzip |filename| to |directory|."""
cwd = os.getcwd()
if not os.path.isabs(filename):
filename = os.path.join(cwd, filename)
zf = zipfile.ZipFile(filename)
# Make base.
if not os.path.isdir(directory):
os.mkdir(directory)
os.chdir(directory)
# Extract files.
for info in zf.infolist():
name = info.filename
if name.endswith('/'): # dir
if not os.path.isdir(name):
os.makedirs(name)
else: # file
directory = os.path.dirname(name)
if not os.path.isdir(directory):
os.makedirs(directory)
out = open(name, 'wb')
out.write(zf.read(name))
out.close()
# Set permissions. Permission info in external_attr is shifted 16 bits.
os.chmod(name, info.external_attr >> 16L)
os.chdir(cwd)
def FetchRevision(context, rev, filename, quit_event=None, progress_event=None):
"""Downloads and unzips revision |rev|.
@param context A PathContext instance.
@param rev The Chromium revision number/tag to download.
@param filename The destination for the downloaded file.
@param quit_event A threading.Event which will be set by the master thread to
indicate that the download should be aborted.
@param progress_event A threading.Event which will be set by the master thread
to indicate that the progress of the download should be
displayed.
"""
def ReportHook(blocknum, blocksize, totalsize):
if quit_event and quit_event.isSet():
raise RuntimeError("Aborting download of revision %s" % str(rev))
if progress_event and progress_event.isSet():
size = blocknum * blocksize
if totalsize == -1: # Total size not known.
progress = "Received %d bytes" % size
else:
size = min(totalsize, size)
progress = "Received %d of %d bytes, %.2f%%" % (
size, totalsize, 100.0 * size / totalsize)
# Send a \r to let all progress messages use just one line of output.
sys.stdout.write("\r" + progress)
sys.stdout.flush()
download_url = context.GetDownloadURL(rev)
try:
urllib.urlretrieve(download_url, filename, ReportHook)
if progress_event and progress_event.isSet():
print
except RuntimeError, e:
pass
def RunRevision(context, revision, zipfile, profile, num_runs, command, args):
"""Given a zipped revision, unzip it and run the test."""
print "Trying revision %s..." % str(revision)
# Create a temp directory and unzip the revision into it.
cwd = os.getcwd()
tempdir = tempfile.mkdtemp(prefix='bisect_tmp')
UnzipFilenameToDir(zipfile, tempdir)
os.chdir(tempdir)
# Run the build as many times as specified.
testargs = ['--user-data-dir=%s' % profile] + args
# The sandbox must be run as root on Official Chrome, so bypass it.
if ((context.is_official or context.flash_path or context.pdf_path) and
context.platform.startswith('linux')):
testargs.append('--no-sandbox')
if context.flash_path:
testargs.append('--ppapi-flash-path=%s' % context.flash_path)
    # We have to pass a large enough Flash version, which currently need not
# be correct. Instead of requiring the user of the script to figure out and
# pass the correct version we just spoof it.
testargs.append('--ppapi-flash-version=99.9.999.999')
if context.pdf_path:
shutil.copy(context.pdf_path, os.path.dirname(context.GetLaunchPath()))
testargs.append('--enable-print-preview')
runcommand = []
for token in shlex.split(command):
if token == "%a":
runcommand.extend(testargs)
else:
runcommand.append( \
token.replace('%p', os.path.abspath(context.GetLaunchPath())) \
.replace('%s', ' '.join(testargs)))
results = []
for i in range(0, num_runs):
subproc = subprocess.Popen(runcommand,
bufsize=-1,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = subproc.communicate()
results.append((subproc.returncode, stdout, stderr))
os.chdir(cwd)
try:
shutil.rmtree(tempdir, True)
except Exception, e:
pass
for (returncode, stdout, stderr) in results:
if returncode:
return (returncode, stdout, stderr)
return results[0]
def AskIsGoodBuild(rev, official_builds, status, stdout, stderr):
"""Ask the user whether build |rev| is good or bad."""
# Loop until we get a response that we can parse.
while True:
response = raw_input('Revision %s is ' \
'[(g)ood/(b)ad/(r)etry/(u)nknown/(q)uit]: ' %
str(rev))
if response and response in ('g', 'b', 'r', 'u'):
return response
if response and response == 'q':
raise SystemExit()
class DownloadJob(object):
"""DownloadJob represents a task to download a given Chromium revision."""
def __init__(self, context, name, rev, zipfile):
super(DownloadJob, self).__init__()
# Store off the input parameters.
self.context = context
self.name = name
self.rev = rev
self.zipfile = zipfile
self.quit_event = threading.Event()
self.progress_event = threading.Event()
def Start(self):
"""Starts the download."""
fetchargs = (self.context,
self.rev,
self.zipfile,
self.quit_event,
self.progress_event)
self.thread = threading.Thread(target=FetchRevision,
name=self.name,
args=fetchargs)
self.thread.start()
def Stop(self):
"""Stops the download which must have been started previously."""
self.quit_event.set()
self.thread.join()
os.unlink(self.zipfile)
def WaitFor(self):
"""Prints a message and waits for the download to complete. The download
must have been started previously."""
print "Downloading revision %s..." % str(self.rev)
self.progress_event.set() # Display progress of download.
self.thread.join()
def Bisect(base_url,
platform,
official_builds,
is_aura,
good_rev=0,
bad_rev=0,
num_runs=1,
command="%p %a",
try_args=(),
profile=None,
flash_path=None,
pdf_path=None,
interactive=True,
evaluate=AskIsGoodBuild):
"""Given known good and known bad revisions, run a binary search on all
archived revisions to determine the last known good revision.
@param platform Which build to download/run ('mac', 'win', 'linux64', etc.).
@param official_builds Specify build type (Chromium or Official build).
@param good_rev Number/tag of the known good revision.
@param bad_rev Number/tag of the known bad revision.
@param num_runs Number of times to run each build for asking good/bad.
@param try_args A tuple of arguments to pass to the test application.
@param profile The name of the user profile to run with.
@param interactive If it is false, use command exit code for good or bad
judgment of the argument build.
@param evaluate A function which returns 'g' if the argument build is good,
'b' if it's bad or 'u' if unknown.
Threading is used to fetch Chromium revisions in the background, speeding up
the user's experience. For example, suppose the bounds of the search are
good_rev=0, bad_rev=100. The first revision to be checked is 50. Depending on
whether revision 50 is good or bad, the next revision to check will be either
25 or 75. So, while revision 50 is being checked, the script will download
revisions 25 and 75 in the background. Once the good/bad verdict on rev 50 is
known:
- If rev 50 is good, the download of rev 25 is cancelled, and the next test
is run on rev 75.
- If rev 50 is bad, the download of rev 75 is cancelled, and the next test
is run on rev 25.
"""
if not profile:
profile = 'profile'
context = PathContext(base_url, platform, good_rev, bad_rev,
official_builds, is_aura, flash_path, pdf_path)
cwd = os.getcwd()
print "Downloading list of known revisions..."
_GetDownloadPath = lambda rev: os.path.join(cwd,
'%s-%s' % (str(rev), context.archive_name))
if official_builds:
revlist = context.GetOfficialBuildsList()
else:
revlist = context.GetRevList()
# Get a list of revisions to bisect across.
if len(revlist) < 2: # Don't have enough builds to bisect.
msg = 'We don\'t have enough builds to bisect. revlist: %s' % revlist
raise RuntimeError(msg)
# Figure out our bookends and first pivot point; fetch the pivot revision.
minrev = 0
maxrev = len(revlist) - 1
pivot = maxrev / 2
rev = revlist[pivot]
zipfile = _GetDownloadPath(rev)
fetch = DownloadJob(context, 'initial_fetch', rev, zipfile)
fetch.Start()
fetch.WaitFor()
# Binary search time!
while fetch and fetch.zipfile and maxrev - minrev > 1:
if bad_rev < good_rev:
min_str, max_str = "bad", "good"
else:
min_str, max_str = "good", "bad"
print 'Bisecting range [%s (%s), %s (%s)].' % (revlist[minrev], min_str, \
revlist[maxrev], max_str)
# Pre-fetch next two possible pivots
# - down_pivot is the next revision to check if the current revision turns
# out to be bad.
# - up_pivot is the next revision to check if the current revision turns
# out to be good.
down_pivot = int((pivot - minrev) / 2) + minrev
down_fetch = None
if down_pivot != pivot and down_pivot != minrev:
down_rev = revlist[down_pivot]
down_fetch = DownloadJob(context, 'down_fetch', down_rev,
_GetDownloadPath(down_rev))
down_fetch.Start()
up_pivot = int((maxrev - pivot) / 2) + pivot
up_fetch = None
if up_pivot != pivot and up_pivot != maxrev:
up_rev = revlist[up_pivot]
up_fetch = DownloadJob(context, 'up_fetch', up_rev,
_GetDownloadPath(up_rev))
up_fetch.Start()
# Run test on the pivot revision.
status = None
stdout = None
stderr = None
try:
(status, stdout, stderr) = RunRevision(context,
rev,
fetch.zipfile,
profile,
num_runs,
command,
try_args)
except Exception, e:
print >> sys.stderr, e
# Call the evaluate function to see if the current revision is good or bad.
# On that basis, kill one of the background downloads and complete the
# other, as described in the comments above.
try:
if not interactive:
if status:
answer = 'b'
print 'Bad revision: %s' % rev
else:
answer = 'g'
print 'Good revision: %s' % rev
else:
answer = evaluate(rev, official_builds, status, stdout, stderr)
if answer == 'g' and good_rev < bad_rev or \
answer == 'b' and bad_rev < good_rev:
fetch.Stop()
minrev = pivot
if down_fetch:
down_fetch.Stop() # Kill the download of the older revision.
fetch = None
if up_fetch:
up_fetch.WaitFor()
pivot = up_pivot
fetch = up_fetch
elif answer == 'b' and good_rev < bad_rev or \
answer == 'g' and bad_rev < good_rev:
fetch.Stop()
maxrev = pivot
if up_fetch:
up_fetch.Stop() # Kill the download of the newer revision.
fetch = None
if down_fetch:
down_fetch.WaitFor()
pivot = down_pivot
fetch = down_fetch
elif answer == 'r':
pass # Retry requires no changes.
elif answer == 'u':
# Nuke the revision from the revlist and choose a new pivot.
fetch.Stop()
revlist.pop(pivot)
maxrev -= 1 # Assumes maxrev >= pivot.
if maxrev - minrev > 1:
# Alternate between using down_pivot or up_pivot for the new pivot
# point, without affecting the range. Do this instead of setting the
# pivot to the midpoint of the new range because adjacent revisions
# are likely affected by the same issue that caused the (u)nknown
# response.
if up_fetch and down_fetch:
fetch = [up_fetch, down_fetch][len(revlist) % 2]
elif up_fetch:
fetch = up_fetch
else:
fetch = down_fetch
fetch.WaitFor()
if fetch == up_fetch:
pivot = up_pivot - 1 # Subtracts 1 because revlist was resized.
else:
pivot = down_pivot
zipfile = fetch.zipfile
if down_fetch and fetch != down_fetch:
down_fetch.Stop()
if up_fetch and fetch != up_fetch:
up_fetch.Stop()
else:
assert False, "Unexpected return value from evaluate(): " + answer
except SystemExit:
print "Cleaning up..."
for f in [_GetDownloadPath(revlist[down_pivot]),
_GetDownloadPath(revlist[up_pivot])]:
try:
os.unlink(f)
except OSError:
pass
sys.exit(0)
rev = revlist[pivot]
return (revlist[minrev], revlist[maxrev])
def GetBlinkDEPSRevisionForChromiumRevision(rev):
"""Returns the blink revision that was in REVISIONS file at
chromium revision |rev|."""
# . doesn't match newlines without re.DOTALL, so this is safe.
blink_re = re.compile(r'webkit_revision\D*(\d+)')
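  # Illustrative match: a DEPS entry like '"webkit_revision": "171042",'
  # yields 171042.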
url = urllib.urlopen(DEPS_FILE % rev)
m = blink_re.search(url.read())
url.close()
if m:
return int(m.group(1))
else:
raise Exception('Could not get Blink revision for Chromium rev %d'
% rev)
def GetBlinkRevisionForChromiumRevision(self, rev):
"""Returns the blink revision that was in REVISIONS file at
chromium revision |rev|."""
file_url = "%s/%s%d/REVISIONS" % (self.base_url,
self._listing_platform_dir, rev)
url = urllib.urlopen(file_url)
data = json.loads(url.read())
url.close()
if 'webkit_revision' in data:
return data['webkit_revision']
else:
raise Exception('Could not get blink revision for cr rev %d' % rev)
def FixChromiumRevForBlink(revisions_final, revisions, self, rev):
"""Returns the chromium revision that has the correct blink revision
for blink bisect, DEPS and REVISIONS file might not match since
blink snapshots point to tip of tree blink.
Note: The revisions_final variable might get modified to include
additional revisions."""
blink_deps_rev = GetBlinkDEPSRevisionForChromiumRevision(rev)
while (GetBlinkRevisionForChromiumRevision(self, rev) > blink_deps_rev):
idx = revisions.index(rev)
if idx > 0:
rev = revisions[idx-1]
if rev not in revisions_final:
revisions_final.insert(0, rev)
revisions_final.sort()
return rev
def GetChromiumRevision(url):
"""Returns the chromium revision read from given URL."""
try:
# Location of the latest build revision number
return int(urllib.urlopen(url).read())
except Exception, e:
print('Could not determine latest revision. This could be bad...')
return 999999999
def main():
usage = ('%prog [options] [-- chromium-options]\n'
'Perform binary search on the snapshot builds to find a minimal\n'
'range of revisions where a behavior change happened. The\n'
'behaviors are described as "good" and "bad".\n'
'It is NOT assumed that the behavior of the later revision is\n'
'the bad one.\n'
'\n'
'Revision numbers should use\n'
' Official versions (e.g. 1.0.1000.0) for official builds. (-o)\n'
' SVN revisions (e.g. 123456) for chromium builds, from trunk.\n'
' Use base_trunk_revision from http://omahaproxy.appspot.com/\n'
' for earlier revs.\n'
' Chrome\'s about: build number and omahaproxy branch_revision\n'
' are incorrect, they are from branches.\n'
'\n'
'Tip: add "-- --no-first-run" to bypass the first run prompts.')
parser = optparse.OptionParser(usage=usage)
# Strangely, the default help output doesn't include the choice list.
choices = ['mac', 'win', 'linux', 'linux64', 'linux-arm']
# linux-chromiumos lacks a continuous archive http://crbug.com/78158
parser.add_option('-a', '--archive',
choices = choices,
help = 'The buildbot archive to bisect [%s].' %
'|'.join(choices))
parser.add_option('-o', action="store_true", dest='official_builds',
help = 'Bisect across official ' +
'Chrome builds (internal only) instead of ' +
'Chromium archives.')
parser.add_option('-b', '--bad', type = 'str',
help = 'A bad revision to start bisection. ' +
'May be earlier or later than the good revision. ' +
'Default is HEAD.')
parser.add_option('-f', '--flash_path', type = 'str',
help = 'Absolute path to a recent Adobe Pepper Flash ' +
'binary to be used in this bisection (e.g. ' +
'on Windows C:\...\pepflashplayer.dll and on Linux ' +
'/opt/google/chrome/PepperFlash/libpepflashplayer.so).')
parser.add_option('-d', '--pdf_path', type = 'str',
                    help = 'Absolute path to a recent PDF plugin ' +
'binary to be used in this bisection (e.g. ' +
'on Windows C:\...\pdf.dll and on Linux ' +
'/opt/google/chrome/libpdf.so). Option also enables ' +
'print preview.')
parser.add_option('-g', '--good', type = 'str',
help = 'A good revision to start bisection. ' +
'May be earlier or later than the bad revision. ' +
'Default is 0.')
parser.add_option('-p', '--profile', '--user-data-dir', type = 'str',
help = 'Profile to use; this will not reset every run. ' +
'Defaults to a clean profile.', default = 'profile')
parser.add_option('-t', '--times', type = 'int',
help = 'Number of times to run each build before asking ' +
'if it\'s good or bad. Temporary profiles are reused.',
default = 1)
parser.add_option('-c', '--command', type = 'str',
help = 'Command to execute. %p and %a refer to Chrome ' +
'executable and specified extra arguments respectively. ' +
'Use %s to specify all extra arguments as one string. ' +
'Defaults to "%p %a". Note that any extra paths ' +
'specified should be absolute.',
default = '%p %a')
parser.add_option('-l', '--blink', action='store_true',
help = 'Use Blink bisect instead of Chromium. ')
parser.add_option('', '--not-interactive', action='store_true',
help = 'Use command exit code to tell good/bad revision.',
default=False)
parser.add_option('--aura',
dest='aura',
action='store_true',
default=False,
help='Allow the script to bisect aura builds')
(opts, args) = parser.parse_args()
if opts.archive is None:
print 'Error: missing required parameter: --archive'
print
parser.print_help()
return 1
if opts.aura:
if opts.archive != 'win' or not opts.official_builds:
print 'Error: Aura is supported only on Windows platform '\
'and official builds.'
return 1
if opts.blink:
base_url = WEBKIT_BASE_URL
else:
base_url = CHROMIUM_BASE_URL
# Create the context. Initialize 0 for the revisions as they are set below.
context = PathContext(base_url, opts.archive, 0, 0,
opts.official_builds, opts.aura, None)
# Pick a starting point, try to get HEAD for this.
if opts.bad:
bad_rev = opts.bad
else:
bad_rev = '999.0.0.0'
if not opts.official_builds:
bad_rev = GetChromiumRevision(context.GetLastChangeURL())
# Find out when we were good.
if opts.good:
good_rev = opts.good
else:
good_rev = '0.0.0.0' if opts.official_builds else 0
if opts.flash_path:
flash_path = opts.flash_path
msg = 'Could not find Flash binary at %s' % flash_path
assert os.path.exists(flash_path), msg
if opts.pdf_path:
pdf_path = opts.pdf_path
msg = 'Could not find PDF binary at %s' % pdf_path
assert os.path.exists(pdf_path), msg
if opts.official_builds:
good_rev = LooseVersion(good_rev)
bad_rev = LooseVersion(bad_rev)
else:
good_rev = int(good_rev)
bad_rev = int(bad_rev)
if opts.times < 1:
print('Number of times to run (%d) must be greater than or equal to 1.' %
opts.times)
parser.print_help()
return 1
(min_chromium_rev, max_chromium_rev) = Bisect(
base_url, opts.archive, opts.official_builds, opts.aura, good_rev,
bad_rev, opts.times, opts.command, args, opts.profile, opts.flash_path,
opts.pdf_path, not opts.not_interactive)
# Get corresponding blink revisions.
try:
min_blink_rev = GetBlinkRevisionForChromiumRevision(context,
min_chromium_rev)
max_blink_rev = GetBlinkRevisionForChromiumRevision(context,
max_chromium_rev)
except Exception, e:
# Silently ignore the failure.
min_blink_rev, max_blink_rev = 0, 0
if opts.blink:
# We're done. Let the user know the results in an official manner.
if good_rev > bad_rev:
print DONE_MESSAGE_GOOD_MAX % (str(min_blink_rev), str(max_blink_rev))
else:
print DONE_MESSAGE_GOOD_MIN % (str(min_blink_rev), str(max_blink_rev))
print 'BLINK CHANGELOG URL:'
print ' ' + BLINK_CHANGELOG_URL % (max_blink_rev, min_blink_rev)
else:
# We're done. Let the user know the results in an official manner.
if good_rev > bad_rev:
print DONE_MESSAGE_GOOD_MAX % (str(min_chromium_rev),
str(max_chromium_rev))
else:
print DONE_MESSAGE_GOOD_MIN % (str(min_chromium_rev),
str(max_chromium_rev))
if min_blink_rev != max_blink_rev:
print ("NOTE: There is a Blink roll in the range, "
"you might also want to do a Blink bisect.")
print 'CHANGELOG URL:'
if opts.official_builds:
print OFFICIAL_CHANGELOG_URL % (min_chromium_rev, max_chromium_rev)
else:
print ' ' + CHANGELOG_URL % (min_chromium_rev, max_chromium_rev)
if __name__ == '__main__':
sys.exit(main())
|
emawind84/readthedocs.org | refs/heads/master | readthedocs/core/utils/tasks/permission_checks.py | 30 | __all__ = ('user_id_matches',)
def user_id_matches(request, state, context):
user_id = context.get('user_id', None)
if user_id is not None and request.user.is_authenticated():
if request.user.id == user_id:
return True
return False
|
lrn-guru/lrn | refs/heads/master | lrn/repl.py | 1 | import os
import api
import lrn
l = api.l
from subprocess import call
def check_progress(cmd):
task = api.get_task()
test = task['test']
test_dir = api.get_tests_dir()
command = "python {}/{}.py {}".format(test_dir, test, cmd)
status = call(command.split())
return status
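# Illustration (task name and tests dir assumed, not the real layout): if the
# current task's test is "intro" and tests live in /tests, check_progress("ls")
# runs "python /tests/intro.py ls"; a zero exit status advances the lesson.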
def repl():
try:
while True:
l('lrn ', 'cyan', False)
l('> ', 'blue', False)
cmd = raw_input('')
if 'cd ' in cmd:
folder = cmd[3:]
os.chdir(folder)
else:
os.system(cmd)
outcome = check_progress(cmd)
if outcome == 0:
lrn.next_task()
except (EOFError, KeyboardInterrupt):
print('Exiting lrn. Enter `lrn resume` to resume.')
exit(0)
if __name__ == '__main__':
repl()
|
Tatsh-ansible/ansible | refs/heads/devel | lib/ansible/module_utils/nxos.py | 9 | #
# This code is part of Ansible, but is an independent component.
#
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat, Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network_common import to_list, ComplexList
from ansible.module_utils.connection import exec_command
from ansible.module_utils.six import iteritems
from ansible.module_utils.urls import fetch_url
_DEVICE_CONNECTION = None
nxos_argument_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE'])),
'use_ssl': dict(type='bool'),
'validate_certs': dict(type='bool'),
'timeout': dict(type='int'),
'provider': dict(type='dict'),
'transport': dict(choices=['cli', 'nxapi'])
}
# Add argument's default value here
ARGS_DEFAULT_VALUE = {
'transport': 'cli',
'timeout': 10
}
def get_argspec():
return nxos_argument_spec
def check_args(module, warnings):
provider = module.params['provider'] or {}
for key in nxos_argument_spec:
if module._name == 'nxos_user':
if key not in ['password', 'provider', 'transport'] and module.params[key]:
                warnings.append('argument %s has been deprecated and will be removed in a future version' % key)
else:
if key not in ['provider', 'transport'] and module.params[key]:
warnings.append('argument %s has been deprecated and will be removed in a future version' % key)
# set argument's default value if not provided in input
# This is done to avoid unwanted argument deprecation warning
# in case argument is not given as input (outside provider).
for key in ARGS_DEFAULT_VALUE:
if not module.params.get(key, None):
module.params[key] = ARGS_DEFAULT_VALUE[key]
if provider:
for param in ('password',):
if provider.get(param):
module.no_log_values.update(return_values(provider[param]))
def load_params(module):
provider = module.params.get('provider') or dict()
for key, value in iteritems(provider):
if key in nxos_argument_spec:
if module.params.get(key) is None and value is not None:
module.params[key] = value
def get_connection(module):
global _DEVICE_CONNECTION
if not _DEVICE_CONNECTION:
load_params(module)
if is_nxapi(module):
conn = Nxapi(module)
else:
conn = Cli(module)
_DEVICE_CONNECTION = conn
return _DEVICE_CONNECTION
class Cli:
def __init__(self, module):
self._module = module
self._device_configs = {}
def exec_command(self, command):
if isinstance(command, dict):
command = self._module.jsonify(command)
return exec_command(self._module, command)
def get_config(self, flags=[]):
"""Retrieves the current config from the device or cache
"""
cmd = 'show running-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return self._device_configs[cmd]
except KeyError:
rc, out, err = self.exec_command(cmd)
if rc != 0:
self._module.fail_json(msg=to_text(err, errors='surrogate_then_replace'))
cfg = to_text(out, errors='surrogate_then_replace').strip()
self._device_configs[cmd] = cfg
return cfg
def run_commands(self, commands, check_rc=True):
"""Run list of commands on remote device and return results
"""
responses = list()
for item in to_list(commands):
if item['output'] == 'json' and not is_json(item['command']):
cmd = '%s | json' % item['command']
elif item['output'] == 'text' and is_json(item['command']):
cmd = item['command'].split('|')[0]
else:
cmd = item['command']
rc, out, err = self.exec_command(cmd)
out = to_text(out, errors='surrogate_then_replace')
if check_rc and rc != 0:
self._module.fail_json(msg=to_text(err, errors='surrogate_then_replace'))
try:
out = self._module.from_json(out)
except ValueError:
out = str(out).strip()
responses.append(out)
return responses
def load_config(self, config):
"""Sends configuration commands to the remote device
"""
rc, out, err = self.exec_command('configure')
if rc != 0:
self._module.fail_json(msg='unable to enter configuration mode', output=to_text(err, errors='surrogate_then_replace'))
for cmd in config:
rc, out, err = self.exec_command(cmd)
if rc != 0:
self._module.fail_json(msg=to_text(err, errors='surrogate_then_replace'))
self.exec_command('end')
class Nxapi:
OUTPUT_TO_COMMAND_TYPE = {
'text': 'cli_show_ascii',
'json': 'cli_show',
'bash': 'bash',
'config': 'cli_conf'
}
def __init__(self, module):
self._module = module
self._nxapi_auth = None
self._device_configs = {}
self._module.params['url_username'] = self._module.params['username']
self._module.params['url_password'] = self._module.params['password']
host = self._module.params['host']
port = self._module.params['port']
if self._module.params['use_ssl']:
proto = 'https'
port = port or 443
else:
proto = 'http'
port = port or 80
self._url = '%s://%s:%s/ins' % (proto, host, port)
def _error(self, msg, **kwargs):
self._nxapi_auth = None
if 'url' not in kwargs:
kwargs['url'] = self._url
self._module.fail_json(msg=msg, **kwargs)
def _request_builder(self, commands, output, version='1.0', chunk='0', sid=None):
"""Encodes a NXAPI JSON request message
"""
try:
command_type = self.OUTPUT_TO_COMMAND_TYPE[output]
except KeyError:
msg = 'invalid format, received %s, expected one of %s' % \
(output, ','.join(self.OUTPUT_TO_COMMAND_TYPE.keys()))
self._error(msg=msg)
if isinstance(commands, (list, set, tuple)):
commands = ' ;'.join(commands)
msg = {
'version': version,
'type': command_type,
'chunk': chunk,
'sid': sid,
'input': commands,
'output_format': 'json'
}
return dict(ins_api=msg)
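    # Illustrative sketch (not part of the original module): a request built
    # with _request_builder(['show version'], 'json') would resemble:
    #   {"ins_api": {"version": "1.0", "type": "cli_show", "chunk": "0",
    #                "sid": None, "input": "show version",
    #                "output_format": "json"}}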
def send_request(self, commands, output='text', check_status=True):
        # only 10 show commands can be encoded in each request
        # message sent to the remote device
if output != 'config':
commands = collections.deque(to_list(commands))
stack = list()
requests = list()
while commands:
stack.append(commands.popleft())
if len(stack) == 10:
body = self._request_builder(stack, output)
data = self._module.jsonify(body)
requests.append(data)
stack = list()
if stack:
body = self._request_builder(stack, output)
data = self._module.jsonify(body)
requests.append(data)
else:
body = self._request_builder(commands, 'config')
requests = [self._module.jsonify(body)]
headers = {'Content-Type': 'application/json'}
result = list()
timeout = self._module.params['timeout']
for req in requests:
if self._nxapi_auth:
headers['Cookie'] = self._nxapi_auth
response, headers = fetch_url(
self._module, self._url, data=req, headers=headers,
timeout=timeout, method='POST'
)
self._nxapi_auth = headers.get('set-cookie')
if headers['status'] != 200:
self._error(**headers)
try:
response = self._module.from_json(response.read())
except ValueError:
self._module.fail_json(msg='unable to parse response')
output = response['ins_api']['outputs']['output']
for item in to_list(output):
if check_status and item['code'] != '200':
self._error(output=output, **item)
elif 'body' in item:
result.append(item['body'])
# else:
# error in command but since check_status is disabled
# silently drop it.
# result.append(item['msg'])
return result
def get_config(self, flags=[]):
"""Retrieves the current config from the device or cache
"""
cmd = 'show running-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return self._device_configs[cmd]
except KeyError:
out = self.send_request(cmd)
cfg = str(out[0]).strip()
self._device_configs[cmd] = cfg
return cfg
def run_commands(self, commands, check_rc=True):
"""Run list of commands on remote device and return results
"""
output = None
queue = list()
responses = list()
def _send(commands, output):
return self.send_request(commands, output, check_status=check_rc)
for item in to_list(commands):
if is_json(item['command']):
item['command'] = str(item['command']).split('|')[0]
item['output'] = 'json'
if all((output == 'json', item['output'] == 'text')) or all((output == 'text', item['output'] == 'json')):
responses.extend(_send(queue, output))
queue = list()
output = item['output'] or 'json'
queue.append(item['command'])
if queue:
responses.extend(_send(queue, output))
return responses
def load_config(self, commands):
"""Sends the ordered set of commands to the device
"""
commands = to_list(commands)
self.send_request(commands, output='config')
def is_json(cmd):
return str(cmd).endswith('| json')
def is_text(cmd):
return not is_json(cmd)
def is_nxapi(module):
transport = module.params['transport']
provider_transport = (module.params['provider'] or {}).get('transport')
return 'nxapi' in (transport, provider_transport)
def to_command(module, commands):
if is_nxapi(module):
default_output = 'json'
else:
default_output = 'text'
transform = ComplexList(dict(
command=dict(key=True),
output=dict(default=default_output),
prompt=dict(),
answer=dict()
), module)
commands = transform(to_list(commands))
for item in commands:
if is_json(item['command']):
item['output'] = 'json'
return commands
def get_config(module, flags=[]):
conn = get_connection(module)
return conn.get_config(flags)
def run_commands(module, commands, check_rc=True):
conn = get_connection(module)
return conn.run_commands(to_command(module, commands), check_rc)
def load_config(module, config):
conn = get_connection(module)
return conn.load_config(config)
|
nhomar/odoo-mirror | refs/heads/8.0 | addons/account_followup/report/__init__.py | 447 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_followup_print
import account_followup_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
pvalienteverde/MeetUpIntroMLySistemasRecomendacion | refs/heads/master | scripts/SistemasRecomendacion/CollaborativeFiltering.py | 1 | import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
from pyspark.ml.evaluation import RegressionEvaluator,Evaluator
from math import sqrt
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import cosine_similarity
class EvaluadorRMSE(Evaluator):
"""
    Robustly evaluates RMSE.
    Same as RegressionEvaluator with metric=rmse, but discarding values that were not predicted
"""
def __init__(self,predictionCol, targetCol):
super(EvaluadorRMSE, self).__init__()
self.predictionCol=predictionCol
self.targetCol=targetCol
def _evaluate(self, dataset):
error=rmse(dataset,self.predictionCol,self.targetCol)
print ("Error: {}".format(error))
return error
def isLargerBetter(self):
return False
class ModelBasedALS(object):
"""
    Wrapper for the ALS class from Spark ml.
    Provides support for the methods of mllib's ALS
"""
def __init__(self,modelALS):
super(ModelBasedALS, self).__init__()
"""
        Parameters
        ----------
        modelALS : trained pyspark.ml.recommendation.ALS object
"""
self.userIndex,self.userFactors = self.toArray(modelALS.userFactors)
self.itemIndex,self.itemFactors = self.toArray(modelALS.itemFactors)
self.prediccion=pd.DataFrame(data=self.userFactors.dot(self.itemFactors.T),columns=self.itemIndex,index=self.userIndex)
self.relacion_index_user=dict(zip(self.userIndex,range(len(self.userIndex))))
self.relacion_index_item=dict(zip(self.itemIndex,range(len(self.itemIndex))))
def predictAll(self,user_item:pd.DataFrame,tag_prediccion='prediccion'):
"""
        Returns all the predictions for the given (user, item) pairs
"""
estimaciones=[]
for tupla in user_item.values:
try:
estimacion=self.prediccion.iloc[self.relacion_index_user[tupla[0]],self.relacion_index_item[tupla[1]]]
estimaciones.append(estimacion)
            except (KeyError, IndexError):
                # unknown user or item: no prediction is available
                estimaciones.append(np.nan)
user_item[tag_prediccion]=estimaciones
return user_item
def recommendProducts(self,user:int,n:int=3):
"""
        Returns the top recommended products for the user
"""
        usuario = self.prediccion.loc[user]
        usuario = usuario.sort_values(ascending=False)
        return usuario.iloc[:n]
def recommendUsers(self,product:int,n:int=3):
"""
        Returns the top users for a product
"""
        productos = self.prediccion.loc[:, product]
        productos = productos.sort_values(ascending=False)
        return productos.iloc[:n]
@staticmethod
def toArray(datos):
        indices = []
        lista = []
        # collect (id, features) pairs from the factor DataFrame
        rows = datos.rdd.map(lambda l: (l.id, l.features)).collect()
        for tupla in rows:
            indices.append(tupla[0])
            lista.append(tupla[1])
        return indices, np.array(lista)
def rmse(dataset,predictionCol,targetCol):
valores=np.array(dataset.dropna().map(lambda r:[r[predictionCol],r[targetCol]]).collect())
error = sqrt(mean_squared_error(valores[:,0],valores[:,1]))
return error
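# Usage sketch (illustrative; assumes a trained pyspark.ml.recommendation.ALS
# model and a pandas DataFrame of (user, item) pairs, neither of which is part
# of this module):
# wrapper = ModelBasedALS(trained_als_model)
# pairs = wrapper.predictAll(user_item_df)        # adds a 'prediccion' column
# top_items = wrapper.recommendProducts(user=42, n=3)
# top_users = wrapper.recommendUsers(product=7, n=3)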
|
alfonsotames/linux | refs/heads/master | scripts/gdb/linux/dmesg.py | 299 | #
# gdb helper commands and functions for Linux kernel debugging
#
# kernel log buffer dump
#
# Copyright (c) Siemens AG, 2011, 2012
#
# Authors:
# Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
import sys
from linux import utils
class LxDmesg(gdb.Command):
"""Print Linux kernel log buffer."""
def __init__(self):
super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
log_buf_addr = int(str(gdb.parse_and_eval(
"(void *)'printk.c'::log_buf")).split()[0], 16)
log_first_idx = int(gdb.parse_and_eval("'printk.c'::log_first_idx"))
log_next_idx = int(gdb.parse_and_eval("'printk.c'::log_next_idx"))
log_buf_len = int(gdb.parse_and_eval("'printk.c'::log_buf_len"))
inf = gdb.inferiors()[0]
start = log_buf_addr + log_first_idx
if log_first_idx < log_next_idx:
log_buf_2nd_half = -1
length = log_next_idx - log_first_idx
log_buf = utils.read_memoryview(inf, start, length).tobytes()
else:
log_buf_2nd_half = log_buf_len - log_first_idx
a = utils.read_memoryview(inf, start, log_buf_2nd_half)
b = utils.read_memoryview(inf, log_buf_addr, log_next_idx)
log_buf = a.tobytes() + b.tobytes()
pos = 0
        while pos < len(log_buf):
length = utils.read_u16(log_buf[pos + 8:pos + 10])
if length == 0:
if log_buf_2nd_half == -1:
gdb.write("Corrupted log buffer!\n")
break
pos = log_buf_2nd_half
continue
text_len = utils.read_u16(log_buf[pos + 10:pos + 12])
text = log_buf[pos + 16:pos + 16 + text_len].decode(
encoding='utf8', errors='replace')
time_stamp = utils.read_u64(log_buf[pos:pos + 8])
for line in text.splitlines():
msg = u"[{time:12.6f}] {line}\n".format(
time=time_stamp / 1000000000.0,
line=line)
# With python2 gdb.write will attempt to convert unicode to
# ascii and might fail so pass an utf8-encoded str instead.
if sys.hexversion < 0x03000000:
msg = msg.encode(encoding='utf8', errors='replace')
gdb.write(msg)
pos += length
LxDmesg()
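# Usage sketch (illustrative, not part of the original script): inside a gdb
# session attached to a kernel image, after the helpers have been loaded via
# `source scripts/gdb/vmlinux-gdb.py`, run `lx-dmesg` at the (gdb) prompt to
# dump the kernel log buffer.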
|
zuher83/z-odoo8-addons | refs/heads/master | credit_control_limit/__openerp__.py | 1 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-2014 Zuher ELMAS. All Rights Reserved
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
'name': 'Credit control limit',
'version': '1',
"category" : 'Account',
'complexity': "easy",
'description': """
 Calculates the amount payable by the partner that has not yet been invoiced. It sums the total amount invoiced but not yet paid and the total amount due for orders not yet invoiced.
 - Allows sales within the authorized credit limit.
 - Verifies the total amount due by the partner. If the amount due exceeds the allowed limit, the document state is written to "bloqued". A manager can confirm the order by forcing the state, or cancel the order.
 - If allow_credit is not checked, the credit limit does not block orders.
 This module overrides the sale workflow. If you want to uninstall this addon, you need to update the sale module after uninstalling it.
""",
'author': 'Zuher Elmas',
'depends': ['sale','stock_account'],
'data': ['credit_control_limit.xml',
'sale_workflow.xml',
'sale_view.xml',],
'demo_xml': [],
'test': [],
'installable': True,
'application': False,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
aristofor/Gifomaton | refs/heads/master | gifomaton/menu.py | 1 | # coding: utf-8
import pygame
from pygame.locals import *
from scene import Scene
from player import PlayerScene
from capture import CaptureScene
from models import mock_seqs
class MenuScene(Scene):
cols = 3
rows = 3
_screen_rect = None
_location = None
item_size = (430,310)
gut = 10
# Play hitboxes
tiles = None
# Capture command
captr = None
def __init__(self):
super(MenuScene, self).__init__()
self.font = pygame.font.SysFont('andalemono', 64)
seq = iter(mock_seqs)
missing = False
self.tiles = list()
for y in range(self.rows):
for x in range(self.cols):
if not missing:
try:
i = seq.next()
self.tiles.append(
{
'name': i['name'],
'rect': pygame.Rect(
x*(self.item_size[0]+self.gut), y*(self.item_size[1]+self.gut),
self.item_size[0], self.item_size[1] )
})
except StopIteration:
missing = True
i = None
self.captr = Rect(0,0,0,0)
#print self.tiles
def render(self, screen):
"""
        FIXME: hitboxes and rectangles are not aligned (faulty move)
"""
screen.fill((0,0,0))
sr = screen.get_rect()
if sr != self._screen_rect:
self._screen_rect = sr
self._location = (
(sr[2]-( self.cols*(self.item_size[0]+self.gut)-self.gut ))/2,
                (sr[3]-( self.rows*(self.item_size[1]+self.gut)-self.gut ))/2,
)
self.captr = Rect(sr[2]/2-60, sr[3]/2-50, 120, 100)
for z in self.tiles:
pygame.draw.rect(screen,(255,0,0), (z['rect']).move(self._location),1)
pygame.draw.rect(screen, (33,31,31), self.captr, 0)
pygame.draw.circle(screen, (255,0,0), (sr[2]/2, sr[3]/2), 40, 0)
title_gfx = self.font.render('GIFOMATON', True, (255, 255, 255))
screen.blit(title_gfx, (470, 364))
def update(self):
pass
def inlet(self, events):
for event in events:
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
if self.captr.collidepoint(event.pos):
self.manager.go_to(CaptureScene())
break
else:
for z in self.tiles:
if z['rect'].collidepoint(event.pos):
#print("go_to {}".format(z['name']))
self.manager.go_to(PlayerScene(z['name']))
break
|
sfcl/severcart | refs/heads/master | index/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
imajes/Sick-Beard | refs/heads/master | sickbeard/metadata/wdtv.py | 3 | # Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import datetime
import os
import re
import sickbeard
import generic
from sickbeard import logger, exceptions, helpers
from sickbeard import encodingKludge as ek
from lib.tvdb_api import tvdb_api, tvdb_exceptions
from sickbeard.exceptions import ex
try:
import xml.etree.cElementTree as etree
except ImportError:
import elementtree.ElementTree as etree
class WDTVMetadata(generic.GenericMetadata):
"""
Metadata generation class for WDTV
The following file structure is used:
show_root/folder.jpg (poster)
show_root/Season ##/folder.jpg (season thumb)
show_root/Season ##/filename.ext (*)
show_root/Season ##/filename.metathumb (episode thumb)
show_root/Season ##/filename.xml (episode metadata)
"""
def __init__(self,
show_metadata=False,
episode_metadata=False,
fanart=False,
poster=False,
banner=False,
episode_thumbnails=False,
season_posters=False,
season_banners=False,
season_all_poster=False,
season_all_banner=False):
generic.GenericMetadata.__init__(self,
show_metadata,
episode_metadata,
fanart,
poster,
banner,
episode_thumbnails,
season_posters,
season_banners,
season_all_poster,
season_all_banner)
self.name = 'WDTV'
self._ep_nfo_extension = 'xml'
self.poster_name = "folder.jpg"
# web-ui metadata template
self.eg_show_metadata = "<i>not supported</i>"
self.eg_episode_metadata = "Season##\\<i>filename</i>.xml"
self.eg_fanart = "<i>not supported</i>"
self.eg_poster = "folder.jpg"
self.eg_banner = "<i>not supported</i>"
self.eg_episode_thumbnails = "Season##\\<i>filename</i>.metathumb"
self.eg_season_posters = "Season##\\folder.jpg"
self.eg_season_banners = "<i>not supported</i>"
self.eg_season_all_poster = "<i>not supported</i>"
self.eg_season_all_banner = "<i>not supported</i>"
# Override with empty methods for unsupported features
def retrieveShowMetadata(self, folder):
# no show metadata generated, we abort this lookup function
return (None, None)
def create_show_metadata(self, show_obj):
pass
def get_show_file_path(self, show_obj):
pass
def create_fanart(self, show_obj):
pass
def create_banner(self, show_obj):
pass
def create_season_banners(self, show_obj):
pass
def create_season_all_poster(self, show_obj):
pass
def create_season_all_banner(self, show_obj):
pass
def get_episode_thumb_path(self, ep_obj):
"""
Returns the path where the episode thumbnail should be stored. Defaults to
the same path as the episode file but with a .metathumb extension.
ep_obj: a TVEpisode instance for which to create the thumbnail
"""
if ek.ek(os.path.isfile, ep_obj.location):
tbn_filename = helpers.replaceExtension(ep_obj.location, 'metathumb')
else:
return None
return tbn_filename
def get_season_poster_path(self, show_obj, season):
"""
Season thumbs for WDTV go in Show Dir/Season X/folder.jpg
If no season folder exists, None is returned
"""
dir_list = [x for x in ek.ek(os.listdir, show_obj.location) if ek.ek(os.path.isdir, ek.ek(os.path.join, show_obj.location, x))]
season_dir_regex = '^Season\s+(\d+)$'
season_dir = None
for cur_dir in dir_list:
if season == 0 and cur_dir == "Specials":
season_dir = cur_dir
break
match = re.match(season_dir_regex, cur_dir, re.I)
if not match:
continue
cur_season = int(match.group(1))
if cur_season == season:
season_dir = cur_dir
break
if not season_dir:
logger.log(u"Unable to find a season dir for season " + str(season), logger.DEBUG)
return None
logger.log(u"Using " + str(season_dir) + "/folder.jpg as season dir for season " + str(season), logger.DEBUG)
return ek.ek(os.path.join, show_obj.location, season_dir, 'folder.jpg')
def _ep_data(self, ep_obj):
"""
Creates an elementTree XML structure for a WDTV style episode.xml
and returns the resulting data object.
ep_obj: a TVShow instance to create the NFO for
"""
eps_to_write = [ep_obj] + ep_obj.relatedEps
tvdb_lang = ep_obj.show.lang
try:
# There's gotta be a better way of doing this but we don't wanna
# change the language value elsewhere
ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
if tvdb_lang and not tvdb_lang == 'en':
ltvdb_api_parms['language'] = tvdb_lang
t = tvdb_api.Tvdb(actors=True, **ltvdb_api_parms)
myShow = t[ep_obj.show.tvdbid]
except tvdb_exceptions.tvdb_shownotfound, e:
raise exceptions.ShowNotFoundException(e.message)
except tvdb_exceptions.tvdb_error, e:
logger.log(u"Unable to connect to TVDB while creating meta files - skipping - " + ex(e), logger.ERROR)
return False
rootNode = etree.Element("details")
# write an WDTV XML containing info for all matching episodes
for curEpToWrite in eps_to_write:
try:
myEp = myShow[curEpToWrite.season][curEpToWrite.episode]
except (tvdb_exceptions.tvdb_episodenotfound, tvdb_exceptions.tvdb_seasonnotfound):
logger.log(u"Unable to find episode " + str(curEpToWrite.season) + "x" + str(curEpToWrite.episode) + " on tvdb... has it been removed? Should I delete from db?")
return None
if myEp["firstaired"] == None and ep_obj.season == 0:
myEp["firstaired"] = str(datetime.date.fromordinal(1))
if myEp["episodename"] == None or myEp["firstaired"] == None:
return None
if len(eps_to_write) > 1:
episode = etree.SubElement(rootNode, "details")
else:
episode = rootNode
# TODO: get right EpisodeID
episodeID = etree.SubElement(episode, "id")
episodeID.text = str(curEpToWrite.tvdbid)
title = etree.SubElement(episode, "title")
title.text = ep_obj.prettyName()
seriesName = etree.SubElement(episode, "series_name")
if myShow["seriesname"] != None:
seriesName.text = myShow["seriesname"]
episodeName = etree.SubElement(episode, "episode_name")
if curEpToWrite.name != None:
episodeName.text = curEpToWrite.name
seasonNumber = etree.SubElement(episode, "season_number")
seasonNumber.text = str(curEpToWrite.season)
episodeNum = etree.SubElement(episode, "episode_number")
episodeNum.text = str(curEpToWrite.episode)
firstAired = etree.SubElement(episode, "firstaired")
if curEpToWrite.airdate != datetime.date.fromordinal(1):
firstAired.text = str(curEpToWrite.airdate)
year = etree.SubElement(episode, "year")
if myShow["firstaired"] != None:
try:
year_text = str(datetime.datetime.strptime(myShow["firstaired"], '%Y-%m-%d').year)
if year_text:
year.text = year_text
                except ValueError:
                    pass
runtime = etree.SubElement(episode, "runtime")
if curEpToWrite.season != 0:
if myShow["runtime"] != None:
runtime.text = myShow["runtime"]
genre = etree.SubElement(episode, "genre")
if myShow["genre"] != None:
genre.text = " / ".join([x.strip() for x in myShow["genre"].split('|') if x and x.strip()])
director = etree.SubElement(episode, "director")
director_text = myEp['director']
if director_text != None:
director.text = director_text
if myShow["_actors"] != None:
for actor in myShow["_actors"]:
cur_actor_name_text = actor['name']
if cur_actor_name_text != None and cur_actor_name_text.strip():
cur_actor = etree.SubElement(episode, "actor")
cur_actor_name = etree.SubElement(cur_actor, "name")
cur_actor_name.text = cur_actor_name_text.strip()
cur_actor_role = etree.SubElement(cur_actor, "role")
cur_actor_role_text = actor['role']
if cur_actor_role_text != None:
cur_actor_role.text = cur_actor_role_text
overview = etree.SubElement(episode, "overview")
if curEpToWrite.description != None:
overview.text = curEpToWrite.description
# Make it purdy
helpers.indentXML(rootNode)
data = etree.ElementTree(rootNode)
return data
# present a standard "interface" from the module
metadata_class = WDTVMetadata
|
sirkubax/ansible | refs/heads/devel | lib/ansible/plugins/action/copy.py | 22 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import pipes
import tempfile
from ansible import constants as C
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
from ansible.utils.hashing import checksum
from ansible.utils.unicode import to_bytes
from ansible.parsing.vault import VaultLib
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=dict()):
''' handler for file transfer operations '''
source = self._task.args.get('src', None)
content = self._task.args.get('content', None)
dest = self._task.args.get('dest', None)
raw = boolean(self._task.args.get('raw', 'no'))
force = boolean(self._task.args.get('force', 'yes'))
faf = self._task.first_available_file
if (source is None and content is None and faf is None) or dest is None:
return dict(failed=True, msg="src (or content) and dest are required")
elif (source is not None or faf is not None) and content is not None:
return dict(failed=True, msg="src and content are mutually exclusive")
elif content is not None and dest is not None and dest.endswith("/"):
return dict(failed=True, msg="dest must be a file if content is defined")
# Check if the source ends with a "/"
source_trailing_slash = False
if source:
source_trailing_slash = self._connection._shell.path_has_trailing_slash(source)
# Define content_tempfile in case we set it after finding content populated.
content_tempfile = None
# If content is defined make a temp file and write the content into it.
if content is not None:
try:
# If content comes to us as a dict it should be decoded json.
# We need to encode it back into a string to write it out.
if isinstance(content, dict) or isinstance(content, list):
content_tempfile = self._create_content_tempfile(json.dumps(content))
else:
content_tempfile = self._create_content_tempfile(content)
source = content_tempfile
except Exception as err:
return dict(failed=True, msg="could not write content temp file: %s" % err)
# if we have first_available_file in our vars
# look up the files and use the first one we find as src
elif faf:
source = self._get_first_available_file(faf, task_vars.get('_original_file', None))
if source is None:
return dict(failed=True, msg="could not find src in first_available_file list")
else:
if self._task._role is not None:
source = self._loader.path_dwim_relative(self._task._role._role_path, 'files', source)
else:
source = self._loader.path_dwim(source)
        # A list of source file tuples (full_path, relative_path) which we will try to copy to the destination
source_files = []
# If source is a directory populate our list else source is a file and translate it to a tuple.
if os.path.isdir(source):
# Get the amount of spaces to remove to get the relative path.
if source_trailing_slash:
sz = len(source) + 1
else:
sz = len(source.rsplit('/', 1)[0]) + 1
# Walk the directory and append the file tuples to source_files.
for base_path, sub_folders, files in os.walk(source):
for file in files:
full_path = os.path.join(base_path, file)
rel_path = full_path[sz:]
source_files.append((full_path, rel_path))
# If it's recursive copy, destination is always a dir,
# explicitly mark it so (note - copy module relies on this).
if not self._connection._shell.path_has_trailing_slash(dest):
dest = self._connection._shell.join_path(dest, '')
else:
source_files.append((source, os.path.basename(source)))
changed = False
diffs = []
module_result = {"changed": False}
# A register for if we executed a module.
# Used to cut down on command calls when not recursive.
module_executed = False
# Tell _execute_module to delete the file if there is one file.
delete_remote_tmp = (len(source_files) == 1)
        # If this is a recursive action, create a tmp path that we can share,
        # since the one _execute_module would create comes too late.
if not delete_remote_tmp:
if tmp is None or "-tmp-" not in tmp:
tmp = self._make_tmp_path()
# expand any user home dir specifier
dest = self._remote_expand_user(dest, tmp)
for source_full, source_rel in source_files:
# Generate a hash of the local file.
local_checksum = checksum(source_full)
# If local_checksum is not defined we can't find the file so we should fail out.
if local_checksum is None:
return dict(failed=True, msg="could not find src=%s" % source_full)
            # This is a kind of optimization: if the user told us the
            # destination is a dir, do the path manipulation right away;
            # otherwise we still check for dest being a dir via the remote
            # call below.
if self._connection._shell.path_has_trailing_slash(dest):
dest_file = self._connection._shell.join_path(dest, source_rel)
else:
dest_file = self._connection._shell.join_path(dest)
# Attempt to get the remote checksum
remote_checksum = self._remote_checksum(tmp, dest_file)
if remote_checksum == '3':
# The remote_checksum was executed on a directory.
if content is not None:
# If source was defined as content remove the temporary file and fail out.
self._remove_tempfile_if_content_defined(content, content_tempfile)
return dict(failed=True, msg="can not use content with a dir as dest")
else:
# Append the relative source location to the destination and retry remote_checksum
dest_file = self._connection._shell.join_path(dest, source_rel)
remote_checksum = self._remote_checksum(tmp, dest_file)
if remote_checksum != '1' and not force:
# remote_file does not exist so continue to next iteration.
continue
if local_checksum != remote_checksum:
# The checksums don't match and we will change or error out.
changed = True
# Create a tmp path if missing only if this is not recursive.
# If this is recursive we already have a tmp path.
if delete_remote_tmp:
if tmp is None or "-tmp-" not in tmp:
tmp = self._make_tmp_path()
if self._play_context.diff and not raw:
diffs.append(self._get_diff_data(tmp, dest_file, source_full, task_vars))
if self._play_context.check_mode:
self._remove_tempfile_if_content_defined(content, content_tempfile)
changed = True
module_return = dict(changed=True)
continue
# Define a remote directory that we will copy the file to.
tmp_src = self._connection._shell.join_path(tmp, 'source')
if not raw:
self._connection.put_file(source_full, tmp_src)
else:
self._connection.put_file(source_full, dest_file)
# We have copied the file remotely and no longer require our content_tempfile
self._remove_tempfile_if_content_defined(content, content_tempfile)
# fix file permissions when the copy is done as a different user
if self._play_context.become and self._play_context.become_user != 'root':
self._remote_chmod('a+r', tmp_src, tmp)
if raw:
# Continue to next iteration if raw is defined.
continue
# Run the copy module
# src and dest here come after original and override them
# we pass dest only to make sure it includes trailing slash in case of recursive copy
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
src=tmp_src,
dest=dest,
original_basename=source_rel,
)
)
module_return = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, delete_remote_tmp=delete_remote_tmp)
module_executed = True
else:
# no need to transfer the file, already correct hash, but still need to call
# the file module in case we want to change attributes
self._remove_tempfile_if_content_defined(content, content_tempfile)
if raw:
# Continue to next iteration if raw is defined.
self._remove_tmp_path(tmp)
continue
# Build temporary module_args.
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
src=source_rel,
dest=dest,
original_basename=source_rel
)
)
# Execute the file module.
module_return = self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, delete_remote_tmp=delete_remote_tmp)
module_executed = True
if not module_return.get('checksum'):
module_return['checksum'] = local_checksum
            if module_return.get('failed'):
                return module_return
            if module_return.get('changed'):
                changed = True
# the file module returns the file path as 'path', but
# the copy module uses 'dest', so add it if it's not there
if 'path' in module_return and 'dest' not in module_return:
module_return['dest'] = module_return['path']
# Delete tmp path if we were recursive or if we did not execute a module.
if (not C.DEFAULT_KEEP_REMOTE_FILES and not delete_remote_tmp) or (not C.DEFAULT_KEEP_REMOTE_FILES and delete_remote_tmp and not module_executed):
self._remove_tmp_path(tmp)
# TODO: Support detailed status/diff for multiple files
if module_executed and len(source_files) == 1:
result = module_return
else:
result = dict(dest=dest, src=source, changed=changed)
if len(diffs) == 1:
            result['diff'] = diffs[0]
return result
def _create_content_tempfile(self, content):
''' Create a tempfile containing defined content '''
fd, content_tempfile = tempfile.mkstemp()
f = os.fdopen(fd, 'wb')
content = to_bytes(content)
try:
f.write(content)
except Exception as err:
os.remove(content_tempfile)
raise Exception(err)
finally:
f.close()
return content_tempfile
def _remove_tempfile_if_content_defined(self, content, content_tempfile):
if content is not None:
os.remove(content_tempfile)
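# For illustration only (not part of this file): this action plugin receives
# its args from a play task such as:
#   - copy: src=files/foo.conf dest=/etc/foo.conf force=yes
# or, using inline content instead of a source file:
#   - copy: content="hello" dest=/tmp/hello.txt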
|
hachard/Cra-Magnet | refs/heads/master | flask/lib/python3.5/site-packages/openid/store/nonce.py | 180 | __all__ = [
'split',
'mkNonce',
'checkTimestamp',
]
from openid import cryptutil
from time import strptime, strftime, gmtime, time
from calendar import timegm
import string
NONCE_CHARS = string.ascii_letters + string.digits
# Keep nonces for five hours (allow five hours for the combination of
# request time and clock skew). This is probably way more than is
# necessary, but there is not much overhead in storing nonces.
SKEW = 60 * 60 * 5
time_fmt = '%Y-%m-%dT%H:%M:%SZ'
time_str_len = len('0000-00-00T00:00:00Z')
def split(nonce_string):
"""Extract a timestamp from the given nonce string
@param nonce_string: the nonce from which to extract the timestamp
@type nonce_string: str
@returns: A pair of a Unix timestamp and the salt characters
@returntype: (int, str)
@raises ValueError: if the nonce does not start with a correctly
formatted time string
"""
timestamp_str = nonce_string[:time_str_len]
try:
timestamp = timegm(strptime(timestamp_str, time_fmt))
except AssertionError: # Python 2.2
timestamp = -1
if timestamp < 0:
raise ValueError('time out of range')
return timestamp, nonce_string[time_str_len:]
def checkTimestamp(nonce_string, allowed_skew=SKEW, now=None):
"""Is the timestamp that is part of the specified nonce string
within the allowed clock-skew of the current time?
@param nonce_string: The nonce that is being checked
@type nonce_string: str
@param allowed_skew: How many seconds should be allowed for
completing the request, allowing for clock skew.
@type allowed_skew: int
@param now: The current time, as a Unix timestamp
@type now: int
@returntype: bool
@returns: Whether the timestamp is correctly formatted and within
the allowed skew of the current time.
"""
try:
stamp, _ = split(nonce_string)
except ValueError:
return False
else:
if now is None:
now = time()
# Time after which we should not use the nonce
past = now - allowed_skew
# Time that is too far in the future for us to allow
future = now + allowed_skew
# the stamp is not too far in the future and is not too far in
# the past
return past <= stamp <= future
def mkNonce(when=None):
"""Generate a nonce with the current timestamp
@param when: Unix timestamp representing the issue time of the
nonce. Defaults to the current time.
@type when: int
@returntype: str
@returns: A string that should be usable as a one-way nonce
@see: time
"""
salt = cryptutil.randomString(6, NONCE_CHARS)
if when is None:
t = gmtime()
else:
t = gmtime(when)
time_str = strftime(time_fmt, t)
return time_str + salt
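# Usage sketch (illustrative, not part of the original module):
# nonce = mkNonce()          # timestamp string plus 6 random salt characters
# stamp, salt = split(nonce) # -> (unix timestamp, salt)
# checkTimestamp(nonce)      # True while within SKEW seconds of now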
|
GoogleChrome/big-rig | refs/heads/master | app/src/thirdparty/telemetry/internal/util/path.py | 6 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import util
# TODO(dtu): Move these functions from core.util to here.
GetBaseDir = util.GetBaseDir
GetTelemetryDir = util.GetTelemetryDir
GetUnittestDataDir = util.GetUnittestDataDir
GetChromiumSrcDir = util.GetChromiumSrcDir
AddDirToPythonPath = util.AddDirToPythonPath
GetBuildDirectories = util.GetBuildDirectories
def IsExecutable(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
def FindInstalledWindowsApplication(application_path):
"""Search common Windows installation directories for an application.
Args:
application_path: Path to application relative from installation location.
Returns:
A string representing the full path, or None if not found.
"""
search_paths = [os.getenv('PROGRAMFILES(X86)'),
os.getenv('PROGRAMFILES'),
os.getenv('LOCALAPPDATA')]
search_paths += os.getenv('PATH', '').split(os.pathsep)
for search_path in search_paths:
if not search_path:
continue
path = os.path.join(search_path, application_path)
if IsExecutable(path):
return path
return None
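# Example call (hypothetical application path, not part of the original module):
# FindInstalledWindowsApplication(
#     os.path.join('Google', 'Chrome', 'Application', 'chrome.exe'))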
def IsSubpath(subpath, superpath):
"""Returns True iff subpath is or is in superpath."""
subpath = os.path.realpath(subpath)
superpath = os.path.realpath(superpath)
while len(subpath) >= len(superpath):
if subpath == superpath:
return True
subpath = os.path.split(subpath)[0]
return False
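# For illustration (not part of the original module):
# IsSubpath('/a/b/c', '/a/b') -> True  (c lives under /a/b)
# IsSubpath('/a/bc', '/a/b')  -> False (a string prefix match is not enough)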
def ListFiles(base_directory, should_include_dir=lambda _: True,
should_include_file=lambda _: True):
matching_files = []
for root, dirs, files in os.walk(base_directory):
dirs[:] = [dir_name for dir_name in dirs if should_include_dir(dir_name)]
matching_files += [os.path.join(root, file_name)
for file_name in files if should_include_file(file_name)]
return sorted(matching_files)
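# Example call (hypothetical directory, not part of the original module):
# ListFiles('src',
#           should_include_dir=lambda d: not d.startswith('.'),
#           should_include_file=lambda f: f.endswith('.py'))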
|
qiqi/fds | refs/heads/master | pascal_lite/operators/plinalg.py | 1 | import numpy as np
from numpy import *
try:
from mpi4py import MPI
except ImportError:
pass
def pQR(comm, A):
    # Compute the QR factorization A = Q*R. A is assumed to be a tall matrix whose rows are distributed among processors.
Ashape = A.shape
assert Ashape[0] >= Ashape[1]
root = 0
rank = comm.Get_rank()
size = comm.Get_size()
# Compute Q1 and R1:
Q1, R1 = linalg.qr(A)
Rshape = R1.shape
assert len(Rshape) == 1 or len(Rshape) == 2
if len(Rshape) == 1:
mR = Rshape[0]
nR = 1
elif len(Rshape) == 2:
mR = Rshape[0]
nR = Rshape[1]
assert mR == nR
# Gather R1 in root processor:
sendbuf = R1
if rank == root:
recvbuf = np.empty((size, mR, nR), dtype='d')
else:
recvbuf = None
comm.Gather(sendbuf, recvbuf, root)
# Reshape recvbuf and compute Q2:
if rank == root:
R1full = recvbuf.reshape(size*mR, nR)
Q2, R = linalg.qr(R1full)
sendbuf = Q2
else:
R = np.empty((mR,nR), dtype='d')
sendbuf = None
# Broadcast R
comm.Bcast(R, root)
# Scatter Q2
recvbuf = np.empty((mR, nR), dtype='d')
comm.Scatter(sendbuf, recvbuf, root)
Q2 = recvbuf
# Compute Q
Q = dot(Q1,Q2)
S = np.diag(np.sign(np.diag(R)))
R = np.dot(S,R)
Q = np.dot(Q,S)
return Q, R
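# Usage sketch (illustrative; assumes an MPI launch such as
# `mpiexec -n 4 python script.py`, with each rank holding a row block of A):
# comm = MPI.COMM_WORLD
# A_local = np.random.rand(100, 5)
# Q_local, R = pQR(comm, A_local)   # Q distributed like A, R replicated
# assert np.allclose(A_local, Q_local.dot(R))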
def pdot(comm, A, B):
    # Compute the contraction C = sum(A * B) over the last axis, reduced across
    # all ranks (a distributed inner product along the contraction dimension).
Ashape = A.shape
Bshape = B.shape
assert len(Ashape) == 1 or len(Ashape) == 2
if len(Ashape) == 1:
m1 = 1
n1 = Ashape[0]
elif len(Ashape) == 2:
m1 = Ashape[0]
n1 = Ashape[1]
assert len(Bshape) == 1 or len(Bshape) == 2
if len(Bshape) == 1:
m2 = Bshape[0]
n2 = 1
elif len(Bshape) == 2:
m2 = Bshape[0]
n2 = Bshape[1]
#C_local = dot(A,B)
#C_global = np.zeros((m1,n2))
C_local = (A*B).sum(-1)
C_global = np.zeros_like(C_local)
comm.Allreduce([C_local, MPI.DOUBLE], C_global, MPI.SUM)
return C_global
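# Usage sketch (same MPI assumptions as above): with the contracted axis
# distributed across ranks, every rank gets the same reduced result:
# x_local = np.random.rand(4, 10)
# y_local = np.random.rand(4, 10)
# totals = pdot(comm, x_local, y_local)  # shape (4,), row-wise inner products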
|
m-walters/mcmd | refs/heads/master | nn/cnn/cnn.py | 1 | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1, 28, 28, 1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2)
W_fc1 = weight_variable([28 * 28 * 64, 512])
b_fc1 = bias_variable([512])
h_conv2_flat = tf.reshape(h_conv2, [-1, 28*28*64])
h_fc1 = tf.nn.relu(tf.matmul(h_conv2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([512, 10])
b_fc2 = bias_variable([10])
y = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(200):
batch = mnist.train.next_batch(50)
if i % 10 == 0:
train_accuracy = accuracy.eval(feed_dict={
x: batch[0], y_: batch[1], keep_prob: 1.0})
print('step %d, training accuracy %g' % (i, train_accuracy))
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print('test accuracy %g' % accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
|
pinterb/st2 | refs/heads/master | st2common/st2common/models/api/tag.py | 2 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2common.models.db.stormbase import TagField
class TagsHelper(object):
@staticmethod
def to_model(tags):
return [TagField(name=tag.get('name', ''), value=tag.get('value', '')) for tag in tags]
@staticmethod
def from_model(tags):
return [{'name': tag.name, 'value': tag.value} for tag in tags]
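# Illustrative round trip (hypothetical tag values, not part of the original
# module):
# tags = TagsHelper.to_model([{'name': 'env', 'value': 'prod'}])
# TagsHelper.from_model(tags)  # -> [{'name': 'env', 'value': 'prod'}]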
|
clarko1/Cramd | refs/heads/master | bigtable/hello/main_test.py | 3 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from main import main
TABLE_NAME_FORMAT = 'hello-bigtable-system-tests-{}'
TABLE_NAME_RANGE = 10000
def test_main(cloud_config, capsys):
table_name = TABLE_NAME_FORMAT.format(
random.randrange(TABLE_NAME_RANGE))
main(
cloud_config.project,
cloud_config.bigtable_instance,
table_name)
out, _ = capsys.readouterr()
assert 'Creating the {} table.'.format(table_name) in out
assert 'Writing some greetings to the table.' in out
assert 'Getting a single greeting by row key.' in out
assert 'Hello World!' in out
assert 'Scanning for all greetings' in out
assert 'Hello Cloud Bigtable!' in out
assert 'Deleting the {} table.'.format(table_name) in out
|
ANR-COMPASS/shesha | refs/heads/master | shesha/ao/imats.py | 1 | ## @package shesha.ao.imats
## @brief Computation implementations of interaction matrix
## @author COMPASS Team <https://github.com/ANR-COMPASS>
## @version 5.0.0
## @date 2020/05/18
## @copyright GNU Lesser General Public License
#
# This file is part of COMPASS <https://anr-compass.github.io/compass/>
#
# Copyright (C) 2011-2019 COMPASS Team <https://github.com/ANR-COMPASS>
# All rights reserved.
# Distributed under GNU - LGPL
#
# COMPASS is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser
# General Public License as published by the Free Software Foundation, either version 3 of the License,
# or any later version.
#
# COMPASS: End-to-end AO simulation tool using GPU acceleration
# The COMPASS platform was designed to meet the need of high-performance for the simulation of AO systems.
#
# The final product includes a software package for simulating all the critical subcomponents of AO,
# particularly in the context of the ELT and a real-time core based on several control approaches,
# with performances consistent with its integration into an instrument. Taking advantage of the specific
# hardware architecture of the GPU, the COMPASS tool allows to achieve adequate execution speeds to
# conduct large simulation campaigns called to the ELT.
#
# The COMPASS platform can be used to carry a wide variety of simulations to both testspecific components
# of AO of the E-ELT (such as wavefront analysis device with a pyramid or elongated Laser star), and
# various systems configurations such as multi-conjugate AO.
#
# COMPASS is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with COMPASS.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.txt>.
import numpy as np # type: ignore
import time
from typing import List # Mypy checker
from tqdm import tqdm, trange
import shesha.config as conf
import shesha.constants as scons
import shesha.init.lgs_init as lgs
import shesha.util.hdf5_util as h5u
from shesha.sutra_wrap import Sensors, Dms, Rtc_FFF as Rtc
from shesha.constants import CONST
from astropy.io import fits
def imat_geom(wfs: Sensors, dms: Dms, p_wfss: List[conf.Param_wfs],
p_dms: List[conf.Param_dm], p_controller: conf.Param_controller,
meth: int = 0) -> np.ndarray:
""" Compute the interaction matrix with a geometric method
:parameters:
wfs: (Sensors) : Sensors object
dms: (Dms) : Dms object
p_wfss: (list of Param_wfs) : wfs settings
p_dms: (list of Param_dm) : dms settings
p_controller: (Param_controller) : controller settings
meth: (int) : (optional) method type (0 or 1)
"""
nwfs = p_controller.nwfs.size
ndm = p_controller.ndm.size
imat_size1 = 0
imat_size2 = 0
for dm in dms.d_dms:
dm.reset_shape()
for nw in range(nwfs):
nm = p_controller.nwfs[nw]
imat_size1 += p_wfss[nm]._nvalid * 2
for nmc in range(ndm):
nm = p_controller.ndm[nmc]
imat_size2 += p_dms[nm]._ntotact
imat_cpu = np.zeros((imat_size1, imat_size2), dtype=np.float32)
ind = 0
cc = 0
print("Doing imat geom...")
for nmc in range(ndm):
nm = p_controller.ndm[nmc]
dms.d_dms[nm].reset_shape()
for i in tqdm(range(p_dms[nm]._ntotact), desc="DM%d" % nmc):
dms.d_dms[nm].comp_oneactu(i, p_dms[nm].push4imat)
nslps = 0
for nw in range(nwfs):
n = p_controller.nwfs[nw]
wfs.d_wfs[n].d_gs.raytrace(dms, rst=1)
wfs.d_wfs[n].slopes_geom(meth)
imat_cpu[nslps:nslps + p_wfss[n]._nvalid * 2, ind] = np.array(
wfs.d_wfs[n].d_slopes)
nslps += p_wfss[n]._nvalid * 2
imat_cpu[:, ind] = imat_cpu[:, ind] / p_dms[nm].push4imat
ind = ind + 1
cc = cc + 1
dms.d_dms[nm].reset_shape()
return imat_cpu
def imat_init(ncontrol: int, rtc: Rtc, dms: Dms, p_dms: list, wfs: Sensors, p_wfss: list,
p_tel: conf.Param_tel, p_controller: conf.Param_controller, M2V=None,
dataBase: dict = {}, use_DB: bool = False) -> None:
""" Initialize and compute the interaction matrix on the GPU
:parameters:
ncontrol: (int) : controller's index
rtc: (Rtc) : Rtc object
dms: (Dms) : Dms object
p_dms: (Param_dms) : dms settings
wfs: (Sensors) : Sensors object
p_wfss: (list of Param_wfs) : wfs settings
p_tel: (Param_tel) : telescope settings
p_controller: (Param_controller) : controller settings
M2V:(np.array) : KL_matrix
dataBase:(dict): (optional) dict containing paths to files to load
use_DB:(bool) : (optional) use dataBase flag
"""
# first check if wfs is using lgs
# if so, load new lgs spot, just for imat
for i in range(len(p_wfss)):
if (p_wfss[i].gsalt > 0):
# TODO: check that
save_profile = p_wfss[i].proftype
p_wfss[i].proftype = scons.ProfType.GAUSS1
lgs.prep_lgs_prof(p_wfss[i], i, p_tel, wfs, imat=1)
if "imat" in dataBase:
imat = h5u.load_imat_from_dataBase(dataBase)
rtc.d_control[ncontrol].set_imat(imat)
else:
t0 = time.time()
if M2V is not None:
p_controller._M2V = M2V.copy()
rtc.do_imat_basis(ncontrol, dms, M2V.shape[1], M2V, p_controller.klpush)
else:
rtc.do_imat(ncontrol, dms)
print("done in %f s" % (time.time() - t0))
imat = np.array(rtc.d_control[ncontrol].d_imat)
if use_DB:
h5u.save_imat_in_dataBase(imat)
p_controller.set_imat(imat)
# Restore original profile in lgs spots
for i in range(len(p_wfss)):
if (p_wfss[i].gsalt > 0):
p_wfss[i].proftype = save_profile
lgs.prep_lgs_prof(p_wfss[i], i, p_tel, wfs)
#write imat_ts:
# loop over ts directions
# change WFS offset to direction
# do imat geom
def imat_geom_ts_multiple_direction(wfs: Sensors, dms: Dms, p_wfss: List[conf.Param_wfs],
p_dms: List[conf.Param_dm], p_geom: conf.Param_geom,
ind_TS: int, ind_dmseen: List, p_tel: conf.Param_tel,
x, y, meth: int = 0) -> np.ndarray:
""" Compute the interaction matrix with a geometric method for multiple truth sensors (with different direction)
:parameters:
wfs: (Sensors) : Sensors object
dms: (Dms) : Dms object
p_wfss: (list of Param_wfs) : wfs settings
ind_TS: (int) : index of the truth sensor in the wfs settings list
p_dms: (list of Param_dm) : dms settings
        ind_dmseen: (list of int) : indices of used DMs
        p_geom: (Param_geom) : geometry settings
        p_tel: (Param_tel) : telescope settings
        x: (np.ndarray) : TS directions along the x axis (arcsec)
        y: (np.ndarray) : TS directions along the y axis (arcsec)
meth: (int) : (optional) method type (0 or 1)
"""
if (ind_TS < 0):
ind_TS = len(p_wfss) - 1
imat_size2 = 0
print("DMS_SEEN: ", ind_dmseen)
for nm in ind_dmseen:
imat_size2 += p_dms[nm]._ntotact
imat_cpu = np.ndarray((0, imat_size2))
for i in trange(x.size, desc="TS pos"):
xpos = x[i]
ypos = y[i]
for k in ind_dmseen:
dims = p_dms[k]._n2 - p_dms[k]._n1 + 1
dim = p_geom._mpupil.shape[0]
if (dim < dims):
dim = dims
xoff = xpos * CONST.ARCSEC2RAD * \
p_dms[k].alt / p_tel.diam * p_geom.pupdiam
yoff = ypos * CONST.ARCSEC2RAD * \
p_dms[k].alt / p_tel.diam * p_geom.pupdiam
xoff = xoff + (dim - p_geom._n) / 2
yoff = yoff + (dim - p_geom._n) / 2
wfs.d_wfs[ind_TS].d_gs.remove_layer(p_dms[k].type, k)
wfs.d_wfs[ind_TS].d_gs.add_layer(p_dms[k].type, k, xoff, yoff)
imat_cpu = np.concatenate(
(imat_cpu, imat_geom_ts(wfs, dms, p_wfss, ind_TS, p_dms, ind_dmseen,
meth)), axis=0)
return imat_cpu
def imat_geom_ts(wfs: Sensors, dms: Dms, p_wfss: conf.Param_wfs, ind_TS: int,
p_dms: List[conf.Param_dm], ind_DMs: List[int],
meth: int = 0) -> np.ndarray:
""" Compute the interaction matrix with a geometric method for a single truth sensor
:parameters:
wfs: (Sensors) : Sensors object
dms: (Dms) : Dms object
p_wfss: (list of Param_wfs) : wfs settings
ind_TS: (int) : index of the truth sensor in the wfs settings list
p_dms: (list of Param_dm) : dms settings
ind_DMs: (list of int) : indices of used DMs
meth: (int) : (optional) method type (0 or 1)
"""
#nwfs = 1 #p_controller.nwfs.size # as parameter list of indices for wfs if several ts (only 1 ts for now)
ndm = len(ind_DMs) #p_controller.ndm.size # as parameter list of indices of used dms
imat_size1 = p_wfss[ind_TS]._nvalid * 2 # as parameter (nvalid)
imat_size2 = 0
# for nw in range(nwfs):
# nm = p_controller.nwfs[nw]
# imat_size1 += p_wfss[nm]._nvalid * 2
for dm in dms.d_dms:
dm.reset_shape()
imat_size2 = 0
for nm in ind_DMs:
imat_size2 += p_dms[nm]._ntotact
imat_cpu = np.zeros((imat_size1, imat_size2), dtype=np.float64)
ind = 0
cc = 0
for nm in tqdm(ind_DMs, desc="imat geom DM"):
dms.d_dms[nm].reset_shape()
for i in trange(p_dms[nm]._ntotact, desc="imat geom actu"):
dms.d_dms[nm].comp_oneactu(i, p_dms[nm].push4imat)
wfs.d_wfs[ind_TS].d_gs.raytrace(dms, rst=1)
wfs.d_wfs[ind_TS].slopes_geom(meth)
imat_cpu[:, ind] = np.array(wfs.d_wfs[ind_TS].d_slopes)
imat_cpu[:, ind] = imat_cpu[:, ind] / p_dms[nm].push4imat
ind = ind + 1
cc = cc + 1
dms.d_dms[nm].reset_shape()
return imat_cpu
def get_metaD(sup, TS_xpos=None, TS_ypos=None, ind_TS=-1, save_metaD=False, nControl=0):
"""Create an interaction matrix for the current simulation given TS position
:parameters:
        sup : : current COMPASS supervisor
TS_xpos : np.ndarray : TS position (x axis)
TS_ypos : np.ndarray : TS position (y axis)
:return:
metaD : np.ndarray :interaction matrix
"""
if (TS_xpos is None):
TS_xpos = np.array([t.xpos for t in sup.config.p_wfs_ts])
elif (isinstance(TS_xpos, list)):
TS_xpos = np.array(TS_xpos)
elif (isinstance(TS_xpos, int) or isinstance(TS_xpos, float)):
TS_xpos = np.array([TS_xpos]).astype(np.float32)
if (TS_xpos.size < 1):
TS_xpos = np.zeros((1))
if (TS_ypos is None):
TS_ypos = np.array([t.ypos for t in sup.config.p_wfs_ts])
elif (isinstance(TS_ypos, list)):
TS_ypos = np.array(TS_ypos)
elif (isinstance(TS_ypos, int) or isinstance(TS_ypos, float)):
TS_ypos = np.array([TS_ypos]).astype(np.float32)
if (TS_ypos.size < 1):
TS_ypos = np.zeros((1))
return imat_geom_ts_multiple_direction(sup.wfs._wfs, sup.dms._dms, sup.config.p_wfss,
sup.config.p_dms, sup.config.p_geom, ind_TS,
sup.config.p_controllers[nControl].ndm,
sup.config.p_tel, TS_xpos, TS_ypos)
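# Usage sketch (illustrative; assumes a configured COMPASS supervisor `sup`,
# which is not part of this module):
# metaD = get_metaD(sup, TS_xpos=[0., 10.], TS_ypos=[0., 0.])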
|
teeple/pns_server | refs/heads/master | work/install/Python-2.7.4/Lib/json/encoder.py | 105 | """Implementation of JSONEncoder
"""
import re
try:
from _json import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
c_encode_basestring_ascii = None
try:
from _json import make_encoder as c_make_encoder
except ImportError:
c_make_encoder = None
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
#ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
INFINITY = float('inf')
FLOAT_REPR = repr
def encode_basestring(s):
"""Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
def py_encode_basestring_ascii(s):
"""Return an ASCII-only JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u{0:04x}'.format(n)
#return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
#return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
encode_basestring_ascii = (
c_encode_basestring_ascii or py_encode_basestring_ascii)
class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method that returns a serializable object for ``o`` if
    possible; otherwise it should call the superclass implementation (to
    raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is false, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If *ensure_ascii* is true (the default), all non-ASCII
characters in the output are escaped with \uXXXX sequences,
and the results are str instances consisting of ASCII
characters only. If ensure_ascii is False, a result may be a
unicode instance. This usually happens if the input contains
unicode strings or the *encoding* parameter is used.
If check_circular is true, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is true, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is true, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation. Since the default
item separator is ', ', the output might include trailing
whitespace when indent is specified. You can use
separators=(',', ': ') to avoid this.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
if default is not None:
self.default = default
self.encoding = encoding
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
# Let the base class default method raise the TypeError
return JSONEncoder.default(self, o)
"""
raise TypeError(repr(o) + " is not JSON serializable")
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = self.iterencode(o, _one_shot=True)
if not isinstance(chunks, (list, tuple)):
chunks = list(chunks)
return ''.join(chunks)
def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
if self.encoding != 'utf-8':
def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
if isinstance(o, str):
o = o.decode(_encoding)
return _orig_encoder(o)
def floatstr(o, allow_nan=self.allow_nan,
_repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
# Check for specials. Note that this type of test is processor
# and/or platform-specific, so do tests which don't depend on the
# internals.
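            # (NaN is the only float value that compares unequal to itself,
            # so `o != o` is a portable NaN test.)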
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
return _repr(o)
if not allow_nan:
raise ValueError(
"Out of range float values are not JSON compliant: " +
repr(o))
return text
if (_one_shot and c_make_encoder is not None
and self.indent is None and not self.sort_keys):
_iterencode = c_make_encoder(
markers, self.default, _encoder, self.indent,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, self.allow_nan)
else:
_iterencode = _make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot)
return _iterencode(o, 0)
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
_key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
## HACK: hand-optimized bytecode; turn globals into locals
ValueError=ValueError,
basestring=basestring,
dict=dict,
float=float,
id=id,
int=int,
isinstance=isinstance,
list=list,
long=long,
str=str,
tuple=tuple,
):
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, basestring):
yield buf + _encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, (int, long)):
yield buf + str(value)
elif isinstance(value, float):
yield buf + _floatstr(value)
else:
yield buf
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (_indent * _current_indent_level))
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = sorted(dct.items(), key=lambda kv: kv[0])
else:
items = dct.iteritems()
for key, value in items:
if isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = _floatstr(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif isinstance(key, (int, long)):
key = str(key)
elif _skipkeys:
continue
else:
raise TypeError("key " + repr(key) + " is not a string")
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, basestring):
yield _encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, (int, long)):
yield str(value)
elif isinstance(value, float):
yield _floatstr(value)
else:
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (_indent * _current_indent_level))
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(o, _current_indent_level):
if isinstance(o, basestring):
yield _encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield _floatstr(o)
elif isinstance(o, (list, tuple)):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
elif isinstance(o, dict):
for chunk in _iterencode_dict(o, _current_indent_level):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
for chunk in _iterencode(o, _current_indent_level):
yield chunk
if markers is not None:
del markers[markerid]
return _iterencode
|
nkhoit/dcss.py | refs/heads/master | dcss/connection.py | 1 | import paramiko
import time
import pexpect
import datetime
import logging
log = logging.getLogger(__name__)
SPLITTER = '`~`'
COMMAND = 'BOT_CMD:'
UTF8 = 'utf-8'
class LocalConnection():
def __init__(self, playerName):
self.isWaitingForResponse = False
self.process = None
self.delay = 0.25
self.validConnection = False
self.lastOutput = ''
self.playerName = playerName
def connect(self):
self.process = pexpect.spawn(
"crawl",
timeout=self.delay)
self.validConnection = self.process.isalive()
log.info("LocalConnection connected:" + str(self.validConnection))
return self.validConnection
def crawl_login(self):
# 'logging in' in this case is typing out the player's name
# and either starting a new game, or loading the old one
log.info("LocalConnection logging in with name: " + self.playerName)
# get_output ensures the program has fully loaded before continuing
self.get_output()
self.send_command(self.playerName, True)
# workaround for a weird bug?
self.process.setwinsize(24, 80)
# \x12 is Ctrl+R (redraw)
return self.send_command('\x12', False)
def disconnect(self):
self.process.terminate()
self.validConnection = False
log.info("LocalConnection disconnecting")
def get_output(self):
done = False
onceMore = True
output = ''
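        # Drain everything crawl has written so far: expect() is retried
        # until it times out twice in a row (tracked by `onceMore`), since
        # a single timeout may only mean the screen is still being redrawn.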
while not done:
match = self.process.expect(['\\x1b\[40m', pexpect.TIMEOUT])
if match == 0:
buf = self.process.before
if isinstance(buf, bytes):
buf = buf.decode()
output += buf
onceMore = True
elif match == 1:
if not onceMore:
done = True
else:
onceMore = False
self.lastOutput = output
log.debug("LocalConnection received: " + repr(self.lastOutput))
return output
def send_command(self, command, addNewline=False):
newlineLog = ""
if addNewline:
newlineLog = "\\r"
log.debug(
"LocalConnection sending command: " +
repr(command) +
newlineLog)
if(command):
self.isWaitingForResponse = True
self.process.send(command)
if(addNewline):
self.isWaitingForResponse = True
self.process.send("\r")
time.sleep(self.delay)
return self.get_output()
class RemoteConnection():
def __init__(self, crawlLoginName, crawlLoginPassword):
super().__init__()
self.isWaitingForResponse = False
self.connectionString = "crawl.akrasiac.org"
self.sshUsername = "joshua"
self.sshPassword = "joshua"
self.delay = 0.5
self.username = crawlLoginName
self.password = crawlLoginPassword
self.sshClient = None
self.sshChannel = None
self.bufferSize = 4096
self.validConnection = False
self.lastOutput = ''
def connect(self):
self.sshClient = paramiko.SSHClient()
self.sshClient.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.sshClient.connect(
self.connectionString,
username=self.sshUsername,
password=self.sshPassword)
self.sshChannel = self.sshClient.invoke_shell()
# TODO:figure a way to verify connecting was successful
self.validConnection = True
log.info("RemoteConnection connected: " + str(self.validConnection))
return self.validConnection
def crawl_login(self):
# navigate the crawl login commands
self.send_command('L', False)
self.send_command(self.username, True)
self.send_command(self.password, True)
# select trunk branch
self.send_command('T', False)
result = self.send_command('P', False)
log.info("RemoteConnection logged in")
return result
def disconnect(self):
if self.sshClient:
self.sshClient.close()
self.validConnection = False
log.info("RemoteConnection disconnected")
def get_output(self):
output = ''
while(self.sshChannel.recv_ready()):
if(self.isWaitingForResponse):
self.isWaitingForResponse = False
buffer = self.sshChannel.recv(self.bufferSize)
if(len(buffer) != 0):
output += buffer.decode(UTF8)
self.lastOutput = output
log.debug("RemoteConnection received: " + repr(self.lastOutput))
return output
def send_command(self, command, addNewline):
log.debug("RemoteConnection sending command: " + str(command))
if(command):
self.isWaitingForResponse = True
self.sshChannel.send(command)
time.sleep(self.delay)
if(addNewline):
self.isWaitingForResponse = True
self.get_output()
self.sshChannel.send('\n')
time.sleep(self.delay)
return self.get_output()
|
GRArmstrong/invenio-inspire-ops | refs/heads/prod | modules/miscutil/lib/upgrades/invenio_2013_03_29_idxINDEX_stopwords_update.py | 19 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from invenio.dbquery import run_sql
depends_on = ['invenio_2013_03_28_idxINDEX_tokenizer']
def info():
return "Updates column remove_stopwords of idxINDEX table with path to default 'stopwords' file if necessary"
def do_upgrade():
#different stopwords file for every index:
#need to update default stopwords path for every index
from invenio.config import CFG_BIBINDEX_REMOVE_STOPWORDS
if CFG_BIBINDEX_REMOVE_STOPWORDS:
if CFG_BIBINDEX_REMOVE_STOPWORDS == 1:
run_sql("UPDATE idxINDEX SET remove_stopwords='stopwords.kb'")
def estimate():
return 1
def pre_upgrade():
pass
def post_upgrade():
print 'NOTE: please double check your new index stopword settings in BibIndex Admin Interface.'
|
emfcamp/micropython | refs/heads/tilda-master | tests/inlineasm/asmrettype.py | 54 | # test return type of inline asm
@micropython.asm_thumb
def ret_obj(r0) -> object:
pass
ret_obj(print)(1)
@micropython.asm_thumb
def ret_bool(r0) -> bool:
pass
print(ret_bool(0), ret_bool(1))
@micropython.asm_thumb
def ret_int(r0) -> int:
lsl(r0, r0, 29)
print(ret_int(0), hex(ret_int(1)), hex(ret_int(2)), hex(ret_int(4)))
@micropython.asm_thumb
def ret_uint(r0) -> uint:
lsl(r0, r0, 29)
print(ret_uint(0), hex(ret_uint(1)), hex(ret_uint(2)), hex(ret_uint(4)))
|
mkaluza/external_chromium_org | refs/heads/kk44 | third_party/tlslite/tlslite/integration/XMLRPCTransport.py | 87 | """TLS Lite + xmlrpclib."""
import xmlrpclib
import httplib
from tlslite.integration.HTTPTLSConnection import HTTPTLSConnection
from tlslite.integration.ClientHelper import ClientHelper
class XMLRPCTransport(xmlrpclib.Transport, ClientHelper):
"""Handles an HTTPS transaction to an XML-RPC server."""
def __init__(self,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings=None):
"""Create a new XMLRPCTransport.
An instance of this class can be passed to L{xmlrpclib.ServerProxy}
to use TLS with XML-RPC calls::
from tlslite.api import XMLRPCTransport
from xmlrpclib import ServerProxy
        transport = XMLRPCTransport(username="alice", password="abra123")
server = ServerProxy("https://localhost", transport)
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The constructor does not perform the TLS handshake itself, but
simply stores these arguments for later. The handshake is
performed only when this class needs to connect with the
server. Thus you should be prepared to handle TLS-specific
exceptions when calling methods of L{xmlrpclib.ServerProxy}. See the
client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
ClientHelper.__init__(self,
username, password, sharedKey,
certChain, privateKey,
cryptoID, protocol,
x509Fingerprint,
x509TrustList, x509CommonName,
settings)
def make_connection(self, host):
# create a HTTPS connection object from a host descriptor
host, extra_headers, x509 = self.get_host_info(host)
http = HTTPTLSConnection(host, None,
self.username, self.password,
self.sharedKey,
self.certChain, self.privateKey,
self.checker.cryptoID,
self.checker.protocol,
self.checker.x509Fingerprint,
self.checker.x509TrustList,
self.checker.x509CommonName,
self.settings)
http2 = httplib.HTTP()
http2._setup(http)
        return http2
|
CERNDocumentServer/invenio | refs/heads/prod | modules/miscutil/lib/memoiseutils_unit_tests.py | 3 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Unit tests for the memoise facility.
"""
from invenio.testutils import InvenioTestCase
from invenio.testutils import make_test_suite, run_test_suite
from invenio.memoiseutils import Memoise
class MemoiseTest(InvenioTestCase):
"""Unit test cases for Memoise."""
def test_memoise_fib(self):
"""memoiseutils - test fib() memoisation"""
from invenio.bibtaskex import fib
fib_memoised = Memoise(fib)
self.assertEqual(fib(17), fib_memoised(17))
TEST_SUITE = make_test_suite(MemoiseTest, )
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
|
murarivarma/candy_crush | refs/heads/gh-pages | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSToolFile.py | 2736 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
name: Name of the rule.
description: Description of the rule.
cmd: Command line of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")
|
jayme-github/CouchPotatoServer | refs/heads/master | couchpotato/core/notifications/base.py | 3 | from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.request import jsonified
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
log = CPLog(__name__)
class Notification(Plugin):
default_title = Env.get('appname')
test_message = 'ZOMG Lazors Pewpewpew!'
listen_to = [
'renamer.after', 'movie.snatched',
'updater.available', 'updater.updated',
]
dont_listen_to = []
def __init__(self):
addEvent('notify.%s' % self.getName().lower(), self.notify)
addApiView(self.testNotifyName(), self.test)
# Attach listeners
for listener in self.listen_to:
if not listener in self.dont_listen_to:
addEvent(listener, self.createNotifyHandler(listener))
def createNotifyHandler(self, listener):
def notify(message = None, group = {}, data = None):
if not self.conf('on_snatch', default = True) and listener == 'movie.snatched':
return
return self.notify(message = message, data = data if data else group, listener = listener)
return notify
def notify(self, message = '', data = {}, listener = None):
pass
def test(self):
test_type = self.testNotifyName()
log.info('Sending test to %s', test_type)
success = self.notify(
message = self.test_message,
data = {},
listener = 'test'
)
return jsonified({'success': success})
def testNotifyName(self):
return 'notify.%s.test' % self.getName().lower()
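# Minimal provider sketch (illustrative, not shipped with CouchPotato):
# a concrete notifier only needs to override notify() and return a bool.
#     class Echo(Notification):
#         def notify(self, message = '', data = {}, listener = None):
#             log.info('Echo notification: %s', message)
#             return True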
|
flavour/Turkey | refs/heads/master | modules/unit_tests/s3/s3datatable.py | 6 | # -*- coding: utf-8 -*-
#
# S3DataTable Unit Tests
#
# To run this script use:
# python web2py.py -S eden -M -R applications/eden/modules/unit_tests/s3/s3datatable.py
#
import unittest
import datetime
from gluon import *
from gluon.storage import Storage
from s3.s3data import S3DataTable
# =============================================================================
class S3DataTableTests(unittest.TestCase):
# -------------------------------------------------------------------------
def setUp(self):
"""
Set up the list of fields each time since the call to S3DataTables
could change it.
"""
current.auth.override = True
resource = current.s3db.resource("org_office")
list_fields = ["id",
"organisation_id$name",
"organisation_id$address",
"name",
"office_type_id",
"location_id$L0",
"location_id$L1",
"location_id$L2",
"location_id$L3",
"phone1",
"email"
]
self.resource = resource
self.list_fields = list_fields
data = resource.select(list_fields)
self.data = data["rows"]
self.rfields = data["rfields"]
# -------------------------------------------------------------------------
def testDataTableInitialOrderby(self):
""" Test the initial orderby for different types of input. """
table = self.resource.table
dt = S3DataTable(self.rfields, self.data)
expected = [[1, "asc"]]
actual = dt.orderby
self.assertEqual(expected, actual)
dt = S3DataTable(self.rfields, self.data,
orderby=table.name)
expected = [[3, "asc"]]
actual = dt.orderby
self.assertEqual(expected, actual)
dt = S3DataTable(self.rfields, self.data,
orderby=~table.name)
expected = [[3, "desc"]]
actual = dt.orderby
self.assertEqual(expected, actual)
dt = S3DataTable(self.rfields, self.data,
orderby=table.office_type_id | table.name)
expected = [[4, "asc"], [3, "asc"]]
actual = dt.orderby
self.assertEqual(expected, actual)
dt = S3DataTable(self.rfields, self.data,
orderby=~table.office_type_id | table.name)
expected = [[4, "desc"], [3, "asc"]]
actual = dt.orderby
self.assertEqual(expected, actual)
otable = current.s3db.org_organisation
dt = S3DataTable(self.rfields, self.data,
orderby=otable.name | ~table.office_type_id | table.name)
expected = [[1, "asc"], [4, "desc"], [3, "asc"]]
actual = dt.orderby
self.assertEqual(expected, actual)
# -------------------------------------------------------------------------
def tearDown(cls):
current.auth.override = False
# =============================================================================
def run_suite(*test_classes):
""" Run the test suite """
loader = unittest.TestLoader()
suite = unittest.TestSuite()
for test_class in test_classes:
tests = loader.loadTestsFromTestCase(test_class)
suite.addTests(tests)
if suite is not None:
unittest.TextTestRunner(verbosity=2).run(suite)
return
if __name__ == "__main__":
run_suite(
S3DataTableTests,
)
# END ========================================================================
|
wimnat/ansible-modules-extras | refs/heads/devel | cloud/vmware/vmware_vm_facts.py | 75 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vmware_vm_facts
short_description: Return basic facts pertaining to a vSphere virtual machine guest
description:
- Return basic facts pertaining to a vSphere virtual machine guest
version_added: 2.0
author: "Joseph Callen (@jcpowermac)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Gather all registered virtual machines
local_action:
module: vmware_vm_facts
hostname: esxi_or_vcenter_ip_or_hostname
username: username
password: password
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getallvms.py
def get_all_virtual_machines(content):
virtual_machines = get_all_objs(content, [vim.VirtualMachine])
_virtual_machines = {}
for vm in virtual_machines:
_ip_address = ""
summary = vm.summary
if summary.guest is not None:
_ip_address = summary.guest.ipAddress
if _ip_address is None:
_ip_address = ""
virtual_machine = {
summary.config.name: {
"guest_fullname": summary.config.guestFullName,
"power_state": summary.runtime.powerState,
"ip_address": _ip_address
}
}
_virtual_machines.update(virtual_machine)
return _virtual_machines
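# Shape of the returned mapping (illustrative values):
#     {'my-vm': {'guest_fullname': 'Ubuntu Linux (64-bit)',
#                'power_state': 'poweredOn',
#                'ip_address': '192.168.0.10'}}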
def main():
argument_spec = vmware_argument_spec()
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
try:
content = connect_to_api(module)
_virtual_machines = get_all_virtual_machines(content)
module.exit_json(changed=False, virtual_machines=_virtual_machines)
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
module.fail_json(msg=method_fault.msg)
except Exception as e:
module.fail_json(msg=str(e))
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
elijah513/django | refs/heads/master | django/contrib/gis/gdal/layer.py | 477 | from ctypes import byref, c_double
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.envelope import Envelope, OGREnvelope
from django.contrib.gis.gdal.error import (
GDALException, OGRIndexError, SRSException,
)
from django.contrib.gis.gdal.feature import Feature
from django.contrib.gis.gdal.field import OGRFieldTypes
from django.contrib.gis.gdal.geometries import OGRGeometry
from django.contrib.gis.gdal.geomtype import OGRGeomType
from django.contrib.gis.gdal.prototypes import (
ds as capi, geom as geom_api, srs as srs_api,
)
from django.contrib.gis.gdal.srs import SpatialReference
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.utils.six.moves import range
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_L_* routines are relevant here.
class Layer(GDALBase):
"A class that wraps an OGR Layer, needs to be instantiated from a DataSource object."
def __init__(self, layer_ptr, ds):
"""
Initializes on an OGR C pointer to the Layer and the `DataSource` object
that owns this layer. The `DataSource` object is required so that a
reference to it is kept with this Layer. This prevents garbage
collection of the `DataSource` while this Layer is still active.
"""
if not layer_ptr:
raise GDALException('Cannot create Layer, invalid pointer given')
self.ptr = layer_ptr
self._ds = ds
self._ldefn = capi.get_layer_defn(self._ptr)
# Does the Layer support random reading?
self._random_read = self.test_capability(b'RandomRead')
def __getitem__(self, index):
"Gets the Feature at the specified index."
if isinstance(index, six.integer_types):
# An integer index was given -- we cannot do a check based on the
# number of features because the beginning and ending feature IDs
# are not guaranteed to be 0 and len(layer)-1, respectively.
if index < 0:
raise OGRIndexError('Negative indices are not allowed on OGR Layers.')
return self._make_feature(index)
elif isinstance(index, slice):
# A slice was given
start, stop, stride = index.indices(self.num_feat)
return [self._make_feature(fid) for fid in range(start, stop, stride)]
else:
raise TypeError('Integers and slices may only be used when indexing OGR Layers.')
def __iter__(self):
"Iterates over each Feature in the Layer."
# ResetReading() must be called before iteration is to begin.
capi.reset_reading(self._ptr)
for i in range(self.num_feat):
yield Feature(capi.get_next_feature(self._ptr), self)
def __len__(self):
"The length is the number of features."
return self.num_feat
def __str__(self):
"The string name of the layer."
return self.name
def _make_feature(self, feat_id):
"""
Helper routine for __getitem__ that constructs a Feature from the given
Feature ID. If the OGR Layer does not support random-access reading,
        then each feature of the layer will be incremented through until
        a Feature is found matching the given feature ID.
"""
if self._random_read:
# If the Layer supports random reading, return.
try:
return Feature(capi.get_feature(self.ptr, feat_id), self)
except GDALException:
pass
else:
# Random access isn't supported, have to increment through
# each feature until the given feature ID is encountered.
for feat in self:
if feat.fid == feat_id:
return feat
# Should have returned a Feature, raise an OGRIndexError.
raise OGRIndexError('Invalid feature id: %s.' % feat_id)
# #### Layer properties ####
@property
def extent(self):
"Returns the extent (an Envelope) of this layer."
env = OGREnvelope()
capi.get_extent(self.ptr, byref(env), 1)
return Envelope(env)
@property
def name(self):
"Returns the name of this layer in the Data Source."
name = capi.get_fd_name(self._ldefn)
return force_text(name, self._ds.encoding, strings_only=True)
@property
def num_feat(self, force=1):
"Returns the number of features in the Layer."
return capi.get_feature_count(self.ptr, force)
@property
def num_fields(self):
"Returns the number of fields in the Layer."
return capi.get_field_count(self._ldefn)
@property
def geom_type(self):
"Returns the geometry type (OGRGeomType) of the Layer."
return OGRGeomType(capi.get_fd_geom_type(self._ldefn))
@property
def srs(self):
"Returns the Spatial Reference used in this Layer."
try:
ptr = capi.get_layer_srs(self.ptr)
return SpatialReference(srs_api.clone_srs(ptr))
except SRSException:
return None
@property
def fields(self):
"""
Returns a list of string names corresponding to each of the Fields
available in this Layer.
"""
return [force_text(capi.get_field_name(capi.get_field_defn(self._ldefn, i)),
self._ds.encoding, strings_only=True)
for i in range(self.num_fields)]
@property
def field_types(self):
"""
Returns a list of the types of fields in this Layer. For example,
the list [OFTInteger, OFTReal, OFTString] would be returned for
an OGR layer that had an integer, a floating-point, and string
fields.
"""
return [OGRFieldTypes[capi.get_field_type(capi.get_field_defn(self._ldefn, i))]
for i in range(self.num_fields)]
@property
def field_widths(self):
"Returns a list of the maximum field widths for the features."
return [capi.get_field_width(capi.get_field_defn(self._ldefn, i))
for i in range(self.num_fields)]
@property
def field_precisions(self):
"Returns the field precisions for the features."
return [capi.get_field_precision(capi.get_field_defn(self._ldefn, i))
for i in range(self.num_fields)]
def _get_spatial_filter(self):
try:
return OGRGeometry(geom_api.clone_geom(capi.get_spatial_filter(self.ptr)))
except GDALException:
return None
def _set_spatial_filter(self, filter):
if isinstance(filter, OGRGeometry):
capi.set_spatial_filter(self.ptr, filter.ptr)
elif isinstance(filter, (tuple, list)):
if not len(filter) == 4:
raise ValueError('Spatial filter list/tuple must have 4 elements.')
# Map c_double onto params -- if a bad type is passed in it
# will be caught here.
xmin, ymin, xmax, ymax = map(c_double, filter)
capi.set_spatial_filter_rect(self.ptr, xmin, ymin, xmax, ymax)
elif filter is None:
capi.set_spatial_filter(self.ptr, None)
else:
raise TypeError('Spatial filter must be either an OGRGeometry instance, a 4-tuple, or None.')
spatial_filter = property(_get_spatial_filter, _set_spatial_filter)
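    # Example (illustrative; requires an OGR-readable data file):
    #     lyr = DataSource('cities.shp')[0]
    #     lyr.spatial_filter = (-10.0, 35.0, 5.0, 45.0)  # xmin, ymin, xmax, ymax
    #     lyr.spatial_filter = None                      # clear the filter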
# #### Layer Methods ####
def get_fields(self, field_name):
"""
Returns a list containing the given field name for every Feature
in the Layer.
"""
if field_name not in self.fields:
raise GDALException('invalid field name: %s' % field_name)
return [feat.get(field_name) for feat in self]
def get_geoms(self, geos=False):
"""
Returns a list containing the OGRGeometry for every Feature in
the Layer.
"""
if geos:
from django.contrib.gis.geos import GEOSGeometry
return [GEOSGeometry(feat.geom.wkb) for feat in self]
else:
return [feat.geom for feat in self]
def test_capability(self, capability):
"""
        Returns a bool indicating whether this Layer supports the given
capability (a string). Valid capability strings include:
'RandomRead', 'SequentialWrite', 'RandomWrite', 'FastSpatialFilter',
'FastFeatureCount', 'FastGetExtent', 'CreateField', 'Transactions',
'DeleteFeature', and 'FastSetNextByIndex'.
"""
return bool(capi.test_capability(self.ptr, force_bytes(capability)))
|
luiseduardohdbackup/odoo | refs/heads/8.0 | addons/sale_crm/wizard/crm_make_sale.py | 223 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class crm_make_sale(osv.osv_memory):
""" Make sale order for crm """
_name = "crm.make.sale"
_description = "Make sales"
def _selectPartner(self, cr, uid, context=None):
"""
This function gets default value for partner_id field.
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param context: A standard dictionary for contextual values
@return: default value of partner_id field.
"""
if context is None:
context = {}
lead_obj = self.pool.get('crm.lead')
active_id = context and context.get('active_id', False) or False
if not active_id:
return False
lead = lead_obj.read(cr, uid, [active_id], ['partner_id'], context=context)[0]
return lead['partner_id'][0] if lead['partner_id'] else False
def view_init(self, cr, uid, fields_list, context=None):
return super(crm_make_sale, self).view_init(cr, uid, fields_list, context=context)
def makeOrder(self, cr, uid, ids, context=None):
"""
This function create Quotation on given case.
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm make sales' ids
@param context: A standard dictionary for contextual values
@return: Dictionary value of created sales order.
"""
        # update context: when coming from a phonecall, default state values can make the quote crash lp:1017353
context = dict(context or {})
context.pop('default_state', False)
case_obj = self.pool.get('crm.lead')
sale_obj = self.pool.get('sale.order')
partner_obj = self.pool.get('res.partner')
data = context and context.get('active_ids', []) or []
for make in self.browse(cr, uid, ids, context=context):
partner = make.partner_id
partner_addr = partner_obj.address_get(cr, uid, [partner.id],
['default', 'invoice', 'delivery', 'contact'])
pricelist = partner.property_product_pricelist.id
fpos = partner.property_account_position and partner.property_account_position.id or False
payment_term = partner.property_payment_term and partner.property_payment_term.id or False
new_ids = []
for case in case_obj.browse(cr, uid, data, context=context):
if not partner and case.partner_id:
partner = case.partner_id
fpos = partner.property_account_position and partner.property_account_position.id or False
payment_term = partner.property_payment_term and partner.property_payment_term.id or False
partner_addr = partner_obj.address_get(cr, uid, [partner.id],
['default', 'invoice', 'delivery', 'contact'])
pricelist = partner.property_product_pricelist.id
if False in partner_addr.values():
raise osv.except_osv(_('Insufficient Data!'), _('No address(es) defined for this customer.'))
vals = {
'origin': _('Opportunity: %s') % str(case.id),
'section_id': case.section_id and case.section_id.id or False,
'categ_ids': [(6, 0, [categ_id.id for categ_id in case.categ_ids])],
'partner_id': partner.id,
'pricelist_id': pricelist,
'partner_invoice_id': partner_addr['invoice'],
'partner_shipping_id': partner_addr['delivery'],
'date_order': fields.datetime.now(),
'fiscal_position': fpos,
'payment_term':payment_term,
'note': sale_obj.get_salenote(cr, uid, [case.id], partner.id, context=context),
}
if partner.id:
vals['user_id'] = partner.user_id and partner.user_id.id or uid
new_id = sale_obj.create(cr, uid, vals, context=context)
sale_order = sale_obj.browse(cr, uid, new_id, context=context)
case_obj.write(cr, uid, [case.id], {'ref': 'sale.order,%s' % new_id})
new_ids.append(new_id)
message = _("Opportunity has been <b>converted</b> to the quotation <em>%s</em>.") % (sale_order.name)
case.message_post(body=message)
if make.close:
case_obj.case_mark_won(cr, uid, data, context=context)
if not new_ids:
return {'type': 'ir.actions.act_window_close'}
if len(new_ids)<=1:
value = {
'domain': str([('id', 'in', new_ids)]),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'sale.order',
'view_id': False,
'type': 'ir.actions.act_window',
'name' : _('Quotation'),
'res_id': new_ids and new_ids[0]
}
else:
value = {
'domain': str([('id', 'in', new_ids)]),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'sale.order',
'view_id': False,
'type': 'ir.actions.act_window',
'name' : _('Quotation'),
'res_id': new_ids
}
return value
_columns = {
'partner_id': fields.many2one('res.partner', 'Customer', required=True, domain=[('customer','=',True)]),
'close': fields.boolean('Mark Won', help='Check this to close the opportunity after having created the sales order.'),
}
_defaults = {
'close': False,
'partner_id': _selectPartner,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
nthiep/global-ssh-server | refs/heads/master | lib/python2.7/posixpath.py | 4 | /usr/lib/python2.7/posixpath.py
|
RazvanRotari/iaP | refs/heads/master | iaprs/rest_service/rest_service/wsgi.py | 1 | """
WSGI config for rest_service project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rest_service.settings")
application = get_wsgi_application()
|
4bic-attic/grano | refs/heads/master | grano/alembic/versions/38c7982f4160_add_degree_denormalizations.py | 4 | """add degree denormalizations
Revision ID: 38c7982f4160
Revises: 59d7b4f94cdf
Create Date: 2014-09-11 20:32:37.987989
"""
# revision identifiers, used by Alembic.
revision = '38c7982f4160'
down_revision = '59d7b4f94cdf'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(u'grano_entity', sa.Column('degree_in', sa.Integer(), nullable=True))
op.add_column(u'grano_entity', sa.Column('degree_out', sa.Integer(), nullable=True))
op.add_column(u'grano_entity', sa.Column('degree', sa.Integer(), nullable=True))
def downgrade():
op.drop_column(u'grano_entity', 'degree_out')
op.drop_column(u'grano_entity', 'degree_in')
op.drop_column(u'grano_entity', 'degree')
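# Applied and reverted with the standard Alembic CLI, e.g. (illustrative):
#     alembic upgrade 38c7982f4160
#     alembic downgrade 59d7b4f94cdf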
|
Lvl4Sword/Acedia | refs/heads/master | sloth/tests/test_userinput.py | 2 | # Copyright 2015, 2016 Scott King
#
# This file is part of Sloth.
#
# Sloth is free software: you can redistribute it and/or modify
# it under the terms of the Affero GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sloth is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Affero GNU General Public License for more details.
#
# You should have received a copy of the Affero GNU General Public License
# along with Sloth. If not, see <http://www.gnu.org/licenses/>.
#
import unittest
from unittest.mock import patch
class BaseConverterTestCase(unittest.TestCase):
"""
Base class for converter tests. Subclass and override the
`get_converter` method that returns the converter to use in the
`assertConversion*` methods.
"""
def get_converter(self):
raise NotImplementedError
def assertConversionFails(self, raw_value, **kwargs):
"""
Verify the converter raises `ConversionFailed` when called
with the string value.
"""
# guard against bad test values
if type(raw_value) is not str:
raise TypeError('Expected str, got {0}'.format(type(raw_value)))
from sloth.userinput import ConversionFailed
converter = self.get_converter()
with self.assertRaises(ConversionFailed):
converter(raw_value, **kwargs)
def assertConversionResultEquals(self, raw_value, expected_result,
**kwargs):
"""
Verify the converter does not raise `ConversionFailed` when
called with the string value, and that the result matches
`expected_result` and is the same type.
"""
from sloth.userinput import ConversionFailed
converter = self.get_converter()
try:
result = converter(raw_value, **kwargs)
except ConversionFailed as e:
msgfmt = (
'Raw value {raw_value!r} caused unexpected ConversionFailed '
'with failure message {e.failure_message!r}'
)
raise AssertionError(msgfmt.format(raw_value=raw_value, e=e))
self.assertEqual(result, expected_result)
self.assertIs(type(result), type(expected_result))
class PrompterTestCase(BaseConverterTestCase):
def converter(self, raw_value):
return raw_value
def raisesException(*args, **kwargs):
from sloth.userinput import ConversionFailed
raise ConversionFailed('ConversionFailed')
@patch('builtins.input', return_value='passes')
def test_prompt_exception_works(self, input):
from sloth.userinput import Prompter
prompt = Prompter('testing: ', self.raisesException)
prompt.running = False
printed = []
prompt.prompt(_print=printed.append)
self.assertEqual(printed, ['ConversionFailed'])
@patch('builtins.input', return_value='passes')
def test_prompt_works(self, input):
from sloth.userinput import Prompter
prompt = Prompter('testing: ', self.converter)
self.assertEqual(prompt.prompt(), 'passes')
def test_prompter_works(self):
from sloth.userinput import Prompter
text = 'Test'
prompter = Prompter(text, self.converter)
self.assertEqual(prompter.prompt_text, 'Test: ')
class IntegerConverterTestCase(BaseConverterTestCase):
def get_converter(self):
from sloth.userinput import integer_converter
return integer_converter
def test_integers_works(self):
for i in [1, 50, 1234]:
intstr = str(i)
self.assertConversionResultEquals(intstr, i)
def test_nonintegers_fails(self):
self.assertConversionFails('i am certainly not an int')
self.assertConversionFails('0xBEEF')
class FirstNameConverterTestCase(BaseConverterTestCase):
def get_converter(self):
# This shows how we can get the converter from a prompter.
# This is the reason I don't really like the prompter decorator
# API, it conflates prompting with validation and conversion.
# We can do better.
from sloth.userinput import first_name_prompter
return first_name_prompter.convert
def test_leading_and_trailing_whitespace_stripped_works(self):
self.assertConversionResultEquals(' myname ', 'Myname')
def test_normal_name_works(self):
self.assertConversionResultEquals('a', 'A')
self.assertConversionResultEquals('alice', 'Alice')
self.assertConversionResultEquals('aaaaaa', 'Aaaaaa')
def test_name_just_spaces_fails(self):
self.assertConversionFails('')
self.assertConversionFails(' ')
def test_name_longer_than_20_fails(self):
self.assertConversionFails('A' * 21)
class AgeConverterTestCase(BaseConverterTestCase):
def get_converter(self):
from sloth.userinput import age_prompter
return age_prompter.convert
def test_correct_birthday_works(self):
self.assertConversionResultEquals('1999-12-31', '1999-12-31')
def test_day_does_not_exist_fails(self):
self.assertConversionFails('1999-11-31')
def test_incorrect_format_fails(self):
self.assertConversionFails('12-31-1999')
self.assertConversionFails('31-12-1999')
def test_random_fails(self):
self.assertConversionFails('lol')
class SexConverterTestCase(BaseConverterTestCase):
def get_converter(self):
from sloth.userinput import sex_prompter
return sex_prompter.convert
def test_male_works(self):
self.assertConversionResultEquals('m', 'M')
self.assertConversionResultEquals('M', 'M')
def test_female_works(self):
self.assertConversionResultEquals('f', 'F')
self.assertConversionResultEquals('F', 'F')
def test_random_fails(self):
self.assertConversionFails('123')
self.assertConversionFails('foo')
self.assertConversionFails('bar')
class GoalConverterTestCase(BaseConverterTestCase):
def get_converter(self):
from sloth.userinput import goal_prompter
return goal_prompter.convert
def test_valid_works(self):
self.assertConversionResultEquals('1', 1)
self.assertConversionResultEquals('2', 2)
self.assertConversionResultEquals('3', 3)
self.assertConversionResultEquals('4', 4)
def test_random_fails(self):
self.assertConversionFails('123')
self.assertConversionFails('foo')
self.assertConversionFails('bar')
class MeasurementSystemConverterTestCase(BaseConverterTestCase):
def get_converter(self):
from sloth.userinput import measurement_system_prompter
return measurement_system_prompter.convert
def test_metric_works(self):
self.assertConversionResultEquals('m', 'M')
self.assertConversionResultEquals('M', 'M')
def test_imperial_works(self):
self.assertConversionResultEquals('i', 'I')
self.assertConversionResultEquals('I', 'I')
def test_invalid_fails(self):
self.assertConversionFails('123')
self.assertConversionFails('foo')
self.assertConversionFails('imperial')
self.assertConversionFails('metric')
class ImperialHeightConverterTestCase(BaseConverterTestCase):
def get_converter(self):
from sloth.userinput import imperial_body_height_prompter
return imperial_body_height_prompter.convert
def test_twenty_and_lower_fails(self):
self.assertConversionFails('19')
self.assertConversionFails('20')
def test_one_hundred_and_eight_and_higher_fails(self):
self.assertConversionFails('108')
self.assertConversionFails('109')
def test_valid_works(self):
self.assertConversionResultEquals('50', 50)
def test_invalid_fails(self):
self.assertConversionFails('a')
class ImperialWeightConverterTestCase(BaseConverterTestCase):
def get_converter(self):
from sloth.userinput import imperial_body_weight_prompter
return imperial_body_weight_prompter.convert
def test_fifty_and_under_fails(self):
self.assertConversionFails('49')
self.assertConversionFails('50')
def test_one_thousand_and_higher_fails(self):
self.assertConversionFails('1000')
self.assertConversionFails('1001')
def test_valid_works(self):
self.assertConversionResultEquals('200', 200)
def test_non_float_fails(self):
self.assertConversionFails('a')
class MetricHeightConverterTestCase(BaseConverterTestCase):
def get_converter(self):
from sloth.userinput import metric_body_height_prompter
return metric_body_height_prompter.convert
def test_zero_point_five_and_lower_fails(self):
self.assertConversionFails('0.5')
self.assertConversionFails('0.49')
def test_two_point_seven_and_higher_fails(self):
self.assertConversionFails('2.7')
self.assertConversionFails('2.71')
def test_valid_works(self):
self.assertConversionResultEquals('2.0', 2.0)
def test_non_float_fails(self):
self.assertConversionFails('a')
class MetricWeightConverterTestCase(BaseConverterTestCase):
def get_converter(self):
from sloth.userinput import metric_body_weight_prompter
return metric_body_weight_prompter.convert
def test_twenty_two_point_six_seven_nine_and_under_fails(self):
self.assertConversionFails('22.679')
self.assertConversionFails('22.67')
def test_four_hundred_fifty_three_point_five_nine_two_and_over_fails(self):
self.assertConversionFails('453.592')
self.assertConversionFails('453.6')
def test_valid_works(self):
self.assertConversionResultEquals('200', 200.0)
def test_non_float_fails(self):
self.assertConversionFails('a')
class CardioTimeConverterTestCase(BaseConverterTestCase):
def get_converter(self):
from sloth.userinput import cardio_time_converter
return cardio_time_converter
def test_prompter_works(self):
from sloth.userinput import cardio_time_prompter
activity = None
prompter = cardio_time_prompter(activity)
expected = 'How long did you go? (10:00/10:00:00): '
self.assertEqual(prompter.prompt_text, expected)
def test_notime_fails(self):
self.assertConversionFails('not a time')
def test_minutes_seconds_works(self):
import datetime
seconds = 23 * 60 + 45
seconds_timedelta = datetime.timedelta(0, seconds)
self.assertConversionResultEquals('23:45', seconds_timedelta)
def test_hours_minutes_seconds_works(self):
import datetime
seconds = 2 * 3600 + 49 * 60 + 34
seconds_timedelta = datetime.timedelta(0, seconds)
self.assertConversionResultEquals('02:49:34', seconds_timedelta)
def test_hours_minutes_seconds_and_more_fails(self):
self.assertConversionFails('0:1:2:3')
self.assertConversionFails('0:1:2:3:4')
def test_24_hours_and_more_fails(self):
self.assertConversionFails('23:60:00')
self.assertConversionFails('0:1440:0')
self.assertConversionFails('0:0:86400')
class CardioDistanceImperialDistanceConverterTestCase(BaseConverterTestCase):
def get_converter(self):
from sloth.userinput import cardio_distance_imperial_converter
return cardio_distance_imperial_converter
def test_prompter_works(self):
from sloth.userinput import cardio_distance_imperial_prompter
activity = None
prompter = cardio_distance_imperial_prompter(activity)
expected = 'How many miles? (mi to km is 1.609344): '
self.assertEqual(prompter.prompt_text, expected)
def test_valid_works(self):
self.assertConversionResultEquals('10.234', 10.234, activity=None)
def test_invalid_fails(self):
self.assertConversionFails('50.1', activity=None)
def test_ridiculous_fails(self):
self.assertConversionFails('a', activity=None)
class CardioDistanceMetricDistanceConverterTestCase(BaseConverterTestCase):
def get_converter(self):
from sloth.userinput import cardio_distance_metric_converter
return cardio_distance_metric_converter
def test_prompter_works(self):
from sloth.userinput import cardio_distance_metric_prompter
activity = None
prompter = cardio_distance_metric_prompter(activity)
expected = 'How many kilometers? (km to mi is 0.62137): '
self.assertEqual(prompter.prompt_text, expected)
def test_reasonable_works(self):
self.assertConversionResultEquals('10.234', 10.234, activity='Run')
def test_invalid_fails(self):
self.assertConversionFails('80.4674', activity='Run')
def test_non_float_fails(self):
self.assertConversionFails('F', activity='Run')
class AgilityStatTestCase(BaseConverterTestCase):
def get_converter(self):
from sloth.userinput import stats_agi_converter
return stats_agi_converter
def test_prompter_works(self):
from sloth.userinput import stats_agi_prompter
activity = [0]
prompter = stats_agi_prompter(activity)
expected = 'Agility - Your reaction time (0/10) (26 left): '
self.assertEqual(prompter.prompt_text, expected)
def test_reasonable_works(self):
self.assertConversionResultEquals('1', (1, 1), activity=26)
def test_over_10_fails(self):
self.assertConversionFails('11', activity=26)
def test_value_error_works(self):
self.assertConversionFails('c', activity=26)
class CharismaStatConverterTestCase(BaseConverterTestCase):
def get_converter(self):
from sloth.userinput import stats_chr_converter
return stats_chr_converter
def test_prompter_works(self):
from sloth.userinput import stats_chr_prompter
activity = [0]
prompter = stats_chr_prompter(activity)
expected = 'Charisma - Influence over others (26 left): '
self.assertEqual(prompter.prompt_text, expected)
def test_reasonable_works(self):
self.assertConversionResultEquals('1', (1, 2), activity=26)
def test_back_works(self):
self.assertConversionResultEquals('b', (0, 0), activity=26)
def test_over_10_fails(self):
self.assertConversionFails('12', activity=26)
def test_value_error_works(self):
self.assertConversionFails('d', activity=26)
class DefenseStatConverterTestCase(BaseConverterTestCase):
def get_converter(self):
from sloth.userinput import stats_def_converter
return stats_def_converter
def test_prompter_works(self):
from sloth.userinput import stats_def_prompter
activity = [20]
prompter = stats_def_prompter(activity)
expected = 'Defense - How well you can take a punch (6 left): '
self.assertEqual(prompter.prompt_text, expected)
def test_not_enough_works(self):
self.assertConversionFails('1', activity=0)
def test_back_works(self):
self.assertConversionResultEquals('b', (0, 1), activity=26)
def test_reasonable_works(self):
self.assertConversionResultEquals('1', (1, 3), activity=26)
def test_over_10_fails(self):
self.assertConversionFails('13', activity=26)
def test_value_error_works(self):
self.assertConversionFails('e', activity=26)
class EnduranceStatConverterTestCase(BaseConverterTestCase):
def get_converter(self):
from sloth.userinput import stats_end_converter
return stats_end_converter
def test_prompter_works(self):
from sloth.userinput import stats_end_prompter
activity = [12]
prompter = stats_end_prompter(activity)
expected = 'Endurance - Your overall health (14 left): '
self.assertEqual(prompter.prompt_text, expected)
def test_not_enough_works(self):
self.assertConversionFails('1', activity=0)
def test_back_works(self):
self.assertConversionResultEquals('b', (0, 2), activity=26)
def test_reasonable_works(self):
self.assertConversionResultEquals('1', (1, 4), activity=26)
def test_over_10_fails(self):
self.assertConversionFails('14', activity=26)
def test_value_error_works(self):
self.assertConversionFails('f', activity=26)
class IntelligenceStatConverterTestCase(BaseConverterTestCase):
def get_converter(self):
from sloth.userinput import stats_int_converter
return stats_int_converter
def test_prompter_works(self):
from sloth.userinput import stats_int_prompter
activity = [8]
prompter = stats_int_prompter(activity)
expected = 'Intelligence - Technical know-how (18 left): '
self.assertEqual(prompter.prompt_text, expected)
def test_not_enough_works(self):
self.assertConversionFails('1', activity=0)
def test_back_works(self):
self.assertConversionResultEquals('b', (0, 3), activity=26)
def test_reasonable_works(self):
self.assertConversionResultEquals('1', (1, 5), activity=26)
def test_over_10_fails(self):
self.assertConversionFails('15', activity=26)
def test_value_error_works(self):
self.assertConversionFails('g', activity=26)
class StrengthStatConverterTestCase(BaseConverterTestCase):
def get_converter(self):
from sloth.userinput import stats_str_converter
return stats_str_converter
def test_prompter_works(self):
from sloth.userinput import stats_str_prompter
activity = [24]
prompter = stats_str_prompter(activity)
expected = 'Strength - How well you can give a punch (2 left): '
self.assertEqual(prompter.prompt_text, expected)
def test_not_enough_works(self):
self.assertConversionFails('1', activity=0)
def test_back_works(self):
self.assertConversionResultEquals('b', (0, 4), activity=26)
def test_reasonable_works(self):
self.assertConversionResultEquals('1', (1, 6), activity=1)
def test_points_still_available(self):
self.assertConversionFails('1', activity=2)
def test_over_10_fails(self):
self.assertConversionFails('16', activity=1)
def test_value_error_works(self):
self.assertConversionFails('h', activity=26)
def test_zero_or_less_available_works(self):
self.assertConversionFails('1', activity=0)
|
mohammed-alfatih/servo | refs/heads/master | tests/wpt/web-platform-tests/old-tests/webdriver/runtests.py | 212 | from unittest import TestLoader, TextTestRunner, TestSuite
if __name__ == "__main__":
loader = TestLoader()
suite = TestSuite((
loader.discover(".", pattern="*.py")
))
runner = TextTestRunner(verbosity=2)
runner.run(suite)
|
dkodnik/Ant | refs/heads/master | openerp/report/render/rml.py | 457 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import render
import rml2pdf
import rml2html as htmlizer
import rml2txt as txtizer
import odt2odt as odt
import html2html as html
import makohtml2html as makohtml
class rml(render.render):
    def __init__(self, rml, localcontext=None, datas=None, path='.', title=None):
        render.render.__init__(self, datas, path)
        self.localcontext = localcontext
        self.rml = rml
        self.output_type = 'pdf'
        self.title = title
def _render(self):
return rml2pdf.parseNode(self.rml, self.localcontext, images=self.bin_datas, path=self.path,title=self.title)
class rml2html(render.render):
    def __init__(self, rml, localcontext=None, datas=None):
super(rml2html, self).__init__(datas)
self.rml = rml
self.localcontext = localcontext
self.output_type = 'html'
def _render(self):
return htmlizer.parseString(self.rml,self.localcontext)
class rml2txt(render.render):
    def __init__(self, rml, localcontext=None, datas=None):
super(rml2txt, self).__init__(datas)
self.rml = rml
self.localcontext = localcontext
self.output_type = 'txt'
def _render(self):
return txtizer.parseString(self.rml, self.localcontext)
class odt2odt(render.render):
def __init__(self, rml, localcontext=None, datas=None):
render.render.__init__(self, datas)
self.rml_dom = rml
self.localcontext = localcontext
self.output_type = 'odt'
def _render(self):
return odt.parseNode(self.rml_dom,self.localcontext)
class html2html(render.render):
def __init__(self, rml, localcontext=None, datas=None):
render.render.__init__(self, datas)
self.rml_dom = rml
self.localcontext = localcontext
self.output_type = 'html'
def _render(self):
return html.parseString(self.rml_dom,self.localcontext)
class makohtml2html(render.render):
    def __init__(self, html, localcontext=None):
render.render.__init__(self)
self.html = html
self.localcontext = localcontext
self.output_type = 'html'
def _render(self):
return makohtml.parseNode(self.html,self.localcontext)
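# A minimal sketch (editor's addition, not part of the original module):
# converting an RML string to plain text with the wrapper defined above.
# Real callers normally go through the public render.render interface;
# calling the internal _render() directly here is for illustration only.
def _example_rml_to_text(rml_string):
    renderer = rml2txt(rml_string, localcontext={})
    return renderer._render()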
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Tejal011089/trufil-erpnext | refs/heads/master | erpnext/setup/doctype/sales_partner/sales_partner.py | 65 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, filter_strip_join
from frappe.website.website_generator import WebsiteGenerator
from erpnext.utilities.address_and_contact import load_address_and_contact
class SalesPartner(WebsiteGenerator):
website = frappe._dict(
page_title_field = "partner_name",
condition_field = "show_in_website",
template = "templates/generators/sales_partner.html"
)
def onload(self):
"""Load address and contacts in `__onload`"""
load_address_and_contact(self, "sales_partner")
def autoname(self):
self.name = self.partner_name
def validate(self):
self.parent_website_route = "partners"
super(SalesPartner, self).validate()
if self.partner_website and not self.partner_website.startswith("http"):
self.partner_website = "http://" + self.partner_website
def get_contacts(self, nm):
if nm:
return frappe.db.convert_to_lists(frappe.db.sql("""
select name, CONCAT(IFNULL(first_name,''),
' ',IFNULL(last_name,'')),contact_no,email_id
from `tabContact` where sales_partner = %s""", nm))
else:
return ''
def get_context(self, context):
address = frappe.db.get_value("Address",
{"sales_partner": self.name, "is_primary_address": 1},
"*", as_dict=True)
if address:
city_state = ", ".join(filter(None, [address.city, address.state]))
address_rows = [address.address_line1, address.address_line2,
city_state, address.pincode, address.country]
context.update({
"email": address.email_id,
"partner_address": filter_strip_join(address_rows, "\n<br>"),
"phone": filter_strip_join(cstr(address.phone).split(","), "\n<br>")
})
return context
|
jsteemann/arangodb | refs/heads/devel | 3rdParty/V8-4.3.61/third_party/python_26/Lib/curses/textpad.py | 212 | """Simple textbox editing widget with Emacs-like keybindings."""
import curses
import curses.ascii
def rectangle(win, uly, ulx, lry, lrx):
"""Draw a rectangle with corners at the provided upper-left
and lower-right coordinates.
"""
win.vline(uly+1, ulx, curses.ACS_VLINE, lry - uly - 1)
win.hline(uly, ulx+1, curses.ACS_HLINE, lrx - ulx - 1)
win.hline(lry, ulx+1, curses.ACS_HLINE, lrx - ulx - 1)
win.vline(uly+1, lrx, curses.ACS_VLINE, lry - uly - 1)
win.addch(uly, ulx, curses.ACS_ULCORNER)
win.addch(uly, lrx, curses.ACS_URCORNER)
win.addch(lry, lrx, curses.ACS_LRCORNER)
win.addch(lry, ulx, curses.ACS_LLCORNER)
class Textbox:
"""Editing widget using the interior of a window object.
Supports the following Emacs-like key bindings:
Ctrl-A Go to left edge of window.
Ctrl-B Cursor left, wrapping to previous line if appropriate.
Ctrl-D Delete character under cursor.
Ctrl-E Go to right edge (stripspaces off) or end of line (stripspaces on).
Ctrl-F Cursor right, wrapping to next line when appropriate.
Ctrl-G Terminate, returning the window contents.
Ctrl-H Delete character backward.
Ctrl-J Terminate if the window is 1 line, otherwise insert newline.
Ctrl-K If line is blank, delete it, otherwise clear to end of line.
Ctrl-L Refresh screen.
Ctrl-N Cursor down; move down one line.
Ctrl-O Insert a blank line at cursor location.
Ctrl-P Cursor up; move up one line.
Move operations do nothing if the cursor is at an edge where the movement
is not possible. The following synonyms are supported where possible:
KEY_LEFT = Ctrl-B, KEY_RIGHT = Ctrl-F, KEY_UP = Ctrl-P, KEY_DOWN = Ctrl-N
KEY_BACKSPACE = Ctrl-h
"""
def __init__(self, win, insert_mode=False):
self.win = win
self.insert_mode = insert_mode
(self.maxy, self.maxx) = win.getmaxyx()
self.maxy = self.maxy - 1
self.maxx = self.maxx - 1
self.stripspaces = 1
self.lastcmd = None
win.keypad(1)
def _end_of_line(self, y):
"""Go to the location of the first blank on the given line,
returning the index of the last non-blank character."""
last = self.maxx
while True:
if curses.ascii.ascii(self.win.inch(y, last)) != curses.ascii.SP:
last = min(self.maxx, last+1)
break
elif last == 0:
break
last = last - 1
return last
def _insert_printable_char(self, ch):
(y, x) = self.win.getyx()
if y < self.maxy or x < self.maxx:
if self.insert_mode:
oldch = self.win.inch()
            # The try-except ignores the error we trigger from some curses
# versions by trying to write into the lowest-rightmost spot
# in the window.
try:
self.win.addch(ch)
except curses.error:
pass
if self.insert_mode:
(backy, backx) = self.win.getyx()
if curses.ascii.isprint(oldch):
self._insert_printable_char(oldch)
self.win.move(backy, backx)
def do_command(self, ch):
"Process a single editing command."
(y, x) = self.win.getyx()
self.lastcmd = ch
if curses.ascii.isprint(ch):
if y < self.maxy or x < self.maxx:
self._insert_printable_char(ch)
elif ch == curses.ascii.SOH: # ^a
self.win.move(y, 0)
elif ch in (curses.ascii.STX,curses.KEY_LEFT, curses.ascii.BS,curses.KEY_BACKSPACE):
if x > 0:
self.win.move(y, x-1)
elif y == 0:
pass
elif self.stripspaces:
self.win.move(y-1, self._end_of_line(y-1))
else:
self.win.move(y-1, self.maxx)
if ch in (curses.ascii.BS, curses.KEY_BACKSPACE):
self.win.delch()
elif ch == curses.ascii.EOT: # ^d
self.win.delch()
elif ch == curses.ascii.ENQ: # ^e
if self.stripspaces:
self.win.move(y, self._end_of_line(y))
else:
self.win.move(y, self.maxx)
elif ch in (curses.ascii.ACK, curses.KEY_RIGHT): # ^f
if x < self.maxx:
self.win.move(y, x+1)
elif y == self.maxy:
pass
else:
self.win.move(y+1, 0)
elif ch == curses.ascii.BEL: # ^g
return 0
elif ch == curses.ascii.NL: # ^j
if self.maxy == 0:
return 0
elif y < self.maxy:
self.win.move(y+1, 0)
elif ch == curses.ascii.VT: # ^k
if x == 0 and self._end_of_line(y) == 0:
self.win.deleteln()
else:
# first undo the effect of self._end_of_line
self.win.move(y, x)
self.win.clrtoeol()
elif ch == curses.ascii.FF: # ^l
self.win.refresh()
elif ch in (curses.ascii.SO, curses.KEY_DOWN): # ^n
if y < self.maxy:
self.win.move(y+1, x)
if x > self._end_of_line(y+1):
self.win.move(y+1, self._end_of_line(y+1))
elif ch == curses.ascii.SI: # ^o
self.win.insertln()
elif ch in (curses.ascii.DLE, curses.KEY_UP): # ^p
if y > 0:
self.win.move(y-1, x)
if x > self._end_of_line(y-1):
self.win.move(y-1, self._end_of_line(y-1))
return 1
def gather(self):
"Collect and return the contents of the window."
result = ""
for y in range(self.maxy+1):
self.win.move(y, 0)
stop = self._end_of_line(y)
if stop == 0 and self.stripspaces:
continue
for x in range(self.maxx+1):
if self.stripspaces and x > stop:
break
result = result + chr(curses.ascii.ascii(self.win.inch(y, x)))
if self.maxy > 0:
result = result + "\n"
return result
def edit(self, validate=None):
"Edit in the widget window and collect the results."
while 1:
ch = self.win.getch()
if validate:
ch = validate(ch)
if not ch:
continue
if not self.do_command(ch):
break
self.win.refresh()
return self.gather()
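# A minimal usage sketch (editor's addition, not part of the stdlib module),
# assuming the caller already has a curses window ``win``. edit() accepts an
# optional ``validate`` callback that can rewrite or swallow each keystroke
# before do_command() sees it; here Enter (CR/NL) is translated into Ctrl-G
# (BEL) so editing also terminates on Enter.
def _example_edit_with_validator(win):
    def terminate_on_enter(ch):
        if ch in (curses.ascii.CR, curses.ascii.NL):
            return curses.ascii.BEL
        return ch
    return Textbox(win).edit(terminate_on_enter)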
if __name__ == '__main__':
def test_editbox(stdscr):
ncols, nlines = 9, 4
uly, ulx = 15, 20
stdscr.addstr(uly-2, ulx, "Use Ctrl-G to end editing.")
win = curses.newwin(nlines, ncols, uly, ulx)
rectangle(stdscr, uly-1, ulx-1, uly + nlines, ulx + ncols)
stdscr.refresh()
return Textbox(win).edit()
str = curses.wrapper(test_editbox)
print 'Contents of text box:', repr(str)
|
hazelnusse/sympy-old | refs/heads/master | sympy/thirdparty/pyglet/pyglet/window/carbon/__init__.py | 4 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2007 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
from ctypes import *
import os.path
import unicodedata
import warnings
import pyglet
from pyglet.window import WindowException, Platform, Display, Screen, \
BaseWindow, MouseCursor, DefaultMouseCursor, _PlatformEventHandler
from pyglet.window import key
from pyglet.window import mouse
from pyglet.window import event
from pyglet.window.carbon.constants import *
from pyglet.window.carbon.types import *
from pyglet.window.carbon.quartzkey import keymap
import pyglet.lib
from pyglet import gl
from pyglet.gl import agl
from pyglet.gl import gl_info
from pyglet.gl import glu_info
from pyglet.event import EventDispatcher
class CarbonException(WindowException):
pass
carbon = pyglet.lib.load_library(
framework='/System/Library/Frameworks/Carbon.framework')
quicktime = pyglet.lib.load_library(
framework='/System/Library/Frameworks/QuickTime.framework')
carbon.GetEventDispatcherTarget.restype = EventTargetRef
carbon.ReceiveNextEvent.argtypes = \
[c_uint32, c_void_p, c_double, c_ubyte, POINTER(EventRef)]
carbon.GetWindowPort.restype = agl.AGLDrawable
EventHandlerProcPtr = CFUNCTYPE(c_int, c_int, c_void_p, c_void_p)
carbon.NewEventHandlerUPP.restype = c_void_p
carbon.GetCurrentKeyModifiers.restype = c_uint32
carbon.NewRgn.restype = RgnHandle
carbon.CGDisplayBounds.argtypes = [c_void_p]
carbon.CGDisplayBounds.restype = CGRect
# Map symbol,modifiers -> motion
# Determined by experiment with TextEdit.app
_motion_map = {
(key.UP, False): key.MOTION_UP,
(key.RIGHT, False): key.MOTION_RIGHT,
(key.DOWN, False): key.MOTION_DOWN,
(key.LEFT, False): key.MOTION_LEFT,
(key.LEFT, key.MOD_OPTION): key.MOTION_PREVIOUS_WORD,
(key.RIGHT, key.MOD_OPTION): key.MOTION_NEXT_WORD,
(key.LEFT, key.MOD_COMMAND): key.MOTION_BEGINNING_OF_LINE,
(key.RIGHT, key.MOD_COMMAND): key.MOTION_END_OF_LINE,
(key.PAGEUP, False): key.MOTION_PREVIOUS_PAGE,
(key.PAGEDOWN, False): key.MOTION_NEXT_PAGE,
(key.HOME, False): key.MOTION_BEGINNING_OF_FILE,
(key.END, False): key.MOTION_END_OF_FILE,
(key.UP, key.MOD_COMMAND): key.MOTION_BEGINNING_OF_FILE,
(key.DOWN, key.MOD_COMMAND): key.MOTION_END_OF_FILE,
(key.BACKSPACE, False): key.MOTION_BACKSPACE,
(key.DELETE, False): key.MOTION_DELETE,
}
class CarbonPlatform(Platform):
_display = None
def get_default_display(self):
if not self._display:
self._display = CarbonDisplay()
return self._display
class CarbonDisplay(Display):
# TODO: CarbonDisplay could be per display device, which would make
# reporting of screens and available configs more accurate. The number of
# Macs with more than one video card is probably small, though.
def __init__(self):
super(CarbonDisplay, self).__init__()
import MacOS
if not MacOS.WMAvailable():
raise CarbonException('Window manager is not available. ' \
'Ensure you run "pythonw", not "python"')
self._install_application_event_handlers()
def get_screens(self):
count = CGDisplayCount()
carbon.CGGetActiveDisplayList(0, None, byref(count))
displays = (CGDirectDisplayID * count.value)()
carbon.CGGetActiveDisplayList(count.value, displays, byref(count))
return [CarbonScreen(self, id) for id in displays]
def _install_application_event_handlers(self):
self._carbon_event_handlers = []
self._carbon_event_handler_refs = []
target = carbon.GetApplicationEventTarget()
# TODO something with a metaclass or hacky like CarbonWindow
# to make this list extensible
handlers = [
(self._on_mouse_down, kEventClassMouse, kEventMouseDown),
(self._on_apple_event, kEventClassAppleEvent, kEventAppleEvent),
(self._on_command, kEventClassCommand, kEventProcessCommand),
]
ae_handlers = [
(self._on_ae_quit, kCoreEventClass, kAEQuitApplication),
]
# Install the application-wide handlers
for method, cls, event in handlers:
proc = EventHandlerProcPtr(method)
self._carbon_event_handlers.append(proc)
upp = carbon.NewEventHandlerUPP(proc)
types = EventTypeSpec()
types.eventClass = cls
types.eventKind = event
handler_ref = EventHandlerRef()
carbon.InstallEventHandler(
target,
upp,
1,
byref(types),
c_void_p(),
byref(handler_ref))
self._carbon_event_handler_refs.append(handler_ref)
# Install Apple event handlers
for method, cls, event in ae_handlers:
proc = EventHandlerProcPtr(method)
self._carbon_event_handlers.append(proc)
upp = carbon.NewAEEventHandlerUPP(proc)
carbon.AEInstallEventHandler(
cls,
event,
upp,
0,
False)
def _on_command(self, next_handler, ev, data):
command = HICommand()
carbon.GetEventParameter(ev, kEventParamDirectObject,
typeHICommand, c_void_p(), sizeof(command), c_void_p(),
byref(command))
if command.commandID == kHICommandQuit:
self._on_quit()
return noErr
def _on_mouse_down(self, next_handler, ev, data):
# Check for menubar hit
position = Point()
carbon.GetEventParameter(ev, kEventParamMouseLocation,
typeQDPoint, c_void_p(), sizeof(position), c_void_p(),
byref(position))
if carbon.FindWindow(position, None) == inMenuBar:
# Mouse down in menu bar. MenuSelect() takes care of all
# menu tracking and blocks until the menu is dismissed.
            # Use command events to handle actual menu item invocations.
carbon.MenuSelect(position)
carbon.HiliteMenu(0)
carbon.CallNextEventHandler(next_handler, ev)
return noErr
def _on_apple_event(self, next_handler, ev, data):
# Somewhat involved way of redispatching Apple event contained
# within a Carbon event, described in
# http://developer.apple.com/documentation/AppleScript/
# Conceptual/AppleEvents/dispatch_aes_aepg/chapter_4_section_3.html
release = False
if carbon.IsEventInQueue(carbon.GetMainEventQueue(), ev):
carbon.RetainEvent(ev)
release = True
carbon.RemoveEventFromQueue(carbon.GetMainEventQueue(), ev)
ev_record = EventRecord()
carbon.ConvertEventRefToEventRecord(ev, byref(ev_record))
carbon.AEProcessAppleEvent(byref(ev_record))
if release:
carbon.ReleaseEvent(ev)
return noErr
def _on_ae_quit(self, ae, reply, refcon):
self._on_quit()
return noErr
def _on_quit(self):
'''Called when the user tries to quit the application.
This is not an actual event handler, it is called in response
to Command+Q, the Quit menu item, and the Dock context menu's Quit
item.
The default implementation sets `has_exit` to true on all open
windows.
'''
for window in self.get_windows():
window.has_exit = True
class CarbonScreen(Screen):
def __init__(self, display, id):
self.display = display
rect = carbon.CGDisplayBounds(id)
super(CarbonScreen, self).__init__(
int(rect.origin.x), int(rect.origin.y),
int(rect.size.width), int(rect.size.height))
self.id = id
def get_gdevice(self):
gdevice = GDHandle()
r = carbon.DMGetGDeviceByDisplayID(self.id, byref(gdevice), False)
_oscheck(r)
return gdevice
def get_matching_configs(self, template):
# Construct array of attributes for aglChoosePixelFormat
attrs = []
for name, value in template.get_gl_attributes():
attr = CarbonGLConfig._attribute_ids.get(name, None)
if not attr or not value:
continue
attrs.append(attr)
if attr not in CarbonGLConfig._boolean_attributes:
attrs.append(int(value))
# Support for RAGE-II, which is not compliant
attrs.append(agl.AGL_ALL_RENDERERS)
# Force selection policy and RGBA
attrs.append(agl.AGL_MAXIMUM_POLICY)
attrs.append(agl.AGL_RGBA)
# In 10.3 and later, AGL_FULLSCREEN is specified so the window can
# be toggled to/from fullscreen without losing context. pyglet
# no longer supports earlier versions of OS X, so we always supply it.
attrs.append(agl.AGL_FULLSCREEN)
# Terminate the list.
attrs.append(agl.AGL_NONE)
attrib_list = (c_int * len(attrs))(*attrs)
device = self.get_gdevice()
pformat = agl.aglChoosePixelFormat(device, 1, attrib_list)
_aglcheck()
if not pformat:
return []
else:
return [CarbonGLConfig(self, pformat)]
class CarbonGLConfig(gl.Config):
# Valid names for GL attributes, and their corresponding AGL constant.
_attribute_ids = {
'double_buffer': agl.AGL_DOUBLEBUFFER,
'stereo': agl.AGL_STEREO,
'buffer_size': agl.AGL_BUFFER_SIZE,
'sample_buffers': agl.AGL_SAMPLE_BUFFERS_ARB,
'samples': agl.AGL_SAMPLES_ARB,
'aux_buffers': agl.AGL_AUX_BUFFERS,
'red_size': agl.AGL_RED_SIZE,
'green_size': agl.AGL_GREEN_SIZE,
'blue_size': agl.AGL_BLUE_SIZE,
'alpha_size': agl.AGL_ALPHA_SIZE,
'depth_size': agl.AGL_DEPTH_SIZE,
'stencil_size': agl.AGL_STENCIL_SIZE,
'accum_red_size': agl.AGL_ACCUM_RED_SIZE,
'accum_green_size': agl.AGL_ACCUM_GREEN_SIZE,
'accum_blue_size': agl.AGL_ACCUM_BLUE_SIZE,
'accum_alpha_size': agl.AGL_ACCUM_ALPHA_SIZE,
# Not exposed by pyglet API (set internally)
'all_renderers': agl.AGL_ALL_RENDERERS,
'rgba': agl.AGL_RGBA,
'fullscreen': agl.AGL_FULLSCREEN,
'minimum_policy': agl.AGL_MINIMUM_POLICY,
'maximum_policy': agl.AGL_MAXIMUM_POLICY,
# Not supported in current pyglet API
'level': agl.AGL_LEVEL,
'pixel_size': agl.AGL_PIXEL_SIZE, # == buffer_size
'aux_depth_stencil': agl.AGL_AUX_DEPTH_STENCIL,
'color_float': agl.AGL_COLOR_FLOAT,
'offscreen': agl.AGL_OFFSCREEN,
'sample_alpha': agl.AGL_SAMPLE_ALPHA,
'multisample': agl.AGL_MULTISAMPLE,
'supersample': agl.AGL_SUPERSAMPLE,
}
# AGL constants which do not require a value.
_boolean_attributes = \
(agl.AGL_ALL_RENDERERS,
agl.AGL_RGBA,
agl.AGL_DOUBLEBUFFER,
agl.AGL_STEREO,
agl.AGL_MINIMUM_POLICY,
agl.AGL_MAXIMUM_POLICY,
agl.AGL_OFFSCREEN,
agl.AGL_FULLSCREEN,
agl.AGL_AUX_DEPTH_STENCIL,
agl.AGL_COLOR_FLOAT,
agl.AGL_MULTISAMPLE,
agl.AGL_SUPERSAMPLE,
agl.AGL_SAMPLE_ALPHA)
def __init__(self, screen, pformat):
super(CarbonGLConfig, self).__init__()
self.screen = screen
self._pformat = pformat
self._attributes = {}
for name, attr in self._attribute_ids.items():
value = c_int()
result = agl.aglDescribePixelFormat(pformat, attr, byref(value))
if result:
setattr(self, name, value.value)
def create_context(self, share):
if share:
context = agl.aglCreateContext(self._pformat, share._context)
else:
context = agl.aglCreateContext(self._pformat, None)
_aglcheck()
return CarbonGLContext(self, context, share, self._pformat)
class CarbonGLContext(gl.Context):
def __init__(self, config, context, share, pixelformat):
super(CarbonGLContext, self).__init__(share)
self.config = config
self._context = context
self._pixelformat = pixelformat
def destroy(self):
super(CarbonGLContext, self).destroy()
agl.aglDestroyContext(self._context)
class CarbonMouseCursor(MouseCursor):
drawable = False
def __init__(self, theme):
self.theme = theme
def CarbonEventHandler(event_class, event_kind):
return _PlatformEventHandler((event_class, event_kind))
class CarbonWindow(BaseWindow):
_window = None # Carbon WindowRef
_agl_context = None # AGL context ID
_recreate_deferred = None
# Window properties
_minimum_size = None
_maximum_size = None
_fullscreen_restore = None
_event_dispatcher = None
_current_modifiers = 0
    _mapped_modifiers = 0
_carbon_event_handlers = []
_carbon_event_handler_refs = []
_track_ref = 0
_track_region = None
_mouse_exclusive = False
_mouse_platform_visible = True
def _recreate(self, changes):
# We can't destroy the window while event handlers are active,
# otherwise the (OS X) event dispatcher gets lost and segfaults.
#
# Defer actual recreation until dispatch_events next finishes.
self._recreate_deferred = changes
def _recreate_immediate(self):
# The actual _recreate function.
changes = self._recreate_deferred
self._recreate_deferred = None
if ('context' in changes):
agl.aglSetDrawable(self._agl_context, None)
if ('fullscreen' in changes and
not self._fullscreen and
self._fullscreen_restore):
# Leaving fullscreen -- destroy everything before the window.
self._remove_track_region()
self._remove_event_handlers()
agl.aglSetDrawable(self._agl_context, None)
# EndFullScreen disposes _window.
quicktime.EndFullScreen(self._fullscreen_restore, 0)
self._window = None
self._create()
def _create(self):
self._agl_context = self.context._context
if self._window:
# The window is about to be recreated; destroy everything
# associated with the old window, then the window itself.
self._remove_track_region()
self._remove_event_handlers()
agl.aglSetDrawable(self._agl_context, None)
carbon.DisposeWindow(self._window)
self._window = None
self._window = WindowRef()
if self._fullscreen:
# Switch to fullscreen mode with QuickTime
fs_width = c_short(0)
fs_height = c_short(0)
self._fullscreen_restore = c_void_p()
quicktime.BeginFullScreen(byref(self._fullscreen_restore),
self.screen.get_gdevice(),
byref(fs_width),
byref(fs_height),
byref(self._window),
None,
0)
# the following may be used for debugging if you have a second
# monitor - only the main monitor will go fullscreen
agl.aglEnable(self._agl_context, agl.AGL_FS_CAPTURE_SINGLE)
self._width = fs_width.value
self._height = fs_height.value
#self._width = self.screen.width
#self._height = self.screen.height
agl.aglSetFullScreen(self._agl_context,
self._width, self._height, 0, 0)
self._mouse_in_window = True
self.dispatch_event('on_resize', self._width, self._height)
self.dispatch_event('on_show')
self.dispatch_event('on_expose')
else:
# Create floating window
rect = Rect()
location = None # TODO
if location is not None:
rect.left = location[0]
rect.top = location[1]
else:
rect.top = rect.left = 0
rect.right = rect.left + self._width
rect.bottom = rect.top + self._height
styles = {
self.WINDOW_STYLE_DEFAULT: (kDocumentWindowClass,
kWindowCloseBoxAttribute |
kWindowCollapseBoxAttribute),
self.WINDOW_STYLE_DIALOG: (kDocumentWindowClass,
kWindowCloseBoxAttribute),
self.WINDOW_STYLE_TOOL: (kUtilityWindowClass,
kWindowCloseBoxAttribute),
self.WINDOW_STYLE_BORDERLESS: (kSimpleWindowClass,
kWindowNoAttributes)
}
            window_class, window_attributes = \
                styles.get(self._style,
                           (kDocumentWindowClass, kWindowNoAttributes))
if self._resizable:
window_attributes |= (kWindowFullZoomAttribute |
kWindowResizableAttribute)
r = carbon.CreateNewWindow(window_class,
window_attributes,
byref(rect),
byref(self._window))
_oscheck(r)
if location is None:
carbon.RepositionWindow(self._window, c_void_p(),
kWindowCascadeOnMainScreen)
agl.aglSetDrawable(self._agl_context,
carbon.GetWindowPort(self._window))
_aglcheck()
self.set_caption(self._caption)
# Get initial state
self._event_dispatcher = carbon.GetEventDispatcherTarget()
        self._current_modifiers = carbon.GetCurrentKeyModifiers()
self._mapped_modifiers = self._map_modifiers(self._current_modifiers)
# (re)install Carbon event handlers
self._install_event_handlers()
self._create_track_region()
self.switch_to()
self.set_vsync(self._vsync)
if self._visible:
self.set_visible(True)
def _create_track_region(self):
self._remove_track_region()
# Create a tracking region for the content part of the window
# to receive enter/leave events.
track_id = MouseTrackingRegionID()
track_id.signature = DEFAULT_CREATOR_CODE
track_id.id = 1
self._track_ref = MouseTrackingRef()
self._track_region = carbon.NewRgn()
carbon.GetWindowRegion(self._window,
kWindowContentRgn, self._track_region)
carbon.CreateMouseTrackingRegion(self._window,
self._track_region, None, kMouseTrackingOptionsGlobalClip,
track_id, None, None,
byref(self._track_ref))
def _remove_track_region(self):
if self._track_region:
carbon.ReleaseMouseTrackingRegion(self._track_region)
self._track_region = None
def close(self):
super(CarbonWindow, self).close()
self._agl_context = None
self._remove_event_handlers()
self._remove_track_region()
# Restore cursor visibility
self.set_mouse_platform_visible(True)
self.set_exclusive_mouse(False)
if self._fullscreen:
quicktime.EndFullScreen(self._fullscreen_restore, 0)
else:
carbon.DisposeWindow(self._window)
self._window = None
def switch_to(self):
agl.aglSetCurrentContext(self._agl_context)
self._context.set_current()
_aglcheck()
gl_info.set_active_context()
glu_info.set_active_context()
def flip(self):
self.draw_mouse_cursor()
agl.aglSwapBuffers(self._agl_context)
_aglcheck()
def _get_vsync(self):
swap = c_long()
agl.aglGetInteger(self._agl_context, agl.AGL_SWAP_INTERVAL, byref(swap))
return bool(swap.value)
vsync = property(_get_vsync) # overrides BaseWindow property
def set_vsync(self, vsync):
if pyglet.options['vsync'] is not None:
vsync = pyglet.options['vsync']
self._vsync = vsync # _recreate depends on this
swap = c_long(int(vsync))
agl.aglSetInteger(self._agl_context, agl.AGL_SWAP_INTERVAL, byref(swap))
def dispatch_events(self):
if self._recreate_deferred:
self._recreate_immediate()
self._allow_dispatch_event = True
while self._event_queue:
EventDispatcher.dispatch_event(self, *self._event_queue.pop(0))
e = EventRef()
result = carbon.ReceiveNextEvent(0, c_void_p(), 0, True, byref(e))
while result == noErr:
carbon.SendEventToEventTarget(e, self._event_dispatcher)
carbon.ReleaseEvent(e)
if self._recreate_deferred:
self._recreate_immediate()
result = carbon.ReceiveNextEvent(0, c_void_p(), 0, True, byref(e))
self._allow_dispatch_event = False
# Return value from ReceiveNextEvent can be ignored if not
# noErr; we check here only to look for new bugs.
# eventLoopQuitErr: the inner event loop was quit, see
# http://lists.apple.com/archives/Carbon-dev/2006/Jun/msg00850.html
# Can occur when mixing with other toolkits, e.g. Tk.
# Fixes issue 180.
if result not in (eventLoopTimedOutErr, eventLoopQuitErr):
            raise CarbonException('Error %d' % result)
def set_caption(self, caption):
self._caption = caption
s = _create_cfstring(caption)
carbon.SetWindowTitleWithCFString(self._window, s)
carbon.CFRelease(s)
def set_location(self, x, y):
rect = Rect()
carbon.GetWindowBounds(self._window, kWindowContentRgn, byref(rect))
rect.right += x - rect.left
rect.bottom += y - rect.top
rect.left = x
rect.top = y
carbon.SetWindowBounds(self._window, kWindowContentRgn, byref(rect))
def get_location(self):
rect = Rect()
carbon.GetWindowBounds(self._window, kWindowContentRgn, byref(rect))
return rect.left, rect.top
def set_size(self, width, height):
if self._fullscreen:
raise WindowException('Cannot set size of fullscreen window.')
rect = Rect()
carbon.GetWindowBounds(self._window, kWindowContentRgn, byref(rect))
rect.right = rect.left + width
rect.bottom = rect.top + height
carbon.SetWindowBounds(self._window, kWindowContentRgn, byref(rect))
self._width = width
self._height = height
self.dispatch_event('on_resize', width, height)
self.dispatch_event('on_expose')
def get_size(self):
if self._fullscreen:
return self._width, self._height
rect = Rect()
carbon.GetWindowBounds(self._window, kWindowContentRgn, byref(rect))
return rect.right - rect.left, rect.bottom - rect.top
def set_minimum_size(self, width, height):
self._minimum_size = (width, height)
minimum = HISize()
minimum.width = width
minimum.height = height
if self._maximum_size:
maximum = HISize()
maximum.width, maximum.height = self._maximum_size
maximum = byref(maximum)
else:
maximum = None
carbon.SetWindowResizeLimits(self._window,
byref(minimum), maximum)
def set_maximum_size(self, width, height):
self._maximum_size = (width, height)
maximum = HISize()
maximum.width = width
maximum.height = height
if self._minimum_size:
minimum = HISize()
minimum.width, minimum.height = self._minimum_size
minimum = byref(minimum)
else:
minimum = None
carbon.SetWindowResizeLimits(self._window,
minimum, byref(maximum))
def activate(self):
carbon.ActivateWindow(self._window, 1)
# Also make the application the "front" application. TODO
# maybe don't bring forward all of the application's windows?
psn = ProcessSerialNumber()
psn.highLongOfPSN = 0
psn.lowLongOfPSN = kCurrentProcess
carbon.SetFrontProcess(byref(psn))
def set_visible(self, visible=True):
self._visible = visible
if visible:
self.dispatch_event('on_resize', self._width, self._height)
self.dispatch_event('on_show')
carbon.ShowWindow(self._window)
else:
carbon.HideWindow(self._window)
def minimize(self):
carbon.CollapseWindow(self._window, True)
def maximize(self):
# Maximum "safe" value, gets trimmed to screen size automatically.
p = Point()
p.v, p.h = 16000,16000
if not carbon.IsWindowInStandardState(self._window, byref(p), None):
carbon.ZoomWindowIdeal(self._window, inZoomOut, byref(p))
def set_mouse_platform_visible(self, platform_visible=None):
if platform_visible is None:
platform_visible = self._mouse_visible and \
not self._mouse_exclusive and \
not self._mouse_cursor.drawable
if not self._mouse_in_window:
platform_visible = True
if self._mouse_in_window and \
isinstance(self._mouse_cursor, CarbonMouseCursor):
carbon.SetThemeCursor(self._mouse_cursor.theme)
else:
carbon.SetThemeCursor(kThemeArrowCursor)
if self._mouse_platform_visible == platform_visible:
return
if platform_visible:
carbon.ShowCursor()
else:
carbon.HideCursor()
self._mouse_platform_visible = platform_visible
def set_exclusive_mouse(self, exclusive=True):
self._mouse_exclusive = exclusive
if exclusive:
# Move mouse to center of window
rect = Rect()
carbon.GetWindowBounds(self._window, kWindowContentRgn, byref(rect))
point = CGPoint()
point.x = (rect.right + rect.left) / 2
point.y = (rect.bottom + rect.top) / 2
carbon.CGWarpMouseCursorPosition(point)
carbon.CGAssociateMouseAndMouseCursorPosition(False)
else:
carbon.CGAssociateMouseAndMouseCursorPosition(True)
self.set_mouse_platform_visible()
def set_exclusive_keyboard(self, exclusive=True):
if exclusive:
# Note: power switch can also be disabled, with
# kUIOptionDisableSessionTerminate. That seems
# a little extreme though.
carbon.SetSystemUIMode(kUIModeAllHidden,
(kUIOptionDisableAppleMenu |
kUIOptionDisableProcessSwitch |
kUIOptionDisableForceQuit |
kUIOptionDisableHide))
else:
carbon.SetSystemUIMode(kUIModeNormal, 0)
def get_system_mouse_cursor(self, name):
if name == self.CURSOR_DEFAULT:
return DefaultMouseCursor()
themes = {
self.CURSOR_CROSSHAIR: kThemeCrossCursor,
self.CURSOR_HAND: kThemePointingHandCursor,
self.CURSOR_HELP: kThemeArrowCursor,
self.CURSOR_NO: kThemeNotAllowedCursor,
self.CURSOR_SIZE: kThemeArrowCursor,
self.CURSOR_SIZE_UP: kThemeResizeUpCursor,
self.CURSOR_SIZE_UP_RIGHT: kThemeArrowCursor,
self.CURSOR_SIZE_RIGHT: kThemeResizeRightCursor,
self.CURSOR_SIZE_DOWN_RIGHT: kThemeArrowCursor,
self.CURSOR_SIZE_DOWN: kThemeResizeDownCursor,
self.CURSOR_SIZE_DOWN_LEFT: kThemeArrowCursor,
self.CURSOR_SIZE_LEFT: kThemeResizeLeftCursor,
self.CURSOR_SIZE_UP_LEFT: kThemeArrowCursor,
self.CURSOR_SIZE_UP_DOWN: kThemeResizeUpDownCursor,
self.CURSOR_SIZE_LEFT_RIGHT: kThemeResizeLeftRightCursor,
self.CURSOR_TEXT: kThemeIBeamCursor,
self.CURSOR_WAIT: kThemeWatchCursor,
self.CURSOR_WAIT_ARROW: kThemeWatchCursor,
}
if name not in themes:
raise CarbonException('Unknown cursor name "%s"' % name)
return CarbonMouseCursor(themes[name])
def set_icon(self, *images):
# Only use the biggest image
image = images[0]
size = image.width * image.height
for img in images:
if img.width * img.height > size:
size = img.width * img.height
image = img
image = image.image_data
image.format = 'ARGB'
image.pitch = -len(image.format) * image.width
provider = carbon.CGDataProviderCreateWithData(
None, image.data, len(image.data), None)
colorspace = carbon.CGColorSpaceCreateDeviceRGB()
cgi = carbon.CGImageCreate(
image.width, image.height, 8, 32, -image.pitch,
colorspace,
kCGImageAlphaFirst,
provider,
None,
True,
kCGRenderingIntentDefault)
carbon.SetApplicationDockTileImage(cgi)
carbon.CGDataProviderRelease(provider)
carbon.CGColorSpaceRelease(colorspace)
# Non-public utilities
def _update_drawable(self):
# We can get there after context has been disposed, in which case
# just do nothing.
if not self._agl_context:
return
agl.aglUpdateContext(self._agl_context)
_aglcheck()
# Need a redraw
self.dispatch_event('on_expose')
def _update_track_region(self):
carbon.GetWindowRegion(self._window,
kWindowContentRgn, self._track_region)
carbon.ChangeMouseTrackingRegion(self._track_ref,
self._track_region, None)
def _install_event_handlers(self):
self._remove_event_handlers()
if self._fullscreen:
target = carbon.GetApplicationEventTarget()
else:
target = carbon.GetWindowEventTarget(self._window)
carbon.InstallStandardEventHandler(target)
self._carbon_event_handlers = []
self._carbon_event_handler_refs = []
for func_name in self._platform_event_names:
if not hasattr(self, func_name):
continue
func = getattr(self, func_name)
for event_class, event_kind in func._platform_event_data:
# TODO: could just build up array of class/kind
proc = EventHandlerProcPtr(func)
self._carbon_event_handlers.append(proc)
upp = carbon.NewEventHandlerUPP(proc)
types = EventTypeSpec()
types.eventClass = event_class
types.eventKind = event_kind
handler_ref = EventHandlerRef()
carbon.InstallEventHandler(
target,
upp,
1,
byref(types),
c_void_p(),
byref(handler_ref))
self._carbon_event_handler_refs.append(handler_ref)
def _remove_event_handlers(self):
for ref in self._carbon_event_handler_refs:
carbon.RemoveEventHandler(ref)
self._carbon_event_handler_refs = []
self._carbon_event_handlers = []
# Carbon event handlers
@CarbonEventHandler(kEventClassTextInput, kEventTextInputUnicodeForKeyEvent)
def _on_text_input(self, next_handler, ev, data):
size = c_uint32()
carbon.GetEventParameter(ev, kEventParamTextInputSendText,
typeUTF8Text, c_void_p(), 0, byref(size), c_void_p())
text = create_string_buffer(size.value)
carbon.GetEventParameter(ev, kEventParamTextInputSendText,
typeUTF8Text, c_void_p(), size.value, c_void_p(), byref(text))
text = text.value.decode('utf8')
raw_event = EventRef()
carbon.GetEventParameter(ev, kEventParamTextInputSendKeyboardEvent,
typeEventRef, c_void_p(), sizeof(raw_event), c_void_p(),
byref(raw_event))
symbol, modifiers = self._get_symbol_and_modifiers(raw_event)
motion_modifiers = modifiers & \
(key.MOD_COMMAND | key.MOD_CTRL | key.MOD_OPTION)
if (symbol, motion_modifiers) in _motion_map:
motion = _motion_map[symbol, motion_modifiers]
if modifiers & key.MOD_SHIFT:
self.dispatch_event('on_text_motion_select', motion)
else:
self.dispatch_event('on_text_motion', motion)
elif ((unicodedata.category(text[0]) != 'Cc' or text == u'\r') and
not (modifiers & key.MOD_COMMAND)):
self.dispatch_event('on_text', text)
return noErr
@CarbonEventHandler(kEventClassKeyboard, kEventRawKeyUp)
def _on_key_up(self, next_handler, ev, data):
symbol, modifiers = self._get_symbol_and_modifiers(ev)
if symbol:
self.dispatch_event('on_key_release', symbol, modifiers)
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassKeyboard, kEventRawKeyDown)
def _on_key_down(self, next_handler, ev, data):
symbol, modifiers = self._get_symbol_and_modifiers(ev)
if symbol:
self.dispatch_event('on_key_press', symbol, modifiers)
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@staticmethod
def _get_symbol_and_modifiers(ev):
sym = c_uint32()
carbon.GetEventParameter(ev, kEventParamKeyCode,
typeUInt32, c_void_p(), sizeof(sym), c_void_p(), byref(sym))
modifiers = c_uint32()
carbon.GetEventParameter(ev, kEventParamKeyModifiers,
typeUInt32, c_void_p(), sizeof(modifiers), c_void_p(),
byref(modifiers))
symbol = keymap.get(sym.value, None)
if symbol is None:
symbol = key.user_key(sym.value)
return (symbol, CarbonWindow._map_modifiers(modifiers.value))
@staticmethod
def _map_modifiers(modifiers):
mapped_modifiers = 0
if modifiers & (shiftKey | rightShiftKey):
mapped_modifiers |= key.MOD_SHIFT
if modifiers & (controlKey | rightControlKey):
mapped_modifiers |= key.MOD_CTRL
if modifiers & (optionKey | rightOptionKey):
mapped_modifiers |= key.MOD_OPTION
if modifiers & alphaLock:
mapped_modifiers |= key.MOD_CAPSLOCK
if modifiers & cmdKey:
mapped_modifiers |= key.MOD_COMMAND
return mapped_modifiers
@CarbonEventHandler(kEventClassKeyboard, kEventRawKeyModifiersChanged)
def _on_modifiers_changed(self, next_handler, ev, data):
modifiers = c_uint32()
carbon.GetEventParameter(ev, kEventParamKeyModifiers,
typeUInt32, c_void_p(), sizeof(modifiers), c_void_p(),
byref(modifiers))
modifiers = modifiers.value
deltas = modifiers ^ self._current_modifiers
for mask, k in [
(controlKey, key.LCTRL),
(shiftKey, key.LSHIFT),
(cmdKey, key.LCOMMAND),
(optionKey, key.LOPTION),
(rightShiftKey, key.RSHIFT),
(rightOptionKey, key.ROPTION),
(rightControlKey, key.RCTRL),
(alphaLock, key.CAPSLOCK),
(numLock, key.NUMLOCK)]:
if deltas & mask:
if modifiers & mask:
self.dispatch_event('on_key_press',
k, self._mapped_modifiers)
else:
self.dispatch_event('on_key_release',
k, self._mapped_modifiers)
carbon.CallNextEventHandler(next_handler, ev)
self._mapped_modifiers = self._map_modifiers(modifiers)
self._current_modifiers = modifiers
return noErr
def _get_mouse_position(self, ev):
position = HIPoint()
carbon.GetEventParameter(ev, kEventParamMouseLocation,
typeHIPoint, c_void_p(), sizeof(position), c_void_p(),
byref(position))
bounds = Rect()
carbon.GetWindowBounds(self._window, kWindowContentRgn, byref(bounds))
return int(position.x - bounds.left), int(position.y - bounds.top)
@staticmethod
def _get_mouse_button_and_modifiers(ev):
button = EventMouseButton()
carbon.GetEventParameter(ev, kEventParamMouseButton,
typeMouseButton, c_void_p(), sizeof(button), c_void_p(),
byref(button))
if button.value == 1:
button = mouse.LEFT
elif button.value == 2:
button = mouse.RIGHT
elif button.value == 3:
button = mouse.MIDDLE
modifiers = c_uint32()
carbon.GetEventParameter(ev, kEventParamKeyModifiers,
typeUInt32, c_void_p(), sizeof(modifiers), c_void_p(),
byref(modifiers))
return button, CarbonWindow._map_modifiers(modifiers.value)
@staticmethod
def _get_mouse_in_content(ev):
position = Point()
carbon.GetEventParameter(ev, kEventParamMouseLocation,
typeQDPoint, c_void_p(), sizeof(position), c_void_p(),
byref(position))
return carbon.FindWindow(position, None) == inContent
@CarbonEventHandler(kEventClassMouse, kEventMouseDown)
def _on_mouse_down(self, next_handler, ev, data):
if self._fullscreen or self._get_mouse_in_content(ev):
button, modifiers = self._get_mouse_button_and_modifiers(ev)
x, y = self._get_mouse_position(ev)
y = self.height - y
self.dispatch_event('on_mouse_press', x, y, button, modifiers)
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassMouse, kEventMouseUp)
def _on_mouse_up(self, next_handler, ev, data):
# Always report mouse up, even out of content area, because it's
# probably after a drag gesture.
button, modifiers = self._get_mouse_button_and_modifiers(ev)
x, y = self._get_mouse_position(ev)
y = self.height - y
self.dispatch_event('on_mouse_release', x, y, button, modifiers)
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassMouse, kEventMouseMoved)
def _on_mouse_moved(self, next_handler, ev, data):
if self._fullscreen or self._get_mouse_in_content(ev):
x, y = self._get_mouse_position(ev)
y = self.height - y
self._mouse_x = x
self._mouse_y = y
delta = HIPoint()
carbon.GetEventParameter(ev, kEventParamMouseDelta,
typeHIPoint, c_void_p(), sizeof(delta), c_void_p(),
byref(delta))
# Motion event
self.dispatch_event('on_mouse_motion',
x, y, delta.x, -delta.y)
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassMouse, kEventMouseDragged)
def _on_mouse_dragged(self, next_handler, ev, data):
button, modifiers = self._get_mouse_button_and_modifiers(ev)
x, y = self._get_mouse_position(ev)
y = self.height - y
self._mouse_x = x
self._mouse_y = y
delta = HIPoint()
carbon.GetEventParameter(ev, kEventParamMouseDelta,
typeHIPoint, c_void_p(), sizeof(delta), c_void_p(),
byref(delta))
# Drag event
self.dispatch_event('on_mouse_drag',
x, y, delta.x, -delta.y, button, modifiers)
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassMouse, kEventMouseEntered)
def _on_mouse_entered(self, next_handler, ev, data):
x, y = self._get_mouse_position(ev)
y = self.height - y
self._mouse_x = x
self._mouse_y = y
self._mouse_in_window = True
self.set_mouse_platform_visible()
self.dispatch_event('on_mouse_enter', x, y)
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassMouse, kEventMouseExited)
def _on_mouse_exited(self, next_handler, ev, data):
if not self._fullscreen:
x, y = self._get_mouse_position(ev)
y = self.height - y
self._mouse_in_window = False
self.set_mouse_platform_visible()
self.dispatch_event('on_mouse_leave', x, y)
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassMouse, kEventMouseWheelMoved)
def _on_mouse_wheel_moved(self, next_handler, ev, data):
x, y = self._get_mouse_position(ev)
y = self.height - y
axis = EventMouseWheelAxis()
carbon.GetEventParameter(ev, kEventParamMouseWheelAxis,
typeMouseWheelAxis, c_void_p(), sizeof(axis), c_void_p(),
byref(axis))
delta = c_long()
carbon.GetEventParameter(ev, kEventParamMouseWheelDelta,
typeSInt32, c_void_p(), sizeof(delta), c_void_p(),
byref(delta))
if axis.value == kEventMouseWheelAxisX:
self.dispatch_event('on_mouse_scroll',
x, y, delta.value, 0)
else:
self.dispatch_event('on_mouse_scroll',
x, y, 0, delta.value)
# _Don't_ call the next handler, which is application, as this then
# calls our window handler again.
#carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassWindow, kEventWindowClose)
def _on_window_close(self, next_handler, ev, data):
self.dispatch_event('on_close')
# Presumably the next event handler is the one that closes
# the window; don't do that here.
#carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassWindow, kEventWindowResizeCompleted)
def _on_window_resize_completed(self, next_handler, ev, data):
rect = Rect()
carbon.GetWindowBounds(self._window, kWindowContentRgn, byref(rect))
width = rect.right - rect.left
height = rect.bottom - rect.top
self.dispatch_event('on_resize', width, height)
self.dispatch_event('on_expose')
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassWindow, kEventWindowDragCompleted)
def _on_window_drag_completed(self, next_handler, ev, data):
rect = Rect()
carbon.GetWindowBounds(self._window, kWindowContentRgn, byref(rect))
self.dispatch_event('on_move', rect.left, rect.top)
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassWindow, kEventWindowBoundsChanged)
def _on_window_bounds_change(self, next_handler, ev, data):
self._update_track_region()
self._update_drawable()
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassWindow, kEventWindowZoomed)
def _on_window_zoomed(self, next_handler, ev, data):
rect = Rect()
carbon.GetWindowBounds(self._window, kWindowContentRgn, byref(rect))
width = rect.right - rect.left
height = rect.bottom - rect.top
self.dispatch_event('on_move', rect.left, rect.top)
self.dispatch_event('on_resize', width, height)
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassWindow, kEventWindowActivated)
def _on_window_activated(self, next_handler, ev, data):
self.dispatch_event('on_activate')
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassWindow, kEventWindowDeactivated)
def _on_window_deactivated(self, next_handler, ev, data):
self.dispatch_event('on_deactivate')
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassWindow, kEventWindowShown)
@CarbonEventHandler(kEventClassWindow, kEventWindowExpanded)
def _on_window_shown(self, next_handler, ev, data):
self._update_drawable()
self.dispatch_event('on_show')
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassWindow, kEventWindowHidden)
@CarbonEventHandler(kEventClassWindow, kEventWindowCollapsed)
def _on_window_hidden(self, next_handler, ev, data):
self.dispatch_event('on_hide')
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassWindow, kEventWindowDrawContent)
def _on_window_draw_content(self, next_handler, ev, data):
self.dispatch_event('on_expose')
carbon.CallNextEventHandler(next_handler, ev)
return noErr
def _create_cfstring(text):
return carbon.CFStringCreateWithCString(c_void_p(),
text.encode('utf8'),
kCFStringEncodingUTF8)
def _oscheck(result):
if result != noErr:
        raise CarbonException('Carbon error %d' % result)
return result
def _aglcheck():
err = agl.aglGetError()
if err != agl.AGL_NO_ERROR:
raise CarbonException(cast(agl.aglErrorString(err), c_char_p).value)
|
Gui13/CouchPotatoServer | refs/heads/master | couchpotato/core/plugins/userscript/main.py | 44 | import os
from couchpotato import index
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.variable import isDict
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from tornado.web import RequestHandler
log = CPLog(__name__)
class Userscript(Plugin):
version = 5
def __init__(self):
addApiView('userscript.get/(.*)/(.*)', self.getUserScript, static = True)
addApiView('userscript', self.iFrame)
addApiView('userscript.add_via_url', self.getViaUrl)
addApiView('userscript.includes', self.getIncludes)
addApiView('userscript.bookmark', self.bookmark)
addEvent('userscript.get_version', self.getVersion)
def bookmark(self, host = None, **kwargs):
params = {
'includes': fireEvent('userscript.get_includes', merge = True),
'excludes': fireEvent('userscript.get_excludes', merge = True),
'host': host,
}
return self.renderTemplate(__file__, 'bookmark.js_tmpl', **params)
def getIncludes(self, **kwargs):
return {
'includes': fireEvent('userscript.get_includes', merge = True),
'excludes': fireEvent('userscript.get_excludes', merge = True),
}
def getUserScript(self, script_route, **kwargs):
klass = self
class UserscriptHandler(RequestHandler):
def get(self, random, route):
params = {
'includes': fireEvent('userscript.get_includes', merge = True),
'excludes': fireEvent('userscript.get_excludes', merge = True),
'version': klass.getVersion(),
'api': '%suserscript/' % Env.get('api_base'),
'host': '%s://%s' % (self.request.protocol, self.request.headers.get('X-Forwarded-Host') or self.request.headers.get('host')),
}
script = klass.renderTemplate(__file__, 'template.js_tmpl', **params)
klass.createFile(os.path.join(Env.get('cache_dir'), 'couchpotato.user.js'), script)
self.redirect(Env.get('api_base') + 'file.cache/couchpotato.user.js')
Env.get('app').add_handlers(".*$", [('%s%s' % (Env.get('api_base'), script_route), UserscriptHandler)])
def getVersion(self):
versions = fireEvent('userscript.get_provider_version')
version = self.version
for v in versions:
version += v
return version
def iFrame(self, **kwargs):
return index()
def getViaUrl(self, url = None, **kwargs):
params = {
'url': url,
'movie': fireEvent('userscript.get_movie_via_url', url = url, single = True)
}
if not isDict(params['movie']):
log.error('Failed adding movie via url: %s', url)
params['error'] = params['movie'] if params['movie'] else 'Failed getting movie info'
return params
|
ojengwa/reportr | refs/heads/master | core/services/freckle.py | 2 | import json
import requests
import datetime
from . import exceptions
class FreckleClient(object):
"""Simple client implementation to fetch json data from the v1 API."""
def __init__(self, account_name, api_token):
"""
Creates a ``FreckleClient`` instance.
:account_name: Your Freckle account name.
:api_token: Your Freckle API token.
"""
self.account_name = account_name
self.api_token = api_token
def fetch_json(self, uri_path, http_method='GET', headers=None,
query_params=None, post_args=None):
"""
Fetch some JSON from Letsfreckle.
For example, fetch some entries like so:
entries = self.fetch_json(
'entries',
query_params={
'per_page': 1000,
'search[from]': '2015-01-01',
'search[to]': '2015-01-31',
'search[projects]': [1423, 24545, ]),
}
)
"""
# explicit values here to avoid mutable default values
if headers is None:
headers = {}
if query_params is None:
query_params = {}
if post_args is None:
post_args = {}
# set content type and accept headers to handle JSON
headers['Accept'] = 'application/json'
query_params['token'] = self.api_token
# construct the full URL without query parameters
url = 'https://{0}.letsfreckle.com/api/{1}.json'.format(
self.account_name, uri_path)
# perform the HTTP requests, if possible uses OAuth authentication
response = requests.request(
http_method, url, params=query_params, headers=headers,
data=json.dumps(post_args))
if response.status_code != 200:
raise exceptions.FreckleClientException(
"Freckle API Response is not 200", response.text)
return response.json()
class FreckleClientV2(object):
"""Simple client implementation to fetch json data from the v2 API."""
def __init__(self, access_token):
"""
        Creates a ``FreckleClientV2`` instance.
        :access_token: Your Freckle V2 API access token.
"""
self.access_token = access_token
def fetch_json(self, uri_path, http_method='GET', headers=None,
query_params=None, post_args=None):
"""
Fetch some JSON from Letsfreckle.
For example, fetch some entries like so:
entries = self.fetch_json(
'entries',
query_params={
'per_page': 1000,
'search[from]': '2015-01-01',
'search[to]': '2015-01-31',
                'search[projects]': [1423, 24545],
}
)
"""
# explicit values here to avoid mutable default values
if headers is None:
headers = {}
if query_params is None:
query_params = {}
if post_args is None:
post_args = {}
# set content type and accept headers to handle JSON
headers['Accept'] = 'application/json'
        headers['User-Agent'] = "python-freckle-client/0.1"
headers['X-FreckleToken'] = self.access_token
# construct the full URL without query parameters
url = 'https://api.letsfreckle.com/v2/{0}'.format(uri_path)
        # perform the HTTP request
response = requests.request(
http_method, url, params=query_params, headers=headers,
data=json.dumps(post_args))
if response.status_code != 200:
raise exceptions.FreckleClientException(
"Freckle API Response is not 200", response.text)
return response.json()
class Freckle(object):
def __init__(self, token, account=None, api_version=1):
"""Ensures that the appropraite API ebdpoint is been called."""
if api_version == 1 and not account:
            raise AttributeError('account is required when api_version is 1')
if api_version == 1:
self.client = FreckleClient(account, token)
else:
self.client = FreckleClientV2(token)
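    # Illustrative usage (the token and account name are placeholders, not
    # real credentials):
    #
    #     v1 = Freckle('secret-token', account='mycompany')
    #     v2 = Freckle('secret-token', api_version=2)
    #     users = v2.get_users()
    #
    # Both versions expose the same wrapper methods; only the endpoint URL
    # and the authentication header differ between the two clients.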
def get_entries(self, projects, start_date, end_date): # pragma: no cover
"""
        Returns the entries for the given projects and time frame.
        :param projects: List of project dicts as returned by the API.
:param start_date: String representing the start date (YYYY-MM-DD).
:param end_date: String representing the end date (YYYY-MM-DD).
"""
entries = self.client.fetch_json(
'entries',
query_params={
'per_page': 1000,
'search[from]': start_date,
'search[to]': end_date,
'search[projects]': ','.join(
[str(project['project']['id']) for project in projects]),
}
)
return entries
def get_project_times(self, projects, entries):
"""
Returns a dict with total time tracked per project / employee.
The dict should look like this:
{
month: {
project_id: {
user_id-1: XX,
user_id-2: YY,
total: XX + YY,
},
},
}
"""
result = {}
for obj in entries:
entry = obj['entry']
entry_date = datetime.datetime.strptime(
entry['date'], '%Y-%m-%d')
if entry_date.month not in result:
result[entry_date.month] = {}
project_id = entry['project']['id']
project_name = entry['project']['name']
user_id = entry['user_id']
# for key, project in projects:
# print key, project
# if project_id not in result[entry_date.month]:
# result[entry_date.month][project_id] = {
# 'total': 0, 'project_name': project_name, }
# if project is None:
# result[entry_date.month][project_id]['is_planned'] = False
# else:
# result[entry_date.month][project_id]['is_planned'] = True
# if user_id not in result[entry_date.month][project_id]:
# result[entry_date.month][project_id][user_id] = 0
# if (project and project.is_investment) or entry['billable']:
# minutes = entry['minutes']
# result[entry_date.month][project_id][user_id] += minutes
# result[entry_date.month][project_id]['total'] += minutes
return result
def get_users(self):
"""Get users from Freckle"""
return self.client.fetch_json('users')
def get_projects(self):
"""Get projects from Freckle"""
return self.client.fetch_json('projects')
def boolean_as_python(self, val):
"""Convert text to boolean"""
if val == 'true':
return True
else:
return False
def date_as_python(self, val):
"""Convert text to date"""
return datetime.date(*[int(x) for x in val.split("-")])
def datetime_as_python(self, val):
"""Convert text to datetime"""
return parse_date(val)
def integer_as_python(self, val):
"""Convert text to integer"""
return int(val)
def array_as_python(self, val):
"""Convert text to list"""
return val.split(",")
def json_as_python(self, val):
"""Convert JSON to dict"""
        return json.loads(val) |
mwx1993/TACTIC | refs/heads/master | src/pyasm/application/common/base_app_info.py | 6 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['BaseAppInfo', 'TacticException', 'Common']
import cStringIO, os, sys, urllib, xmlrpclib, re
from xml.dom.minidom import parseString
from upload_multipart import UploadMultipart
HAS_MD5 = True
try:
    import hashlib
except ImportError:
    try:
        import md5
    except ImportError:
        HAS_MD5 = False
def get_md5():
    try:
        import hashlib
        md5_obj = hashlib.md5()
    except ImportError:
        try:
            import md5
            md5_obj = md5.new()
        except ImportError:
            md5_obj = None
    return md5_obj
class TacticException(Exception):
pass
class BaseAppInfo(object):
'''Holds data in the application session that is fed by the tactic server.
This is merely used to extract information from the appropriate source.
Use the "AppEnvironment" classes to access this information
'''
def __init__(my, app_name=None):
my.xmlrpc_server = None
my.app = None
my.app_name = app_name
my.sandbox_dir = None
my.project_code = None
my.server = None
my.tmpdir = None
my.save_dir = None
my.ticket = None
my.user = None
# set this as the singleton
BaseAppInfo.set(my)
def get_app_name(my):
return my.app_name
def set_up_maya(my, init=False):
# set up application environment
from pyasm.application.maya import MayaEnvironment
MayaEnvironment.set_up(my)
def set_up_houdini(my, port=None):
from pyasm.application.houdini import HoudiniEnvironment
HoudiniEnvironment.set_up(my)
def set_up_xsi(my, xsi, tool):
# set up application environment
from pyasm.application.xsi import XSIEnvironment
XSIEnvironment.set_up(my, xsi, tool)
def set_up_flash(my):
# set up application environment
from pyasm.application.flash import FlashEnvironment
my.env = FlashEnvironment.set_up(my)
def close_houdini(my):
'''close the socket to houdini'''
socket = my.app.get_socket()
if socket:
socket.close()
def close_xsi(my):
'''
var prefs = Application.Preferences;
var originalsetting = prefs.GetPreferenceValue( "scripting.cmdlog" );
// Disable command logging
if ( !originalsetting ) {
prefs.SetPreferenceValue( "scripting.cmdlog", false );
}
//
// Do your stuff
//
// Restore logging setting to the way it was
prefs.SetPreferenceValue( "scripting.cmdlog", originalsetting );
'''
pass
def get_builder(my):
if my.app_name == "houdini":
from pyasm.application.houdini import HoudiniBuilder
return HoudiniBuilder()
elif my.app_name == "maya":
from pyasm.application.maya import MayaBuilder
return MayaBuilder()
elif my.app_name == "xsi":
from pyasm.application.xsi import XSIBuilder
return XSIBuilder()
elif my.app_name == "flash":
from pyasm.application.flash import FlashBuilder
return FlashBuilder()
def get_app_implementation(my):
if my.app_name == "houdini":
from pyasm.application.houdini import HoudiniImpl
return HoudiniImpl()
elif my.app_name == "maya":
from pyasm.application.maya import MayaImpl
return MayaImpl()
elif my.app_name == "xsi":
from pyasm.application.xsi import XSIImpl
return XSIImpl()
def get_app(my):
return my.app
def get_ticket(my):
return my.ticket
def set_ticket(my, ticket):
my.ticket = ticket
def set_user(my, user):
my.user = user
def get_user(my):
return my.user
def get_project_code(my):
return my.project_code
def get_server(my):
return my.server
def set_tmpdir(my, tmpdir):
from app_environment import AppEnvironment
env = AppEnvironment.get()
env.set_tmpdir(tmpdir)
my.tmpdir = tmpdir
def get_tmpdir(my):
return my.tmpdir
def get_save_dir(my):
impl = my.get_app_implementation()
return impl.get_save_dir()
def get_sandbox_dir(my):
return my.sandbox_dir
def set_sandbox_dir(my, sandbox_dir):
my.sandbox_dir = sandbox_dir
def get_xmlrpc_server(my):
raise Exception("Not implemented")
def report_msg(my, label, msg):
'''this is for debugging only'''
path = "%s/msg.txt" % my.get_tmpdir()
file = open(path, "a")
msg = '%s: %s\n' %(label, msg)
file.write(msg)
file.close()
def report_error(my, exception):
print "Error: ", exception
path = "%s/error.txt" % my.get_tmpdir()
file = open(path, "w")
msg = str(exception)
file.write(msg)
file.close()
my.upload(path)
def report_warning(my, label, warning, upload=False, type=''):
print "warning: ", warning
path = "%s/warning.txt" % my.get_tmpdir()
if label and warning:
file = open(path, "a")
msg = '%s||%s||%s\n' %(label, warning, type)
file.write(msg)
file.close()
if upload and os.path.exists(path):
my.upload(path)
def get_upload_server(my):
return None
def upload(my, from_path):
print "DEPRECATED"
print "uploading: ", from_path
ticket = my.get_ticket()
upload_server = my.get_upload_server()
upload = UploadMultipart()
upload.set_ticket(ticket)
upload.set_upload_server(upload_server)
upload.execute(from_path)
return
'''
file = open(from_path, "rb")
buffer_size = 1024*1024
iteration = 0
while 1:
contents = file.read(buffer_size)
if contents == "":
break
# create a buffer with the contents
buffer = cStringIO.StringIO()
buffer.write("file=%s\n" % from_path)
if iteration == 0:
buffer.write("action=create\n")
else:
buffer.write("action=append\n")
ticket = my.get_ticket()
buffer.write("ticket=%s\n" % ticket)
buffer.write("EOF\n")
buffer.write(contents)
f = urllib.urlopen(upload_server, buffer.getvalue() )
response = f.readlines()
f.close()
print response
iteration += 1
file.close()
'''
def download(my, url, to_dir="", md5_checksum=""):
print "DEPRECATED"
filename = os.path.basename(url)
# download to the current project
if not to_dir:
to_dir = my.get_tmpdir()
# make sure the directory exists
if not os.path.exists(to_dir):
os.makedirs(to_dir)
to_path = "%s/%s" % (to_dir, filename)
# check if this file is already downloaded. if so, skip
if os.path.exists(to_path):
# if it exists, check the MD5 checksum
if md5_checksum:
f = open(to_path, 'rb')
CHUNK = 1024*1024
m = get_md5()
while 1:
chunk = f.read(CHUNK)
if not chunk:
break
m.update(chunk)
f.close()
md5_local = m.hexdigest()
# only return if the md5 checksums are the same
if md5_checksum == md5_local:
print "skipping '%s', already exists" % to_path
return to_path
f = urllib.urlopen(url)
file = open(to_path, "wb")
file.write( f.read() )
file.close()
f.close()
"""
print "starting download"
try:
file = open(to_path, "wb")
req = urllib2.urlopen(url)
try:
while True:
buffer = req.read(1024*100)
print "read: ", len(buffer)
if not buffer:
break
file.write( buffer )
finally:
print "closing ...."
req.close()
file.close()
except urllib2.URLError, e:
raise Exception('%s - %s' % (e,url))
print "... done download"
"""
return to_path
__object = None
def get():
return BaseAppInfo.__object
get = staticmethod(get)
def set(info):
BaseAppInfo.__object = info
set = staticmethod(set)
class Common(object):
def get_filesystem_name(name):
new_name = re.sub(r'/|\||:|\?|=|\s', '_', name)
filename_base, ext = os.path.splitext(new_name)
ext = ext.lower()
new_name = "%s%s" % (filename_base, ext)
return new_name
get_filesystem_name = staticmethod(get_filesystem_name)
def set_sys_env(name, value):
        '''setting a Windows system environment variable, without broadcasting'''
if os.name == 'nt':
try:
import _winreg
x= _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
y=_winreg.OpenKey(x,\
r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment",\
0,_winreg.KEY_ALL_ACCESS)
_winreg.SetValueEx(y, name ,0,_winreg.REG_EXPAND_SZ, value)
_winreg.CloseKey(y)
_winreg.CloseKey(x)
except OSError, e:
print "Registry Key not found."
return
except WindowsError, e:
print str(e)
return
# This part is too error-prone, like fail to import win32gui
'''
# broadcast
HWND_BROADCAST = 0xFFFF
WM_SETTINGCHANGE = 0x001A
SMTO_ABORTIFHUNG = 0x0002
sParam = "Environment"
import win32gui
res1, res2 = win32gui.SendMessageTimeout(HWND_BROADCAST,\
WM_SETTINGCHANGE, 0, sParam, SMTO_ABORTIFHUNG, 100)
if not res1:
print ("result %s, %s from SendMessageTimeout" % (bool(res1), res2))
'''
set_sys_env = staticmethod(set_sys_env)
def get_option_dict(options_str):
'''get an option dict'''
options = {}
exprs = options_str.split("|")
for expr in exprs:
name, value = expr.split("=")
options[name] = value
return options
get_option_dict = staticmethod(get_option_dict)
def get_option_str(option_dict):
'''get the option str given a dict'''
option_list = []
for key, value in option_dict.items():
option_list.append('%s=%s' %(key, value))
return '|'.join(option_list)
get_option_str = staticmethod(get_option_str)
def get_file_range(file_range):
''' build a file range tuple from a string'''
frame_by = 1
if file_range.find("/") != -1:
file_range, frame_by = file_range.split("/")
frame_start, frame_end = file_range.split("-")
frame_start = int(frame_start)
frame_end = int(frame_end)
frame_by = int(frame_by)
return frame_start, frame_end, frame_by
get_file_range = staticmethod(get_file_range)
def is_file_group(path):
        pat = re.compile(r'\.(#+)\.')
if pat.search(path):
return True
return False
is_file_group = staticmethod(is_file_group)
def expand_paths( file_path, file_range ):
'''expands the file paths, replacing # as specified in the file_range'''
#TODO: expand paths somehow if file_range is empty
file_paths = []
# frame_by is not really used here yet
frame_by = 1
if file_range.find("/") != -1:
file_range, frame_by = file_range.split("/")
frame_start, frame_end = file_range.split("-")
frame_start = int(frame_start)
frame_end = int(frame_end)
frame_by = int(frame_by)
# find out the number of #'s in the path
padding = len( file_path[file_path.index('#'):file_path.rindex('#')] )+1
for i in range(frame_start, frame_end+1, frame_by):
expanded = file_path.replace( '#'*padding, str(i).zfill(padding) )
file_paths.append(expanded)
return file_paths
expand_paths = staticmethod(expand_paths)
def get_md5(path):
'''get md5 checksum'''
try:
f = open(path, 'rb')
CHUNK = 1024*1024
m = get_md5()
while 1:
chunk = f.read(CHUNK)
if not chunk:
break
m.update(chunk)
md5_checksum = m.hexdigest()
f.close()
return md5_checksum
except IOError, e:
print "WARNING: error getting md5 on [%s]: " % path, e
return None
get_md5 = staticmethod(get_md5)
def get_unique_list(list):
''' get a unique list, order preserving'''
seen = set()
return [ x for x in list if x not in seen and not seen.add(x)]
get_unique_list = staticmethod(get_unique_list)
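# Illustrative examples for the Common helpers above (all values made up):
#
#     Common.get_option_dict('res=720|step=2')  # -> {'res': '720', 'step': '2'}
#     Common.get_file_range('1-10/2')           # -> (1, 10, 2)
#     Common.expand_paths('beauty.####.exr', '1-3')
#     # -> ['beauty.0001.exr', 'beauty.0002.exr', 'beauty.0003.exr']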
|
chand3040/sree_odoo | refs/heads/master | openerp/addons/l10n_in_hr_payroll/report/report_hr_yearly_salary_detail.py | 374 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import datetime
from openerp.report import report_sxw
from openerp.osv import osv
class employees_yearly_salary_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(employees_yearly_salary_report, self).__init__(cr, uid, name, context)
self.localcontext.update({
'time': time,
'get_employee': self.get_employee,
'get_employee_detail': self.get_employee_detail,
'cal_monthly_amt': self.cal_monthly_amt,
'get_periods': self.get_periods,
'get_total': self.get_total,
'get_allow': self.get_allow,
'get_deduct': self.get_deduct,
})
self.context = context
def get_periods(self, form):
self.mnths = []
# Get start year-month-date and end year-month-date
first_year = int(form['date_from'][0:4])
last_year = int(form['date_to'][0:4])
first_month = int(form['date_from'][5:7])
last_month = int(form['date_to'][5:7])
no_months = (last_year-first_year) * 12 + last_month - first_month + 1
current_month = first_month
current_year = first_year
# Get name of the months from integer
mnth_name = []
for count in range(0, no_months):
m = datetime.date(current_year, current_month, 1).strftime('%b')
mnth_name.append(m)
self.mnths.append(str(current_month) + '-' + str(current_year))
if current_month == 12:
current_month = 0
current_year = last_year
current_month = current_month + 1
for c in range(0, (12-no_months)):
mnth_name.append('')
self.mnths.append('')
return [mnth_name]
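    # Worked example (dates are hypothetical): for date_from='2015-11-01' and
    # date_to='2016-01-31', no_months is 3, self.mnths becomes
    # ['11-2015', '12-2015', '1-2016'] padded with '' to 12 entries, and the
    # returned row is [['Nov', 'Dec', 'Jan', '', ..., '']].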
def get_employee(self, form):
return self.pool.get('hr.employee').browse(self.cr,self.uid, form.get('employee_ids', []), context=self.context)
def get_employee_detail(self, form, obj):
self.allow_list = []
self.deduct_list = []
self.total = 0.00
gross = False
net = False
payslip_lines = self.cal_monthly_amt(form, obj.id)
for line in payslip_lines:
            for rec in line:
                if rec[0] == "Gross":
                    gross = rec
                elif rec[0] == "Net":
                    net = rec
                elif rec[13] > 0.0 and rec[0] != "Net":
                    self.total += rec[len(rec) - 1]
                    self.allow_list.append(rec)
                elif rec[13] < 0.0:
                    self.total += rec[len(rec) - 1]
                    self.deduct_list.append(rec)
if gross:
self.allow_list.append(gross)
if net:
self.deduct_list.append(net)
return None
def cal_monthly_amt(self, form, emp_id):
category_obj = self.pool.get('hr.salary.rule.category')
result = []
res = []
salaries = {}
self.cr.execute('''SELECT rc.code, pl.name, sum(pl.total), \
to_char(date_to,'mm-yyyy') as to_date FROM hr_payslip_line as pl \
LEFT JOIN hr_salary_rule_category AS rc on (pl.category_id = rc.id) \
LEFT JOIN hr_payslip as p on pl.slip_id = p.id \
LEFT JOIN hr_employee as emp on emp.id = p.employee_id \
WHERE p.employee_id = %s \
GROUP BY rc.parent_id, pl.sequence, pl.id, pl.category_id,pl.name,p.date_to,rc.code \
ORDER BY pl.sequence, rc.parent_id''',(emp_id,))
salary = self.cr.fetchall()
for category in salary:
if category[0] not in salaries:
salaries.setdefault(category[0], {})
salaries[category[0]].update({category[1]: {category[3]: category[2]}})
elif category[1] not in salaries[category[0]]:
salaries[category[0]].setdefault(category[1], {})
salaries[category[0]][category[1]].update({category[3]: category[2]})
else:
salaries[category[0]][category[1]].update({category[3]: category[2]})
category_ids = category_obj.search(self.cr,self.uid, [], context=self.context)
categories = category_obj.read(self.cr, self.uid, category_ids, ['code'], context=self.context)
for code in map(lambda x: x['code'], categories):
if code in salaries:
res = self.salary_list(salaries[code])
result.append(res)
return result
def salary_list(self, salaries):
cat_salary_all = []
for category_name,amount in salaries.items():
cat_salary = []
total = 0.0
cat_salary.append(category_name)
for mnth in self.mnths:
                if mnth != 'None':
if len(mnth) != 7:
mnth = '0' + str(mnth)
if mnth in amount and amount[mnth]:
cat_salary.append(amount[mnth])
total += amount[mnth]
else:
cat_salary.append(0.00)
else:
cat_salary.append('')
cat_salary.append(total)
cat_salary_all.append(cat_salary)
return cat_salary_all
def get_allow(self):
return self.allow_list
def get_deduct(self):
return self.deduct_list
def get_total(self):
return self.total
class wrapped_report_payslip(osv.AbstractModel):
_name = 'report.l10n_in_hr_payroll.report_hryearlysalary'
_inherit = 'report.abstract_report'
_template = 'l10n_in_hr_payroll.report_hryearlysalary'
_wrapped_report_class = employees_yearly_salary_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: |
tmimori/frappe | refs/heads/develop | frappe/desk/tags.py | 10 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
import json
"""
Server side functions for tagging.
- Tags can be added to any record (doctype, name) in the system.
- Items are filtered by tags
- Top tags are shown in the sidebar (?)
- Tags are also identified by the tag_fields property of the DocType
Discussion:
Tags are shown in the docbrowser and ideally where-ever items are searched.
There should also be statistics available for tags (like top tags etc)
Design:
- free tags (user_tags) are stored in __user_tags
- doctype tags are set in tag_fields property of the doctype
- top tags merges the tags from both the lists (only refreshes once an hour (max))
"""
import frappe
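# Illustrative usage of the helpers below ("ToDo" and the tag values are
# placeholders, not fixtures shipped with this module):
#
#     add_tag("urgent", "ToDo", "TD-0001")   # stores ",urgent" in _user_tags
#     DocTags("ToDo").get_tags("TD-0001")    # -> ",urgent"
#     remove_tag("urgent", "ToDo", "TD-0001")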
def check_user_tags(dt):
"if the user does not have a tags column, then it creates one"
try:
frappe.db.sql("select `_user_tags` from `tab%s` limit 1" % dt)
except Exception as e:
if e.args[0] == 1054:
DocTags(dt).setup()
@frappe.whitelist()
def add_tag(tag, dt, dn, color=None):
"adds a new tag to a record, and creates the Tag master"
DocTags(dt).add(dn, tag)
return tag
@frappe.whitelist()
def remove_tag(tag, dt, dn):
"removes tag from the record"
DocTags(dt).remove(dn, tag)
@frappe.whitelist()
def get_tagged_docs(doctype, tag):
frappe.has_permission(doctype, throw=True)
	return frappe.db.sql("""SELECT name
		FROM `tab{0}`
		WHERE _user_tags LIKE %s""".format(doctype), "%{0}%".format(tag))
@frappe.whitelist()
def get_tags(doctype, txt, cat_tags):
tags = json.loads(cat_tags)
try:
for _user_tags in frappe.db.sql_list("""select DISTINCT `_user_tags`
from `tab{0}`
where _user_tags like '%{1}%'
limit 50""".format(frappe.db.escape(doctype), frappe.db.escape(txt))):
tags.extend(_user_tags[1:].split(","))
except Exception as e:
if e.args[0]!=1054: raise
return sorted(filter(lambda t: t and txt.lower() in t.lower(), list(set(tags))))
class DocTags:
"""Tags for a particular doctype"""
def __init__(self, dt):
self.dt = dt
def get_tag_fields(self):
"""returns tag_fields property"""
return frappe.db.get_value('DocType', self.dt, 'tag_fields')
def get_tags(self, dn):
"""returns tag for a particular item"""
return (frappe.db.get_value(self.dt, dn, '_user_tags', ignore=1) or '').strip()
def add(self, dn, tag):
"""add a new user tag"""
tl = self.get_tags(dn).split(',')
if not tag in tl:
tl.append(tag)
self.update(dn, tl)
def remove(self, dn, tag):
"""remove a user tag"""
tl = self.get_tags(dn).split(',')
		self.update(dn, list(filter(lambda x: x.lower() != tag.lower(), tl)))
def remove_all(self, dn):
"""remove all user tags (call before delete)"""
self.update(dn, [])
def update(self, dn, tl):
"""updates the _user_tag column in the table"""
if not tl:
tags = ''
else:
tl = list(set(filter(lambda x: x, tl)))
tags = ',' + ','.join(tl)
try:
frappe.db.sql("update `tab%s` set _user_tags=%s where name=%s" % \
(self.dt,'%s','%s'), (tags , dn))
except Exception as e:
if e.args[0]==1054:
if not tags:
# no tags, nothing to do
return
self.setup()
self.update(dn, tl)
else: raise
def setup(self):
"""adds the _user_tags column if not exists"""
from frappe.model.db_schema import add_column
add_column(self.dt, "_user_tags", "Data")
|
toshywoshy/ansible | refs/heads/devel | lib/ansible/modules/network/fortimanager/fmgr_ha.py | 38 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community"
}
DOCUMENTATION = '''
---
module: fmgr_ha
version_added: "2.8"
notes:
- Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
author:
- Luke Weighall (@lweighall)
- Andrew Welsh (@Ghilli3)
- Jim Huber (@p4r4n0y1ng)
short_description: Manages the High-Availability State of FortiManager Clusters and Nodes.
description: Change HA state or settings of FortiManager nodes (Standalone/Master/Slave).
options:
fmgr_ha_mode:
description:
- Sets the role of the FortiManager host for HA.
required: false
choices: ["standalone", "master", "slave"]
fmgr_ha_peer_ipv4:
description:
- Sets the IPv4 address of a HA peer.
required: false
fmgr_ha_peer_ipv6:
description:
- Sets the IPv6 address of a HA peer.
required: false
fmgr_ha_peer_sn:
description:
- Sets the HA Peer Serial Number.
required: false
fmgr_ha_peer_status:
description:
- Sets the peer status to enable or disable.
required: false
choices: ["enable", "disable"]
fmgr_ha_cluster_pw:
description:
- Sets the password for the HA cluster. Only required once. System remembers between HA mode switches.
required: false
fmgr_ha_cluster_id:
description:
- Sets the ID number of the HA cluster. Defaults to 1.
required: false
default: 1
fmgr_ha_hb_threshold:
description:
- Sets heartbeat lost threshold (1-255).
required: false
default: 3
fmgr_ha_hb_interval:
description:
- Sets the heartbeat interval (1-255).
required: false
default: 5
fmgr_ha_file_quota:
description:
- Sets the File quota in MB (2048-20480).
required: false
default: 4096
'''
EXAMPLES = '''
- name: SET FORTIMANAGER HA NODE TO MASTER
fmgr_ha:
fmgr_ha_mode: "master"
fmgr_ha_cluster_pw: "fortinet"
fmgr_ha_cluster_id: "1"
- name: SET FORTIMANAGER HA NODE TO SLAVE
fmgr_ha:
fmgr_ha_mode: "slave"
fmgr_ha_cluster_pw: "fortinet"
fmgr_ha_cluster_id: "1"
- name: SET FORTIMANAGER HA NODE TO STANDALONE
fmgr_ha:
fmgr_ha_mode: "standalone"
- name: ADD FORTIMANAGER HA PEER
fmgr_ha:
fmgr_ha_peer_ipv4: "192.168.1.254"
fmgr_ha_peer_sn: "FMG-VM1234567890"
fmgr_ha_peer_status: "enable"
- name: CREATE CLUSTER ON MASTER
fmgr_ha:
fmgr_ha_mode: "master"
fmgr_ha_cluster_pw: "fortinet"
fmgr_ha_cluster_id: "1"
fmgr_ha_hb_threshold: "10"
fmgr_ha_hb_interval: "15"
fmgr_ha_file_quota: "2048"
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: str
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
from ansible.module_utils.network.fortimanager.common import FMGBaseException
from ansible.module_utils.network.fortimanager.common import FMGRCommon
from ansible.module_utils.network.fortimanager.common import FMGRMethods
from ansible.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def fmgr_set_ha_mode(fmgr, paramgram):
"""
:param fmgr: The fmgr object instance from fortimanager.py
:type fmgr: class object
:param paramgram: The formatted dictionary of options to process
:type paramgram: dict
:return: The response from the FortiManager
:rtype: dict
"""
    # INIT BASIC OBJECTS
response = DEFAULT_RESULT_OBJ
url = ""
datagram = {}
if paramgram["fmgr_ha_cluster_pw"] is not None and str(paramgram["fmgr_ha_mode"].lower()) != "standalone":
datagram = {
"mode": paramgram["fmgr_ha_mode"],
"file-quota": paramgram["fmgr_ha_file_quota"],
"hb-interval": paramgram["fmgr_ha_hb_interval"],
"hb-lost-threshold": paramgram["fmgr_ha_hb_threshold"],
"password": paramgram["fmgr_ha_cluster_pw"],
"clusterid": paramgram["fmgr_ha_cluster_id"]
}
elif str(paramgram["fmgr_ha_mode"].lower()) == "standalone":
datagram = {
"mode": paramgram["fmgr_ha_mode"],
"file-quota": paramgram["fmgr_ha_file_quota"],
"hb-interval": paramgram["fmgr_ha_hb_interval"],
"hb-lost-threshold": paramgram["fmgr_ha_hb_threshold"],
"clusterid": paramgram["fmgr_ha_cluster_id"]
}
url = '/cli/global/system/ha'
response = fmgr.process_request(url, datagram, FMGRMethods.SET)
return response
def fmgr_get_ha_peer_list(fmgr):
"""
:param fmgr: The fmgr object instance from fortimanager.py
:type fmgr: class object
    :return: The response from the FortiManager
    :rtype: dict
    """
    # INIT BASIC OBJECTS
    response = DEFAULT_RESULT_OBJ
    datagram = {}
url = '/cli/global/system/ha/peer/'
response = fmgr.process_request(url, datagram, FMGRMethods.GET)
return response
def fmgr_set_ha_peer(fmgr, paramgram):
"""
:param fmgr: The fmgr object instance from fortimanager.py
:type fmgr: class object
:param paramgram: The formatted dictionary of options to process
:type paramgram: dict
:return: The response from the FortiManager
:rtype: dict
"""
datagram = {
"ip": paramgram["fmgr_ha_peer_ipv4"],
"ip6": paramgram["fmgr_ha_peer_ipv6"],
"serial-number": paramgram["fmgr_ha_peer_sn"],
"status": paramgram["fmgr_ha_peer_status"],
"id": paramgram["peer_id"]
}
url = '/cli/global/system/ha/peer/'
response = fmgr.process_request(url, datagram, FMGRMethods.SET)
return response
def main():
argument_spec = dict(
fmgr_ha_mode=dict(required=False, type="str", choices=["standalone", "master", "slave"]),
fmgr_ha_cluster_pw=dict(required=False, type="str", no_log=True),
fmgr_ha_peer_status=dict(required=False, type="str", choices=["enable", "disable"]),
fmgr_ha_peer_sn=dict(required=False, type="str"),
fmgr_ha_peer_ipv4=dict(required=False, type="str"),
fmgr_ha_peer_ipv6=dict(required=False, type="str"),
fmgr_ha_hb_threshold=dict(required=False, type="int", default=3),
fmgr_ha_hb_interval=dict(required=False, type="int", default=5),
fmgr_ha_file_quota=dict(required=False, type="int", default=4096),
fmgr_ha_cluster_id=dict(required=False, type="int", default=1)
)
required_if = [
['fmgr_ha_peer_ipv4', 'present', ['fmgr_ha_peer_sn', 'fmgr_ha_peer_status']],
['fmgr_ha_peer_ipv6', 'present', ['fmgr_ha_peer_sn', 'fmgr_ha_peer_status']],
['fmgr_ha_mode', 'master', ['fmgr_ha_cluster_pw', 'fmgr_ha_cluster_id']],
['fmgr_ha_mode', 'slave', ['fmgr_ha_cluster_pw', 'fmgr_ha_cluster_id']],
]
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, required_if=required_if)
paramgram = {
"fmgr_ha_mode": module.params["fmgr_ha_mode"],
"fmgr_ha_cluster_pw": module.params["fmgr_ha_cluster_pw"],
"fmgr_ha_peer_status": module.params["fmgr_ha_peer_status"],
"fmgr_ha_peer_sn": module.params["fmgr_ha_peer_sn"],
"fmgr_ha_peer_ipv4": module.params["fmgr_ha_peer_ipv4"],
"fmgr_ha_peer_ipv6": module.params["fmgr_ha_peer_ipv6"],
"fmgr_ha_hb_threshold": module.params["fmgr_ha_hb_threshold"],
"fmgr_ha_hb_interval": module.params["fmgr_ha_hb_interval"],
"fmgr_ha_file_quota": module.params["fmgr_ha_file_quota"],
"fmgr_ha_cluster_id": module.params["fmgr_ha_cluster_id"],
}
module.paramgram = paramgram
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
fmgr = FortiManagerHandler(connection, module)
fmgr.tools = FMGRCommon()
else:
module.fail_json(**FAIL_SOCKET_MSG)
# INIT FLAGS AND COUNTERS
get_ha_peers = 0
results = DEFAULT_RESULT_OBJ
try:
if any(v is not None for v in (paramgram["fmgr_ha_peer_sn"], paramgram["fmgr_ha_peer_ipv4"],
paramgram["fmgr_ha_peer_ipv6"], paramgram["fmgr_ha_peer_status"])):
get_ha_peers = 1
except Exception as err:
raise FMGBaseException(err)
try:
# IF HA MODE IS NOT NULL, SWITCH THAT
if paramgram["fmgr_ha_mode"] is not None:
if (str.lower(paramgram["fmgr_ha_mode"]) != "standalone" and paramgram["fmgr_ha_cluster_pw"] is not None)\
or str.lower(paramgram["fmgr_ha_mode"]) == "standalone":
results = fmgr_set_ha_mode(fmgr, paramgram)
fmgr.govern_response(module=module, results=results, stop_on_success=False,
ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
elif str.lower(paramgram["fmgr_ha_mode"]) != "standalone" and\
paramgram["fmgr_ha_mode"] is not None and\
paramgram["fmgr_ha_cluster_pw"] is None:
module.exit_json(msg="If setting HA Mode of MASTER or SLAVE, you must specify a cluster password")
except Exception as err:
raise FMGBaseException(err)
# IF GET_HA_PEERS IS ENABLED, LETS PROCESS THE PEERS
try:
if get_ha_peers == 1:
# GET THE CURRENT LIST OF PEERS FROM THE NODE
peers = fmgr_get_ha_peer_list(fmgr)
# GET LENGTH OF RETURNED PEERS LIST AND ADD ONE FOR THE NEXT ID
paramgram["next_peer_id"] = len(peers[1]) + 1
# SET THE ACTUAL NUMBER OF PEERS
num_of_peers = len(peers[1])
# SET THE PEER ID FOR DISABLE METHOD
paramgram["peer_id"] = len(peers) - 1
# SET THE PEER LOOPCOUNT TO 1 TO START THE LOOP
peer_loopcount = 1
# LOOP THROUGH PEERS TO FIND THE SERIAL NUMBER MATCH TO GET THE RIGHT PEER ID
# IDEA BEING WE DON'T WANT TO SUBMIT A BAD peer_id THAT DOESN'T JIVE WITH CURRENT DB ON FMG
# SO LETS SEARCH FOR IT, AND IF WE FIND IT, WE WILL CHANGE THE PEER ID VARIABLES TO MATCH
# IF NOT FOUND, LIFE GOES ON AND WE ASSUME THAT WE'RE ADDING A PEER
# AT WHICH POINT THE next_peer_id VARIABLE WILL HAVE THE RIGHT PRIMARY KEY
if paramgram["fmgr_ha_peer_sn"] is not None:
while peer_loopcount <= num_of_peers:
# GET THE SERIAL NUMBER FOR CURRENT PEER IN LOOP TO COMPARE TO SN IN PLAYBOOK
try:
sn_compare = peers[1][peer_loopcount - 1]["serial-number"]
# IF THE SN IN THE PEERS MATCHES THE PLAYBOOK SN, SET THE IDS
if sn_compare == paramgram["fmgr_ha_peer_sn"]:
paramgram["peer_id"] = peer_loopcount
paramgram["next_peer_id"] = paramgram["peer_id"]
except Exception as err:
raise FMGBaseException(err)
# ADVANCE THE LOOP AND REPEAT UNTIL DONE
peer_loopcount += 1
# IF THE PEER STATUS ISN'T IN THE PLAYBOOK, ASSUME ITS ENABLE
if paramgram["fmgr_ha_peer_status"] is None:
paramgram["fmgr_ha_peer_status"] = "enable"
# IF THE PEER STATUS IS ENABLE, USE THE next_peer_id IN THE API CALL FOR THE ID
if paramgram["fmgr_ha_peer_status"] == "enable":
results = fmgr_set_ha_peer(fmgr, paramgram)
fmgr.govern_response(module=module, results=results, stop_on_success=True,
ansible_facts=fmgr.construct_ansible_facts(results,
module.params, paramgram))
# IF THE PEER STATUS IS DISABLE, WE HAVE TO HANDLE THAT A BIT DIFFERENTLY
# JUST USING TWO DIFFERENT peer_id 's HERE
if paramgram["fmgr_ha_peer_status"] == "disable":
results = fmgr_set_ha_peer(fmgr, paramgram)
fmgr.govern_response(module=module, results=results, stop_on_success=True,
ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
except Exception as err:
raise FMGBaseException(err)
return module.exit_json(**results[1])
if __name__ == "__main__":
main()
|
wenxiaomao1023/wenxiaomao | refs/heads/master | article/migrations/0004_auto_20160921_1526.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-09-21 15:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('article', '0003_auto_20160921_1520'),
]
operations = [
migrations.CreateModel(
name='ArticleCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.AddField(
model_name='article',
name='categoryId',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='article.ArticleCategory'),
preserve_default=False,
),
]
|
gunchleoc/django | refs/heads/master | django/contrib/admindocs/tests/test_fields.py | 638 | from __future__ import unicode_literals
import unittest
from django.contrib.admindocs import views
from django.db import models
from django.db.models import fields
from django.utils.translation import ugettext as _
class CustomField(models.Field):
description = "A custom field type"
class DescriptionLackingField(models.Field):
pass
class TestFieldType(unittest.TestCase):
def setUp(self):
pass
def test_field_name(self):
self.assertRaises(
AttributeError,
views.get_readable_field_data_type, "NotAField"
)
def test_builtin_fields(self):
self.assertEqual(
views.get_readable_field_data_type(fields.BooleanField()),
_('Boolean (Either True or False)')
)
def test_custom_fields(self):
self.assertEqual(
views.get_readable_field_data_type(CustomField()),
'A custom field type'
)
self.assertEqual(
views.get_readable_field_data_type(DescriptionLackingField()),
_('Field of type: %(field_type)s') % {
'field_type': 'DescriptionLackingField'
}
)
|
mdanielwork/intellij-community | refs/heads/master | python/testData/inspections/PyUnresolvedReferencesInspection/UnusedImportBeforeStarImport/m2.py | 80 | import m1
|
michaelkirk/QGIS | refs/heads/master | tests/src/python/test_qgsfeature.py | 5 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsFeature.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Germán Carrillo'
__date__ = '06/10/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis
import os
from qgis.core import QgsFeature, QgsGeometry, QgsPoint, QgsVectorLayer, NULL
from utilities import (unitTestDataPath,
getQgisTestApp,
TestCase,
unittest
)
from unittest import expectedFailure
QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp()
class TestQgsFeature(TestCase):
def test_CreateFeature(self):
feat = QgsFeature()
feat.initAttributes(1)
feat.setAttribute(0, "text")
feat.setGeometry(QgsGeometry.fromPoint(QgsPoint(123,456)))
myId = feat.id()
myExpectedId = 0
myMessage = '\nExpected: %s\nGot: %s' % (myExpectedId, myId)
assert myId == myExpectedId, myMessage
def test_ValidFeature(self):
myPath = os.path.join(unitTestDataPath(), 'points.shp')
myLayer = QgsVectorLayer(myPath, 'Points', 'ogr')
provider = myLayer.dataProvider()
fit = provider.getFeatures()
feat = QgsFeature()
fit.nextFeature(feat)
fit.close()
myValidValue = feat.isValid()
myMessage = '\nExpected: %s\nGot: %s' % ("True", myValidValue)
assert myValidValue, myMessage
def test_Attributes(self):
myPath = os.path.join(unitTestDataPath(), 'lines.shp')
myLayer = QgsVectorLayer(myPath, 'Lines', 'ogr')
provider = myLayer.dataProvider()
fit = provider.getFeatures()
feat = QgsFeature()
fit.nextFeature(feat)
fit.close()
myAttributes = feat.attributes()
myExpectedAttributes = [ "Highway", 1 ]
myMessage = '\nExpected: %s\nGot: %s' % (
myExpectedAttributes,
myAttributes
)
assert myAttributes == myExpectedAttributes, myMessage
def test_SetAttribute(self):
feat = QgsFeature()
feat.initAttributes(1)
feat.setAttributes([0])
feat.setAttributes([NULL])
assert [NULL] == feat.attributes()
def test_DeleteAttribute(self):
feat = QgsFeature()
feat.initAttributes(3)
feat[0] = "text1"
feat[1] = "text2"
feat[2] = "text3"
feat.deleteAttribute(1)
myAttrs = [ feat[0], feat[1] ]
myExpectedAttrs = [ "text1", "text3" ]
myMessage = '\nExpected: %s\nGot: %s' % (str(myExpectedAttrs), str(myAttrs))
assert myAttrs == myExpectedAttrs, myMessage
def test_SetGeometry(self):
feat = QgsFeature()
feat.setGeometry(QgsGeometry.fromPoint(QgsPoint(123,456)))
myGeometry = feat.geometry()
myExpectedGeometry = "!None"
myMessage = '\nExpected: %s\nGot: %s' % (myExpectedGeometry, myGeometry)
assert myGeometry is not None, myMessage
if __name__ == '__main__':
unittest.main()
|
haocs/autorest | refs/heads/master | src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/AzureSpecials/autorestazurespecialparameterstestclient/operations/__init__.py | 31 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .xms_client_request_id_operations import XMsClientRequestIdOperations
from .subscription_in_credentials_operations import SubscriptionInCredentialsOperations
from .subscription_in_method_operations import SubscriptionInMethodOperations
from .api_version_default_operations import ApiVersionDefaultOperations
from .api_version_local_operations import ApiVersionLocalOperations
from .skip_url_encoding_operations import SkipUrlEncodingOperations
from .odata_operations import OdataOperations
from .header_operations import HeaderOperations
__all__ = [
'XMsClientRequestIdOperations',
'SubscriptionInCredentialsOperations',
'SubscriptionInMethodOperations',
'ApiVersionDefaultOperations',
'ApiVersionLocalOperations',
'SkipUrlEncodingOperations',
'OdataOperations',
'HeaderOperations',
]
|
martin-g/FrameworkBenchmarks | refs/heads/master | frameworks/Python/django/hello/manage.py | 98 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hello.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
jayme-github/headphones | refs/heads/master | lib/pytz/tzfile.py | 480 | #!/usr/bin/env python
'''
$Id: tzfile.py,v 1.8 2004/06/03 00:15:24 zenzen Exp $
'''
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from datetime import datetime, timedelta
from struct import unpack, calcsize
from pytz.tzinfo import StaticTzInfo, DstTzInfo, memorized_ttinfo
from pytz.tzinfo import memorized_datetime, memorized_timedelta
def _byte_string(s):
"""Cast a string or byte string to an ASCII byte string."""
return s.encode('US-ASCII')
_NULL = _byte_string('\0')
def _std_string(s):
"""Cast a string or byte string to an ASCII string."""
return str(s.decode('US-ASCII'))
def build_tzinfo(zone, fp):
head_fmt = '>4s c 15x 6l'
head_size = calcsize(head_fmt)
(magic, format, ttisgmtcnt, ttisstdcnt,leapcnt, timecnt,
typecnt, charcnt) = unpack(head_fmt, fp.read(head_size))
# Make sure it is a tzfile(5) file
assert magic == _byte_string('TZif'), 'Got magic %s' % repr(magic)
# Read out the transition times, localtime indices and ttinfo structures.
data_fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict(
timecnt=timecnt, ttinfo='lBB'*typecnt, charcnt=charcnt)
data_size = calcsize(data_fmt)
data = unpack(data_fmt, fp.read(data_size))
# make sure we unpacked the right number of values
assert len(data) == 2 * timecnt + 3 * typecnt + 1
transitions = [memorized_datetime(trans)
for trans in data[:timecnt]]
lindexes = list(data[timecnt:2 * timecnt])
ttinfo_raw = data[2 * timecnt:-1]
tznames_raw = data[-1]
del data
# Process ttinfo into separate structs
ttinfo = []
tznames = {}
i = 0
while i < len(ttinfo_raw):
# have we looked up this timezone name yet?
tzname_offset = ttinfo_raw[i+2]
if tzname_offset not in tznames:
nul = tznames_raw.find(_NULL, tzname_offset)
if nul < 0:
nul = len(tznames_raw)
tznames[tzname_offset] = _std_string(
tznames_raw[tzname_offset:nul])
ttinfo.append((ttinfo_raw[i],
bool(ttinfo_raw[i+1]),
tznames[tzname_offset]))
i += 3
# Now build the timezone object
if len(transitions) == 0:
ttinfo[0][0], ttinfo[0][2]
cls = type(zone, (StaticTzInfo,), dict(
zone=zone,
_utcoffset=memorized_timedelta(ttinfo[0][0]),
_tzname=ttinfo[0][2]))
else:
# Early dates use the first standard time ttinfo
i = 0
while ttinfo[i][1]:
i += 1
if ttinfo[i] == ttinfo[lindexes[0]]:
transitions[0] = datetime.min
else:
transitions.insert(0, datetime.min)
lindexes.insert(0, i)
# calculate transition info
transition_info = []
for i in range(len(transitions)):
inf = ttinfo[lindexes[i]]
utcoffset = inf[0]
if not inf[1]:
dst = 0
else:
for j in range(i-1, -1, -1):
prev_inf = ttinfo[lindexes[j]]
if not prev_inf[1]:
break
dst = inf[0] - prev_inf[0] # dst offset
# Bad dst? Look further. DST > 24 hours happens when
                # a timezone has moved across the international dateline.
if dst <= 0 or dst > 3600*3:
for j in range(i+1, len(transitions)):
stdinf = ttinfo[lindexes[j]]
if not stdinf[1]:
dst = inf[0] - stdinf[0]
if dst > 0:
break # Found a useful std time.
tzname = inf[2]
# Round utcoffset and dst to the nearest minute or the
# datetime library will complain. Conversions to these timezones
# might be up to plus or minus 30 seconds out, but it is
# the best we can do.
utcoffset = int((utcoffset + 30) // 60) * 60
dst = int((dst + 30) // 60) * 60
transition_info.append(memorized_ttinfo(utcoffset, dst, tzname))
cls = type(zone, (DstTzInfo,), dict(
zone=zone,
_utc_transition_times=transitions,
_transition_info=transition_info))
return cls()
if __name__ == '__main__':
import os.path
from pprint import pprint
base = os.path.join(os.path.dirname(__file__), 'zoneinfo')
tz = build_tzinfo('Australia/Melbourne',
open(os.path.join(base,'Australia','Melbourne'), 'rb'))
tz = build_tzinfo('US/Eastern',
open(os.path.join(base,'US','Eastern'), 'rb'))
pprint(tz._utc_transition_times)
#print tz.asPython(4)
#print tz.transitions_mapping
|
siosio/intellij-community | refs/heads/master | python/testData/copyPaste/SelectionOneLine1.after.py | 332 | class MyClass(object):
member1 = 1
member2 = 2
member3 = 3 |
JinXinDeep/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/pooling_ops_test.py | 9 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for pooling operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import gen_nn_ops
def GetInceptionMaxPoolShapes():
"""Iterator for some of the max pool ops in the Inception 2015 model.
Yields:
Tuple (name, input_size, filter_size, out_size, strides, padding)
"""
names = ["maxpool2", "maxpool3", "maxpool4", "maxpool5"]
input_sizes = [[32, 71, 71, 192],
[32, 35, 35, 288], [32, 17, 17, 1248], [32, 8, 8, 2048]]
filter_sizes = [[1, 3, 3, 1], [1, 3, 3, 1],
[1, 3, 3, 1], [1, 3, 3, 1]]
output_sizes = [[32, 35, 35, 192], [32, 17, 17, 288],
[32, 8, 8, 1248], [32, 8, 8, 2048]]
strides = [[1, 2, 2, 1], [1, 2, 2, 1], [1, 2, 2, 1],
[1, 1, 1, 1]]
paddings = ["VALID", "VALID", "VALID", "SAME"]
for n, i, f, o, s, p in zip(names, input_sizes, filter_sizes, output_sizes,
strides, paddings):
yield n, i, f, o, s, p
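# For reference, the first tuple yielded by the generator above is (values
# taken directly from the lists in its body):
#
#   ('maxpool2', [32, 71, 71, 192], [1, 3, 3, 1], [32, 35, 35, 192],
#    [1, 2, 2, 1], 'VALID')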
class PoolingTest(tf.test.TestCase):
def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding,
expected, use_gpu):
"""Verifies the output values of the pooling function.
Args:
      pool_func: Function to be called, e.g. tf.nn.max_pool or tf.nn.avg_pool.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
expected: An array containing the expected operation outputs.
use_gpu: Whether we are running on GPU.
"""
total_size = 1
for s in input_sizes:
total_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
with self.test_session(use_gpu=use_gpu) as sess:
t = tf.constant(x, shape=input_sizes)
t = pool_func(t, ksize=ksize, strides=strides, padding=padding)
actual = t.eval()
self.assertAllClose(expected, actual.flatten())
self.assertShapeEqual(actual, t)
def _testAvgPoolValidPadding(self, use_gpu):
expected_output = [7.0, 8.0, 9.0]
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output, use_gpu=use_gpu)
def _testAvgPoolSamePadding(self, use_gpu):
expected_output = [8.5, 9.5, 10.5, 14.5, 15.5, 16.5]
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 2, 4, 3],
ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output, use_gpu=use_gpu)
def _testAvgPoolSamePaddingNonSquareWindow(self, use_gpu):
# input is:
# [1.0, 2.0
# 3.0 4.0]
#
# Window of [x, x] should do:
# [avg(1.0, 2.0), avg(2.0, padded0),
# avg(3.0, 4.0), avg(4.0, padded0)]
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1],
padding="SAME",
expected=[1.5, 2.0, 3.5, 4.0], use_gpu=use_gpu)
# Window of [x,
# x] should do:
# [avg(1.0, 3.0), avg(2.0, 4.0)
# avg(3.0, padded0), avg(4.0, padded0)]
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 2, 2, 1],
ksize=[1, 2, 1, 1], strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 3.0, 3.0, 4.0], use_gpu=use_gpu)
def _testAvgPoolSamePaddingNonSquareWindowMultiBatch(self, use_gpu):
self._VerifyValues(tf.nn.avg_pool, input_sizes=[2, 2, 2, 2],
ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 3.0, 3.0, 4.0,
6.0, 7.0, 7.0, 8.0,
10.0, 11.0, 11.0, 12.0,
14.0, 15.0, 15.0, 16.0],
use_gpu=use_gpu)
self._VerifyValues(tf.nn.avg_pool, input_sizes=[2, 2, 2, 2],
ksize=[1, 2, 1, 1], strides=[1, 1, 1, 1],
padding="SAME",
expected=[3.0, 4.0, 5.0, 6.0,
5.0, 6.0, 7.0, 8.0,
11.0, 12.0, 13.0, 14.0,
13.0, 14.0, 15.0, 16.0],
use_gpu=use_gpu)
def _testAvgPoolValidPaddingUnevenStride(self, use_gpu):
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1], strides=[1, 1, 2, 1],
padding="VALID",
expected=[7.0, 8.0, 9.0, 16.0, 17.0, 18.0],
use_gpu=use_gpu)
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1], strides=[1, 2, 1, 1],
padding="VALID",
expected=[7.0, 8.0, 9.0, 10.0, 11.0, 12.0],
use_gpu=use_gpu)
def _testAvgPoolSamePadding4(self, use_gpu):
expected_output = [11.0, 12.0, 13.0, 14.0, 19.0, 20.0, 21.0, 22.0, 43.0,
44.0, 45.0, 46.0, 51.0, 52.0, 53.0, 54.0]
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output, use_gpu=use_gpu)
def _testAvgPoolSamePaddingPacket4(self, use_gpu):
expected_output = [21.0, 22.0, 23.0, 24.0, 27.0, 28.0, 29.0, 30.0,
45.0, 46.0, 47.0, 48.0, 51.0, 52.0, 53.0, 54.0]
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 4, 4, 4],
ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output, use_gpu=use_gpu)
def _testAvgPoolSamePaddingPacket8(self, use_gpu):
expected_output = [73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, 80.0, 89.0,
90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 105.0, 106.0,
107.0, 108.0, 109.0, 110.0, 111.0, 112.0, 117.0, 118.0,
119.0, 120.0, 121.0, 122.0, 123.0, 124.0, 201.0, 202.0,
203.0, 204.0, 205.0, 206.0, 207.0, 208.0, 217.0, 218.0,
219.0, 220.0, 221.0, 222.0, 223.0, 224.0, 233.0, 234.0,
235.0, 236.0, 237.0, 238.0, 239.0, 240.0, 245.0, 246.0,
247.0, 248.0, 249.0, 250.0, 251.0, 252.0, 329.0, 330.0,
331.0, 332.0, 333.0, 334.0, 335.0, 336.0, 345.0, 346.0,
347.0, 348.0, 349.0, 350.0, 351.0, 352.0, 361.0, 362.0,
363.0, 364.0, 365.0, 366.0, 367.0, 368.0, 373.0, 374.0,
375.0, 376.0, 377.0, 378.0, 379.0, 380.0, 425.0, 426.0,
427.0, 428.0, 429.0, 430.0, 431.0, 432.0, 441.0, 442.0,
443.0, 444.0, 445.0, 446.0, 447.0, 448.0, 457.0, 458.0,
459.0, 460.0, 461.0, 462.0, 463.0, 464.0, 469.0, 470.0,
471.0, 472.0, 473.0, 474.0, 475.0, 476.0]
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output, use_gpu=use_gpu)
def testAvgPooling(self):
for use_gpu in True, False:
self._testAvgPoolValidPadding(use_gpu)
self._testAvgPoolSamePadding(use_gpu)
self._testAvgPoolSamePaddingNonSquareWindow(use_gpu)
self._testAvgPoolSamePaddingNonSquareWindowMultiBatch(use_gpu)
self._testAvgPoolValidPaddingUnevenStride(use_gpu)
self._testAvgPoolSamePadding4(use_gpu)
self._testAvgPoolSamePaddingPacket4(use_gpu)
self._testAvgPoolSamePaddingPacket8(use_gpu)
def _testMaxPoolValidPadding(self, use_gpu):
expected_output = [13.0, 14.0, 15.0]
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output, use_gpu=use_gpu)
def _testMaxPoolSamePadding(self, use_gpu):
expected_output = [13.0, 14.0, 15.0, 16.0, 17.0, 18.0]
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output, use_gpu=use_gpu)
def _testMaxPoolSamePaddingNonSquareWindow(self, use_gpu):
# input is:
# [1.0, 2.0
# 3.0 4.0]
#
# Window of [x, x] should do:
#
# [max(1.0, 2.0), max(2.0, padded0),
# max(3.0, 4.0), max(4.0, padded0)]
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 2.0, 4.0, 4.0], use_gpu=use_gpu)
def _testMaxPoolValidPaddingUnevenStride(self, use_gpu):
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1], strides=[1, 1, 2, 1],
padding="VALID",
expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0],
use_gpu=use_gpu)
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1], strides=[1, 2, 1, 1],
padding="VALID",
expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0],
use_gpu=use_gpu)
def _testMaxPoolSamePaddingPacket4(self, use_gpu):
expected_output = [21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0,
54.0, 55.0, 56.0, 61.0, 62.0, 63.0, 64.0]
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output, use_gpu=use_gpu)
def _testMaxPoolSamePaddingPacket8(self, use_gpu):
expected_output = [145.0, 146.0, 147.0, 148.0, 149.0, 150.0, 151.0, 152.0,
161.0, 162.0, 163.0, 164.0, 165.0, 166.0, 167.0, 168.0,
177.0, 178.0, 179.0, 180.0, 181.0, 182.0, 183.0, 184.0,
185.0, 186.0, 187.0, 188.0, 189.0, 190.0, 191.0, 192.0,
273.0, 274.0, 275.0, 276.0, 277.0, 278.0, 279.0, 280.0,
289.0, 290.0, 291.0, 292.0, 293.0, 294.0, 295.0, 296.0,
305.0, 306.0, 307.0, 308.0, 309.0, 310.0, 311.0, 312.0,
313.0, 314.0, 315.0, 316.0, 317.0, 318.0, 319.0, 320.0,
401.0, 402.0, 403.0, 404.0, 405.0, 406.0, 407.0, 408.0,
417.0, 418.0, 419.0, 420.0, 421.0, 422.0, 423.0, 424.0,
433.0, 434.0, 435.0, 436.0, 437.0, 438.0, 439.0, 440.0,
441.0, 442.0, 443.0, 444.0, 445.0, 446.0, 447.0, 448.0,
465.0, 466.0, 467.0, 468.0, 469.0, 470.0, 471.0, 472.0,
481.0, 482.0, 483.0, 484.0, 485.0, 486.0, 487.0, 488.0,
497.0, 498.0, 499.0, 500.0, 501.0, 502.0, 503.0, 504.0,
505.0, 506.0, 507.0, 508.0, 509.0, 510.0, 511.0, 512.0]
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output, use_gpu=use_gpu)
def testMaxPooling(self):
for use_gpu in True, False:
self._testMaxPoolValidPadding(use_gpu)
self._testMaxPoolSamePadding(use_gpu)
self._testMaxPoolSamePaddingNonSquareWindow(use_gpu)
self._testMaxPoolValidPaddingUnevenStride(use_gpu)
self._testMaxPoolSamePaddingPacket4(use_gpu)
self._testMaxPoolSamePaddingPacket8(use_gpu)
# Tests for DepthwiseMaxPooling on CPU only.
def testDepthwiseMaxPool1x1DepthWindow1(self):
# input is:
# [1.0, ..., 10.0] along depth,
#
# We maxpool by depth in patches of 2.
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 1, 1, 10],
ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2],
padding="SAME",
expected=[2.0, 4.0, 6.0, 8.0, 10.0], use_gpu=False)
def testDepthwiseMaxPool2x2DepthWindow3(self):
# input is:
#
# a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2
# output. Each node has contiguous values, so the depthwise max
# should be multiples of 3.0.
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 2, 2, 6],
ksize=[1, 1, 1, 3], strides=[1, 1, 1, 3],
padding="SAME",
expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0],
use_gpu=False)
def _testDepthwiseMaxPoolInvalidConfig(self, in_size, ksize, strides,
error_msg, use_gpu=False):
t = tf.constant(1.0, shape=in_size)
with self.assertRaisesRegexp(ValueError, error_msg):
t = tf.nn.max_pool(t, ksize=ksize, strides=strides, padding="SAME")
def testDepthwiseMaxPoolInvalidConfigs(self):
self._testDepthwiseMaxPoolInvalidConfig(
[1, 2, 2, 4], [1, 2, 2, 2],
[1, 1, 1, 2], "exactly one of pooling across depth")
self._testDepthwiseMaxPoolInvalidConfig(
[1, 2, 2, 4], [1, 1, 1, 2],
[1, 1, 1, 1], "depth window to equal the depth stride")
self._testDepthwiseMaxPoolInvalidConfig(
[1, 2, 2, 4], [1, 1, 1, 3],
[1, 1, 1, 3], "evenly divide")
if tf.test.IsBuiltWithCuda():
with self.test_session(use_gpu=True):
t = tf.constant(1.0, shape=[1, 2, 2, 4])
with self.assertRaisesOpError("for CPU devices"):
tf.nn.max_pool(t, ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2],
padding="SAME").eval()
  # The following are tests that verify that the CPU and GPU implementations
  # produce the same results.
def _CompareMaxPoolingFwd(self, input_shape, ksize, strides, padding):
tensor_input = np.random.rand(*input_shape).astype(np.float32)
with self.test_session(use_gpu=True):
t = tf.constant(tensor_input, shape=input_shape)
out_op, _ = tf.nn.max_pool_with_argmax(t, ksize, strides, padding)
gpu_val = out_op.eval()
with self.test_session(use_gpu=False):
t = tf.constant(tensor_input, shape=input_shape)
out_op = tf.nn.max_pool(t, ksize, strides, padding)
cpu_val = out_op.eval()
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
def _CompareMaxPoolingBk(self, input_shape, output_shape, ksize, strides,
padding):
# Generate numbers in a narrow range, so that there are many duplicates
# in the input.
tensor_input = np.random.random_integers(0, 3,
input_shape).astype(np.float32)
tensor_output = np.random.rand(*output_shape).astype(np.float32)
with self.test_session(use_gpu=True):
t = tf.constant(tensor_input, shape=input_shape)
_, argmax_op = tf.nn.max_pool_with_argmax(t, ksize, strides, padding)
argmax = argmax_op.eval()
grad_in = tf.constant(tensor_output, shape=output_shape)
out_op = gen_nn_ops._max_pool_grad_with_argmax(t, grad_in, argmax,
ksize, strides, padding)
gpu_val = out_op.eval()
self.assertShapeEqual(gpu_val, out_op)
with self.test_session(use_gpu=False):
t = tf.constant(tensor_input, shape=input_shape)
out_op = tf.nn.max_pool(t, ksize, strides, padding)
orig_out = out_op.eval()
grad_in = tf.constant(tensor_output, shape=output_shape)
out_op = gen_nn_ops._max_pool_grad(t, orig_out, grad_in, ksize,
strides, padding)
cpu_val = out_op.eval()
self.assertShapeEqual(cpu_val, out_op)
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
def testMaxPoolingWithArgmax(self):
# MaxPoolWithArgMax is implemented only on GPU.
if not tf.test.IsBuiltWithCuda():
return
tensor_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]
with self.test_session(use_gpu=True) as sess:
t = tf.constant(tensor_input, shape=[1, 3, 3, 1])
out_op, argmax_op = tf.nn.max_pool_with_argmax(t,
ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1],
Targmax=tf.int64,
padding="VALID")
out, argmax = sess.run([out_op, argmax_op])
self.assertShapeEqual(out, out_op)
self.assertShapeEqual(argmax, argmax_op)
self.assertAllClose(out.ravel(), [1.0, 1.0, 1.0, 1.0])
self.assertAllEqual(argmax.ravel(), [0, 1, 3, 5])
def testMaxPoolingGradWithArgmax(self):
# MaxPoolWithArgMax is implemented only on GPU.
if not tf.test.IsBuiltWithCuda():
return
orig_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]
tensor_input = [11.0, 12.0, 13.0, 14.0]
tensor_argmax = list(np.array([0, 1, 3, 5], dtype=np.int64))
with self.test_session(use_gpu=True) as sess:
orig_in = tf.constant(orig_input, shape=[1, 3, 3, 1])
t = tf.constant(tensor_input, shape=[1, 2, 2, 1])
argmax = tf.constant(tensor_argmax, shape=[1, 2, 2, 1],
dtype=tf.int64)
out_op = gen_nn_ops._max_pool_grad_with_argmax(orig_in, t, argmax,
ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1],
padding="VALID")
out = out_op.eval().flatten()
self.assertAllClose(out, [11.0, 12.0, 0.0, 13.0, 0.0,
14.0, 0.0, 0.0, 0.0])
def _ConstructAndTestGradient(self, pool_func, input_sizes, output_sizes,
window_rows, window_cols, row_stride,
col_stride, padding, use_gpu,
x_init_value=None):
"""Verifies the gradients of the avg pooling function.
Args:
pool_func: Function to be called, co.MaxPool, co.AvgPool,
or the Lua version.
input_sizes: Input tensor dimensions.
output_sizes: Output tensor dimensions.
window_rows: kernel size in row dim
window_cols: kernel size in col dim
row_stride: Row Stride.
col_stride: Col Stride.
padding: Padding type.
use_gpu: whether we are running on GPU
x_init_value: Values to be passed to the gradient checker.
"""
total_size = 1
for s in input_sizes:
total_size *= s
    # Initializes the input tensor with an array containing incrementing
    # numbers from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
with self.test_session(use_gpu=use_gpu):
input_tensor = tf.constant(x, shape=input_sizes, name="input")
if pool_func == tf.nn.avg_pool:
func_name = "avg_pool"
err_margin = 1e-4
else:
if x_init_value is None:
x_init_value = np.asfarray(
np.arange(1, total_size + 1),
dtype=np.float32).reshape(input_sizes)
func_name = "max_pool"
err_margin = 1e-3
      t = pool_func(input_tensor, ksize=[1, window_rows, window_cols, 1],
strides=[1, row_stride, col_stride, 1],
padding=padding, name=func_name)
err = tf.test.compute_gradient_error(input_tensor,
input_sizes,
t,
output_sizes,
x_init_value=x_init_value,
delta=1e-2)
print("%s gradient error = " % func_name, err)
self.assertLess(err, err_margin)
def _testMaxPoolGradValidPadding1_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[1, 3, 3, 1],
output_sizes=[1, 3, 3, 1], window_rows=1, window_cols=1, row_stride=1,
col_stride=1, padding="VALID", use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_1_6(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[2, 6, 6, 3],
output_sizes=[2, 5, 5, 3], window_rows=2, window_cols=2, row_stride=1,
col_stride=1, padding="VALID", use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_1_7(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[2, 7, 7, 3],
output_sizes=[2, 6, 6, 3], window_rows=2, window_cols=2, row_stride=1,
col_stride=1, padding="VALID", use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_2(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[2, 2, 2, 3],
output_sizes=[2, 1, 1, 3], window_rows=2, window_cols=2, row_stride=2,
col_stride=2, padding="VALID", use_gpu=use_gpu)
def _testMaxPoolGradSamePadding1_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3], window_rows=1, window_cols=1, row_stride=1,
col_stride=1, padding="SAME", use_gpu=use_gpu)
def _testMaxPoolGradSamePadding2_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3], window_rows=2, window_cols=2, row_stride=1,
col_stride=1, padding="SAME", use_gpu=use_gpu)
def _testMaxPoolGradSamePadding2_2(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[2, 2, 4, 3],
output_sizes=[2, 1, 2, 3], window_rows=2, window_cols=2, row_stride=2,
col_stride=2, padding="SAME", use_gpu=use_gpu)
def _testMaxPoolGradSamePadding3_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[1, 7, 7, 1],
output_sizes=[1, 7, 7, 1], window_rows=3, window_cols=3, row_stride=1,
col_stride=1, padding="SAME", use_gpu=use_gpu)
def testMaxPoolGrad(self):
for use_gpu in True, False:
self._testMaxPoolGradValidPadding1_1(use_gpu=use_gpu)
self._testMaxPoolGradValidPadding2_1_6(use_gpu=use_gpu)
self._testMaxPoolGradValidPadding2_1_7(use_gpu=use_gpu)
self._testMaxPoolGradValidPadding2_2(use_gpu=use_gpu)
self._testMaxPoolGradSamePadding1_1(use_gpu=use_gpu)
self._testMaxPoolGradSamePadding2_1(use_gpu=use_gpu)
self._testMaxPoolGradSamePadding2_2(use_gpu=use_gpu)
self._testMaxPoolGradSamePadding3_1(use_gpu=use_gpu)
def _MaxPoolGrad(self, orig_input, orig_output, grad, window_rows,
window_cols, row_stride, col_stride, padding):
"""Max Pooling Gradient.
Args:
orig_input: A float Tensor. The original input tensor.
orig_output: A float Tensor. The original output tensor.
grad: A float Tensor.
The 4D (batch x rows x cols x depth) output backprop.
window_rows: integer. Kernel size along rows dimension.
window_cols: integer. Kernel size along cols dimension.
      row_stride: integer. Stride along rows dimension.
      col_stride: integer. Stride along cols dimension.
padding: PoolingOpDef.Padding. Padding type.
Returns:
A Tensor.
"""
return gen_nn_ops._max_pool_grad(
orig_input, orig_output, grad,
[1, window_rows, window_cols, 1], [1, row_stride, col_stride, 1],
padding)
def _testMaxPoolGradDirect(self, input_data, output_backprop,
expected_input_backprop, input_sizes, output_sizes,
window_rows, window_cols, row_stride, col_stride,
padding, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
input_tensor = tf.constant(input_data, shape=input_sizes)
output_tensor = tf.nn.max_pool(
input_tensor, [1, window_rows, window_cols, 1],
[1, row_stride, col_stride, 1], padding)
output_backprop_tensor = tf.constant(output_backprop,
shape=output_sizes)
input_backprop_tensor = self._MaxPoolGrad(
input_tensor, output_tensor, output_backprop_tensor,
window_rows, window_cols, row_stride, col_stride, padding)
actual_input_backprop = input_backprop_tensor.eval()
self.assertShapeEqual(actual_input_backprop, input_backprop_tensor)
actual_input_backprop = actual_input_backprop.flatten()
actual_input_backprop = self._GetNdArray(actual_input_backprop)
actual_output = output_tensor.eval().flatten()
actual_output = self._GetNdArray(actual_output)
self.assertAllClose(expected_input_backprop, actual_input_backprop,
rtol=1e-6, atol=1e-6)
def _testMaxPoolGradDirect1_1(self):
input_data = [
1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0]
output_backprop = [
11.0, 12.0, 13.0,
15.0, 16.0, 17.0,
19.0, 20.0, 21.0]
expected_input_backprop = [
11.0, 12.0, 13.0, 0.0,
15.0, 16.0, 17.0, 0.0,
19.0, 20.0, 21.0, 0.0,
0.0, 0.0, 0.0, 0.0]
for use_gpu in True, False:
self._testMaxPoolGradDirect(
input_data, output_backprop, expected_input_backprop,
input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
window_rows=2, window_cols=2, row_stride=1, col_stride=1,
padding="VALID", use_gpu=use_gpu)
def _testMaxPoolGradDirect1_2(self):
input_data = [
1.0, 0.0, 1.0, 0.0,
0.0, 1.0, 0.0, 1.0,
1.0, 0.0, 1.0, 0.0,
0.0, 1.0, 0.0, 1.0]
output_backprop = [
11.0, 12.0, 13.0,
15.0, 16.0, 17.0,
19.0, 20.0, 21.0]
expected_input_backprop = [
11.0, 0.0, 25.0, 0.0,
0.0, 31.0, 0.0, 17.0,
19.0, 0.0, 41.0, 0.0,
0.0, 0.0, 0.0, 0.0]
for use_gpu in True, False:
self._testMaxPoolGradDirect(
input_data, output_backprop, expected_input_backprop,
input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
window_rows=2, window_cols=2, row_stride=1, col_stride=1,
padding="VALID", use_gpu=use_gpu)
def _testMaxPoolGradDirect1_3(self):
input_data = [
1.0, 0.0, 1.0, 0.0,
0.0, 1.0, 0.0, 1.0,
1.0, 0.0, 1.0, 0.0,
0.0, 1.0, 0.0, 1.0,]
output_backprop = [
11.0, 12.0, 13.0, 14.0,
15.0, 16.0, 17.0, 18.0,
19.0, 20.0, 21.0, 22.0,
23.0, 24.0, 25.0, 26.0]
    expected_input_backprop = [
        54.0, 0.0, 62.0, 0.0,
        0.0, 60.0, 0.0, 22.0,
        47.0, 0.0, 51.0, 0.0,
        0.0, 0.0, 0.0, 0.0]
for use_gpu in True, False:
self._testMaxPoolGradDirect(
input_data, output_backprop, expected_input_backprop,
input_sizes=[1, 4, 4, 1], output_sizes=[1, 4, 4, 1],
window_rows=3, window_cols=3, row_stride=1, col_stride=1,
padding="SAME", use_gpu=use_gpu)
def _testMaxPoolGradDirectWithNans2_1(self):
input_data = [float("nan")] * 16
output_backprop = [
11.0, 12.0, 13.0,
15.0, 16.0, 17.0,
19.0, 20.0, 21.0]
# Test the CPU implementation, which propagates diffs in case of NaN
expected_input_backprop_tf_cpu = [
11.0, 12.0, 13.0, 0.0,
15.0, 16.0, 17.0, 0.0,
19.0, 20.0, 21.0, 0.0,
0.0, 0.0, 0.0, 0.0]
self._testMaxPoolGradDirect(
input_data, output_backprop, expected_input_backprop_tf_cpu,
input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
window_rows=2, window_cols=2, row_stride=1, col_stride=1,
padding="VALID", use_gpu=False)
if not tf.test.IsBuiltWithCuda():
return
# Test the GPU implementation that uses cudnn for now.
# It does not propagate the diff in cases of NaNs
expected_input_backprop_cudnn = [
0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0]
self._testMaxPoolGradDirect(
input_data, output_backprop, expected_input_backprop_cudnn,
input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
window_rows=2, window_cols=2, row_stride=1, col_stride=1,
padding="VALID", use_gpu=True)
def _testMaxPoolGradDirectWithNans2_2(self):
input_data = [float("nan")] * 16
output_backprop = [
float("nan"), 12.0, 13.0,
15.0, float("nan"), 17.0,
19.0, 20.0, float("nan")]
# Test the CPU implementation, which propagates diffs in case of NaN
expected_input_backprop_tf_cpu = [
float("nan"), 12.0, 13.0, 0.0,
15.0, float("nan"), 17.0, 0.0,
19.0, 20.0, float("nan"), 0.0,
0.0, 0.0, 0.0, 0.0]
self._testMaxPoolGradDirect(
input_data, output_backprop, expected_input_backprop_tf_cpu,
input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
window_rows=2, window_cols=2, row_stride=1, col_stride=1,
padding="VALID", use_gpu=False)
if not tf.test.IsBuiltWithCuda():
return
# Test the GPU implementation that uses cudnn for now.
# It does not propagate the diff in cases of NaNs
expected_input_backprop_cudnn = [
0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0]
self._testMaxPoolGradDirect(
input_data, output_backprop, expected_input_backprop_cudnn,
input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
window_rows=2, window_cols=2, row_stride=1, col_stride=1,
padding="VALID", use_gpu=True)
def testMaxPoolGradDirect(self):
self._testMaxPoolGradDirect1_1()
self._testMaxPoolGradDirect1_2()
self._testMaxPoolGradDirect1_3()
self._testMaxPoolGradDirectWithNans2_1()
self._testMaxPoolGradDirectWithNans2_2()
def testAvgPoolGrad(self):
for use_gpu in False, True:
self._testAvgPoolGradValidPadding1_1(use_gpu)
self._testAvgPoolGradValidPadding2_1(use_gpu)
self._testAvgPoolGradValidPadding2_2(use_gpu)
self._testAvgPoolGradSamePadding1_1(use_gpu)
self._testAvgPoolGradSamePadding2_1(use_gpu)
self._testAvgPoolGradSamePadding2_2(use_gpu)
self._testAvgPoolGradSamePadding3_1(use_gpu)
def _testAvgPoolGradValidPadding1_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.avg_pool, input_sizes=[2, 3, 3, 3],
output_sizes=[2, 3, 3, 3], window_rows=1, window_cols=1, row_stride=1,
col_stride=1, padding="VALID", use_gpu=use_gpu)
def _testAvgPoolGradValidPadding2_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.avg_pool, input_sizes=[2, 3, 3, 3],
output_sizes=[2, 2, 2, 3], window_rows=2, window_cols=2, row_stride=1,
col_stride=1, padding="VALID", use_gpu=use_gpu)
def _testAvgPoolGradValidPadding2_2(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.avg_pool, input_sizes=[2, 2, 2, 3],
output_sizes=[2, 1, 1, 3], window_rows=2, window_cols=2, row_stride=2,
col_stride=2, padding="VALID", use_gpu=use_gpu)
def _testAvgPoolGradSamePadding1_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.avg_pool, input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3], window_rows=1, window_cols=1, row_stride=1,
col_stride=1, padding="SAME", use_gpu=use_gpu)
def _testAvgPoolGradSamePadding2_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.avg_pool, input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3], window_rows=2, window_cols=2, row_stride=1,
col_stride=1, padding="SAME", use_gpu=use_gpu)
def _testAvgPoolGradSamePadding2_2(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.avg_pool, input_sizes=[2, 2, 4, 3],
output_sizes=[2, 1, 2, 3], window_rows=2, window_cols=2, row_stride=2,
col_stride=2, padding="SAME", use_gpu=use_gpu)
def _testAvgPoolGradSamePadding3_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.avg_pool, input_sizes=[1, 7, 7, 1],
output_sizes=[1, 7, 7, 1], window_rows=3, window_cols=3, row_stride=1,
col_stride=1, padding="SAME", use_gpu=use_gpu)
def testShapeFunctionEdgeCases(self):
# All shapes unknown.
    for pool_func in [tf.nn.max_pool, tf.nn.avg_pool]:
      p = pool_func(tf.placeholder(tf.float32),
                    ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
                    padding="SAME")
self.assertEqual([None, None, None, None], p.get_shape().as_list())
p, am = tf.nn.max_pool_with_argmax(
tf.placeholder(tf.float32),
ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
padding="SAME")
self.assertEqual([None, None, None, None], p.get_shape().as_list())
self.assertEqual([None, None, None, None], am.get_shape().as_list())
# Incorrect input shape.
for pool_func in [tf.nn.max_pool, tf.nn.avg_pool,
tf.nn.max_pool_with_argmax]:
with self.assertRaises(ValueError):
pool_func(tf.placeholder(tf.float32, shape=[1, 3]),
ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1], padding="SAME")
# Illegal strides.
for pool_func in [tf.nn.max_pool, tf.nn.avg_pool,
tf.nn.max_pool_with_argmax]:
with self.assertRaisesRegexp(ValueError, "strides in the batch"):
pool_func(tf.placeholder(tf.float32),
ksize=[1, 1, 1, 1], strides=[2, 1, 1, 1], padding="SAME")
with self.assertRaisesRegexp(ValueError, "strides in the batch and depth"):
tf.nn.avg_pool(tf.placeholder(tf.float32),
ksize=[1, 1, 1, 1], strides=[1, 1, 1, 2], padding="SAME")
# Filter larger than input.
for pool_func in [tf.nn.max_pool, tf.nn.avg_pool,
tf.nn.max_pool_with_argmax]:
with self.assertRaisesRegexp(ValueError,
"filter must not be larger than the input"):
pool_func(tf.placeholder(tf.float32,
shape=[32, 20, 20, 3]),
ksize=[1, 20, 21, 1], strides=[1, 1, 1, 1], padding="SAME")
with self.assertRaisesRegexp(ValueError,
"filter must not be larger than the input"):
pool_func(tf.placeholder(tf.float32,
shape=[32, 20, 20, 3]),
ksize=[1, 21, 20, 1], strides=[1, 1, 1, 1], padding="SAME")
# Stride larger than filter.
for pool_func in [tf.nn.max_pool, tf.nn.avg_pool,
tf.nn.max_pool_with_argmax]:
with self.assertRaisesRegexp(
ValueError, "stride must be less than or equal to filter"):
pool_func(tf.placeholder(tf.float32,
shape=[32, 20, 20, 3]),
ksize=[1, 5, 3, 1], strides=[1, 5, 5, 1], padding="SAME")
with self.assertRaisesRegexp(
ValueError, "stride must be less than or equal to filter"):
pool_func(tf.placeholder(tf.float32,
shape=[32, 20, 20, 3]),
ksize=[1, 3, 5, 1], strides=[1, 5, 5, 1], padding="SAME")
def GetMaxPoolFwdTest(input_size, filter_size, strides, padding):
def Test(self):
# MaxPoolWithArgMax is implemented only on GPU.
if not tf.test.IsBuiltWithCuda():
return
self._CompareMaxPoolingFwd(input_size, filter_size, strides, padding)
return Test
def GetMaxPoolGradTest(input_size, filter_size, output_size, strides, padding):
def Test(self):
# MaxPoolWithArgMax is implemented only on GPU.
if not tf.test.IsBuiltWithCuda():
return
self._CompareMaxPoolingBk(input_size, output_size,
filter_size, strides, padding)
return Test
if __name__ == "__main__":
for (name_, input_size_, filter_size_, output_size_, stride_,
padding_) in GetInceptionMaxPoolShapes():
setattr(PoolingTest, "testMaxPoolFwd_" + name_,
GetMaxPoolFwdTest(input_size_, filter_size_, stride_, padding_))
setattr(PoolingTest, "testMaxPoolGrad_" + name_,
GetMaxPoolGradTest(input_size_, filter_size_, output_size_,
stride_, padding_))
tf.test.main()
|
elijah513/ice | refs/heads/master | cpp/test/Slice/macros/run.py | 18 | #!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2015 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys
path = [ ".", "..", "../..", "../../..", "../../../.." ]
head = os.path.dirname(sys.argv[0])
if len(head) > 0:
path = [os.path.join(head, p) for p in path]
path = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, "scripts", "TestUtil.py")) ]
if len(path) == 0:
raise RuntimeError("can't find toplevel directory!")
sys.path.append(os.path.join(path[0], "scripts"))
import TestUtil
client = os.path.join(os.getcwd(), "client")
TestUtil.simpleTest(client)
|
IntersectAustralia/asvo-tao | refs/heads/master | core/sageimport_mpi_HDF/CompareDataSet.py | 1 | from numpy import genfromtxt
import string
import numpy
import sys  # for command-line arguments (sys.argv)
if __name__ == '__main__':
    #if len(sys.argv)<2:
    #    print("Error Not Enough Arguments")
    #    exit()
    # NOTE: both paths currently point at the same file; set File2 to the
    # second dataset (or restore the sys.argv handling above) to compare
    # two different outputs.
    File1 = '/home/amr/workspace/tao.output.0.csv'  # sys.argv[1]
    File2 = '/home/amr/workspace/tao.output.0.csv'  # sys.argv[2]
my_data1 = genfromtxt(File1, delimiter=',',names=True)
my_data2 = genfromtxt(File2, delimiter=',',names=True)
print my_data1
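    # A minimal comparison sketch (assuming both files share the same header):
    # report, per named column, whether the two datasets agree numerically.
    for name in my_data1.dtype.names:
        print name, numpy.allclose(my_data1[name], my_data2[name])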
|
sugartom/tensorflow-alien | refs/heads/master | tensorflow/contrib/ndlstm/python/__init__.py | 135 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Init file, giving convenient access to all ndlstm ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,g-importing-member
from tensorflow.contrib.ndlstm.python.lstm1d import *
from tensorflow.contrib.ndlstm.python.lstm2d import *
from tensorflow.contrib.ndlstm.python.misc import *
# pylint: enable=wildcard-import
|
web30s/odoo-9.0c-20160402 | refs/heads/master | hello/templates/openerp/addons/test_limits/models.py | 55 | # -*- coding: utf-8 -*-
import time
import openerp
class m(openerp.osv.osv.Model):
""" This model exposes a few methods that will consume between 'almost no
resource' and 'a lot of resource'.
"""
_name = 'test.limits.model'
def consume_nothing(self, cr, uid, context=None):
return True
def consume_memory(self, cr, uid, size, context=None):
l = [0] * size
return True
def leak_memory(self, cr, uid, size, context=None):
if not hasattr(self, 'l'):
self.l = []
self.l.append([0] * size)
return True
def consume_time(self, cr, uid, seconds, context=None):
time.sleep(seconds)
return True
def consume_cpu_time(self, cr, uid, seconds, context=None):
t0 = time.clock()
t1 = time.clock()
while t1 - t0 < seconds:
for i in xrange(10000000):
x = i * i
t1 = time.clock()
return True
|
koomik/CouchPotatoServer | refs/heads/develop | couchpotato/core/media/_base/providers/metadata/base.py | 81 | from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
log = CPLog(__name__)
class MetaDataBase(Plugin):
pass
|
Coelhon/MasterRepo.repository | refs/heads/master | plugin.video.RabbitMovies/resources/lib/sources/disabled/zz_movietv10_mv_tv.py | 30 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os,re,urllib,urlparse,json,zipfile,StringIO,datetime,base64
try:
from sqlite3 import dbapi2 as database
except:
from pysqlite2 import dbapi2 as database
from resources.lib.libraries import control
from resources.lib.libraries import cleantitle
from resources.lib.libraries import client
class source:
def __init__(self):
self.base_link = 'http://movietv.to'
self.data_link = 'aHR0cHM6Ly9vZmZzaG9yZWdpdC5jb20vbGFtYmRhODEvZGF0YWJhc2VzL21vdmlldHYyLnppcA=='
self.extra_link = 'aHR0cDovL2p1c3RwYXN0ZS5pdC9vYzVj'
def get_movie(self, imdb, title, year):
try:
data = os.path.join(control.dataPath, 'movietv.db')
try: control.deleteFile(data)
except: pass
data = os.path.join(control.dataPath, 'movietv2.db')
download = True
try: download = abs(datetime.datetime.fromtimestamp(os.path.getmtime(data)) - (datetime.datetime.now())) > datetime.timedelta(days=7)
except: pass
if download == True:
result = client.source(base64.b64decode(self.data_link))
zip = zipfile.ZipFile(StringIO.StringIO(result))
zip.extractall(control.dataPath)
zip.close()
dbcon = database.connect(data)
dbcur = dbcon.cursor()
dbcur.execute("SELECT * FROM movies WHERE year = '%s'" % year)
result = dbcur.fetchone()
result = eval(result[1].encode('utf-8'))
title = cleantitle.movie(title)
years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
result = [i for i in result if title == cleantitle.movie(i[2])]
result = [i[0] for i in result if any(x in i[3] for x in years)][0]
try: url = re.compile('//.+?(/.+)').findall(result)[0]
except: url = result
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_show(self, imdb, tvdb, tvshowtitle, year):
try:
data = os.path.join(control.dataPath, 'movietv.db')
try: control.deleteFile(data)
except: pass
data = os.path.join(control.dataPath, 'movietv2.db')
download = True
try: download = abs(datetime.datetime.fromtimestamp(os.path.getmtime(data)) - (datetime.datetime.now())) > datetime.timedelta(days=7)
except: pass
if download == True:
result = client.source(base64.b64decode(self.data_link))
zip = zipfile.ZipFile(StringIO.StringIO(result))
zip.extractall(control.dataPath)
zip.close()
dbcon = database.connect(data)
dbcur = dbcon.cursor()
dbcur.execute("SELECT * FROM tvshows WHERE year = '%s'" % year)
result = dbcur.fetchone()
result = eval(result[1].encode('utf-8'))
tvshowtitle = cleantitle.tv(tvshowtitle)
years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
result = [i for i in result if tvshowtitle == cleantitle.tv(i[2])]
result = [i[0] for i in result if any(x in i[3] for x in years)][0]
try: url = re.compile('//.+?(/.+)').findall(result)[0]
except: url = result
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
try:
if url == None: return
            if int(season) != 1: return
            if int(episode) > 3: return
url += '?S%02dE%02d' % (int(season), int(episode))
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_sources(self, url, hosthdDict, hostDict, locDict):
try:
sources = []
if url == None: return sources
data = os.path.join(control.dataPath, 'movietv.db')
try: control.deleteFile(data)
except: pass
data = os.path.join(control.dataPath, 'movietv2.db')
download = True
try: download = abs(datetime.datetime.fromtimestamp(os.path.getmtime(data)) - (datetime.datetime.now())) > datetime.timedelta(days=7)
except: pass
if download == True:
result = client.source(base64.b64decode(self.data_link))
zip = zipfile.ZipFile(StringIO.StringIO(result))
zip.extractall(control.dataPath)
zip.close()
dbcon = database.connect(data)
dbcur = dbcon.cursor()
content = re.compile('(.+?)\?S\d*E\d*$').findall(url)
try: url, handler = re.compile('(.+?)\?(S\d*E\d*)$').findall(url)[0]
except: pass
if len(content) == 0:
dbcur.execute("SELECT * FROM movies")
result = dbcur.fetchall()
result = [eval(i[1].encode('utf-8')) for i in result]
result = sum(result, [])
result = [i for i in result if i[0] == url][0]
else:
dbcur.execute("SELECT * FROM tvshows")
result = dbcur.fetchall()
result = [eval(i[1].encode('utf-8')) for i in result]
result = sum(result, [])
result = [i for i in result if i[0] == url]
result = [i for i in result if i[4] == handler][0]
url = '%s|Referer=%s' % (result[1], urllib.quote_plus(urlparse.urljoin(self.base_link, result[0])))
sources.append({'source': 'MovieTV', 'quality': 'HD', 'provider': 'MovieTV', 'url': url})
return sources
except:
return sources
def resolve(self, url):
try:
result = client.source(base64.b64decode(self.extra_link))
extra = client.parseDOM(result, 'p')
extra = [i for i in extra if 'User-Agent=' in i][0]
extra = client.replaceHTMLCodes(extra)
url += extra
return url
except:
return
|
vpstudios/Codecademy-Exercise-Answers | refs/heads/master | Language Skills/Python/Unit 8/1-Loops/Step Up 'For's/16-Multiple Lists.py | 5 | list_a = [3, 9, 17, 15, 19]
list_b = [2, 4, 8, 10, 30, 40, 50, 60, 70, 80, 90]
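# zip() pairs items up to the length of the shorter list, so only five pairs
# are compared here: (3, 2), (9, 4), (17, 8), (15, 10), (19, 30).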
for a, b in zip(list_a, list_b):
# Add your code here!
if a > b:
print a
else:
print b
|
saukrIppl/seahub | refs/heads/master | thirdpart/Django-1.8.10-py2.7.egg/django/core/management/__init__.py | 16 | from __future__ import unicode_literals
from collections import OrderedDict, defaultdict
from importlib import import_module
import os
import pkgutil
import sys
import django
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import (BaseCommand, CommandError,
CommandParser, handle_default_options)
from django.core.management.color import color_style
from django.utils import autoreload, lru_cache, six
from django.utils._os import npath, upath
def find_commands(management_dir):
"""
Given a path to a management directory, returns a list of all the command
names that are available.
Returns an empty list if no commands are defined.
"""
command_dir = os.path.join(management_dir, 'commands')
# Workaround for a Python 3.2 bug with pkgutil.iter_modules
sys.path_importer_cache.pop(command_dir, None)
return [name for _, name, is_pkg in pkgutil.iter_modules([npath(command_dir)])
if not is_pkg and not name.startswith('_')]
def load_command_class(app_name, name):
"""
Given a command name and an application name, returns the Command
class instance. All errors raised by the import process
(ImportError, AttributeError) are allowed to propagate.
"""
module = import_module('%s.management.commands.%s' % (app_name, name))
return module.Command()
@lru_cache.lru_cache(maxsize=None)
def get_commands():
"""
Returns a dictionary mapping command names to their callback applications.
This works by looking for a management.commands package in django.core, and
in each installed application -- if a commands package exists, all commands
in that package are registered.
Core commands are always included. If a settings module has been
specified, user-defined commands will also be included.
The dictionary is in the format {command_name: app_name}. Key-value
pairs from this dictionary can then be used in calls to
load_command_class(app_name, command_name)
If a specific version of a command must be loaded (e.g., with the
startapp command), the instantiated module can be placed in the
dictionary in place of the application name.
The dictionary is cached on the first call and reused on subsequent
calls.
"""
commands = {name: 'django.core' for name in find_commands(upath(__path__[0]))}
if not settings.configured:
return commands
for app_config in reversed(list(apps.get_app_configs())):
path = os.path.join(app_config.path, 'management')
commands.update({name: app_config.name for name in find_commands(path)})
return commands
def call_command(name, *args, **options):
"""
Calls the given command, with the given options and args/kwargs.
This is the primary API you should use for calling specific commands.
Some examples:
call_command('syncdb')
call_command('shell', plain=True)
call_command('sqlmigrate', 'myapp')
"""
# Load the command object.
try:
app_name = get_commands()[name]
except KeyError:
raise CommandError("Unknown command: %r" % name)
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
command = app_name
else:
command = load_command_class(app_name, name)
# Simulate argument parsing to get the option defaults (see #10080 for details).
parser = command.create_parser('', name)
if command.use_argparse:
# Use the `dest` option name from the parser option
opt_mapping = {sorted(s_opt.option_strings)[0].lstrip('-').replace('-', '_'): s_opt.dest
for s_opt in parser._actions if s_opt.option_strings}
arg_options = {opt_mapping.get(key, key): value for key, value in options.items()}
defaults = parser.parse_args(args=args)
defaults = dict(defaults._get_kwargs(), **arg_options)
# Move positional args out of options to mimic legacy optparse
args = defaults.pop('args', ())
else:
# Legacy optparse method
defaults, _ = parser.parse_args(args=[])
defaults = dict(defaults.__dict__, **options)
if 'skip_checks' not in options:
defaults['skip_checks'] = True
return command.execute(*args, **defaults)
class ManagementUtility(object):
"""
Encapsulates the logic of the django-admin and manage.py utilities.
A ManagementUtility has a number of commands, which can be manipulated
by editing the self.commands dictionary.
"""
def __init__(self, argv=None):
self.argv = argv or sys.argv[:]
self.prog_name = os.path.basename(self.argv[0])
self.settings_exception = None
def main_help_text(self, commands_only=False):
"""
Returns the script's main help text, as a string.
"""
if commands_only:
usage = sorted(get_commands().keys())
else:
usage = [
"",
"Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name,
"",
"Available subcommands:",
]
            commands_dict = defaultdict(list)
for name, app in six.iteritems(get_commands()):
if app == 'django.core':
app = 'django'
else:
app = app.rpartition('.')[-1]
commands_dict[app].append(name)
style = color_style()
for app in sorted(commands_dict.keys()):
usage.append("")
usage.append(style.NOTICE("[%s]" % app))
for name in sorted(commands_dict[app]):
usage.append(" %s" % name)
# Output an extra note if settings are not properly configured
if self.settings_exception is not None:
usage.append(style.NOTICE(
"Note that only Django core commands are listed "
"as settings are not properly configured (error: %s)."
% self.settings_exception))
return '\n'.join(usage)
def fetch_command(self, subcommand):
"""
Tries to fetch the given subcommand, printing a message with the
appropriate command called from the command line (usually
"django-admin" or "manage.py") if it can't be found.
"""
# Get commands outside of try block to prevent swallowing exceptions
commands = get_commands()
try:
app_name = commands[subcommand]
except KeyError:
# This might trigger ImproperlyConfigured (masked in get_commands)
settings.INSTALLED_APPS
sys.stderr.write("Unknown command: %r\nType '%s help' for usage.\n" %
(subcommand, self.prog_name))
sys.exit(1)
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, subcommand)
return klass
def autocomplete(self):
"""
Output completion suggestions for BASH.
The output of this function is passed to BASH's `COMREPLY` variable and
treated as completion suggestions. `COMREPLY` expects a space
separated string as the result.
The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
to get information about the cli input. Please refer to the BASH
man-page for more information about this variables.
Subcommand options are saved as pairs. A pair consists of
the long option string (e.g. '--exclude') and a boolean
value indicating if the option requires arguments. When printing to
stdout, an equal sign is appended to options which require arguments.
Note: If debugging this function, it is recommended to write the debug
output in a separate file. Otherwise the debug output will be treated
and formatted as potential completion suggestions.
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'DJANGO_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
curr = cwords[cword - 1]
except IndexError:
curr = ''
subcommands = list(get_commands()) + ['help']
options = [('--help', False)]
# subcommand
if cword == 1:
print(' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands))))
# subcommand options
# special case: the 'help' subcommand has no options
elif cwords[0] in subcommands and cwords[0] != 'help':
subcommand_cls = self.fetch_command(cwords[0])
# special case: 'runfcgi' stores additional options as
# 'key=value' pairs
if cwords[0] == 'runfcgi':
from django.core.servers.fastcgi import FASTCGI_OPTIONS
options.extend((k, 1) for k in FASTCGI_OPTIONS)
# special case: add the names of installed apps to options
elif cwords[0] in ('dumpdata', 'sql', 'sqlall', 'sqlclear',
'sqlcustom', 'sqlindexes', 'sqlmigrate', 'sqlsequencereset', 'test'):
try:
app_configs = apps.get_app_configs()
# Get the last part of the dotted path as the app name.
options.extend((app_config.label, 0) for app_config in app_configs)
except ImportError:
# Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
# user will find out once they execute the command.
pass
parser = subcommand_cls.create_parser('', cwords[0])
if subcommand_cls.use_argparse:
options.extend((sorted(s_opt.option_strings)[0], s_opt.nargs != 0) for s_opt in
parser._actions if s_opt.option_strings)
else:
options.extend((s_opt.get_opt_string(), s_opt.nargs != 0) for s_opt in
parser.option_list)
# filter out previously specified options from available options
prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
options = [opt for opt in options if opt[0] not in prev_opts]
# filter options by current input
options = sorted((k, v) for k, v in options if k.startswith(curr))
for option in options:
opt_label = option[0]
# append '=' to options which require args
if option[1]:
opt_label += '='
print(opt_label)
sys.exit(1)
def execute(self):
"""
Given the command-line arguments, this figures out which subcommand is
being run, creates a parser appropriate to that command, and runs it.
"""
try:
subcommand = self.argv[1]
except IndexError:
subcommand = 'help' # Display help if no arguments were given.
# Preprocess options to extract --settings and --pythonpath.
# These options could affect the commands that are available, so they
# must be processed early.
parser = CommandParser(None, usage="%(prog)s subcommand [options] [args]", add_help=False)
parser.add_argument('--settings')
parser.add_argument('--pythonpath')
parser.add_argument('args', nargs='*') # catch-all
try:
options, args = parser.parse_known_args(self.argv[2:])
handle_default_options(options)
except CommandError:
pass # Ignore any option errors at this point.
no_settings_commands = [
'help', 'version', '--help', '--version', '-h',
'compilemessages', 'makemessages',
'startapp', 'startproject',
]
try:
settings.INSTALLED_APPS
except ImproperlyConfigured as exc:
self.settings_exception = exc
# A handful of built-in management commands work without settings.
# Load the default settings -- where INSTALLED_APPS is empty.
if subcommand in no_settings_commands:
settings.configure()
if settings.configured:
# Start the auto-reloading dev server even if the code is broken.
# The hardcoded condition is a code smell but we can't rely on a
# flag on the command class because we haven't located it yet.
if subcommand == 'runserver' and '--noreload' not in self.argv:
try:
autoreload.check_errors(django.setup)()
except Exception:
# The exception will be raised later in the child process
# started by the autoreloader. Pretend it didn't happen by
# loading an empty list of applications.
apps.all_models = defaultdict(OrderedDict)
apps.app_configs = OrderedDict()
apps.apps_ready = apps.models_ready = apps.ready = True
# In all other cases, django.setup() is required to succeed.
else:
django.setup()
self.autocomplete()
if subcommand == 'help':
if '--commands' in args:
sys.stdout.write(self.main_help_text(commands_only=True) + '\n')
elif len(options.args) < 1:
sys.stdout.write(self.main_help_text() + '\n')
else:
self.fetch_command(options.args[0]).print_help(self.prog_name, options.args[0])
# Special-cases: We want 'django-admin --version' and
# 'django-admin --help' to work, for backwards compatibility.
elif subcommand == 'version' or self.argv[1:] == ['--version']:
sys.stdout.write(django.get_version() + '\n')
elif self.argv[1:] in (['--help'], ['-h']):
sys.stdout.write(self.main_help_text() + '\n')
else:
self.fetch_command(subcommand).run_from_argv(self.argv)
def execute_from_command_line(argv=None):
"""
A simple method that runs a ManagementUtility.
"""
utility = ManagementUtility(argv)
utility.execute()
|
crb02005/WebDevSkeleton | refs/heads/master | src/serverApp/router.py | 1 | from serverApp.viewmodel import Service
class Router(object):
""" Provides routing for web addresses """
def __init__(self, flask):
self._api = flask.get_api()
def register_routes(self):
""" Registers View Models on Routes """
self._api.add_resource(Service, '/api/v1/<string:data>')
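# Illustrative wiring (hypothetical `flask_wrapper` object exposing get_api()):
#   router = Router(flask_wrapper)
#   router.register_routes()  # exposes Service at /api/v1/<data>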
|
lgarren/spack | refs/heads/develop | var/spack/repos/builtin/packages/r-shape/package.py | 1 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RShape(RPackage):
"""Functions for plotting graphical shapes such as ellipses, circles,
cylinders, arrows, ..."""
homepage = "https://cran.r-project.org/package=shape"
url = "https://cran.r-project.org/src/contrib/shape_1.4.3.tar.gz"
version('1.4.3', '2a807bf95e7decc71478f805221852da')
version('1.4.2', '75557c43a385b9cc0c4dff361af6e06c')
|
Fiware/cloud.PaaS | refs/heads/master | test/acceptance/integration/environments/get_tier_details/__init__.py | 86 | # -*- coding: utf-8 -*-
# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with [email protected]
|
sagar30051991/ozsmart-erp | refs/heads/master | erpnext/setup/doctype/uom/test_uom.py | 115 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
test_records = frappe.get_test_records('UOM')
|
haxoza/django | refs/heads/master | django/contrib/redirects/models.py | 303 | from django.contrib.sites.models import Site
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class Redirect(models.Model):
site = models.ForeignKey(Site, models.CASCADE, verbose_name=_('site'))
old_path = models.CharField(_('redirect from'), max_length=200, db_index=True,
help_text=_("This should be an absolute path, excluding the domain name. Example: '/events/search/'."))
new_path = models.CharField(_('redirect to'), max_length=200, blank=True,
help_text=_("This can be either an absolute path (as above) or a full URL starting with 'http://'."))
class Meta:
verbose_name = _('redirect')
verbose_name_plural = _('redirects')
db_table = 'django_redirect'
unique_together = (('site', 'old_path'),)
ordering = ('old_path',)
def __str__(self):
return "%s ---> %s" % (self.old_path, self.new_path)
|
eford/rebound | refs/heads/master | python_examples/horizons/problem.py | 1 | import matplotlib; matplotlib.use("pdf")
import matplotlib.pyplot as plt
import rebound
import socket
import sys
import os.path
import os
filename = "cache.bin"
if 'TRAVIS' in os.environ:
# Shorter built time
solar_system_objects = ["Sun", "Mercury"]
else:
# More planets
solar_system_objects = ["Sun", "Mercury", "Venus", "Earth", "Mars", "Jupiter", "Saturn", "Uranus", "Neptune", "C/2014 Q2"]
if os.path.isfile(filename):
# Try to load simulation from file
sim = rebound.Simulation.from_file(filename)
else:
sim = rebound.Simulation()
# Get data from NASA Horizons
try:
sim.add(solar_system_objects)
except socket.error:
print("A socket error occured. Maybe Horizons is down?")
sys.exit(0) # we ignore the error and exit
sim.move_to_com()
# Configure simulation
sim.integrator = "whfast"
    sim.dt = 0.01
# Let's save it for next time
# Note: sim.save() only saves the particle data, not the integrator settings, etc.
sim.save(filename)
sim.status()
import numpy as np
Nout = 1000
times = np.linspace(0,16.*np.pi,Nout) # 8 years
x = np.zeros((sim.N,Nout))
y = np.zeros((sim.N,Nout))
ps = sim.particles
for ti,t in enumerate(times):
sim.integrate(t)
for i, p in enumerate(ps):
x[i][ti] = p.x
y[i][ti] = p.y
fig = plt.figure(figsize=(11,5))
def plot(zoom):
ax.set_xlim([-zoom,zoom])
ax.set_ylim([-zoom,zoom])
ax.set_xlabel("x [AU]")
ax.set_ylabel("y [AU]")
for i in xrange(0,sim.N):
plt.plot(x[i],y[i])
if x[i][-1]*x[i][-1]+y[i][-1]*y[i][-1]>0.01*zoom*zoom or i==0:
ax.annotate(solar_system_objects[i], xy=(x[i][-1], y[i][-1]),horizontalalignment="center")
ax = plt.subplot(121)
plot(zoom=24.)
ax = plt.subplot(122)
plot(zoom=1.2)
plt.savefig("orbits.pdf")
|
hkupty/python-mode | refs/heads/develop | pymode/libs/pylama/lint/pylama_pylint/logilab/common/configuration.py | 19 | # copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Classes to handle advanced configuration in simple to complex applications.
Allows loading the configuration from a file or from command-line
options, generating a sample configuration file, and displaying the
program's usage. Fills the gap between optik/optparse and ConfigParser
by adding data types (which are also available as a standalone optik
extension in the `optik_ext` module).
Quick start: simplest usage
---------------------------
.. python ::
>>> import sys
>>> from logilab.common.configuration import Configuration
>>> options = [('dothis', {'type':'yn', 'default': True, 'metavar': '<y or n>'}),
... ('value', {'type': 'string', 'metavar': '<string>'}),
... ('multiple', {'type': 'csv', 'default': ('yop',),
... 'metavar': '<comma separated values>',
... 'help': 'you can also document the option'}),
... ('number', {'type': 'int', 'default':2, 'metavar':'<int>'}),
... ]
>>> config = Configuration(options=options, name='My config')
>>> print config['dothis']
True
>>> print config['value']
None
>>> print config['multiple']
('yop',)
>>> print config['number']
2
>>> print config.help()
Usage: [options]
Options:
-h, --help show this help message and exit
--dothis=<y or n>
--value=<string>
--multiple=<comma separated values>
you can also document the option [current: none]
--number=<int>
>>> f = open('myconfig.ini', 'w')
>>> f.write('''[MY CONFIG]
... number = 3
... dothis = no
... multiple = 1,2,3
... ''')
>>> f.close()
>>> config.load_file_configuration('myconfig.ini')
>>> print config['dothis']
False
>>> print config['value']
None
>>> print config['multiple']
['1', '2', '3']
>>> print config['number']
3
>>> sys.argv = ['mon prog', '--value', 'bacon', '--multiple', '4,5,6',
... 'nonoptionargument']
>>> print config.load_command_line_configuration()
['nonoptionargument']
>>> print config['value']
bacon
>>> config.generate_config()
# class for simple configurations which don't need the
# manager / providers model and prefer delegation to inheritance
#
# configuration values are accessible through a dict like interface
#
[MY CONFIG]
dothis=no
value=bacon
# you can also document the option
multiple=4,5,6
number=3
>>>
"""
__docformat__ = "restructuredtext en"
__all__ = ('OptionsManagerMixIn', 'OptionsProviderMixIn',
'ConfigurationMixIn', 'Configuration',
'OptionsManager2ConfigurationAdapter')
import os
import sys
import re
from os.path import exists, expanduser
from copy import copy
from ConfigParser import ConfigParser, NoOptionError, NoSectionError, \
DuplicateSectionError
from warnings import warn
from logilab.common.compat import callable, raw_input, str_encode as _encode
from logilab.common.deprecation import deprecated
from logilab.common.textutils import normalize_text, unquote
from logilab.common import optik_ext
OptionError = optik_ext.OptionError
REQUIRED = []
class UnsupportedAction(Exception):
"""raised by set_option when it doesn't know what to do for an action"""
def _get_encoding(encoding, stream):
encoding = encoding or getattr(stream, 'encoding', None)
if not encoding:
import locale
encoding = locale.getpreferredencoding()
return encoding
# validation functions ########################################################
# validators will return the validated value or raise optparse.OptionValueError
# XXX add to documentation
def choice_validator(optdict, name, value):
"""validate and return a converted value for option of type 'choice'
"""
if not value in optdict['choices']:
msg = "option %s: invalid value: %r, should be in %s"
raise optik_ext.OptionValueError(msg % (name, value, optdict['choices']))
return value
def multiple_choice_validator(optdict, name, value):
    """validate and return converted values for option of type 'multiple_choice'
    """
choices = optdict['choices']
values = optik_ext.check_csv(None, name, value)
for value in values:
if not value in choices:
msg = "option %s: invalid value: %r, should be in %s"
raise optik_ext.OptionValueError(msg % (name, value, choices))
return values
def csv_validator(optdict, name, value):
"""validate and return a converted value for option of type 'csv'
"""
return optik_ext.check_csv(None, name, value)
def yn_validator(optdict, name, value):
"""validate and return a converted value for option of type 'yn'
"""
return optik_ext.check_yn(None, name, value)
def named_validator(optdict, name, value):
"""validate and return a converted value for option of type 'named'
"""
return optik_ext.check_named(None, name, value)
def file_validator(optdict, name, value):
"""validate and return a filepath for option of type 'file'"""
return optik_ext.check_file(None, name, value)
def color_validator(optdict, name, value):
"""validate and return a valid color for option of type 'color'"""
return optik_ext.check_color(None, name, value)
def password_validator(optdict, name, value):
"""validate and return a string for option of type 'password'"""
return optik_ext.check_password(None, name, value)
def date_validator(optdict, name, value):
"""validate and return a mx DateTime object for option of type 'date'"""
return optik_ext.check_date(None, name, value)
def time_validator(optdict, name, value):
"""validate and return a time object for option of type 'time'"""
return optik_ext.check_time(None, name, value)
def bytes_validator(optdict, name, value):
"""validate and return an integer for option of type 'bytes'"""
return optik_ext.check_bytes(None, name, value)
VALIDATORS = {'string': unquote,
'int': int,
'float': float,
'file': file_validator,
'font': unquote,
'color': color_validator,
'regexp': re.compile,
'csv': csv_validator,
'yn': yn_validator,
'bool': yn_validator,
'named': named_validator,
'password': password_validator,
'date': date_validator,
'time': time_validator,
'bytes': bytes_validator,
'choice': choice_validator,
'multiple_choice': multiple_choice_validator,
}
def _call_validator(opttype, optdict, option, value):
if opttype not in VALIDATORS:
raise Exception('Unsupported type "%s"' % opttype)
try:
return VALIDATORS[opttype](optdict, option, value)
except TypeError:
try:
return VALIDATORS[opttype](value)
except optik_ext.OptionValueError:
raise
except:
raise optik_ext.OptionValueError('%s value (%r) should be of type %s' %
(option, value, opttype))
# user input functions ########################################################
# user input functions will ask the user for input on stdin then validate
# the result and return the validated value or raise optparse.OptionValueError
# XXX add to documentation
def input_password(optdict, question='password:'):
from getpass import getpass
while True:
value = getpass(question)
value2 = getpass('confirm: ')
if value == value2:
return value
print 'password mismatch, try again'
def input_string(optdict, question):
value = raw_input(question).strip()
return value or None
def _make_input_function(opttype):
def input_validator(optdict, question):
while True:
value = raw_input(question)
if not value.strip():
return None
try:
return _call_validator(opttype, optdict, None, value)
except optik_ext.OptionValueError, ex:
msg = str(ex).split(':', 1)[-1].strip()
print 'bad value: %s' % msg
return input_validator
INPUT_FUNCTIONS = {
'string': input_string,
'password': input_password,
}
for opttype in VALIDATORS.keys():
INPUT_FUNCTIONS.setdefault(opttype, _make_input_function(opttype))
# utility functions ############################################################
def expand_default(self, option):
"""monkey patch OptionParser.expand_default since we have a particular
way to handle defaults to avoid overriding values in the configuration
file
"""
if self.parser is None or not self.default_tag:
return option.help
optname = option._long_opts[0][2:]
try:
provider = self.parser.options_manager._all_options[optname]
except KeyError:
value = None
else:
optdict = provider.get_option_def(optname)
optname = provider.option_attrname(optname, optdict)
value = getattr(provider.config, optname, optdict)
value = format_option_value(optdict, value)
if value is optik_ext.NO_DEFAULT or not value:
value = self.NO_DEFAULT_VALUE
return option.help.replace(self.default_tag, str(value))
def _validate(value, optdict, name=''):
"""return a validated value for an option according to its type
optional argument name is only used for error message formatting
"""
try:
_type = optdict['type']
except KeyError:
# FIXME
return value
return _call_validator(_type, optdict, name, value)
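# Illustrative usage of the validation machinery above:
#   _validate('4,5,6', {'type': 'csv'}, 'multiple')  # -> ['4', '5', '6']
#   _validate('yes', {'type': 'yn'}, 'dothis')       # -> True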
convert = deprecated('[0.60] convert() was renamed _validate()')(_validate)
# format and output functions ##################################################
def comment(string):
"""return string as a comment"""
lines = [line.strip() for line in string.splitlines()]
return '# ' + ('%s# ' % os.linesep).join(lines)
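# e.g. comment('a\nb') returns '# a' + os.linesep + '# b'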
def format_time(value):
if not value:
return '0'
if value != int(value):
return '%.2fs' % value
value = int(value)
nbmin, nbsec = divmod(value, 60)
if nbsec:
return '%ss' % value
nbhour, nbmin_ = divmod(nbmin, 60)
if nbmin_:
return '%smin' % nbmin
nbday, nbhour_ = divmod(nbhour, 24)
if nbhour_:
return '%sh' % nbhour
return '%sd' % nbday
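# e.g. format_time(0.5) -> '0.50s', format_time(120) -> '2min',
# format_time(3600) -> '1h', format_time(86400) -> '1d'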
def format_bytes(value):
if not value:
return '0'
if value != int(value):
return '%.2fB' % value
value = int(value)
prevunit = 'B'
for unit in ('KB', 'MB', 'GB', 'TB'):
next, remain = divmod(value, 1024)
if remain:
return '%s%s' % (value, prevunit)
prevunit = unit
value = next
return '%s%s' % (value, unit)
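# Behaviour sketch of format_bytes: as with format_time, the unit is only
# upgraded on exact multiples of 1024, e.g.
#
#   format_bytes(512)     -> '512B'
#   format_bytes(1536)    -> '1536B'  (not a whole number of KB)
#   format_bytes(2048)    -> '2KB'
#   format_bytes(1048576) -> '1MB'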
def format_option_value(optdict, value):
"""return the user input's value from a 'compiled' value"""
if isinstance(value, (list, tuple)):
value = ','.join(value)
elif isinstance(value, dict):
value = ','.join(['%s:%s' % (k, v) for k, v in value.items()])
elif hasattr(value, 'match'): # optdict.get('type') == 'regexp'
# compiled regexp
value = value.pattern
elif optdict.get('type') == 'yn':
value = value and 'yes' or 'no'
elif isinstance(value, (str, unicode)) and value.isspace():
value = "'%s'" % value
elif optdict.get('type') == 'time' and isinstance(value, (float, int, long)):
value = format_time(value)
elif optdict.get('type') == 'bytes' and hasattr(value, '__int__'):
value = format_bytes(value)
return value
def ini_format_section(stream, section, options, encoding=None, doc=None):
"""format an options section using the INI format"""
encoding = _get_encoding(encoding, stream)
if doc:
print >> stream, _encode(comment(doc), encoding)
print >> stream, '[%s]' % section
ini_format(stream, options, encoding)
def ini_format(stream, options, encoding):
"""format options using the INI format"""
for optname, optdict, value in options:
value = format_option_value(optdict, value)
help = optdict.get('help')
if help:
help = normalize_text(help, line_len=79, indent='# ')
print >> stream
print >> stream, _encode(help, encoding)
else:
print >> stream
if value is None:
print >> stream, '#%s=' % optname
else:
value = _encode(value, encoding).strip()
print >> stream, '%s=%s' % (optname, value)
format_section = ini_format_section
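# Output sketch (hypothetical section and option names): for one documented
# option, ini_format_section writes something like
#
#   [SAMPLE]
#
#   # maximum number of parallel jobs
#   jobs=4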
def rest_format_section(stream, section, options, encoding=None, doc=None):
"""format an options section using the INI format"""
encoding = _get_encoding(encoding, stream)
if section:
print >> stream, '%s\n%s' % (section, "'"*len(section))
if doc:
print >> stream, _encode(normalize_text(doc, line_len=79, indent=''),
encoding)
print >> stream
for optname, optdict, value in options:
help = optdict.get('help')
print >> stream, ':%s:' % optname
if help:
help = normalize_text(help, line_len=79, indent=' ')
print >> stream, _encode(help, encoding)
if value:
value = _encode(format_option_value(optdict, value), encoding)
print >> stream, ''
print >> stream, ' Default: ``%s``' % value.replace("`` ", "```` ``")
# Options Manager ##############################################################
class OptionsManagerMixIn(object):
"""MixIn to handle a configuration from both a configuration file and
command line options
"""
def __init__(self, usage, config_file=None, version=None, quiet=0):
self.config_file = config_file
self.reset_parsers(usage, version=version)
# list of registered options providers
self.options_providers = []
# dictionary associating option name to checker
self._all_options = {}
self._short_options = {}
self._nocallback_options = {}
self._mygroups = dict()
# verbosity
self.quiet = quiet
self._maxlevel = 0
def reset_parsers(self, usage='', version=None):
# configuration file parser
self.cfgfile_parser = ConfigParser()
# command line parser
self.cmdline_parser = optik_ext.OptionParser(usage=usage, version=version)
self.cmdline_parser.options_manager = self
self._optik_option_attrs = set(self.cmdline_parser.option_class.ATTRS)
def register_options_provider(self, provider, own_group=True):
"""register an options provider"""
        assert provider.priority <= 0, "provider's priority can't be > 0"
for i in range(len(self.options_providers)):
if provider.priority > self.options_providers[i].priority:
self.options_providers.insert(i, provider)
break
else:
self.options_providers.append(provider)
non_group_spec_options = [option for option in provider.options
if 'group' not in option[1]]
groups = getattr(provider, 'option_groups', ())
if own_group and non_group_spec_options:
self.add_option_group(provider.name.upper(), provider.__doc__,
non_group_spec_options, provider)
else:
for opt, optdict in non_group_spec_options:
self.add_optik_option(provider, self.cmdline_parser, opt, optdict)
for gname, gdoc in groups:
gname = gname.upper()
goptions = [option for option in provider.options
if option[1].get('group', '').upper() == gname]
self.add_option_group(gname, gdoc, goptions, provider)
def add_option_group(self, group_name, doc, options, provider):
"""add an option group including the listed options
"""
assert options
# add option group to the command line parser
if group_name in self._mygroups:
group = self._mygroups[group_name]
else:
group = optik_ext.OptionGroup(self.cmdline_parser,
title=group_name.capitalize())
self.cmdline_parser.add_option_group(group)
group.level = provider.level
self._mygroups[group_name] = group
# add section to the config file
if group_name != "DEFAULT":
self.cfgfile_parser.add_section(group_name)
# add provider's specific options
for opt, optdict in options:
self.add_optik_option(provider, group, opt, optdict)
def add_optik_option(self, provider, optikcontainer, opt, optdict):
if 'inputlevel' in optdict:
warn('[0.50] "inputlevel" in option dictionary for %s is deprecated,'
' use "level"' % opt, DeprecationWarning)
optdict['level'] = optdict.pop('inputlevel')
args, optdict = self.optik_option(provider, opt, optdict)
option = optikcontainer.add_option(*args, **optdict)
self._all_options[opt] = provider
self._maxlevel = max(self._maxlevel, option.level or 0)
def optik_option(self, provider, opt, optdict):
"""get our personal option definition and return a suitable form for
use with optik/optparse
"""
optdict = copy(optdict)
others = {}
if 'action' in optdict:
self._nocallback_options[provider] = opt
else:
optdict['action'] = 'callback'
optdict['callback'] = self.cb_set_provider_option
# default is handled here and *must not* be given to optik if you
# want the whole machinery to work
if 'default' in optdict:
if ('help' in optdict
and optdict.get('default') is not None
and not optdict['action'] in ('store_true', 'store_false')):
optdict['help'] += ' [current: %default]'
del optdict['default']
args = ['--' + str(opt)]
if 'short' in optdict:
self._short_options[optdict['short']] = opt
args.append('-' + optdict['short'])
del optdict['short']
# cleanup option definition dict before giving it to optik
        for key in optdict.keys():
            if key not in self._optik_option_attrs:
                optdict.pop(key)
return args, optdict
def cb_set_provider_option(self, option, opt, value, parser):
"""optik callback for option setting"""
if opt.startswith('--'):
# remove -- on long option
opt = opt[2:]
else:
# short option, get its long equivalent
opt = self._short_options[opt[1:]]
# trick since we can't set action='store_true' on options
if value is None:
value = 1
self.global_set_option(opt, value)
def global_set_option(self, opt, value):
"""set option on the correct option provider"""
self._all_options[opt].set_option(opt, value)
def generate_config(self, stream=None, skipsections=(), encoding=None):
"""write a configuration file according to the current configuration
into the given stream or stdout
"""
options_by_section = {}
sections = []
for provider in self.options_providers:
for section, options in provider.options_by_section():
if section is None:
section = provider.name
if section in skipsections:
continue
options = [(n, d, v) for (n, d, v) in options
if d.get('type') is not None]
if not options:
continue
                if section not in sections:
sections.append(section)
alloptions = options_by_section.setdefault(section, [])
alloptions += options
stream = stream or sys.stdout
encoding = _get_encoding(encoding, stream)
printed = False
for section in sections:
if printed:
print >> stream, '\n'
format_section(stream, section.upper(), options_by_section[section],
encoding)
printed = True
def generate_manpage(self, pkginfo, section=1, stream=None):
"""write a man page for the current configuration into the given
stream or stdout
"""
self._monkeypatch_expand_default()
try:
optik_ext.generate_manpage(self.cmdline_parser, pkginfo,
section, stream=stream or sys.stdout,
level=self._maxlevel)
finally:
self._unmonkeypatch_expand_default()
# initialization methods ##################################################
def load_provider_defaults(self):
"""initialize configuration using default values"""
for provider in self.options_providers:
provider.load_defaults()
def load_file_configuration(self, config_file=None):
"""load the configuration from file"""
self.read_config_file(config_file)
self.load_config_file()
def read_config_file(self, config_file=None):
"""read the configuration file but do not load it (i.e. dispatching
values to each options provider)
"""
helplevel = 1
while helplevel <= self._maxlevel:
opt = '-'.join(['long'] * helplevel) + '-help'
if opt in self._all_options:
break # already processed
def helpfunc(option, opt, val, p, level=helplevel):
print self.help(level)
sys.exit(0)
helpmsg = '%s verbose help.' % ' '.join(['more'] * helplevel)
optdict = {'action' : 'callback', 'callback' : helpfunc,
'help' : helpmsg}
provider = self.options_providers[0]
self.add_optik_option(provider, self.cmdline_parser, opt, optdict)
provider.options += ( (opt, optdict), )
helplevel += 1
if config_file is None:
config_file = self.config_file
if config_file is not None:
config_file = expanduser(config_file)
if config_file and exists(config_file):
parser = self.cfgfile_parser
parser.read([config_file])
            # normalize sections' titles
for sect, values in parser._sections.items():
if not sect.isupper() and values:
parser._sections[sect.upper()] = values
elif not self.quiet:
msg = 'No config file found, using default configuration'
print >> sys.stderr, msg
return
def input_config(self, onlysection=None, inputlevel=0, stream=None):
"""interactively get configuration values by asking to the user and generate
a configuration file
"""
if onlysection is not None:
onlysection = onlysection.upper()
for provider in self.options_providers:
for section, option, optdict in provider.all_options():
if onlysection is not None and section != onlysection:
continue
if not 'type' in optdict:
# ignore action without type (callback, store_true...)
continue
provider.input_option(option, optdict, inputlevel)
# now we can generate the configuration file
if stream is not None:
self.generate_config(stream)
def load_config_file(self):
"""dispatch values previously read from a configuration file to each
        options provider
"""
parser = self.cfgfile_parser
for provider in self.options_providers:
for section, option, optdict in provider.all_options():
try:
value = parser.get(section, option)
provider.set_option(option, value, optdict=optdict)
                except (NoSectionError, NoOptionError):
continue
def load_configuration(self, **kwargs):
"""override configuration according to given parameters
"""
for opt, opt_value in kwargs.items():
opt = opt.replace('_', '-')
provider = self._all_options[opt]
provider.set_option(opt, opt_value)
def load_command_line_configuration(self, args=None):
"""override configuration according to command line parameters
return additional arguments
"""
self._monkeypatch_expand_default()
try:
if args is None:
args = sys.argv[1:]
else:
args = list(args)
(options, args) = self.cmdline_parser.parse_args(args=args)
for provider in self._nocallback_options.keys():
config = provider.config
for attr in config.__dict__.keys():
value = getattr(options, attr, None)
if value is None:
continue
setattr(config, attr, value)
return args
finally:
self._unmonkeypatch_expand_default()
# help methods ############################################################
def add_help_section(self, title, description, level=0):
"""add a dummy option section for help purpose """
group = optik_ext.OptionGroup(self.cmdline_parser,
title=title.capitalize(),
description=description)
group.level = level
self._maxlevel = max(self._maxlevel, level)
self.cmdline_parser.add_option_group(group)
def _monkeypatch_expand_default(self):
# monkey patch optik_ext to deal with our default values
try:
self.__expand_default_backup = optik_ext.HelpFormatter.expand_default
optik_ext.HelpFormatter.expand_default = expand_default
except AttributeError:
# python < 2.4: nothing to be done
pass
def _unmonkeypatch_expand_default(self):
# remove monkey patch
if hasattr(optik_ext.HelpFormatter, 'expand_default'):
# unpatch optik_ext to avoid side effects
optik_ext.HelpFormatter.expand_default = self.__expand_default_backup
def help(self, level=0):
"""return the usage string for available options """
self.cmdline_parser.formatter.output_level = level
self._monkeypatch_expand_default()
try:
return self.cmdline_parser.format_help()
finally:
self._unmonkeypatch_expand_default()
class Method(object):
"""used to ease late binding of default method (so you can define options
on the class using default methods on the configuration instance)
"""
def __init__(self, methname):
self.method = methname
self._inst = None
def bind(self, instance):
"""bind the method to its instance"""
if self._inst is None:
self._inst = instance
def __call__(self, *args, **kwargs):
assert self._inst, 'unbound method'
return getattr(self._inst, self.method)(*args, **kwargs)
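# Usage sketch (hypothetical names): Method lets an options tuple defined at
# class level default to a method that only exists on the instance:
#
#   options = (('threads', {'type': 'int',
#                           'default': Method('default_threads')}),)
#   # OptionsProviderMixIn.__init__ calls optdict['default'].bind(self);
#   # from then on the Method object is callable and returns
#   # self.default_threads()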
# Options Provider #############################################################
class OptionsProviderMixIn(object):
"""Mixin to provide options to an OptionsManager"""
# those attributes should be overridden
priority = -1
name = 'default'
options = ()
level = 0
def __init__(self):
self.config = optik_ext.Values()
for option in self.options:
try:
option, optdict = option
except ValueError:
raise Exception('Bad option: %r' % option)
if isinstance(optdict.get('default'), Method):
optdict['default'].bind(self)
elif isinstance(optdict.get('callback'), Method):
optdict['callback'].bind(self)
self.load_defaults()
def load_defaults(self):
"""initialize the provider using default values"""
for opt, optdict in self.options:
action = optdict.get('action')
if action != 'callback':
                # callback actions have no default
default = self.option_default(opt, optdict)
if default is REQUIRED:
continue
self.set_option(opt, default, action, optdict)
def option_default(self, opt, optdict=None):
"""return the default value for an option"""
if optdict is None:
optdict = self.get_option_def(opt)
default = optdict.get('default')
if callable(default):
default = default()
return default
def option_attrname(self, opt, optdict=None):
"""get the config attribute corresponding to opt
"""
if optdict is None:
optdict = self.get_option_def(opt)
return optdict.get('dest', opt.replace('-', '_'))
option_name = deprecated('[0.60] OptionsProviderMixIn.option_name() was renamed to option_attrname()')(option_attrname)
def option_value(self, opt):
"""get the current value for the given option"""
return getattr(self.config, self.option_attrname(opt), None)
def set_option(self, opt, value, action=None, optdict=None):
"""method called to set an option (registered in the options list)
"""
if optdict is None:
optdict = self.get_option_def(opt)
if value is not None:
value = _validate(value, optdict, opt)
if action is None:
action = optdict.get('action', 'store')
if optdict.get('type') == 'named': # XXX need specific handling
optname = self.option_attrname(opt, optdict)
currentvalue = getattr(self.config, optname, None)
if currentvalue:
currentvalue.update(value)
value = currentvalue
if action == 'store':
setattr(self.config, self.option_attrname(opt, optdict), value)
elif action in ('store_true', 'count'):
setattr(self.config, self.option_attrname(opt, optdict), 0)
elif action == 'store_false':
setattr(self.config, self.option_attrname(opt, optdict), 1)
elif action == 'append':
opt = self.option_attrname(opt, optdict)
_list = getattr(self.config, opt, None)
if _list is None:
if isinstance(value, (list, tuple)):
_list = value
elif value is not None:
_list = []
_list.append(value)
setattr(self.config, opt, _list)
elif isinstance(_list, tuple):
setattr(self.config, opt, _list + (value,))
else:
_list.append(value)
elif action == 'callback':
optdict['callback'](None, opt, value, None)
else:
raise UnsupportedAction(action)
def input_option(self, option, optdict, inputlevel=99):
default = self.option_default(option, optdict)
if default is REQUIRED:
defaultstr = '(required): '
elif optdict.get('level', 0) > inputlevel:
return
elif optdict['type'] == 'password' or default is None:
defaultstr = ': '
else:
defaultstr = '(default: %s): ' % format_option_value(optdict, default)
print ':%s:' % option
print optdict.get('help') or option
inputfunc = INPUT_FUNCTIONS[optdict['type']]
value = inputfunc(optdict, defaultstr)
while default is REQUIRED and not value:
print 'please specify a value'
value = inputfunc(optdict, '%s: ' % option)
if value is None and default is not None:
value = default
self.set_option(option, value, optdict=optdict)
def get_option_def(self, opt):
"""return the dictionary defining an option given it's name"""
assert self.options
for option in self.options:
if option[0] == opt:
return option[1]
raise OptionError('no such option %s in section %r'
% (opt, self.name), opt)
def all_options(self):
"""return an iterator on available options for this provider
        options are actually described by a 3-tuple:
(section, option name, option dictionary)
"""
for section, options in self.options_by_section():
if section is None:
if self.name is None:
continue
section = self.name.upper()
for option, optiondict, value in options:
yield section, option, optiondict
def options_by_section(self):
"""return an iterator on options grouped by section
(section, [list of (optname, optdict, optvalue)])
"""
sections = {}
for optname, optdict in self.options:
sections.setdefault(optdict.get('group'), []).append(
(optname, optdict, self.option_value(optname)))
if None in sections:
yield None, sections.pop(None)
for section, options in sections.items():
yield section.upper(), options
def options_and_values(self, options=None):
if options is None:
options = self.options
for optname, optdict in options:
yield (optname, optdict, self.option_value(optname))
# configuration ################################################################
class ConfigurationMixIn(OptionsManagerMixIn, OptionsProviderMixIn):
"""basic mixin for simple configurations which don't need the
manager / providers model
"""
def __init__(self, *args, **kwargs):
if not args:
kwargs.setdefault('usage', '')
kwargs.setdefault('quiet', 1)
OptionsManagerMixIn.__init__(self, *args, **kwargs)
OptionsProviderMixIn.__init__(self)
if not getattr(self, 'option_groups', None):
self.option_groups = []
for option, optdict in self.options:
try:
gdef = (optdict['group'].upper(), '')
except KeyError:
continue
if not gdef in self.option_groups:
self.option_groups.append(gdef)
self.register_options_provider(self, own_group=False)
    def register_options(self, options):
        """add some options to the configuration"""
        options_by_group = {}
        for optname, optdict in options:
            options_by_group.setdefault(
                optdict.get('group', self.name.upper()), []).append((optname, optdict))
        for group, group_options in options_by_group.items():
            self.add_option_group(group, None, group_options, self)
        # register *all* the new options: the loop variable must not shadow
        # the full list, else only the last group's options would be kept
        self.options += tuple(options)
def load_defaults(self):
OptionsProviderMixIn.load_defaults(self)
def __iter__(self):
return iter(self.config.__dict__.iteritems())
def __getitem__(self, key):
try:
return getattr(self.config, self.option_attrname(key))
except (optik_ext.OptionValueError, AttributeError):
raise KeyError(key)
def __setitem__(self, key, value):
self.set_option(key, value)
def get(self, key, default=None):
try:
return getattr(self.config, self.option_attrname(key))
except (OptionError, AttributeError):
return default
class Configuration(ConfigurationMixIn):
"""class for simple configurations which don't need the
manager / providers model and prefer delegation to inheritance
configuration values are accessible through a dict like interface
"""
def __init__(self, config_file=None, options=None, name=None,
usage=None, doc=None, version=None):
if options is not None:
self.options = options
if name is not None:
self.name = name
if doc is not None:
self.__doc__ = doc
super(Configuration, self).__init__(config_file=config_file, usage=usage, version=version)
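# Usage sketch (hypothetical option names): Configuration exposes a dict-like
# interface over a flat option set, values going through the type validators:
#
#   config = Configuration(name='sample', options=(
#       ('threads', {'type': 'int', 'default': 4, 'help': 'worker count'}),
#   ))
#   print config['threads']   # -> 4
#   config['threads'] = 8     # validated as an 'int' before being stored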
class OptionsManager2ConfigurationAdapter(object):
"""Adapt an option manager to behave like a
`logilab.common.configuration.Configuration` instance
"""
def __init__(self, provider):
self.config = provider
def __getattr__(self, key):
return getattr(self.config, key)
def __getitem__(self, key):
provider = self.config._all_options[key]
try:
return getattr(provider.config, provider.option_attrname(key))
except AttributeError:
raise KeyError(key)
def __setitem__(self, key, value):
self.config.global_set_option(self.config.option_attrname(key), value)
def get(self, key, default=None):
provider = self.config._all_options[key]
try:
return getattr(provider.config, provider.option_attrname(key))
except AttributeError:
return default
# other functions ##############################################################
def read_old_config(newconfig, changes, configfile):
"""initialize newconfig from a deprecated configuration file
possible changes:
* ('renamed', oldname, newname)
* ('moved', option, oldgroup, newgroup)
* ('typechanged', option, oldtype, newvalue)
"""
# build an index of changes
changesindex = {}
for action in changes:
if action[0] == 'moved':
option, oldgroup, newgroup = action[1:]
changesindex.setdefault(option, []).append((action[0], oldgroup, newgroup))
continue
if action[0] == 'renamed':
oldname, newname = action[1:]
changesindex.setdefault(newname, []).append((action[0], oldname))
continue
if action[0] == 'typechanged':
option, oldtype, newvalue = action[1:]
changesindex.setdefault(option, []).append((action[0], oldtype, newvalue))
continue
        if action[0] in ('added', 'removed'):
            continue # nothing to do here
raise Exception('unknown change %s' % action[0])
# build a config object able to read the old config
options = []
for optname, optdef in newconfig.options:
for action in changesindex.pop(optname, ()):
if action[0] == 'moved':
oldgroup, newgroup = action[1:]
optdef = optdef.copy()
optdef['group'] = oldgroup
elif action[0] == 'renamed':
optname = action[1]
elif action[0] == 'typechanged':
oldtype = action[1]
optdef = optdef.copy()
optdef['type'] = oldtype
options.append((optname, optdef))
if changesindex:
raise Exception('unapplied changes: %s' % changesindex)
oldconfig = Configuration(options=options, name=newconfig.name)
# read the old config
oldconfig.load_file_configuration(configfile)
# apply values reverting changes
changes.reverse()
done = set()
for action in changes:
if action[0] == 'renamed':
oldname, newname = action[1:]
newconfig[newname] = oldconfig[oldname]
done.add(newname)
elif action[0] == 'typechanged':
optname, oldtype, newvalue = action[1:]
newconfig[optname] = newvalue
done.add(optname)
for optname, optdef in newconfig.options:
if optdef.get('type') and not optname in done:
newconfig.set_option(optname, oldconfig[optname], optdict=optdef)
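# Example change list (hypothetical option names) in the format consumed by
# read_old_config above:
#
#   changes = [('renamed', 'max-threads', 'jobs'),
#              ('moved', 'verbose', 'MAIN', 'OUTPUT')]
#   read_old_config(newconfig, changes, 'old.cfg')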
def merge_options(options, optgroup=None):
"""preprocess a list of options and remove duplicates, returning a new list
(tuple actually) of options.
    Options dictionaries are copied to avoid later side-effects. Also, if
    `optgroup` argument is specified, ensure all options are in the given group.
"""
alloptions = {}
options = list(options)
for i in range(len(options)-1, -1, -1):
optname, optdict = options[i]
if optname in alloptions:
options.pop(i)
alloptions[optname].update(optdict)
else:
optdict = optdict.copy()
options[i] = (optname, optdict)
alloptions[optname] = optdict
if optgroup is not None:
alloptions[optname]['group'] = optgroup
return tuple(options)
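# Example (hypothetical options): duplicate names are collapsed into a single
# copied dict, so none of the caller's dictionaries is mutated:
#
#   merge_options([('jobs', {'type': 'int'}),
#                  ('jobs', {'default': 4})], optgroup='MAIN')
#   # -> (('jobs', {'type': 'int', 'default': 4, 'group': 'MAIN'}),)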
|
edmstudio/ansible | refs/heads/devel | contrib/inventory/freeipa.py | 146 | #!/usr/bin/env python
import argparse
from ipalib import api
import json
def initialize():
'''
This function initializes the FreeIPA/IPA API. This function requires
    no arguments. A Kerberos key must be present in the user's keyring in
order for this to work.
'''
api.bootstrap(context='cli')
api.finalize()
try:
api.Backend.rpcclient.connect()
except AttributeError:
#FreeIPA < 4.0 compatibility
api.Backend.xmlclient.connect()
return api
def list_groups(api):
'''
This function returns a list of all host groups. This function requires
one argument, the FreeIPA/IPA API object.
'''
inventory = {}
    hostvars = {}
result = api.Command.hostgroup_find()['result']
    for hostgroup in result:
        # empty host groups come back from the API without a 'member_host' key
        members = hostgroup.get('member_host', [])
        inventory[hostgroup['cn'][0]] = {'hosts': list(members)}
        for host in members:
            hostvars[host] = {}
inventory['_meta'] = {'hostvars': hostvars}
inv_string = json.dumps(inventory, indent=1, sort_keys=True)
print(inv_string)
return None
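# Output sketch (hypothetical group and host names) in Ansible's dynamic
# inventory format:
#
#   {
#    "_meta": {"hostvars": {"web01.example.com": {}}},
#    "webservers": {"hosts": ["web01.example.com"]}
#   }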
def parse_args():
'''
This function parses the arguments that were passed in via the command line.
This function expects no arguments.
'''
parser = argparse.ArgumentParser(description='Ansible FreeIPA/IPA '
'inventory module')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true',
help='List active servers')
group.add_argument('--host', help='List details about the specified host')
return parser.parse_args()
def print_host(host):
'''
    This function is really a stub; it could return variables to be used in
    a playbook. However, at this point there are no variables stored in
    FreeIPA/IPA.
    This function expects one string, the hostname to look up variables for.
'''
print(json.dumps({}))
return None
if __name__ == '__main__':
args = parse_args()
if args.host:
print_host(args.host)
elif args.list:
api = initialize()
list_groups(api)
|
developerfm/zulip | refs/heads/master | zerver/management/commands/logout_all_users.py | 114 | from __future__ import absolute_import
from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.lib.actions import delete_all_user_sessions, \
delete_realm_user_sessions
from zerver.models import Realm
class Command(BaseCommand):
help = "Log out all users."
option_list = BaseCommand.option_list + (
make_option('--realm',
dest='realm',
action='store',
default=None,
help="Only logout all users in a particular realm"),
)
def handle(self, *args, **options):
if options["realm"]:
realm = Realm.objects.get(domain=options["realm"])
delete_realm_user_sessions(realm)
else:
delete_all_user_sessions()
|